| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

Each record below consists of: a pipe-delimited row of repository metadata (columns `hexsha` through `max_forks_repo_forks_event_max_datetime`), the `content` field as a code block, and a pipe-delimited row of the remaining columns (`avg_line_length` through `hits`) in schema order.
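A minimal sketch of how rows with this schema might be inspected once exported locally; the parquet file name is an assumption, while the column names come from the schema table above:

```python
import pandas as pd

# Hypothetical local export of this dataset.
df = pd.read_parquet("code_rows.parquet")

# Keep Python files with little large-n-gram duplication and short lines,
# using quality-signal columns from the schema above.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.1)
    & (df["avg_line_length"] < 100.0)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "alphanum_fraction"]].head())
```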
| 4e96757c37df00a4561207275579e02e7d774aeb | 3,836 | py | Python | molly/routing/providers/cyclestreets.py | mollyproject/mollyproject | 3247c6bac3f39ce8d275d19aa410b30c6284b8a7 | ["Apache-2.0"] | 7 | 2015-05-16T13:27:21.000Z | 2019-08-06T11:09:24.000Z | molly/routing/providers/cyclestreets.py | mollyproject/mollyproject | 3247c6bac3f39ce8d275d19aa410b30c6284b8a7 | ["Apache-2.0"] | null | null | null | molly/routing/providers/cyclestreets.py | mollyproject/mollyproject | 3247c6bac3f39ce8d275d19aa410b30c6284b8a7 | ["Apache-2.0"] | 4 | 2015-11-27T13:36:36.000Z | 2021-03-09T17:55:53.000Z |
```python
from urllib import urlencode
from urllib2 import urlopen

import simplejson

from django.conf import settings
from django.contrib.gis.geos import Point, LineString
from django.utils.text import capfirst
from django.utils.translation import ugettext as _

from molly.apps.places.models import bearing_to_compass
from molly.utils.templatetags.molly_utils import humanise_distance, humanise_seconds

CYCLESTREETS_URL = 'http://www.cyclestreets.net/api/journey.json?%s'

if 'cyclestreets' not in settings.API_KEYS:
    # Cyclestreets not configured
    raise ImportError()


def generate_route(points, type):
    """
    Given 2 Points, this will return a route between them. The route consists
    of a dictionary with the following keys:

    * error (optional, and if set means that the object contains no route),
      which is a string describing any errors that occurred in plotting the
      route
    * total_time: An int of the number of seconds this route is estimated to
      take
    * total_distance: An int of the number of metres this route is expected to
      take
    * waypoints: A list of dictionaries, where each dictionary has 2 keys:
      'instruction', which is a human-readable description of the steps to be
      taken here, and 'location', which is a Point describing the route to be
      taken

    @param points: An ordered list of points to be included in this route
    @type points: [Point]
    @param type: The type of route to generate (foot, car or bike)
    @type type: str
    @return: A dictionary containing the route and metadata associated with it
    @rtype: dict
    """
    # Build Cyclestreets request:
    url = CYCLESTREETS_URL % urlencode({
        'key': settings.API_KEYS['cyclestreets'],
        'plan': 'balanced',
        'itinerarypoints': '|'.join('%f,%f' % (p[0], p[1]) for p in points)
    })
    json = simplejson.load(urlopen(url))
    if not json:
        return {
            'error': _('Unable to plot route')
        }
    else:
        summary = json['marker'][0]['@attributes']
        waypoints = []
        for i, waypoint in enumerate(json['marker'][1:]):
            segment = waypoint['@attributes']
            waypoints.append({
                'instruction': _('%(instruction)s at %(name)s') % {
                    'instruction': capfirst(segment['turn']),
                    'name': segment['name']
                },
                'additional': _('%(direction)s for %(distance)s (taking approximately %(time)s)') % {
                    'direction': bearing_to_compass(int(segment['startBearing'])),
                    'distance': humanise_distance(segment['distance'], False),
                    'time': humanise_seconds(segment['time'])
                },
                'waypoint_type': {
                    'straight on': 'straight',
                    'turn left': 'left',
                    'bear left': 'slight-left',
                    'sharp left': 'sharp-left',
                    'turn right': 'right',
                    'bear right': 'slight-right',
                    'sharp right': 'sharp-right',
                    'double-back': 'turn-around',
                }.get(segment['turn']),
                'location': Point(*map(float, segment['points'].split(' ')[0].split(','))),
                'path': LineString(map(lambda ps: Point(*map(float, ps.split(','))),
                                       segment['points'].split(' ')))
            })
        return {
            'total_time': summary['time'],
            'total_distance': summary['length'],
            'waypoints': waypoints,
            'path': LineString(map(lambda ps: Point(*map(float, ps.split(','))), summary['coordinates'].split(' ')))
        }
```
| 40.808511 | 116 | 0.569343 | 420 | 3,836 | 5.145238 | 0.414286 | 0.01851 | 0.011106 | 0.024988 | 0.058306 | 0.058306 | 0.041647 | 0.041647 | 0.041647 | 0.041647 | 0 | 0.003037 | 0.313347 | 3,836 | 94 | 117 | 40.808511 | 0.817388 | 0.25365 | 0 | 0.068966 | 0 | 0 | 0.217564 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017241 | false | 0.034483 | 0.172414 | 0 | 0.224138 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
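The three statistics reported right after `content` in this row (`avg_line_length` 40.808511, `max_line_length` 116, `alphanum_fraction` 0.569343 for a 94-line file) can plausibly be recomputed as below; the exact definitions the pipeline uses are an assumption:

```python
def basic_stats(content: str) -> dict:
    """Assumed definitions of the per-file statistics columns."""
    lines = content.splitlines()
    return {
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / len(content),
    }
```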
| 4e9c35d7a10e21f257f971c50e260fb397455462 | 5,829 | py | Python | web/impact/impact/tests/api_test_case.py | masschallenge/impact-api | 81075ced8fcc95de9390dd83c15e523e67fc48c0 | ["MIT"] | 5 | 2017-10-19T15:11:52.000Z | 2020-03-08T07:16:21.000Z | web/impact/impact/tests/api_test_case.py | masschallenge/impact-api | 81075ced8fcc95de9390dd83c15e523e67fc48c0 | ["MIT"] | 182 | 2017-06-21T19:32:13.000Z | 2021-03-22T13:38:16.000Z | web/impact/impact/tests/api_test_case.py | masschallenge/impact-api | 81075ced8fcc95de9390dd83c15e523e67fc48c0 | ["MIT"] | 1 | 2018-06-23T11:53:18.000Z | 2018-06-23T11:53:18.000Z |
```python
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.

import json

from oauth2_provider.models import get_application_model
from rest_framework.test import APIClient
from test_plus.test import TestCase

from django.core import mail
from django.conf import settings
from django.contrib.auth.models import Group
from django.urls import reverse

from accelerator_abstract.models.base_clearance import (
    CLEARANCE_LEVEL_GLOBAL_MANAGER,
    CLEARANCE_LEVEL_STAFF
)
from impact.tests.factories import (
    ClearanceFactory,
    UserFactory,
)

OAuth_App = get_application_model()

API_GROUPS = [settings.V0_API_GROUP, settings.V1_API_GROUP]
DESCRIPTION_CONTENT = 'DESCRIPTION:Topics: {topics}'
LOCATION_CONTENT = 'LOCATION:{location}\\;'
LOCATION_INFO = 'LOCATION:{location}\\;{meeting_info}'


class APITestCase(TestCase):
    SOME_SITE_NAME = "somesite.com"
    _user_count = 0
    client_class = APIClient
    user_factory = UserFactory

    @classmethod
    def setUpClass(cls):
        [Group.objects.get_or_create(name=name) for name in API_GROUPS]

    @classmethod
    def tearDownClass(cls):
        [Group.objects.get(name=name).delete() for name in API_GROUPS]

    def basic_user(self):
        user = self.make_user('basic_user{}@test.com'.format(self._user_count),
                              perms=["mc.view_startup"])
        self._user_count += 1
        for group in Group.objects.filter(name__in=API_GROUPS):
            user.groups.add(group)
        user.set_password('password')
        user.save()
        return user

    def staff_user(self, program_family=None, level=CLEARANCE_LEVEL_STAFF):
        user = self.make_user('basic_user{}@test.com'.format(self._user_count))
        self._user_count += 1
        kwargs = {"level": level,
                  "user": user}
        if program_family:
            kwargs['program_family'] = program_family
        clearance = ClearanceFactory(**kwargs)
        return clearance.user

    def global_operations_manager(self, program_family):
        user = self.staff_user()
        ClearanceFactory(user=user,
                         level=CLEARANCE_LEVEL_GLOBAL_MANAGER,
                         program_family=program_family)
        return user

    def get_access_token(self, user):
        app = OAuth_App.objects.create(
            user=user,
            name="Test666",
            client_type=OAuth_App.CLIENT_PUBLIC,
            authorization_grant_type=OAuth_App.GRANT_PASSWORD,
            redirect_uris="http://thirdparty.com/exchange/",
        )
        response = self.client.post(
            self.reverse("oauth2_provider:token"),
            data={
                "password": 'password',
                "client_id": app.client_id,
                "username": user.username,
                "grant_type": "password",
            },
            headers={'Content-Type': 'application/x-www-form-urlencoded'}
        )
        response_json = json.loads(response.content)
        return response_json['access_token']

    def assert_options_include(self, method, expected_options, object_id=None):
        if object_id:
            args = [object_id]
        else:
            args = []
        url = reverse(self.view.view_name, args=args)
        with self.login(email=self.basic_user().email):
            response = self.client.options(url)
            result = json.loads(response.content)
            assert method in result['actions']
            options = result['actions'][method]['properties']
            for key, params in expected_options.items():
                self.assertIn(key, options)
                self.assertEqual(options[key], params)

    def assert_ui_notification(self, response, success, notification):
        data = response.data
        detail = notification if notification else ""
        header = self.success_header if success else self.fail_header
        self.assertTrue(all([
            data['success'] == success,
            data['header'] == header,
            data['detail'] == detail
        ]), msg='Notification data was not as expected')

    def assert_notified(self,
                        user,
                        message="",
                        subject="",
                        check_alternative=False):
        '''Assert that the user received a notification.

        If `message` is specified, assert that the message appears in one of
        the outgoing emails to this user
        '''
        emails = [email for email in mail.outbox if user.email in email.to]
        self.assertGreater(len(emails), 0)
        if message:
            if check_alternative:
                self.assertTrue(any([_message_included_in_email_alternative(
                    email, message) for email in emails]))
            else:
                self.assertTrue(any([
                    message in email.body for email in emails]))
        if subject:
            self.assertIn(subject, [email.subject for email in emails])

    def assert_ics_email_attachments(self, user):
        '''assert that the ics email attachment exists
        '''
        emails = [email for email in mail.outbox if user.email in email.to]
        for email in emails:
            attachments = email.attachments
            self.assertGreater(len(email.attachments), 0)
            self.assertIn("reminder.ics",
                          [attachment[0] for attachment in attachments])

    def assert_not_notified(self, user):
        '''Assert that the specified user did not receive a notification.
        '''
        if mail.outbox:
            self.assertNotIn(user.email, [email.to for email in mail.outbox],
                             msg="Found an email sent to user")


def _message_included_in_email_alternative(email, message):
    return any([message in alt[0] for alt in email.alternatives])
```
| 36.892405 | 79 | 0.621891 | 660 | 5,829 | 5.315152 | 0.293939 | 0.020525 | 0.019954 | 0.018244 | 0.116591 | 0.083808 | 0.083808 | 0.058153 | 0.058153 | 0.058153 | 0 | 0.004312 | 0.283925 | 5,829 | 157 | 80 | 37.127389 | 0.836128 | 0.055584 | 0 | 0.078125 | 0 | 0 | 0.086399 | 0.02819 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.09375 | false | 0.03125 | 0.078125 | 0.007813 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
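A minimal sketch of a test module built on the `APITestCase` helpers in this record; the test class and method names are hypothetical, while `basic_user` and `get_access_token` are defined above:

```python
from impact.tests.api_test_case import APITestCase


class TokenTest(APITestCase):
    def test_token_roundtrip(self):
        # basic_user() creates a user in both API groups with a known
        # password; get_access_token() exercises the oauth2_provider
        # token endpoint with that user's credentials.
        user = self.basic_user()
        token = self.get_access_token(user)
        self.assertTrue(token)
```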
| 4e9d488202a407ec4de3fade6bfb2e435ba6bb6b | 607 | py | Python | pydis_site/apps/api/models/bot/aoc_link.py | Robin5605/site | 81aa42aa748cb228d7a09e6cf6b211484b654496 | ["MIT"] | 13 | 2018-02-03T22:57:41.000Z | 2018-05-17T07:38:36.000Z | pydis_site/apps/api/models/bot/aoc_link.py | Robin5605/site | 81aa42aa748cb228d7a09e6cf6b211484b654496 | ["MIT"] | 61 | 2018-02-07T21:34:39.000Z | 2018-06-05T16:15:28.000Z | pydis_site/apps/api/models/bot/aoc_link.py | Robin5605/site | 81aa42aa748cb228d7a09e6cf6b211484b654496 | ["MIT"] | 16 | 2018-02-03T12:37:48.000Z | 2018-06-02T17:14:55.000Z |
```python
from django.db import models

from pydis_site.apps.api.models.bot.user import User
from pydis_site.apps.api.models.mixins import ModelReprMixin


class AocAccountLink(ModelReprMixin, models.Model):
    """An AoC account link for a Discord User."""

    user = models.OneToOneField(
        User,
        on_delete=models.CASCADE,
        help_text="The user that is blocked from getting the AoC Completionist Role",
        primary_key=True
    )

    aoc_username = models.CharField(
        max_length=120,
        help_text="The AoC username associated with the Discord User.",
        blank=False
    )
```
| 27.590909 | 85 | 0.698517 | 80 | 607 | 5.2 | 0.6 | 0.043269 | 0.0625 | 0.081731 | 0.125 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0.006369 | 0.224053 | 607 | 21 | 86 | 28.904762 | 0.876858 | 0.06425 | 0 | 0 | 0 | 0 | 0.202847 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
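A sketch of the `AocAccountLink` model above in use via the Django ORM; the Discord user id is hypothetical, and the field names come from the model definition:

```python
from pydis_site.apps.api.models.bot.aoc_link import AocAccountLink
from pydis_site.apps.api.models.bot.user import User

user = User.objects.get(id=123456789)  # hypothetical Discord user id
AocAccountLink.objects.create(user=user, aoc_username="example_name")
print(AocAccountLink.objects.filter(user=user).exists())  # -> True
```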
| 4e9e7a69ae46de63cdefe46d785a1f6e94dac1e1 | 624 | py | Python | parse.py | Mimori256/kdb-parse | 45f7aca85fea9a7db612da86e9c31daaec52a580 | ["MIT"] | 3 | 2021-06-20T04:35:05.000Z | 2021-10-05T06:30:09.000Z | parse.py | Mimori256/kdb-parse | 45f7aca85fea9a7db612da86e9c31daaec52a580 | ["MIT"] | 2 | 2021-06-13T01:19:12.000Z | 2022-03-23T04:27:05.000Z | parse.py | Mimori256/kdb-parse | 45f7aca85fea9a7db612da86e9c31daaec52a580 | ["MIT"] | null | null | null |
```python
import json

# Parse csv to kdb.json
with open("kdb.csv", "r", encoding="utf_8") as f:
    l = []
    lines = f.readlines()
    # remove the header
    lines.pop(0)
    for line in lines:
        tmp1 = line.split('"')
        if tmp1[15] == "":
            tmp1[15] = " "
        if not "" in set([tmp1[1], tmp1[3], tmp1[11], tmp1[13], tmp1[15], tmp1[21]]):
            l.append([tmp1[1], tmp1[3], tmp1[11], tmp1[13], tmp1[15], tmp1[21]])

json_data = {}
l.pop(0)

for i in l:
    json_data[i[0]] = i[1:]

enc = json.dumps(json_data, ensure_ascii=False)

with open("kdb.json", "w") as f:
    f.write(enc)

print("complete")
```
| 20.8 | 85 | 0.540064 | 104 | 624 | 3.192308 | 0.461538 | 0.072289 | 0.090361 | 0.060241 | 0.204819 | 0.204819 | 0.204819 | 0.204819 | 0.204819 | 0.204819 | 0 | 0.094421 | 0.253205 | 624 | 29 | 86 | 21.517241 | 0.618026 | 0.0625 | 0 | 0 | 0 | 0 | 0.054889 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.052632 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 4e9e92e9363d4d32c2609f2f36539abe9b27e294 | 2,600 | py | Python | DLFrameWork/dataset/CIFAR_10.py | Mostafa-ashraf19/TourchPIP | a5090a0ec9cc81a91fe1fd6af41d77841361cec1 | ["MIT"] | null | null | null | DLFrameWork/dataset/CIFAR_10.py | Mostafa-ashraf19/TourchPIP | a5090a0ec9cc81a91fe1fd6af41d77841361cec1 | ["MIT"] | null | null | null | DLFrameWork/dataset/CIFAR_10.py | Mostafa-ashraf19/TourchPIP | a5090a0ec9cc81a91fe1fd6af41d77841361cec1 | ["MIT"] | null | null | null |
```python
import os
import shutil
import tarfile
import urllib.request

import pandas as pd

CIFAR10_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'


class CIFAR_10:
    def __init__(self, path, download=True, train=True):
        self.path = path
        self.download = download
        self.train = train
        self.csv_list = []
        if self.download:
            self._Download()
        self.path = os.getcwd() + '/' + self.path
        self.toCSV()
        self.TrainFile = self.path + '/' + 'cifar-10-batches-py/train_cifar.csv'
        self.TestFile = self.path + '/' + 'cifar-10-batches-py/test_batch.csv'

    def _Download(self):
        if not os.path.exists(os.getcwd() + '/' + self.path):
            os.mkdir(self.path)
        file_name = 'CIFAR-10.tar.gz'
        with urllib.request.urlopen(CIFAR10_URL) as response, open(os.getcwd() + '/' + self.path + '/' + file_name,
                                                                   'wb') as out_file:
            shutil.copyfileobj(response, out_file)
        tar = tarfile.open(os.getcwd() + '/' + self.path + '/' + file_name, "r:gz")
        tar.extractall(os.getcwd() + '/' + self.path + '/')
        tar.close()

    def unpickle(self, file):
        import pickle
        with open(file, 'rb') as fo:
            dict = pickle.load(fo, encoding='bytes')
        return dict

    def toCSV(self):
        file_names = ['data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4', 'data_batch_5', 'test_batch']
        for name in file_names:
            df_labels = pd.DataFrame(self.unpickle(self.path + '/' + 'cifar-10-batches-py/' + name)[b'labels'])
            df_data = pd.DataFrame(self.unpickle(self.path + '/' + 'cifar-10-batches-py/' + name)[b'data'])
            new = pd.concat([df_labels, df_data], axis=1)
            if not os.path.exists(self.path + '/' + 'cifar-10-batches-py/' + name + '.csv'):
                new.to_csv(self.path + '/' + 'cifar-10-batches-py/' + name + '.csv', index=False)
        for name in file_names[0:5]:
            self.csv_list.append(self.path + '/' + 'cifar-10-batches-py/' + name + '.csv')
        if not os.path.exists(self.path + '/' + 'cifar-10-batches-py/train_cifar.csv'):
            df_from_each_file = (pd.read_csv(f, sep=',', header=None) for f in self.csv_list)
            df_merged = pd.concat(df_from_each_file, ignore_index=True)
            df_merged.to_csv(self.path + '/' + 'cifar-10-batches-py/train_cifar.csv', index=False)

    def __repr__(self):
        return self.TrainFile if self.train == True else self.TestFile
```
| 40.625 | 115 | 0.575385 | 350 | 2,600 | 4.117143 | 0.277143 | 0.099931 | 0.081194 | 0.093685 | 0.336572 | 0.299792 | 0.283137 | 0.244275 | 0.189452 | 0.129077 | 0 | 0.018848 | 0.265385 | 2,600 | 63 | 116 | 41.269841 | 0.735602 | 0 | 0 | 0 | 0 | 0.020408 | 0.166346 | 0.053523 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102041 | false | 0 | 0.122449 | 0.020408 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 4ea10581f2a6479a2145424512ce3b01dbcd78d5 | 367 | py | Python | Python_Codes_for_BJ/stage12 큐 사용하기/프린터 큐.py | ch96an/BaekJoonSolution | 25594fda5ba1c0c4d26ff0828ec8dcf2f6572d33 | ["MIT"] | null | null | null | Python_Codes_for_BJ/stage12 큐 사용하기/프린터 큐.py | ch96an/BaekJoonSolution | 25594fda5ba1c0c4d26ff0828ec8dcf2f6572d33 | ["MIT"] | null | null | null | Python_Codes_for_BJ/stage12 큐 사용하기/프린터 큐.py | ch96an/BaekJoonSolution | 25594fda5ba1c0c4d26ff0828ec8dcf2f6572d33 | ["MIT"] | null | null | null |
```python
def printer(n,k,order):
    lst = [(order[x],False if x==k else True) for x in range(len(order))]
    flag, i = True, 0
    while flag:
        if lst[0][0] == max(lst,key=lambda x:x[0])[0]:
            flag = lst.pop(0)[1]
            i +=1
        else:
            lst.append(lst.pop(0))
    print(i)

for _ in range(int(input())):
    n,k=map(int,input().split())
    lst=list(map(int,input().split()))
    printer(n,k,lst)
```
| 24.466667 | 70 | 0.59673 | 74 | 367 | 2.945946 | 0.418919 | 0.027523 | 0.082569 | 0.146789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029316 | 0.163488 | 367 | 15 | 71 | 24.466667 | 0.680782 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.071429 | 0.214286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
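The `printer` routine above implements the classic printer-queue problem: the head job is popped when it holds the maximum priority and rotated to the back otherwise, and `i` counts pops until the tracked job at index `k` leaves the queue. A sketch driving it directly instead of via stdin:

```python
# With priorities [1, 1, 9, 1, 1, 1] and the tracked job at index 0, the
# priority-9 job prints first and the tracked job fifth.
printer(6, 0, [1, 1, 9, 1, 1, 1])  # prints 5
# With strictly increasing priorities, the job at index 0 prints last.
printer(4, 0, [1, 2, 3, 4])        # prints 4
```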
| 4ea47fc79c5dcbec42ef206e57f938c9dff9b024 | 2,101 | py | Python | zhang.py | AndrewQuijano/Treespace_REU_2017 | e1aff2224ad5152d82f529675444146a70623bca | ["MIT"] | 2 | 2021-06-07T12:22:46.000Z | 2021-09-14T00:19:03.000Z | zhang.py | AndrewQuijano/Treespace_REU_2017 | e1aff2224ad5152d82f529675444146a70623bca | ["MIT"] | null | null | null | zhang.py | AndrewQuijano/Treespace_REU_2017 | e1aff2224ad5152d82f529675444146a70623bca | ["MIT"] | null | null | null |
```python
import networkx as nx

from misc import maximum_matching_all
from networkx import get_node_attributes


def is_tree_based(graph):
    if is_binary(graph):
        # print("Graph is not binary! Zhang's won't work!")
        return None
    unmatched_reticulation = zhang_graph(graph)
    if len(unmatched_reticulation) == 0:
        return True
    else:
        return False


def is_binary(graph):
    for node in graph.nodes():
        if graph.out_degree(node) > 2 or graph.in_degree(node) > 2:
            return False
    return True


# Use this for non-binary graph
def zhang_graph(graph):
    try:
        zhang = zhang_bipartite(graph)
        max_match = maximum_matching_all(zhang)
        reticulations = [n for n, d in zhang.nodes(data=True) if d['biparite'] == 0]
        data = get_node_attributes(zhang, 'biparite')
        matched_reticulations = set()
        for s, t in max_match.items():
            try:
                if data[s] == 1:
                    matched_reticulations.add(s)
                if data[t] == 1:
                    matched_reticulations.add(t)
            except KeyError:
                continue
    except nx.exception.NetworkXPointlessConcept:
        return list()
    set_minus = set(reticulations) - matched_reticulations
    return list(set_minus)


def zhang_bipartite(graph):
    zhang = nx.Graph()
    for node in graph.nodes():
        # This is a reticulation vertex
        if graph.in_degree(node) == 2 and graph.out_degree(node) == 1:
            zhang.add_node(node, bipartite=0)
            # BE CAREFUL NOT TO ADD RETICULATIONS AGAIN ON OTHER SIDE!
            for parent in graph.predecessors(node):
                if graph.in_degree(parent) == 1 and graph.out_degree(parent) == 2:
                    zhang.add_node(parent, bipartite=1)
                    for e in graph.edges(parent):
                        # Add the edge only if we know the child is a reticulation
                        if graph.in_degree(e[1]) == 2 and graph.out_degree(e[1]) == 1:
                            zhang.add_edge(node, parent)
    return zhang
```
| 32.828125 | 86 | 0.595907 | 272 | 2,101 | 4.466912 | 0.316176 | 0.023045 | 0.046091 | 0.037037 | 0.097942 | 0.039506 | 0 | 0 | 0 | 0 | 0 | 0.01115 | 0.316992 | 2,101 | 63 | 87 | 33.349206 | 0.83554 | 0.10614 | 0 | 0.170213 | 0 | 0 | 0.008547 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0 | 0.06383 | 0 | 0.319149 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 4eaa0630211dc9678f367337b57ebf1618235962 | 3,765 | py | Python | sme_financing/main/apis/document_api.py | BuildForSDG/team-214-backend | f1aff9c27d7b7588b4bbb2bc68956b35051d4506 | ["MIT"] | 1 | 2020-05-20T16:32:33.000Z | 2020-05-20T16:32:33.000Z | sme_financing/main/apis/document_api.py | BuildForSDG/team-214-backend | f1aff9c27d7b7588b4bbb2bc68956b35051d4506 | ["MIT"] | 23 | 2020-05-19T07:12:53.000Z | 2020-06-21T03:57:54.000Z | sme_financing/main/apis/document_api.py | BuildForSDG/team-214-backend | f1aff9c27d7b7588b4bbb2bc68956b35051d4506 | ["MIT"] | 1 | 2020-05-18T14:18:12.000Z | 2020-05-18T14:18:12.000Z |
"""RESTful API Document resource."""
from flask_restx import Resource, reqparse
from flask_restx._http import HTTPStatus
from werkzeug.datastructures import FileStorage
from ..service.document_service import (
delete_document,
edit_document,
get_all_documents,
get_document,
save_document,
)
from .dto import DocumentDTO
api = DocumentDTO.document_api
_document = DocumentDTO.document
parser = reqparse.RequestParser()
parser.add_argument("document_name", type=str, help="Document name", location="form")
parser.add_argument("file", type=FileStorage, location="files")
@api.route("/")
class DocumentList(Resource):
@api.doc("list of documents")
@api.marshal_list_with(_document, envelope="data")
def get(self):
"""List all documents."""
return get_all_documents()
@api.doc("Create a new Document")
@api.expect(parser, validate=True)
@api.response(HTTPStatus.CREATED, "Document successfully saved")
@api.response(HTTPStatus.NOT_FOUND, "File not found")
@api.response(HTTPStatus.BAD_REQUEST, "File empty")
@api.response(HTTPStatus.NOT_ACCEPTABLE, "File extension not allowed")
@api.response(HTTPStatus.REQUEST_ENTITY_TOO_LARGE, "File exceeds max upload size")
def post(self):
"""Create a new Document."""
parse_data = parser.parse_args()
document_name = parse_data["document_name"]
file = parse_data["file"]
if not file or not document_name:
self.api.abort(
code=HTTPStatus.NOT_FOUND,
message="File not found or document name empty",
)
else:
return save_document(document_name, file)
@api.route("/<int:doc_id>")
@api.param("doc_id", "The ID of the docuemnt to process")
@api.response(HTTPStatus.NOT_FOUND, "Document not found")
@api.response(HTTPStatus.NOT_ACCEPTABLE, "File and document name empty")
class DocumentByID(Resource):
@api.doc("Get a single document")
@api.marshal_with(_document)
def get(self, doc_id):
"""Retrieve a document."""
document = get_document(doc_id)
if not document:
self.api.abort(code=HTTPStatus.NOT_FOUND, message="Document not found")
else:
return document
@api.doc("Patch a document")
@api.expect(parser)
def patch(self, doc_id):
"""Patch a document."""
document = get_document(doc_id)
if not document:
self.api.abort(code=HTTPStatus.NOT_FOUND, message="Document not found")
else:
parse_data = parser.parse_args()
document_name = parse_data["document_name"]
file = parse_data["file"]
if not file and not document_name:
self.api.abort(HTTPStatus.NOT_ACCEPTABLE, message="Both inputs empty")
else:
return edit_document(document, document_name, file)
# return self.get(doc_id)
@api.doc("Delete a document")
@api.response(HTTPStatus.BAD_REQUEST, "Can't delete document")
def delete(self, doc_id):
"""Delete a document."""
document = get_document(doc_id)
if not document:
self.api.abort(code=HTTPStatus.NOT_FOUND, message="Document not found")
else:
return delete_document(document)
# @api.route("/smes/<sme_id>")
# @api.param("sme_id", "The SME id")
# @api.response(HTTPStatus.NOT_FOUND, "SME not found")
# class DocumentSME(Resource):
# @api.doc("List all documents of an SME")
# @api.marshal_list_with(_document, envelope="data")
# def get(self, sme_id):
# """List all documents of an SME."""
# if not get_sme_by_id(sme_id):
# api.abort(404)
# return get_all_sme_documents(sme_id)
| 35.186916 | 86 | 0.657371 | 474 | 3,765 | 5.048523 | 0.219409 | 0.046803 | 0.07898 | 0.050146 | 0.40326 | 0.334308 | 0.265775 | 0.265775 | 0.248642 | 0.248642 | 0 | 0.001028 | 0.224967 | 3,765 | 106 | 87 | 35.518868 | 0.819054 | 0.154847 | 0 | 0.266667 | 0 | 0 | 0.159873 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.226667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 4eaadef4bc857f47d228828cdfd23ca47dfe5099 | 1,405 | py | Python | column_name_renaming.py | strathclyde-rse/strathclyde-software-survey | 1dd3805a416f1da6cbfa27958ae96a5ad685fe19 | ["CC-BY-4.0", "BSD-3-Clause"] | null | null | null | column_name_renaming.py | strathclyde-rse/strathclyde-software-survey | 1dd3805a416f1da6cbfa27958ae96a5ad685fe19 | ["CC-BY-4.0", "BSD-3-Clause"] | null | null | null | column_name_renaming.py | strathclyde-rse/strathclyde-software-survey | 1dd3805a416f1da6cbfa27958ae96a5ad685fe19 | ["CC-BY-4.0", "BSD-3-Clause"] | null | null | null |
```python
#!/usr/bin/env python
# encoding: utf-8

col_shortener = {
    'Q1':'confirm',
    'Q2':'faculty',
    'Q3':'department',
    'Q4':'funders',
    'Q5':'position',
    'Q6':'use_software',
    'Q7':'importance_software',
    'Q8':'develop_own_code',
    'Q9':'development_expertise',
    'Q10':'sufficient_training',
    'Q11':'want_to_commercialise',
    'Q12':'ready_to_release',
    'Q13':'hpc_use',
    'Q14_1':'version_control',
    'Q14_2':'unit_regression_testing',
    'Q14_3':'continuous_integration',
    'Q14_4':'compilation',
    'Q14_5':'documentation',
    'Q15':'uni_support',
    'Q16':'hired_developer',
    'Q17':'costed_developer',
    'Q18_1':'hire_full_time_developer',
    'Q18_2':'hire_pool_developer',
    'Q19':'voucher',
    'Q20':'consulting',
    'Q21':'mailing'
}

add_an_other_category = [
    'funders',
    'position',
    'hpc_use'
]

sort_no_further_analysis = [
    'faculty',
    'funders',
    'position',
    'hpc_use'
]

yes_no_analysis = [
    'use_software',
    'develop_own_code',
    'sufficient_training',
    'want_to_commercialise',
    'ready_to_release',
    'hired_developer'
]

scale_analysis = [
    'importance_software',
    'development_expertise',
    'sufficient_training'
]

worded_scale_analysis = [
    'version_control',
    'continuous_integration',
    'unit_regression_testing',
    'hire_full_time_developer',
    'hire_pool_developer'
]
```
| 19.788732 | 39 | 0.635587 | 151 | 1,405 | 5.483444 | 0.582781 | 0.065217 | 0.033816 | 0.050725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045053 | 0.194306 | 1,405 | 70 | 40 | 20.071429 | 0.686396 | 0.025623 | 0 | 0.101695 | 0 | 0 | 0.554499 | 0.162399 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.033898 | 0 | 0.033898 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 4eae8bae94764d3c5a64b90797dd929834fa6067 | 1,974 | py | Python | scanpy/tests/test_scaling.py | alexcwsmith/scanpy | b69015e9e7007193c9ac461d5c6fbf845b3d6962 | ["BSD-3-Clause"] | 1,171 | 2017-01-17T14:01:02.000Z | 2022-03-31T23:02:57.000Z | scanpy/tests/test_scaling.py | alexcwsmith/scanpy | b69015e9e7007193c9ac461d5c6fbf845b3d6962 | ["BSD-3-Clause"] | 1,946 | 2017-01-22T10:19:04.000Z | 2022-03-31T17:13:03.000Z | scanpy/tests/test_scaling.py | alexcwsmith/scanpy | b69015e9e7007193c9ac461d5c6fbf845b3d6962 | ["BSD-3-Clause"] | 499 | 2017-01-21T11:39:29.000Z | 2022-03-23T13:57:35.000Z |
```python
import pytest
import numpy as np
from anndata import AnnData
from scipy.sparse import csr_matrix

import scanpy as sc

# test "data" for 3 cells * 4 genes
X = [
    [-1, 2, 0, 0],
    [1, 2, 4, 0],
    [0, 2, 2, 0],
]  # with gene std 1,0,2,0 and center 0,2,2,0
X_scaled = [
    [-1, 2, 0, 0],
    [1, 2, 2, 0],
    [0, 2, 1, 0],
]  # with gene std 1,0,1,0 and center 0,2,1,0
X_centered = [
    [-1, 0, -1, 0],
    [1, 0, 1, 0],
    [0, 0, 0, 0],
]  # with gene std 1,0,1,0 and center 0,0,0,0


@pytest.mark.parametrize('typ', [np.array, csr_matrix], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['float32', 'int64'])
def test_scale(typ, dtype):
    # test AnnData arguments
    # test scaling with default zero_center == True
    adata0 = AnnData(typ(X), dtype=dtype)
    sc.pp.scale(adata0)
    assert np.allclose(csr_matrix(adata0.X).toarray(), X_centered)
    # test scaling with explicit zero_center == True
    adata1 = AnnData(typ(X), dtype=dtype)
    sc.pp.scale(adata1, zero_center=True)
    assert np.allclose(csr_matrix(adata1.X).toarray(), X_centered)
    # test scaling with explicit zero_center == False
    adata2 = AnnData(typ(X), dtype=dtype)
    sc.pp.scale(adata2, zero_center=False)
    assert np.allclose(csr_matrix(adata2.X).toarray(), X_scaled)

    # test bare count arguments, for simplicity only with explicit copy=True
    # test scaling with default zero_center == True
    data0 = typ(X, dtype=dtype)
    cdata0 = sc.pp.scale(data0, copy=True)
    assert np.allclose(csr_matrix(cdata0).toarray(), X_centered)
    # test scaling with explicit zero_center == True
    data1 = typ(X, dtype=dtype)
    cdata1 = sc.pp.scale(data1, zero_center=True, copy=True)
    assert np.allclose(csr_matrix(cdata1).toarray(), X_centered)
    # test scaling with explicit zero_center == False
    data2 = typ(X, dtype=dtype)
    cdata2 = sc.pp.scale(data2, zero_center=False, copy=True)
    assert np.allclose(csr_matrix(cdata2).toarray(), X_scaled)
```
| 35.890909 | 81 | 0.662614 | 322 | 1,974 | 3.965839 | 0.21118 | 0.017228 | 0.070478 | 0.065779 | 0.512921 | 0.463587 | 0.414252 | 0.280345 | 0.209867 | 0.209867 | 0 | 0.054648 | 0.193516 | 1,974 | 54 | 82 | 36.555556 | 0.747487 | 0.269504 | 0 | 0.04878 | 0 | 0 | 0.013996 | 0 | 0 | 0 | 0 | 0 | 0.146341 | 1 | 0.02439 | false | 0 | 0.121951 | 0 | 0.146341 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
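The fixtures in this record encode the arithmetic that `sc.pp.scale` is expected to perform; a standalone check is sketched below. That the per-gene std uses `ddof=1` is inferred from the commented values 1,0,2,0, not stated in the test itself:

```python
import numpy as np

X = np.array([[-1, 2, 0, 0], [1, 2, 4, 0], [0, 2, 2, 0]], dtype=float)
std = X.std(axis=0, ddof=1)  # per-gene std: 1, 0, 2, 0, matching the comments
std[std == 0] = 1            # zero-variance genes are left unscaled
assert np.allclose(X / std, [[-1, 2, 0, 0], [1, 2, 2, 0], [0, 2, 1, 0]])  # X_scaled
assert np.allclose((X - X.mean(axis=0)) / std,
                   [[-1, 0, -1, 0], [1, 0, 1, 0], [0, 0, 0, 0]])          # X_centered
```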
| 4eae8be82d67b6164b7865425e58eaf76d1e1eba | 7,810 | py | Python | wsireg/tmpSaves/demo_self6_complete unit.py | luweishuang/wsireg | 344af8585932e3e0f5df3ce40a7dc75846a0214b | ["MIT"] | null | null | null | wsireg/tmpSaves/demo_self6_complete unit.py | luweishuang/wsireg | 344af8585932e3e0f5df3ce40a7dc75846a0214b | ["MIT"] | null | null | null | wsireg/tmpSaves/demo_self6_complete unit.py | luweishuang/wsireg | 344af8585932e3e0f5df3ce40a7dc75846a0214b | ["MIT"] | null | null | null |
```python
import cv2
import numpy as np

import bilinear
import patchreg
from skimage.util import view_as_windows


def bilinear_interpolation_of_patch_registration(master_srcdata, target_srcdata):
    print("Beginning bilinear_interpolation_of_patch_registration...")
    w_shape = (1000, 1000, 4)  # window_size
    w_step = (500, 500, 4)     # step size
    padding = w_step[0]        # must do step padding
    master_data = cv2.copyMakeBorder(master_srcdata, padding, padding, padding, padding, cv2.BORDER_REFLECT)
    target_data = cv2.copyMakeBorder(target_srcdata, padding, padding, padding, padding, cv2.BORDER_REFLECT)
    master_img = cv2.cvtColor(master_data, code=cv2.COLOR_BGRA2RGBA)
    target_img = cv2.cvtColor(target_data, code=cv2.COLOR_BGRA2RGBA)

    # Stage One: Low-precision feature alignment
    h, _ = patchreg.alignFeatures(target_img, master_img)
    height, width = target_img.shape[:2]
    master_aligned = cv2.warpPerspective(master_img, h, (width, height))

    # Stage Two: Calculate patch-level registrations
    stack1 = np.concatenate((target_img, master_aligned), axis=-1)  # (2000, 40000, 8)
    patches = view_as_windows(stack1, window_shape=w_shape, step=w_step)
    morphs = patchreg.calcPlateMorphs(patches)  # (3,7,2,3,3)

    # Stage Three: Compute patch-level DVFs=dense displacement vector field
    id_patches = patchreg.calc_id_patches(img_shape=master_aligned.shape, patch_size=1000)  # (3,7,3,2000,2000,1)
    map_morphs = np.append(morphs, morphs[:, :, 1, None], axis=2)  # (3,7,3,3,3)
    reg_patches_src = patchreg.applyMorphs(id_patches, map_morphs)  # (3,7,3,2000,2000,1)
    map_patches = reg_patches_src[:, :, 1:, 500:1500, 500:1500, :]

    # Stage Four: Merge patch-level DVFs into a single global transform.
    quilts = bilinear.quilter(map_patches)
    wquilts = bilinear.bilinear_wquilts(map_patches)
    qmaps = [q * w for q, w in zip(quilts, wquilts)]  # element-wise product at matching positions
    qmaps_sum = qmaps[0] + qmaps[1] + qmaps[2] + qmaps[3]
    summed = (qmaps_sum).reshape(qmaps_sum.shape[:-1]).astype(np.float32)
    master_remap = cv2.remap(master_img, summed[0], summed[1], interpolation=cv2.INTER_LINEAR)  # summed holds the coordinate mapping
    master_reg = master_remap[padding:height-padding, padding:width-padding, :]
    return master_reg


def draw_img():
    master_srcdata = cv2.imread("../data/OK1_1_32.jpg")
    padding = 500
    master_data = cv2.copyMakeBorder(master_srcdata, padding, padding, padding, padding, cv2.BORDER_CONSTANT, value=(255, 255, 255))
    cv2.line(master_data, (0, 1000), (5000, 1000), (0, 255, 0), 2)
    cv2.line(master_data, (0, 2000), (5000, 2000), (0, 255, 0), 2)
    cv2.line(master_data, (1000, 0), (1000, 3000), (0, 255, 0), 2)
    cv2.line(master_data, (2000, 0), (2000, 3000), (0, 255, 0), 2)
    cv2.line(master_data, (3000, 0), (3000, 3000), (0, 255, 0), 2)
    cv2.line(master_data, (4000, 0), (4000, 3000), (0, 255, 0), 2)
    cv2.imwrite("master_data.jpg", master_data)


def pad_imgs(master3, target3):
    master_h, master_w, _ = master3.shape
    target_h, target_w, _ = target3.shape
    assert master_h == target_h and master_w == target_w
    src_w = master_w
    src_h = master_h
    mid_h = int(max(2000, np.ceil(src_h/1000)*1000))
    mid_w = int(max(2000, np.ceil(src_w/1000)*1000))
    assert mid_w >= src_w and mid_h >= src_h
    left_pad = int((mid_w-src_w)/2)
    right_pad = int(mid_w - src_w - left_pad)
    top_pad = int((mid_h - src_h) / 2)
    down_pad = int(mid_h - src_h - top_pad)
    master3_pad = cv2.copyMakeBorder(master3, top_pad, down_pad, left_pad, right_pad, cv2.BORDER_REFLECT)
    target3_pad = cv2.copyMakeBorder(target3, top_pad, down_pad, left_pad, right_pad, cv2.BORDER_REFLECT)
    return master3_pad, target3_pad, top_pad, down_pad, left_pad, right_pad


MAX_FEATURES = 5000
GOOD_MATCH_PERCENT = 0.45


def alignImages_Perspective(img1, img2):
    im1Gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    im2Gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)

    # Match features.
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = list(matcher.match(descriptors1, descriptors2, None))

    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)

    # Remove not so good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]

    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    height, width, channels = img2.shape
    # Perspective
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
    im1Reg_Perspective = cv2.warpPerspective(img1, h, (width, height))  # perspective transform
    return im1Reg_Perspective


def process_single_imgpart(img_master, target_img):
    master_height, master_width, _ = img_master.shape
    cur_height, cur_width, _ = target_img.shape
    assert cur_width == master_width
    top_pad, down_pad = 0, 0
    target_imgpad = target_img.copy()
    if master_height > cur_height:
        top_pad = int((master_height - cur_height)/2)
        down_pad = master_height - cur_height - top_pad
        target_imgpad = cv2.copyMakeBorder(target_img, top_pad, down_pad, 0, 0, cv2.BORDER_CONSTANT, value=(255, 255, 255))
    elif master_height < cur_height:
        print("cur_height > master_height", cur_height, master_height)
    img_show = target_imgpad.copy()
    im2Gray = cv2.cvtColor(target_imgpad, cv2.COLOR_BGR2GRAY)
    im1Reg_Perspective = alignImages_Perspective(img_master, target_imgpad)
    imRegGray = cv2.cvtColor(im1Reg_Perspective, cv2.COLOR_BGR2GRAY)
    diff = cv2.absdiff(imRegGray, im2Gray)
    # cv2.imwrite("diff.jpg", diff)
    ret, thresh = cv2.threshold(diff, 120, 255, cv2.THRESH_BINARY)  # 120
    # cv2.imwrite("thresh.jpg", thresh)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 1 and area < max(cur_height, cur_width):
            cv2.drawContours(img_show, cnt, -1, (0, 0, 255), 2)
    img_out = img_show[top_pad: master_height - down_pad, :, :]
    return img_out


if __name__ == "__main__":
    # draw_img()
    # exit()
    root = "../data/"
    master_srcdata = cv2.imread(root + "OK1_1.jpg")
    target_srcdata = cv2.imread(root + "NG1_1.jpg")
    master3 = master_srcdata[300:4850,:,:]
    # cv2.imwrite("master3.jpg", master3)
    target3 = target_srcdata[720:5270,:,:]
    # cv2.imwrite("target3.jpg", target3)

    # padding to 1000s, at least 2000
    master3_pad, target3_pad, top_pad, down_pad, left_pad, right_pad = pad_imgs(master3, target3)
    # cv2.imwrite("master3_pad.jpg", master3_pad)
    # cv2.imwrite("target3_pad.jpg", target3_pad)
    masterpad_h, masterpad_w, _ = master3_pad.shape
    master_reg_pad = bilinear_interpolation_of_patch_registration(master3_pad, target3_pad)
    master3_reg = master_reg_pad[top_pad: masterpad_h-down_pad, left_pad:masterpad_w-right_pad, :]
    cv2.imwrite("master3_reg.jpg", master3_reg)
    cv2.imwrite("master3.jpg", master3)
    cv2.imwrite("target3.jpg", target3)

    # Stage Five: high-precision feature alignment
    master_reg_out = process_single_imgpart(master3_reg, target3)
    cv2.imwrite("master_reg_out.jpg", master_reg_out)
    master_out = process_single_imgpart(master3, target3)
    cv2.imwrite("master_out.jpg", master_out)
```
| 43.631285 | 131 | 0.705634 | 1,112 | 7,810 | 4.706835 | 0.220324 | 0.022927 | 0.024073 | 0.019488 | 0.232709 | 0.164501 | 0.128009 | 0.112151 | 0.080054 | 0.064578 | 0 | 0.070953 | 0.173496 | 7,810 | 179 | 132 | 43.631285 | 0.739892 | 0.108963 | 0 | 0 | 0 | 0 | 0.031904 | 0.006785 | 0 | 0 | 0 | 0 | 0.02439 | 1 | 0.04065 | false | 0 | 0.04065 | 0 | 0.113821 | 0.01626 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 4eaf9ec2243bbc0b3558c08de925bc43b8365f96 | 1,253 | py | Python | src/sprites/weapon_vfx.py | mgear2/undervoid | 6c91a5786d29d766223831190952fd90ddc6a1e8 | ["MIT"] | 1 | 2020-08-29T06:41:03.000Z | 2020-08-29T06:41:03.000Z | src/sprites/weapon_vfx.py | mgear2/undervoid | 6c91a5786d29d766223831190952fd90ddc6a1e8 | ["MIT"] | 10 | 2019-07-15T05:15:38.000Z | 2020-11-25T03:14:03.000Z | src/sprites/weapon_vfx.py | mgear2/undervoid | 6c91a5786d29d766223831190952fd90ddc6a1e8 | ["MIT"] | 1 | 2020-11-22T08:25:26.000Z | 2020-11-22T08:25:26.000Z |
```python
# Copyright (c) 2020
# [This program is licensed under the "MIT License"]
# Please see the file LICENSE in the source
# distribution of this software for license terms.

import pygame as pg
import ruamel.yaml

from random import choice

vec = pg.math.Vector2


class Weapon_VFX(pg.sprite.Sprite):
    """
    Weapon_VFX appear when the player is shooting.
    Cycling between available img options provides
    animation effect.
    """

    def __init__(
        self,
        settings: ruamel.yaml.comments.CommentedMap,
        game_client_data_weaponvfx: list,
        pos: vec,
    ):
        self.settings = settings
        self._layer = self.settings["layer"]["vfx"]
        pg.sprite.Sprite.__init__(self)
        self.image = pg.transform.scale(
            choice(game_client_data_weaponvfx),
            (
                self.settings["gen"]["tilesize"],
                self.settings["gen"]["tilesize"],
            ),
        )
        self.rect = self.image.get_rect()
        self.pos = self.rect.center = pos
        self.spawn_time = pg.time.get_ticks()

    def update(self):
        if (
            pg.time.get_ticks() - self.spawn_time
            > self.settings["weapon"]["vbullet"]["fx_life"]
        ):
            self.kill()
```
| 27.23913 | 59 | 0.60016 | 148 | 1,253 | 4.925676 | 0.533784 | 0.098765 | 0.030178 | 0.046639 | 0.068587 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005624 | 0.290503 | 1,253 | 45 | 60 | 27.844444 | 0.814398 | 0.217877 | 0 | 0.133333 | 0 | 0 | 0.052466 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.1 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 4eafa55a23b75bd6783941216b9d9087a84c8b15 | 9,860 | py | Python | game/blenderpanda/pman.py | Moguri/prototype-nitrogen | 607f78219fcfbd55dfcd1611684107a2922f635d | ["Apache-2.0"] | 1 | 2017-05-29T23:03:13.000Z | 2017-05-29T23:03:13.000Z | game/blenderpanda/pman.py | Moguri/prototype-nitrogen | 607f78219fcfbd55dfcd1611684107a2922f635d | ["Apache-2.0"] | null | null | null | game/blenderpanda/pman.py | Moguri/prototype-nitrogen | 607f78219fcfbd55dfcd1611684107a2922f635d | ["Apache-2.0"] | null | null | null |
```python
import fnmatch
import os
import shutil
import subprocess
import sys
import time
from collections import OrderedDict

try:
    import configparser
except ImportError:
    import ConfigParser as configparser


class PManException(Exception):
    pass


class NoConfigError(PManException):
    pass


class CouldNotFindPythonError(PManException):
    pass


class BuildError(PManException):
    pass


class FrozenEnvironmentError(PManException):
    def __init__(self):
        PManException.__init__(self, "Operation not supported in frozen applications")


if '__file__' not in globals():
    __is_frozen = True
    __file__ = ''
else:
    __is_frozen = False

_config_defaults = OrderedDict([
    ('general', OrderedDict([
        ('name', 'Game'),
        ('render_plugin', ''),
    ])),
    ('build', OrderedDict([
        ('asset_dir', 'assets/'),
        ('export_dir', 'game/assets/'),
        ('ignore_patterns', '*.blend1, *.blend2'),
    ])),
    ('run', OrderedDict([
        ('main_file', 'game/main.py'),
        ('auto_build', True),
        ('auto_save', True),
    ])),
])

_user_config_defaults = OrderedDict([
    ('blender', OrderedDict([
        ('last_path', 'blender'),
        ('use_last_path', True),
    ])),
])


def __py2_read_dict(config, d):
    for section, options in d.items():
        config.add_section(section)
        for option, value in options.items():
            config.set(section, option, value)


def _get_config(startdir, conf_name, defaults):
    try:
        if startdir is None:
            startdir = os.getcwd()
    except FileNotFoundError:
        # The project folder was deleted on us
        raise NoConfigError("Could not find config file")

    dirs = os.path.abspath(startdir).split(os.sep)

    while dirs:
        cdir = os.sep.join(dirs)
        if cdir.strip() and conf_name in os.listdir(cdir):
            configpath = os.path.join(cdir, conf_name)
            config = configparser.ConfigParser()
            if hasattr(config, 'read_dict'):
                config.read_dict(defaults)
            else:
                __py2_read_dict(config, defaults)
            config.read(configpath)
            config.add_section('internal')
            config.set('internal', 'projectdir', os.path.dirname(configpath))
            return config
        dirs.pop()

    # No config found
    raise NoConfigError("Could not find config file")


def get_config(startdir=None):
    return _get_config(startdir, '.pman', _config_defaults)


def get_user_config(startdir=None):
    try:
        return _get_config(startdir, '.pman.user', _user_config_defaults)
    except NoConfigError:
        # No user config, just create one
        config = get_config(startdir)
        fp = os.path.join(config.get('internal', 'projectdir'), '.pman.user')
        print("Creating user config at {}".format(fp))
        with open(fp, 'w') as f:
            pass
        return _get_config(startdir, '.pman.user', _user_config_defaults)


def _write_config(config, conf_name):
    writecfg = configparser.ConfigParser()
    writecfg.read_dict(config)
    writecfg.remove_section('internal')
    with open(os.path.join(config.get('internal', 'projectdir'), conf_name), 'w') as f:
        writecfg.write(f)


def write_config(config):
    _write_config(config, '.pman')


def write_user_config(user_config):
    _write_config(user_config, '.pman.user')


def is_frozen():
    return __is_frozen


def get_python_program(config):
    python_programs = [
        'ppython',
        'python3',
        'python',
        'python2',
    ]

    # Check to see if there is a version of Python that can import panda3d
    for pyprog in python_programs:
        args = [
            pyprog,
            '-c',
            'import panda3d.core; import direct',
        ]
        with open(os.devnull, 'w') as fp:
            try:
                retcode = subprocess.call(args, stderr=fp)
            except FileNotFoundError:
                retcode = 1
        if retcode == 0:
            return pyprog

    # We couldn't find a python program to run
    raise CouldNotFindPythonError('Could not find a usable Python install')


def create_project(projectdir):
    if is_frozen():
        raise FrozenEnvironmentError()

    confpath = os.path.join(projectdir, '.pman')
    if os.path.exists(confpath):
        print("Updating project in {}".format(projectdir))
    else:
        print("Creating new project in {}".format(projectdir))

    # Touch config file to make sure it is present
    with open(confpath, 'a') as f:
        pass

    config = get_config(projectdir)
    write_config(config)

    templatedir = os.path.join(os.path.dirname(__file__), 'templates')

    print("Creating directories...")
    dirs = [
        'assets',
        'game',
    ]
    bpanda_mod_files = [
        os.path.join(templatedir, '__init__.py'),
        os.path.join(templatedir, 'bpbase.py'),
        'rendermanager.py',
        'pman.py',
        'pman_build.py',
    ]
    dirs = [os.path.join(projectdir, i) for i in dirs]
    for d in dirs:
        if os.path.exists(d):
            print("\tSkipping existing directory: {}".format(d))
        else:
            print("\tCreating directory: {}".format(d))
            os.mkdir(d)

    print("Creating main.py")
    with open(os.path.join(templatedir, 'main.py')) as f:
        main_data = f.read()
    mainpath = os.path.join(projectdir, 'game', 'main.py')
    if os.path.exists(mainpath):
        print("\tmain.py already exists at {}".format(mainpath))
    else:
        with open(mainpath, 'w') as f:
            f.write(main_data)
        print("\tmain.py created at {}".format(mainpath))

    bpmodpath = os.path.join(projectdir, 'game/blenderpanda')
    if os.path.exists(bpmodpath):
        print("Updating blenderpanda module")
        shutil.rmtree(bpmodpath)
    else:
        print("Creating blenderpanda module")
    os.mkdir(bpmodpath)
    for cf in bpanda_mod_files:
        bname = os.path.basename(cf)
        print("\tCopying over {}".format(bname))
        cfsrc = os.path.join(os.path.dirname(__file__), cf)
        cfdst = os.path.join(projectdir, 'game', 'blenderpanda', bname)
        shutil.copy(cfsrc, cfdst)
        print("\t\t{} created at {}".format(bname, cfdst))


def get_abs_path(config, path):
    return os.path.join(
        config.get('internal', 'projectdir'),
        path
    )


def get_rel_path(config, path):
    return os.path.relpath(path, config.get('internal', 'projectdir'))


def build(config=None):
    if is_frozen():
        raise FrozenEnvironmentError()

    if config is None:
        config = get_config()
    user_config = get_user_config(config.get('internal', 'projectdir'))

    if hasattr(time, 'perf_counter'):
        stime = time.perf_counter()
    else:
        stime = time.time()
    print("Starting build")

    srcdir = get_abs_path(config, config.get('build', 'asset_dir'))
    dstdir = get_abs_path(config, config.get('build', 'export_dir'))

    if not os.path.exists(srcdir):
        raise BuildError("Could not find asset directory: {}".format(srcdir))

    if not os.path.exists(dstdir):
        print("Creating asset export directory at {}".format(dstdir))
        os.makedirs(dstdir)

    print("Read assets from: {}".format(srcdir))
    print("Export them to: {}".format(dstdir))

    ignore_patterns = [i.strip() for i in config.get('build', 'ignore_patterns').split(',')]
    print("Ignoring file patterns: {}".format(ignore_patterns))

    num_blends = 0
    for root, dirs, files in os.walk(srcdir):
        for asset in files:
            src = os.path.join(root, asset)
            dst = src.replace(srcdir, dstdir)

            ignore_pattern = None
            for pattern in ignore_patterns:
                if fnmatch.fnmatch(asset, pattern):
                    ignore_pattern = pattern
                    break
            if ignore_pattern is not None:
                print('Skip building file {} that matched ignore pattern {}'.format(asset, ignore_pattern))
                continue

            if asset.endswith('.blend'):
                dst = dst.replace('.blend', '.bam')

            if os.path.exists(dst) and os.stat(src).st_mtime <= os.stat(dst).st_mtime:
                print('Skip building up-to-date file: {}'.format(dst))
                continue

            if asset.endswith('.blend'):
                # Handle with Blender
                num_blends += 1
            else:
                print('Copying non-blend file from "{}" to "{}"'.format(src, dst))
                if not os.path.exists(os.path.dirname(dst)):
                    os.makedirs(os.path.dirname(dst))
                shutil.copyfile(src, dst)

    if num_blends > 0:
        blender_path = user_config.get('blender', 'last_path') if user_config.getboolean('blender', 'use_last_path') else 'blender'
        args = [
            blender_path,
            '-b',
            '-P',
            os.path.join(os.path.dirname(__file__), 'pman_build.py'),
            '--',
            srcdir,
            dstdir,
        ]
        #print("Calling blender: {}".format(' '.join(args)))
        subprocess.call(args, env=os.environ.copy())

    if hasattr(time, 'perf_counter'):
        etime = time.perf_counter()
    else:
        etime = time.time()
    print("Build took {:.4f}s".format(etime - stime))


def run(config=None):
    if is_frozen():
        raise FrozenEnvironmentError()

    if config is None:
        config = get_config()

    if config.getboolean('run', 'auto_build'):
        build(config)

    mainfile = get_abs_path(config, config.get('run', 'main_file'))
    print("Running main file: {}".format(mainfile))
    args = [get_python_program(config), mainfile]
    #print("Args: {}".format(args))
    subprocess.Popen(args, cwd=config.get('internal', 'projectdir'))
```
| 27.853107 | 131 | 0.600406 | 1,146 | 9,860 | 5.017452 | 0.219023 | 0.034435 | 0.027826 | 0.028174 | 0.17287 | 0.126087 | 0.101217 | 0.043478 | 0.043478 | 0.026435 | 0 | 0.001945 | 0.26998 | 9,860 | 353 | 132 | 27.932011 | 0.796888 | 0.034483 | 0 | 0.189189 | 0 | 0 | 0.16183 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057915 | false | 0.023166 | 0.042471 | 0.015444 | 0.150579 | 0.088803 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
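A sketch of driving the `pman` module above from a project checkout; the project path is hypothetical, while `create_project`, `get_config`, `build`, and `run` are the functions defined in this record:

```python
import pman

pman.create_project("/path/to/mygame")       # scaffold .pman, assets/, game/
config = pman.get_config("/path/to/mygame")
pman.build(config)                           # export assets/ into game/assets/
pman.run(config)                             # auto-builds, then launches main_file
```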
| 4eb07bf2ab74b26ed4d8db65e2b44e12fd9bf220 | 1,326 | py | Python | src/createGraph.py | AJMFactsheets/NetworkSpeedGrapher | 86e755e8831ab22394719520713d4949ed3d018e | ["Apache-2.0"] | null | null | null | src/createGraph.py | AJMFactsheets/NetworkSpeedGrapher | 86e755e8831ab22394719520713d4949ed3d018e | ["Apache-2.0"] | null | null | null | src/createGraph.py | AJMFactsheets/NetworkSpeedGrapher | 86e755e8831ab22394719520713d4949ed3d018e | ["Apache-2.0"] | null | null | null |
```python
import sys

import plotly
import plotly.plotly as py
import plotly.graph_objs as go

#Argument 1 must be your plotly username, argument 2 is your api key. Get those by registering for a plotly account.
#Argument 3 is the name of the input file to input data from. Must be in the form: Date \n Download \n Upload \n
plotly.tools.set_credentials_file(username=sys.argv[1], api_key=sys.argv[2])

time = []
download = []
upload = []
lnum = 1
x = 1

file = open(sys.argv[3], 'r')
for line in file:
    if lnum == 1:
        #time.append(line[11:13])
        time.append(x)
        x += 1
        lnum = 2
    elif lnum == 2:
        download.append(line[10:15])
        lnum = 3
    elif lnum == 3:
        upload.append(line[8:12])
        lnum = 1
    else:
        raise SystemError('lnum internal error', lnum)

#trace1 = go.Histogram(
#    x=time,
#    y=download,
#    opacity=0.75
#)
#trace2 = go.Histogram(
#    x=time,
#    y=upload,
#    opacity=0.75
#)
#data = [trace1, trace2]
#layout = go.Layout(barmode='overlay')
#fig = go.Figure(data=data, layout=layout)
#py.iplot(fig, filename='Network Speed Graph')

trace1 = go.Bar(
    x=time,
    y=download,
    name='Download Speed'
)
trace2 = go.Bar(
    x=time,
    y=upload,
    name='Upload Speed'
)
data = [trace1, trace2]
layout = go.Layout(
    barmode='group'
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='Network Speed Graph')
```
| 17 | 116 | 0.6727 | 217 | 1,326 | 4.092166 | 0.37788 | 0.022523 | 0.027027 | 0.036036 | 0.295045 | 0.231982 | 0.231982 | 0.148649 | 0.148649 | 0.148649 | 0 | 0.036832 | 0.180995 | 1,326 | 77 | 117 | 17.220779 | 0.780847 | 0.381599 | 0 | 0.1 | 0 | 0 | 0.087282 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 4eb77f4c11a2d3ec08d7055fbeacf7a5223e4aad | 630 | py | Python | src/spellbot/migrations/versions/6e982c9318a6_adds_voice_category_per_channel.py | lexicalunit/spellbot | 17a4999d5e1def06246727ac5481230aa4a4557d | ["MIT"] | 13 | 2020-07-03T01:20:54.000Z | 2021-11-22T06:06:21.000Z | src/spellbot/migrations/versions/6e982c9318a6_adds_voice_category_per_channel.py | lexicalunit/spellbot | 17a4999d5e1def06246727ac5481230aa4a4557d | ["MIT"] | 660 | 2020-06-26T02:52:18.000Z | 2022-03-31T14:14:02.000Z | src/spellbot/migrations/versions/6e982c9318a6_adds_voice_category_per_channel.py | lexicalunit/spellbot | 17a4999d5e1def06246727ac5481230aa4a4557d | ["MIT"] | 3 | 2020-07-12T06:18:39.000Z | 2021-06-22T06:54:47.000Z |
"""Adds voice category per channel
Revision ID: 6e982c9318a6
Revises: ef54f035a75c
Create Date: 2021-12-03 13:18:57.468342
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "6e982c9318a6"
down_revision = "ef54f035a75c"
branch_labels = None
depends_on = None
def upgrade():
op.add_column(
"channels",
sa.Column(
"voice_category",
sa.String(length=50),
nullable=True,
server_default=sa.text("'SpellBot Voice Channels'"),
),
)
def downgrade():
op.drop_column("channels", "voice_category")
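# A sketch of how this migration would typically be applied and rolled back
# with the standard Alembic CLI (project environment assumed, not part of this file):
#   alembic upgrade 6e982c9318a6    # runs upgrade(), adding channels.voice_category
#   alembic downgrade ef54f035a75c  # runs downgrade(), dropping the column again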
| 19.6875 | 64 | 0.655556 | 74 | 630 | 5.472973 | 0.689189 | 0.096296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.112266 | 0.236508 | 630 | 31 | 65 | 20.322581 | 0.72973 | 0.253968 | 0 | 0 | 0 | 0 | 0.201299 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4eb9e46990415a6b4e9b33a746cb5c6ea0b09797 | 7,576 | py | Python | main.py | jarchv/capsnet-tensorflow | e4a69124060ac946cf21861b3ef3870e956325b6 | ["MIT"] | null | null | null | main.py | jarchv/capsnet-tensorflow | e4a69124060ac946cf21861b3ef3870e956325b6 | ["MIT"] | null | null | null | main.py | jarchv/capsnet-tensorflow | e4a69124060ac946cf21861b3ef3870e956325b6 | ["MIT"] | null | null | null |
#!/usr/bin/env python
#title :main.py
#description :Tensorflow implementation of CapsNet.
#author :Jose Chavez
#date :2019/04/30
#version :1.0
#usage :python3 main.py
#python_version :3.6.7
#==============================================================================
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from capsnet import CapsNet
from tensorflow.examples.tutorials.mnist import input_data
import functools
mnist = input_data.read_data_sets('MNIST_data/')
batch_size = 10
tf.reset_default_graph()
tf.random.set_random_seed(0)
np.random.seed(0)
checkpoint_file = './tmp/model.ckpt'
def train(model, restore = False, n_epochs = 50):
init = tf.global_variables_initializer()
n_iter_train_per_epoch = mnist.train.num_examples // batch_size
n_iter_valid_per_epoch = mnist.validation.num_examples // batch_size
best_loss_val = np.infty
saver = tf.train.Saver()
with tf.Session() as sess:
writer = tf.summary.FileWriter("output", sess.graph)
        if restore and tf.train.checkpoint_exists(checkpoint_file):
saver.restore(sess, checkpoint_file)
else:
init.run()
print('\n\nRunning CapsNet ...\n')
count_params()
for epoch in range(n_epochs):
margin_loss_train_ep = []
recnst_loss_train_ep = []
loss_train_ep = []
acc_train_ep = []
for it in range(1, n_iter_train_per_epoch + 1):
X_batch, y_batch = mnist.train.next_batch(batch_size)
                _, loss_batch_train, margin_loss_train, recnst_loss_train, acc_batch_train = sess.run(
                    [model.train_op,
                     model.batch_loss,
                     model.margn_loss,
                     model.recnst_loss_scale,
                     model.accuracy],
feed_dict = {model.X: X_batch.reshape([-1, 28, 28, 1]),
model.y: y_batch,
model.reconstruction: True})
print("\rIter: {}/{} [{:.1f}%] loss : {:.5f}".format(
it, n_iter_train_per_epoch, 100.0 * it / n_iter_train_per_epoch, loss_batch_train), end="")
plot_imgs = sess.run(model.X_cropped, feed_dict = {model.X: X_batch.reshape([-1, 28, 28, 1])})
#print(plot_imgs.shape)
#print(X_batch[0])
#plt.imshow(X_batch[0].reshape((28,28)), cmap='gray')
#plt.show()
#plt.imshow(plot_imgs[0].reshape((28,28)), cmap='gray')
#plt.show()
loss_train_ep.append(loss_batch_train)
acc_train_ep.append(acc_batch_train)
margin_loss_train_ep.append(margin_loss_train)
recnst_loss_train_ep.append(recnst_loss_train)
loss_train = np.mean(loss_train_ep)
margin_loss_train = np.mean(margin_loss_train_ep)
recnst_loss_train = np.mean(recnst_loss_train_ep)
acc_train = np.mean(acc_train_ep)
loss_val_ep = []
acc_val_ep = []
for it in range(1, n_iter_valid_per_epoch + 1):
X_batch, y_batch = mnist.validation.next_batch(batch_size)
loss_batch_val, acc_batch_val = sess.run(
[model.batch_loss, model.accuracy],
feed_dict = {model.X_cropped: X_batch.reshape([-1, 28, 28, 1]),
model.y: y_batch})
loss_val_ep.append(loss_batch_val)
acc_val_ep.append(acc_batch_val)
print("\rValidation {}/{} {:.1f}%".format(it,
n_iter_valid_per_epoch,
100.0 * it / n_iter_valid_per_epoch),
end=" "*30)
loss_val = np.mean(loss_val_ep)
acc_val = np.mean(acc_val_ep)
print("\repoch: {} loss_train: {:.5f}, loss_val: {:.5f}, margin_loss: {:.5f}, recnst_loss: {:.5f}, train_acc: {:.4f}%, valid_acc: {:.4f}% {}".format(
epoch + 1,
loss_train,
margin_loss_train,
recnst_loss_train,
loss_val,
acc_train * 100.0,
acc_val * 100.0,
"(improved)" if loss_val < best_loss_val else ""))
if loss_val < best_loss_val:
saver.save(sess, checkpoint_file)
best_loss_val = loss_val
writer.close()
def test(model):
n_iter_test_per_epoch = mnist.test.num_examples // batch_size
loss_test_ep = []
acc_test_ep = []
#init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
#init.run()
#saver = tf.train.import_meta_graph(checkpoint_file +'.meta')
saver.restore(sess, tf.train.latest_checkpoint('tmp/'))
#init.run()
print('\n\nTest\n')
for it in range(1, n_iter_test_per_epoch + 1):
X_batch, y_batch = mnist.test.next_batch(batch_size)
loss_batch_test, acc_batch_test = sess.run(
[model.batch_loss, model.accuracy],
feed_dict = { model.X_cropped: X_batch.reshape([-1, 28, 28, 1]),
model.y: y_batch,
model.reconstruction: False})
loss_test_ep.append(loss_batch_test)
acc_test_ep.append(acc_batch_test)
print("\rTesting {}/{} {:.1f}%".format(it,
n_iter_test_per_epoch,
100.0 * it / n_iter_test_per_epoch),
end=" "*30)
loss_test = np.mean(loss_test_ep)
acc_test = np.mean(acc_test_ep)
print("\r(Testing) accuracy: {:.3f}%, loss: {:.4f}".format(acc_test*100.0, loss_test))
def reconstruction(model, num_samples):
samples_imgs = mnist.test.images[:num_samples].reshape([-1, 28, 28, 1])
with tf.Session() as sess:
saver = tf.train.import_meta_graph(checkpoint_file +'.meta')
saver.restore(sess, tf.train.latest_checkpoint('tmp/'))
decoder_output, y_pred_value = sess.run(
[model.decoder_output, model.y_pred],
feed_dict = {model.X_cropped: samples_imgs,
model.y: np.array([], dtype = np.int64),
model.reconstruction: False})
samples_imgs = samples_imgs.reshape([-1, 28, 28])
reconstructions_imgs = decoder_output.reshape([-1, 28, 28])
plt.figure(figsize = (num_samples * 2, 4))
for img_idx in range(num_samples):
plt.subplot(2, num_samples, img_idx + 1)
plt.imshow(samples_imgs[img_idx], cmap='gray')
plt.title("Input: " + str(mnist.test.labels[img_idx]))
plt.axis("off")
#plt.show()
for img_idx in range(num_samples):
plt.subplot(2, num_samples, num_samples + img_idx + 1)
plt.imshow(reconstructions_imgs[img_idx], cmap='gray')
plt.title("Output: " + str(y_pred_value[img_idx]))
plt.axis("off")
plt.show()
def count_params():
size = lambda v: functools.reduce(lambda x, y: x*y, v.get_shape().as_list())
n_trainable = sum(size(v) for v in tf.trainable_variables())
#n_total = sum(size(v) for v in tf.all_variables())
print("Model size (Trainable): {:.1f}M\n".format(n_trainable/1000000.0))
#print("Model size (Total): {}".format(n_total))
if __name__ == '__main__':
tf.reset_default_graph()
model = CapsNet(rounds = 3)
#train(model, False, 50)
test(model)
#reconstruction(model, 5)
| 35.905213 | 161 | 0.574314 | 984 | 7,576 | 4.115854 | 0.183943 | 0.044444 | 0.024444 | 0.020741 | 0.438765 | 0.35358 | 0.298272 | 0.204198 | 0.161728 | 0.137284 | 0 | 0.025943 | 0.292767 | 7,576 | 210 | 162 | 36.07619 | 0.729937 | 0.098205 | 0 | 0.166667 | 0 | 0.007246 | 0.06463 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028986 | false | 0 | 0.050725 | 0 | 0.07971 | 0.057971 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ebb360ae9b11a1457dfb35575d9b1a3c0b33203 | 6,240 | py | Python | platforms_handlers/dialogflow/request.py | Robinson04/inoft_vocal_framework | 9659e0852604bc628b01e0440535add0ae5fc5d1 | ["MIT"] | 11 | 2020-04-15T07:47:34.000Z | 2022-03-30T21:47:36.000Z | platforms_handlers/dialogflow/request.py | Robinson04/inoft_vocal_framework | 9659e0852604bc628b01e0440535add0ae5fc5d1 | ["MIT"] | 20 | 2020-08-09T00:11:49.000Z | 2021-09-11T11:34:02.000Z | platforms_handlers/dialogflow/request.py | Robinson04/inoft_vocal_framework | 9659e0852604bc628b01e0440535add0ae5fc5d1 | ["MIT"] | 6 | 2020-02-21T04:45:19.000Z | 2021-07-18T22:13:55.000Z |
from typing import Optional, List
from pydantic import Field
from pydantic.main import BaseModel
from inoft_vocal_framework.utils.formatters import normalize_intent_name
class Intent(BaseModel):
name: str
displayName: str
class User(BaseModel):
_VERIFICATION_NAME_GUEST = "GUEST"
_VERIFICATION_NAME_VERIFIED = "VERIFIED"
_PERMISSION_UPDATE_TYPE = "UPDATE"
permissions: Optional[list] = None
locale: Optional[str] = None
lastSeen: Optional[str] = None
userStorage: Optional[str] = None
userVerificationStatus: Optional[str] = None
class Payload(BaseModel):
_INPUT_TYPE_OPTION = "OPTION"
user: User = Field(default_factory=User)
class Conversation(BaseModel):
conversationId: str
type: str
conversation: Optional[Conversation] = None
isInSandbox: bool
requestType: str
class InputsCustomList(list):
# todo: make the check that the current device has the capabilities to use an interactive list
class InputItem(BaseModel):
intent: str
rawInputs: list
class ArgumentItemsCustomList(list):
class ArgumentItem(BaseModel):
name: str
textValue: str
rawText: str
def append(self, item: dict) -> None:
if isinstance(item, dict):
argument_item_object = self.ArgumentItem(**item)
super().append(argument_item_object)
def custom_set_from(self, list_object: list) -> None:
for item in list_object:
self.append(item=item)
arguments: Optional[ArgumentItemsCustomList] = Field(default_factory=ArgumentItemsCustomList)
def append(self, item: dict) -> None:
if isinstance(item, dict):
input_item_object = self.InputItem(**item)
super().append(input_item_object)
def custom_set_from(self, list_object: list) -> None:
for item in list_object:
self.append(item=item)
inputs: InputsCustomList = Field(default_factory=InputsCustomList)
class Surface(BaseModel):
capabilities: list = Field(default_factory=list)
surface: Surface = Field(default_factory=Surface)
class AvailableSurfaceItem(BaseModel):
capabilities: list = Field(default_factory=list)
availableSurfaces: List[AvailableSurfaceItem] = Field(default_factory=list)
def get_first_input_of_type(self, type_name: str) -> Optional[dict]:
for input_item in self.inputs:
for argument_item in input_item.arguments:
if argument_item.name == type_name:
return argument_item
return None
class OriginalDetectIntentRequest(BaseModel):
source: str
version: str
payload: Payload
class QueryResult(BaseModel):
queryText: str
action: str
parameters: dict
allRequiredParamsPresent: bool
fulfillmentText: Optional[str] = None
fulfillmentMessages: Optional[List[str]] = None
outputContexts: List[dict]
intent: Intent
intentDetectionConfidence: Optional[int] = None
diagnosticInfo: Optional[dict] = None
LanguageModel: str
class Request(BaseModel):
# General for LaunchRequest, IntentRequest and SessionEndedRequest
responseId: str
queryResult: QueryResult
originalDetectIntentRequest: OriginalDetectIntentRequest
session: str
def is_option_select_request(self) -> bool:
return self.queryResult.queryText == "actions_intent_OPTION"
def get_updates_user_id_if_present(self) -> Optional[str]:
for output_context in self.queryResult.outputContexts:
context_parameters: Optional[dict] = output_context.get('parameters', None)
if context_parameters is not None:
context_parameters_permission: Optional[bool] = context_parameters.get('PERMISSION')
if context_parameters_permission is True:
context_parameters_updates_user_id: Optional[str] = context_parameters.get('UPDATES_USER_ID', None)
if context_parameters_updates_user_id is not None:
return context_parameters_updates_user_id
return None
def selected_option_identifier(self) -> str:
argument_item = self.originalDetectIntentRequest.payload.get_first_input_of_type(self.originalDetectIntentRequest.payload._INPUT_TYPE_OPTION)
if isinstance(argument_item, self.originalDetectIntentRequest.payload.InputsCustomList.InputItem.ArgumentItemsCustomList.ArgumentItem):
return argument_item.textValue
def is_launch_request(self) -> bool:
return self.queryResult.queryText == "GOOGLE_ASSISTANT_WELCOME"
def active_intent_name(self) -> str:
return normalize_intent_name(intent_name=self.queryResult.intent.displayName)
def is_in_intent_names(self, intent_names_list: List[str] or str) -> bool:
intent_name: str = self.active_intent_name()
if isinstance(intent_names_list, list):
return intent_name in [normalize_intent_name(intent_name=name) for name in intent_names_list]
elif isinstance(intent_names_list, str):
return intent_name == normalize_intent_name(intent_name=intent_names_list)
else:
raise Exception(f"intent_names_list type not supported : {type(intent_names_list)}")
    def get_intent_parameter_value(self, parameter_key: str, default=None):
        return self.queryResult.parameters.get(parameter_key, default)
    def is_not_usable(self):
        # The legacy validation that used to follow referenced attributes
        # (self.type, self._requestId, self._timestamp, ...) that no longer
        # exist on this model, so requests are always treated as usable.
        return False
def to_dict(self) -> dict:
return self.dict()
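# A minimal usage sketch (assuming `body` is the parsed JSON of an incoming
# Dialogflow webhook call; the variable name and payload are illustrative only):
#   request = Request(**body)
#   if request.is_launch_request():
#       ...  # greet the user
#   elif request.is_in_intent_names(["help intent", "HelpIntent"]):
#       ...  # handle the help intent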
| 37.365269 | 149 | 0.680769 | 679 | 6,240 | 6.039764 | 0.215022 | 0.029261 | 0.032431 | 0.016825 | 0.204097 | 0.119971 | 0.108754 | 0.063399 | 0.063399 | 0.063399 | 0 | 0 | 0.244551 | 6,240 | 166 | 150 | 37.590361 | 0.869962 | 0.02516 | 0 | 0.165354 | 0 | 0 | 0.039809 | 0.011515 | 0 | 0 | 0 | 0.006024 | 0 | 1 | 0.110236 | false | 0 | 0.031496 | 0.03937 | 0.653543 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ebf96b0cd05bc2eb3a4a7d33d2460323ab21921 | 1,073 | py | Python | scraping_data.py | WeiTaKuan/TPEX_StockBot | e8a7d694dd08efdc66989a827518a629e380de16 | ["MIT"] | null | null | null | scraping_data.py | WeiTaKuan/TPEX_StockBot | e8a7d694dd08efdc66989a827518a629e380de16 | ["MIT"] | null | null | null | scraping_data.py | WeiTaKuan/TPEX_StockBot | e8a7d694dd08efdc66989a827518a629e380de16 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#--------------------------------#
"""
File name: TPEX_STOCKBOT/main.py
Author: WEI-TA KUAN
Date created: 12/9/2021
Date last modified: 9/10/2021
Version: 1.0
Python Version: 3.8.8
Status: Developing
"""
#--------------------------------#
from scraping_data import stock_daily_scraping, tpex_holiday
import pickle
import datetime
year = datetime.datetime.today().strftime("%Y")
today = datetime.datetime.today().strftime("%Y/%m/%d")
holiday = pickle.load(open("assets/tpex_holiday.pkl",'rb'))
# update the market close date for each year
if year != holiday["休市日期"][0].split("/")[0]:
    print("Update Holiday")
    tpex_holiday.get_holiday()
    holiday = pickle.load(open("assets/tpex_holiday.pkl",'rb'))
# Don't run the code if the market is closed
if (today != holiday["休市日期"]).any() and datetime.datetime.today().weekday() not in [5, 6]:
print("Run 360 TPEX Stockbot...")
    # run the daily scraping method to store today's stock data
stock_daily_scraping.daily_scraping()
| 29 | 90 | 0.649581 | 151 | 1,073 | 4.536424 | 0.529801 | 0.075912 | 0.091971 | 0.084672 | 0.213139 | 0.125547 | 0.125547 | 0.125547 | 0.125547 | 0 | 0 | 0.029508 | 0.147251 | 1,073 | 37 | 91 | 29 | 0.719126 | 0.37931 | 0 | 0.133333 | 0 | 0 | 0.164363 | 0.070661 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ec073c949edac61a57ee7d6306e6b0a094db09d | 3,959 | py | Python | l1t_cli/commands/list/twikis/__init__.py | kreczko/l1t-cli | f708f001b6f434d4245da6631a068a7eeb9edf30 | ["Apache-2.0"] | null | null | null | l1t_cli/commands/list/twikis/__init__.py | kreczko/l1t-cli | f708f001b6f434d4245da6631a068a7eeb9edf30 | ["Apache-2.0"] | null | null | null | l1t_cli/commands/list/twikis/__init__.py | kreczko/l1t-cli | f708f001b6f434d4245da6631a068a7eeb9edf30 | ["Apache-2.0"] | null | null | null |
"""
list twikis:
List all L1 Trigger Offline Twikis
Usage:
list twikis [check=1]
Parameters:
check: force a check of the twiki URL before printing.
Useful when adding new entries. Default: 0
"""
import logging
import urllib.request
import hepshell
LOG = logging.getLogger(__name__)
URL_PREFIX = 'https://twiki.cern.ch/twiki/bin/view/'
TWIKIS = {
'L1T offline DEV': {
'url': 'https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideL1TOfflineDev',
'description': 'Instructions for L1 offline software development',
},
'L1T Calo Upgrade Offline Analysis': {
'url': 'https://twiki.cern.ch/twiki/bin/view/CMS/L1CaloUpgradeOfflineAnalysis',
'description': 'Some CaloL2 analysis workflows are detailed here',
},
'L1T phase 2': {
'url': 'https://twiki.cern.ch/twiki/bin/view/CMS/L1TriggerPhase2',
        'description': 'In preparation!',
},
'L1T phase 2 interface specs': {
'url': 'https://twiki.cern.ch/twiki/bin/view/CMS/L1TriggerPhase2InterfaceSpecifications',
'description': 'Working definitions of Trigger Primitive inputs',
},
'CSC trigger emulator timing': {
'url': 'https://twiki.cern.ch/twiki/bin/view/CMS/CSCDigitizationTiming',
'description': 'Simulation of signal times for CSC',
},
'L1 Trigger Emulator Stage 2 Upgrade Instructions': {
'url': 'https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideL1TStage2Instructions',
'description': 'L1 Trigger Emulator Stage 2 Upgrade Instructions',
},
'Offline DQM': {
'url': 'https://twiki.cern.ch/twiki/bin/view/CMS/DQMOffline',
'description': 'Twiki meant to give you a basic understanding of Offline DQM',
},
'L1T DQM DEV': {
'url': 'https://twiki.cern.ch/twiki/bin/view/Sandbox/L1TDQMModuleDev',
'description': 'L1T DQM Module Development Guide',
}
}
def does_url_exist(url):
exists = False
try:
        qry = urllib.request.urlopen(url)
if qry.getcode() == 200:
exists = True
except Exception as e:
print(e)
return exists
def get_text_lengths(twikis):
names = twikis.keys()
urls = []
descriptions = []
for _, twiki in twikis.items():
urls.append(twiki['url'])
descriptions.append(twiki['description'])
len_names = [len(n) for n in names]
len_urls = [len(u) for u in urls]
len_descriptions = [len(d) for d in descriptions]
return max(len_names), max(len_urls), max(len_descriptions)
class Command(hepshell.Command):
DEFAULTS = {
'check': False
}
def __init__(self, path=__file__, doc=__doc__):
super(Command, self).__init__(path, doc)
def run(self, args, variables):
# parse arguments and parameters
self.__prepare(args, variables)
self.__create_table(TWIKIS)
return True
def __create_table(self, twikis):
headers = ['Name', 'URL', 'Description']
        # get the maximum lengths of our columns
        max_len_n, max_len_u, max_len_d = get_text_lengths(twikis)
        # make sure the name column is at least as wide as its header
        max_len_n = max([max_len_n, len(headers[0])])
row_format = "{:<" + str(max_len_n) + "}\t"
row_format += "{:<" + str(max_len_u) + "}\t"
row_format += "{:<" + str(max_len_d) + "}\n"
self.__text = row_format.format(*headers)
self.__text += '-' * (max_len_n + max_len_u + max_len_d)
self.__text += '\n'
for name, twiki in sorted(twikis.items()):
# url = twiki['url'].replace(URL_PREFIX, '')
url = twiki['url']
desc = twiki['description']
if not self.__variables['check'] or does_url_exist(url):
self.__text += row_format.format(*[name, url, desc])
else:
                LOG.warning('Twiki "{0}" does not exist!'.format(url))
self.__text += '\n'
| 32.186992 | 97 | 0.605961 | 475 | 3,959 | 4.873684 | 0.332632 | 0.036285 | 0.054428 | 0.062203 | 0.234557 | 0.206911 | 0.190497 | 0.142117 | 0.142117 | 0 | 0 | 0.010559 | 0.258399 | 3,959 | 122 | 98 | 32.45082 | 0.777929 | 0.090174 | 0 | 0.022989 | 0 | 0 | 0.36315 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057471 | false | 0 | 0.034483 | 0 | 0.149425 | 0.011494 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ec139a98dfaa140655178c0f7864e5e8a59aecf | 1,528 | py | Python | examples/surrogates/corrnoise.py | manu-mannattil/nolitsa | 40befcb1ce5535703f90ffe87209181bcdb5eb5c | ["BSD-3-Clause"] | 118 | 2017-06-21T08:38:07.000Z | 2022-03-29T05:39:44.000Z | examples/surrogates/corrnoise.py | tanmaymaloo/nolitsa | 40befcb1ce5535703f90ffe87209181bcdb5eb5c | ["BSD-3-Clause"] | 2 | 2018-06-17T03:49:53.000Z | 2019-10-21T14:45:01.000Z | examples/surrogates/corrnoise.py | tanmaymaloo/nolitsa | 40befcb1ce5535703f90ffe87209181bcdb5eb5c | ["BSD-3-Clause"] | 35 | 2018-06-16T22:41:24.000Z | 2022-02-19T19:42:45.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""IAAFT surrogates for correlated noise.
The properties of linearly correlated noise can be captured quite
accurately by IAAFT surrogates. Thus, they cannot easily fool
a dimension estimator (here we use Takens's maximum likelihood estimator
for the correlation dimension) if surrogate analysis is performed
additionally.
"""
import matplotlib.pyplot as plt
import numpy as np
from nolitsa import surrogates, d2, noise, delay
x = noise.sma(np.random.normal(size=(2 ** 12)), hwin=100)
ends = surrogates.mismatch(x)[0]
x = x[ends[0]:ends[1]]
act = np.argmax(delay.acorr(x) < 1 / np.e)
mle = np.empty(19)
# Compute 19 IAAFT surrogates and compute the correlation sum.
for k in range(19):
y = surrogates.iaaft(x)[0]
r, c = d2.c2_embed(y, dim=[7], tau=act, window=act)[0]
# Compute the Takens MLE.
r_mle, mle_surr = d2.ttmle(r, c)
i = np.argmax(r_mle > 0.5 * np.std(y))
mle[k] = mle_surr[i]
plt.loglog(r, c, color='#BC8F8F')
r, c = d2.c2_embed(x, dim=[7], tau=act, window=act)[0]
# Compute the Takens MLE.
r_mle, true_mle = d2.ttmle(r, c)
i = np.argmax(r_mle > 0.5 * np.std(x))
true_mle = true_mle[i]
plt.title('IAAFT surrogates for correlated noise')
plt.xlabel('Distance $r$')
plt.ylabel('Correlation sum $C(r)$')
plt.loglog(r, c, color='#000000')
plt.figure(2)
plt.title('Takens\'s MLE for correlated noise')
plt.xlabel(r'$D_\mathrm{MLE}$')
plt.vlines(mle, 0.0, 0.5)
plt.vlines(true_mle, 0.0, 1.0)
plt.yticks([])
plt.ylim(0, 3.0)
plt.show()
| 26.807018 | 72 | 0.685864 | 272 | 1,528 | 3.805147 | 0.411765 | 0.011594 | 0.052174 | 0.054106 | 0.289855 | 0.13913 | 0.13913 | 0.13913 | 0.13913 | 0.13913 | 0 | 0.04099 | 0.153796 | 1,528 | 56 | 73 | 27.285714 | 0.759474 | 0.309555 | 0 | 0 | 0 | 0 | 0.103448 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.096774 | 0 | 0.096774 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ec177a61c4b2700cdcadf9e2506e37171a32c85 | 1,853 | py | Python | test/pubmed/test_entrez.py | aaronnorrish/PubMedConnections | dc17e141d94afe6d26a9b49b2183c06f3630e561 | ["CC-BY-4.0"] | 4 | 2022-03-09T05:20:46.000Z | 2022-03-13T11:18:58.000Z | test/pubmed/test_entrez.py | aaronnorrish/PubMedConnections | dc17e141d94afe6d26a9b49b2183c06f3630e561 | ["CC-BY-4.0"] | null | null | null | test/pubmed/test_entrez.py | aaronnorrish/PubMedConnections | dc17e141d94afe6d26a9b49b2183c06f3630e561 | ["CC-BY-4.0"] | 1 | 2022-03-09T05:21:53.000Z | 2022-03-09T05:21:53.000Z |
import time
from unittest import TestCase
from app.pubmed.source_entrez import *
class TestEntrez(TestCase):
def test_do_rate_limit(self):
# Serial Test
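        # Four rate-limited calls leave three ~0.37 s waits between them, so
        # the elapsed time should land in (3 * 0.37, 4 * 0.37) seconds, which
        # is exactly what the assertion below checks.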
start = time.time()
do_rate_limit()
do_rate_limit()
do_rate_limit()
do_rate_limit()
elapsed = time.time() - start
self.assertTrue(0.37 * 3 < elapsed < 0.37 * 4, "Incorrect elapsed time for serial test, " + str(elapsed))
time.sleep(0.37)
# Parallel Test 1
start = time.time()
run_over_threads(do_rate_limit, [[], [], [], []])
elapsed = time.time() - start
self.assertTrue(0.37 * 3 < elapsed < 0.37 * 4, "Incorrect elapsed time for parallel test, " + str(elapsed))
time.sleep(0.37)
# Parallel Test 2
start = time.time()
run_over_threads(do_rate_limit, [[], [], [], [], [], [], []])
elapsed = time.time() - start
self.assertTrue(0.37 * 6 < elapsed < 0.37 * 7, "Incorrect elapsed time for parallel test, " + str(elapsed))
def test_request_entrez_einfo(self):
response = request_entrez_einfo()
self.assertIsInstance(response, dict)
self.assertTrue("DbList" in response)
databases = response["DbList"]
self.assertIsInstance(databases, list)
self.assertTrue(PUBMED_DB_NAME in databases)
self.assertTrue(PUBMED_CENTRAL_DB_NAME in databases)
def test_request_entrez_database_list(self):
databases = request_entrez_database_list()
self.assertIsInstance(databases, list)
self.assertTrue(PUBMED_DB_NAME in databases)
self.assertTrue(PUBMED_CENTRAL_DB_NAME in databases)
def test_request_entrez_by_date(self):
# This test is really slow...
# print(len(download_all_modified_since(PUBMED_DB_NAME, "2022/03/08")))
pass
| 36.333333 | 115 | 0.636805 | 230 | 1,853 | 4.908696 | 0.286957 | 0.077945 | 0.068202 | 0.06023 | 0.654562 | 0.618246 | 0.618246 | 0.618246 | 0.571302 | 0.474756 | 0 | 0.028798 | 0.250405 | 1,853 | 50 | 116 | 37.06 | 0.784017 | 0.076093 | 0 | 0.486486 | 0 | 0 | 0.079719 | 0 | 0 | 0 | 0 | 0 | 0.297297 | 1 | 0.108108 | false | 0.027027 | 0.081081 | 0 | 0.216216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ec4dd9e5afd36d15c0c2a204aed4c3badf824b1 | 1,799 | py | Python | bankapi.py | robinstauntoncollins/bank-api | b19cadf5a65f5e66ca14688af8774f400d4fb0f8 | ["Unlicense"] | null | null | null | bankapi.py | robinstauntoncollins/bank-api | b19cadf5a65f5e66ca14688af8774f400d4fb0f8 | ["Unlicense"] | null | null | null | bankapi.py | robinstauntoncollins/bank-api | b19cadf5a65f5e66ca14688af8774f400d4fb0f8 | ["Unlicense"] | null | null | null |
#!/usr/bin/env python3
import os
import click
from bank_api import create_app, db, models, utils
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
@app.shell_context_processor
def make_shell_context():
return {
'db': db,
'Account': models.Account,
'Customer': models.Customer,
'Transaction': models.Transaction}
@app.cli.command('createdb')
@click.option('--test-data', type=bool, default=True, help="Initializes database with pre-loaded data")
def createdb(test_data):
db.drop_all()
db.create_all()
if test_data:
customer_data = [
{'name': "Robin", 'surname': "Staunton-Collins"},
{'name': "Matin", 'surname': "Abbasi"},
{'name': "Rodrigo", 'surname': "Hammerly"},
{'name': "Monty", 'surname': "Python"}
]
account_data = [
{'customer_id': 1, 'balance': 50, 'account_number': utils.generate_random_account_number()},
{'customer_id': 1, 'balance': 40, 'account_number': utils.generate_random_account_number()},
{'customer_id': 2, 'balance': 450, 'account_number': utils.generate_random_account_number()},
]
transaction_data = [
{'account_id': 1, 'amount': 50},
{'account_id': 2, 'amount': 40},
{'account_id': 3, 'amount': 450},
]
customers = [models.Customer().import_data(c) for c in customer_data]
db.session.add_all(customers)
accounts = [models.Account().import_data(a) for a in account_data]
db.session.add_all(accounts)
transactions = [models.Transaction().import_data(t) for t in transaction_data]
db.session.add_all(transactions)
db.session.commit()
if __name__ == '__main__':
app.run(debug=True)
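# A hypothetical invocation of the custom CLI command via the Flask CLI
# (the shell environment shown is an assumption, not part of this file):
#   FLASK_APP=bankapi.py flask createdb --test-data True
# seeds the database with the sample customers, accounts and transactions above.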
| 32.125 | 105 | 0.612007 | 210 | 1,799 | 5.004762 | 0.4 | 0.074215 | 0.05138 | 0.074215 | 0.201713 | 0.147479 | 0.147479 | 0.104662 | 0.104662 | 0 | 0 | 0.015251 | 0.234575 | 1,799 | 55 | 106 | 32.709091 | 0.748003 | 0.011673 | 0 | 0 | 0 | 0 | 0.203151 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.142857 | 0.02381 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ecc15d4ccded89291e34497472b06937ec1df8b | 18,554 | py | Python | WS_CNN.py | Aks-Dmv/WSDDN | 71fe1ccb17d5e779c8dac94a84227c871bd3aa73 | ["MIT"] | null | null | null | WS_CNN.py | Aks-Dmv/WSDDN | 71fe1ccb17d5e779c8dac94a84227c871bd3aa73 | ["MIT"] | null | null | null | WS_CNN.py | Aks-Dmv/WSDDN | 71fe1ccb17d5e779c8dac94a84227c871bd3aa73 | ["MIT"] | null | null | null |
import argparse
import os
import shutil
import time
import sys
import sklearn
import sklearn.metrics
import torch
torch.cuda.init()
import torch.nn as nn
import torch.nn.parallel
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from AlexNet import *
from voc_dataset import *
from utils import *
import wandb
USE_WANDB = True # use flags, wandb is not convenient for debugging
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--arch', default='localizer_alexnet')
parser.add_argument(
'-j',
'--workers',
default=4,
type=int,
metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument(
'--epochs',
default=30,
type=int,
metavar='N',
help='number of total epochs to run')
parser.add_argument(
'--start-epoch',
default=0,
type=int,
metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument(
'-b',
'--batch-size',
default=256,
type=int,
metavar='N',
help='mini-batch size (default: 256)')
parser.add_argument(
'--lr',
'--learning-rate',
default=0.1,
type=float,
metavar='LR',
help='initial learning rate')
parser.add_argument(
'--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument(
'--weight-decay',
'--wd',
default=1e-4,
type=float,
metavar='W',
help='weight decay (default: 1e-4)')
parser.add_argument(
'--print-freq',
'-p',
default=10,
type=int,
metavar='N',
help='print frequency (default: 10)')
parser.add_argument(
'--eval-freq',
default=2,
type=int,
metavar='N',
    help='evaluation frequency in epochs (default: 2)')
parser.add_argument(
'--resume',
default='',
type=str,
metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument(
'-e',
'--evaluate',
dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument(
'--pretrained',
dest='pretrained',
action='store_true',
help='use pre-trained model')
parser.add_argument(
'--world-size',
default=1,
type=int,
help='number of distributed processes')
parser.add_argument(
'--dist-url',
default='tcp://224.66.41.62:23456',
type=str,
help='url used to set up distributed training')
parser.add_argument(
'--dist-backend', default='gloo', type=str, help='distributed backend')
parser.add_argument('--vis', action='store_true')
best_prec1 = 0
cntr_train = 0
cntr_val = 0
def main():
global args, best_prec1, cntr_train, cntr_val
args = parser.parse_args()
args.distributed = args.world_size > 1
# create model
print("=> creating model '{}'".format(args.arch))
if args.arch == 'localizer_alexnet':
model = localizer_alexnet(pretrained=args.pretrained)
elif args.arch == 'localizer_alexnet_robust':
model = localizer_alexnet_robust(pretrained=args.pretrained)
print(model)
model = torch.nn.DataParallel(model)
model.cuda()
# TODO:
# define loss function (criterion) and optimizer
# also use an LR scheduler to decay LR by 10 every 30 epochs
# you can also use PlateauLR scheduler, which usually works well
criterion = nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
training_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
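    # A step-decay alternative mentioned in the TODO above (decay LR by 10
    # every 30 epochs); torch.optim.lr_scheduler.StepLR is the standard API:
    #   training_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)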
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".format(
args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
#TODO: Create Datasets and Dataloaders using VOCDataset - Ensure that the sizes are as required
    # Also ensure that data directories are correct - the ones used for testing by TAs might be different
# Resize the images to 512x512
train_dataset = VOCDataset(image_size=512)
val_dataset = VOCDataset(split='test', image_size=512)
def collate_fn(batch):
return tuple(zip(*batch))
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
# shuffle=(train_sampler is None),
shuffle=False,
num_workers=args.workers,
pin_memory=True,
sampler=train_sampler,
drop_last=True, collate_fn=collate_fn)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
drop_last=True, collate_fn=collate_fn)
if args.evaluate:
validate(val_loader, model, criterion)
return
# TODO: Create loggers for wandb - ideally, use flags since wandb makes it harder to debug code.
if USE_WANDB:
wandb.init(project="vlr2", reinit=True)
for epoch in range(args.start_epoch, args.epochs):
# adjust_learning_rate(optimizer, epoch)
# train for one epoch
loss = train(train_loader, model, criterion, optimizer, epoch)
# training_scheduler.step(loss)
# evaluate on validation set
if epoch % args.eval_freq == 0 or epoch == args.epochs - 1:
m1, m2 = validate(val_loader, model, criterion, epoch)
score = m1 * m2
# remember best prec@1 and save checkpoint
is_best = score > best_prec1
best_prec1 = max(score, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
}, is_best)
#TODO: You can add input arguments if you wish
def train(train_loader, model, criterion, optimizer, epoch):
global cntr_train
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
avg_m1 = AverageMeter()
avg_m2 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (data) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# TODO: Get inputs from the data dict
# TODO: Get output from model
# TODO: Perform any necessary functions on the output such as clamping
# TODO: Compute loss using ``criterion``
img_input = torch.stack(data[0], dim=0).cuda()
target = torch.stack(data[1], dim=0).cuda()
wgt = torch.stack(data[2], dim=0).cuda()
# TODO: Get output from model
# TODO: Perform any necessary functions on the output such as clamping
# TODO: Compute loss using ``criterion``
optimizer.zero_grad()
output_heatmap = model(img_input)
if args.arch == 'localizer_alexnet':
max_pool_k = output_heatmap.shape[2]
maxPool = nn.MaxPool2d(kernel_size=max_pool_k)
output = maxPool(output_heatmap)
elif args.arch == 'localizer_alexnet_robust':
max_pool_k = output_heatmap[0].shape[2]
maxPool = nn.MaxPool2d(kernel_size=max_pool_k)
output = maxPool(output_heatmap[0])
max_pool_k1 = output_heatmap[1].shape[2]
maxPool1 = nn.MaxPool2d(kernel_size=max_pool_k1)
output_1 = maxPool1(output_heatmap[1])
max_pool_k2 = output_heatmap[2].shape[2]
maxPool2 = nn.MaxPool2d(kernel_size=max_pool_k2)
output_2 = maxPool2(output_heatmap[2])
output = output*0.333 + output_1*0.333 + output_2*0.333
output = output.view(output.shape[0], output.shape[1])
loss = criterion(output*wgt, target*wgt)
# measure metrics and record loss
sigmoid = nn.Sigmoid()
m1 = metric1(sigmoid(output), target, wgt)
m2 = metric2(sigmoid(output), target, wgt)
losses.update(loss.item(), img_input.size(0))
avg_m1.update(m1)
avg_m2.update(m2)
# TODO:
# compute gradient and do SGD step
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Metric1 {avg_m1.val:.3f} ({avg_m1.avg:.3f})\t'
'Metric2 {avg_m2.val:.3f} ({avg_m2.avg:.3f})'.format(
epoch,
i,
len(train_loader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
avg_m1=avg_m1,
avg_m2=avg_m2))
#TODO: Visualize/log things as mentioned in handout
#TODO: Visualize at appropriate intervals
if USE_WANDB and i % args.print_freq == 0:
wandb.log({"train/loss": loss, "train/cntr":cntr_train})
wandb.log({"train/m1": m1, "train/cntr":cntr_train})
wandb.log({"train/m2": m2, "train/cntr":cntr_train})
cntr_train+=1
# End of train()
return loss.detach()
def validate(val_loader, model, criterion, epoch = 0):
global cntr_val
batch_time = AverageMeter()
losses = AverageMeter()
avg_m1 = AverageMeter()
avg_m2 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (data) in enumerate(val_loader):
# TODO: Get inputs from the data dict
img_input = torch.stack(data[0], dim=0).cuda()
target = torch.stack(data[1], dim=0).cuda()
wgt = torch.stack(data[2], dim=0).cuda()
# TODO: Get output from model
# TODO: Perform any necessary functions on the output
# TODO: Compute loss using ``criterion``
output_heatmap = model(img_input)
if args.arch == 'localizer_alexnet':
max_pool_k = output_heatmap.shape[2]
maxPool = nn.MaxPool2d(kernel_size=max_pool_k)
output = maxPool(output_heatmap)
elif args.arch == 'localizer_alexnet_robust':
max_pool_k = output_heatmap[0].shape[2]
maxPool = nn.MaxPool2d(kernel_size=max_pool_k)
output = maxPool(output_heatmap[0])
max_pool_k1 = output_heatmap[1].shape[2]
maxPool1 = nn.MaxPool2d(kernel_size=max_pool_k1)
output_1 = maxPool1(output_heatmap[1])
max_pool_k2 = output_heatmap[2].shape[2]
maxPool2 = nn.MaxPool2d(kernel_size=max_pool_k2)
output_2 = maxPool2(output_heatmap[2])
output = output*0.333 + output_1*0.333 + output_2*0.333
output = output.view(output.shape[0], output.shape[1])
loss = criterion(output*wgt, target*wgt)
sigmoid = nn.Sigmoid()
# measure metrics and record loss
m1 = metric1(sigmoid(output), target, wgt)
m2 = metric2(sigmoid(output), target, wgt)
losses.update(loss.item(), img_input.size(0))
avg_m1.update(m1)
avg_m2.update(m2)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Metric1 {avg_m1.val:.3f} ({avg_m1.avg:.3f})\t'
'Metric2 {avg_m2.val:.3f} ({avg_m2.avg:.3f})'.format(
i,
len(val_loader),
batch_time=batch_time,
loss=losses,
avg_m1=avg_m1,
avg_m2=avg_m2))
#TODO: Visualize things as mentioned in handout
#TODO: Visualize at appropriate intervals
if USE_WANDB:
if i % args.print_freq == 0:
wandb.log({"val/loss": loss, "val/cntr":cntr_val})
wandb.log({"val/m1": m1, "val/cntr":cntr_val})
wandb.log({"val/m2": m2, "val/cntr":cntr_val})
cntr_val+=1
if i<5 and epoch%14==0:
gt_np_img = img_input[0].detach().cpu().numpy().mean(axis=0)
wandb.log({'heatmaps/epoch_{}_gt_img_{}'.format(epoch, i): wandb.Image(gt_np_img)})
weighted_target = (target[0] * wgt[0]).detach().cpu().numpy()
heat_i = 0
resize512 = transforms.Resize((512, 512))
for class_i in range(20):
print(weighted_target[class_i])
if weighted_target[class_i]==1:
target_gt = class_i
else:
continue
if args.arch == 'localizer_alexnet':
print("output heatmap shape ", output_heatmap.shape)
print(torch.sum(torch.isnan(output_heatmap[0,target_gt]).type(torch.uint8)))
out_heat = resize512(output_heatmap[0,target_gt][None,:,:])
selected_heatmap = out_heat.detach().cpu()
# selected_heatmap = selected_heatmap[None,:,:]
elif args.arch == 'localizer_alexnet_robust':
print("output heatmap shape ", output_heatmap[0].shape, output_heatmap[1].shape, output_heatmap[2].shape)
# print(torch.sum(torch.isnan(output_heatmap[0][0,target_gt]).type(torch.uint8)))
out_heat = resize512(output_heatmap[0][0,target_gt][None,:,:]) * 0.333
out_heat1 = resize512(output_heatmap[1][0,target_gt][None,:,:]) * 0.333
out_heat2 = resize512(output_heatmap[2][0,target_gt][None,:,:]) * 0.333
selected_heatmap = out_heat + out_heat1 + out_heat2
selected_heatmap = selected_heatmap.detach().cpu()
print("target gt", target_gt)
selected_heatmap = resize512(selected_heatmap)
selected_heatmap = torch.permute(selected_heatmap, (1,2,0)).numpy()
print(selected_heatmap.min())
print(selected_heatmap.max())
wandb.log({'heatmaps/epoch_{}_img_{}_heatmap_{}'.format(epoch, i, target_gt): wandb.Image(selected_heatmap)})
print(' * Metric1 {avg_m1.avg:.3f} Metric2 {avg_m2.avg:.3f}'.format(
avg_m1=avg_m1, avg_m2=avg_m2))
return avg_m1.avg, avg_m2.avg
# TODO: You can make changes to this function if you wish (not necessary)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def metric1(pred, gt, valid):
# TODO: Ignore for now - proceed till instructed
    # pred has already been passed through a sigmoid at both call sites,
    # so convert it without applying sigmoid a second time
    pred = pred.cpu().detach().numpy()
gt = gt.cpu().detach().numpy()
valid = valid.cpu().detach().numpy()
nclasses = gt.shape[1]
AP = []
for cid in range(nclasses):
gt_cls = gt[:, cid][valid[:, cid] > 0].astype('float32')
pred_cls = pred[:, cid][valid[:, cid] > 0].astype('float32')
if np.all(gt_cls==0):
if np.all(pred_cls<0.5):
ap=1.
else:
ap=0.
else:
# As per PhilK. code:
# https://github.com/philkr/voc-classification/blob/master/src/train_cls.py
pred_cls -= 1e-5 * gt_cls
ap = sklearn.metrics.average_precision_score(gt_cls, pred_cls)
AP.append(ap)
return np.mean(AP)
def metric2(pred, gt, valid):
#TODO: Ignore for now - proceed till instructed
    # pred has already been passed through a sigmoid at both call sites,
    # so convert it without applying sigmoid a second time
    pred = pred.cpu().detach().numpy()
gt = gt.cpu().detach().numpy()
valid = valid.cpu().detach().numpy()
nclasses = gt.shape[1]
M2 = []
for cid in range(nclasses):
gt_cls = gt[:, cid][valid[:, cid] > 0].astype('float32')
pred_cls = pred[:, cid][valid[:, cid] > 0].astype('float32')
if np.all(gt_cls==0):
if np.all(pred_cls<0.5):
rec=1.
else:
rec=0.
else:
# As per PhilK. code:
# https://github.com/philkr/voc-classification/blob/master/src/train_cls.py
pred_cls -= 1e-5 * gt_cls
# print(gt_cls)
# print(pred_cls)
rec = sklearn.metrics.recall_score(gt_cls, pred_cls>0.5, average='binary')
M2.append(rec)
return np.mean(M2)
if __name__ == '__main__':
main()
| 32.955595 | 129 | 0.579929 | 2,289 | 18,554 | 4.543906 | 0.169506 | 0.037496 | 0.027786 | 0.01846 | 0.474858 | 0.43765 | 0.415441 | 0.365349 | 0.357658 | 0.340833 | 0 | 0.028642 | 0.298103 | 18,554 | 562 | 130 | 33.014235 | 0.770022 | 0.122453 | 0 | 0.422785 | 0 | 0 | 0.118358 | 0.015596 | 0 | 0 | 0 | 0.001779 | 0 | 1 | 0.025316 | false | 0 | 0.058228 | 0.002532 | 0.101266 | 0.055696 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ecd621ab56bfd508e9835987ea7537a72ff3b56 | 1,093 | py | Python | fuc/cli/vcf_index.py | sbslee/fuc | f4eb5f6b95b533252ee877920278cd4e4c964bb8 | ["MIT"] | 17 | 2021-06-09T23:23:56.000Z | 2022-03-10T11:58:46.000Z | fuc/cli/vcf_index.py | sbslee/fuc | f4eb5f6b95b533252ee877920278cd4e4c964bb8 | ["MIT"] | 27 | 2021-04-21T06:25:22.000Z | 2022-03-30T23:25:36.000Z | fuc/cli/vcf_index.py | sbslee/fuc | f4eb5f6b95b533252ee877920278cd4e4c964bb8 | ["MIT"] | null | null | null |
import sys
from .. import api
import pysam
description = """
Index a VCF file.
This command will create an index file (.tbi) for the input VCF.
"""
epilog = f"""
[Example] Index a compressed VCF file:
$ fuc {api.common._script_name()} in.vcf.gz
[Example] Index an uncompressed VCF file (will create a compressed VCF first):
$ fuc {api.common._script_name()} in.vcf
"""
def create_parser(subparsers):
parser = api.common._add_parser(
subparsers,
api.common._script_name(),
description=description,
epilog=epilog,
help='Index a VCF file.',
)
parser.add_argument(
'vcf',
help='Input VCF file to be indexed. When an uncompressed file is \n'
'given, the command will automatically create a BGZF \n'
'compressed copy of the file (.gz) before indexing.'
)
parser.add_argument(
'--force',
action='store_true',
help='Force to overwrite the index file if it is already present.'
)
def main(args):
pysam.tabix_index(args.vcf, preset='vcf', force=args.force)
| 25.418605 | 78 | 0.643184 | 148 | 1,093 | 4.662162 | 0.425676 | 0.050725 | 0.065217 | 0.082609 | 0.078261 | 0.078261 | 0.078261 | 0 | 0 | 0 | 0 | 0 | 0.247027 | 1,093 | 42 | 79 | 26.02381 | 0.838396 | 0 | 0 | 0.117647 | 0 | 0 | 0.510522 | 0.049405 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.088235 | 0 | 0.147059 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ecec912c81fe613d6387ded4c0e5121003a14a5 | 640 | py | Python | main.py | sergioyahni/captcha | f8235a4c3b64fadf71c00d9932fae7f1bf1962f5 | ["MIT"] | null | null | null | main.py | sergioyahni/captcha | f8235a4c3b64fadf71c00d9932fae7f1bf1962f5 | ["MIT"] | null | null | null | main.py | sergioyahni/captcha | f8235a4c3b64fadf71c00d9932fae7f1bf1962f5 | ["MIT"] | null | null | null |
from captcha.image import ImageCaptcha
import random
def create_captcha():
captcha_text = str(hex(random.randint(3000, 5999) * random.randint(100, 199)))
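    # e.g. hex(4000 * 150) -> '0x927c0', so the captcha text is a short hex string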
image = ImageCaptcha(width=280, height=90)
data = image.generate(captcha_text)
image.write(captcha_text, 'cImg.png')
return captcha_text
def check_captcha():
rand_n = create_captcha()
counter = 0
while counter < 3:
my_string = input("enter captcha: ")
if my_string != rand_n:
res = False
else:
res = True
break
counter += 1
return res
check_captcha()
| 23.703704 | 83 | 0.6 | 76 | 640 | 4.894737 | 0.578947 | 0.11828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04955 | 0.30625 | 640 | 26 | 84 | 24.615385 | 0.788288 | 0 | 0 | 0 | 0 | 0 | 0.037459 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.095238 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ed15e1a4c599c5f5acf73a58c9805ac84372eae | 4,045 | py | Python | openstates/openstates-master/openstates/mi/events.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | ["CC-BY-4.0"] | null | null | null | openstates/openstates-master/openstates/mi/events.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | ["CC-BY-4.0"] | null | null | null | openstates/openstates-master/openstates/mi/events.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | ["CC-BY-4.0"] | null | null | null |
from openstates.utils import LXMLMixin
import datetime as dt
import re
from billy.scrape.events import Event, EventScraper
import lxml.html
import pytz
mi_events = "http://legislature.mi.gov/doc.aspx?CommitteeMeetings"
class MIEventScraper(EventScraper, LXMLMixin):
jurisdiction = 'mi'
_tz = pytz.timezone('US/Eastern')
def scrape_event_page(self, url, chamber, session):
page = self.lxmlize(url)
trs = page.xpath("//table[@id='frg_committeemeeting_MeetingTable']/tr")
metainf = {}
for tr in trs:
tds = tr.xpath(".//td")
if len(tds) <= 1:
continue
key = tds[0].text_content().strip()
val = tds[1]
metainf[key] = {
"txt": val.text_content().strip(),
"obj": val
}
if metainf == {}:
return
# Wednesday, 5/16/2012 3:00 pm
datetime = "%s %s" % (
metainf['Date']['txt'],
metainf['Time']['txt'].replace(".","")
)
if "Cancelled" in datetime:
return
translate = {
"noon": " PM",
"a.m.": " AM",
"am": " AM", # This is due to a nasty line they had.
"a.m": "AM" #another weird one
}
for t in translate:
if t in datetime:
datetime = datetime.replace(t, translate[t])
        datetime = re.sub(r"\s+", " ", datetime)
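        # e.g. "Wednesday, 5/16/2012  3:00 am" becomes "Wednesday, 5/16/2012 3:00 AM",
        # matching the "%A, %m/%d/%Y %I:%M %p" format parsed below.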
for text_to_remove in [
"or after committees are given leave",
"or later immediately after committees are given leave",
"or later after committees are given leave by the House to meet",
"**Please note time**"
]:
datetime = datetime.split(text_to_remove)[0].strip()
datetime = datetime.replace('p.m.', 'pm')
datetime = datetime.replace('Noon',"pm")
datetime = dt.datetime.strptime(datetime, "%A, %m/%d/%Y %I:%M %p")
where = metainf['Location']['txt']
title = metainf['Committee']['txt'] # XXX: Find a better title
if chamber == 'other':
chamber = 'joint'
event = Event(session, datetime, 'committee:meeting',
title, location=where)
event.add_source(url)
event.add_source(mi_events)
chair_name = metainf['Chair']['txt'].strip()
if chair_name:
event.add_participant('chair', chair_name, 'legislator', chamber=chamber)
else:
self.warning("No chair found for event '{}'".format(title))
event.add_participant('host', metainf['Committee']['txt'],
'committee',
chamber=chamber)
agenda = metainf['Agenda']['obj']
agendas = agenda.text_content().split("\r")
related_bills = agenda.xpath("//a[contains(@href, 'getObject')]")
for bill in related_bills:
description = agenda
for a in agendas:
if bill.text_content() in a:
description = a
event.add_related_bill(
bill.text_content(),
description=description,
type='consideration'
)
self.save_event(event)
def scrape(self, chamber, session):
page = self.lxmlize(mi_events)
xpaths = {
"lower": "//span[@id='frg_committeemeetings_HouseMeetingsList']",
"upper": "//span[@id='frg_committeemeetings_SenateMeetingsList']",
"other": "//span[@is='frg_committeemeetings_JointMeetingsList']"
}
span = page.xpath(xpaths[chamber])
if len(span) > 0:
span = span[0]
else:
return
events = span.xpath(".//a[contains(@href, 'committeemeeting')]")
for event in events:
url = event.attrib['href']
if 'doPostBack' in url:
continue
self.scrape_event_page(url, chamber, session)
| 32.36 | 85 | 0.528307 | 424 | 4,045 | 4.95283 | 0.375 | 0.02619 | 0.032857 | 0.032857 | 0.074286 | 0.033333 | 0.033333 | 0 | 0 | 0 | 0 | 0.006006 | 0.341409 | 4,045 | 124 | 86 | 32.620968 | 0.782282 | 0.0267 | 0 | 0.07 | 0 | 0 | 0.203459 | 0.053662 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0 | 0.06 | 0 | 0.14 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ed2ebd3e68752d3caa55e15dd92ce5cc345106b | 418 | py | Python | code/0190-reverseBits.py | RRRoger/LeetCodeExercise | 0019a048fcfac9ac9e6f37651b17d01407c92c7d | ["MIT"] | null | null | null | code/0190-reverseBits.py | RRRoger/LeetCodeExercise | 0019a048fcfac9ac9e6f37651b17d01407c92c7d | ["MIT"] | null | null | null | code/0190-reverseBits.py | RRRoger/LeetCodeExercise | 0019a048fcfac9ac9e6f37651b17d01407c92c7d | ["MIT"] | null | null | null |
class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
ret, power = 0, 31
while n:
ret += (n & 1) << power # n & 1 means: 末位是1则是1, 0则是0 向右移位
n = n >> 1 # n 左移移位
power -= 1 # 位数-1
return ret
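# Worked example: 43261596 is 0b00000010100101000001111010011100 in 32 bits;
# reversed bit-by-bit this is 0b00111001011110000010100101000000 = 964176192,
# which is what the print(res) below outputs.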
if "__main__" == __name__:
solution = Solution()
res = solution.reverseBits(43261596)
print(res)
| 19.904762 | 69 | 0.5 | 51 | 418 | 3.941176 | 0.54902 | 0.029851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.078125 | 0.38756 | 418 | 20 | 70 | 20.9 | 0.707031 | 0.198565 | 0 | 0 | 0 | 0 | 0.024316 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.25 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ed342ee0815f43f923a49b70459817dc28094de | 1,018 | py | Python | muria/db/preload.py | xakiy/muria | 0d16ae02f65d2a4b8cfe31419a4d9343ccbe6905 | ["MIT"] | 1 | 2020-02-10T00:12:27.000Z | 2020-02-10T00:12:27.000Z | muria/db/preload.py | xakiy/muria | 0d16ae02f65d2a4b8cfe31419a4d9343ccbe6905 | ["MIT"] | 8 | 2019-12-07T16:48:08.000Z | 2021-08-31T06:31:34.000Z | muria/db/preload.py | xakiy/muria | 0d16ae02f65d2a4b8cfe31419a4d9343ccbe6905 | ["MIT"] | null | null | null |
"""Some preloads of database content."""
tables = list()
roles = list()
roles.append({"id": 1, "name": "administrator"})
roles.append({"id": 2, "name": "contributor"})
roles.append({"id": 3, "name": "staff"})
roles.append({"id": 4, "name": "parent"})
roles.append({"id": 5, "name": "caretaker"})
roles.append({"id": 6, "name": "student"})
tables.append({"model": "Role", "data": roles})
responsibilities = list()
responsibilities.append({"id": 1, "name": "manager"})
responsibilities.append({"id": 2, "name": "user"})
responsibilities.append({"id": 3, "name": "journalist"})
tables.append({"model": "Responsibility", "data": responsibilities})
sets = list()
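# Each tuple below is a (responsibility_id, role_id) pair: the "parent"
# Responsibility row gains the "child" Role via its "roles" relationship,
# as declared in the sets.append(...) entry at the bottom of this file.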
responsibility_role = [
(1, 1),
(1, 3),
(2, 1),
(2, 2),
(2, 3),
(2, 4),
(2, 5),
(2, 6),
(3, 2),
(3, 3),
(3, 6),
]
sets.append(
{
"parent": "Responsibility",
"rel": "roles",
"child": "Role",
"data": responsibility_role,
}
)
| 22.130435 | 69 | 0.52554 | 114 | 1,018 | 4.675439 | 0.298246 | 0.135084 | 0.146341 | 0.04878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039642 | 0.231827 | 1,018 | 45 | 70 | 22.622222 | 0.641944 | 0.033399 | 0 | 0 | 0 | 0 | 0.217578 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ed3a96b67e22aff964a1489de5d4c55aa41991d | 6,689 | py | Python | src/meeting_timer/settings.py | andrewjrobinson/meeting_timer | cad3303f6925d2e8961b262c6cfbecf4a30a1ce5 | ["MIT"] | null | null | null | src/meeting_timer/settings.py | andrewjrobinson/meeting_timer | cad3303f6925d2e8961b262c6cfbecf4a30a1ce5 | ["MIT"] | null | null | null | src/meeting_timer/settings.py | andrewjrobinson/meeting_timer | cad3303f6925d2e8961b262c6cfbecf4a30a1ce5 | ["MIT"] | null | null | null |
#
# MIT License
#
# Copyright (c) 2020 Andrew Robinson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import collections.abc
import json
import os
import tkinter as tk
class SettingsWrapper(object):
'''
Wraps settings sub-sections with standard interface
'''
def __init__(self, root=None, settings={}):
'''
Constructor
'''
if root is None:
self._root = self
else:
self._root = root
self.settings = settings
def settings_root(self):
'''
Get the top level settings object
'''
return self._root
def __getattr__(self, key):
'''Attribute mapper (i.e. Object.key)'''
return self.get(key)
def __getitem__(self, key):
'''Index mapper (i.e. Object['key'])'''
return self.get(key)
def get(self, key, default=None):
'''
Get the specified key
'''
if default is not None:
result = self.settings.get(key, default)
else:
result = self.settings[key]
if isinstance(result, collections.abc.Mapping):
result = SettingsWrapper(self._root, result)
return result
## end class SettingsWrapper() ##
class Settings(SettingsWrapper):
'''
Object for storing settings including writing-to/reading-from file
'''
def __init__(self, filename=None):
'''
Constructor
@param filename: string, name of file to read-from/write-to
'''
SettingsWrapper.__init__(self, self, {
"colour": {
"background": tk.StringVar(value="black"),
"finished": tk.StringVar(value="red"),
"primary": tk.StringVar(value="green"),
"warning": tk.StringVar(value="orange"),
},
"display": {
"background": tk.StringVar(value="black"),
"foreground": tk.StringVar(value="green"),
"title": tk.StringVar(value=""),
"time": tk.StringVar(value=""),
"speaker": tk.StringVar(value=""),
},
"initial": {
"duration": tk.IntVar(value=540),
"title": tk.StringVar(value="My Webinar"),
"time": tk.StringVar(value=""),
"speaker": tk.StringVar(value="Welcome"),
"warning": tk.IntVar(value=60),
"width": tk.IntVar(value=1280),
"height": tk.IntVar(value=720),
},
"next": {
"duration": tk.IntVar(value=540),
"speaker": tk.StringVar(value="John Smith"),
"title": tk.StringVar(value="My Webinar"),
"warning": tk.IntVar(value=60),
},
"finished_text": tk.StringVar(value="STOP")
})
self._filename = filename
self._settings_loaded = False
def get(self, key, default=None):
'''
Get a specific setting
'''
if not self._settings_loaded and self._filename is not None:
self._settings_loaded = True
self.read()
return SettingsWrapper.get(self, key, default=default)
def read(self, from_filename=None):
'''
Read settings from file
'''
# change filename if required
if from_filename is not None:
self._filename = from_filename
# check filename is ok
if self._filename is None:
raise FileNotFoundError(f"Settings filename not provided")
if not os.path.exists(self._filename):
raise FileNotFoundError(f"Settings file does not exist ({self._filename})")
# open file to read
if os.path.getsize(self._filename) > 0:
with open(self._filename, 'r') as f:
content = json.load(f)
self._read_setting_values(self.settings, content, ('display',))
def write(self, as_filename=None):
'''
Writes settings to file, optionally as an alternate filename
'''
# change filename if required
if as_filename is not None:
self._filename = as_filename
# check filename is ok
if self._filename is None:
raise FileNotFoundError(f"Settings filename not provided")
# open file to write
with open(self._filename, 'w+') as f:
# convert to basic python types
content = self._dump_setting_values(self.settings, ('display',))
# write to file in json format
json.dump(content, f, indent=2, sort_keys=True)
def _read_setting_values(self, settings, values, ignore_keys=()):
'''
Tree-recursively load setting values
'''
        for key, var in settings.items():
            if key not in ignore_keys:
                if isinstance(var, collections.abc.Mapping):
                    # guard against a missing sub-section in the loaded values
                    self._read_setting_values(var, values.get(key) if values is not None else None)
                elif values is not None:
                    var.set(values.get(key))
def _dump_setting_values(self, settings, ignore_keys=()):
'''
Tree-recursively dump settings into regular python types
'''
result = {}
for key,var in settings.items():
if key not in ignore_keys:
if isinstance(var, collections.abc.Mapping):
result[key] = self._dump_setting_values(var)
else:
result[key] = var.get()
return result
## end class Settings() ##
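# --- Hedged usage sketch (not part of the original file) ---
# A minimal round trip through the Settings class above, assuming it runs
# under a Tk interpreter (tk.StringVar/tk.IntVar require one). The filename
# "example_settings.json" is illustrative only.
#
# root = tk.Tk()
# settings = Settings(filename="example_settings.json")
# settings.write()                            # persist the defaults first
# print(settings.initial.title.get())         # attribute access -> "My Webinar"
# print(settings["next"]["speaker"].get())    # index access -> "John Smith"
# settings.initial.title.set("Quarterly Review")
# settings.write()                            # values survive the JSON round trip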
| 32.470874 | 87 | 0.575123 | 752 | 6,689 | 5.019947 | 0.303191 | 0.043709 | 0.063576 | 0.02649 | 0.270199 | 0.179338 | 0.148079 | 0.13457 | 0.111788 | 0.111788 | 0 | 0.005081 | 0.323217 | 6,689 | 205 | 88 | 32.629268 | 0.828805 | 0.276274 | 0 | 0.29 | 0 | 0 | 0.081254 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.11 | false | 0 | 0.04 | 0 | 0.23 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ed623c2f06e37c570057cd2950ac913943aac09 | 651 | py | Python | python/191122.py | Xanonymous-GitHub/main | 53120110bd8dc9ab33424fa26d1a8ca5b9256ebe | ["Apache-2.0"] | 1 | 2019-09-27T17:46:41.000Z | 2019-09-27T17:46:41.000Z | python/191122.py | Xanonymous-GitHub/main | 53120110bd8dc9ab33424fa26d1a8ca5b9256ebe | ["Apache-2.0"] | null | null | null | python/191122.py | Xanonymous-GitHub/main | 53120110bd8dc9ab33424fa26d1a8ca5b9256ebe | ["Apache-2.0"] | 5 | 2019-09-30T16:41:14.000Z | 2019-10-25T11:13:39.000Z |
from os import getcwd
def rdfile():
    data = list()
    # Show where this script is being executed from
    print(getcwd())
    with open("pm25.txt", 'r') as fd:
        for line in fd:
            try:
                data.append(float(line.strip()))
            except ValueError:
                pass  # skip lines that are not numbers
    print('Max =', max(data))
    print('Min =', min(data))
    print('Avg =', (sum(data) / len(data)))
    data_bigger_than_70 = 0
    for value in data:
        if value > 70:
            data_bigger_than_70 += 1
    print('The amount of data which is bigger than 70 :', data_bigger_than_70)
def main():
rdfile()
if __name__ == '__main__':
main()
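# --- Hedged smoke test (not part of the original file) ---
# Fabricated input so rdfile() has something to parse; the numbers are arbitrary.
# with open("pm25.txt", "w") as fd:
#     fd.write("12.5\n80.1\n66.0\n71.3\n")
# rdfile()  # -> Max = 80.1, Min = 12.5, Avg = 57.475, bigger-than-70 count = 2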
| 21.7 | 78 | 0.533026 | 86 | 651 | 3.837209 | 0.546512 | 0.121212 | 0.145455 | 0.145455 | 0.109091 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029213 | 0.316436 | 651 | 29 | 79 | 22.448276 | 0.71236 | 0.024578 | 0 | 0 | 0 | 0 | 0.123223 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.045455 | 0.045455 | 0 | 0.136364 | 0.227273 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ed6b577e511cc21f5108b75969a300169a86b9c | 5,534 | py | Python | treeplotter/plotter.py | Luke-Poeppel/treeplotter | 940e08b02d30f69972b0df1a5668f3b2ade02027 | ["MIT"] | 7 | 2021-06-12T17:48:17.000Z | 2022-01-27T09:47:12.000Z | treeplotter/plotter.py | Luke-Poeppel/treeplotter | 940e08b02d30f69972b0df1a5668f3b2ade02027 | ["MIT"] | 36 | 2021-06-09T18:31:44.000Z | 2022-03-17T12:06:59.000Z | treeplotter/plotter.py | Luke-Poeppel/treeplotter | 940e08b02d30f69972b0df1a5668f3b2ade02027 | ["MIT"] | 2 | 2021-12-07T18:41:53.000Z | 2022-03-09T10:46:52.000Z |
####################################################################################################
# File: plotter.py
# Purpose: Plotting module.
#
# Author: Luke Poeppel
#
# Location: Kent, 2021
####################################################################################################
import logging
import os
import json
import sys
import subprocess
import shutil
import tempfile
from .style import (
write_index_html,
write_treant_css,
write_node_css
)
here = os.path.abspath(os.path.dirname(__file__))
treant_templates = here + "/templates"
def get_logger(name, print_to_console=True, write_to_file=None):
"""
A simple helper for logging. Copied from my `decitala` package.
"""
logger = logging.getLogger(name)
if not len(logger.handlers):
logger.setLevel(logging.INFO)
if write_to_file is not None:
file_handler = logging.FileHandler(write_to_file)
logger.addHandler(file_handler)
if print_to_console:
stdout_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stdout_handler)
return logger
def prepare_arrow(dict_in):
"""
Raphaël's arrow formatting is a bit more involved. This parsing is done here.
"""
arrow_end = dict_in["arrow_end"]
arrow_width = dict_in["arrow_width"]
arrow_length = dict_in["arrow_length"]
return "-".join([arrow_end, arrow_width, arrow_length])
def _prepare_chart_config(tree):
chart_config = dict()
chart_config["container"] = "#treeplotter"
connector_style_pre = tree.connector_style.style()
connector_style = dict()
for key, val in connector_style_pre.items():
if "_" in key:
new_key = "-".join(key.split("_"))
if key == "arrow_end":
connector_style[new_key] = prepare_arrow(dict_in=connector_style_pre)
elif key in {"arrow_length" or "arrow_width"}:
continue
else:
connector_style[new_key] = val
else:
connector_style[key] = val
connector_type_dict = {
"type": tree.connector_type,
"style": connector_style
}
chart_config["connectors"] = connector_type_dict
chart_config["rootOrientation"] = tree.orientation.upper()
HTML_dict_obj = {
"HTMLclass": "treeNode"
}
chart_config["node"] = HTML_dict_obj
dumped = json.dumps(chart_config)
with open("chart_config.json", "w") as chart_config_file:
json.dump(dumped, chart_config_file)
return
def _prepare_docs_and_screenshot(
path,
tree,
serialized_tree,
background_color,
webshot,
logger
):
with open("tree.json", "w") as json_file:
json.dump(serialized_tree, json_file)
logger.info("-> Copying templates...")
for this_file in os.listdir(treant_templates):
shutil.copyfile(treant_templates + "/" + this_file, path + "/" + this_file)
logger.info("-> Writing index.html...")
write_index_html(
background_color=background_color,
path=path + "/" + "index.html"
)
logger.info("-> Writing Treant CSS file...")
write_treant_css(path=path + "/" + "Treant.css")
logger.info("-> Writing Node CSS file...")
write_node_css(
background_color=tree.node_style.background_color,
font_family=tree.node_style.font_family,
font_size=tree.node_style.font_size,
text_align=tree.node_style.text_align,
width=tree.node_style.width,
border=tree.node_style.border,
padding=tree.node_style.padding,
border_radius=tree.node_style.border_radius,
path=path + "/" + "treeplotter.css"
)
logger.info("-> Running browserify...")
parse_data_file = "/".join([path, "parse_data.js"])
browserified_file = "/".join([path, "bundle.js"])
os.system(f"browserify {parse_data_file} -o {browserified_file}")
if webshot:
logger.info("-> Creating webshot with R...")
webshot_string = "webshot::webshot(url={0}, file={1}, zoom=3, selector={2})".format(
"'" + path + "/index.html" + "'",
"'" + path + "/shot.png" + "'",
"'" + ".Treant" + "'"
)
subprocess.call(
[
f"""Rscript -e "{webshot_string}" """
],
shell=True
)
def create_tree_diagram(
tree,
background_color="#868DEE",
save_path=None,
webshot=False,
verbose=False
):
"""
This function creates a visualization of a given `tree.Tree` by wrapping the TreantJS library.
Parameters
----------
tree : tree.Tree
A `tree.Tree` object.
background_color : str
Color (given in Hex) of the desired background color of the visualization.
save_path : str
Optional path to the directory in which all the relevant files will be saved. Default is `None`.
webshot : bool
Whether or not to invoke Rs webshot library to create a high-res screenshot of the tree.
Default is `False`.
verbose : bool
Whether to print logging messages in the plotting process. Useful for debugging.
"""
if verbose:
logger = get_logger(name=__name__, print_to_console=True)
else:
logger = get_logger(name=__name__, print_to_console=False)
serialized = tree.serialize(for_treant=True)
logger.info("-> Creating directory and writing tree to JSON...")
if save_path:
if not(os.path.isdir(save_path)):
os.mkdir(save_path)
os.chdir(save_path)
_prepare_chart_config(tree=tree)
_prepare_docs_and_screenshot(
path=save_path,
tree=tree,
serialized_tree=serialized,
background_color=background_color,
webshot=webshot,
logger=logger
)
logger.info("Done ✔")
return save_path
else:
        with tempfile.TemporaryDirectory() as tmpdir:
            os.chdir(tmpdir)
            # The original call omitted the required tree/background_color/webshot
            # arguments, which raised TypeError at runtime.
            _prepare_docs_and_screenshot(path=tmpdir, tree=tree,
                                         serialized_tree=serialized,
                                         background_color=background_color,
                                         webshot=webshot, logger=logger)
            logger.info("Done ✔")
            with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
                shutil.copyfile(tmpdir + "/shot.png", tmpfile.name)
                return tmpfile.name
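# --- Hedged usage sketch (not part of the original file) ---
# How create_tree_diagram() is presumably driven; the Node/Tree constructors
# below are assumptions about treeplotter.tree, and browserify (plus R's
# webshot package, when webshot=True) must be installed for the pipeline to run.
# from treeplotter.tree import Node, Tree
# root = Node(value="root")                  # hypothetical constructor signature
# root.children = [Node(value="leaf")]
# tree = Tree(root)
# create_tree_diagram(tree, save_path="demo_tree", webshot=False, verbose=True)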
| 28.091371 | 100 | 0.696061 | 732 | 5,534 | 5.02459 | 0.285519 | 0.032898 | 0.028276 | 0.014682 | 0.057096 | 0.034802 | 0.02012 | 0.02012 | 0 | 0 | 0 | 0.002324 | 0.144742 | 5,534 | 197 | 101 | 28.091371 | 0.77435 | 0.15721 | 0 | 0.082759 | 0 | 0 | 0.145707 | 0.005604 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.055172 | 0 | 0.124138 | 0.027586 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ed70f9df4c3c063308c836d1a779ff6d33f1046 | 3,814 | py | Python | filewriter.py | FrederikBjorne/python-serial-logging | e553bc2421699a2bb38f21abffbb08ee70c81a21 | ["MIT"] | null | null | null | filewriter.py | FrederikBjorne/python-serial-logging | e553bc2421699a2bb38f21abffbb08ee70c81a21 | ["MIT"] | null | null | null | filewriter.py | FrederikBjorne/python-serial-logging | e553bc2421699a2bb38f21abffbb08ee70c81a21 | ["MIT"] | null | null | null |
#!/usr/bin/env python
import logging
from threading import Thread, Event
try:
    from queue import Queue, Empty as QueueEmpty  # Python 3
except ImportError:
    from Queue import Queue, Empty as QueueEmpty  # Python 2 fallback
import codecs
class FileWriter(Thread):
"""
This thread reads log lines from a queue and writes these to a file passed as log_file_path.
The log line queue is filled with new log lines by calling put().
Thread quits if stop() is called. If an exception is raised when writing to file, this thread
will callback to its owner to stop operation.
Setting the read_queue_timer for reading the queue determine the responsiveness to stop call
and is optional.
"""
READ_NEW_LOGLINE_TMO = 0.5
def __init__(self,
log_file_path,
callback,
read_queue_timeout=READ_NEW_LOGLINE_TMO,
encoding='utf8'):
"""
:param log_file_path: The file path to write log lines to.
:param callback: A callback method for calling back to application when error occurs.
:param read_queue_timeout: The read timeout to avoid blocking.
:param encoding: The encoding format when writing to file.
"""
super(FileWriter, self).__init__(name = self.__class__.__name__)
self._read_queue_timeout = read_queue_timeout
self._log_file_path = log_file_path
self._encoding = encoding
        self.daemon = True  # setDaemon() is deprecated in favour of the attribute
self._log_line_queue = Queue()
self._stop = Event()
self.logger = logging.getLogger(self.__class__.__name__)
self._callback = callback
codecs.register_error('backslashreplace', self.backslash_replace)
def __repr__(self):
return '{}({!r}, {!r}, {!r}, {!r})'.format(self.__class__.__name__,
self.getName(),
self._read_queue_timeout,
self._log_file_path,
self._encoding)
def put(self, text_line):
"""
Puts a text line to the text queue to be written to the specified file for logging.
:param text_line: A text line to be written to file.
"""
self._log_line_queue.put(text_line) # Queue calls are thread-safe
def stop(self):
"""
Stop writing to a log file from the internal queue and commit suicide.
"""
self._stop.set()
self.logger.debug('writer stopped')
if self.is_alive():
self.join()
self.logger.debug('writer has terminated')
@staticmethod
def backslash_replace(error):
"""
An error handler to be called if escape characters are read from the log line queue input.
"""
return u"".join([u"\\x{:x}".format(ord(error.object[i]))
for i in range(error.start, error.end)]), error.end
def run(self):
try:
with codecs.open(self._log_file_path, 'wb', self._encoding) as log_file:
self.logger.info('start writing to file.')
while not self._stop.is_set():
try: # timeout avoids blocking in order to be responsive to stop calls
log_line = self._log_line_queue.get(timeout=self._read_queue_timeout)
except QueueEmpty:
continue
else:
self._log_line_queue.task_done()
log_file.write(log_line + '\n')
except Exception as e: # this may occur if codecs fails somehow
self.logger.error('Error: {}'.format(e))
self._callback('{} has stopped running. error: {}'.format(self.getName(), str(e))) # call back error
self.logger.info('stopped writing to file.')
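# --- Hedged usage sketch (not part of the original file) ---
# Typical lifecycle of the FileWriter above; "session.log" and on_error are
# illustrative names.
# def on_error(message):
#     print('writer failed:', message)
#
# writer = FileWriter('session.log', callback=on_error)
# writer.start()                  # run() opens the file and drains the queue
# writer.put(u'first log line')
# writer.put(u'second log line')
# writer.stop()                   # sets the stop event and joins the thread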
| 40.574468 | 113 | 0.588621 | 473 | 3,814 | 4.515856 | 0.321353 | 0.032772 | 0.036049 | 0.02809 | 0.045412 | 0.029026 | 0.029026 | 0 | 0 | 0 | 0 | 0.001169 | 0.327216 | 3,814 | 93 | 114 | 41.010753 | 0.831255 | 0.299423 | 0 | 0.036364 | 0 | 0 | 0.07177 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109091 | false | 0 | 0.072727 | 0.018182 | 0.254545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ed8c0b61feb32ca367f3590a99a8b047fcbbc95 | 610 | py | Python | adv/pipple.py | XenoXilus/dl | cdfce03835cd67aac553140d6d88bc4c5c5d60ff | ["Apache-2.0"] | null | null | null | adv/pipple.py | XenoXilus/dl | cdfce03835cd67aac553140d6d88bc4c5c5d60ff | ["Apache-2.0"] | null | null | null | adv/pipple.py | XenoXilus/dl | cdfce03835cd67aac553140d6d88bc4c5c5d60ff | ["Apache-2.0"] | null | null | null |
from core.advbase import *
def module():
return Pipple
class Pipple(Adv):
conf = {}
conf['slots.a'] = ['Proper_Maintenance', 'Brothers_in_Arms']
conf['slots.frostbite.a'] = conf['slots.a']
conf['slots.d'] = 'Gaibhne_and_Creidhne'
conf['acl'] = """
`dragon(c3-s-end),x=5
`s2, (x=5 or s) and self.energy()<5
`s4
`s3, cancel
`s1, x>2
"""
conf['coabs'] = ['Tiki', 'Renee', 'Tobias']
conf['share'] = ['Summer_Luca','Patia']
if __name__ == '__main__':
    import sys  # needed for sys.argv below; not guaranteed by the star import above
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
| 25.416667 | 64 | 0.568852 | 82 | 610 | 4.012195 | 0.682927 | 0.109422 | 0.06079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019481 | 0.242623 | 610 | 24 | 65 | 25.416667 | 0.692641 | 0 | 0 | 0 | 0 | 0 | 0.451718 | 0.03437 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0.05 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4edad0b70551d7b3c45fcd8cf2f69ef8cc0ea351 | 3,799 | py | Python | test/testFactorMethods.py | turkeydonkey/nzmath3 | a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 | ["BSD-3-Clause"] | 1 | 2021-05-26T19:22:17.000Z | 2021-05-26T19:22:17.000Z | test/testFactorMethods.py | turkeydonkey/nzmath3 | a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 | ["BSD-3-Clause"] | null | null | null | test/testFactorMethods.py | turkeydonkey/nzmath3 | a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 | ["BSD-3-Clause"] | null | null | null |
import unittest
import logging
import nzmath.factor.methods as mthd
# logging.getLogger() never raises for a string name, so the original
# try/except fallback chain was dead code.
_log = logging.getLogger('test.testFactorMethod')
_log.setLevel(logging.INFO)
class FactorTest (unittest.TestCase):
def testTrialDivision(self):
self.assertEqual([(2,2),(3,1),(5,1)], mthd.trialDivision(60))
self.assertEqual([(2,7)], mthd.trialDivision(128))
self.assertEqual([(409,1),(491,1)], mthd.trialDivision(200819))
self.assertEqual([(701,1),(1487,1)], mthd.trialDivision(1042387))
def testRho(self):
self.assertEqual([(2,2),(3,1),(5,1)], mthd.rhomethod(60))
self.assertEqual([(2,7)], mthd.rhomethod(128))
self.assertEqual([(409,1),(491,1)], mthd.rhomethod(200819))
self.assertEqual([(701,1),(1487,1)], mthd.rhomethod(1042387))
self.assertEqual([(17,2), (19,1)], mthd.rhomethod(17**2 * 19))
def testPMinusOneMethod(self):
self.assertEqual([(19,1), (101,1)], mthd.pmom(1919))
# 6133 = prime.prime(800) > sqrt(B) & 800 == 0 mod 20
p = 4 * 6133 + 1
self.assertEqual([(p,1), (154858631,1)], mthd.pmom(p*154858631))
def testMPQS(self):
p = 4 * 6133 + 1
result = mthd.mpqs(p*154858631)
self.assertEqual([(p,1), (154858631,1)], result)
def testEllipticCurveMethod(self):
#self.assertEqual([(19,1), (101,1)], mthd.ecm(1919))
# 6133 = prime.prime(800) > sqrt(B) & 800 == 0 mod 20
p = 4 * 6133 + 1
self.assertEqual([(p,1), (154858631,1)], mthd.ecm(p*154858631))
def testFactor(self):
# default method
p = 4 * 6133 + 1
result = mthd.factor(p*154858631)
self.assertEqual([(p,1), (154858631,1)], result)
def testFactorSpecifyMethod(self):
self.assertEqual([(2,2),(3,1),(5,1)], mthd.factor(60, method='t'))
self.assertEqual([(2,2),(3,1),(5,1)], mthd.factor(60, method='trial'))
self.assertEqual([(19,1), (101,1)], mthd.factor(1919, method='p'))
self.assertEqual([(19,1), (101,1)], mthd.factor(1919, method='pmom'))
p = 4 * 6133 + 1
self.assertEqual([(p,1), (154858631,1)], mthd.factor(p*154858631, 'm'))
self.assertEqual([(p,1), (154858631,1)], mthd.factor(p*154858631, 'e'))
self.assertEqual([(2,2),(3,1),(5,1)], mthd.factor(60, method='r'))
def testVerbosity(self):
# default method
p = 4 * 6133 + 1
_log.info("silent:")
result = mthd.mpqs(p*154858631, verbose=False)
_log.info("verbose:")
result = mthd.mpqs(p*154858631, verbose=True)
class TrialDivisionTest (unittest.TestCase):
def testTrialDivisionTracker(self):
tdm = mthd.TrialDivision()
factorization_of_49 = tdm.factor(49, return_type='tracker')
self.assertTrue(isinstance(factorization_of_49, mthd.util.FactoringInteger))
self.assertTrue(7 in factorization_of_49.primality)
        # factoring fails if the iterator is too short
factorization_of_10201 = tdm.factor(10201,
return_type='tracker',
iterator=iter(list(range(3, 100, 2))))
self.assertTrue(10201 in factorization_of_10201.primality) # not factored
self.assertFalse(factorization_of_10201.primality[10201]) # not a prime
def suite(suffix = "Test"):
suite = unittest.TestSuite()
all_names = globals()
for name in all_names:
if name.endswith(suffix):
suite.addTest(unittest.makeSuite(all_names[name], "test"))
return suite
if __name__ == '__main__':
logging.basicConfig()
runner = unittest.TextTestRunner()
runner.run(suite())
| 39.164948 | 84 | 0.609108 | 476 | 3,799 | 4.796218 | 0.254202 | 0.144547 | 0.049058 | 0.018397 | 0.427946 | 0.426194 | 0.367061 | 0.346036 | 0.263688 | 0.263688 | 0 | 0.138597 | 0.223217 | 3,799 | 96 | 85 | 39.572917 | 0.635039 | 0.064491 | 0 | 0.16 | 0 | 0 | 0.034979 | 0.013822 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.133333 | false | 0 | 0.04 | 0 | 0.213333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
14c1c00575d1e7a958fc95661cce6a81b4fbbd6f | 2,057 | py | Python | LeetCode/0151-reverse-words-in-a-string/solution.py | RyouMon/road-of-master | 02e18c2e524db9c7df4e6f8db56b3c8408a9fc6b | ["Apache-2.0"] | null | null | null | LeetCode/0151-reverse-words-in-a-string/solution.py | RyouMon/road-of-master | 02e18c2e524db9c7df4e6f8db56b3c8408a9fc6b | ["Apache-2.0"] | null | null | null | LeetCode/0151-reverse-words-in-a-string/solution.py | RyouMon/road-of-master | 02e18c2e524db9c7df4e6f8db56b3c8408a9fc6b | ["Apache-2.0"] | null | null | null |
import collections
class Solution01:
"""
    Use the built-in API
"""
def reverseWords(self, s: str) -> str:
return ' '.join(reversed(s.split()))
class Solution02:
"""
    Implement the behaviour by hand
"""
def trim_space(self, s: str) -> list:
left, right = 0, len(s) - 1
        # strip leading and trailing spaces
while s[left] == ' ':
left += 1
while s[right] == ' ':
right -= 1
        # collapse runs of spaces
output = []
while left <= right:
if s[left] != ' ':
output.append(s[left])
elif output[-1] != ' ':
output.append(s[left])
left += 1
return output
def reverse(self, l: list, left: int, right: int) -> None:
while left < right:
l[left], l[right] = l[right], l[left]
left, right = left + 1, right - 1
def reverse_each_word(self, l: list) -> None:
n = len(l)
start = end = 0
while start < n:
            # find the end of the current word
while (end < n) and (l[end] != ' '):
end += 1
            # reverse the word
self.reverse(l, start, end - 1)
            # advance start and end
start = end + 1
end += 1
def reverseWords(self, s: str) -> str:
        # remove extra spaces
l = self.trim_space(s)
        # reverse the whole string
self.reverse(l, 0, len(l) - 1)
        # reverse each word
self.reverse_each_word(l)
return ''.join(l)
class Solution03:
"""
    Use a double-ended queue
"""
def reverseWords(self, s: str) -> str:
        # strip spaces from both ends of the string
left, right = 0, len(s) - 1
while s[left] == ' ':
left += 1
while s[right] == ' ':
right -= 1
        # push each word onto the front of the deque
dq, word = collections.deque(), []
while left <= right:
if s[left] != ' ':
word.append(s[left])
elif s[left] == ' ' and word:
dq.appendleft(''.join(word))
word = []
left += 1
dq.appendleft(''.join(word))
return ' '.join(dq)
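# --- Hedged consistency check (not part of the original file) ---
# All three variants should agree on an input with leading/trailing/extra spaces.
if __name__ == '__main__':
    s = "  the sky  is blue  "
    for cls in (Solution01, Solution02, Solution03):
        assert cls().reverseWords(s) == "blue is sky the"
    print("all three solutions agree")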
| 22.855556 | 62 | 0.427807 | 224 | 2,057 | 3.901786 | 0.241071 | 0.045767 | 0.036613 | 0.06865 | 0.244851 | 0.244851 | 0.073227 | 0.073227 | 0.073227 | 0.073227 | 0 | 0.022165 | 0.429752 | 2,057 | 89 | 63 | 23.11236 | 0.722933 | 0.056393 | 0 | 0.462963 | 0 | 0 | 0.005832 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.018519 | 0.018519 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
14c1f4a62cb93b24d14dc7d0ea4f4f2eb0f1a413 | 3,154 | py | Python | setup.py | Tiksagol/hype | 1485b80fe16a7678605afe209b2494a2a875df3f | ["MIT"] | 13 | 2021-07-31T12:07:06.000Z | 2022-03-24T15:00:50.000Z | setup.py | Tiksagol/hype | 1485b80fe16a7678605afe209b2494a2a875df3f | ["MIT"] | 2 | 2021-08-02T14:04:58.000Z | 2021-09-06T09:35:20.000Z | setup.py | Tiksagol/hype | 1485b80fe16a7678605afe209b2494a2a875df3f | ["MIT"] | 3 | 2021-08-07T13:23:54.000Z | 2022-01-24T13:23:08.000Z |
# Copyright (c) 2021, Serum Studio
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from setuptools import setup, find_packages
from hype import __license__, __author__, __version__, __desc__
BASE_URL = "https://github.com/serumstudio/hype"
def get_long_description():
with open("README.md", encoding="utf-8") as f:
readme = f.read()
return readme
extras_require = {
'color': ['colorama==0.4.4'], #: Color support
'standard': ['colorama==0.4.4'], #: Standard installation with color support
'progress': ['alive-progress==1.6.2'], #: With progressbar support
'table': ['tabulate==0.8.9'] #: With Table support
}
setup(
name = "hypecli",
author = __author__,
    description=__desc__,
long_description=get_long_description(),
long_description_content_type='text/markdown',
project_urls={
'Documentation': 'https://hype.serum.studio',
'Source': BASE_URL,
'Tracker': "%s/issues" % (BASE_URL)
},
version = __version__,
license = __license__,
url=BASE_URL,
keywords='cli,commandline-toolkit,command line toolkit,python cli,python 3'.split(','),
packages = [p for p in find_packages() if 'test' not in p],
extras_require = extras_require,
classifiers = [
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development",
"Typing :: Typed",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License"
],
)
| 38.938272 | 91 | 0.679138 | 383 | 3,154 | 5.462141 | 0.506527 | 0.042065 | 0.071702 | 0.062141 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010891 | 0.214014 | 3,154 | 81 | 92 | 38.938272 | 0.832997 | 0.366519 | 0 | 0 | 0 | 0 | 0.480347 | 0.026544 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0 | 0.04 | 0 | 0.08 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
14c57c94bb76c89fd6223c07cfaec40385ecbc9c | 1,133 | py | Python | setup.py | travisliu/data-spec-validator | 7ee0944ca9899d565ad04ed82ca26bb402970958 | ["MIT"] | 23 | 2021-08-11T08:53:15.000Z | 2022-02-14T04:44:13.000Z | setup.py | travisliu/data-spec-validator | 7ee0944ca9899d565ad04ed82ca26bb402970958 | ["MIT"] | 2 | 2021-09-11T08:59:12.000Z | 2022-03-29T00:40:42.000Z | setup.py | travisliu/data-spec-validator | 7ee0944ca9899d565ad04ed82ca26bb402970958 | ["MIT"] | 1 | 2022-01-04T07:45:22.000Z | 2022-01-04T07:45:22.000Z |
import os
import setuptools
CUR_DIR = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(CUR_DIR, "data_spec_validator", "__version__.py"), "r") as f:
exec(f.read(), about)
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="data-spec-validator",
version=about['__version__'],
author="CJHwong, falldog, HardCoreLewis, kilikkuo, xeonchen",
author_email="pypi@hardcoretech.co",
description="Simple validation tool for API",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/hardcoretech/data-spec-validator",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"data_spec_validator": "data_spec_validator"},
packages=setuptools.find_packages(),
install_requires=[
"python-dateutil",
],
extras_require={
'decorator': ['Django', 'djangorestframework'],
},
python_requires=">=3.6",
)
| 29.815789 | 84 | 0.672551 | 130 | 1,133 | 5.615385 | 0.615385 | 0.054795 | 0.116438 | 0.054795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004283 | 0.17564 | 1,133 | 37 | 85 | 30.621622 | 0.777302 | 0 | 0 | 0.0625 | 0 | 0 | 0.390997 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
14c7234590ee0036166bb3c285dac3557145714c | 9,204 | py | Python | prisms_influxdb.py | VDL-PRISM/home-assistant-components | 2041d2a257aede70613ddf8fe1e76bcc1877ef2e | ["Apache-2.0"] | null | null | null | prisms_influxdb.py | VDL-PRISM/home-assistant-components | 2041d2a257aede70613ddf8fe1e76bcc1877ef2e | ["Apache-2.0"] | null | null | null | prisms_influxdb.py | VDL-PRISM/home-assistant-components | 2041d2a257aede70613ddf8fe1e76bcc1877ef2e | ["Apache-2.0"] | null | null | null |
"""
A component which allows you to send data to an Influx database.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/influxdb/
"""
from datetime import timedelta
import functools
import logging
import itertools
import json
from persistent_queue import PersistentQueue
import requests
import voluptuous as vol
from homeassistant.const import (EVENT_STATE_CHANGED, STATE_UNAVAILABLE,
STATE_UNKNOWN, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers import state as state_helper
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_point_in_time
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DOMAIN = "prisms_influxdb"
DEPENDENCIES = []
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8086
DEFAULT_DATABASE = 'home_assistant'
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = False
DEFAULT_BATCH_TIME = 10
DEFAULT_CHUNK_SIZE = 500
REQUIREMENTS = ['influxdb==3.0.0', 'python-persistent-queue==1.3.0']
CONF_HOST = 'host'
CONF_DEPLOYMENT_ID = 'home_id'
CONF_PORT = 'port'
CONF_DB_NAME = 'database'
CONF_USERNAME = 'username'
CONF_PASSWORD = 'password'
CONF_SSL = 'ssl'
CONF_VERIFY_SSL = 'verify_ssl'
CONF_BLACKLIST = 'blacklist'
CONF_WHITELIST = 'whitelist'
CONF_TAGS = 'tags'
CONF_BATCH_TIME = 'batch_time'
CONF_CHUNK_SIZE = 'chunk_size'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_DEPLOYMENT_ID): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.positive_int,
vol.Optional(CONF_DB_NAME, default=DEFAULT_DATABASE): cv.string,
vol.Optional(CONF_USERNAME, default=None): vol.Any(cv.string, None),
vol.Optional(CONF_PASSWORD, default=None): vol.Any(cv.string, None),
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL,
default=DEFAULT_VERIFY_SSL): cv.boolean,
vol.Optional(CONF_BLACKLIST, default=[]): cv.ensure_list,
vol.Optional(CONF_WHITELIST, default=[]): cv.ensure_list,
vol.Optional(CONF_TAGS, default={}): dict,
vol.Optional(CONF_BATCH_TIME,
default=DEFAULT_BATCH_TIME): cv.positive_int,
vol.Optional(CONF_CHUNK_SIZE,
default=DEFAULT_CHUNK_SIZE): cv.positive_int,
})
}, extra=vol.ALLOW_EXTRA)
RUNNING = True
# pylint: disable=too-many-locals
def setup(hass, config):
"""Setup the InfluxDB component."""
from influxdb import InfluxDBClient
conf = config[DOMAIN]
blacklist = conf[CONF_BLACKLIST]
whitelist = conf[CONF_WHITELIST]
tags = conf[CONF_TAGS]
batch_time = conf[CONF_BATCH_TIME]
chunk_size = conf[CONF_CHUNK_SIZE]
tags[CONF_DEPLOYMENT_ID] = conf[CONF_DEPLOYMENT_ID]
influx = InfluxDBClient(host=conf[CONF_HOST],
port=conf[CONF_PORT],
username=conf[CONF_USERNAME],
password=conf[CONF_PASSWORD],
database=conf[CONF_DB_NAME],
ssl=conf[CONF_SSL],
verify_ssl=conf[CONF_VERIFY_SSL])
events = PersistentQueue('prisms_influxdb.queue',
path=hass.config.config_dir)
render = functools.partial(get_json_body, hass=hass, tags=tags)
def influx_event_listener(event):
"""Listen for new messages on the bus and sends them to Influx."""
state = event.data.get('new_state')
if state is None or state.state in (
STATE_UNKNOWN, '', STATE_UNAVAILABLE) or \
state.entity_id in blacklist:
# The state is unknown or it is on the black list
return
if len(whitelist) > 0 and state.entity_id not in whitelist:
# It is not on the white list
return
if batch_time == 0:
# Since batch time hasn't been set, just upload as soon as an event
# occurs
try:
_LOGGER.debug("Since batch_time == 0, writing data")
json_body = render(event)
write_data(influx, json_body)
except ValueError as e:
_LOGGER.error("Something is wrong with the provided template: %s", e)
return
else:
# Convert object to pickle-able. Since State.attributes uses
# MappingProxyType, it is not pickle-able
if event.data['new_state']:
event.data['new_state'].attributes = dict(event.data['new_state'].attributes)
if event.data['old_state']:
event.data['old_state'].attributes = dict(event.data['old_state'].attributes)
# Store event to be uploaded later
events.push(event)
_LOGGER.debug("Saving event for later (%s)", len(events))
hass.bus.listen(EVENT_STATE_CHANGED, influx_event_listener)
if batch_time != 0:
# Set up task to upload batch data
_LOGGER.debug("Starting task to upload batch data")
write_batch_data(hass, events, influx, render, batch_time, chunk_size)
def stop(event):
global RUNNING
_LOGGER.info("Shutting down PRISMS InfluxDB component")
RUNNING = False
# Register to know when home assistant is stopping
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop)
return True
def write_data(influx, json_body):
from influxdb import exceptions
try:
influx.write_points(json_body)
except requests.exceptions.RequestException as e:
_LOGGER.exception('Unable to connect to database: %s', e)
return False
except exceptions.InfluxDBClientError as e:
error = json.loads(e.content)['error']
_LOGGER.exception('Error saving event "%s": %s', str(json_body)[:1000], error)
return False
except exceptions.InfluxDBServerError as e:
_LOGGER.exception('Error saving event "%s" to InfluxDB: %s', str(json_body)[:1000], e)
return False
except Exception: # Catch anything else
_LOGGER.exception("An unknown exception happened while uploading data")
return False
return True
def write_batch_data(hass, events, influx, render, batch_time, chunk_size):
def next_time():
return dt_util.now() + timedelta(seconds=batch_time)
def action(now):
while RUNNING:
_LOGGER.info("Trying to upload data")
if len(events) == 0:
# No more events to upload
_LOGGER.info("Nothing to upload")
break
events_chunk = events.peek(chunk_size)
size = len(events_chunk)
_LOGGER.info("Uploading chunk of size %s (%s)", size, len(events))
try:
# Render and write events
data = itertools.chain(*[render(event) for event in events_chunk])
result = write_data(influx, list(data))
except ValueError as e:
_LOGGER.error("Something is wrong with the provided template: %s", e)
return
if result:
# Chunk got saved so remove events
_LOGGER.info("Data was uploaded successfully so deleting data")
events.delete(size)
if size < chunk_size:
_LOGGER.debug("Finished uploading data because size <"
" chunk_size: %s < %s (%s)", size,
chunk_size, len(events))
break
else:
# Unable to write data so give up for now
_LOGGER.error("Error while trying to upload data. Trying again later")
break
if RUNNING:
_LOGGER.debug("Flushing all events that were deleted")
events.flush()
# Schedule again
next = next_time()
_LOGGER.info("Scheduling to upload data at %s", next)
track_point_in_time(hass, action, next)
# Start the action
next = next_time()
_LOGGER.info("Scheduling to upload data at %s", next)
track_point_in_time(hass, action, next)
def get_json_body(event, hass, tags):
state = event.data.get('new_state')
try:
_state = float(state_helper.state_as_number(state))
_state_key = "value"
except ValueError:
_state = state.state
_state_key = "state"
measurement = state.attributes.get('unit_of_measurement')
if measurement in (None, ''):
measurement = state.entity_id
event_time = state.attributes.get('sample_time', event.time_fired)
json_body = [
{
'measurement': measurement,
'tags': {
'domain': state.domain,
'entity_id': state.object_id,
},
'time': event_time,
'fields': {
_state_key: _state,
}
}
]
for tag in tags:
json_body[0]['tags'][tag] = tags[tag]
return json_body
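# --- Hedged configuration example (not part of the original file) ---
# A dict that CONFIG_SCHEMA above would validate; in a real Home Assistant
# install this lives in configuration.yaml. Host and home_id values are made up.
# example_config = {
#     DOMAIN: {
#         'host': 'influx.example.org',       # required
#         'home_id': 'deployment-42',         # required
#         'whitelist': ['sensor.air_quality'],
#         'batch_time': 10,                   # 0 uploads on every state change
#     }
# }
# CONFIG_SCHEMA(example_config)  # fills in the remaining defaults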
| 34.215613 | 94 | 0.624402 | 1,111 | 9,204 | 4.976598 | 0.229523 | 0.022789 | 0.029843 | 0.008682 | 0.197504 | 0.1465 | 0.105987 | 0.093688 | 0.093688 | 0.093688 | 0 | 0.004412 | 0.285854 | 9,204 | 268 | 95 | 34.343284 | 0.836756 | 0.092243 | 0 | 0.158163 | 0 | 0 | 0.128831 | 0.006129 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040816 | false | 0.015306 | 0.081633 | 0.005102 | 0.183673 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
14c9c1f833fdc6508d89df41045c267b53031119 | 587 | py | Python | utils/callbacks/callbacks_weather.py | Chris1nexus/carla-data-collector | 333019622cb07dc53bbe8f1c07cfb12fbfaae60c | ["MIT"] | null | null | null | utils/callbacks/callbacks_weather.py | Chris1nexus/carla-data-collector | 333019622cb07dc53bbe8f1c07cfb12fbfaae60c | ["MIT"] | null | null | null | utils/callbacks/callbacks_weather.py | Chris1nexus/carla-data-collector | 333019622cb07dc53bbe8f1c07cfb12fbfaae60c | ["MIT"] | null | null | null |
import numpy as np
import os
from ..helpers import save_json
def callback_weather_fn(sensor_data, custom_args):
## note that in this function, sensor data comes from the
## 'weather' custom arg
## the sensor is used just to trigger the callback at the correct timestamp
weather = custom_args['weather']
world = custom_args['world']
data_dict = weather.to_json()
return data_dict
def save_weather_data_fn(outdir, data_dict, frame_id):
output_file_path = os.path.join(outdir, str(frame_id)+ '.json')
save_json(output_file_path, data_dict)
| 29.35 | 79 | 0.722317 | 89 | 587 | 4.516854 | 0.494382 | 0.079602 | 0.069652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.194208 | 587 | 20 | 80 | 29.35 | 0.849894 | 0.253833 | 0 | 0 | 0 | 0 | 0.039352 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.272727 | 0 | 0.545455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
14cbaa1dbf623ab97aaa48323072de223e8374d1 | 1,393 | py | Python | exp2.py | advaithca/CG_LAB | 07c4424be2f37d21ed7af804361f0a992a8124ac | ["MIT"] | null | null | null | exp2.py | advaithca/CG_LAB | 07c4424be2f37d21ed7af804361f0a992a8124ac | ["MIT"] | null | null | null | exp2.py | advaithca/CG_LAB | 07c4424be2f37d21ed7af804361f0a992a8124ac | ["MIT"] | null | null | null |
#drawing a line using DDA
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import sys
import math
def init():
    glClearColor(1.0, 1.0, 1.0, 1.0)  # colour components are clamped to [0, 1]; the original 2.0 behaved as 1.0
gluOrtho2D(-100.0,100.0,-100.0,100.0)
x1 = 0
x2 = 0
y1 = 0
y2 = 0
def plotpoints():
global x1, y1, x2, y2
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(1.0,0.0,0.0)
glPointSize(5.0)
dx = x2 - x1
dy = y2 - y1
    if abs(dx) > abs(dy):
        steps = abs(dx)
    else:
        steps = abs(dy)
    if steps == 0:
        steps = 1  # coincident endpoints: avoid division by zero, plot a single point
    ix = dx/steps
    iy = dy/steps
x = x1
y = y1
glBegin(GL_POINTS)
glVertex2f(x,y)
glEnd()
    for i in range(steps + 1):  # steps is already non-negative
x = x + ix
y = y + iy
glBegin(GL_POINTS)
glVertex2f(x,y)
glEnd()
glFlush()
def main():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB)
glutInitWindowSize(500,500)
glutInitWindowPosition(50,50)
glutCreateWindow("DDA")
global x1, x2, y1, y2
print("Enter coordinates of end-points")
x1 = int(input("X-coordinate of 1st point : "))
y1 = int(input("Y-coordinate of 1st point : "))
x2 = int(input("X-coordinate of 2nd point : "))
y2 = int(input("Y-coordinate of 2nd point : "))
glutDisplayFunc(plotpoints)
init()
glutMainLoop()
if __name__ == "__main__":
main()
| 21.106061 | 52 | 0.557789 | 196 | 1,393 | 3.887755 | 0.397959 | 0.010499 | 0.027559 | 0.031496 | 0.215223 | 0.104987 | 0.08399 | 0 | 0 | 0 | 0 | 0.080713 | 0.315147 | 1,393 | 66 | 53 | 21.106061 | 0.718029 | 0.017229 | 0 | 0.111111 | 0 | 0 | 0.118098 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.092593 | 0 | 0.148148 | 0.018519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
14cc76852586183e306354dd7443e72f19468e4e | 4,884 | py | Python | landlab/io/netcdf/dump.py | clebouteiller/landlab | e6f47db76ea0814c4c5a24e695bbafb74c722ff7 | ["MIT"] | 1 | 2022-01-07T02:36:07.000Z | 2022-01-07T02:36:07.000Z | landlab/io/netcdf/dump.py | clebouteiller/landlab | e6f47db76ea0814c4c5a24e695bbafb74c722ff7 | ["MIT"] | 1 | 2021-11-11T21:23:46.000Z | 2021-11-11T21:23:46.000Z | landlab/io/netcdf/dump.py | clebouteiller/landlab | e6f47db76ea0814c4c5a24e695bbafb74c722ff7 | ["MIT"] | 2 | 2019-08-19T08:58:10.000Z | 2022-01-07T02:36:01.000Z |
import pathlib
import numpy as np
import xarray as xr
def to_netcdf(
grid, path, include="*", exclude=None, time=None, format="NETCDF4", mode="w"
):
"""Write landlab a grid to a netcdf file.
Write the data and grid information for *grid* to *path* as NetCDF.
If the *append* keyword argument in True, append the data to an existing
file, if it exists. Otherwise, clobber an existing files.
Parameters
----------
grid : ModelGrid
Landlab grid object that holds a grid and field values.
path : str
Path to which to save this grid.
include : str or iterable of str, optional
A list of unix-style glob patterns of field names to include. Fully
qualified field names that match any of these patterns will be
written to the output file. A fully qualified field name is one that
that has a prefix that indicates what grid element is defined on
(e.g. "at_node:topographic__elevation"). The default is to include
all fields.
exclude : str or iterable of str, optional
Like the *include* keyword but, instead, fields matching these
patterns will be excluded from the output file.
format : {'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', 'NETCDF4'}
Format of output netcdf file.
    time : float, optional
        Time stamp for the data, used as the "time" coordinate value.
mode : {"w", "a"}, optional
Write ("w") or append ("a") mode. If mode="w", any existing file at
this location will be overwritten. If mode="a", existing variables
will be overwritten.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.io.netcdf import to_netcdf
Create a uniform rectilinear grid with four rows and 3 columns, and add
some data fields to it.
>>> rmg = RasterModelGrid((4, 3))
>>> rmg.at_node["topographic__elevation"] = np.arange(12.0)
>>> rmg.at_node["uplift_rate"] = 2.0 * np.arange(12.0)
Create a temporary directory to write the netcdf file into.
>>> import tempfile, os
>>> temp_dir = tempfile.mkdtemp()
>>> os.chdir(temp_dir)
Write the grid to a netcdf3 file but only include the *uplift_rate*
data in the file.
>>> to_netcdf(
... rmg, "test.nc", format="NETCDF3_64BIT", include="at_node:uplift_rate"
... )
Read the file back in and check its contents.
>>> from scipy.io import netcdf
>>> fp = netcdf.netcdf_file('test.nc', 'r')
>>> 'at_node:uplift_rate' in fp.variables
True
>>> 'at_node:topographic__elevation' in fp.variables
False
>>> fp.variables['at_node:uplift_rate'][:].flatten()
array([ 0., 2., 4., 6., 8., 10., 12., 14., 16., 18., 20.,
22.])
>>> rmg.at_cell["air__temperature"] = np.arange(2.0)
>>> to_netcdf(
... rmg,
... "test-cell.nc",
... format="NETCDF3_64BIT",
... include="at_cell:*",
... # names="air__temperature", at="cell",
... )
"""
path = pathlib.Path(path)
if not path.is_file():
mode = "w"
if time is None and mode == "a":
time = np.nan
this_dataset = grid.as_dataset(include=include, exclude=exclude, time=time)
if format != "NETCDF4":
this_dataset["status_at_node"] = (
("node",),
this_dataset["status_at_node"].values.astype(dtype=int),
)
if mode == "a":
with xr.open_dataset(path) as that_dataset:
if "time" not in that_dataset.dims:
_add_time_dimension_to_dataset(that_dataset, time=np.nan)
new_vars = set(this_dataset.variables) - set(that_dataset.variables)
for var in new_vars:
that_dataset[var] = (
this_dataset[var].dims,
np.full_like(this_dataset[var].values, np.nan),
)
for var in list(that_dataset.variables):
if var.startswith("at_layer"):
del that_dataset[var]
this_dataset = xr.concat(
[that_dataset, this_dataset], dim="time", data_vars="minimal"
)
if np.isnan(this_dataset["time"][-1]):
this_dataset["time"].values[-1] = this_dataset["time"][-2] + 1.0
this_dataset.to_netcdf(path, format=format, mode="w", unlimited_dims=("time",))
def _add_time_dimension_to_dataset(dataset, time=0.0):
"""Add a time dimension to all variables except those at_layer."""
names = set(
[
name
for name in dataset.variables
if name.startswith("at_") and not name.startswith("at_layer")
]
)
for name in names:
dataset[name] = (("time",) + dataset[name].dims, dataset[name].values[None])
dataset["time"] = (("time",), [time])
| 33.682759 | 84 | 0.600328 | 650 | 4,884 | 4.383077 | 0.287692 | 0.046332 | 0.016848 | 0.022464 | 0.089856 | 0.03861 | 0 | 0 | 0 | 0 | 0 | 0.015267 | 0.275799 | 4,884 | 144 | 85 | 33.916667 | 0.790218 | 0.551392 | 0 | 0 | 0 | 0 | 0.059655 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.06383 | 0 | 0.106383 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
14cccb90d3e5e893e8714d97f092815310280afd | 4,053 | py | Python | app.py | ethylomat/MathPhysTheoTS | 76144c3990d9511817cfaa007a75ec55bc8e7310 | ["MIT"] | 1 | 2019-04-29T22:23:22.000Z | 2019-04-29T22:23:22.000Z | app.py | ethylomat/MathPhysTheoTS | 76144c3990d9511817cfaa007a75ec55bc8e7310 | ["MIT"] | 2 | 2016-08-11T14:26:47.000Z | 2016-08-11T14:29:44.000Z | app.py | ethylomat/MathPhysTheoTS | 76144c3990d9511817cfaa007a75ec55bc8e7310 | ["MIT"] | null | null | null |
from flask import request, url_for, g
from flask_api import FlaskAPI, status, exceptions
from flask_sqlalchemy import SQLAlchemy
import arrow
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from flask_cors import CORS
app = FlaskAPI(__name__)
cors = CORS(app, resources={r"/*": {"origins": "*"}})
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///tickets.db'
db = SQLAlchemy(app)
class Ticket(db.Model):
id = db.Column(db.Integer, primary_key=False)
ticketcode = db.Column(db.String(80), unique=True, primary_key=True)
arrived = db.Column(db.Boolean, unique=False)
arrived_at = db.Column(db.String(80), unique=False)
def __init__(self, ticketcode):
self.ticketcode = ticketcode
self.arrived = False
self.arrived_at = ""
def __repr__(self):
return '<User %r>' % self.ticketcode
def arrived():
return len(Ticket.query.filter_by(arrived=True).all())
def not_in_list_repr(ticketcode, arrived):
return {
'ticketcode' : str(ticketcode),
'status' : 'nil',
'count_arrived' : arrived
}
def arrived_repr(ticketcode, ticket, arrived):
return {
'ticketcode' : str(ticketcode),
'status' : 'arr',
'timestamp' : arrow.get(ticket.arrived_at).format('YYYY-MM-DD HH:mm:ss ZZ'),
'human_timestamp' : arrow.get(ticket.arrived_at).humanize(),
'count_arrived' : arrived
}
def not_arrived_repr(ticketcode, ticket, arrived):
return {
'ticketcode' : str(ticketcode),
'status' : 'n_arr',
'count_arrived' : arrived
}
def already_arrived_repr(ticketcode, ticket, arrived):
return {
'ticketcode' : str(ticketcode),
'status' : 'a_arr',
'timestamp' : arrow.get(ticket.arrived_at).format('YYYY-MM-DD HH:mm:ss ZZ'),
'human_timestamp' : arrow.get(ticket.arrived_at).humanize(),
'count_arrived' : arrived
}
def all_tickets_repr(arrived, not_arrived):
count_arrived = len(arrived)
count_not_arrived = len(not_arrived)
not_arrived_ticketcodes = []
for i in not_arrived:
not_arrived_ticketcodes.append(i.ticketcode)
arrived_ticketcodes = {}
for i in arrived:
arrived_ticketcodes[i.ticketcode] = arrow.get(i.arrived_at).format('YYYY-MM-DD HH:mm:ss ZZ')
return {
'count_arrived' : count_arrived,
'count_not_arrived' : count_not_arrived,
'arrived' : arrived_ticketcodes,
#'not_arrived' : not_arrived_ticketcodes
}
@app.route("/", methods=['GET'])
def all_tickets():
if request.method == 'GET':
arrived = Ticket.query.filter_by(arrived=True).all()
not_arrived = Ticket.query.filter_by(arrived=False).all()
return all_tickets_repr(arrived, not_arrived)
@app.route("/<int:ticketcode>/", methods=['GET', 'POST']) #, 'PUT', 'DELETE']
def arrive(ticketcode):
    if request.method == 'GET':
ticketcode = str(ticketcode)
ticket = Ticket.query.get(ticketcode)
        if ticket is not None:
if ticket.arrived == True:
return already_arrived_repr(ticketcode, ticket, arrived())
else:
return not_arrived_repr(ticketcode, ticket, arrived())
else:
return not_in_list_repr(ticketcode, arrived())
    if request.method == 'POST':
ticketcode = str(ticketcode)
ticket = Ticket.query.get(ticketcode)
        if ticket is not None:
if ticket.arrived == True:
return already_arrived_repr(ticketcode, ticket, arrived())
else:
ticket.arrived = True
ticket.arrived_at = arrow.utcnow().timestamp
db.session.commit()
return arrived_repr(ticketcode, ticket, arrived())
else:
return not_in_list_repr(ticketcode, arrived())
#if request.method == 'PUT':
if __name__ == "__main__":
admin = Admin(app)
admin.add_view(ModelView(Ticket, db.session))
app.run(debug=True, host="0.0.0.0")
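# --- Hedged client sketch (not part of the original file) ---
# Exercising the API above against a locally running server (default Flask port);
# ticket 1234 must exist in tickets.db to move past the 'nil' status.
# import requests
# requests.get('http://127.0.0.1:5000/1234/').json()   # 'n_arr', 'a_arr' or 'nil'
# requests.post('http://127.0.0.1:5000/1234/').json()  # marks the ticket arrived
# requests.get('http://127.0.0.1:5000/').json()        # arrival counts and times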
| 29.583942 | 100 | 0.635085 | 479 | 4,053 | 5.164927 | 0.212944 | 0.07882 | 0.059418 | 0.076395 | 0.548504 | 0.498787 | 0.407842 | 0.381164 | 0.377526 | 0.377526 | 0 | 0.002588 | 0.237355 | 4,053 | 136 | 101 | 29.801471 | 0.7978 | 0.020725 | 0 | 0.346535 | 0 | 0 | 0.100605 | 0.005799 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09901 | false | 0 | 0.069307 | 0.059406 | 0.356436 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
14d5f7d082a22edb6ba40c486b8faa869556d8a1 | 2,649 | py | Python | simsiam/engine/supervised.py | tillaczel/simsiam | d4d03aae625314ac2f24155fac3ca5bfc31502c7 | ["MIT"] | null | null | null | simsiam/engine/supervised.py | tillaczel/simsiam | d4d03aae625314ac2f24155fac3ca5bfc31502c7 | ["MIT"] | null | null | null | simsiam/engine/supervised.py | tillaczel/simsiam | d4d03aae625314ac2f24155fac3ca5bfc31502c7 | ["MIT"] | null | null | null |
from omegaconf import DictConfig
import pytorch_lightning as pl
import numpy as np
import torch
import wandb
from simsiam.models import get_resnet
from simsiam.metrics import get_accuracy
from simsiam.optimizer import get_optimizer, get_scheduler
class SupervisedEngine(pl.LightningModule):
def __init__(self, config: DictConfig):
super().__init__()
self.config = config
self.resnet = get_resnet(num_classes=config.dataset.n_classes)
self.loss_func = torch.nn.CrossEntropyLoss()
self.predict_step = self.validation_step
self.test_step = self.validation_step
@property
def lr(self):
result = self.optimizers().param_groups[0]['lr']
return result
def forward(self, x):
x = self.resnet(x)
return x
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self.resnet(x)
loss = self.loss_func(y_hat, y[:, 0])
self.log('lr', self.lr, prog_bar=True, on_step=True, logger=False) # For progress bar
return {'loss': loss}
def training_epoch_end(self, outputs: list):
loss = torch.stack([x['loss'] for x in outputs]).mean()
metrics = {'train/loss': loss}
        metrics.update({'train/lr': self.lr})  # no placeholder, so no f-string needed
self.logger.experiment.log(metrics, step=self.current_epoch) # For wandb
self.log_dict(metrics, prog_bar=False, on_epoch=True, on_step=False, logger=False, sync_dist=True) # For callbacks
def validation_step(self, batch, batch_idx):
x, y = batch
f = self.resnet(x)
return f.detach().cpu(), y.detach().cpu()
def validation_epoch_end(self, outputs: list):
self.calc_acc(outputs, 'valid')
def calc_acc(self, outputs, data_split):
y_hat, y = map(torch.cat, zip(*outputs))
y_hat, y = np.argsort(y_hat.numpy(), axis=1)[:, ::-1], y.numpy()
acc = dict()
_acc = get_accuracy(y_hat, y, (1, 3, 5))
for k, v in _acc.items():
acc[f'{data_split}/supervised_{k}'] = v
self.logger.experiment.log(acc, step=self.current_epoch) # For wandb
self.log_dict(acc, prog_bar=False, on_epoch=True, on_step=False, logger=False,
sync_dist=True) # For callbacks
def configure_optimizers(self):
training_config = self.config.training
optimizer = get_optimizer(training_config.optimizer, self.resnet.parameters())
if training_config.scheduler is not None:
scheduler = get_scheduler(training_config.scheduler, optimizer)
return [optimizer], [scheduler]
else:
return optimizer
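# --- Hedged training sketch (not part of the original file) ---
# The config layout (dataset.n_classes, training.optimizer, ...) mirrors the
# fields accessed above; the config path and datamodule are assumptions.
# from omegaconf import OmegaConf
# import pytorch_lightning as pl
#
# config = OmegaConf.load('config.yaml')      # hypothetical path
# engine = SupervisedEngine(config)
# trainer = pl.Trainer(max_epochs=10)
# trainer.fit(engine, datamodule=datamodule)  # datamodule defined elsewhere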
| 33.531646 | 123 | 0.645527 | 357 | 2,649 | 4.605042 | 0.296919 | 0.034063 | 0.012165 | 0.026764 | 0.19708 | 0.1691 | 0.1691 | 0.1691 | 0.135037 | 0.087591 | 0 | 0.003471 | 0.238581 | 2,649 | 78 | 124 | 33.961538 | 0.811601 | 0.02416 | 0 | 0.033898 | 0 | 0 | 0.024087 | 0.01049 | 0 | 0 | 0 | 0 | 0 | 1 | 0.152542 | false | 0 | 0.135593 | 0 | 0.40678 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
14de21cf53b113f2413b7d529932853ff2790fae | 2,420 | py | Python | demo.py | allenjhuang/rsys_api | 41bc05fbeda5b5c76232a548aa16d33d05bfa8e4 | ["Unlicense"] | null | null | null | demo.py | allenjhuang/rsys_api | 41bc05fbeda5b5c76232a548aa16d33d05bfa8e4 | ["Unlicense"] | null | null | null | demo.py | allenjhuang/rsys_api | 41bc05fbeda5b5c76232a548aa16d33d05bfa8e4 | ["Unlicense"] | null | null | null |
#!/usr/bin/env python3
import config
import rsys_api
import secrets
import json
import logging
import sys
def main():
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
datefmt="%Y/%m/%d %H:%M:%S",
filename="demo.log"
)
logging.info("BEGIN: {script_name}".format(script_name=sys.argv[0]))
# Start new session.
session = rsys_api.Session(
config.LOGIN_BASE_URL, config.BASE_RESOURCE_PATH
)
# Authenticate.
session.password_login(
secrets.USER_NAME, secrets.PASSWORD
)
# Output throttle limits into a json file.
with open("throttle_limits.json", 'w') as output_file:
json.dump(
obj=session.get_throttle_limits(),
indent=4,
fp=output_file
)
# Output information on the next batch of campaigns into a json file.
with open("next_fetched_campaign_batch.json", 'w') as output_file:
json.dump(
obj=session.fetch_next_campaign_batch(),
indent=4,
fp=output_file
)
# Output information on a batch of campaigns into a json file.
with open("fetched_campaign_batch.json", 'w') as output_file:
json.dump(
obj=session.fetch_a_campaign_batch(
limit=200,
offset=0,
campaign_type="email"
),
indent=4,
fp=output_file
)
# Output information on the next batch of campaigns into a json file.
with open("next_fetched_campaign_batch.json", 'w') as output_file:
json.dump(
obj=session.fetch_next_campaign_batch(),
indent=4,
fp=output_file
)
# Output information on all running programs into a json file.
with open("all_fetched_programs.json", 'w') as output_file:
json.dump(
obj=session.fetch_all_programs(status="RUNNING"),
indent=4,
fp=output_file
)
# Output information on all running campaigns into a json file.
with open("all_fetched_campaigns.json", 'w') as output_file:
json.dump(
obj=session.fetch_all_campaigns(campaign_type="email"),
indent=4,
fp=output_file
)
logging.info("END: {script_name}\n".format(script_name=sys.argv[0]))
if __name__ == '__main__':
main()
| 30.632911 | 73 | 0.605372 | 304 | 2,420 | 4.615132 | 0.279605 | 0.085531 | 0.038489 | 0.055595 | 0.620813 | 0.620813 | 0.571632 | 0.565217 | 0.48325 | 0.431932 | 0 | 0.007576 | 0.290909 | 2,420 | 78 | 74 | 31.025641 | 0.810023 | 0.171488 | 0 | 0.349206 | 0 | 0 | 0.155311 | 0.071142 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015873 | false | 0.031746 | 0.095238 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
14debfd1d4eddfbeadc1ea54fc7d19ccc2df866b | 4,377 | py | Python | algorithms/common/runner.py | Fluidy/twc2020 | 0c65ab3508675a81e3edc831e45d59729dab159d | ["MIT"] | 1 | 2021-09-05T01:56:45.000Z | 2021-09-05T01:56:45.000Z | algorithms/common/runner.py | Fluidy/twc2020 | 0c65ab3508675a81e3edc831e45d59729dab159d | ["MIT"] | null | null | null | algorithms/common/runner.py | Fluidy/twc2020 | 0c65ab3508675a81e3edc831e45d59729dab159d | ["MIT"] | null | null | null |
from utils import save_params, load_params
from importlib import import_module
from environments.env import Env
def run(algorithm_name, exp_name, env_name, agent_params, train_params, use_ray, use_gpu, is_train,
num_runs=None, test_run_id=None, test_model_id=None):
"""
Runner for training or testing DRL algorithms
"""
exp_dir = 'experiments/' + exp_name
if use_ray:
try:
import ray
ray.init(num_cpus=train_params['num_cpus'], num_gpus=1)
except ImportError:
ray = None
use_ray = 0
            print('Ray is not installed. Falling back to serial training/testing mode.')
"""
Import DRL agent and training function according to algorithm_name
"""
if algorithm_name in ['ddpg', 'ddpg_pds', 'td3', 'td3_pds']:
train = import_module('algorithms.ddpg.train').train
if algorithm_name == 'ddpg':
Agent = import_module('algorithms.ddpg.agent').DDPGAgent
elif algorithm_name == 'ddpg_pds':
Agent = import_module('algorithms.ddpg_pds.agent').PDSDDPGAgent
elif algorithm_name == 'td3':
Agent = import_module('algorithms.td3.agent').TD3Agent
else:
Agent = import_module('algorithms.td3_pds.agent').PDSTD3Agent
elif algorithm_name in ['qprop', 'qprop_pds']:
train = import_module('algorithms.qprop.train').train
if algorithm_name == 'qprop':
Agent = import_module('algorithms.qprop.agent').QPropAgent
else:
Agent = import_module('algorithms.qprop_pds.agent').PDSQPropAgent
elif algorithm_name in ['preplan', 'perfect']:
train = None
Agent = import_module('algorithms.preplan.agent').PrePlanAgent
elif algorithm_name == 'non_predictive':
train = None
Agent = import_module('algorithms.non_predictive.agent').NonPredictiveAgent
else:
print('Unsupported algorithm')
return
if is_train:
"""
Training
"""
env_params = import_module('environments.' + env_name).env_params
# Save all the experiment settings to a json file
save_params([agent_params, train_params, env_params], exp_dir, 'exp_config')
# Create environment
env = Env(env_params)
if use_ray:
# Parallel training
train = ray.remote(train)
train_op = [train.remote(env, Agent, agent_params, train_params, exp_dir, run_id, use_gpu=use_gpu)
for run_id in range(num_runs)]
ray.get(train_op)
else:
# Serial training
[train(env, Agent, agent_params, train_params, exp_dir, run_id, use_gpu=use_gpu)
for run_id in range(num_runs)]
else:
"""
Testing
"""
# Get test set path
test_set_dir = 'data/' + env_name
# Load agent and env parameters from exp_dir
env_params = load_params('data/' + env_name, 'env_config')
if algorithm_name != 'perfect':
if algorithm_name == 'preplan':
env_params_train = load_params(exp_dir, 'env_config')
elif algorithm_name == 'non_predictive':
env_params_train = env_params
else:
agent_params, _, env_params_train = load_params(exp_dir, 'exp_config')
if env_params_train != env_params:
print('Warning: Testing and training env settings do not match!')
# Create environment
env = Env(env_params)
# Import testing function
test = import_module('algorithms.common.test').test
if use_ray:
# Parallel testing
test = ray.remote(test)
test_op = [test.remote(env, Agent, agent_params, exp_dir, run_id, model_id,
test_set_dir=test_set_dir, use_gpu=use_gpu)
for run_id in test_run_id for model_id in test_model_id]
ray.get(test_op)
else:
# Serial testing
[test(env, Agent, agent_params, exp_dir, run_id, model_id,
test_set_dir=test_set_dir, use_gpu=use_gpu)
for run_id in test_run_id for model_id in test_model_id]
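# Usage sketch (argument values below are illustrative, not from the project):
#   run('td3', 'my_exp', 'env_a', agent_params, train_params,
#       use_ray=0, use_gpu=False, is_train=True, num_runs=3)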
| 39.432432
| 111
| 0.59767
| 527
| 4,377
| 4.679317
| 0.189753
| 0.06326
| 0.098135
| 0.087591
| 0.413625
| 0.230333
| 0.175182
| 0.150852
| 0.150852
| 0.150852
| 0
| 0.002998
| 0.314142
| 4,377
| 110
| 112
| 39.790909
| 0.818454
| 0.064656
| 0
| 0.293333
| 0
| 0
| 0.156045
| 0.062418
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013333
| false
| 0
| 0.226667
| 0
| 0.253333
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
14e118dd6032aaabd75d35019107d6e409ebb6bc
| 875
|
py
|
Python
|
login/middleWare/auth.py
|
csk17k/WebPanel
|
fdb0ae1b2fd12d006fbca65c779369e2d3d62928
|
[
"Apache-2.0"
] | null | null | null |
login/middleWare/auth.py
|
csk17k/WebPanel
|
fdb0ae1b2fd12d006fbca65c779369e2d3d62928
|
[
"Apache-2.0"
] | null | null | null |
login/middleWare/auth.py
|
csk17k/WebPanel
|
fdb0ae1b2fd12d006fbca65c779369e2d3d62928
|
[
"Apache-2.0"
] | 1
|
2021-06-24T13:38:23.000Z
|
2021-06-24T13:38:23.000Z
|
import re
from django.conf import settings
from django.shortcuts import redirect
from django.http import HttpResponseRedirect
EXEMPT_URLS = []
if hasattr(settings, 'LOGIN_EXEMPT_URLS'):
    EXEMPT_URLS += [re.compile(url) for url in settings.LOGIN_EXEMPT_URLS]
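# Example (hypothetical setting values): LOGIN_EXEMPT_URLS = [r'^login/', r'^signup/']
# would skip the auth check for any request path matching one of these patterns.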
class AuthenticationMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
# One-time configuration and initialization.
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
        path = request.path_info.lstrip('/')
        url_is_exempt = any(url.match(path) for url in EXEMPT_URLS)
        if not url_is_exempt:
            print('checking....')
            if request.session.get('logged', False) is not True:
                print('Redirecting .....')
                return HttpResponseRedirect('/login/')
        response = self.get_response(request)
        return response
| 31.25
| 69
| 0.76
| 119
| 875
| 5.386555
| 0.512605
| 0.078003
| 0.070203
| 0.071763
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133714
| 875
| 27
| 70
| 32.407407
| 0.845646
| 0.148571
| 0
| 0
| 0
| 0
| 0.080972
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.45
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
14e28f82f57d04fe78acc078756343daa686d910
| 579
|
py
|
Python
|
tests/domain/entities/metadata_test.py
|
keigohtr/autify-web-scraper
|
007ed78c461b31007328b5560957278856908979
|
[
"Apache-2.0"
] | null | null | null |
tests/domain/entities/metadata_test.py
|
keigohtr/autify-web-scraper
|
007ed78c461b31007328b5560957278856908979
|
[
"Apache-2.0"
] | null | null | null |
tests/domain/entities/metadata_test.py
|
keigohtr/autify-web-scraper
|
007ed78c461b31007328b5560957278856908979
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime, timedelta, timezone
import freezegun
from autifycli.domain.entities.metadata import Metadata
JST = timezone(timedelta(hours=+9), "JST")
@freezegun.freeze_time("2021-08-12")
def test_metadata():
site = "https://example.com"
num_links = 0
num_images = 0
last_fetch = datetime.now(JST)
meta = Metadata(site=site, last_fetch=last_fetch)
assert site == meta.site
assert num_links == meta.num_links
assert num_images == meta.num_images
assert str(last_fetch) == str(meta.last_fetch)
assert site in str(meta)
| 25.173913
| 55
| 0.716753
| 82
| 579
| 4.902439
| 0.426829
| 0.11194
| 0.074627
| 0.094527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023013
| 0.174439
| 579
| 22
| 56
| 26.318182
| 0.817992
| 0
| 0
| 0
| 0
| 0
| 0.055268
| 0
| 0
| 0
| 0
| 0
| 0.3125
| 1
| 0.0625
| false
| 0
| 0.1875
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
14e8b8ee0a1f85b70e2cc66661f3d254f3aee85e
| 3,720
|
py
|
Python
|
keepthis/KeepThis.py
|
puhoshville/keepthis
|
70447ec367b78caba03c302470f591df2dcc1e7e
|
[
"MIT"
] | 4
|
2020-02-18T12:29:29.000Z
|
2020-11-12T10:19:37.000Z
|
keepthis/KeepThis.py
|
puhoshville/keepthis
|
70447ec367b78caba03c302470f591df2dcc1e7e
|
[
"MIT"
] | 79
|
2019-12-26T14:00:11.000Z
|
2022-03-18T02:20:45.000Z
|
keepthis/KeepThis.py
|
puhoshville/keepthis
|
70447ec367b78caba03c302470f591df2dcc1e7e
|
[
"MIT"
] | 3
|
2019-09-25T22:47:25.000Z
|
2019-10-03T15:07:36.000Z
|
import hashlib
import json
import numpy as np
import pandas as pd
from pymemcache import serde
from pymemcache.client import base
from keepthis.MemcachedConnection import MemcachedConnection
from keepthis.exceptions import KeepThisValueError
class KeepThis:
def __init__(
self,
memcached_host,
memcached_port,
):
self.memcached_host = memcached_host
self.memcached_port = memcached_port
        self.__supported_entity_types__ = (
            np.ndarray,
            # pandas types included so the pandas branch in _hash_object is reachable
            pd.DataFrame,
            pd.Series,
            pd.Index,
            str,
            int,
            float,
        )
@staticmethod
def _hash_string(input_string):
sha224 = hashlib.sha224
return sha224(input_string.encode()).hexdigest()
@staticmethod
def _hash_ndarray(input_array):
if not isinstance(input_array, np.ndarray):
raise KeepThisValueError(
"numpy.ndarray instance was expected but got {}".format(
type(input_array)
)
)
string = input_array.data.hex()
return KeepThis._hash_string(string)
@staticmethod
def _hash_pandas(input_dataframe):
if not isinstance(input_dataframe, (pd.DataFrame, pd.Series, pd.Index)):
raise KeepThisValueError(
"numpy.ndarray instance was expected but got {}".format(
type(input_dataframe)
)
)
string = pd.util.hash_pandas_object(input_dataframe).values.data.hex()
return KeepThis._hash_string(string)
def _hash_object(self, entity):
"""Converting to string non-supported by JSON objects.
:param entity: object, any item
:return: object or hash-string
"""
if not isinstance(entity, self.__supported_entity_types__):
raise KeepThisValueError(
"Entity is has type {}, while only {} supports".format(
type(entity),
self.__supported_entity_types__,
)
)
if isinstance(entity, np.ndarray):
# getting hash from numpy.ndarray
return self._hash_ndarray(entity)
elif isinstance(entity, (pd.DataFrame, pd.Series, pd.Index)):
# getting hash from pandas.DataFrame
return self._hash_pandas(entity)
else:
return entity
def drop(self):
memcached = self._get_connection()
memcached.flush_all()
memcached.close()
def _get_connection(self):
return base.Client(
(self.memcached_host, self.memcached_port),
serializer=serde.python_memcache_serializer,
deserializer=serde.python_memcache_deserializer,
)
def _get_unique_key(self, func, *args, **kwargs):
func_name = func.__name__
args_dict = [self._hash_object(x) for x in args]
args_dict.append(kwargs)
args_str = json.dumps(args_dict)
string_to_hash = "{}|{}".format(func_name, args_str)
resulting_hash = self._hash_string(string_to_hash)
return resulting_hash
def this(self, func, *args, **kwargs):
def func_wrapper(*args, **kwargs):
unique_hash = self._get_unique_key(func, *args, **kwargs)
with MemcachedConnection(self.memcached_host, self.memcached_port) as memcached:
cached_value = memcached.get(unique_hash)
if cached_value is not None:
memcached.close()
return cached_value
value_to_cache = func(*args, **kwargs)
memcached.set(unique_hash, value_to_cache)
return value_to_cache
return func_wrapper
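# Usage sketch (assumes a memcached server reachable at the given host/port):
#   kt = KeepThis("localhost", 11211)
#   cached_square = kt.this(lambda x: x * x)
#   cached_square(4)  # first call computes and stores; later calls hit memcached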
| 32.631579
| 92
| 0.608065
| 393
| 3,720
| 5.475827
| 0.272265
| 0.048327
| 0.031599
| 0.036245
| 0.201673
| 0.159851
| 0.104089
| 0.069703
| 0.069703
| 0.069703
| 0
| 0.003521
| 0.312903
| 3,720
| 113
| 93
| 32.920354
| 0.838419
| 0.049194
| 0
| 0.133333
| 0
| 0
| 0.040514
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.088889
| 0.011111
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
14ec81da7a7909c65783eff82c284b4266341daf
| 1,016
|
py
|
Python
|
sbx_bgsvc_starterpack/sbx_cfg.py
|
parkssie/sbx-bgsvc-starterpack
|
9f2cb80cc677b9ab73cbf085a910d30c40194449
|
[
"MIT"
] | null | null | null |
sbx_bgsvc_starterpack/sbx_cfg.py
|
parkssie/sbx-bgsvc-starterpack
|
9f2cb80cc677b9ab73cbf085a910d30c40194449
|
[
"MIT"
] | null | null | null |
sbx_bgsvc_starterpack/sbx_cfg.py
|
parkssie/sbx-bgsvc-starterpack
|
9f2cb80cc677b9ab73cbf085a910d30c40194449
|
[
"MIT"
] | null | null | null |
import json
from pathlib import Path
from sbx_bgsvc_starterpack.sbx_json_default import json_default
def load(default_cfg: dict = None, file: str = './cfg/cfg.json', encoding: str = 'utf-8-sig') -> dict:
    # Use None rather than a mutable {} default so calls do not share state.
    if default_cfg is None:
        default_cfg = {}
    # 1. Create the config file if it does not exist yet.
    cfg_file1 = Path(file)
    if not cfg_file1.is_file():
        save(default_cfg, file, encoding)
    # 2. Load the config file.
    with open(file, mode='rt', encoding=encoding) as f:
        cfg = json.loads(f.read())
        # Merge and re-save so that newly added default settings are kept: the values
        # stored in the file overwrite the defaults, then the merged result is saved
        # back and returned.
        default_cfg.update(cfg)
        save(default_cfg, file, encoding)
        return default_cfg
def save(cfg: dict = None, filename: str = './cfg/cfg.json', encoding: str = 'utf-8-sig'):
    if cfg is None:
        cfg = {}
    cfg_file1 = Path(filename)
    cfg_file1.parents[0].mkdir(parents=True, exist_ok=True)
    with open(filename, mode='wt', encoding=encoding) as f:  # wt: write text
        # ensure_ascii=False: keep Korean text readable instead of escaping it to \uXXXX.
        f.write(json.dumps(cfg, indent=4, ensure_ascii=False, default=json_default))
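# Usage sketch: cfg = load({'port': 8080}) merges ./cfg/cfg.json over the given
# defaults, writes the merged result back to disk, and returns it.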
| 31.75
| 100
| 0.662402
| 161
| 1,016
| 4.062112
| 0.490683
| 0.076453
| 0.027523
| 0.039755
| 0.174312
| 0.094801
| 0.094801
| 0.094801
| 0.094801
| 0
| 0
| 0.012438
| 0.208661
| 1,016
| 31
| 101
| 32.774194
| 0.800995
| 0.160433
| 0
| 0.117647
| 0
| 0
| 0.059172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.176471
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
14ededd86abda0dc6be68373dfe57be0e413a26e
| 10,880
|
py
|
Python
|
pyi_updater/client/patcher.py
|
rsumner31/PyUpdater1
|
d9658000472e57453267ee8fa174ae914dd8d33c
|
[
"BSD-2-Clause"
] | null | null | null |
pyi_updater/client/patcher.py
|
rsumner31/PyUpdater1
|
d9658000472e57453267ee8fa174ae914dd8d33c
|
[
"BSD-2-Clause"
] | null | null | null |
pyi_updater/client/patcher.py
|
rsumner31/PyUpdater1
|
d9658000472e57453267ee8fa174ae914dd8d33c
|
[
"BSD-2-Clause"
] | null | null | null |
# --------------------------------------------------------------------------
# Copyright 2014 Digital Sapphire Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
import logging
import os
try:
import bsdiff4
except ImportError:
bsdiff4 = None
from pyi_updater.client.downloader import FileDownloader
from pyi_updater.exceptions import PatcherError
from pyi_updater import settings
from pyi_updater.utils import (get_package_hashes,
EasyAccessDict,
lazy_import,
Version)
if bsdiff4 is None:
from pyi_updater.utils import bsdiff4_py as bsdiff4
log = logging.getLogger(__name__)
@lazy_import
def jms_utils():
import jms_utils
import jms_utils.paths
import jms_utils.system
return jms_utils
platform_ = jms_utils.system.get_system()
class Patcher(object):
"""Downloads, verifies, and patches binaries
Kwargs:
name (str): Name of binary to patch
json_data (dict): Info dict with all package meta data
current_version (str): Version number of currently installed binary
highest_version (str): Newest version available
update_folder (str): Path to update folder to place updated binary in
update_urls (list): List of urls to use for file download
        verify (bool): Whether to verify the https connection
            True: Verify https connection
            False: Don't verify https connection
"""
def __init__(self, **kwargs):
self.name = kwargs.get(u'name')
self.json_data = kwargs.get(u'json_data')
self.star_access_update_data = EasyAccessDict(self.json_data)
self.current_version = Version(kwargs.get(u'current_version'))
self.highest_version = kwargs.get(u'highest_version')
self.update_folder = kwargs.get(u'update_folder')
self.update_urls = kwargs.get(u'update_urls', [])
self.verify = kwargs.get(u'verify', True)
self.progress_hooks = kwargs.get(u'progress_hooks', [])
self.patch_data = []
self.patch_binary_data = []
self.og_binary = None
# ToDo: Update tests with linux archives.
# Used for testing.
self.plat = kwargs.get(u'platform', platform_)
self.current_filename = kwargs.get(u'current_filename')
self.current_file_hash = kwargs.get(u'current_file_hash')
file_info = self._current_file_info(self.name,
self.current_version)
if self.current_filename is None:
self.current_filename = file_info['filename']
if self.current_file_hash is None:
self.current_file_hash = file_info['file_hash']
def start(self):
"Starts patching process"
log.debug(u'Starting patch updater...')
# Check hash on installed binary to begin patching
binary_check = self._verify_installed_binary()
if not binary_check:
log.debug(u'Binary check failed...')
return False
# Getting all required patch meta-data
all_patches = self._get_patch_info(self.name)
if all_patches is False:
log.debug(u'Cannot find all patches...')
return False
# Download and verify patches in 1 go
download_check = self._download_verify_patches()
if download_check is False:
log.debug(u'Patch check failed...')
return False
try:
self._apply_patches_in_memory()
except PatcherError:
return False
else:
try:
self._write_update_to_disk()
except PatcherError:
return False
return True
def _verify_installed_binary(self):
# Verifies currently installed binary against known hash
log.debug(u'Checking for current installed binary to patch')
# I just really like using this ChDir context
# manager. Even sent the developer a cup of coffee
with jms_utils.paths.ChDir(self.update_folder):
if not os.path.exists(self.current_filename):
log.debug(u'Cannot find binary to patch')
return False
installed_file_hash = get_package_hashes(self.current_filename)
if self.current_file_hash != installed_file_hash:
log.debug(u'Binary hash mismatch')
return False
with open(self.current_filename, u'rb') as f:
self.og_binary = f.read()
os.remove(self.current_filename)
log.debug(u'Binary found and verified')
return True
    # We will take all versions, then append any version
    # that's greater than the current version to the list
    # of needed patches.
def _get_patch_info(self, name):
# Taking the list of needed patches and extracting the
# patch data from it. If any loop fails, will return False
# and start full binary update.
log.debug(u'Getting patch meta-data')
required_patches = self._get_required_patches(name)
for p in required_patches:
info = {}
plat_key = '{}*{}*{}*{}'.format(settings.UPDATES_KEY, name,
str(p), self.plat)
plat_info = self.star_access_update_data.get(plat_key)
try:
info[u'patch_name'] = plat_info[u'patch_name']
info[u'patch_urls'] = self.update_urls
info[u'patch_hash'] = plat_info[u'patch_hash']
self.patch_data.append(info)
except KeyError:
log.error(u'Missing required patch meta-data')
return False
return True
def _get_required_patches(self, name):
needed_patches = []
try:
versions = map(Version,
self.json_data[settings.UPDATES_KEY][name].keys())
        except KeyError:
            log.debug(u'No updates found in updates dict')
            # Return early: otherwise `versions` would be unbound below.
            return needed_patches
versions = sorted(versions)
log.debug(u'getting required patches')
for i in versions:
if i > self.current_version:
needed_patches.append(i)
# Used to guarantee patches are only added once
return list(set(needed_patches))
def _download_verify_patches(self):
# Downloads & verifies all patches
log.debug('Downloading patches')
downloaded = 0
total = len(self.patch_data)
for p in self.patch_data:
fd = FileDownloader(p[u'patch_name'], p[u'patch_urls'],
p[u'patch_hash'], self.verify)
data = fd.download_verify_return()
if data is not None:
self.patch_binary_data.append(data)
downloaded += 1
status = {u'total': total,
                          u'downloaded': downloaded,
u'status': u'downloading'}
self._call_progress_hooks(status)
else:
return False
status = {u'total': total,
                  u'downloaded': downloaded,
u'status': u'finished'}
self._call_progress_hooks(status)
return True
def _call_progress_hooks(self, data):
for ph in self.progress_hooks:
ph(data)
def _apply_patches_in_memory(self):
# Applies a sequence of patches in memory
log.debug(u'Applying patches')
# Beginning the patch process
self.new_binary = self.og_binary
for i in self.patch_binary_data:
try:
self.new_binary = bsdiff4.patch(self.new_binary, i)
except Exception as err:
log.debug(err, exc_info=True)
log.error(err)
raise PatcherError(u'Patch failed to apply')
def _write_update_to_disk(self):
# Writes updated binary to disk
log.debug('Writing update to disk')
filename_key = '{}*{}*{}*{}*{}'.format(settings.UPDATES_KEY, self.name,
self.highest_version,
self.plat,
u'filename')
filename = self.star_access_update_data.get(filename_key)
if filename is None:
raise PatcherError('Filename missing in version file')
with jms_utils.paths.ChDir(self.update_folder):
try:
with open(filename, u'wb') as f:
f.write(self.new_binary)
except IOError:
                # Removes the file if it somehow got created
if os.path.exists(filename):
os.remove(filename)
log.error(u'Failed to open file for writing')
raise PatcherError(u'Failed to open file for writing')
else:
file_info = self._current_file_info(self.name,
self.highest_version)
new_file_hash = file_info['file_hash']
log.debug(u'checking file hash match')
if new_file_hash != get_package_hashes(filename):
log.error(u'File hash does not match')
os.remove(filename)
raise PatcherError(u'Patched file hash bad checksum')
log.debug('Wrote update file')
def _current_file_info(self, name, version):
# Returns filename and hash for given name and version
info = {}
plat_key = '{}*{}*{}*{}'.format(settings.UPDATES_KEY, name,
version, self.plat)
plat_info = self.star_access_update_data.get(plat_key)
try:
filename = plat_info[u'filename']
except Exception as err:
log.debug(str(err))
filename = ''
log.debug(u'Current filename: {}'.format(filename))
info[u'filename'] = filename
try:
file_hash = plat_info[u'file_hash']
except Exception as err:
log.debug(str(err))
file_hash = ''
info[u'file_hash'] = file_hash
log.debug('Current file_hash {}'.format(file_hash))
return info
| 36.881356
| 79
| 0.583732
| 1,286
| 10,880
| 4.763608
| 0.206843
| 0.027424
| 0.020568
| 0.013059
| 0.217107
| 0.125204
| 0.089781
| 0.080966
| 0.044727
| 0.031995
| 0
| 0.002309
| 0.323254
| 10,880
| 294
| 80
| 37.006803
| 0.829689
| 0.19614
| 0
| 0.261307
| 0
| 0
| 0.118379
| 0
| 0
| 0
| 0
| 0.003401
| 0
| 1
| 0.055276
| false
| 0
| 0.070352
| 0
| 0.211055
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
14eee5ee3d7b6b1d697c697b8f6b60cc9529087d
| 3,090
|
py
|
Python
|
tests/test_absort.py
|
MapleCCC/ABSort
|
fa020d7f2d6025603910c12fdfe775922d33afbc
|
[
"MIT"
] | null | null | null |
tests/test_absort.py
|
MapleCCC/ABSort
|
fa020d7f2d6025603910c12fdfe775922d33afbc
|
[
"MIT"
] | null | null | null |
tests/test_absort.py
|
MapleCCC/ABSort
|
fa020d7f2d6025603910c12fdfe775922d33afbc
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import ast
import os
import re
import sys
from itertools import product
from pathlib import Path
import attr
from hypothesis import given, settings
from hypothesis.strategies import sampled_from
from absort.__main__ import (
CommentStrategy,
FormatOption,
NameRedefinition,
SortOrder,
absort_str,
)
from absort.ast_utils import ast_deep_equal
from absort.utils import constantfunc, contains
from .strategies import products
# Use the third-party library hypothesmith to generate random valid Python source
# code for property-based testing of the absort*() interface.
# Zac-HD (https://github.com/Zac-HD) is known for using such tools to test the black library and the CPython stdlib and report issues.
STDLIB_DIR = Path(sys.executable).with_name("Lib")
# Reference: https://docs.travis-ci.com/user/environment-variables/#default-environment-variables
if os.getenv("CI") and os.getenv("TRAVIS"):
py_version = os.getenv("TRAVIS_PYTHON_VERSION")
assert py_version
# Reference: https://docs.travis-ci.com/user/languages/python/#python-versions
# Reference: https://docs.travis-ci.com/user/languages/python/#development-releases-support
py_version_num = re.fullmatch(r"(?P<num>[0-9.]+)(?:-dev)?", py_version).group("num")
STDLIB_DIR = Path(f"/opt/python/{py_version}/lib/python{py_version_num}/")
TEST_FILES = list(STDLIB_DIR.rglob("*.py"))
@attr.s(auto_attribs=True)
class Option:
comment_strategy: CommentStrategy
format_option: FormatOption
sort_order: SortOrder
@classmethod
def from_tuple(cls: type, tup: tuple) -> Option:
return cls(*tup)
all_comment_strategies = list(CommentStrategy)
all_format_options = [
FormatOption(*p) # type: ignore
for p in product(*([(True, False)] * len(attr.fields(FormatOption))))
]
all_sort_orders = list(SortOrder)
arg_options = constantfunc(
products(all_comment_strategies, all_format_options, all_sort_orders).map(
Option.from_tuple
)
)
@given(sampled_from(TEST_FILES), arg_options())
@settings(deadline=None)
def test_absort_str(test_sample: Path, option: Option) -> None:
try:
source = test_sample.read_text(encoding="utf-8")
new_source = absort_str(source, **attr.asdict(option, recurse=False))
second_run_new_source = absort_str(source, **attr.asdict(option, recurse=False))
# Check that absort is deterministic and stable
assert new_source == second_run_new_source
old_ast = ast.parse(source)
new_ast = ast.parse(new_source)
assert len(old_ast.body) == len(new_ast.body)
for stmt in old_ast.body:
assert contains(new_ast.body, stmt, equal=ast_deep_equal)
except (SyntaxError, NameRedefinition, UnicodeDecodeError):
pass
except Exception as exc:
exc_cls_name = getattr(exc.__class__, "__name__", "some exception")
print(f"Encountered {exc_cls_name} when sorting {test_sample}")
raise
# TODO add unit test for absort_file()
# TODO add unit test for absort_files()
| 30
| 128
| 0.726537
| 419
| 3,090
| 5.145585
| 0.410501
| 0.025046
| 0.025046
| 0.033395
| 0.130334
| 0.130334
| 0.108071
| 0.092764
| 0.092764
| 0.048237
| 0
| 0.001166
| 0.167638
| 3,090
| 102
| 129
| 30.294118
| 0.837092
| 0.214887
| 0
| 0
| 0
| 0
| 0.08126
| 0.04063
| 0
| 0
| 0
| 0.009804
| 0.061538
| 1
| 0.030769
| false
| 0.015385
| 0.215385
| 0.015385
| 0.323077
| 0.015385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
14ef95586e2cc40aadbf1094d06743d8533ef65a
| 4,593
|
py
|
Python
|
BrickBreaker/brick_breaker.py
|
Urosh91/BrickBreaker
|
527564eb7fbab31e215a60ca8d46843a5a13791b
|
[
"MIT"
] | null | null | null |
BrickBreaker/brick_breaker.py
|
Urosh91/BrickBreaker
|
527564eb7fbab31e215a60ca8d46843a5a13791b
|
[
"MIT"
] | null | null | null |
BrickBreaker/brick_breaker.py
|
Urosh91/BrickBreaker
|
527564eb7fbab31e215a60ca8d46843a5a13791b
|
[
"MIT"
] | null | null | null |
import pygame
from BrickBreaker import *
from BrickBreaker.Scenes import *
from BrickBreaker.Shared import *
class BrickBreaker:
def __init__(self):
self._lives = 5
self._score = 0
self._bonus = 1
self._level = Level(self)
self._level.load_random()
self._pad = Pad((GameConstants.SCREEN_SIZE[0] / 2,
GameConstants.SCREEN_SIZE[1] - GameConstants.PAD_SIZE[1]),
pygame.image.load(GameConstants.PAD_IMAGE))
self._balls = [
Ball((400, 400), pygame.image.load(GameConstants.BALL_IMAGE), self)
]
pygame.mixer.pre_init(44100, -16, 2, 2048)
pygame.mixer.init()
pygame.init()
pygame.display.set_caption("Brick Breaker")
self._clock = pygame.time.Clock()
self.screen = pygame.display.set_mode(GameConstants.SCREEN_SIZE)
pygame.mouse.set_visible(False)
self._scenes = (
PlayingGameScene(self),
HighscoreScene(self),
MainMenuScene(self),
GameOverScene(self),
WinScene(self),
ControlsScene(self),
GameRulesScene(self),
)
self._current_scene = 2
self._sounds = (
pygame.mixer.Sound(GameConstants.SOUND_FILE_HITTING_A_STANDARD_BRICK),
pygame.mixer.Sound(GameConstants.SOUND_FILE_HITTING_SPEED_UP_BRICK),
pygame.mixer.Sound(GameConstants.SOUND_FILE_HITTING_EXTRA_LIFE_BRICK),
pygame.mixer.Sound(GameConstants.SOUND_FILE_BALL_HITTING_A_WALL_OR_A_PAD),
pygame.mixer.Sound(GameConstants.SOUND_FILE_GAME_OVER),
pygame.mixer.Sound(GameConstants.SOUND_FILE_HITTING_EXTRA_BALL_BRICK),
pygame.mixer.Sound(GameConstants.SOUND_FILE_HITTING_BONUS_SIZE_BRICK),
)
def start(self):
while True:
self._clock.tick(60)
self.screen.fill((0, 0, 0))
_current_scene = self._scenes[self._current_scene]
_current_scene.handle_events(pygame.event.get())
_current_scene.render()
pygame.display.update()
def change_scene(self, scene):
self._current_scene = scene
def get_level(self):
return self._level
def get_bonus(self):
return self._bonus
def increment_bonus(self):
self._bonus += 1
def reset_bonus(self):
self._bonus = 1
def double_pad(self):
keyboard = self._pad.get_keyboard_status()
mouse = self._pad.get_mouse_status()
self._pad = DoublePad((GameConstants.SCREEN_SIZE[0] / 2,
GameConstants.SCREEN_SIZE[1] - GameConstants.DOUBLE_PAD_SIZE[1]),
pygame.image.load(GameConstants.DOUBLE_PAD_IMAGE))
if keyboard:
self._pad.activate_keyboard()
if mouse:
self._pad.activate_mouse()
def reset_pad(self):
keyboard = self._pad.get_keyboard_status()
mouse = self._pad.get_mouse_status()
self._pad = Pad((GameConstants.SCREEN_SIZE[0] / 2,
GameConstants.SCREEN_SIZE[1] - GameConstants.PAD_SIZE[1]),
pygame.image.load(GameConstants.PAD_IMAGE))
if keyboard:
self._pad.activate_keyboard()
if mouse:
self._pad.activate_mouse()
def get_pad(self):
return self._pad
def get_score(self):
return self._score
def increase_score(self, score):
self._score += score * self._bonus
def increase_score_by_1k(self, score=1000):
self._score += score * self._bonus
def get_lives(self):
return self._lives
def get_balls(self):
return self._balls
def add_one_ball(self):
self._balls.append(Ball((400, 400), pygame.image.load(GameConstants.BALL_IMAGE), self))
def play_sound(self, sound_clip):
sound = self._sounds[sound_clip]
sound.stop()
sound.play()
def reduce_life_by_one(self):
self._lives -= 1
def add_one_life(self):
self._lives += 1
def reset(self):
self._lives = 5
self._score = 0
self._bonus = 1
self._level = Level(self)
self._level.load_random()
self._pad = Pad((GameConstants.SCREEN_SIZE[0] / 2,
GameConstants.SCREEN_SIZE[1] - GameConstants.PAD_SIZE[1]),
pygame.image.load(GameConstants.PAD_IMAGE))
def main():
BrickBreaker().start()
if __name__ == '__main__':
    main()
| 28.886792
| 95
| 0.609188
| 526
| 4,593
| 4.998099
| 0.193916
| 0.034614
| 0.078737
| 0.077216
| 0.54089
| 0.527957
| 0.476987
| 0.429821
| 0.351845
| 0.351845
| 0
| 0.018683
| 0.289136
| 4,593
| 158
| 96
| 29.06962
| 0.786524
| 0
| 0
| 0.310345
| 0
| 0
| 0.004572
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181034
| false
| 0
| 0.034483
| 0.051724
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
14f0fe0a265ae04fc3df046e751c6650ca481d2f
| 2,188
|
py
|
Python
|
mow/strong/phase2/predict.py
|
tychen5/Audio_Tagging_Challenge
|
4602400433d37958d95ebf40a3c0798d17cc53c6
|
[
"MIT"
] | 3
|
2019-01-22T03:14:32.000Z
|
2019-08-17T02:22:06.000Z
|
mow/strong/phase2/predict.py
|
tychen5/Audio_Tagging_Challenge
|
4602400433d37958d95ebf40a3c0798d17cc53c6
|
[
"MIT"
] | null | null | null |
mow/strong/phase2/predict.py
|
tychen5/Audio_Tagging_Challenge
|
4602400433d37958d95ebf40a3c0798d17cc53c6
|
[
"MIT"
] | null | null | null |
'''
###################################
Modified from Mike's predict_acc.py
###################################
'''
import os
import sys
import random
import pickle
import numpy as np
import pandas as pd
from keras.utils import to_categorical
from keras.models import load_model
from sklearn.metrics import accuracy_score
with open('map.pkl', 'rb') as f:
map_dict = pickle.load(f)
with open('map_reverse.pkl', 'rb') as f:
map_reverse = pickle.load(f)
Y_train = pd.read_csv('/tmp2/b03902110/phase2/data/train_label.csv')
Y_dict = Y_train['label'].map(map_dict)
Y_dict = np.array(Y_dict)
print(Y_dict.shape)
print(Y_dict)
Y_fname_train = Y_train['fname'].tolist()
Y_test = pd.read_csv('./sample_submission.csv')
Y_fname_test = Y_test['fname'].tolist()
Y_all = []
for i in Y_dict:
Y_all.append(to_categorical(i, num_classes=41))
Y_all = np.array(Y_all)
print(Y_all)
print(Y_all.shape)
X_train = np.load('/tmp2/b03902110/phase2/data/X_train.npy')
X_test = np.load('/tmp2/b03902110/phase2/data/X_test.npy')
mean = np.mean(X_train, axis=0)
std = np.std(X_train, axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
base = '/tmp2/b03902110/newphase2'
modelbase = os.path.join(base, '10_fold_model')
name = sys.argv[1]
fold_num = int(sys.argv[2])
filename = os.path.join(modelbase, name)
X_val = np.load('/tmp2/b03902110/newphase1/data/X/X{}.npy'.format(fold_num+1))
X_val = (X_val - mean) / std
Y_val = np.load('/tmp2/b03902110/newphase1/data/y/y{}.npy'.format(fold_num+1))
npy_predict = os.path.join(base, 'npy_predict')
if not os.path.exists(npy_predict):
os.makedirs(npy_predict)
csv_predict = os.path.join(base, 'csv_predict')
if not os.path.exists(csv_predict):
os.makedirs(csv_predict)
model = load_model(filename)
print('Evaluating {}'.format(name))
score = model.evaluate(X_val, Y_val)
print(score)
print('Predicting X_test...')
result = model.predict(X_test)
np.save(os.path.join(npy_predict, 'mow_cnn2d_semi_test_{}.npy'.format(fold_num+1)), result)
df = pd.DataFrame(result)
df.insert(0, 'fname', Y_fname_test)
df.to_csv(os.path.join(csv_predict, 'mow_cnn2d_semi_test_{}.csv'.format(fold_num+1)), index=False, header=True)
| 25.741176
| 111
| 0.706581
| 375
| 2,188
| 3.904
| 0.272
| 0.032787
| 0.040984
| 0.051913
| 0.246585
| 0.121585
| 0.088798
| 0
| 0
| 0
| 0
| 0.038304
| 0.105119
| 2,188
| 84
| 112
| 26.047619
| 0.709397
| 0.048903
| 0
| 0
| 0
| 0
| 0.199807
| 0.144788
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.157895
| 0
| 0.157895
| 0.122807
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
14f3c981162924e41ccbbaedac2e774e7979b26d
| 2,267
|
py
|
Python
|
environments/locomotion/scene_stadium.py
|
wx-b/unsup-3d-keypoints
|
8a2e687b802d19b750aeadffa9bb6970f5956d4d
|
[
"MIT"
] | 28
|
2021-06-15T03:38:14.000Z
|
2022-03-15T04:12:41.000Z
|
environments/locomotion/scene_stadium.py
|
wx-b/unsup-3d-keypoints
|
8a2e687b802d19b750aeadffa9bb6970f5956d4d
|
[
"MIT"
] | 3
|
2021-12-25T17:57:47.000Z
|
2022-03-24T09:52:43.000Z
|
environments/locomotion/scene_stadium.py
|
wx-b/unsup-3d-keypoints
|
8a2e687b802d19b750aeadffa9bb6970f5956d4d
|
[
"MIT"
] | 5
|
2021-11-02T17:38:36.000Z
|
2021-12-11T02:57:39.000Z
|
import os
import pybullet_data
from environments.locomotion.scene_abstract import Scene
import pybullet as p
class StadiumScene(Scene):
zero_at_running_strip_start_line = True # if False, center of coordinates (0,0,0) will be at the middle of the stadium
    stadium_halflen = 105 * 0.25  # FOOTBALL_FIELD_HALFLEN
    stadium_halfwidth = 50 * 0.25  # FOOTBALL_FIELD_HALFWID
stadiumLoaded = 0
def episode_restart(self, bullet_client):
self._p = bullet_client
Scene.episode_restart(self, bullet_client) # contains cpp_world.clean_everything()
if (self.stadiumLoaded == 0):
self.stadiumLoaded = 1
# stadium_pose = cpp_household.Pose()
# if self.zero_at_running_strip_start_line:
# stadium_pose.set_xyz(27, 21, 0) # see RUN_STARTLINE, RUN_RAD constants
if self.enable_grid:
filename = os.path.join(pybullet_data.getDataPath(), "plane_stadium.sdf")
else:
filename = os.path.join("environments/locomotion/assets", "plane_stadium.sdf")
self.ground_plane_mjcf = self._p.loadSDF(filename)
# filename = os.path.join(pybullet_data.getDataPath(),"stadium_no_collision.sdf")
# self.ground_plane_mjcf = self._p.loadSDF(filename)
#
for i in self.ground_plane_mjcf:
self._p.changeDynamics(i, -1, lateralFriction=0.8, restitution=0.5)
self._p.changeVisualShape(i, -1, rgbaColor=[1, 1, 1, 0.8])
self._p.configureDebugVisualizer(p.COV_ENABLE_PLANAR_REFLECTION, i)
# for j in range(p.getNumJoints(i)):
# self._p.changeDynamics(i,j,lateralFriction=0)
# despite the name (stadium_no_collision), it DID have collision, so don't add duplicate ground
class SinglePlayerStadiumScene(StadiumScene):
"This scene created by environment, to work in a way as if there was no concept of scene visible to user."
multiplayer = False
class MultiplayerStadiumScene(StadiumScene):
multiplayer = True
players_count = 3
def actor_introduce(self, robot):
StadiumScene.actor_introduce(self, robot)
i = robot.player_n - 1 # 0 1 2 => -1 0 +1
robot.move_robot(0, i, 0)
| 42.773585
| 123
| 0.666961
| 295
| 2,267
| 4.918644
| 0.444068
| 0.024121
| 0.028946
| 0.037216
| 0.209511
| 0.16816
| 0.114404
| 0.057891
| 0.057891
| 0
| 0
| 0.025656
| 0.243494
| 2,267
| 52
| 124
| 43.596154
| 0.820408
| 0.32554
| 0
| 0
| 0
| 0.030303
| 0.103768
| 0.01853
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.121212
| 0
| 0.484848
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
14f46540bddbc3d9b12cae1ca8aeeee6d852e367
| 522
|
py
|
Python
|
src/python/providers/movement/standard_move.py
|
daboross/dxnr
|
8f73e9d5f4473b97dcfe05804a40c9a0826e51b6
|
[
"MIT"
] | null | null | null |
src/python/providers/movement/standard_move.py
|
daboross/dxnr
|
8f73e9d5f4473b97dcfe05804a40c9a0826e51b6
|
[
"MIT"
] | null | null | null |
src/python/providers/movement/standard_move.py
|
daboross/dxnr
|
8f73e9d5f4473b97dcfe05804a40c9a0826e51b6
|
[
"MIT"
] | null | null | null |
from defs import *
from utilities import warnings
def move_to(creep: Creep, target: RoomPosition) -> int:
result = creep.moveTo(target, {
'ignoreCreeps': True,
})
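    # The first pass ignores creeps for a cheaper path search; on ERR_NO_PATH
    # we retry below with creeps treated as obstacles.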
if result == ERR_NO_PATH:
result = creep.moveTo(target, {
'ignoreCreeps': False,
})
if result != OK and result != ERR_TIRED:
warnings.warn("unknown result from (creep {}).moveTo({}): {}"
.format(creep.name, target, warnings.transform_error_code(result)))
return result
| 27.473684
| 89
| 0.603448
| 57
| 522
| 5.421053
| 0.561404
| 0.106796
| 0.110032
| 0.148867
| 0.226537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.272031
| 522
| 18
| 90
| 29
| 0.813158
| 0
| 0
| 0.285714
| 0
| 0
| 0.132184
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
14f8101f9071baa5ade2230825bde845717654bf
| 4,817
|
py
|
Python
|
pyshop/helpers/timeseries.py
|
sintef-energy/pyshop
|
2991372f023e75c69ab83ece54a47fa9c3b73d60
|
[
"MIT"
] | 1
|
2022-03-08T07:20:16.000Z
|
2022-03-08T07:20:16.000Z
|
pyshop/helpers/timeseries.py
|
sintef-energy/pyshop
|
2991372f023e75c69ab83ece54a47fa9c3b73d60
|
[
"MIT"
] | 2
|
2022-02-09T13:53:16.000Z
|
2022-03-16T14:36:21.000Z
|
pyshop/helpers/timeseries.py
|
sintef-energy/pyshop
|
2991372f023e75c69ab83ece54a47fa9c3b73d60
|
[
"MIT"
] | null | null | null |
from typing import Dict, Sequence, Union
from .typing_annotations import DataFrameOrSeries
import pandas as pd
import numpy as np
def create_constant_time_series(value:Union[int,float], start:pd.Timestamp) -> pd.Series:
return pd.Series([value], index=[start])
def remove_consecutive_duplicates(df:DataFrameOrSeries) -> DataFrameOrSeries:
"""
Compress timeseries by only keeping the first row of consecutive duplicates. This is done by comparing a copied
DataFrame/Series that has been shifted by one, with the original, and only keeping the rows in which at least one
one column value is different from the previous row. The first row will always be kept
"""
if isinstance(df, pd.DataFrame):
        df = df.loc[(df.shift() != df).any(axis=1)]
else:
df = df.loc[df.shift() != df]
return df
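# For example, pd.Series([1, 1, 2, 2, 1]) keeps the rows at positions 0, 2 and 4
# (values 1, 2, 1): each run of consecutive duplicates keeps only its first row.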
def get_timestamp_indexed_series(starttime:pd.Timestamp, time_unit:str, t:Sequence[Union[int,float]], y:Sequence[float], column_name:str='data') -> DataFrameOrSeries:
if not isinstance(t, np.ndarray):
t = np.fromiter(t, int)
if not isinstance(y, np.ndarray):
y = np.array(y, dtype=float)
if time_unit == 'minute':
delta = pd.Timedelta(minutes=1)
elif time_unit == 'second':
delta = pd.Timedelta(seconds=1)
else:
delta = pd.Timedelta(hours=1)
# Remove time zone info before calling to_datetime64 which automatically converts timestamps to utc time
tz_name = starttime.tzname()
if tz_name is not None:
starttime = starttime.tz_localize(tz=None)
t = np.repeat(starttime.to_datetime64(), t.size) + t * delta
if y.size > t.size: # Stochastic
value = pd.DataFrame(data=y, index=t)
if tz_name is not None:
value.index = value.index.tz_localize(tz=tz_name) #Add the original time zone info back
else:
value = pd.Series(data=y.flatten(), index=t, name=column_name)
if tz_name is not None:
value = value.tz_localize(tz=tz_name) #Add the original time zone info back
value[value >= 1.0e40] = np.nan
return value
def resample_resolution(time:Dict, df:DataFrameOrSeries, delta:float, time_resolution:pd.Series) -> DataFrameOrSeries:
"""
Resample timeseries when time resolution is non-constant
"""
# Convert timeseries index to integers based on the time unit
df.index = ((df.index - time['starttime']).total_seconds() * delta).astype(int)
# Compress the time resolution returned from shop, by only keeping the first of consecutive duplicate resolutions
resolution_format = time_resolution.astype(int)
compressed_resolution_format = remove_consecutive_duplicates(resolution_format)
# Extract the different time resolutions and their respective time of enactment
    resolution_tuples = list(compressed_resolution_format.items())  # .items(): Series.iteritems() was removed in pandas 2.0
# Add a dummy time at the optimization end time to serve as a well defined bound
resolution = resolution_tuples[-1][1]
end_unit_index = int((time['endtime'] - time['starttime']).total_seconds() * delta)
resolution_tuples.append((end_unit_index, resolution))
# Build the resampled output
output_parts = []
index = 0
for i, res_tuple in enumerate(resolution_tuples[:-1]):
unit_index, resolution = res_tuple
next_unit_index = resolution_tuples[i+1][0]
selection = df.iloc[unit_index:next_unit_index]
# Normalize index
# line below is commented out since it gives wrong result after concating output parts
# selection.index = selection.index - unit_index
# Resample by taking the mean of all datapoints in "resolution" sized windows
selection = selection.rolling(window=resolution).mean().shift(-(resolution-1))
# Extract the correct means from the rolling means
selection = selection.iloc[::resolution]
# Handle any remaining intervals that are less than "resolution" sized
if (next_unit_index - unit_index) % resolution != 0:
reduced_res = (next_unit_index - unit_index) % resolution
last_selection_index = next_unit_index - reduced_res
last_row = df.iloc[last_selection_index:next_unit_index].mean()
if isinstance(df, pd.Series):
last_row = pd.Series(index=[last_selection_index], data=[last_row])
else:
last_row = last_row.to_frame().T
last_row.index = [last_selection_index]
# Replace the last row, as this has been set to "nan" by the rolling mean
selection = pd.concat([selection[:-1], last_row])
output_parts.append(selection)
index = index + (next_unit_index-unit_index)//resolution
output_df = pd.concat(output_parts)
return output_df
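# Worked example (sketch): with hourly data and a time_resolution series that is 1
# for the first 3 steps and 2 afterwards, the first 3 rows pass through unchanged
# and the remaining rows are averaged pairwise into one output row per 2-hour window.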
| 44.192661
| 166
| 0.686734
| 651
| 4,817
| 4.940092
| 0.299539
| 0.041978
| 0.028296
| 0.022388
| 0.139614
| 0.107898
| 0.043532
| 0.029851
| 0.029851
| 0.029851
| 0
| 0.005606
| 0.222338
| 4,817
| 108
| 167
| 44.601852
| 0.85291
| 0.277143
| 0
| 0.104478
| 0
| 0
| 0.01196
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059701
| false
| 0
| 0.059701
| 0.014925
| 0.179104
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
14fbebb9df421f915a2a0442fc7bcdd045fbbef0
| 2,787
|
py
|
Python
|
openweave/tlv/schema/tests/test_VENDOR.py
|
robszewczyk/openweave-tlv-schema
|
c0acbccce4fcaf213a09261f79d6a141ae94f7e8
|
[
"Apache-2.0"
] | 1
|
2020-05-19T22:52:27.000Z
|
2020-05-19T22:52:27.000Z
|
openweave/tlv/schema/tests/test_VENDOR.py
|
robszewczyk/openweave-tlv-schema
|
c0acbccce4fcaf213a09261f79d6a141ae94f7e8
|
[
"Apache-2.0"
] | null | null | null |
openweave/tlv/schema/tests/test_VENDOR.py
|
robszewczyk/openweave-tlv-schema
|
c0acbccce4fcaf213a09261f79d6a141ae94f7e8
|
[
"Apache-2.0"
] | 1
|
2021-02-15T16:14:17.000Z
|
2021-02-15T16:14:17.000Z
|
#!/usr/bin/env python3
#
# Copyright (c) 2020 Google LLC.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Unit tests for VENDOR definitions.
#
import unittest
from .testutils import TLVSchemaTestCase
class Test_VENDOR(TLVSchemaTestCase):
def test_VENDOR(self):
schemaText = '''
test-vendor-1 => VENDOR [ id 0 ]
test-vendor-2 => VENDOR [ id 1 ]
test-vendor-65535 => VENDOR [ id 65535 ]
'''
(tlvSchema, errs) = self.loadValidate(schemaText)
self.assertNoErrors(errs)
def test_VENDOR_NoId(self):
schemaText = 'test-vendor-1 => VENDOR'
(tlvSchema, errs) = self.loadValidate(schemaText)
self.assertErrorCount(errs, 1)
self.assertError(errs, 'id qualifier missing')
schemaText = 'test-vendor-1 => VENDOR [ ]'
(tlvSchema, errs) = self.loadValidate(schemaText)
self.assertErrorCount(errs, 1)
self.assertError(errs, 'id qualifier missing')
def test_VENDOR_BadId(self):
schemaText = 'test-vendor-1 => VENDOR [ id 65536 ]'
(tlvSchema, errs) = self.loadValidate(schemaText)
self.assertErrorCount(errs, 1)
self.assertError(errs, 'invalid id value')
schemaText = 'test-vendor-1 => VENDOR [ id -1 ]'
(tlvSchema, errs) = self.loadValidate(schemaText)
self.assertErrorCount(errs, 1)
self.assertError(errs, 'invalid id value')
schemaText = 'test-vendor-1 => VENDOR [ id 42:1 ]'
(tlvSchema, errs) = self.loadValidate(schemaText)
self.assertErrorCount(errs, 1)
self.assertError(errs, 'invalid id value')
def test_VENDOR_InconsistentId(self):
schemaText = '''
test-vendor-1 => VENDOR [ id 1 ]
test-vendor-2 => VENDOR [ id 2 ]
test-vendor-1 => VENDOR [ id 42 ] // Inconsistent
test-vendor-2 => VENDOR [ id 2 ]
'''
(tlvSchema, errs) = self.loadValidate(schemaText)
self.assertErrorCount(errs, 1)
self.assertError(errs, 'inconsistent vendor id: 0x002A (42)')
if __name__ == '__main__':
unittest.main()
| 34.8375
| 76
| 0.614281
| 315
| 2,787
| 5.384127
| 0.342857
| 0.100236
| 0.051887
| 0.080189
| 0.548349
| 0.53066
| 0.465802
| 0.40684
| 0.40684
| 0.40684
| 0
| 0.02887
| 0.279153
| 2,787
| 79
| 77
| 35.278481
| 0.815331
| 0.239684
| 0
| 0.533333
| 0
| 0
| 0.348904
| 0
| 0
| 0
| 0.00286
| 0
| 0.288889
| 1
| 0.088889
| false
| 0
| 0.044444
| 0
| 0.155556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
14fc3caa752fb624866d5cfe60083c14dfb17ed9
| 336
|
py
|
Python
|
app/services/events.py
|
fufuok/FF.PyAdmin
|
031fcafe70ecb78488876d0c61e30ca4fb4290af
|
[
"MIT"
] | 56
|
2019-11-26T15:42:29.000Z
|
2022-03-10T12:28:07.000Z
|
app/services/events.py
|
fufuok/FF.PyAdmin
|
031fcafe70ecb78488876d0c61e30ca4fb4290af
|
[
"MIT"
] | 4
|
2020-03-20T01:51:47.000Z
|
2022-03-30T22:10:56.000Z
|
app/services/events.py
|
fufuok/FF.PyAdmin
|
031fcafe70ecb78488876d0c61e30ca4fb4290af
|
[
"MIT"
] | 15
|
2019-11-26T15:42:33.000Z
|
2022-03-09T05:41:44.000Z
|
# -*- coding:utf-8 -*-
"""
events.py
~~~~~~~~
Custom signals and events
:author: Fufu, 2019/12/20
"""
from blinker import signal
# User logged in successfully
event_user_logined = signal('event_user_logined')
# System administration operations (user authorization, permission group management, etc.)
event_sys_admin = signal('event_sys_admin')
# Example of using the app context environment
event_async_with_app_demo = signal('event_async_with_app_demo')
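# Usage sketch (standard blinker API; the receiver below is illustrative):
#   @event_user_logined.connect
#   def on_user_logined(sender, **kwargs):
#       ...
#   event_user_logined.send('login_view', user_id=1)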
| 17.684211
| 63
| 0.690476
| 47
| 336
| 4.595745
| 0.638298
| 0.152778
| 0.148148
| 0.157407
| 0.194444
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031802
| 0.157738
| 336
| 18
| 64
| 18.666667
| 0.731449
| 0.345238
| 0
| 0
| 0
| 0
| 0.300518
| 0.129534
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
14fe677b2376deed69fc96644a350773e0c985ca
| 1,635
|
py
|
Python
|
ai_finger_counting.py
|
dnovai/advancedCVProject
|
de3e75247c7b7ae617a578800c51c42fadbdc844
|
[
"MIT"
] | 1
|
2022-02-25T02:36:02.000Z
|
2022-02-25T02:36:02.000Z
|
ai_finger_counting.py
|
dnovai/advancedCVProject
|
de3e75247c7b7ae617a578800c51c42fadbdc844
|
[
"MIT"
] | null | null | null |
ai_finger_counting.py
|
dnovai/advancedCVProject
|
de3e75247c7b7ae617a578800c51c42fadbdc844
|
[
"MIT"
] | null | null | null |
import cv2
import os
import time
import advancedcv.hand_tracking as htm
import numpy as np
import itertools
patterns = np.array(list(itertools.product([0, 1], repeat=5)))
p_time = 0
cap = cv2.VideoCapture(0)
# w_cam, h_cam = 648, 480
# cap.set(3, w_cam)
# cap.set(4, h_cam)
folder_path = "finger_images"
my_list = os.listdir(folder_path)
my_list.sort()
overlay_list = []
detector = htm.HandDetector()
for im_path in my_list:
image = cv2.imread(f'{folder_path}/{im_path}')
print(f'{folder_path}/{im_path}')
overlay_list.append(image)
key_ids = [4, 8, 12, 16, 20]
while True:
success, img = cap.read()
img = detector.find_hands(img, draw=False)
lm_list = detector.get_position(img, hand_number=0, draw=False)
if len(lm_list) != 0:
fingers = []
# Thumb
if lm_list[key_ids[0]][1] > lm_list[key_ids[0]-1][1]:
fingers.append(1)
else:
fingers.append(0)
# Other fingers
for idx in range(1, len(key_ids)):
if lm_list[key_ids[idx]][2] < lm_list[key_ids[idx]-2][2]:
fingers.append(1)
else:
fingers.append(0)
dist = (patterns - fingers)**2
dist = np.sum(dist, axis=1)
min_index = np.argmin(dist)
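        # `fingers` is a 0/1 vector per finger; the nearest row of `patterns`
        # (minimum squared distance) selects which overlay image to display.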
print(min_index)
h, w, c = overlay_list[min_index+1].shape
img[0:h, 0:w] = overlay_list[min_index+1]
c_time = time.time()
fps = 1/(c_time-p_time)
p_time = c_time
cv2.putText(img, f'FPS: {str(round(fps))}', (50, 70), cv2.FONT_HERSHEY_PLAIN, 5, (255, 0, 0), 3)
cv2.imshow("Image", img)
cv2.waitKey(1)
| 24.044118
| 100
| 0.601223
| 260
| 1,635
| 3.6
| 0.373077
| 0.038462
| 0.038462
| 0.051282
| 0.215812
| 0.132479
| 0.068376
| 0
| 0
| 0
| 0
| 0.050654
| 0.251376
| 1,635
| 68
| 101
| 24.044118
| 0.714052
| 0.048318
| 0
| 0.130435
| 0
| 0
| 0.055448
| 0.029658
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.130435
| 0
| 0.130435
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
14ff741d0e6a57229801a6e6be5e98e3344172dd
| 3,085
|
py
|
Python
|
convert.py
|
AndreiBaias/PAS
|
8905f86db15806647ab7879fd32c9057a9b93868
|
[
"MIT"
] | null | null | null |
convert.py
|
AndreiBaias/PAS
|
8905f86db15806647ab7879fd32c9057a9b93868
|
[
"MIT"
] | 3
|
2022-03-30T15:43:12.000Z
|
2022-03-30T15:43:41.000Z
|
convert.py
|
AndreiBaias/PAS
|
8905f86db15806647ab7879fd32c9057a9b93868
|
[
"MIT"
] | null | null | null |
import numpy as np
import collections, numpy
import glob
from PIL import Image
from matplotlib.pyplot import cm
nrImages = 1
imageSize = 449
finalImageSize = 449
ImageNumber = 0
sourceFolder = 'images'
# sourceFolder = "testInput"
destinationFolder = 'final_text_files_2'
# destinationFolder = "testOutput"
def modifica(a):
for i in range(imageSize):
for j in range(imageSize):
if a[i][j] > 170:
a[i][j] = 255
elif a[i][j] > 120:
a[i][j] = 128
else:
a[i][j] = 0
return a
def veciniNegrii(a, x, y):
    # Count black (value 0) neighbours of pixel (x, y). Explicit bounds checks
    # replace the original try/except blocks: negative numpy indices wrap around
    # silently, so try/except alone would count pixels from the opposite border.
    ValoareNegru = 0
    s = 0
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx == 0 and dy == 0:
                continue
            nx, ny = x + dx, y + dy
            if 0 <= nx < imageSize and 0 <= ny < imageSize and a[nx][ny] == ValoareNegru:
                s += 1
    return s
def eliminaExtraCladiri(a):
for i in range(imageSize):
for j in range(imageSize):
if a[i][j] == 128 and veciniNegrii(a, i, j) >= 2:
a[i][j] = 255
return a
# image = Image.open("1570.png").convert("L")
# print(np.asarray(image))
index = 0
for filename in glob.glob(sourceFolder + '/*.png'):
image = Image.open(filename).convert("L")
    imageArray = np.array(image)  # np.array (not asarray) returns a writable copy; modifica() mutates it in place
imageArray = modifica(imageArray)
eliminaExtraCladiri(imageArray)
g = open("./" + destinationFolder + "/map" + str(index) + ".txt", "w")
g.write("")
g.close()
g = open("./" + destinationFolder + "/map" + str(index) + ".txt", "a")
g.write(str(len(imageArray)) + "\n" + str(len(imageArray)) + "\n")
for x in imageArray:
for y in x:
g.write(str(y) + " ")
g.write("\n")
index += 1
if index % 100 == 0:
print(index)
print(index)
# for i in range(nrImages):
# image = Image.open("./final_images/_2O7gRvMPVdPfW9Ql60S-w.png").convert("L")
# # image = image.resize((imageSize, imageSize), Image.ANTIALIAS)
#
# imageArray = np.asarray(image)
# print(imageArray.shape)
# imageArray = modifica(imageArray)
# eliminaExtraCladiri(imageArray)
# print(imageArray)
# g = open("map2.txt", "w")
# g.write("")
# g.close()
# g = open("map2.txt", "a")
# g.write(str(len(imageArray)) + "\n" + str(len(imageArray)) + "\n")
# for x in imageArray:
# for y in x:
# g.write(str(y) + " ")
# g.write("\n")
| 24.68
| 83
| 0.491086
| 373
| 3,085
| 4.048257
| 0.211796
| 0.019868
| 0.015894
| 0.037086
| 0.493377
| 0.417881
| 0.417881
| 0.377483
| 0.349669
| 0.349669
| 0
| 0.03443
| 0.359806
| 3,085
| 125
| 84
| 24.68
| 0.730127
| 0.23436
| 0
| 0.466667
| 0
| 0
| 0.0271
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.055556
| 0
| 0.122222
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09003d15be83a3b390c12acd09219a14eb6cb09a
| 15,291
|
py
|
Python
|
kempnn/trainer.py
|
ttyhasebe/KEMPNN
|
d52ec0a82d758431120c0831738b104a535f2264
|
[
"BSD-3-Clause"
] | 4
|
2022-01-14T08:43:52.000Z
|
2022-03-02T11:06:03.000Z
|
kempnn/trainer.py
|
ttyhasebe/KEMPNN
|
d52ec0a82d758431120c0831738b104a535f2264
|
[
"BSD-3-Clause"
] | null | null | null |
kempnn/trainer.py
|
ttyhasebe/KEMPNN
|
d52ec0a82d758431120c0831738b104a535f2264
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright 2021 by Tatsuya Hasebe, Hitachi, Ltd.
# All rights reserved.
#
# This file is part of the KEMPNN package,
# and is released under the "BSD 3-Clause License". Please see the LICENSE
# file that should have been included as part of this package.
#
import datetime
import json
import os
import pickle
import time
import numpy as np
import torch
import torch.utils.data
from .loader import MoleculeCollater, loadDataset
from .utils import peason_r2_score, rmse_score
defaultMoleculeTrainConfig = {
"name": "",
"device": "cuda",
"optimizer": torch.optim.Adam,
"optimizer_args": {"lr": 0.001},
"optimize_schedule": None,
"optimize_schedule_args": {},
"loss": torch.nn.MSELoss(),
"save": True,
"save_path": "weights",
"batch_size": 16,
"epochs": 50,
"drop_last": True,
}
class ConfigEncoder(json.JSONEncoder):
# overload method default
def default(self, obj):
# Match all the types you want to handle in your converter
if isinstance(obj, (float, int, str, dict, list, tuple)):
return json.JSONEncoder.default(self, obj)
if hasattr(obj, "__class__"):
if obj.__class__.__name__ == "type":
                return obj.__name__  # serialize a class object as its name
            return str(obj)  # fall back to the string representation of other objects
return json.JSONEncoder.default(self, obj)
@classmethod
def dumps(cls, obj):
return json.dumps(obj, indent=4, cls=cls)
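# e.g. ConfigEncoder.dumps({"optimizer": torch.optim.Adam}) serializes the class
# object as the string "Adam" instead of raising TypeError.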
class MoleculeTrainer:
""" Train molecule dataset
"""
def __init__(self):
self.default_cfg = defaultMoleculeTrainConfig
self.trained = False
self.dataset = None
self.att_dataset = None
pass
def setDataset(self, train_dataset, test_dataset, valid_dataset):
"""Set training, test, validation dataset
"""
self.dataset = (train_dataset, test_dataset, valid_dataset)
def setKnowledgeDataset(self, data):
"""Set dataset for knolwedge learning (molecule dataset with node_label)
"""
self.att_dataset = data
def prepareData(self, cfg):
self.dataset = loadDataset(cfg)
def fit(self, model, cfg=None, verbose=True, debug=False):
"""Execute model traning.
"""
if cfg is None:
cfg = self.default_cfg
assert self.dataset is not None
# dataset
train_dataset, test_dataset, valid_dataset = self.dataset
# send model to device
device = cfg["device"]
model.to(device)
# configure save path and save the configurations
model_dir = ""
if "dataset" in cfg and "name" in cfg["dataset"]:
name = cfg["name"] + "_" + cfg["dataset"]["name"]
else:
name = cfg["name"]
root_save_path = cfg["save_path"]
save = cfg["save"]
print_text = None
if save:
model_dir = os.path.join(
root_save_path,
name
+ "_"
+ datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S"),
)
os.makedirs(model_dir, exist_ok=True)
cfg["model_path"] = model_dir
cfg["model_str"] = str(model)
with open(model_dir + "/config.json", "w") as fp:
fp.write(ConfigEncoder.dumps(cfg))
with open(model_dir + "/transform.pkl", "wb") as fp:
pickle.dump(train_dataset.transform, fp)
print_text = open(model_dir + "/output.log", "w")
# define SGD optimizer and its schedule
optimizer = cfg["optimizer"](
model.parameters(), **cfg["optimizer_args"]
)
if cfg["optimize_schedule"] is not None:
scheduler = cfg["optimize_schedule"](
optimizer, **cfg["optimize_schedule_args"]
)
else:
scheduler = None
# number of epochs
n_epoch = cfg["epochs"]
# define dataloader using batch_size
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=cfg["batch_size"],
shuffle=True,
collate_fn=MoleculeCollater(label=True),
pin_memory=True,
drop_last=cfg["drop_last"],
)
test_dataloader = torch.utils.data.DataLoader(
test_dataset,
batch_size=cfg["batch_size"],
shuffle=False,
collate_fn=MoleculeCollater(label=True),
pin_memory=True,
drop_last=False,
)
valid_dataloader = torch.utils.data.DataLoader(
valid_dataset,
batch_size=cfg["batch_size"],
shuffle=False,
collate_fn=MoleculeCollater(label=True),
pin_memory=True,
drop_last=False,
)
# define dataloader for knowledge data
use_knowledge = False
if "knowledge" in cfg and cfg["knowledge"] is not None:
assert self.att_dataset is not None
use_knowledge = True
att_dataloader = torch.utils.data.DataLoader(
self.att_dataset,
batch_size=cfg["knowledge"]["batch_size"],
shuffle=True,
collate_fn=MoleculeCollater(label=False, node_label=True),
pin_memory=True,
drop_last=True,
)
else:
att_dataloader = None
# define loss
loss_func = cfg["loss"]
# define variables used in training
train_loss_log = torch.zeros(n_epoch).to(device)
test_loss_log = torch.zeros(n_epoch).to(device)
val_loss_log = torch.zeros(n_epoch).to(device)
n_train = len(train_dataset)
n_test = len(test_dataset)
n_val = len(valid_dataset)
n_batch = n_train // cfg["batch_size"]
n_batch_test = n_test // cfg["batch_size"]
n_batch_val = n_val // cfg["batch_size"]
best_valid_rmse = 1e20
best_test_rmse = 1e20
best_epoch = -1
test_rmse = None
val_rmse = None
# define optimizer and loss for knowledge training
if use_knowledge:
k_cfg = cfg["knowledge"]
k_optimizer = k_cfg["optimizer"](
model.parameters(), **k_cfg["optimizer_args_pretrain"]
)
k_loss_func = k_cfg["loss"]
if "optimize_schedule" in k_cfg:
k_scheduler = k_cfg["optimize_schedule"](
k_optimizer, **k_cfg["optimize_schedule_args"]
)
else:
k_scheduler = None
else:
k_cfg = None
k_optimizer = None
k_loss_func = None
k_scheduler = None
# execute knowledge pre-training if configured.
if use_knowledge and cfg["knowledge"]["pretrain_epoch"] > 0:
assert self.att_dataset is not None
k_pre_loss_log = torch.zeros(
cfg["knowledge"]["pretrain_epoch"]
).to(device)
k_n_batch = len(self.att_dataset) // cfg["knowledge"]["batch_size"]
k_n_epoch = cfg["knowledge"]["pretrain_epoch"]
for epoch in range(k_n_epoch):
start_time = time.time()
model.train()
# batch learning
for batch in att_dataloader:
x, y = batch
# send to gpu
x = [_x.to(device) for _x in x]
y = y.to(device)
k_optimizer.zero_grad()
y_pred = model(*x, attention_loss=True)
loss = k_loss_func(y_pred.view(-1, 1), y.view(-1, 1))
loss.backward()
with torch.no_grad():
k_pre_loss_log[epoch] += loss / k_n_batch
k_optimizer.step()
if k_scheduler:
k_scheduler.step()
# batch evaluation
print(
f"knowledge_pretrain epoch:{epoch + 1}/{k_n_epoch}"
f" rmse:{torch.sqrt(k_pre_loss_log[epoch]):.4f}"
)
use_knowledge_train = (
use_knowledge and cfg["knowledge"]["train_factor"] > 0
)
# batch learning on the training dataset
for epoch in range(n_epoch):
start_time = time.time()
model.train()
if device == "cuda":
torch.cuda.empty_cache()
# iterate batch
for batch in train_dataloader:
optimizer.zero_grad()
# calculate knowledge loss (\gamma L_k)
knowledge_loss = 0
if use_knowledge_train:
k_batch = next(iter(att_dataloader))
k_x, k_y = k_batch
# send to gpu
k_x = [_x.to(device) for _x in k_x]
k_y = k_y.to(device)
k_y_pred = model(*k_x, attention_loss=True)
knowledge_loss = (
k_loss_func(k_y_pred.view(-1, 1), k_y.view(-1, 1))
* cfg["knowledge"]["train_factor"]
)
# calculate loss (L_p + \gamma_kp L_kp)
x, y = batch
# send to gpu
x = [_x.to(device) for _x in x]
y = y.to(device)
y_pred = model(*x)
loss = loss_func(y_pred, y.view(-1, 1))
# add knowledge loss
if use_knowledge_train:
loss += knowledge_loss
with torch.no_grad():
train_loss_log[epoch] += loss / n_batch
loss.backward()
optimizer.step()
if scheduler:
scheduler.step()
# batch evaluation
model.eval()
y_test_all = []
y_pred_test_all = []
y_val_all = []
y_pred_val_all = []
# evaluate on test set
with torch.no_grad():
for batch in test_dataloader:
x, y_val = batch
# send to gpu
x = [_x.to(device) for _x in x]
y_val = y_val.to(device)
y_pred_val = model(*x)
test_loss_log[epoch] += (
loss_func(y_pred_val, y_val.view(-1, 1)) / n_batch_test
)
# record label for r2 calculation
y_test_all.append(y_val.cpu().numpy())
if type(y_pred_val) == tuple:
y_pred_test_all.append(
y_pred_val[0][:, 0].cpu().numpy()
)
else:
y_pred_test_all.append(y_pred_val[:, 0].cpu().numpy())
# evaluate on validation set
with torch.no_grad():
for batch in valid_dataloader:
x, y_val = batch
# send to gpu
x = [_x.to(device) for _x in x]
y_val = y_val.to(device)
y_pred_val = model(*x)
val_loss_log[epoch] += (
loss_func(y_pred_val, y_val.view(-1, 1)) / n_batch_val
)
# record label for r2 calculation
y_val_all.append(y_val.cpu().numpy())
if type(y_pred_val) == tuple:
y_pred_val_all.append(
y_pred_val[0][:, 0].cpu().numpy()
)
else:
y_pred_val_all.append(y_pred_val[:, 0].cpu().numpy())
# calculate metrics
# inverse-transform the properties to
# evaluate metrics in the original scale.
y_test_all_inv = test_dataset.inverse_transform(
np.concatenate(y_test_all)
)[:, 0]
y_pred_test_all_inv = test_dataset.inverse_transform(
np.concatenate(y_pred_test_all)
)[:, 0]
y_val_all_inv = valid_dataset.inverse_transform(
np.concatenate(y_val_all)
)[:, 0]
y_pred_val_all_inv = valid_dataset.inverse_transform(
np.concatenate(y_pred_val_all)
)[:, 0]
test_rmse = rmse_score(y_test_all_inv, y_pred_test_all_inv)
val_rmse = rmse_score(y_val_all_inv, y_pred_val_all_inv)
try:
test_r2 = peason_r2_score(y_test_all_inv, y_pred_test_all_inv)
except ValueError:
test_r2 = np.nan
try:
val_r2 = peason_r2_score(y_val_all_inv, y_pred_val_all_inv)
except ValueError:
val_r2 = np.nan
train_loss_ = train_loss_log.cpu().numpy()[epoch]
test_loss_ = test_loss_log.cpu().numpy()[epoch]
val_loss_ = val_loss_log.cpu().numpy()[epoch]
# save and print result
if best_valid_rmse > val_rmse:
if save:
torch.save(
model.state_dict(), model_dir + "/best_model.pth"
)
best_valid_rmse = val_rmse
best_test_rmse = test_rmse
best_epoch = epoch + 1
text = (
f"epoch {epoch+1:d}/{n_epoch:d} "
f"train_loss: {train_loss_:.4f} test_loss: {test_loss_:.4f} "
f"test_r2: {test_r2:.4f} test_rmse: {test_rmse:.4f} "
f"val_loss: {val_loss_:.4f} val_r2: {val_r2:.4f} "
f"val_rmse: {val_rmse:.4f} "
f"time: {time.time() - start_time:.2f}sec"
)
if verbose:
print(text)
if save:
print_text.write(text + "\n")
if save:
torch.save(
torch.stack((train_loss_log, test_loss_log, val_loss_log)),
model_dir + "/losses.pth",
)
torch.save(model, model_dir + "/last_model.pth")
print_text.close()
self.trained = True
ret = {
"test_rmse": test_rmse,
"val_rmse": val_rmse,
"best_test_rmse": best_test_rmse,
"best_val_rmse": best_valid_rmse,
"epoch": best_epoch,
"model_dir": model_dir,
}
print(
f"Training result: "
f"test_rmse:{test_rmse:.5f} val_rmse:{val_rmse:.5f}\n"
f"best_epoch:{best_epoch} best_test_rmse:{best_test_rmse:.5f} "
f"best_val_rmse:{best_valid_rmse:.5f}"
)
if debug:
return (
ret,
{
"k_loss_func": k_loss_func,
"k_optimizer": k_optimizer,
"loss_func": loss_func,
"optimizer": optimizer,
"scheduler": scheduler,
"epochs": n_epoch,
"batch_size": cfg["batch_size"],
},
)
return ret
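# Hedged usage sketch (hypothetical dataset and model names, not part of the
# original file): the call sequence this trainer expects is
#
#   trainer = MoleculeTrainer()
#   trainer.setDataset(train_ds, test_ds, valid_ds)
#   result = trainer.fit(model, cfg=defaultMoleculeTrainConfig)
#   print(result["best_test_rmse"], result["model_dir"])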
| 33.459519
| 80
| 0.511804
| 1,737
| 15,291
| 4.219344
| 0.153138
| 0.020467
| 0.018556
| 0.011461
| 0.338109
| 0.27207
| 0.233183
| 0.203848
| 0.152408
| 0.145586
| 0
| 0.00828
| 0.391799
| 15,291
| 456
| 81
| 33.532895
| 0.779785
| 0.092538
| 0
| 0.221893
| 0
| 0
| 0.100355
| 0.021649
| 0
| 0
| 0
| 0
| 0.008876
| 1
| 0.02071
| false
| 0.002959
| 0.029586
| 0.002959
| 0.076923
| 0.02071
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0903fb75c589ec651b3db5a68d90addf520bf4a1
| 696
|
py
|
Python
|
app.py
|
KaceyHirth/Library-DBMS-System
|
40b425ed5c7b46627b7c48724b2d20e7a64cf025
|
[
"MIT"
] | null | null | null |
app.py
|
KaceyHirth/Library-DBMS-System
|
40b425ed5c7b46627b7c48724b2d20e7a64cf025
|
[
"MIT"
] | null | null | null |
app.py
|
KaceyHirth/Library-DBMS-System
|
40b425ed5c7b46627b7c48724b2d20e7a64cf025
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
import os
basePath = os.path.abspath(os.path.dirname(__file__))
template_dir = os.path.join(basePath, 'templates')
app = Flask(__name__, template_folder=template_dir)
app.config['SECRET_KEY'] = 'Thisissupposedtobesecret'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = ''
db = SQLAlchemy(app)
Bootstrap(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
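# Hedged sketch of the user loader this app still needs before logins work
# (hypothetical User model, not part of the original file): flask_login
# requires a user_loader callback mapping a stored session id to a user.
#
#   class User(UserMixin, db.Model):
#       id = db.Column(db.Integer, primary_key=True)
#
#   @login_manager.user_loader
#   def load_user(user_id):
#       return User.query.get(int(user_id))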
| 27.84
| 102
| 0.806034
| 93
| 696
| 5.698925
| 0.462366
| 0.067925
| 0.045283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093391
| 696
| 24
| 103
| 29
| 0.839937
| 0
| 0
| 0
| 0
| 0
| 0.165706
| 0.131124
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.294118
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09047cdff4106518ddb7312a4ad2e4fbacd7ac5f
| 6,167
|
py
|
Python
|
xdl/blueprints/chasm2.py
|
mcrav/xdl
|
c120a1cf50a9b668a79b118700930eb3d60a9298
|
[
"MIT"
] | null | null | null |
xdl/blueprints/chasm2.py
|
mcrav/xdl
|
c120a1cf50a9b668a79b118700930eb3d60a9298
|
[
"MIT"
] | null | null | null |
xdl/blueprints/chasm2.py
|
mcrav/xdl
|
c120a1cf50a9b668a79b118700930eb3d60a9298
|
[
"MIT"
] | null | null | null |
from ..constants import JSON_PROP_TYPE
from .base_blueprint import BaseProcedureBlueprint
from ..steps import placeholders
from ..reagents import Reagent
DEFAULT_VESSEL: str = 'reactor'
DEFAULT_SEPARATION_VESSEL: str = 'separator'
DEFAULT_EVAPORATION_VESSEL: str = 'rotavap'
class Chasm2(BaseProcedureBlueprint):
PROP_TYPES = {
'chasm2': JSON_PROP_TYPE,
}
def __init__(self, chasm2):
super().__init__(locals())
def build_reaction(self):
steps, reagents = [], []
current_temp = None
for reaction_id, reaction_chasm2 in self.chasm2['reaction'].items():
for i, item in enumerate(sorted(reaction_chasm2)):
item_chasm2 = reaction_chasm2[item]
if 'temp' in item_chasm2 and item_chasm2['temp'] is not None:
heating_step = placeholders.HeatChillToTemp(
temp=item_chasm2['temp'],
vessel=DEFAULT_VESSEL,
continue_heatchill=True,
active=True
)
if heating_step.temp != current_temp:
current_temp = item_chasm2['temp']
steps.append(heating_step)
item_steps, item_reagents = converters[item](
item_chasm2, position=i)
steps.extend(item_steps)
reagents.extend(item_reagents)
quench_steps, quench_reagents = chasm2_quench(self.chasm2['quench'])
steps.extend(quench_steps)
reagents.extend(quench_reagents)
return steps, reagents
def build_workup(self):
steps, reagents = [], []
i = 0
for separation_id, separation_chasm2 in self.chasm2['workup'].items():
item_steps, item_reagents = converters[separation_id](
separation_chasm2, position=i, workup_steps=steps)
steps.extend(item_steps)
reagents.extend(item_reagents)
i += 1
evaporation_steps, _ =\
chasm2_evaporation(self.chasm2['evaporation'])
if evaporation_steps:
for step in steps:
if step.to_vessel == 'product':
step.to_vessel = DEFAULT_EVAPORATION_VESSEL
steps.extend(evaporation_steps)
return steps, reagents
def build_purification(self):
return chasm2_purification(self.chasm2['purification'])
def chasm2_quench(chasm2):
steps, reagents = [], []
if chasm2['reagent']:
steps.append(
placeholders.Add(
vessel=DEFAULT_VESSEL,
reagent=chasm2['reagent'],
volume=chasm2['volume'],
temp=chasm2['temp'],
)
)
reagents.append(Reagent(chasm2['reagent']))
return steps, reagents
def chasm2_reaction(chasm2, position):
steps, reagents = [], []
if chasm2['time'] is None and chasm2['temp'] is None:
return steps, reagents
steps.append(
placeholders.HeatChill(
vessel=DEFAULT_VESSEL,
temp=chasm2['temp'],
time=chasm2['time'],
)
)
return steps, reagents
def chasm2_addition(chasm2, position):
if not chasm2['reagent']:
return [], []
steps, reagents = [], [Reagent(chasm2['reagent'])]
stir = position > 0
if chasm2['reagent_type'] == 'solid':
step = placeholders.AddSolid(
vessel=DEFAULT_VESSEL,
reagent=chasm2['reagent'],
mass=chasm2['amount'],
stir=stir
)
if chasm2['speed']:
step.time = f'{step.mass / float(chasm2["speed"])} min'
steps = [step]
else:
steps = [
placeholders.Add(
vessel=DEFAULT_VESSEL,
reagent=chasm2['reagent'],
volume=chasm2['amount'],
speed=chasm2['speed'],
stir=stir
)
]
return steps, reagents
def chasm2_separation(chasm2, position, workup_steps):
steps, reagents = [], []
if not chasm2['solvent']:
return steps, reagents
if position == 0:
from_vessel = DEFAULT_VESSEL
else:
from_vessel = workup_steps[-1].to_vessel
waste_phase_to_vessel = None
if chasm2['waste_phase_dest'] != 'waste':
waste_phase_to_vessel = chasm2['waste_phase_dest']
steps.append(
placeholders.Separate(
solvent=chasm2['solvent'],
solvent_volume=chasm2['solvent_volume'],
product_phase=chasm2['product_phase'],
from_vessel=from_vessel,
separation_vessel=DEFAULT_SEPARATION_VESSEL,
to_vessel=chasm2['product_phase_dest'],
waste_phase_to_vessel=waste_phase_to_vessel,
purpose=chasm2['purpose'],
)
)
reagents.append(Reagent(chasm2['solvent']))
return steps, reagents
def chasm2_evaporation(chasm2):
steps, reagents = [], []
steps.append(
placeholders.Evaporate(
vessel=DEFAULT_EVAPORATION_VESSEL,
pressure=chasm2['pressure'],
temp=chasm2['temp'],
time=chasm2['time'],
)
)
if chasm2['dry']:
steps.append(
placeholders.Dry(
vessel=DEFAULT_EVAPORATION_VESSEL,
)
)
return steps, reagents
def chasm2_purification(chasm2):
steps, reagents = [], []
return steps, reagents
converters = {
'addition1': chasm2_addition,
'addition2': chasm2_addition,
'addition3': chasm2_addition,
'addition4': chasm2_addition,
'addition5': chasm2_addition,
'addition6': chasm2_addition,
'addition7': chasm2_addition,
'addition8': chasm2_addition,
'addition9': chasm2_addition,
'addition10': chasm2_addition,
'reaction': chasm2_reaction,
'separation1': chasm2_separation,
'separation2': chasm2_separation,
'separation3': chasm2_separation,
'separation4': chasm2_separation,
'separation5': chasm2_separation,
'evaporation': chasm2_evaporation,
'purification': chasm2_purification,
}
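# Illustrative dispatch (hypothetical chasm2 fragment, not from the original
# file): each item id is routed through the converters table above, e.g.
#
#   steps, reagents = converters['reaction']({'time': '1 h', 'temp': 25}, position=0)
#   # -> one placeholders.HeatChill step on DEFAULT_VESSEL, no new reagents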
| 31.304569
| 78
| 0.592995
| 593
| 6,167
| 5.939292
| 0.175379
| 0.077513
| 0.059341
| 0.043725
| 0.22402
| 0.105338
| 0.0636
| 0.0636
| 0.037479
| 0.037479
| 0
| 0.024441
| 0.303389
| 6,167
| 196
| 79
| 31.464286
| 0.795391
| 0
| 0
| 0.273256
| 0
| 0
| 0.089995
| 0.00373
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05814
| false
| 0
| 0.023256
| 0.005814
| 0.162791
| 0.011628
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09050f807c744801e59522d4a44d059ae276259e
| 570
|
py
|
Python
|
pandaharvester/harvestercore/plugin_base.py
|
tsulaiav/harvester
|
ca3f78348019dd616738f2da7d50e81700a8e6b9
|
[
"Apache-2.0"
] | 11
|
2017-06-01T10:16:58.000Z
|
2019-11-22T08:41:36.000Z
|
pandaharvester/harvestercore/plugin_base.py
|
tsulaiav/harvester
|
ca3f78348019dd616738f2da7d50e81700a8e6b9
|
[
"Apache-2.0"
] | 34
|
2016-10-25T19:15:24.000Z
|
2021-03-05T12:59:04.000Z
|
pandaharvester/harvestercore/plugin_base.py
|
tsulaiav/harvester
|
ca3f78348019dd616738f2da7d50e81700a8e6b9
|
[
"Apache-2.0"
] | 17
|
2016-10-24T13:29:45.000Z
|
2021-03-23T17:35:27.000Z
|
from future.utils import iteritems
from pandaharvester.harvestercore import core_utils
class PluginBase(object):
def __init__(self, **kwarg):
for tmpKey, tmpVal in iteritems(kwarg):
setattr(self, tmpKey, tmpVal)
# make logger
def make_logger(self, base_log, token=None, method_name=None, send_dialog=True):
if send_dialog and hasattr(self, 'dbInterface'):
hook = self.dbInterface
else:
hook = None
return core_utils.make_logger(base_log, token=token, method_name=method_name, hook=hook)
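# Minimal usage sketch (hypothetical subclass, not part of the original file):
# PluginBase copies arbitrary constructor kwargs onto the instance, so a
# plugin configured with queueName='ANALY_TEST' exposes self.queueName.
#
#   class MyMessenger(PluginBase):
#       def __init__(self, **kwarg):
#           PluginBase.__init__(self, **kwarg)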
| 33.529412
| 96
| 0.685965
| 72
| 570
| 5.222222
| 0.527778
| 0.079787
| 0.06383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22807
| 570
| 16
| 97
| 35.625
| 0.854545
| 0.019298
| 0
| 0
| 0
| 0
| 0.019749
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0905ad16307a5af70bde741ea4817b4a93ef0e8a
| 1,762
|
py
|
Python
|
preprocessing/MEG/filtering.py
|
athiede13/neural_sources
|
3435f26a4b99b7f705c7ed6b43ab9c741fdd1502
|
[
"MIT"
] | null | null | null |
preprocessing/MEG/filtering.py
|
athiede13/neural_sources
|
3435f26a4b99b7f705c7ed6b43ab9c741fdd1502
|
[
"MIT"
] | null | null | null |
preprocessing/MEG/filtering.py
|
athiede13/neural_sources
|
3435f26a4b99b7f705c7ed6b43ab9c741fdd1502
|
[
"MIT"
] | null | null | null |
"""
Filtering of MEG data
Created on 13.9.2017
@author: Anja Thiede <anja.thiede@helsinki.fi>
"""
import os
from os import walk
import datetime
import numpy as np
import mne
now = datetime.datetime.now()
def processedcount(file_list):
n = 0
for item in file_list:
if item[-8:-4] == 'filt':
n = n+1
return n
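# Example (traced from the slice above): item[-8:-4] picks the four characters
# just before the 4-character extension, so
#   processedcount(['sme_028_filt.fif', 'sme_028.fif'])  ->  1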
# set up data paths
root_path = ('/media/cbru/SMEDY_SOURCES/DATA/MEG_prepro/')
f = []
for (dirpath, dirnames, filenames) in walk(root_path):
f.extend(filenames)
break
log_path = root_path+'logs/logs_filt_'+now.strftime("%Y-%m-%d")
log = open(log_path, 'w')
#sub = ['sme_028'] # for testing or filtering single files
i = 0
for subject in dirnames:  # use sub instead of dirnames to process single files
subject_folder = root_path+subject+'/'
subject_files = os.listdir(subject_folder)
# filt_file_count = processedcount(subject_files)
# if filt_file_count == 2:
# continue
for pieces in subject_files:
if pieces[-11:] == 'ref_ssp.fif':
final_path = subject_folder+pieces
print(final_path)
i = i+1
raw = mne.io.read_raw_fif(final_path, preload=True) # read preprocessed data
# raw.set_eeg_reference()
order = np.arange(raw.info['nchan'])
# filter the data
raw.load_data()
hp = 0.5
lp = 25.0
raw.filter(hp, None, n_jobs=8, method='fir')
# high-pass filter, default hamming window is used
raw.filter(None, lp, n_jobs=8, method='fir') # low-pass filter
fsave = subject_folder+pieces[:-4]+'_filt.fif'
print(fsave)
raw.save(fsave, overwrite=True) # save filtered file to disk
log.write(subject+' processed\n')
log.close()
| 27.53125
| 88
| 0.611805
| 249
| 1,762
| 4.184739
| 0.497992
| 0.03071
| 0.024952
| 0.023033
| 0.028791
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020898
| 0.266742
| 1,762
| 63
| 89
| 27.968254
| 0.785604
| 0.238365
| 0
| 0
| 0
| 0
| 0.086298
| 0.031794
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.125
| 0
| 0.175
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09098bec23281af47e835daa26b81dccca6d2e2c
| 22,972
|
py
|
Python
|
src/pds_doi_service/core/db/doi_database.py
|
NASA-PDS/pds-doi-service
|
b994381a5757700229865e8fe905553559684e0d
|
[
"Apache-2.0"
] | 2
|
2020-11-03T19:29:11.000Z
|
2021-09-26T01:42:41.000Z
|
src/pds_doi_service/core/db/doi_database.py
|
NASA-PDS/pds-doi-service
|
b994381a5757700229865e8fe905553559684e0d
|
[
"Apache-2.0"
] | 222
|
2020-05-07T21:05:23.000Z
|
2021-12-16T22:14:54.000Z
|
src/pds_doi_service/core/db/doi_database.py
|
NASA-PDS/pds-doi-service
|
b994381a5757700229865e8fe905553559684e0d
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2020–21, by the California Institute of Technology. ALL RIGHTS
# RESERVED. United States Government Sponsorship acknowledged. Any commercial
# use must be negotiated with the Office of Technology Transfer at the
# California Institute of Technology.
#
"""
===============
doi_database.py
===============
Contains classes and functions for interfacing with the local transaction
database (SQLite3).
"""
import sqlite3
from collections import OrderedDict
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from sqlite3 import Error
from pds_doi_service.core.entities.doi import DoiStatus
from pds_doi_service.core.entities.doi import ProductType
from pds_doi_service.core.util.config_parser import DOIConfigUtil
from pds_doi_service.core.util.general_util import get_logger
# Get the common logger and set the level for this file.
logger = get_logger(__name__)
class DOIDataBase:
"""
Provides a mechanism to write, update and read rows to/from a local SQLite3
database.
"""
DOI_DB_SCHEMA = OrderedDict(
{
"identifier": "TEXT NOT NULL", # PDS identifier (any version)
"doi": "TEXT", # DOI (may be null for pending or draft)
"status": "TEXT NOT NULL", # current status
"title": "TEXT", # title used for the DOI
"submitter": "TEXT", # email of the submitter of the DOI
"type": "TEXT", # product type
"subtype": "TEXT", # subtype of the product
"node_id": "TEXT NOT NULL", # steward discipline node ID
"date_added": "INT", # as Unix epoch seconds
"date_updated": "INT NOT NULL", # as Unix epoch seconds
"transaction_key": "TEXT NOT NULL", # transaction (key is node id/datetime)
"is_latest": "BOOLEAN", # whether the transaction is the latest
}
)
"""
The schema used to define the DOI DB table. Each key corresponds to a column
name, and each value corresponds to the data type and column constraint as
expected by the Sqlite3 CREATE TABLE statement.
"""
EXPECTED_NUM_COLS = len(DOI_DB_SCHEMA)
""""The expected number of columns as defined by the schema."""
def __init__(self, db_file):
self._config = DOIConfigUtil().get_config()
self.m_database_name = db_file
self.m_default_table_name = "doi"
self.m_my_conn = None
def get_database_name(self):
"""Returns the name of the SQLite database."""
return self.m_database_name
def close_database(self):
"""Close connection to the SQLite database."""
logger.debug("Closing database %s", self.m_database_name)
if self.m_my_conn:
self.m_my_conn.close()
# Set m_my_conn to None to signify that there is no connection.
self.m_my_conn = None
else:
logger.warn("Database connection to %s has not been started or is " "already closed", self.m_database_name)
def create_connection(self):
"""Create and return a connection to the SQLite database."""
if self.m_my_conn is not None:
logger.warning("There is already an open database connection, " "closing existing connection.")
self.close_database()
logger.info("Connecting to SQLite3 (ver %s) database %s", sqlite3.version, self.m_database_name)
try:
self.m_my_conn = sqlite3.connect(self.m_database_name)
except Error as my_error:
logger.error("Failed to connect to database, reason: %s", my_error)
def get_connection(self, table_name=None):
"""
Returns a connection to the SQLite database. If a connection does not
already exist, one is created using the default database file.
The default table is also created by this method if it does not exist.
"""
if not table_name:
table_name = self.m_default_table_name
if not self.m_my_conn:
self.create_connection()
if not self.check_if_table_exists(table_name):
self.create_table(table_name)
return self.m_my_conn
def check_if_table_exists(self, table_name):
"""
Check if the expected default table exists in the current database.
If a database connection has not been made yet, one is created by
this method.
"""
logger.info("Checking for existence of DOI table %s", table_name)
o_table_exists_flag = False
if self.m_my_conn is None:
logger.warn("Not connected to %s, establishing new connection...", self.m_database_name)
self.create_connection()
table_pointer = self.m_my_conn.cursor()
# Get the count of tables with the given name.
query_string = "SELECT count(name) FROM sqlite_master WHERE type='table' AND " f"name='{table_name}'"
logger.info("Executing query: %s", query_string)
table_pointer.execute(query_string)
# If the count is 1, then table exists.
if table_pointer.fetchone()[0] == 1:
o_table_exists_flag = True
logger.debug("o_table_exists_flag: %s", o_table_exists_flag)
return o_table_exists_flag
def drop_table(self, table_name):
"""Delete the given table from the SQLite database."""
if self.m_my_conn:
logger.debug("Executing query: DROP TABLE %s", table_name)
self.m_my_conn.execute(f"DROP TABLE {table_name}")
def query_string_for_table_creation(self, table_name):
"""
Builds the query string to create a transaction table in the SQLite
database.
Parameters
----------
table_name : str
Name of the table to build the query for.
Returns
-------
o_query_string : str
The Sqlite3 query string used to create the transaction table within
the database.
"""
o_query_string = f"CREATE TABLE {table_name} "
o_query_string += "("
for index, (column, constraints) in enumerate(self.DOI_DB_SCHEMA.items()):
o_query_string += f"{column} {constraints}"
if index < (self.EXPECTED_NUM_COLS - 1):
o_query_string += ","
o_query_string += ");"
logger.debug("CREATE o_query_string: %s", o_query_string)
return o_query_string
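# Traced output for the default table (hedged reconstruction of the generated
# SQL, wrapped here for readability; the real string is a single line):
#
#   CREATE TABLE doi (identifier TEXT NOT NULL,doi TEXT,status TEXT NOT NULL,
#   title TEXT,submitter TEXT,type TEXT,subtype TEXT,node_id TEXT NOT NULL,
#   date_added INT,date_updated INT NOT NULL,transaction_key TEXT NOT NULL,
#   is_latest BOOLEAN);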
def query_string_for_transaction_insert(self, table_name):
"""
Builds the query string used to insert a transaction row into the SQLite
database table.
Parameters
----------
table_name : str
Name of the table to build the query for.
Returns
-------
o_query_string : str
The Sqlite3 query string used to insert a new row into the database.
"""
o_query_string = f"INSERT INTO {table_name} "
o_query_string += "("
for index, column in enumerate(self.DOI_DB_SCHEMA):
o_query_string += f"{column}"
if index < (self.EXPECTED_NUM_COLS - 1):
o_query_string += ","
o_query_string += ") "
o_query_string += f'VALUES ({",".join(["?"] * self.EXPECTED_NUM_COLS)})'
logger.debug("INSERT o_query_string: %s", o_query_string)
return o_query_string
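# Traced output for the default table (hedged reconstruction, wrapped for
# readability; the real string is a single line):
#
#   INSERT INTO doi (identifier,doi,status,title,submitter,type,subtype,
#   node_id,date_added,date_updated,transaction_key,is_latest)
#   VALUES (?,?,?,?,?,?,?,?,?,?,?,?)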
def query_string_for_is_latest_update(self, table_name, primary_key_column):
"""
Build the query string to set the is_latest to False (0) for rows
in the table having a specified primary key (identifier) value.
Parameters
----------
table_name : str
Name of the table to build the query for.
primary_key_column: str
Name of the database column designated as the primary key.
Returns
-------
o_query_string : str
The Sqlite3 query string used to perform the is_latest update.
"""
# Note that we set column "is_latest" to 0 to signify that all previous
# rows are now not the latest.
o_query_string = f"UPDATE {table_name} "
o_query_string += "SET "
o_query_string += "is_latest = 0 "
o_query_string += f"WHERE {primary_key_column} = ?"
o_query_string += ";" # Don't forget the last semi-colon for SQL to work.
logger.debug("UPDATE o_query_string: %s", o_query_string)
return o_query_string
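# Traced output for the default table and primary key (hedged reconstruction):
#
#   UPDATE doi SET is_latest = 0 WHERE identifier = ?;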
def create_table(self, table_name):
"""Create a given table in the SQLite database."""
logger.info('Creating SQLite table "%s"', table_name)
self.m_my_conn = self.get_connection()
query_string = self.query_string_for_table_creation(table_name)
self.m_my_conn.execute(query_string)
logger.info("Table created successfully")
def write_doi_info_to_database(
self,
identifier,
transaction_key,
doi=None,
date_added=datetime.now(),
date_updated=datetime.now(),
status=DoiStatus.Unknown,
title="",
product_type=ProductType.Collection,
product_type_specific="",
submitter="",
discipline_node="",
):
"""
Write a new row to the Sqlite3 transaction database with the provided
DOI entry information.
Parameters
----------
identifier : str
The PDS identifier to associate as the primary key for the new row.
transaction_key : str
Path to the local transaction history location associated with the
new row.
doi : str, optional
The DOI value to associate with the new row. Defaults to None.
date_added : datetime, optional
Time that the row was initially added to the database. Defaults
to the current time.
date_updated : datetime, optional
Time that the row was last updated. Defaults to the current time.
status : DoiStatus
The status of the transaction. Defaults to DoiStatus.Unknown.
title : str
The title associated with the transaction. Defaults to an empty string.
product_type : ProductType
The product type associated with the transaction. Defaults to
ProductType.Collection.
product_type_specific : str
The specific product type associated with the transaction.
Defaults to an empty string.
submitter : str
The submitter email associated with the transaction. Defaults
to an empty string.
discipline_node : str
The discipline node ID associated with the transaction. Defaults
to an empty string.
Raises
------
RuntimeError
If the database transaction cannot be committed for any reason.
"""
self.m_my_conn = self.get_connection()
# Convert timestamps to Unix epoch floats for simpler table storage
date_added = date_added.replace(tzinfo=timezone.utc).timestamp()
date_updated = date_updated.replace(tzinfo=timezone.utc).timestamp()
# Map the inputs to the appropriate column names. By doing so, we
# can ignore database column ordering for now.
data = {
"identifier": identifier,
"status": status,
"date_added": date_added,
"date_updated": date_updated,
"submitter": submitter,
"title": title,
"type": product_type,
"subtype": product_type_specific,
"node_id": discipline_node,
"doi": doi,
"transaction_key": transaction_key,
"is_latest": True,
}
try:
# Create and execute the query to unset the is_latest field for all
# records with the same identifier field.
query_string = self.query_string_for_is_latest_update(
self.m_default_table_name, primary_key_column="identifier"
)
self.m_my_conn.execute(query_string, (identifier,))
self.m_my_conn.commit()
except sqlite3.Error as err:
msg = f"Failed to update is_latest field for identifier {identifier}, " f"reason: {err}"
logger.error(msg)
raise RuntimeError(msg)
try:
# Combine the insert and update here so the commit can be applied to both actions.
query_string = self.query_string_for_transaction_insert(self.m_default_table_name)
# Create the named parameters tuple in the order expected by the
# database schema
data_tuple = tuple([data[column] for column in self.DOI_DB_SCHEMA])
self.m_my_conn.execute(query_string, data_tuple)
self.m_my_conn.commit()
except sqlite3.Error as err:
msg = f"Failed to commit transaction for identifier {identifier}, " f"reason: {err}"
logger.error(msg)
raise RuntimeError(msg)
def _normalize_rows(self, columns, rows):
"""
Normalize the columns of each row to the data types we expect,
rather than the types that are convenient for table storage.
"""
for row in rows:
# Convert the add/update times from Unix epoch back to datetime,
# accounting for the expected (PST) timezone
for time_column in ("date_added", "date_updated"):
time_val = row[columns.index(time_column)]
time_val = datetime.fromtimestamp(time_val, tz=timezone.utc).replace(
tzinfo=timezone(timedelta(hours=-8.0))
)
row[columns.index(time_column)] = time_val
# Convert status/product type back to Enums
row[columns.index("status")] = DoiStatus(row[columns.index("status")].lower())
row[columns.index("type")] = ProductType(row[columns.index("type")].capitalize())
return rows
def select_rows(self, query_criterias, table_name=None):
"""Select rows based on the provided query criteria."""
if not table_name:
table_name = self.m_default_table_name
self.m_my_conn = self.get_connection(table_name)
query_string = f"SELECT * FROM {table_name}"
criterias_str, criteria_dict = DOIDataBase.parse_criteria(query_criterias)
if len(query_criterias) > 0:
query_string += f" WHERE {criterias_str}"
query_string += "; "
logger.debug("SELECT query_string: %s", query_string)
cursor = self.m_my_conn.cursor()
cursor.execute(query_string, criteria_dict)
columns = list(map(lambda x: x[0], cursor.description))
rows = [list(row) for row in cursor]
rows = self._normalize_rows(columns, rows)
logger.debug("Query returned %d result(s)", len(rows))
return columns, rows
def select_latest_rows(self, query_criterias, table_name=None):
"""Select all rows marked as latest (is_latest column = 1)"""
if not table_name:
table_name = self.m_default_table_name
self.m_my_conn = self.get_connection(table_name)
criterias_str, criteria_dict = DOIDataBase.parse_criteria(query_criterias)
query_string = f"SELECT * from {table_name} " f"WHERE is_latest=1 {criterias_str} ORDER BY date_updated"
logger.debug("SELECT query_string: %s", query_string)
cursor = self.m_my_conn.cursor()
cursor.execute(query_string, criteria_dict)
columns = list(map(lambda x: x[0], cursor.description))
rows = [list(row) for row in cursor]
rows = self._normalize_rows(columns, rows)
logger.debug("Query returned %d result(s)", len(rows))
return columns, rows
def select_all_rows(self, table_name=None):
"""Select all rows from the database"""
if not table_name:
table_name = self.m_default_table_name
self.m_my_conn = self.get_connection(table_name)
query_string = f"SELECT * FROM {table_name};"
logger.debug("SELECT query_string %s", query_string)
cursor = self.m_my_conn.cursor()
cursor.execute(query_string)
columns = list(map(lambda x: x[0], cursor.description))
rows = [list(row) for row in cursor]
rows = self._normalize_rows(columns, rows)
logger.debug("Query returned %d result(s)", len(rows))
return columns, rows
def update_rows(self, query_criterias, update_list, table_name=None):
"""
Update all rows and fields (specified in update_list) that match
the provided query criteria.
"""
if not table_name:
table_name = self.m_default_table_name
self.m_my_conn = self.get_connection(table_name)
query_string = f"UPDATE {table_name} SET "
for ii in range(len(update_list)):
# Build the "SET column_1 = new_value_1, column_2 = new_value_2" clause;
# only prepend the comma for the second and subsequent assignments
if ii == 0:
query_string += update_list[ii]
else:
query_string += "," + update_list[ii]
# Add any query_criterias
if len(query_criterias) > 0:
query_string += " WHERE "
# Build the WHERE clause
for ii in range(len(query_criterias)):
if ii == 0:
query_string += query_criterias[ii]
else:
query_string += f" AND {query_criterias[ii]} "
logger.debug("UPDATE query_string: %s", query_string)
self.m_my_conn.execute(query_string)
@staticmethod
def _form_query_with_wildcards(column_name, search_tokens):
"""
Helper method to form a portion of an SQL WHERE clause that returns
matches from the specified column using the provided list of tokens.
The list of tokens may either contain fully specified identifiers, or
identifiers containing Unix-style wildcards (*), aka globs. The method
partitions the tokens accordingly, and forms the appropriate clause
to capture all results.
Parameters
----------
column_name : str
Name of the SQL table column name that will be searched by the
returned query.
search_tokens : list of str
List of tokens to search for. Tokens may either be full identifiers,
or contain one or more wildcards (*).
Returns
-------
where_subclause : str
Query portion which can be used with a WHERE clause to find the
requested set of tokens. This subclause is parameterized, and should
be used with the returned named parameter dictionary.
named_parameter_values : dict
The dictionary mapping the named parameters in the returned subclause
with the actual values to use.
"""
# Partition the tokens containing wildcards from the fully specified ones
wildcard_tokens = list(filter(lambda token: "*" in token, search_tokens))
full_tokens = list(set(search_tokens) - set(wildcard_tokens))
# Clean up the column name provided so it can be used as a suitable
# named parameter placeholder token
filter_chars = [" ", "'", ":", "|"]
named_param_id = column_name
for filter_char in filter_chars:
named_param_id = named_param_id.replace(filter_char, "")
# Set up the named parameters for the IN portion of the WHERE used
# to find fully specified tokens
named_parameters = ",".join([f":{named_param_id}_{i}" for i in range(len(full_tokens))])
named_parameter_values = {f"{named_param_id}_{i}": full_tokens[i] for i in range(len(full_tokens))}
# Set up the named parameters for the GLOB portion of the WHERE used
# to find tokens containing wildcards
glob_parameters = " OR ".join(
[f"{column_name} GLOB :{named_param_id}_glob_{i}" for i in range(len(wildcard_tokens))]
)
named_parameter_values.update(
{f"{named_param_id}_glob_{i}": wildcard_tokens[i] for i in range(len(wildcard_tokens))}
)
# Build the portion of the WHERE clause combining the necessary
# parameters needed to search for all the tokens we were provided
where_subclause = "AND ("
if full_tokens:
where_subclause += f"{column_name} IN ({named_parameters}) "
if full_tokens and wildcard_tokens:
where_subclause += " OR "
if wildcard_tokens:
where_subclause += f"{glob_parameters}"
where_subclause += ")"
logger.debug("WHERE subclause: %s", where_subclause)
return where_subclause, named_parameter_values
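# Worked example (hand-traced, hypothetical identifiers): for
# column_name="identifier" and
# search_tokens=["urn:nasa:pds:foo", "urn:nasa:pds:bar*"] the method returns
# roughly
#
#   where_subclause        = "AND (identifier IN (:identifier_0)  OR identifier GLOB :identifier_glob_0)"
#   named_parameter_values = {"identifier_0": "urn:nasa:pds:foo",
#                             "identifier_glob_0": "urn:nasa:pds:bar*"}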
@staticmethod
def _get_simple_in_criteria(v, column):
named_parameters = ",".join([":" + column + "_" + str(i) for i in range(len(v))])
named_parameter_values = {column + "_" + str(i): v[i].lower() for i in range(len(v))}
return f" AND lower({column}) IN ({named_parameters})", named_parameter_values
@staticmethod
def _get_query_criteria_title(v):
return DOIDataBase._get_simple_in_criteria(v, "title")
@staticmethod
def _get_query_criteria_doi(v):
return DOIDataBase._get_simple_in_criteria(v, "doi")
@staticmethod
def _get_query_criteria_ids(v):
return DOIDataBase._form_query_with_wildcards("identifier", v)
@staticmethod
def _get_query_criteria_submitter(v):
return DOIDataBase._get_simple_in_criteria(v, "submitter")
@staticmethod
def _get_query_criteria_node(v):
return DOIDataBase._get_simple_in_criteria(v, "node_id")
@staticmethod
def _get_query_criteria_status(v):
return DOIDataBase._get_simple_in_criteria(v, "status")
@staticmethod
def _get_query_criteria_start_update(v):
return (" AND date_updated >= :start_update", {"start_update": v.replace(tzinfo=timezone.utc).timestamp()})
@staticmethod
def _get_query_criteria_end_update(v):
return (" AND date_updated <= :end_update", {"end_update": v.replace(tzinfo=timezone.utc).timestamp()})
@staticmethod
def parse_criteria(query_criterias):
criterias_str = ""
criteria_dict = {}
for k, v in query_criterias.items():
logger.debug("Calling get_query_criteria_%s with value %s", k, v)
criteria_str, dict_entry = getattr(DOIDataBase, "_get_query_criteria_" + k)(v)
logger.debug("criteria_str: %s", criteria_str)
logger.debug("dict_entry: %s", dict_entry)
criterias_str += criteria_str
criteria_dict.update(dict_entry)
return criterias_str, criteria_dict
| 36.176378
| 119
| 0.632596
| 2,923
| 22,972
| 4.759152
| 0.138898
| 0.056933
| 0.01409
| 0.02135
| 0.400906
| 0.316081
| 0.276831
| 0.23715
| 0.188412
| 0.157717
| 0
| 0.00261
| 0.282779
| 22,972
| 634
| 120
| 36.233438
| 0.841648
| 0.292399
| 0
| 0.282895
| 0
| 0
| 0.147412
| 0.009331
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092105
| false
| 0
| 0.032895
| 0.026316
| 0.203947
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0909f5e66b19795a40b888634a2cf23b87f0cd63
| 786
|
py
|
Python
|
amnesia/modules/search/views/tag.py
|
silenius/amnesia
|
ba5e3ac79a89da599c22206ad1fd17541855f74c
|
[
"BSD-2-Clause"
] | 4
|
2015-05-08T10:57:56.000Z
|
2021-05-17T04:32:11.000Z
|
amnesia/modules/search/views/tag.py
|
silenius/amnesia
|
ba5e3ac79a89da599c22206ad1fd17541855f74c
|
[
"BSD-2-Clause"
] | 6
|
2019-12-26T16:43:41.000Z
|
2022-02-28T11:07:54.000Z
|
amnesia/modules/search/views/tag.py
|
silenius/amnesia
|
ba5e3ac79a89da599c22206ad1fd17541855f74c
|
[
"BSD-2-Clause"
] | 1
|
2019-09-23T14:08:11.000Z
|
2019-09-23T14:08:11.000Z
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPNotFound
from amnesia.modules.tag import Tag
from amnesia.modules.search import SearchResource
def includeme(config):
''' Pyramid includeme func'''
config.scan(__name__)
@view_config(context=SearchResource, name='tag', request_method='GET',
renderer='amnesia:templates/search/tag.pt')
def tag(context, request):
tag_id = request.GET.get('id', '').strip()
tag_obj = request.dbsession.get(Tag, tag_id)
if not tag_obj:
raise HTTPNotFound()
search_query = context.tag_id(tag_obj, limit=500)
return {
'results': search_query.result,
'count': search_query.count,
'tag': tag_obj
}
| 23.117647
| 70
| 0.683206
| 99
| 786
| 5.252525
| 0.444444
| 0.046154
| 0.069231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012618
| 0.193384
| 786
| 33
| 71
| 23.818182
| 0.807571
| 0.085242
| 0
| 0
| 0
| 0
| 0.076056
| 0.043662
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.210526
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
090a042fdb172133fc8a7549c6014b2194047447
| 12,410
|
py
|
Python
|
compute_mainmodes.py
|
mehrdad-bm/mobility_shift
|
242f12b60dc8e07e3da13b5f1199456fd0fd697e
|
[
"MIT"
] | 1
|
2020-06-24T12:49:49.000Z
|
2020-06-24T12:49:49.000Z
|
compute_mainmodes.py
|
mehrdad-bm/mobility_shift
|
242f12b60dc8e07e3da13b5f1199456fd0fd697e
|
[
"MIT"
] | null | null | null |
compute_mainmodes.py
|
mehrdad-bm/mobility_shift
|
242f12b60dc8e07e3da13b5f1199456fd0fd697e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 27 00:17:55 2020
@author: mehrdad
"""
import json
import numpy as np
import pandas as pd
import time
import math
#import blist
import tslib.mining
import tslib.common
import tslib.trip_detection
import tslib.trip
STORE_RESULTS = False
#output_folder = './data/output'
#all_modes = {'WALK':0, 'RUN':0, 'BUS': 0, 'TRAM':0, 'RAIL':0, 'FERRY':0,
# 'CAR':0, 'SUBWAY':0, 'BICYCLE':0, 'EBICYCLE':0}
#all_modes_df = pd.DataFrame(data=all_modes.values(), index=all_modes.keys())
#from pyfiles.common.modalchoice import ModalChoice
# ----------------------------------------------------------------------------------------
def combine_sequential_modes(multimodal_summary):
multimodal_summary = multimodal_summary.replace('RUN','WALK')
l = multimodal_summary.split('->')
new_l = []
prev_mode = 'none'
for mode in l:
if mode == prev_mode:
pass
else:
new_l.append(mode)
prev_mode = mode
new_modes_str = '->'.join(new_l)
return new_modes_str
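# Example (traced from the code above): RUN legs are first folded into WALK,
# then consecutive duplicates collapse:
#
#   combine_sequential_modes('WALK->RUN->BUS->BUS->WALK')  ->  'WALK->BUS->WALK'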
def fix_ebike_in_computed_multimodes(data):
print("fix_ebike_in_computed_multimodes() ...")
start = time.time()
ebikes = data[data['mode']=='EBICYCLE']
new_multimodal = ebikes.multimodal_summary.apply(lambda x: x.replace('BICYCLE', 'EBICYCLE'))
data.update(new_multimodal)
new_d_by_mode = ebikes.distance_by_mode.apply(lambda x: x.replace('EBICYCLE', 'ERASED'))
new_d_by_mode = new_d_by_mode.apply(lambda x: x.replace('BICYCLE', 'EBICYCLE'))
#ebikes.distance_by_mode.values[0]
#'{"RUN": 0.0, "WALK": 0.0, "EBICYCLE": 0.0, "BICYCLE": 20427.21}'
data.update(new_d_by_mode)
# TODO: If planning to also update time_by_mode, the time values should be reduced according to ebike speed
# ...
#temp = pd.DataFrame()
#temp['multimodal_summary'] = ebikes.multimodal_summary.apply(lambda x: x.replace('BICYCLE', 'EBICYCLE'))
end = time.time()
print("elapsed", end-start)
def compute_modes_distance_shares_per_trip(data):
# ----------------------------------------
# Get distances-by-mode
#temp = data.distance_by_mode.apply(lambda x: dict(json.loads(x)))
temp = data.distance_by_mode.apply(json.loads)
d_df = temp.to_frame()
d_df['distance'] = data.distance
start = time.time()
# compute modes distance shares per trip ---------------
#users = blist.blist() # maybe better performance for larger datasets
#users = list(np.zeros(len(d_df), dtype=int)) # not necessary unless profiler shows that list.append() is a bottleneck
users = list()
trip_ids = list()
plan_ids = list()
mode_shares = list()
max_mode_shares = list()
max_modes = list()
total_d_errors = list()
row_index = 0
for trip in d_df.itertuples():
users.append(trip.Index[0])
trip_ids.append(trip.Index[1])
if len(trip.Index)==3: # for computed_trips
plan_ids.append(trip.Index[2])
total_distance = trip.distance
d = trip.distance_by_mode
dvals = np.array(list(d.values()))
total = math.fsum(dvals)
if total>0:
shares = dvals/total
max_share = shares.max()
max_index = shares.argmax()
max_mode = list(d.keys())[max_index]
shares = shares[shares>0]
else:
max_share = 0
max_mode = 'UNDEFINED'
shares = []
mode_shares.append(shares)
max_mode_shares.append(max_share)
max_modes.append(max_mode)
total_d_errors.append(total_distance - total)
#users[row_index] = trip.Index[0]
row_index += 1
all_data={#'user': users,
#'trip': trip_ids,
'max_mode': max_modes,
'max_mode_share':max_mode_shares,
'mode_shares':mode_shares,
'total_d_error':total_d_errors}
#users = np.apply_along_axis(lambda x: x[0] , 1, indexes)
#trip_ids = np.apply_along_axis(lambda x: x[1] , 1, indexes)
#plan_ids = np.apply_along_axis(lambda x: x[2] , 1, indexes)
if len(trip.Index) == 2:
all_index=[users, trip_ids]
elif len(trip.Index) == 3: # for computed_trips
all_index=[users, trip_ids, plan_ids]
mode_distance_shares = pd.DataFrame(index=all_index, data=all_data)
# mode_distance_shares.set_index(keys=['user', 'trip'], inplace=True)
end = time.time()
print("compute_modes_distance_shares_per_trip(): elapsed", end-start)
return mode_distance_shares
# -----------------------------------------
def compute_mainmode_per_trip(mode_distance_shares):
# Compute main-mode per trip -----------------------
start = time.time()
mainmodes = []
mainmode_shares = []
for trip in mode_distance_shares.itertuples():
MIN_SHARE = tslib.mining.MIN_DISTANCE_SHARE_OF_MAINMODE
if trip.max_mode_share < MIN_SHARE and trip.max_mode_share > 0:
main_mode = 'SMALL_SHARE'
main_mode_share = 0 # we don't have a main-mode for this trip
else:
main_mode = trip.max_mode
main_mode_share = trip.max_mode_share
if main_mode == 'RUN':
main_mode = 'WALK'
mainmodes.append(main_mode)
mainmode_shares.append(main_mode_share)
mode_distance_shares['mainmode'] = mainmodes
mode_distance_shares['mainmode_share'] = mainmode_shares
end = time.time()
print("elapsed", end-start)
return mode_distance_shares
# -----------------------------------------------------------
def get_all_mode_shares(mode_distance_shares):
# Get all mode distance shares, for later stats -----------------------
start = time.time()
share_values_history = []
for trip in mode_distance_shares.itertuples():
share_values_history.extend(trip.mode_shares)
share_values_history_df = pd.DataFrame(data={'mode_distance_share': share_values_history})
end = time.time()
print("elapsed", end-start)
return share_values_history_df
# ---------------------------------------
def combine_samemode_leg_sequences(trips):
# Refine multimodal_summary of each trip, combine modes repeated right after each other:
trips['multimodal_summary_combined'] = trips.multimodal_summary.apply(combine_sequential_modes)
def compute_mainmodes_for_observed(trips):
print("compute_mainmodes_for_observed(): Given ",len(trips),"trip records")
mode_distance_shares = compute_modes_distance_shares_per_trip(trips)
mode_distance_shares = compute_mainmode_per_trip(mode_distance_shares)
# optional?: share_values_history_df = get_all_mode_shares(mode_distance_shares)
# Update the records:
trips['old_mode'] = trips['mode']
trips['mode'] = mode_distance_shares['mainmode']
trips['mainmode'] = mode_distance_shares['mainmode']
trips['mainmode_share'] = mode_distance_shares['mainmode_share']
if STORE_RESULTS:
store_filename_suffix = 'observed'
print("saving to file ...")
mode_distance_shares.to_csv('./trips/output/'+'mode_distance_shares_'+store_filename_suffix+'.csv')
#share_values_history_df.to_csv('./trips/output/share_values_history_df_'+store_filename_suffix+'.csv')
def compute_mainmodes_for_computed(trips):
print("compute_mainmodes_for_computed(): Given ",len(trips),"trip records")
mode_distance_shares = compute_modes_distance_shares_per_trip(trips)
mode_distance_shares = compute_mainmode_per_trip(mode_distance_shares)
trips['mainmode_share'] = mode_distance_shares['mainmode_share']
tslib.trip_detection.compute_mainmode_of_PT_trips(trips)
tslib.trip_detection.compute_mainmode_of_non_PT_trips(trips)
def fix_alts_with_misplaced_plan_id(session_data):
computed_trips_ = session_data.computed_trips
# POSSIBLE FIXES
# See: X.multimodal_summary.value_counts() of following datasets:
# also:
# np.histogram(pt_alts_by_planid.car_distance/pt_alts_by_planid.distance, bins=[0, 0.01, 0.3, 0.7, 1])
# np.histogram(pt_alts_by_planid.bike_distance/pt_alts_by_planid.distance, bins=[0, 0.01, 0.3, 0.7, 1])
# np.histogram(pt_alts_by_planid.walk_distance/pt_alts_by_planid.distance, bins=[0, 0.3, 0.7, 1])
# np.histogram(pt_alts_by_planid.pt_distance/pt_alts_by_planid.distance, bins=[0, 0.3, 0.7, 1])
walk_alts_by_planid = tslib.trip_detection.get_alts_by_planid(computed_trips_, [1]) # supposed to be walk alts
#OK, but very few CAR, BIKE and PT
# for which, bike and pt should be fine because they have the correct mainmode ??
bike_alts_by_planid = tslib.trip_detection.get_alts_by_planid(computed_trips_, [2]) # supposed to be bike alts
#OK, but very few CAR and PT
car_alts_by_planid = tslib.trip_detection.get_alts_by_planid(computed_trips_, [3]) # supposed to be car alts
# Has ~400 PT, 60 WALK
# The PT ones without SMALL_SHARE are fine already
pt_alts_by_planid = tslib.trip_detection.get_alts_by_planid(computed_trips_, [4,5,6]) # only can be PT alts
#test: (pt_alts_by_planid[pt_alts_by_planid.mainmode == 'WALK']).multimodal_summary.value_counts()
# 2551 are only WALK leg, and apparently those trips already have plan_id=1 WALK computed:
# Implies PT is not available or not possible? ... see the distances
# Update computed_trips as 'mainmode' = PT_PLANNED_AS_WALK ?!!
# the rest have at least one PT leg.
# How to fix computed_trips?
# Is it ok if for a 'PT' trip, actual motorized distance is only e.g. 20%?
# 12191 records mainmode == SMALL_SHARE ***
# Update SMALL_SHARE to a PT mode
# to the largest share ?!
# to dft.old_mode.value_counts() ?!
# to 'MULTI_PT_MODES' or 'PT_SMALL_SHARE' and then add 'MULTI_PT_MODES', etc. to PT_MODES ?!
# Make the corrections:
# PT alts:
# .1
revise = pt_alts_by_planid[(pt_alts_by_planid.multimodal_summary == 'WALK') & (pt_alts_by_planid.mainmode != 'PT_PLANNED_AS_WALK')]
computed_trips_.loc[computed_trips_.index.isin(revise.index), 'mainmode'] = 'PT_PLANNED_AS_WALK'
# .2
revise = pt_alts_by_planid[pt_alts_by_planid.mainmode == 'SMALL_SHARE']
#revise['pt_d_share'] = revise.pt_distance/revise.distance
#revise[['old_mode', 'multimodal_summary', 'pt_distance', 'pt_d_share']]
#dft.multimodal_summary
computed_trips_.loc[computed_trips_.index.isin(revise.index), 'mainmode'] = 'PT_SMALL_SHARE'
# . Where mainmode incorrectly classified as 'WALK' because walk leg had largest distance-share
revise = pt_alts_by_planid[(pt_alts_by_planid.multimodal_summary.apply(tslib.trip.has_pt_leg)) &\
(~ pt_alts_by_planid.mainmode.isin(tslib.trip_detection.PT_MODES))]
computed_trips_.loc[computed_trips_.index.isin(revise.index), 'mainmode'] = 'PT_SMALL_SHARE'
# .3
revise = car_alts_by_planid[car_alts_by_planid.multimodal_summary.apply(tslib.trip.has_pt_leg) &\
(car_alts_by_planid.mainmode == 'SMALL_SHARE')]
computed_trips_.loc[computed_trips_.index.isin(revise.index), 'mainmode'] = 'PT_SMALL_SHARE'
# ======================================================
def save_to_file(session_data):
print("Saving trips to file ...")
output_folder = session_data.settings.DATAOUT_FOLDER
tslib.common.save_dataframe_to_file(output_folder,'observed_trips', session_data.observed_trips)
tslib.common.save_dataframe_to_file(output_folder,'computed_trips', session_data.computed_trips)
def load_data_with_fixed_modes(session_data):
print("Loading trips from file ...")
data_storage_folder = session_data.settings.DATASTORE_FOLDER
session_data.observed_trips = tslib.common.load_dataframe_from_file(data_storage_folder,'observed_trips')
session_data.computed_trips = tslib.common.load_dataframe_from_file(data_storage_folder,'computed_trips')
| 40.032258
| 135
| 0.652458
| 1,641
| 12,410
| 4.606947
| 0.175503
| 0.053704
| 0.046032
| 0.035185
| 0.41918
| 0.363757
| 0.294577
| 0.246693
| 0.197355
| 0.187037
| 0
| 0.011665
| 0.21249
| 12,410
| 309
| 136
| 40.161812
| 0.761895
| 0.319581
| 0
| 0.167702
| 0
| 0
| 0.100156
| 0.022616
| 0
| 0
| 0
| 0.003236
| 0
| 1
| 0.068323
| false
| 0.006211
| 0.055901
| 0
| 0.149068
| 0.062112
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
090a8d868906c9c97d5b54db58497792e9cd606d
| 24,054
|
py
|
Python
|
envs/env.py
|
CMU-Light-Curtains/SafetyEnvelopes
|
e2b32f99437ea36c8b22f97470c5a7f406d3ec78
|
[
"BSD-3-Clause"
] | null | null | null |
envs/env.py
|
CMU-Light-Curtains/SafetyEnvelopes
|
e2b32f99437ea36c8b22f97470c5a7f406d3ec78
|
[
"BSD-3-Clause"
] | null | null | null |
envs/env.py
|
CMU-Light-Curtains/SafetyEnvelopes
|
e2b32f99437ea36c8b22f97470c5a7f406d3ec78
|
[
"BSD-3-Clause"
] | null | null | null |
from abc import ABC, abstractmethod
import gym
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import time
from setcpp import SmoothnessDPL1Cost, SmoothnessDPPairGridCost, SmoothnessGreedy
import tqdm
from typing import Optional, Tuple, NoReturn
from data.synthia import Frame, append_xyz_to_depth_map
from devices.light_curtain import LCReturn
from lc_planner.planner import PlannerRT
from lc_planner.config import LCConfig
import utils
########################################################################################################################
# region Base Env class
########################################################################################################################
class Env(ABC, gym.Env):
"""
Base class for implementing environments for safety envelope tracking.
This is intended to follow the OpenAI Gym environment interface.
"""
def __init__(self,
lc_config: LCConfig,
thetas: np.ndarray,
min_range: float,
max_range: float,
use_random_curtain: bool,
random_curtain_updates_main_curtain: bool,
random_curtain_cache_file: str,
random_curtain_sampling: str,
random_curtain_spacing_power: float,
vertical_range: Tuple[float, float],
r_hit_intensity_thresh: int,
r_recession: float,
pp_smoothing: Optional[str],
tracking_rtol: float,
tracking_atol: float,
baseline_config: dict,
debug: bool = False):
assert len(thetas) == lc_config.CAMERA_PARAMS['width']
self._lc_config = lc_config
self._thetas = thetas
self._min_range = min_range
self._max_range = max_range
self._use_random_curtain = use_random_curtain
self._random_curtain_updates_main_curtain = random_curtain_updates_main_curtain
self._random_curtain_sampling = random_curtain_sampling
self._random_curtain_spacing_power = random_curtain_spacing_power
self._vertical_range = vertical_range
self._r_hit_intensity_thresh = r_hit_intensity_thresh
self._r_recession = r_recession
self._pp_smoothing = pp_smoothing
self._rtol = tracking_rtol
self._atol = tracking_atol
self._debug = debug
# config for handcrafted baseline policy
self.baseline_config = baseline_config
# options
self._RANGES_PER_RAY_V2 = 1000  # roughly 1.9cm apart
# random curtain generator
self.rand_curtain_gen = RandomCurtainGenerator(cache_file=random_curtain_cache_file)
ranges = self.get_ranges(self.min_range, self.max_range, self._RANGES_PER_RAY_V2,
self._random_curtain_spacing_power)
if not self.rand_curtain_gen.has_curtains:
plannerV2 = PlannerRT(self._lc_config, ranges, self.C, version=2)
self.rand_curtain_gen.generate(planner=plannerV2, sampling=self._random_curtain_sampling)
# smoothing
self._SMOOTHNESS = 0.05
smoothness_args = (self.C, self.min_range, self.max_range, self._SMOOTHNESS)
self._smoothnessDPL1Cost = SmoothnessDPL1Cost(*smoothness_args)
self._smoothnessDPPairGridCost = SmoothnessDPPairGridCost(*smoothness_args)
self._smoothnessGreedy = SmoothnessGreedy(*smoothness_args)
# stores the most recently obtained intensities.
# this is used by heuristic_greedy smoothing to define a priority over camera rays.
# this should be initialized in self.reset()
self.intensities = None # (C,)
if self._debug:
# visualize 20 random curtains
for i in range(20):
                design_pts = self.plannerV2._planner.randomCurtainDiscrete(self._random_curtain_sampling)
design_pts = np.array(design_pts, dtype=np.float32) # (C, 3)
plt.plot(design_pts[:, 0], design_pts[:, 1])
plt.ylim(0, 20)
plt.xlim(-7, 7)
plt.title("power: {}, vel: {}, acc: {}".format(
self._random_curtain_spacing_power,
self._lc_config.LASER_PARAMS["max_omega"],
self._lc_config.LASER_PARAMS["max_alpha"]), fontsize='xx-large')
plt.tight_layout()
plt.show()
@property
def thetas(self):
return self._thetas # (C,) in degrees and in increasing order in [-fov/2, fov/2]
@property
def min_range(self):
return self._min_range
@property
def max_range(self):
return self._max_range
@property
def H(self):
return self._lc_config.CAMERA_PARAMS['height'] # number of camera rows
@property
def C(self):
return self._lc_config.CAMERA_PARAMS['width'] # number of camera columns
@staticmethod
def get_ranges(min_range, max_range, num_ranges, power):
# generate numbers between 0 and 1
unit_spacing = np.linspace(0, 1, num_ranges, dtype=np.float32) # (R,)
unit_spacing = np.power(unit_spacing, power) # (R,)
ranges = min_range + (max_range - min_range) * unit_spacing # (R,)
return ranges
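    # Illustration (not part of the original code, hypothetical values): with power > 1 the unit
    # spacing is squashed toward 0, packing more candidate ranges close to the sensor. For example,
    #   Env.get_ranges(1.0, 20.0, 5, 2.0) -> [1.0, 2.1875, 5.75, 11.6875, 20.0]
    # since np.linspace(0, 1, 5) ** 2 == [0.0, 0.0625, 0.25, 0.5625, 1.0].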
def safety_envelope(self,
frame: Frame) -> np.ndarray:
"""
Computes ground truth safety envelope from the ground truth depth map in the frame.
The safety envelope for each camera column is the smallest bev range value across all pixels in that column.
Args:
frame (Frame): frame containing ground truth depth.
Returns:
se_ranges: (np.ndarray, dtype=np.float32, shape=(C,)) the ranges of the ground truth safety envelope,
one per camera ray.
"""
depth = frame.depth.copy() # (H, C)
# append x, y, z to depth
P2 = frame.calib["P2"][:3, :3] # (3, 3)
cam_xyz = append_xyz_to_depth_map(depth[:, :, None], P2) # (H, C, 3); axis 2 is (x, y, z) in cam frame
cam_x, cam_y, cam_z = cam_xyz[:, :, 0], cam_xyz[:, :, 1], cam_xyz[:, :, 2] # all are (H, C)
bev_range = np.sqrt(np.square(cam_x) + np.square(cam_z)) # (H, C) sqrt(x**2 + z**2)
# we do not care about objects beyond "max_range"
bev_range = bev_range.clip(max=self.max_range) # (H, C)
# pixels that are outside the vertical range are assumed to be infinitely far away
# (note that cam_y points downwards)
vrange_min, vrange_max = self._vertical_range
outside_vrange_mask = (-cam_y < vrange_min) | (-cam_y > vrange_max) # (H, C)
bev_range[outside_vrange_mask] = self.max_range
se_ranges = bev_range.min(axis=0) # (C,)
return se_ranges.astype(np.float32)
def augment_frame_data(self, frame: Frame) -> NoReturn:
"""Compute the gt safety envelope and add it to the frame"""
se_ranges = self.safety_envelope(frame) # (C,)
se_design_pts = utils.design_pts_from_ranges(se_ranges, self.thetas) # (C, 2)
frame.annos["se_ranges"] = se_ranges
frame.annos["se_design_pts"] = se_design_pts
####################################################################################################################
# region Env API functions
####################################################################################################################
def reset(self,
vid: Optional[int] = None,
start: Optional[int] = None) -> np.ndarray:
"""Resets the state of the environment, returns the initial envelope and also initializes self.intensities.
Args:
vid (int): video id.
start (int): start frame of video.
Returns:
init_envelope (np.ndarray, dtype=np.float32, shape=(C,)): the initial envelope.
"""
raise NotImplementedError
def step(self,
action: Optional[np.ndarray],
score: Optional[float] = None,
get_gt: bool = False) -> Tuple[LCReturn, bool, dict]:
"""
Compute the observations from the current step.
This is derived by placing the light curtain computed from observations in the previous timestep,
in the current frame.
Args:
action (np.ndarray, dtype=np.float32, shape=(C,)): Ranges of the light curtain.
This is optional; if None, then the ground truth action will be used instead (for behavior cloning).
score (Optional[float]): the score of the front curtain that needs to be published.
get_gt (bool): whether to compute gt_action or not
Returns:
observation (LCReturn): agent's observation of the current environment. This is the return from the front
light curtain. Always returns a valid observation, even when end=True.
end (bool): is True for the last valid observation in the episode. No further calls to step() should be
                made after end=True has been returned.
info (dict): Contains auxiliary diagnostic information (helpful for debugging, and sometimes learning).
{
'gt_action' (optional): (np.ndarray, dtype=float32, shape=(C,))
the light curtain placement that should be considered `ground
truth' for the previous timestep. This is what `action' should
ideally be equal to.
'ss_action' (optional): (np.ndarray, dtype=np.float32, shape=(C,))
partial ground truth self-supervision signal generated by random
light curtains. note that the mask is equal to
(ss_action < self.max_range).
}
"""
self.env_step_begin()
info = {}
################################################################################################################
# region Random curtain
################################################################################################################
if self._use_random_curtain:
# place random curtain and move f_curtain to wherever hits are observed
r_curtain, r_hits = self.env_place_r_curtain()
# compute self-supervision signal
# these are the ranges of the random curtain for those camera rays where a hit was observed.
# rays that did not observe a hit are masked out.
ss_action = r_curtain.copy() # (C,)
ss_action[~r_hits] = self.max_range
info['ss_action'] = ss_action
# endregion
################################################################################################################
# region Pre-processing forecasting curtain
################################################################################################################
if action is not None:
# clip curtain between min and max range
f_curtain = action.clip(min=self.min_range, max=self.max_range) # (C,)
if self._use_random_curtain and self._random_curtain_updates_main_curtain:
# update f_curtain by moving it to locations where the random curtain observed returns
# update only those locations where the random curtain detected objects *closer* than the main curtain
r_update_mask = r_hits & (r_curtain < f_curtain) # (C,)
f_curtain[r_update_mask] = r_curtain[r_update_mask] - self._r_recession
# since f_curtain is being updated, self.intensities must also be updated.
# furthermore, the locations of random curtain hits should get the highest priority
self.intensities[r_update_mask] = 1.1
# endregion
################################################################################################################
# region Smoothing forecasting curtain
################################################################################################################
if action is not None:
if self._pp_smoothing == "heuristic_global":
# heuristic smoothing: difference between ranges on consecutive rays shouldn't exceed a threshold
# global optimization: minimizes the sum of L1 differences across all rays using DP
if self._use_random_curtain and self._random_curtain_updates_main_curtain:
# when using random curtains, the cost will be hierarchical:
# (sum of L1 costs over rays in r_update_mask, sum of L1 costs over rays outside r_update_mask)
                    # this prioritizes being close to the locations updated by r_curtain over the other locations.
ranges = np.array(self._smoothnessDPPairGridCost.getRanges(), dtype=np.float32) # (R,)
flat_cost = np.abs(ranges.reshape(-1, 1) - f_curtain) # (R, C)
# hierarchical cost
# - (L1cost, 0): if on ray in r_update_mask
# - (0, L1cost): if on ray outside r_update_mask
pair_cost = np.zeros([len(ranges), self.C, 2], dtype=np.float32) # (R, C, 2)
pair_cost[:, r_update_mask, 0] = flat_cost[:, r_update_mask]
pair_cost[:, ~r_update_mask, 1] = flat_cost[:, ~r_update_mask]
f_curtain = np.array(self._smoothnessDPPairGridCost.smoothedRanges(pair_cost), dtype=np.float32) # (C,)
else:
f_curtain = np.array(self._smoothnessDPL1Cost.smoothedRanges(f_curtain), dtype=np.float32) # (C,)
elif self._pp_smoothing == "heuristic_greedy":
# heuristic smoothing: difference between ranges on consecutive rays shouldn't exceed a threshold
# greedy optimization: greedily smoothes ranges while iterating over rays prioritized by largest weights
f_curtain = np.array(self._smoothnessGreedy.smoothedRanges(f_curtain, self.intensities), dtype=np.float32) # (C,)
elif self._pp_smoothing == "planner_global":
# create L1 cost function
ranges = self.plannerV2.ranges # (R,)
cmap = -np.abs(ranges.reshape(-1, 1) - f_curtain) # (R, C)
design_pts = self.plannerV2.get_design_points(cmap) # (C, 2)
assert design_pts.shape == (self.plannerV2.num_camera_angles, 2)
f_curtain = np.linalg.norm(design_pts, axis=1) # (C,)
            else:
                raise Exception('env.pp_smoothing must be one of '
                                '"heuristic_global", "heuristic_greedy", or "planner_global"')
# endregion
################################################################################################################
# region GT-action and placing forecasting curtain
################################################################################################################
        if (action is None) and (not get_gt):
raise Exception("Must compute gt_action in behavior cloning")
# the next line gets the ground truth action for the previous timestep
# in the ideal policy, `action' should match this `gt_action'
if get_gt:
info['gt_action'] = self.env_current_gt_action() # (C,)
        # if action is set to None (e.g. in behavior cloning), use the ground truth action instead
if action is None:
f_curtain = info['gt_action']
# placing forecasting curtain
obs: LCReturn = self.env_place_f_curtain(f_curtain, score=score)
# the next line updates self.intensities
self.intensities = obs.bev_intensities() / 255.0
# the next line computes `end', which checks whether another env.step() call can be made
end = self.env_end()
        time.sleep(0)  # yield control briefly; useful for RealEnv
return obs, end, info
def done(self,
f_curtain: np.ndarray,
se_ranges: np.ndarray) -> bool:
"""
Whether the episode transitions to the terminal state or not.
        Done is True when the curtain has moved too far away from the safety envelope on any camera ray, i.e.
        abs(f_curtain - se_ranges) > (atol + rtol * se_ranges) for any camera ray.
Args:
f_curtain (np.ndarray, dtype=float32, shape=(C,)): curtain placement
se_ranges (np.ndarray, dtype=float32, shape=(C,)): ground truth safety envelope.
Returns:
done (bool): whether f_curtain is too far away from se_ranges on any camera ray.
"""
# the next line computes the mask over rays; only these rays should count towards termination
mask = se_ranges < self.max_range # (C,)
f_curtain = f_curtain[mask] # (C',)
se_ranges = se_ranges[mask] # (C',)
# bad_rays = np.abs(f_curtain - se_ranges) > self._atol + self._rtol * se_ranges # (C')
# frac_bad_rays = bad_rays.sum() / mask.sum().clip(min=1)
# return frac_bad_rays >= 0.5
return np.any(np.abs(f_curtain - se_ranges) > self._atol + self._rtol * se_ranges)
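    # Worked example (hypothetical values, added for illustration): with atol=0.5 and rtol=0.1, a
    # ray whose ground-truth envelope sits at 10m tolerates curtain placements within
    # 0.5 + 0.1 * 10 = 1.5m, i.e. in [8.5m, 11.5m]; any masked-in ray outside its band ends the
    # episode.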
def render(self, mode='human'):
pass
# endregion
####################################################################################################################
# region Env-specific helper functions for step()
####################################################################################################################
@abstractmethod
def env_step_begin(self) -> NoReturn:
"""
Env-specific helper function for step().
Any pre-processing that needs to be done at the start of the step() function.
"""
raise NotImplementedError
@abstractmethod
def env_place_r_curtain(self) -> Tuple[np.ndarray, np.ndarray]:
"""
Env-specific helper function for step().
Places a random curtain and gets return.
Returns:
r_curtain (np.ndarray, dtype=float32, shape=(C,)): ranges of the unified random curtain
r_hits (np.ndarray, dtype=bool, shape=(C,)): mask where hits were found in the unified random curtain
"""
raise NotImplementedError
@abstractmethod
def env_place_f_curtain(self,
f_curtain: np.ndarray,
score: Optional[float]) -> LCReturn:
"""
Env-specific helper function for step().
Places a forecasting curtain and gets return.
Args:
f_curtain (np.ndarray, dtype=float32, shape=(C,)): ranges of the forecasting curtain
score (Optional[float]): score from the previous timestep. SimEnv uses this to publish score to Kittiviewer.
Returns:
f_return (LCReturn): Forecasting curtain return.
"""
raise NotImplementedError
@abstractmethod
def env_current_gt_action(self) -> np.ndarray:
"""
Env-specific helper function for step().
Computes the current gt_action.
Returns:
gt_action (np.ndarray, dtype=float32, shape=(C,)): current gt action
"""
raise NotImplementedError
@abstractmethod
def env_end(self) -> bool:
"""Computes the end flag, which checks whether another env.step() call can be made"""
raise NotImplementedError
# endregion
####################################################################################################################
# region Legacy helper functions
####################################################################################################################
def _debug_visualize_curtains(self, f_curtain, r_curtain):
design_pts = utils.design_pts_from_ranges(f_curtain, self.thetas)
x, z = design_pts[:, 0], design_pts[:, 1]
plt.plot(x, z, c='b')
design_pts = utils.design_pts_from_ranges(r_curtain, self.thetas)
x, z = design_pts[:, 0], design_pts[:, 1]
plt.plot(x, z, c='r')
plt.ylim(0, 21)
plt.show()
def _random_curtain(self,
r_type: str = "linear") -> np.ndarray:
"""Computes a random curtain across the entire scene
Args:
r_type (str): type of the random curtain. Options are (1) "uniform", (2) "linear".
Returns:
            curtain (np.ndarray, dtype=np.float32, shape=(C,)): range per camera ray; this may not correspond to a
                valid curtain.
"""
limits_lo = np.ones(self.C, dtype=np.float32) * 0.5 * self.min_range # (C,)
limits_hi = np.ones(self.C, dtype=np.float32) * self.max_range # (C,)
if r_type == "uniform":
curtain = np.random.uniform(low=limits_lo, high=limits_hi) # (C,)
elif r_type == "linear":
curtain = np.sqrt(np.random.uniform(low=np.square(limits_lo), high=np.square(limits_hi))) # (C,)
else:
raise Exception("r_type must be one of [uniform/linear]")
return curtain
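    # Note on the "linear" option (added for illustration): sampling r = sqrt(U(lo**2, hi**2))
    # gives a probability density proportional to r, i.e. the samples are uniform over the
    # bird's-eye-view area between the two limits rather than uniform in range.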
# endregion
####################################################################################################################
# endregion
########################################################################################################################
# region Random curtain generator class
########################################################################################################################
class RandomCurtainGenerator:
def __init__(self,
cache_file: str):
self.curtains = None # (N, C)
self.ptr = 0
self.cache_file = Path(cache_file)
if self.cache_file.exists():
self.load_from_cache_file()
@property
def has_curtains(self):
return self.curtains is not None
def load_from_cache_file(self):
self.curtains = np.loadtxt(self.cache_file).astype(np.float32) # (N, C)
utils.cprint(f'Loaded {len(self.curtains)} random curtains from cache!', color='yellow')
def generate(self,
planner: PlannerRT,
sampling: str,
num_curtains: int=1000):
assert not self.cache_file.exists(), "Cannot generate curtains if cache file already exists"
with open(self.cache_file, 'w') as f:
for _ in tqdm.trange(num_curtains, desc='Creating random curtain cache ...'):
while True:
curtain = self.generate_curtain_from_planner(planner, sampling)
                    # skip degenerate curtains where more than 90% of the rays lie closer than 3m
if not ((curtain < 3.0).mean() > 0.9):
break
print(' '.join([str(e) for e in curtain]), file=f)
self.load_from_cache_file()
@staticmethod
def generate_curtain_from_planner(planner, sampling):
r_curtain = planner._planner.randomCurtainDiscrete(sampling)
r_curtain = np.array(r_curtain, dtype=np.float32) # (C, 3)
r_curtain = np.linalg.norm(r_curtain[:, :2], axis=1) # (C,)
return r_curtain
def next(self):
curtain = self.curtains[self.ptr]
self.ptr += 1
if self.ptr == len(self.curtains):
self.ptr = 0
return curtain # (C,)
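# Usage sketch (hypothetical cache path, not part of the original code):
#   gen = RandomCurtainGenerator(cache_file="cache/random_curtains.txt")
#   if not gen.has_curtains:
#       gen.generate(planner=plannerV2, sampling="linear")  # writes the cache file, then loads it
#   curtain = gen.next()  # (C,) ranges; next() cycles back to the first curtain after the last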
# endregion
########################################################################################################################
| 46.436293
| 130
| 0.547435
| 2,692
| 24,054
| 4.706538
| 0.180906
| 0.03899
| 0.016575
| 0.009945
| 0.23236
| 0.149961
| 0.115943
| 0.080189
| 0.059669
| 0.056196
| 0
| 0.009218
| 0.278374
| 24,054
| 517
| 131
| 46.526112
| 0.720705
| 0.326058
| 0
| 0.15748
| 0
| 0
| 0.034915
| 0
| 0
| 0
| 0
| 0
| 0.011811
| 1
| 0.102362
| false
| 0.003937
| 0.055118
| 0.023622
| 0.216535
| 0.007874
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
090ac8191b92a41692dec58a6457de7f58261791
| 17,884
|
py
|
Python
|
pw_console/py/pw_console/plugins/clock_pane.py
|
octml/pigweed
|
e273d46024ef7b5a7c7ec584e4aaada41c541fc4
|
[
"Apache-2.0"
] | 86
|
2021-03-09T23:49:40.000Z
|
2022-03-30T08:14:51.000Z
|
pw_console/py/pw_console/plugins/clock_pane.py
|
octml/pigweed
|
e273d46024ef7b5a7c7ec584e4aaada41c541fc4
|
[
"Apache-2.0"
] | 4
|
2021-07-27T20:32:03.000Z
|
2022-03-08T10:39:07.000Z
|
pw_console/py/pw_console/plugins/clock_pane.py
|
octml/pigweed
|
e273d46024ef7b5a7c7ec584e4aaada41c541fc4
|
[
"Apache-2.0"
] | 22
|
2021-03-11T15:15:47.000Z
|
2022-02-09T06:16:36.000Z
|
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Example Plugin that displays some dynamic content (a clock) and examples of
text formatting."""
from datetime import datetime
from prompt_toolkit.filters import Condition, has_focus
from prompt_toolkit.formatted_text import (
FormattedText,
HTML,
merge_formatted_text,
)
from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
from prompt_toolkit.layout import FormattedTextControl, Window, WindowAlign
from prompt_toolkit.mouse_events import MouseEvent, MouseEventType
from pw_console.plugin_mixin import PluginMixin
from pw_console.widgets import ToolbarButton, WindowPane, WindowPaneToolbar
from pw_console.get_pw_console_app import get_pw_console_app
# Helper class used by the ClockPane plugin for displaying dynamic text,
# handling key bindings and mouse input. See the ClockPane class below for the
# beginning of the plugin implementation.
class ClockControl(FormattedTextControl):
"""Example prompt_toolkit UIControl for displaying formatted text.
This is the prompt_toolkit class that is responsible for drawing the clock,
handling keybindings if in focus, and mouse input.
"""
def __init__(self, clock_pane: 'ClockPane', *args, **kwargs) -> None:
self.clock_pane = clock_pane
# Set some custom key bindings to toggle the view mode and wrap lines.
key_bindings = KeyBindings()
# If you press the v key this _toggle_view_mode function will be run.
@key_bindings.add('v')
def _toggle_view_mode(_event: KeyPressEvent) -> None:
"""Toggle view mode."""
self.clock_pane.toggle_view_mode()
# If you press the w key this _toggle_wrap_lines function will be run.
@key_bindings.add('w')
def _toggle_wrap_lines(_event: KeyPressEvent) -> None:
"""Toggle line wrapping."""
self.clock_pane.toggle_wrap_lines()
# Include the key_bindings keyword arg when passing to the parent class
# __init__ function.
kwargs['key_bindings'] = key_bindings
# Call the parent FormattedTextControl.__init__
super().__init__(*args, **kwargs)
def mouse_handler(self, mouse_event: MouseEvent):
"""Mouse handler for this control."""
# If the user clicks anywhere this function is run.
        # Mouse positions are relative to this control: x is the column starting
        # from the left side as zero; y is the row starting with the top as
        # zero.
_click_x = mouse_event.position.x
_click_y = mouse_event.position.y
# Mouse click behavior usually depends on if this window pane is in
# focus. If not in focus, then focus on it when left clicking. If
# already in focus then perform the action specific to this window.
# If not in focus, change focus to this clock pane and do nothing else.
if not has_focus(self.clock_pane)():
if mouse_event.event_type == MouseEventType.MOUSE_UP:
get_pw_console_app().focus_on_container(self.clock_pane)
# Mouse event handled, return None.
return None
# If code reaches this point, this window is already in focus.
# On left click
if mouse_event.event_type == MouseEventType.MOUSE_UP:
# Toggle the view mode.
self.clock_pane.toggle_view_mode()
# Mouse event handled, return None.
return None
# Mouse event not handled, return NotImplemented.
return NotImplemented
class ClockPane(WindowPane, PluginMixin):
"""Example Pigweed Console plugin window that displays a clock.
The ClockPane is a WindowPane based plugin that displays a clock and some
formatted text examples. It inherits from both WindowPane and
PluginMixin. It can be added on console startup by calling: ::
my_console.add_window_plugin(ClockPane())
For an example see:
https://pigweed.dev/pw_console/embedding.html#adding-plugins
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, pane_title='Clock', **kwargs)
# Some toggle settings to change view and wrap lines.
self.view_mode_clock: bool = True
self.wrap_lines: bool = False
# Counter variable to track how many times the background task runs.
self.background_task_update_count: int = 0
        # ClockControl is responsible for rendering the dynamic content provided
        # by self._get_formatted_text() and for handling keyboard and mouse input.
# Using a control is always necessary for displaying any content that
# will change.
self.clock_control = ClockControl(
self, # This ClockPane class
self._get_formatted_text, # Callable to get text for display
# These are FormattedTextControl options.
# See the prompt_toolkit docs for all possible options
# https://python-prompt-toolkit.readthedocs.io/en/latest/pages/reference.html#prompt_toolkit.layout.FormattedTextControl
show_cursor=False,
focusable=True,
)
# Every FormattedTextControl object (ClockControl) needs to live inside
# a prompt_toolkit Window() instance. Here is where you specify
# alignment, style, and dimensions. See the prompt_toolkit docs for all
        # options:
# https://python-prompt-toolkit.readthedocs.io/en/latest/pages/reference.html#prompt_toolkit.layout.Window
self.clock_control_window = Window(
# Set the content to the clock_control defined above.
content=self.clock_control,
# Make content left aligned
align=WindowAlign.LEFT,
# These two set to false make this window fill all available space.
dont_extend_width=False,
dont_extend_height=False,
# Content inside this window will have its lines wrapped if
# self.wrap_lines is True.
wrap_lines=Condition(lambda: self.wrap_lines),
)
# Create a toolbar for display at the bottom of this clock window. It
# will show the window title and buttons.
self.bottom_toolbar = WindowPaneToolbar(self)
# Add a button to toggle the view mode.
self.bottom_toolbar.add_button(
ToolbarButton(
key='v', # Key binding for this function
description='View Mode', # Button name
# Function to run when clicked.
mouse_handler=self.toggle_view_mode,
))
# Add a checkbox button to display if wrap_lines is enabled.
self.bottom_toolbar.add_button(
ToolbarButton(
key='w', # Key binding for this function
description='Wrap', # Button name
# Function to run when clicked.
mouse_handler=self.toggle_wrap_lines,
# Display a checkbox in this button.
is_checkbox=True,
# lambda that returns the state of the checkbox
checked=lambda: self.wrap_lines,
))
# self.container is the root container that contains objects to be
# rendered in the UI, one on top of the other.
self.container = self._create_pane_container(
# Display the clock window on top...
self.clock_control_window,
# and the bottom_toolbar below.
self.bottom_toolbar,
)
# This plugin needs to run a task in the background periodically and
# uses self.plugin_init() to set which function to run, and how often.
# This is provided by PluginMixin. See the docs for more info:
# https://pigweed.dev/pw_console/plugins.html#background-tasks
self.plugin_init(
plugin_callback=self._background_task,
# Run self._background_task once per second.
plugin_callback_frequency=1.0,
plugin_logger_name='pw_console_example_clock_plugin',
)
def _background_task(self) -> bool:
"""Function run in the background for the ClockPane plugin."""
self.background_task_update_count += 1
# Make a log message for debugging purposes. For more info see:
# https://pigweed.dev/pw_console/plugins.html#debugging-plugin-behavior
self.plugin_logger.debug('background_task_update_count: %s',
self.background_task_update_count)
# Returning True in the background task will force the user interface to
# re-draw.
# Returning False means no updates required.
return True
def toggle_view_mode(self):
"""Toggle the view mode between the clock and formatted text example."""
self.view_mode_clock = not self.view_mode_clock
self.redraw_ui()
def toggle_wrap_lines(self):
"""Enable or disable line wraping/truncation."""
self.wrap_lines = not self.wrap_lines
self.redraw_ui()
def _get_formatted_text(self):
"""This function returns the content that will be displayed in the user
interface depending on which view mode is active."""
if self.view_mode_clock:
return self._get_clock_text()
return self._get_example_text()
def _get_clock_text(self):
"""Create the time with some color formatting."""
# pylint: disable=no-self-use
# Get the date and time
date, time = datetime.now().isoformat(sep='_',
timespec='seconds').split('_')
# Formatted text is represented as (style, text) tuples.
# For more examples see:
# https://python-prompt-toolkit.readthedocs.io/en/latest/pages/printing_text.html
# These styles are selected using class names and start with the
# 'class:' prefix. For all classes defined by Pigweed Console see:
# https://cs.opensource.google/pigweed/pigweed/+/main:pw_console/py/pw_console/style.py;l=189
# Date in cyan matching the current Pigweed Console theme.
date_with_color = ('class:theme-fg-cyan', date)
# Time in magenta
time_with_color = ('class:theme-fg-magenta', time)
# No color styles for line breaks and spaces.
line_break = ('', '\n')
space = ('', ' ')
# Concatenate the (style, text) tuples.
return FormattedText([
line_break,
space,
space,
date_with_color,
space,
time_with_color,
])
def _get_example_text(self):
"""Examples of how to create formatted text."""
# pylint: disable=no-self-use
# Make a list to hold all the formatted text to display.
fragments = []
# Some spacing vars
wide_space = ('', ' ')
space = ('', ' ')
newline = ('', '\n')
# HTML() is a shorthand way to style text. See:
# https://python-prompt-toolkit.readthedocs.io/en/latest/pages/printing_text.html#html
# This formats 'Foreground Colors' as underlined:
fragments.append(HTML('<u>Foreground Colors</u>\n'))
# Standard ANSI colors examples
fragments.append(
FormattedText([
# These tuples follow this format:
# (style_string, text_to_display)
('ansiblack', 'ansiblack'),
wide_space,
('ansired', 'ansired'),
wide_space,
('ansigreen', 'ansigreen'),
wide_space,
('ansiyellow', 'ansiyellow'),
wide_space,
('ansiblue', 'ansiblue'),
wide_space,
('ansimagenta', 'ansimagenta'),
wide_space,
('ansicyan', 'ansicyan'),
wide_space,
('ansigray', 'ansigray'),
wide_space,
newline,
('ansibrightblack', 'ansibrightblack'),
space,
('ansibrightred', 'ansibrightred'),
space,
('ansibrightgreen', 'ansibrightgreen'),
space,
('ansibrightyellow', 'ansibrightyellow'),
space,
('ansibrightblue', 'ansibrightblue'),
space,
('ansibrightmagenta', 'ansibrightmagenta'),
space,
('ansibrightcyan', 'ansibrightcyan'),
space,
('ansiwhite', 'ansiwhite'),
space,
]))
fragments.append(HTML('\n<u>Background Colors</u>\n'))
fragments.append(
FormattedText([
# Here's an example of a style that specifies both background
# and foreground colors. The background color is prefixed with
# 'bg:'. The foreground color follows that with no prefix.
('bg:ansiblack ansiwhite', 'ansiblack'),
wide_space,
('bg:ansired', 'ansired'),
wide_space,
('bg:ansigreen', 'ansigreen'),
wide_space,
('bg:ansiyellow', 'ansiyellow'),
wide_space,
('bg:ansiblue ansiwhite', 'ansiblue'),
wide_space,
('bg:ansimagenta', 'ansimagenta'),
wide_space,
('bg:ansicyan', 'ansicyan'),
wide_space,
('bg:ansigray', 'ansigray'),
wide_space,
('', '\n'),
('bg:ansibrightblack', 'ansibrightblack'),
space,
('bg:ansibrightred', 'ansibrightred'),
space,
('bg:ansibrightgreen', 'ansibrightgreen'),
space,
('bg:ansibrightyellow', 'ansibrightyellow'),
space,
('bg:ansibrightblue', 'ansibrightblue'),
space,
('bg:ansibrightmagenta', 'ansibrightmagenta'),
space,
('bg:ansibrightcyan', 'ansibrightcyan'),
space,
('bg:ansiwhite', 'ansiwhite'),
space,
]))
# These themes use Pigweed Console style classes. See full list in:
# https://cs.opensource.google/pigweed/pigweed/+/main:pw_console/py/pw_console/style.py;l=189
fragments.append(HTML('\n\n<u>Current Theme Foreground Colors</u>\n'))
fragments.append([
('class:theme-fg-red', 'class:theme-fg-red'),
newline,
('class:theme-fg-orange', 'class:theme-fg-orange'),
newline,
('class:theme-fg-yellow', 'class:theme-fg-yellow'),
newline,
('class:theme-fg-green', 'class:theme-fg-green'),
newline,
('class:theme-fg-cyan', 'class:theme-fg-cyan'),
newline,
('class:theme-fg-blue', 'class:theme-fg-blue'),
newline,
('class:theme-fg-purple', 'class:theme-fg-purple'),
newline,
('class:theme-fg-magenta', 'class:theme-fg-magenta'),
newline,
])
fragments.append(HTML('\n<u>Current Theme Background Colors</u>\n'))
fragments.append([
('class:theme-bg-red', 'class:theme-bg-red'),
newline,
('class:theme-bg-orange', 'class:theme-bg-orange'),
newline,
('class:theme-bg-yellow', 'class:theme-bg-yellow'),
newline,
('class:theme-bg-green', 'class:theme-bg-green'),
newline,
('class:theme-bg-cyan', 'class:theme-bg-cyan'),
newline,
('class:theme-bg-blue', 'class:theme-bg-blue'),
newline,
('class:theme-bg-purple', 'class:theme-bg-purple'),
newline,
('class:theme-bg-magenta', 'class:theme-bg-magenta'),
newline,
])
fragments.append(HTML('\n<u>Theme UI Colors</u>\n'))
fragments.append([
('class:theme-fg-default', 'class:theme-fg-default'),
space,
('class:theme-bg-default', 'class:theme-bg-default'),
space,
('class:theme-bg-active', 'class:theme-bg-active'),
space,
('class:theme-fg-active', 'class:theme-fg-active'),
space,
('class:theme-bg-inactive', 'class:theme-bg-inactive'),
space,
('class:theme-fg-inactive', 'class:theme-fg-inactive'),
newline,
('class:theme-fg-dim', 'class:theme-fg-dim'),
space,
('class:theme-bg-dim', 'class:theme-bg-dim'),
space,
('class:theme-bg-dialog', 'class:theme-bg-dialog'),
space,
('class:theme-bg-line-highlight', 'class:theme-bg-line-highlight'),
space,
('class:theme-bg-button-active', 'class:theme-bg-button-active'),
space,
('class:theme-bg-button-inactive',
'class:theme-bg-button-inactive'),
space,
])
# Return all formatted text lists merged together.
return merge_formatted_text(fragments)
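# Usage sketch (assumed module path derived from this file's location; registration call quoted
# from the class docstring above):
#   from pw_console.plugins.clock_pane import ClockPane
#   my_console.add_window_plugin(ClockPane())
# Once the pane is focused, press 'v' to toggle between the clock and the formatted-text examples,
# and 'w' to toggle line wrapping, matching the key bindings defined in ClockControl.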
| 41.207373
| 132
| 0.597238
| 2,021
| 17,884
| 5.158832
| 0.213756
| 0.05563
| 0.036831
| 0.014579
| 0.181278
| 0.13217
| 0.118262
| 0.075005
| 0.05352
| 0.05352
| 0
| 0.001452
| 0.306755
| 17,884
| 433
| 133
| 41.30254
| 0.83949
| 0.364684
| 0
| 0.364706
| 0
| 0
| 0.209482
| 0.080991
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043137
| false
| 0
| 0.035294
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
090c9c265a0fb2fb0dad7a18bd49965eaa38157a
| 3,816
|
py
|
Python
|
mono/model/mono_autoencoder/layers.py
|
Jenaer/FeatDepth
|
64128b03873b27ffa5e99a5cb1712dd8aa15cb0d
|
[
"MIT"
] | 179
|
2020-08-21T08:57:22.000Z
|
2022-03-26T21:55:20.000Z
|
mono/model/mono_autoencoder/layers.py
|
sconlyshootery/feature_metric_depth
|
550420b3fb51a027549716b74c6fbce41651d3a5
|
[
"MIT"
] | 84
|
2020-08-30T14:25:19.000Z
|
2022-03-08T12:29:37.000Z
|
mono/model/mono_autoencoder/layers.py
|
sconlyshootery/feature_metric_depth
|
550420b3fb51a027549716b74c6fbce41651d3a5
|
[
"MIT"
] | 31
|
2020-10-01T12:12:19.000Z
|
2022-03-06T08:04:18.000Z
|
from __future__ import absolute_import, division, print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
class SSIM(nn.Module):
def __init__(self):
super(SSIM, self).__init__()
self.mu_x_pool = nn.AvgPool2d(3, 1)
self.mu_y_pool = nn.AvgPool2d(3, 1)
self.sig_x_pool = nn.AvgPool2d(3, 1)
self.sig_y_pool = nn.AvgPool2d(3, 1)
self.sig_xy_pool = nn.AvgPool2d(3, 1)
self.refl = nn.ReflectionPad2d(1)
self.C1 = 0.01 ** 2
self.C2 = 0.03 ** 2
def forward(self, x, y):
x = self.refl(x)
y = self.refl(y)
mu_x = self.mu_x_pool(x)
mu_y = self.mu_y_pool(y)
sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y
SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y + self.C2)
return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)
def upsample(x):
return F.interpolate(x, scale_factor=2, mode="nearest")
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv = Conv3x3(in_channels, out_channels)
self.nonlin = nn.ELU(inplace=True)
def forward(self, x):
out = self.conv(x)
out = self.nonlin(out)
return out
class Conv1x1(nn.Module):
def __init__(self, in_channels, out_channels, bias=False):
super(Conv1x1, self).__init__()
self.conv = nn.Conv2d(int(in_channels), int(out_channels), kernel_size=1, stride=1, bias=bias)
def forward(self, x):
out = self.conv(x)
return out
class Conv3x3(nn.Module):
def __init__(self, in_channels, out_channels, use_refl=True):
super(Conv3x3, self).__init__()
if use_refl:
self.pad = nn.ReflectionPad2d(1)
else:
self.pad = nn.ZeroPad2d(1)
self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)
def forward(self, x):
out = self.pad(x)
out = self.conv(out)
return out
class Conv5x5(nn.Module):
def __init__(self, in_channels, out_channels, use_refl=True):
super(Conv5x5, self).__init__()
if use_refl:
self.pad = nn.ReflectionPad2d(2)
else:
self.pad = nn.ZeroPad2d(2)
self.conv = nn.Conv2d(int(in_channels), int(out_channels), 5)
def forward(self, x):
out = self.pad(x)
out = self.conv(out)
return out
class CRPBlock(nn.Module):
def __init__(self, in_planes, out_planes, n_stages):
super(CRPBlock, self).__init__()
for i in range(n_stages):
setattr(self, '{}_{}'.format(i + 1, 'pointwise'), Conv1x1(in_planes if (i == 0) else out_planes, out_planes, False))
self.stride = 1
self.n_stages = n_stages
self.maxpool = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)
def forward(self, x):
top = x
for i in range(self.n_stages):
top = self.maxpool(top)
top = getattr(self, '{}_{}'.format(i + 1, 'pointwise'))(top)
x = top + x
return x
def compute_depth_errors(gt, pred):
thresh = torch.max((gt / pred), (pred / gt))
a1 = (thresh < 1.25 ).float().mean()
a2 = (thresh < 1.25 ** 2).float().mean()
a3 = (thresh < 1.25 ** 3).float().mean()
rmse = (gt - pred) ** 2
rmse = torch.sqrt(rmse.mean())
rmse_log = (torch.log(gt) - torch.log(pred)) ** 2
rmse_log = torch.sqrt(rmse_log.mean())
abs_rel = torch.mean(torch.abs(gt - pred) / gt)
sq_rel = torch.mean((gt - pred) ** 2 / gt)
return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
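# Minimal usage sketch (hypothetical tensors, not part of the original file):
#   import torch
#   x = torch.rand(2, 3, 64, 64)   # predicted image batch
#   y = torch.rand(2, 3, 64, 64)   # target image batch
#   dissim = SSIM()(x, y)          # (2, 3, 64, 64): per-pixel (1 - SSIM) / 2, clamped to [0, 1]
#   loss = dissim.mean()           # ReflectionPad2d(1) + AvgPool2d(3, 1) preserve the H x W shape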
| 33.473684
| 128
| 0.586478
| 577
| 3,816
| 3.637782
| 0.183709
| 0.034302
| 0.026679
| 0.042878
| 0.400667
| 0.335398
| 0.315388
| 0.292997
| 0.243449
| 0.16627
| 0
| 0.037024
| 0.270964
| 3,816
| 114
| 129
| 33.473684
| 0.717469
| 0
| 0
| 0.221053
| 0
| 0
| 0.00917
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147368
| false
| 0
| 0.042105
| 0.010526
| 0.336842
| 0.010526
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
090caec635d034601a9a45a30d2d0ee7c652da16
| 2,413
|
py
|
Python
|
pdnn/helpers/the_graveyard.py
|
alamorre/pdnn-experiment
|
b07b509e8610c324b11aa81204cfca06b8437f16
|
[
"BSD-2-Clause-FreeBSD"
] | 17
|
2017-06-14T16:36:12.000Z
|
2021-01-31T18:16:10.000Z
|
pdnn/helpers/the_graveyard.py
|
alamorre/pdnn-experiment
|
b07b509e8610c324b11aa81204cfca06b8437f16
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2018-02-26T16:04:48.000Z
|
2018-03-01T06:42:57.000Z
|
pdnn/helpers/the_graveyard.py
|
alamorre/pdnn-experiment
|
b07b509e8610c324b11aa81204cfca06b8437f16
|
[
"BSD-2-Clause-FreeBSD"
] | 5
|
2017-09-12T13:20:02.000Z
|
2019-02-06T08:41:58.000Z
|
import numpy as np
from plato.core import as_floatx, create_shared_variable, symbolic, add_update
from theano import tensor as tt
class FutureWeightGradCalculator(object):
def __init__(self, kp, kd, shapes):
"""
        :param kp: proportional coefficient of the PD encoding (presumed from the kp/kd naming)
        :param kd: derivative coefficient of the PD encoding (presumed from the kp/kd naming)
:param shapes: A tuple that specifies (minibatch_size, n_in, n_out)
"""
self.kp = kp
self.kd = kd
self.r = kd/as_floatx(kp+kd)
self.scale = (1./as_floatx(kp**2 + 2*kp*kd))
self.x_past = create_shared_variable(np.zeros((shapes[0], shapes[1])))
self.e_past = create_shared_variable(np.zeros((shapes[0], shapes[2])))
@symbolic
def compute_grad(self, xc, ec, x_true = None, e_true = None):
"""
        :param xc: current (encoded) input signal
        :param ec: current (encoded) error signal
        :param x_true: optional true input; if given, the running x state is reconstructed from it
        :param e_true: optional true error; if given, the running e state is reconstructed from it
        :return: the estimated weight gradient
"""
x_past = self.x_past*self.r if x_true is None else x_true*(self.kp+self.kd)-xc
e_past = self.e_past*self.r if e_true is None else e_true*(self.kp+self.kd)-ec
w_grad = self.scale * (xc.T.dot(e_past+ec) + x_past.T.dot(ec))
if x_true is None:
add_update(self.x_past, x_past + xc)
if e_true is None:
add_update(self.e_past, e_past + ec)
return w_grad
@symbolic
def past_weight_grad_calculator2(xs, es, kp_x, kd_x, kp_e, kd_e, shapes):
"""
This attempt never really got off the ground. It doesn't work
"""
kp_x, kd_x, kp_e, kd_e = [as_floatx(k) for k in (kp_x, kd_x, kp_e, kd_e)]
n_samples, n_in, n_out = shapes
rx = kd_x/(kp_x+kd_x)
re = kd_e/(kp_e+kd_e)
xr = create_shared_variable(np.zeros((n_samples, n_in)))
er = create_shared_variable(np.zeros((n_samples, n_out)))
# xr_new = xr*rx + xs/(kp_x+kd_x)
# er_new = er*re + es/(kp_e+kd_e)
arr = rx*re/(1-rx*re)
xr_new = xr*arr + xs/(kp_x+kd_x)
er_new = er*arr + es/(kp_e+kd_e)
xsum = create_shared_variable(np.zeros((n_samples, n_in)))
esum = create_shared_variable(np.zeros((n_samples, n_out)))
xsum_new = xsum+xr_new
esum_new = esum+er_new
x_nospikes = tt.eq(xs, 0)
e_nospikes = tt.eq(es, 0)
dw = xs.T.dot(esum_new) + xsum_new.T.dot(es)
add_update(xr, xr_new)
add_update(er, er_new)
add_update(xsum, xsum_new*x_nospikes)
add_update(esum, esum_new*e_nospikes)
return xs.T.dot(er) + xr.T.dot(es)
# return xr.T.dot(er)
# return dw
| 29.426829
| 86
| 0.61293
| 427
| 2,413
| 3.199063
| 0.203747
| 0.061493
| 0.102489
| 0.096633
| 0.317716
| 0.259151
| 0.225476
| 0.225476
| 0.17716
| 0
| 0
| 0.006098
| 0.252383
| 2,413
| 82
| 87
| 29.426829
| 0.751109
| 0.123498
| 0
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068182
| false
| 0
| 0.068182
| 0
| 0.204545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
090cd84d945d6fe0adc3e503a0af7a8286c3e451
| 5,603
|
py
|
Python
|
shop/views/cart_views.py
|
cuescience/cuescience-shop
|
bf5ea159f9277d1d6ab7acfcad3f2517723a225c
|
[
"MIT"
] | null | null | null |
shop/views/cart_views.py
|
cuescience/cuescience-shop
|
bf5ea159f9277d1d6ab7acfcad3f2517723a225c
|
[
"MIT"
] | null | null | null |
shop/views/cart_views.py
|
cuescience/cuescience-shop
|
bf5ea159f9277d1d6ab7acfcad3f2517723a225c
|
[
"MIT"
] | null | null | null |
import logging
from cart import Cart
from django.conf import settings
from django.contrib.sites.models import get_current_site
from django.utils import translation
from mailtemplates.models import EMailTemplate
from payment.models import PrePayment
from payment.services.paypal import paypal
from shop.checkout_wizard import condition_step_3, CheckoutWizardBase
from shop.models import Product, Order
from django.http import Http404, HttpResponseNotAllowed
from django.shortcuts import redirect, render_to_response, render
from django.template import RequestContext
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
logger = logging.getLogger(__name__)
@never_cache
def index_view(request):
return render_to_response("cuescience_shop/cart/index.html", RequestContext(request))
@never_cache
def add_view(request, product_id):
if request.method != "GET":
return HttpResponseNotAllowed(["GET"])
next = request.GET.get("next", "/")
cart = Cart(request)
try:
product = Product.objects.get(pk=product_id)
except Product.DoesNotExist:
raise Http404
cart.add(product, product.price)
return redirect(next)
@never_cache
def remove_view(request, product_id):
if request.method != "POST":
return HttpResponseNotAllowed(["POST"])
next = request.GET.get("next", "/")
cart = Cart(request)
try:
product = Product.objects.get(pk=product_id)
except Product.DoesNotExist:
raise Http404
cart.remove(product)
return redirect(next)
@never_cache
def update_view(request):
if request.method != "POST":
return HttpResponseNotAllowed(["POST"])
next = request.GET.get("next", "/")
cart = Cart(request)
for item in cart:
quantity = request.POST.get("quantity-{0}".format(item.product.pk), None)
        if quantity is None:
            continue
        quantity = int(quantity)
        if quantity == item.quantity:
            continue
if quantity == 0:
item.delete()
continue
item.quantity = quantity
item.save()
return redirect(next)
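# Note (added for illustration): update_view expects the POST body to carry one field per cart
# item, named "quantity-<product_pk>", e.g. quantity-42=3. A value of 0 deletes the item, while a
# missing or unchanged value leaves it untouched.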
class CheckoutWizard(CheckoutWizardBase):
template_name = "cuescience_shop/cart/wizard.html"
def create_paypalpayment(self, cart):
paypalservice = paypal.PayPalService()
transaction = paypal.Transaction(total=cart.summary())
for cart_item in cart:
print("ITEM {0}".format(cart_item))
product = cart_item.product
item = paypal.Item(product.title, cart_item.get_unit_price(), cart_item.quantity, "EUR", sku=product.id)
transaction.item_list.append(item)
        # TODO: add translation
item = paypal.Item("Versand / Shipping", cart.shipping_costs(), 1, "EUR", sku=0)
transaction.item_list.append(item)
domain = get_current_site(self.request)
payment_result = paypalservice.create_payment(transaction, domain)
return payment_result
def done(self, form_list, **kwargs):
cart = Cart(self.request)
cart.create_cart()
order = Order(cart=cart.cart)
client = form_list[0].save(commit=False)
address = form_list[1].save()
client.shipping_address = address
billing_address = address
if condition_step_3(self):
billing_address = form_list[2].save()
client.billing_address = billing_address
client.save()
order.client = client
payment_option = self.get_cleaned_data_for_step("4").get("payment_options", None)
print ("PAYMENT {0}".format(self.get_cleaned_data_for_step("4")))
language = translation.get_language().upper()
if payment_option == "PayPal":
result = self.create_paypalpayment(cart)
order.payment = result.paypal_payment_db
order.save()
# we need to do the checkout after saving the order,
# if something went wrong
cart.check_out()
mail_result = EMailTemplate.objects.send("{0}_ORDER_SUCCESS_PAYPAL".format(language), client.email,
{"order": order, "billing_address": billing_address,
"shipping_address": address,
"paypal_url": order.payment.approval_url})
if result.payment.error:
logger.error("PayPal payment went wrong! Errors: {0}".format(result.payment.error))
return render(self.request, "cuescience_shop/failure_paypal.html", {"order": order})
elif not result.payment.errors and order.payment.approval_url:
return render(self.request, "cuescience_shop/success_paypal.html", {"order": order})
elif payment_option == "Prepayment":
payment = PrePayment()
payment.save()
order.payment = payment
order.save()
cart.check_out()
mail_result = EMailTemplate.objects.send("{0}_ORDER_SUCCESS_PREPAYMENT".format(language), client.email,
{"order": order, "billing_address": billing_address,
"shipping_address": address})
return render(self.request, "cuescience_shop/success.html", {"order": order})
return render_to_response("cuescience_shop/cart/index.html", RequestContext(self.request))
| 35.916667
| 116
| 0.641442
| 621
| 5,603
| 5.624799
| 0.238325
| 0.022903
| 0.014887
| 0.014601
| 0.338391
| 0.308045
| 0.279702
| 0.223876
| 0.223876
| 0.223876
| 0
| 0.006023
| 0.259147
| 5,603
| 155
| 117
| 36.148387
| 0.835461
| 0.016955
| 0
| 0.3
| 0
| 0
| 0.091553
| 0.044323
| 0
| 0
| 0
| 0.006452
| 0
| 1
| 0.05
| false
| 0
| 0.125
| 0.008333
| 0.291667
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
090d1859bb4cd8c066260b4c5385d95d8db098d8
| 498
|
py
|
Python
|
solutions/python3/984.py
|
sm2774us/amazon_interview_prep_2021
|
f580080e4a6b712b0b295bb429bf676eb15668de
|
[
"MIT"
] | 42
|
2020-08-02T07:03:49.000Z
|
2022-03-26T07:50:15.000Z
|
solutions/python3/984.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | null | null | null |
solutions/python3/984.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | 40
|
2020-02-08T02:50:24.000Z
|
2022-03-26T15:38:10.000Z
|
class Solution:
def strWithout3a3b(self, A: int, B: int) -> str:
if not A and not B: return ''
if A >= B:
a = 2 if A >= 2 else 1
b = 2 if A - a - B < 1 and B >= 2 else 1 if B else 0
return a * 'a' + b * 'b' + self.strWithout3a3b(A - a, B - b)
else:
b = 2 if B >= 2 else 1
a = 2 if B - b - A < 1 and A >= 2 else 1 if A else 0
return b * 'b' + a * 'a' + self.strWithout3a3b(A - a, B - b)
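# Example (added for illustration): Solution().strWithout3a3b(4, 1) returns 'aabaa'. The recursion
# greedily emits up to two of the more frequent letter, inserting the scarcer letter only when
# needed to avoid three identical characters in a row.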
| 41.5
| 72
| 0.421687
| 89
| 498
| 2.359551
| 0.191011
| 0.047619
| 0.114286
| 0.057143
| 0.209524
| 0.209524
| 0
| 0
| 0
| 0
| 0
| 0.080586
| 0.451807
| 498
| 12
| 73
| 41.5
| 0.688645
| 0
| 0
| 0
| 0
| 0
| 0.008016
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
090e30708a609fe03c64ef91c96c83167bd3b51a
| 4,178
|
py
|
Python
|
niftypad/api/__init__.py
|
AMYPAD/NiftyPAD
|
80bc005ca409f503a8df3a13a071d2f3f413553f
|
[
"Apache-2.0"
] | null | null | null |
niftypad/api/__init__.py
|
AMYPAD/NiftyPAD
|
80bc005ca409f503a8df3a13a071d2f3f413553f
|
[
"Apache-2.0"
] | 2
|
2021-09-06T21:38:43.000Z
|
2021-10-05T11:07:08.000Z
|
niftypad/api/__init__.py
|
AMYPAD/NiftyPAD
|
80bc005ca409f503a8df3a13a071d2f3f413553f
|
[
"Apache-2.0"
] | null | null | null |
"""Clean API"""
import logging
from pathlib import Path
from . import readers
log = logging.getLogger(__name__)
def kinetic_model(src, dst=None, params=None, model='srtmb_basis', input_interp_method='linear',
w=None, r1=1, k2p=0.000250, beta_lim=None, n_beta=40, linear_phase_start=500,
linear_phase_end=None, km_outputs=None, thr=0.1, fig=False):
"""
Args:
src (Path or str): input patient directory or filename
dst (Path or str): output directory (default: `src` directory)
params (Path or str): config (relative to `src` directory)
model (str): any model from `niftypad.models` (see `niftypad.models.NAMES`)
input_interp_method (str): the interpolation method for getting reference input:
linear, cubic, exp_1, exp_2, feng_srtm
w (ndarray): weights for weighted model fitting
r1 (float): a pre-chosen value between 0 and 1 for r1, used in srtmb_asl_basis
k2p (float): a pre-chosen value for k2p, in second^-1, used in
srtmb_k2p_basis, logan_ref_k2p, mrtm_k2p
beta_lim (list[int]): [beta_min, beta_max] for setting the lower and upper limits
of beta values in basis functions, used in srtmb_basis, srtmb_k2p_basis, srtmb_asl_basis
n_beta (int): number of beta values/basis functions, used in
srtmb_basis, srtmb_k2p_basis, srtmb_asl_basis
linear_phase_start (int): start time of linear phase in seconds, used in logan_ref,
logan_ref_k2p, mrtm, mrtm_k2p
linear_phase_end (int): end time of linear phase in seconds, used in logan_ref,
logan_ref_k2p, mrtm, mrtm_k2p
km_outputs (list[str]): the kinetic parameters to save, e.g. ['R1', 'k2', 'BP']
thr (float): threshold value between 0 and 1. Used to mask out voxels with mean value
over time exceeding `thr * max(image value)`
fig (bool): whether to show a figure to check model fitting
"""
import nibabel as nib
import numpy as np
from niftypad import basis
from niftypad.image_process.parametric_image import image_to_parametric
from niftypad.models import get_model_inputs
from niftypad.tac import Ref
src_path = Path(src)
if src_path.is_dir():
fpath = next(src_path.glob('*.nii'))
else:
fpath = src_path
src_path = fpath.parent
log.debug("file:%s", fpath)
if dst is None:
dst_path = src_path
else:
dst_path = Path(dst)
assert dst_path.is_dir()
meta = readers.find_meta(src_path, filter(None, [params, fpath.stem]))
dt = np.asarray(meta['dt'])
ref = np.asarray(meta['ref'])
ref = Ref(ref, dt)
# change ref interpolation to selected method
ref.run_interp(input_interp_method=input_interp_method)
log.debug("looking for first `*.nii` file in %s", src_path)
img = nib.load(fpath)
# pet_image = img.get_fdata(dtype=np.float32)
pet_image = np.asanyarray(img.dataobj)
# basis functions
if beta_lim is None:
beta_lim = [0.01 / 60, 0.3 / 60]
# change ref.inputf1cubic -> ref.input_interp_1
b = basis.make_basis(ref.input_interp_1, dt, beta_lim=beta_lim, n_beta=n_beta, w=w, k2p=k2p)
if km_outputs is None:
km_outputs = ['R1', 'k2', 'BP']
# change ref.inputf1cubic -> ref.input_interp_1
user_inputs = {
'dt': dt, 'ref': ref, 'inputf1': ref.input_interp_1, 'w': w, 'r1': r1, 'k2p': k2p,
'beta_lim': beta_lim, 'n_beta': n_beta, 'b': b, 'linear_phase_start': linear_phase_start,
'linear_phase_end': linear_phase_end, 'fig': fig}
model_inputs = get_model_inputs(user_inputs, model)
# log.debug("model_inputs:%s", model_inputs)
parametric_images_dict, pet_image_fit = image_to_parametric(pet_image, dt, model, model_inputs,
km_outputs, thr=thr)
for kp in parametric_images_dict:
nib.save(nib.Nifti1Image(parametric_images_dict[kp], img.affine),
f"{dst_path / fpath.stem}_{model}_{kp}_{fpath.suffix}")
nib.save(nib.Nifti1Image(pet_image_fit, img.affine),
f"{dst_path / fpath.stem}_{model}_fit_{fpath.suffix}")
| 43.978947
| 99
| 0.66539
| 629
| 4,178
| 4.195548
| 0.284579
| 0.041682
| 0.025767
| 0.022736
| 0.200834
| 0.158393
| 0.158393
| 0.13111
| 0.089428
| 0.089428
| 0
| 0.020782
| 0.228339
| 4,178
| 94
| 100
| 44.446809
| 0.797767
| 0.4045
| 0
| 0.04
| 0
| 0.02
| 0.104044
| 0.032435
| 0
| 0
| 0
| 0
| 0.02
| 1
| 0.02
| false
| 0
| 0.18
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09112b983864e08dcf3260b85da5bc6f69581ccc
| 1,360
|
py
|
Python
|
cli/src/accretion_cli/_commands/raw/__init__.py
|
mattsb42/accretion
|
7cce5f4ed6d290bd9314b116be91417ded6b0f64
|
[
"Apache-2.0"
] | 1
|
2019-10-19T11:18:17.000Z
|
2019-10-19T11:18:17.000Z
|
cli/src/accretion_cli/_commands/raw/__init__.py
|
mattsb42/accretion
|
7cce5f4ed6d290bd9314b116be91417ded6b0f64
|
[
"Apache-2.0"
] | 13
|
2019-06-10T07:03:26.000Z
|
2019-11-06T01:09:38.000Z
|
cli/src/accretion_cli/_commands/raw/__init__.py
|
mattsb42/accretion
|
7cce5f4ed6d290bd9314b116be91417ded6b0f64
|
[
"Apache-2.0"
] | null | null | null |
"""Raw CLI commands."""
from typing import IO
import click
from ..._templates import artifact_builder, replication_listener, source_region_core
from ..._util.workers_zip import build_and_write_workers
from .add import add_to_deployment
from .init import init_project
_TEMPLATES = {"builder": artifact_builder, "listener": replication_listener, "core-source": source_region_core}
@click.group("raw")
def raw_cli():
"""Raw Accretion commands. Not recommended for direct use."""
@raw_cli.command()
@click.argument("template_type", type=click.Choice(_TEMPLATES.keys()))
@click.argument("output", type=click.File(mode="w", encoding="utf-8"))
def generate(template_type: str, output: IO):
"""Generate a template.
OUTPUT : Where to write the template?
\f
:param str template_type: The type of template to generate.
:param str output: Where to write the template?
"""
template = _TEMPLATES[template_type].build()
output.write(template.to_json(indent=4))
@raw_cli.command()
@click.argument("output", type=click.File(mode="wb"))
def build_workers(output: IO):
"""Build the workers zip file.
OUTPUT : Where to write the zip?
\f
:param str output: Where to write the workers zip?
"""
build_and_write_workers(outfile=output)
raw_cli.add_command(add_to_deployment)
raw_cli.add_command(init_project)
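# Usage sketch (assumed top-level CLI name and click-generated command names, not part of the
# original file):
#   accretion raw generate builder builder-template.json
#   accretion raw build-workers workers.zip
# "generate" validates TEMPLATE_TYPE against the _TEMPLATES keys and writes the rendered JSON
# template to OUTPUT; "build-workers" writes the workers zip to OUTPUT.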
| 27.2
| 111
| 0.733088
| 191
| 1,360
| 5.015707
| 0.319372
| 0.037578
| 0.05428
| 0.075157
| 0.236952
| 0.174322
| 0.135699
| 0
| 0
| 0
| 0
| 0.00172
| 0.144853
| 1,360
| 49
| 112
| 27.755102
| 0.822012
| 0.264706
| 0
| 0.095238
| 0
| 0
| 0.066028
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.285714
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0914e1a28a157c416a8a2c605a43e0263d7aefd4
| 701
|
py
|
Python
|
distil/utils/config_helper.py
|
ansunsujoe/distil
|
cf6cae2b88ef129d09c159aae0569978190e9f98
|
[
"MIT"
] | 83
|
2021-01-06T06:50:30.000Z
|
2022-03-31T05:16:32.000Z
|
distil/utils/config_helper.py
|
ansunsujoe/distil
|
cf6cae2b88ef129d09c159aae0569978190e9f98
|
[
"MIT"
] | 30
|
2021-02-27T06:09:47.000Z
|
2021-12-23T11:03:36.000Z
|
distil/utils/config_helper.py
|
ansunsujoe/distil
|
cf6cae2b88ef129d09c159aae0569978190e9f98
|
[
"MIT"
] | 13
|
2021-03-05T18:26:58.000Z
|
2022-03-12T01:53:17.000Z
|
import json
import os
def read_config_file(filename):
"""
Loads and returns a configuration from the supplied filename / path.
Parameters
----------
filename: string
The name/path of the config file to load.
Returns
----------
config: object
        The resulting configuration loaded from the JSON file
"""
print(filename.split('.')[-1])
if filename.split('.')[-1] not in ['json']:
        raise IOError('Only json files are supported for now!')
if not os.path.exists(filename):
raise FileNotFoundError('Config file does not exist!')
with open(filename, 'r') as f:
config = json.load(f)
return config
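# Usage sketch (hypothetical path, not part of the original file):
#   config = read_config_file("configs/experiment.json")
# A non-.json filename raises IOError and a missing file raises FileNotFoundError, both before
# any parsing is attempted.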
| 24.172414
| 72
| 0.600571
| 86
| 701
| 4.872093
| 0.569767
| 0.071599
| 0.066826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003953
| 0.278174
| 701
| 29
| 73
| 24.172414
| 0.824111
| 0.350927
| 0
| 0
| 0
| 0
| 0.165842
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.363636
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0915536721dec4fcf77cccd8a1e6caa20567b01f
| 1,944
|
py
|
Python
|
Easy/26.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | 6
|
2017-09-25T18:05:50.000Z
|
2019-03-27T00:23:15.000Z
|
Easy/26.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | 1
|
2017-10-29T12:04:41.000Z
|
2018-08-16T18:00:37.000Z
|
Easy/26.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | null | null | null |
# ------------------------------
# 26. Remove Duplicates from Sorted Array
#
# Description:
# Given a sorted array, remove the duplicates in place such that each element appear only once and return the new length.
# Do not allocate extra space for another array, you must do this in place with constant memory.
#
# For example,
# Given input array nums = [1,1,2],
#
# Your function should return length = 2, with the first two elements of nums being 1 and 2 respectively. It doesn't matter what you leave beyond the new length.
#
# Version: 1.0
# 09/17/17 by Jianfa
# ------------------------------
class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) == 0:
return 0
else:
nums_len = len(nums)
i = 0
check = nums[0] - 1
while i < nums_len:
if check != nums[i]:
check = nums[i]
i += 1
else:
nums.pop(i)
nums_len -= 1
return len(nums)
# Used for test
if __name__ == "__main__":
test = Solution()
nums = [1,1,1,2,3,4,4,4,4]
print(test.removeDuplicates(nums))
# ------------------------------
# Good idea from other solution:
# Actually there is no need to really remove values from the list. As the last sentence says,
# "It doesn't matter what you leave beyond the new length." So we can just overwrite the first
# several positions (as many as there are unique values) and leave the remaining values unchanged.
# We use two runners: a fast runner and a slow runner. Whenever a new value is met, write it at the
# slow runner's position and advance the slow runner; otherwise only move the fast runner.
# Here is a link for reference:
# https://leetcode.com/problems/remove-duplicates-from-sorted-array/solution/
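# A minimal sketch (not the author's code) of the two-runner idea described
# above: `slow` marks where the next unique value is written, `fast` scans.
def remove_duplicates_two_pointers(nums):
    if not nums:
        return 0
    slow = 0
    for fast in range(1, len(nums)):
        if nums[fast] != nums[slow]:
            slow += 1
            nums[slow] = nums[fast]  # overwrite in place; values past the new length don't matter
    return slow + 1

# e.g. remove_duplicates_two_pointers([1, 1, 1, 2, 3, 4, 4, 4, 4]) returns 4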
| 34.714286
| 161
| 0.587963
| 270
| 1,944
| 4.192593
| 0.492593
| 0.029152
| 0.031802
| 0.045936
| 0.132509
| 0.077739
| 0.077739
| 0.077739
| 0.077739
| 0.077739
| 0
| 0.023138
| 0.28858
| 1,944
| 56
| 162
| 34.714286
| 0.795372
| 0.640432
| 0
| 0.1
| 0
| 0
| 0.012422
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0
| 0
| 0.2
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0919fdf2ce825bd818ddbc07ef5fd14d15e3d623
| 6,373
|
py
|
Python
|
ncpsort/cluster_synthetic_data/inference_plot_synthetic.py
|
yueqiw/ncp-sort
|
045361d93bc9d8ef2596cdda7c485b6ffd77dd81
|
[
"MIT"
] | 2
|
2019-08-06T10:10:37.000Z
|
2020-09-30T12:11:28.000Z
|
ncpsort/cluster_synthetic_data/inference_plot_synthetic.py
|
yueqiw/ncp-sort
|
045361d93bc9d8ef2596cdda7c485b6ffd77dd81
|
[
"MIT"
] | 1
|
2021-04-14T12:09:02.000Z
|
2021-07-19T04:06:05.000Z
|
ncpsort/cluster_synthetic_data/inference_plot_synthetic.py
|
yueqiw/ncp-sort
|
045361d93bc9d8ef2596cdda7c485b6ffd77dd81
|
[
"MIT"
] | null | null | null |
"""Plot clustered spikes
Usage:
python -m ncpsort.cluster_synthetic_data.inference_plot_synthetic \
--inference_dir ./inference_synthetic_N-1000/cluster_S-150-beam_NCP-10000 \
--min_cls_size 50 --plot_type overlay
or, with the same --inference_dir: --min_cls_size 50 --plot_type tsne
"""
import numpy as np
import torch
import time
import json
import argparse
import os
from ncpsort.utils.spike_utils import get_chan_nbrs, select_template_channels, template_window
from ncpsort.utils.plotting import DEFAULT_COLORS
from ncpsort.utils.plotting import plot_spike_clusters_and_gt_in_rows
from ncpsort.utils.plotting import plot_spike_clusters_and_templates_overlay
from ncpsort.utils.plotting import plot_raw_and_encoded_spikes_tsne
parser = argparse.ArgumentParser(description='Plot inference results.')
parser.add_argument('--inference_dir', type=str)
parser.add_argument('--min_cls_size', type=int, default=0)
parser.add_argument('--topn', type=int, default=1)
parser.add_argument('--plot_mfm', action="store_const", const=True, default=False)
parser.add_argument('--plot_type', type=str, default="overlay")
if __name__ == "__main__":
args = parser.parse_args()
do_corner_padding = True
output_dir = args.inference_dir
with open(os.path.join(output_dir, "infer_params.json"), "r") as f:
infer_params = json.load(f)
min_cls_size = args.min_cls_size
templates = None
templates_use = None
templates_name = None
infer_params['nbr_dist'] = 70
infer_params['n_nbr'] = 7
print("parameters:\n", json.dumps(infer_params, indent=2))
geom = np.array([
[-585.0, 270.0],
[-645.0, 270.0],
[-525.0, 270.0],
[-615.0, 210.0],
[-555.0, 210.0],
[-615.0, 330.0],
[-555.0, 330.0]]
)
chans_with_nbrs, chan_to_nbrs = get_chan_nbrs(geom, infer_params['nbr_dist'], infer_params['n_nbr'], keep_less_nbrs=False)
print("{} channels used:".format(len(chans_with_nbrs)))
print(chans_with_nbrs)
topn = args.topn
data_dir = os.path.join(output_dir, "data_ncp")
# fig_dir_by_row = os.path.join(output_dir, "figures_by_row")
# if not os.path.isdir(fig_dir_by_row): os.mkdir(fig_dir_by_row)
fig_dir_overlay = os.path.join(output_dir, "figs_overlay_min-cls-{}_temp-{}".format(min_cls_size, templates_name))
if not os.path.isdir(fig_dir_overlay): os.mkdir(fig_dir_overlay)
fig_dir_vert_overlay = os.path.join(output_dir, "figs_overlay_vertical_min-cls-{}_temp-{}".format(min_cls_size, templates_name))
if not os.path.isdir(fig_dir_vert_overlay): os.mkdir(fig_dir_vert_overlay)
if args.plot_mfm:
mfm_dir = os.path.join(infer_params['data_name'], "cluster_mfm", "data_mfm")
input_dir = infer_params['data_name']
fnames_list = [x[:-len(".npz")] for x in os.listdir(os.path.join(input_dir, "data_input")) if x.endswith(".npz")]  # slice off the suffix; str.rstrip strips characters, not substrings
fnames_list = sorted(fnames_list)
for fname in fnames_list:
if args.plot_mfm:
mfm_fname = [x for x in os.listdir(mfm_dir) if fname in x and x.endswith(".npy")]
mfm_fname = mfm_fname[0][:-len(".npy")]  # slice off the ".npy" suffix
npy_fname = os.path.join(mfm_dir, "{}.npy".format(mfm_fname))
mfm_clusters = np.load(npy_fname)
mfm_name = "MFM"
else:
mfm_clusters = None
mfm_name = None
print("Plotting {}:".format(fname))
npz_fname = os.path.join(data_dir, "{}_ncp.npz".format(fname))
npz = np.load(npz_fname)
clusters, nll, data_arr, gt_labels = npz['clusters'], npz['nll'], npz['data_arr'], npz['gt_labels']
# plot_spike_clusters_and_gt_in_rows(
# css, nll, data_arr, gt_labels, topn=topn,
# figdir=fig_dir_by_row, fname_postfix=fname,
# plot_params={"spacing":1.25, "width":0.9, "vscale":1.5, "subplot_adj":0.9},
# downsample=3)
temp_in_ch = None
templates_name = "{} templates".format(templates_name) if templates_name else None
nbr_channels = np.arange(len(geom))
if args.plot_type == 'overlay':
plot_spike_clusters_and_templates_overlay(
clusters, nll, data_arr, geom, nbr_channels, DEFAULT_COLORS, topn=topn,
extra_clusters=mfm_clusters, extra_name=mfm_name, gt_labels=gt_labels,
min_cls_size=min_cls_size, templates=temp_in_ch, template_name=templates_name,
figdir=fig_dir_overlay, fname_postfix=fname, size_single=(9,6),
plot_params={"time_scale":1.1, "scale":8., "alpha_overlay":0.1})
n_ch = len(nbr_channels)
vertical_geom = np.stack([np.zeros(n_ch), - np.arange(n_ch) * 12 * 7]).T  # all channels at x=0, stacked 84 units apart vertically
plot_spike_clusters_and_templates_overlay(
clusters, nll, data_arr, vertical_geom, np.arange(n_ch), DEFAULT_COLORS, topn=topn,
extra_clusters=mfm_clusters, extra_name=mfm_name, gt_labels=gt_labels,
min_cls_size=min_cls_size, templates=temp_in_ch, template_name=templates_name,
figdir=fig_dir_vert_overlay, fname_postfix=fname, size_single=(2.5,18), vertical=True,
plot_params={"time_scale":1.1, "scale":8., "alpha_overlay":0.1})
elif args.plot_type == 'tsne':
fig_dir_tsne = os.path.join(output_dir, "figs_tsne_min-cls-{}".format(min_cls_size))
if not os.path.isdir(fig_dir_tsne): os.mkdir(fig_dir_tsne)
tsne_dir = os.path.join(infer_params['data_name'], "spike_encoder_it-18600/data_encoder")
fname = [x for x in os.listdir(tsne_dir) if fname in x and x.endswith(".npz")]
data_encoded = np.load(os.path.join(tsne_dir, "{}".format(fname[0])))
data_encoded = data_encoded['encoded_spikes']
fname = fname[0][:-len("_encoded_spikes.npz")]  # slice off the suffix; rstrip would strip characters, not the substring
plot_raw_and_encoded_spikes_tsne(
clusters, nll, data_arr, data_encoded, DEFAULT_COLORS, topn=topn,
extra_clusters=mfm_clusters, extra_name=mfm_name, gt_labels=gt_labels,
min_cls_size=min_cls_size, sort_by_count=True,
figdir=fig_dir_tsne, fname_postfix=fname, size_single=(6,6),
tsne_params={'seed': 0, 'perplexity': 30},
plot_params={'pt_scale': 1}, show=False
)
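# Note (illustration only, not part of the pipeline): the vertical layout
# built above places every channel at x = 0 with y stepping down by
# 12 * 7 = 84 units per channel. For 4 channels,
#   np.stack([np.zeros(4), -np.arange(4) * 12 * 7]).T
# evaluates to [[0, 0], [0, -84], [0, -168], [0, -252]].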
| 45.198582
| 132
| 0.661541
| 927
| 6,373
| 4.210356
| 0.197411
| 0.026134
| 0.03587
| 0.024596
| 0.415322
| 0.3464
| 0.287215
| 0.255957
| 0.207789
| 0.182168
| 0
| 0.024128
| 0.213086
| 6,373
| 140
| 133
| 45.521429
| 0.754138
| 0.102464
| 0
| 0.108911
| 0
| 0
| 0.102402
| 0.018587
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.108911
| 0
| 0.108911
| 0.039604
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
091cc72f8820abb4968972f3dd6375ff59862250
| 27,920
|
py
|
Python
|
src/loanpy/loanfinder.py
|
martino-vic/Framework-for-computer-aided-borrowing-detection
|
efce90390db14009fd27d96f32b1252a74aac1ef
|
[
"AFL-3.0"
] | 4
|
2020-05-18T15:10:44.000Z
|
2020-10-10T05:06:26.000Z
|
src/loanpy/loanfinder.py
|
martino-vic/Soundchange
|
09a276e16daed2828b144227a21e53746cbb72e4
|
[
"AFL-3.0"
] | null | null | null |
src/loanpy/loanfinder.py
|
martino-vic/Soundchange
|
09a276e16daed2828b144227a21e53746cbb72e4
|
[
"AFL-3.0"
] | null | null | null |
"""
Find (old) loanwords between two languages
"""
from ast import literal_eval
from functools import partial
from logging import getLogger
from pandas import DataFrame, Series, concat, read_csv
from tqdm import tqdm
from panphon.distance import Distance
from loanpy.helpers import gensim_multiword
from loanpy.adrc import Adrc
logger = getLogger(__name__)
class NoPhonMatch(Exception):
pass
def read_data(path2forms, adrc_col): # "explode" below turns list-valued cells into one row per word
"""
Reads a column with adapted or reconstructed words in a forms.csv file, \
drops empty elements, drops elements with certain keywords used by \
loanpy.adrc.Adrc.adapt and \
loanpy.adrc.Adrc.reconstruct, such as "not old", "wrong phonotactics", etc. \
Splits elements by ", " and assigns every word its own spot in the \
pandas Series which is returned. Called by loanpy.loanfinder.Search.__init__
:param path2forms: path to CLDF's forms.csv
:type path2forms: pathlib.PosixPath | str | None
:param adrc_col: name of column containing predicted \
adapted or reconstructed words
:type adrc_col: str
:return: Series object with one word per element. \
Words can be reg-exes as well
:rtype: pandas.core.series.Series
:Example:
>>> from pathlib import Path
>>> from loanpy.loanfinder import __file__, read_data
>>> PATH2READ_DATA = Path(__file__).parent / "tests" / \
"input_files" / "ad_read_data.csv"
>>> read_data(PATH2READ_DATA, "col1")
0 a
1 blub
1 club
Name: col1, dtype: object
"""
# so the class can be instantiated even without path2forms
if path2forms is None:
return None
# these red flags are returned by adapt() and reconstruct()
todrop = "wrong clusters|wrong phonotactics|not old|wrong vowel harmony"
# reading only 1 column saves RAM. Expensive calculations ahead.
df_forms = read_csv(path2forms, encoding="utf-8",
usecols=[adrc_col]).fillna("")
# drops columns with red flags
df_forms = df_forms[~df_forms[adrc_col].str.contains(todrop)]
# reconstructed words don't have ", " so nothing should happen there
df_forms[adrc_col] = df_forms[adrc_col].str.split(", ")
# explode is the pandas Series equivalent of flattening a nested list
df_forms = df_forms.explode(adrc_col) # one word per row
return df_forms[adrc_col] # a pandas Series object
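# A quick standalone illustration (not part of this module) of the
# split-then-explode pattern used above:
# >>> from pandas import Series
# >>> Series(["a", "blub, club"]).str.split(", ").explode()
# 0       a
# 1    blub
# 1    club
# dtype: object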
def gen(iterable1, iterable2, function, prefix="Calculating", *args):
"""
A generator that applies a function to two iterables, \
incl. tqdm-progress-bar with customisable prefix. \
Called by loanpy.loanfinder.Search.loans to calculate phonological and \
semantic distances.
:param iterable1: The first iterable, will be zipped with \
iterable2 and looped through.
:type iterable1: pathlib.PosixPath | list | iterable
:param iterable2: The second iterable, will be zipped with \
iterable1 and looped through.
:type iterable2: pathlib.PosixPath | list | iterable
:param function: The function that should be applied to the elements of \
the tuples from the two zipped iterables.
:type function: function
:param prefix: The text that should be displayed by the progress-bar
:type prefix: str, default="Calculating"
:param args: positional arguments that shall be passed to the function
:type args: type depends on requirements of function \
passed to param <function>.
:return: the outputs of the function passed to param <function>
:rtype: generator object
:Example:
>>> from loanpy.loanfinder import gen
>>> list(gen([1, 2, 3], [4, 5, 6], lambda x, y: x+y))
Calculating: 100%|███████████████████████████████████| \
3/3 [00:00<00:00, 7639.90it/s]
[5, 7, 9]
>>> from loanpy.loanfinder import gen
>>> list(gen([1, 2, 3], [4, 5, 6], lambda x, y, z: x+y+z, "running", 1))
running: 100%|███████████████████████████████████| \
3/3 [00:00<00:00, 7639.90it/s]
[6, 8, 10]
"""
for ele1, ele2 in zip(tqdm(iterable1, prefix), iterable2):
yield function(ele1, ele2, *args) # can't pass kwargs!
class Search():
"""
Define the two word lists, the measurements to \
calculate phonological distance and semantic similarity \
and the thresholds below or above which to accept matches.
:param path2donordf: The path to forms.csv of the \
donor language containing a column of predicted adaptations into \
the recipient language.
:type path2donordf: pathlib.PosixPath | str | None, \
default=None
:param path2recipdf: The path to forms.csv of the \
recipient language, containing a column of \
predicted backward-reconstructions stored as regular expressions.
:type path2recipdf: pathlib.PosixPath | str | None, \
default=None
:param donorcol: The name of the column in the donor \
language's forms.csv containing a column of predicted adaptations into \
the tentative recipient language.
:type donorcol: str, default="ad"
:param recipcol: The name of the column in the recipient \
language's forms.csv containing a column of words in that language. When \
searching for old loanwords, this column can consist of regular \
expressions \
that represent backward reconstructions of present-day words.
:type recipcol: str, default="rc"
:param phondist: The maximal phonological distance between two words. \
By default, matches have to be identical.
:type phondist: int, default=0
:param phondist_msr: The name of the phonological distance measure, \
which has to be a method of panphon.distance.Distance
:type phondist_msr: "dolgo_prime_distance" | \
"dolgo_prime_distance_div_maxlen" | \
"fast_levenshtein_distance" | \
"fast_levenshtein_distance_div_maxlen" | \
"feature_difference" | \
"feature_edit_distance" | \
"feature_edit_distance_div_maxlen" | \
"hamming_feature_edit_distance" | \
"hamming_feature_edit_distance_div_maxlen" | \
"hamming_substitution_cost" | \
"jt_feature_edit_distance" | \
"jt_feature_edit_distance_div_maxlen" | \
"jt_hamming_feature_edit_distance" | \
"jt_hamming_feature_edit_distance_div_maxlen" | \
"jt_weighted_feature_edit_distance" | \
"jt_weighted_feature_edit_distance_div_maxlen" | \
"levenshtein_distance", default="hamming_feature_edit_distance"
:param semsim: The minimal semantic similarity between the \
meaning of words. By default, meanings have to be identical.
:type semsim: int (float between -1 and 1 for gensim), default=1
:param semsim_msr: The function with which to measure semantic \
similarity.
:type semsim_msr: function of type func(a: str, b: str) -> int, \
default=loanpy.helpers.gensim_multiword
:param scdictlist_ad: list of correspondence dictionaries between \
tentative donor and recipient language generated with \
loanpy.qfysc.get_sound_corresp. Not a dictionary, therefore sequence \
important. \
Will be used in loanpy.loanfinder.Search.likeliestphonmatch to \
calculate likelihood \
(NSE) from predicted adaptation vs source word.
:type scdictlist_ad: None | list of 6 dicts. Dicts 0, 1, 2 \
capture phonological \
correspondences, dicts 3, 4, 5 phonotactic ones. dict0/dict3: the actual \
correspondences, dict1/dict4: How often they occur in the data, \
dict2/dict5: list of \
cognates in which they occur. default=None
:param scdictlist_rc: list of correspondence dictionaries between \
present-day language and past stage of that language generated with \
loanpy.qfysc.get_sound_corresp. Not a dictionary, therefore sequence \
important. \
Will be used in loanpy.loanfinder.Search.likeliestphonmatch to \
calculate likelihood \
(NSE) from predicted reconstruction vs source word.
:type scdictlist_rc: None | list of 6 dicts. Dicts 0, 1, 2 \
capture phonological \
correspondences, dicts 3, 4, 5 phonotactic ones. dict0/dict3: the actual \
correspondences, dict1/dict4: How often they occur in the data, \
dict2/dict5: list of \
cognates in which they occur. default=None
:Example:
>>> from pathlib import Path
>>> from loanpy.loanfinder import Search, __file__
>>> path2rec = Path(__file__).parent / "tests" \
/ "input_files"/ "hun.csv"
>>> path2don = Path(__file__).parent / "tests" \
/ "input_files"/ "got.csv"
>>> path2sc_ad = Path(__file__).parent / "tests" / "input_files" / \
"sc_ad_3cogs.txt"
>>> path2sc_rc = Path(__file__).parent / "tests" / "input_files" / \
"sc_rc_3cogs.txt"
>>> search_obj = Search(\
path2donordf=path2don, \
path2recipdf=path2rec, \
scdictlist_ad=path2sc_ad, \
scdictlist_rc=path2sc_rc)
How to plug in a different semantic similarity measure, \
e.g. BERT:
>>> from loanpy import loanfinder
>>> from loanpy.helpers import plug_in_model
>>> # pip install transformers==4.19.2
>>> from sentence_transformers import SentenceTransformer
>>> from sklearn.metrics.pairwise import cosine_similarity
>>> plug_in_model(SentenceTransformer("bert-base-nli-mean-tokens"))
>>> def bert_similarity(sentence1, sentence2):
>>> return float(\
cosine_similarity(helpers.model.encode([sentence1]), \
helpers.model.encode([sentence2])))
>>> path2rec = Path(__file__).parent / "tests" \
/ "input_files"/ "hun.csv"
>>> path2don = Path(__file__).parent / "tests" \
/ "input_files"/ "got.csv"
>>> path2sc_ad = Path(__file__).parent / "tests" / "input_files" / \
"sc_ad_3cogs.txt"
>>> path2sc_rc = Path(__file__).parent / "tests" / "input_files" / \
"sc_rc_3cogs.txt"
>>> # plug in bert_similarity here into param <semsim_msr>
>>> search_obj = Search(path2donordf=path2don, path2recipdf=path2rec, \
scdictlist_ad=path2sc_ad, scdictlist_rc=path2sc_rc, \
semsim_msr=bert_similarity)
"""
def __init__(self, path2donordf=None, path2recipdf=None, donorcol="ad",
recipcol="rc",
phondist=0, phondist_msr="hamming_feature_edit_distance",
semsim=1, semsim_msr=gensim_multiword,
scdictlist_ad=None, scdictlist_rc=None):
# pandas Series of predicted adapted donor words in which to search
self.search_in = read_data(path2donordf, donorcol)
# pd Series of reg-exes of reconstructed recipient words to search for
self.search_for = read_data(path2recipdf, recipcol)
# path to donor and recipient forms.csv to read extra infos later
self.donpath, self.recpath = path2donordf, path2recipdf
# names of the columns containing adapted and reconstructed words
self.doncol, self.reccol = donorcol, recipcol # used in postprocessing
self.phondist = phondist # maximal phonological distance of a match
self.phondist_msr = getattr(Distance(), phondist_msr) # distance measure
self.semsim = semsim # minimal semantic similarity of a match
self.semsim_msr = semsim_msr # semantic similarity measuring function
# normalised sum of examples for adaptions and reconstructions
self.get_nse_ad = Adrc(scdictlist=scdictlist_ad, mode="adapt").get_nse
self.get_nse_rc = Adrc(scdictlist=scdictlist_rc,
mode="reconstruct").get_nse
def phonmatch(self, search_for, index, dropduplicates=True):
"""
Check if a regular expression is contained \
in a wordlist and replace it with a number. \
The wordlist is a pandas Series object that gets initiated in \
loanpy.loanfinder.Search. To pass a wordlist in through the parameter \
of this function, use loanpy.loanfinder.Search.phonmatch_small
:param search_for: The regular expression for which to search in the \
donor language.
:type search_for: str
:param index: The number with which to replace a match. \
(This number will be \
used to merge the rest of the recipient language's \
data frame, so it should represent \
its index there.)
:type index: int
:param dropduplicates: If set to True, this will drop matches \
that have the same \
index in the wordlist \
(There's one adapted donor-word per row, but its index \
is the same as the original donor word's from which it was adapted. \
Therefore, one recipient word can match with the same donor \
word through multiple \
adaptations. Since the semantics are the same for all of \
those matches, the first match can be picked and duplicates \
dropped safely. This saves a lot of time and energy. \
Later, loanpy.loanfinder.Search.likeliestphonmatch calculates \
the likeliest phonological matches, \
but only for those phonological matches, whose semantics already matched.)
:type dropduplicates: bool, default=True
:return: a pandas data frame containing \
phonological matches. The index \
indicates the position (row) of the word in the data frame assigned \
to loanpy.loanfinder.Search.search_in. \
The column "recipdf_idx" is intended to indicate \
the position of the word in the word list of the recipient language. \
It is the same value as the one passed to param <index>.
:rtype: pandas.core.frame.DataFrame
:Example:
>>> from pathlib import Path
>>> from loanpy.loanfinder import Search, __file__
>>> path2read_data = Path(__file__).parent / "tests" / \
"input_files" / "ad_read_data.csv"
>>> search_obj = Search(path2donordf=path2read_data, donorcol="col1")
>>> search_obj.phonmatch(search_for="(b|c)?lub", index=99,
>>> dropduplicates=False)
match recipdf_idx
1 blub 99
1 club 99
"""
# maximal phonetic distance == 0 means only identical words are matches
if self.phondist == 0: # will drop all non-identical elements
matched = self.search_in[self.search_in.str.match(search_for)]
else: # will otherwise drop everything above the max distance
self.phondist_msr = partial(self.phondist_msr, target=search_for)
matched = self.search_in[
self.search_in.apply(self.phondist_msr) <= self.phondist]
# creates new col "recipdf_idx" - keys to the input df
dfphonmatch = DataFrame({"match": matched, "recipdf_idx": index})
# this makes things more economical. dropping redundancies
if dropduplicates is True:
dfphonmatch = dfphonmatch[~dfphonmatch.index.duplicated(
keep='first')]
# returns a pandas data frame
return dfphonmatch
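# A quick standalone illustration (not the author's code) of the
# Series.str.match call behind the identical-match branch above:
# >>> from pandas import Series
# >>> s = Series(["a", "blub", "club"])
# >>> s[s.str.match("(b|c)?lub")]
# 1    blub
# 2    club
# dtype: object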
def loans(self, write_to=False, postprocess=False, merge_with_rest=False):
"""
Searches for phonological matches \
and calculates their semantic similarity. Returns a candidate list of loanwords.
:param write_to: indicate if results should be written to file. \
If yes, provide path.
:type write_to: pathlib.PosixPath | str | None | False, \
default=False
:param postprocess: Indicate if results should be post-processed. See \
loanpy.loanfinder.Search.postprocess for more details
:type postprocess: bool, default=False
:param merge_with_rest: Indicate if additional info from input \
data frame columns should be copied into the output data frame. \
Helps with quick debugging sometimes. See \
loanpy.loanfinder.Search.merge_with_rest for more details
:type merge_with_rest: bool, default=False
:returns: data frame with potential loanwords
:rtype: pandas.core.frame.DataFrame
:Example:
>>> from pathlib import Path
>>> from loanpy.loanfinder import Search, __file__
>>> from loanpy.helpers import plug_in_model
>>> from gensim.models import word2vec
>>> from gensim.test.utils import common_texts
>>> in_got = path2donordf=Path(__file__).parent / "tests" / \
"input_files" / "loans_got.csv"
>>> in_hun = path2donordf=Path(__file__).parent / "tests" / \
"input_files" / "loans_hun.csv"
>>> search_obj = Search(in_got, in_hun, semsim=0.1)
>>> # plug in dummy vectors, api (default) would need \
internet + a minute to load
>>> plug_in_model(word2vec.Word2Vec(common_texts, min_count=1).wv)
>>> search_obj.loans()
match recipdf_idx Meaning_x Meaning_y gensim_multiword
0 blub 0 computer, interface human 0.109408
"""
# find phonological matches
dfmatches = concat(gen(self.search_for, self.search_for.index,
self.phonmatch,
"searching for phonological matches: "))
# raise exception if no matches found
if len(dfmatches) == 0:
raise NoPhonMatch("no phonological matches found")
# add translations for semantic comparison
dfmatches = dfmatches.merge(read_csv(self.recpath, encoding="utf-8",
usecols=["Meaning"]).fillna(""),
left_on="recipdf_idx", right_index=True)
dfmatches = dfmatches.merge(read_csv(self.donpath, encoding="utf-8",
usecols=["Meaning"]).fillna(""),
left_index=True, right_index=True)
# calculate semantic similarity of phonological matches
dfmatches[self.semsim_msr.__name__] = list(gen(dfmatches["Meaning_x"],
dfmatches["Meaning_y"],
self.semsim_msr,
"calculating semantic \
similarity of phonological matches: "))
# sorting and cutting off words with too low semantic similarity
logger.warning("cutting off by semsim=" +
str(self.semsim) +
"and ranking by semantic similarity")
dfmatches = dfmatches[dfmatches[
self.semsim_msr.__name__] >= self.semsim]
dfmatches = dfmatches.sort_values(by=self.semsim_msr.__name__,
ascending=False)
# 3 optional extra steps indicated in params, skipped by default
if postprocess:
dfmatches = self.postprocess(dfmatches)
if merge_with_rest:
dfmatches = self.merge_with_rest(dfmatches)
if write_to:
dfmatches.to_csv(write_to, encoding="utf-8", index=False)
logger.warning(f"file written to {write_to}")
logger.warning(f"done. Insert date and time later here.")
return dfmatches
def postprocess(self, dfmatches):
"""
Will replace every phonological match \
in the output data frame with its most likely version.
:param dfmatches: The entire data frame with potential loanwords
:type dfmatches: pandas.core.frame.DataFrame
:returns: the same data frame but with likelier adaptations of donor \
words
:rtype: pandas.core.frame.DataFrame
:Example:
>>> from pathlib import Path
>>> from pandas import DataFrame
>>> from loanpy.loanfinder import Search, __file__
>>> PATH2SC_AD = Path(__file__).parent / "tests" \
/ "input_files" / "sc_ad_likeliest.txt"
>>> PATH2SC_RC = Path(__file__).parent / "tests" \
/ "input_files" / "sc_rc_likeliest.txt"
>>> search_obj = Search(
>>> path2donordf=Path(__file__).parent / "tests" \
/ "input_files" / "loans_got.csv",
>>> path2recipdf=Path(__file__).parent / "tests" / \
"input_files" / "loans_hun.csv",
>>> scdictlist_ad=PATH2SC_AD, scdictlist_rc=PATH2SC_RC,
>>> semsim=0.2)
>>> dfin = DataFrame({"match": ["blub"], "recipdf_idx": [0],
>>> "Meaning_x": ["computer, interface"],
>>> "Meaning_y": ["human"], "semsim_msr": [0.10940766]})
>>> search_obj.postprocess(dfin)
postprocessing...
recipdf_idx Meaning_x ... align_ad\
nse_combined
0 0 computer, interface ... ['b<b', 'l<l', 'u<u', 'b<b']\
15.0
"""
logger.warning(f"postprocessing...")
# read in data for likeliestphonmatch, i.e. col Segments in both,
# donor and recipient data frames
dfmatches = dfmatches.merge(read_csv(self.recpath, encoding="utf-8",
usecols=["Segments",
self.reccol]).fillna(""),
left_on="recipdf_idx", right_index=True)
dfmatches = dfmatches.merge(read_csv(self.donpath, encoding="utf-8",
usecols=["Segments",
self.doncol]).fillna(""),
left_index=True, right_index=True)
dfmatches["Segments_x"] = [i.replace(" ", "")
for i in dfmatches["Segments_x"]]
dfmatches["Segments_y"] = [i.replace(" ", "")
for i in dfmatches["Segments_y"]]
# calculate likeliest phonological matches
newcols = concat([self.likeliestphonmatch(ad, rc, segd, segr)
for ad, rc, segd, segr
in zip(dfmatches[self.doncol],
dfmatches[self.reccol],
dfmatches["Segments_y"],
dfmatches["Segments_x"])])
del dfmatches["match"] # delete non-likeliest matches
newcols.index = dfmatches.index # otherwise concat won't work
dfmatches = concat([dfmatches, newcols], axis=1) # add new cols
# delete redundant data
del (dfmatches["Segments_x"], dfmatches[self.reccol],
dfmatches["Segments_y"], dfmatches[self.doncol])
return dfmatches # same structure as input df
def likeliestphonmatch(self, donor_ad, recip_rc, donor_segment,
recip_segment):
"""
Called by loanpy.loanfinder.postprocess. \
Calculates the nse of recip_rc-recip_segment \
and donor_ad-donor_segment, adds them together \
and picks the word pair with the highest sum. \
Adds 2*4 columns from loanpy.adrc.Adrc.get_nse.
:param donor_ad: adapted words in the donor data frame
:type donor_ad: str (not a regular expression, words separated by ", ")
:param recip_rc: a reconstructed word
:type recip_rc: str (regular expression)
:param donor_segment: the original (non-adapted) donor word
:type donor_segment: str
:param recip_segment: the original (non-reconstructed) recipient word
:type recip_segment: str
:returns: The likeliest phonological match
:rtype: pandas.core.frame.DataFrame
:Example:
>>> from pathlib import Path
>>> from pandas import DataFrame
>>> from loanpy.loanfinder import Search, __file__
>>> PATH2SC_AD = Path(__file__).parent / "tests" \
/ "input_files" / "sc_ad_likeliest.txt"
>>> PATH2SC_RC = Path(__file__).parent / "tests" \
/ "input_files" / "sc_rc_likeliest.txt"
>>> PATH2READ_DATA = Path(__file__).parent / "tests" \
/ "input_files" / "ad_read_data.csv"
>>> search_obj = Search(
>>> PATH2READ_DATA, donorcol="col1",
>>> scdictlist_ad=PATH2SC_AD, scdictlist_rc=PATH2SC_RC)
>>> search_obj.likeliestphonmatch(donor_ad="a, blub, \
club", recip_rc="(b|c)?lub",
>>> donor_segment="elub", recip_segment="dlub")
match nse_rc se_rc ... distr_ad\
align_ad nse_combined
0 blub 10.0 50 ... [0, 0, 10, 10, 0] \
['e<V', 'C<b', 'l<l', 'u<u', 'b<b'] 14.0
[1 rows x 10 columns]
"""
# step 1: search for phonological matches between
# reconstructed reg-ex and list of predicted adaptations
dfph = self.phonmatch_small(Series(donor_ad.split(", "), name="match"),
recip_rc, dropduplicates=False)
# get the nse score between original and predictions
# and write to new columns
# cols se_rc, lst_rc, se_ad, lst_ad are just extra info for the user
dfph = DataFrame([(wrd,) + self.get_nse_rc(recip_segment, wrd) +
self.get_nse_ad(donor_segment, wrd)
for wrd in dfph["match"]],
columns=["match", "nse_rc", "se_rc", "distr_rc",
"align_rc", "nse_ad", "se_ad", "distr_ad",
"align_ad"])
# add combined nse
dfph["nse_combined"] = dfph["nse_rc"] + dfph["nse_ad"]
# get idx of max combined, keep only that idx (=likeliest match)
dfph = dfph[dfph.index == dfph["nse_combined"].idxmax()]
return dfph
def phonmatch_small(self, search_in, search_for, index=None,
dropduplicates=True):
"""
Same as loanpy.loanfinder.Search.phonmatch but search_in \
has to be passed as a parameter. This is the most practical \
solution because \
loanpy.loanfinder.Search.likeliestphonmatch() passes in lots of \
small and very different search_in data frames, while loans() passes in one big one.
:param search_in: The iterable to search within
:type search_in: pandas.core.series.Series
:param search_for: See loanpy.loanfinder.Search.phonmatch
:type search_for: str
:param index: See loanpy.loanfinder.Search.phonmatch
:type index: str | None, default=None
:param dropduplicates: See loanpy.loanfinder.Search.phonmatch
:type dropduplicates: bool, default=True
:returns: See loanpy.loanfinder.Search.phonmatch
:rtype: pandas.core.frame.DataFrame
"""
# for inline comments see loanpy.loanfinder.Search.phonmatch
if self.phondist == 0:
matched = search_in[search_in.str.match(search_for)]
else:
self.phondist_msr = partial(self.phondist_msr, target=search_for)
matched = search_in[
search_in.apply(self.phondist_msr) <= self.phondist]
dfphonmatch = DataFrame({"match": matched, "recipdf_idx": index})
if dropduplicates is True:
dfphonmatch = dfphonmatch[
~dfphonmatch.index.duplicated(keep='first')]
return dfphonmatch
def merge_with_rest(self, dfmatches):
"""
Merges the output data frame with the remaining columns \
from both input data frames. This helps with quick manual inspection of the results.
:param dfmatches: The output data frame
:type dfmatches: pandas.core.frame.DataFrame
:returns: same data frame with extra cols added from both \
input forms.csv
:rtype: pandas.core.frame.DataFrame
"""
logger.warning("Merging with remaining columns from input data frames")
# avoid duplicates
dfmatches = dfmatches.drop(["Meaning_x", "Meaning_y"], axis=1)
dfmatches = dfmatches.merge(read_csv(self.donpath,
encoding="utf-8").fillna(""),
left_index=True, right_index=True)
dfmatches = dfmatches.merge(read_csv(self.recpath,
encoding="utf-8").fillna(""),
left_on="recipdf_idx", right_index=True)
dfmatches = dfmatches.sort_values(by=self.semsim_msr.__name__,
ascending=False) # unsorted by merge
return dfmatches
| 42.691131
| 79
| 0.641261
| 3,388
| 27,920
| 5.150826
| 0.178276
| 0.023838
| 0.015243
| 0.020686
| 0.354994
| 0.286746
| 0.263137
| 0.226348
| 0.206865
| 0.198384
| 0
| 0.012456
| 0.263861
| 27,920
| 653
| 80
| 42.756508
| 0.833212
| 0.624749
| 0
| 0.233333
| 0
| 0
| 0.087365
| 0.003365
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06
| false
| 0.006667
| 0.053333
| 0
| 0.18
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
091cd8c5724529a22ff674589812a3252e946388
| 1,172
|
py
|
Python
|
scripts/tmp.py
|
always-newbie161/pyprobml
|
eb70c84f9618d68235ef9ba7da147c009b2e4a80
|
[
"MIT"
] | 2
|
2021-06-22T05:43:25.000Z
|
2021-06-22T08:40:16.000Z
|
scripts/tmp.py
|
always-newbie161/pyprobml
|
eb70c84f9618d68235ef9ba7da147c009b2e4a80
|
[
"MIT"
] | 1
|
2021-04-19T12:25:26.000Z
|
2021-04-19T12:25:26.000Z
|
scripts/tmp.py
|
always-newbie161/pyprobml
|
eb70c84f9618d68235ef9ba7da147c009b2e4a80
|
[
"MIT"
] | 1
|
2021-06-21T01:18:07.000Z
|
2021-06-21T01:18:07.000Z
|
import scipy
from scipy.io import loadmat
import random
import numpy as np
from sklearn.metrics import zero_one_loss
from sklearn.naive_bayes import BernoulliNB,MultinomialNB,GaussianNB
import matplotlib.pyplot as plt
from sklearn.feature_selection import mutual_info_classif
import os
data = loadmat('../data/XwindowsDocData.mat')
Xtrain = data['xtrain']
Xtrain = scipy.sparse.csc_matrix.toarray(Xtrain)
Xtest = data['xtest']
Xtest = scipy.sparse.csc_matrix.toarray(Xtest)
ytrain = data['ytrain']
ytest = data['ytest']
model = BernoulliNB()
model.fit(Xtrain, ytrain)
ypred_train = model.predict(Xtrain)
err_train = zero_one_loss(ytrain, ypred_train)  # zero_one_loss already returns the mean error rate
ypred_test = model.predict(Xtest)
err_test = zero_one_loss(ytest, ypred_test)
print('misclassification rates on train = '+str(err_train*100) +
' pc, on test = '+str(err_test*100)+' pc\n')
C = np.unique(data['ytrain']).size
print()
for i in range(0, C):
plt.bar(np.arange(0, 600, 1), np.exp(model.feature_log_prob_)[i, :])
plt.title(r'$P(x_j=1 \mid y='+str(i+1)+')$')
fileName = 'naiveBayesBow'+str(i+1)+'ClassCond'
plt.savefig(r'../figures/'+fileName)
plt.show()
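# A minimal standalone check (toy data, unrelated to XwindowsDocData) that
# np.exp(feature_log_prob_) recovers the per-class estimates of P(x_j=1 | y=c)
# that the bar plots above visualize:
toy = BernoulliNB().fit(np.array([[1, 0], [1, 1], [0, 1], [0, 0]]), [0, 0, 1, 1])
print(np.exp(toy.feature_log_prob_))  # shape (2 classes, 2 features)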
| 25.478261
| 72
| 0.726962
| 180
| 1,172
| 4.6
| 0.461111
| 0.039855
| 0.039855
| 0.048309
| 0.10628
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01462
| 0.124573
| 1,172
| 45
| 73
| 26.044444
| 0.792398
| 0
| 0
| 0
| 0
| 0
| 0.138343
| 0.023057
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.28125
| 0
| 0.28125
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
091da9e65fdc6e81e6f673354f6d6d367b942a8d
| 5,961
|
py
|
Python
|
src/google/events/firebase/remoteconfig/v1/RemoteConfigEventData.py
|
eclipselu/google-cloudevents-python
|
aa7ae3a593f94bd5810f68ca9b7c36eb1e32192d
|
[
"Apache-2.0"
] | 10
|
2020-09-19T10:43:34.000Z
|
2022-02-05T14:28:12.000Z
|
src/google/events/firebase/remoteconfig/v1/RemoteConfigEventData.py
|
eclipselu/google-cloudevents-python
|
aa7ae3a593f94bd5810f68ca9b7c36eb1e32192d
|
[
"Apache-2.0"
] | 39
|
2020-07-15T22:58:46.000Z
|
2022-02-03T23:19:26.000Z
|
src/google/events/firebase/remoteconfig/v1/RemoteConfigEventData.py
|
eclipselu/google-cloudevents-python
|
aa7ae3a593f94bd5810f68ca9b7c36eb1e32192d
|
[
"Apache-2.0"
] | 6
|
2020-06-30T12:58:02.000Z
|
2021-01-23T02:53:44.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code parses date/times, so please
#
# pip install python-dateutil
#
# To use this code, make sure you
#
# import json
#
# and then, to convert JSON from a string, do
#
# result = remote_config_event_data_from_dict(json.loads(json_string))
from typing import Optional, Any, Union, TypeVar, Type, cast
from datetime import datetime
import dateutil.parser
T = TypeVar("T")
def from_str(x: Any) -> str:
assert isinstance(x, str)
return x
def from_none(x: Any) -> Any:
assert x is None
return x
def from_union(fs, x):
# try each converter in turn and return the result of the first that accepts x
for f in fs:
try:
return f(x)
except Exception:
pass
assert False
def from_int(x: Any) -> int:
assert isinstance(x, int) and not isinstance(x, bool)
return x
def from_datetime(x: Any) -> datetime:
return dateutil.parser.parse(x)
def to_class(c: Type[T], x: Any) -> dict:
assert isinstance(x, c)
return cast(Any, x).to_dict()
class UpdateUser:
"""Aggregation of all metadata fields about the account that performed the update."""
"""Email address."""
email: Optional[str]
"""Image URL."""
image_url: Optional[str]
"""Display name."""
name: Optional[str]
def __init__(self, email: Optional[str], image_url: Optional[str], name: Optional[str]) -> None:
self.email = email
self.image_url = image_url
self.name = name
@staticmethod
def from_dict(obj: Any) -> 'UpdateUser':
assert isinstance(obj, dict)
email = from_union([from_str, from_none], obj.get("email"))
image_url = from_union([from_str, from_none], obj.get("imageUrl"))
name = from_union([from_str, from_none], obj.get("name"))
return UpdateUser(email, image_url, name)
def to_dict(self) -> dict:
result: dict = {}
result["email"] = from_union([from_str, from_none], self.email)
result["imageUrl"] = from_union([from_str, from_none], self.image_url)
result["name"] = from_union([from_str, from_none], self.name)
return result
class RemoteConfigEventData:
"""The data within all Firebase Remote Config events."""
"""The user-provided description of the corresponding Remote Config template."""
description: Optional[str]
"""Only present if this version is the result of a rollback, and will be the
version number of the Remote Config template that was rolled-back to.
"""
rollback_source: Optional[str]
"""Where the update action originated."""
update_origin: Union[int, None, str]
"""When the Remote Config template was written to the Remote Config server."""
update_time: Optional[datetime]
"""What type of update was made."""
update_type: Union[int, None, str]
"""Aggregation of all metadata fields about the account that performed the update."""
update_user: Optional[UpdateUser]
"""The version number of the version's corresponding Remote Config template."""
version_number: Optional[str]
def __init__(self, description: Optional[str], rollback_source: Optional[str], update_origin: Union[int, None, str], update_time: Optional[datetime], update_type: Union[int, None, str], update_user: Optional[UpdateUser], version_number: Optional[str]) -> None:
self.description = description
self.rollback_source = rollback_source
self.update_origin = update_origin
self.update_time = update_time
self.update_type = update_type
self.update_user = update_user
self.version_number = version_number
@staticmethod
def from_dict(obj: Any) -> 'RemoteConfigEventData':
assert isinstance(obj, dict)
description = from_union([from_str, from_none], obj.get("description"))
rollback_source = from_union([from_str, from_none], obj.get("rollbackSource"))
update_origin = from_union([from_int, from_str, from_none], obj.get("updateOrigin"))
update_time = from_union([from_datetime, from_none], obj.get("updateTime"))
update_type = from_union([from_int, from_str, from_none], obj.get("updateType"))
update_user = from_union([UpdateUser.from_dict, from_none], obj.get("updateUser"))
version_number = from_union([from_str, from_none], obj.get("versionNumber"))
return RemoteConfigEventData(description, rollback_source, update_origin, update_time, update_type, update_user, version_number)
def to_dict(self) -> dict:
result: dict = {}
result["description"] = from_union([from_str, from_none], self.description)
result["rollbackSource"] = from_union([from_str, from_none], self.rollback_source)
result["updateOrigin"] = from_union([from_int, from_str, from_none], self.update_origin)
result["updateTime"] = from_union([lambda x: x.isoformat(), from_none], self.update_time)
result["updateType"] = from_union([from_int, from_str, from_none], self.update_type)
result["updateUser"] = from_union([lambda x: to_class(UpdateUser, x), from_none], self.update_user)
result["versionNumber"] = from_union([from_str, from_none], self.version_number)
return result
def remote_config_event_data_from_dict(s: Any) -> RemoteConfigEventData:
return RemoteConfigEventData.from_dict(s)
def remote_config_event_data_to_dict(x: RemoteConfigEventData) -> Any:
return to_class(RemoteConfigEventData, x)
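# A minimal round-trip sketch; the payload below is hypothetical sample data,
# not taken from a real Remote Config event:
if __name__ == "__main__":
    sample = {
        "description": "test rollout",
        "updateTime": "2020-11-01T12:00:00Z",
        "updateUser": {"email": "dev@example.com"},
        "versionNumber": "7",
    }
    event = remote_config_event_data_from_dict(sample)
    print(event.update_time, event.update_user.email)
    print(remote_config_event_data_to_dict(event)["versionNumber"])  # -> 7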
| 38.458065
| 264
| 0.694347
| 804
| 5,961
| 4.962687
| 0.217662
| 0.042105
| 0.055388
| 0.06015
| 0.284962
| 0.241604
| 0.186466
| 0.134336
| 0.072682
| 0.072682
| 0
| 0.001667
| 0.195101
| 5,961
| 154
| 265
| 38.707792
| 0.829929
| 0.153665
| 0
| 0.149425
| 0
| 0
| 0.051364
| 0.004773
| 0
| 0
| 0
| 0
| 0.08046
| 1
| 0.16092
| false
| 0.011494
| 0.034483
| 0.034483
| 0.471264
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0925149b145bd29447f47950402ae72fc46058f2
| 18,374
|
py
|
Python
|
Converter_V09102021_gkernel_stop.py
|
SolaleT/KTMnet
|
ded379cceac560eccfc9a05e8e0c55292544f8b9
|
[
"MIT"
] | null | null | null |
Converter_V09102021_gkernel_stop.py
|
SolaleT/KTMnet
|
ded379cceac560eccfc9a05e8e0c55292544f8b9
|
[
"MIT"
] | null | null | null |
Converter_V09102021_gkernel_stop.py
|
SolaleT/KTMnet
|
ded379cceac560eccfc9a05e8e0c55292544f8b9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed May 8 12:43:42 2019
@author: solale
In this version I will try to shrink the network and reduce the tensorization
"""
# Multilayer Perceptron
import pandas
import numpy
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# from tensorflow import set_random_seed
# set_random_seed(2)
import tensorflow
import tensorflow.keras
import math
from tensorflow.keras.utils import plot_model
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2DTranspose,Input, Reshape, Conv2D, Flatten
from tensorflow.keras.layers import Dense
from sklearn.metrics import mean_squared_error
# from tensorflow.keras.layers.merge import concatenate
from tensorflow.keras.layers import concatenate
import argparse
# from tensorflow.keras.utils.np_utils import to_categorical
from tensorflow.keras import utils
# import tensorflow as tf
from sklearn import preprocessing
from keras_ex.gkernel import GaussianKernel
# https://github.com/darecophoenixx/wordroid.sblo.jp/tree/master/lib/keras_ex/gkernel
# regression loss on the first 4 output columns (MMSE at baseline, 6, 12, 24 months)
def custom_loss_1(y_true, y_pred):
A = tensorflow.keras.losses.mean_squared_error(y_true[:,0:4], y_pred[:,0:4])
return A
# classification loss on the last 4 output columns (diagnosis classes),
# weighted by the module-level constant n defined below
def custom_loss_2(y_true, y_pred):
B = tensorflow.keras.losses.categorical_crossentropy(y_true[:,-4:], y_pred[:,-4:])
return n*B
# combined objective: m * regression MSE + n * classification cross-entropy
def custom_loss(y_true, y_pred):
A = tensorflow.keras.losses.mean_squared_error(y_true[:,0:4], y_pred[:,0:4])
B = tensorflow.keras.losses.categorical_crossentropy(y_true[:,-4:], y_pred[:,-4:])
m = 1
return (m*A) + (n*B)
########################## argument getting
#parser = argparse.ArgumentParser()
#parser.add_argument("--i", )
#parser.add_argument("--j", )
#parser.add_argument("--k", )
#parser.add_argument("--m", )
#a = parser.parse_args()
##
#i=int(a.i)
#j=int(a.j)
#k=int(a.k)
#####
i = 64 #64
# j=16
# k=64
n = 75 #105
n_splits=10
max_epochs=500
BatchSize=350
N_AlternatingControler=2
###################### ######################
# Reading Multi Modal Y for train and Test
# train data
AllDataset = pandas.read_csv('./XY_BLD_Converter', low_memory=False)
AllDataset = AllDataset.set_index(AllDataset.RID)
AllDataset = AllDataset.fillna(0)
AllDataset['DX'] = AllDataset['DX'].map({'NL':0, 'MCI':1, 'Converter':2, 'Dementia':3})
le = preprocessing.LabelEncoder()
AllDataset['DX'] = le.fit_transform(AllDataset['DX'])
###################### MRI ######################
MRI_X = AllDataset.loc[:,['Ventricles', 'Hippocampus', 'WholeBrain', 'Entorhinal', 'Fusiform', 'MidTemp', 'ICV']]
MRI_Y = AllDataset.loc[:, ['MMSE_BLD','MMSE_6','MMSE_12','MMSE_24','DX']]
MRI_RID = AllDataset.RID
# normalize data
MRI_X = (MRI_X - MRI_X.mean())/ (MRI_X.max() - MRI_X.min())
###################### PET ######################
PET_X = AllDataset.loc[:,['FDG', 'PIB', 'AV45']]
PET_Y = AllDataset.loc[:, ['MMSE_BLD','MMSE_6','MMSE_12','MMSE_24','DX']]
PET_RID = AllDataset.RID
# normalize data
PET_X = (PET_X - PET_X.mean()) / (PET_X.max() - PET_X.min())
###################### COG ######################
COG_X = AllDataset.loc[:, ['RAVLTimmediate', 'RAVLTlearning', 'RAVLTforgetting', 'RAVLTpercforgetting','FAQ',
'EcogPtMem', 'EcogPtLang', 'EcogPtVisspat', 'EcogPtPlan', 'EcogPtOrgan', 'EcogPtDivatt', 'EcogPtTotal',
'EcogSPMem', 'EcogSPLang', 'EcogSPVisspat', 'EcogSPPlan', 'EcogSPOrgan', 'EcogSPDivatt', 'EcogSPTotal']]#'CDRSB', 'MOCA',
COG_Y = AllDataset.loc[:, ['MMSE_BLD','MMSE_6','MMSE_12','MMSE_24','DX']]
COG_RID = AllDataset.RID
# normalize data
COG_X = (COG_X - COG_X.mean()) / (COG_X.std())
###################### CSF ######################
CSF_X = AllDataset.loc[:,['ABETA', 'PTAU', 'TAU']]
CSF_Y = AllDataset.loc[:, ['MMSE_BLD','MMSE_6','MMSE_12','MMSE_24','DX']]
CSF_RID = AllDataset.RID
# normalize data
CSF_X = (CSF_X - CSF_X.mean()) / (CSF_X.max() - CSF_X.min())
###################### RF ######################
# RF_X = AllDataset.loc[:,['AGE', 'PTEDUCAT', 'APOE4','female','male']]
# RF_Y = AllDataset.loc[:, ['MMSE_BLD','MMSE_6','MMSE_12','MMSE_24','DX']]
# RF_RID = AllDataset.RID
# # normalize data
# RF_X.AGE = (RF_X - RF_X.mean()) / (RF_X.max() - )
RF_X_1 = AllDataset.loc[:,['AGE','PTEDUCAT']]
# normalize age and years of education
RF_X_1 = (RF_X_1 - RF_X_1.mean()) / (RF_X_1.max() - RF_X_1.min())
RF_X_1=RF_X_1.fillna(0)
# normalize apoe4
RF_X_A = AllDataset.loc[:,['APOE4']]
RF_X_A=RF_X_A-1
RF_X_A=RF_X_A.fillna(0)
# normalize gender
RF_X_gender = AllDataset.loc[:,['female','male']]
# RF_X_sex[RF_X_sex=='Male']=-1
# RF_X_sex[RF_X_sex=='Female']=1
RF_X_gender=RF_X_gender.fillna(0)
#construct RF
RF_X = pandas.concat([RF_X_1, RF_X_A, RF_X_gender], axis=1)
##############################################
from tensorflow.keras.layers import Dropout
import numpy as np
from sklearn.metrics import r2_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.model_selection import StratifiedKFold
from scipy.stats import pearsonr, spearmanr
# FCN specifications
units_L2 = 25
units_L3 = 7
####################################### MRI FCN ###############################################
# mri FCN
MRI_inp_dim = MRI_X.shape[1]
MRI_visible = Input(shape=(MRI_inp_dim,))
hiddenMRI1 = Dense(2*MRI_inp_dim, kernel_initializer='normal', activation='linear')(MRI_visible)
hiddenMRI2 = hiddenMRI1
MRI_output = Dense(MRI_inp_dim, kernel_initializer='normal', activation='linear')(hiddenMRI2)
####################################### PET FCN ###############################################
PET_inp_dim = PET_X.shape[1]
PET_visible = Input(shape=(PET_inp_dim,))
hiddenPET1 = Dense(2*PET_inp_dim, kernel_initializer='normal', activation='linear')(PET_visible)
hiddenPET2=hiddenPET1
PET_output = Dense(PET_inp_dim, kernel_initializer='normal', activation='linear')(hiddenPET2)
####################################### COG FCN ###############################################
# mri FCN
COG_inp_dim = COG_X.shape[1]
COG_visible = Input(shape=(COG_inp_dim,))
hiddenCOG1 = Dense(2*COG_inp_dim, kernel_initializer='normal', activation='linear')(COG_visible)
hiddenCOG2=hiddenCOG1
COG_output = Dense(COG_inp_dim, kernel_initializer='normal', activation='linear')(hiddenCOG2)
####################################### CSF FCN ###############################################
CSF_inp_dim = CSF_X.shape[1]
CSF_visible = Input(shape=(CSF_inp_dim,))
hiddenCSF1 = Dense(2*CSF_inp_dim, kernel_initializer='normal', activation='linear')(CSF_visible)
hiddenCSF2=hiddenCSF1
CSF_output = Dense(CSF_inp_dim, kernel_initializer='normal', activation='linear')(hiddenCSF2)
####################################### CSF FCN ###############################################
RF_inp_dim = RF_X.shape[1]
RF_visible = Input(shape=(RF_inp_dim,))
hiddenRF1 = Dense(2*RF_inp_dim, kernel_initializer='normal', activation='linear')(RF_visible)
hiddenRF2=hiddenRF1
RF_output = Dense(RF_inp_dim, kernel_initializer='normal', activation='linear')(hiddenRF2)
#################################### Concat FCN ###############################################
merge = concatenate([MRI_output, PET_output, COG_output, CSF_output, RF_output])#
# print(merge.shape[1])
# interpretation layer
# hidden1 = Dense(100, activation='relu')(merge)
hidden1 = GaussianKernel(100, merge.shape[1], kernel_gamma="auto", name='gkernel1')(merge)
# hidden1 = Dropout(0.1)(hidden1)
hidden1_reshape = Reshape((10, 10, 1))(hidden1)
layer2D_1 = Conv2DTranspose(filters=10, kernel_size=(3,3), strides=(1, 1), padding="same")(hidden1_reshape)
layer2D_2 = Conv2DTranspose(filters=10, kernel_size=(3,3), strides=(1, 1), dilation_rate=(2,2),padding="same")(hidden1_reshape)
#layer2D_3 = Conv2DTranspose(filters=10, kernel_size=(3,3), strides=(1, 1), dilation_rate=(3,3), padding="same")(hidden1_reshape)
layer2D_4 = concatenate([layer2D_1,layer2D_2])#concatenate([layer2D_1,layer2D_2,layer2D_3])
# input layer
visible = layer2D_4
# first feature extractor
conv1 = Conv2D(i, kernel_size=3)(visible)#relu
conv1 = Dropout(0.1)(conv1)
flat1 = Flatten()(conv1)
## cutting out from hidden1 output
# prediction output
output_reg = Dense(4, activation='relu',kernel_regularizer=tensorflow.keras.regularizers.l1(0.01))(flat1)#relu
output_class = Dense(4, activation='softmax',kernel_regularizer=tensorflow.keras.regularizers.l1(0.01))(flat1)#softmax
output = concatenate([output_reg, output_class])
categorical_labels = utils.to_categorical(COG_Y.iloc[:,-1], num_classes=4)
X_all=[MRI_X.values, PET_X.values, COG_X.values, CSF_X.values, RF_X.values]#
YTrain = COG_Y
YTrain1 = YTrain.reset_index()
Y_Train = pandas.concat ([YTrain1[['MMSE_BLD','MMSE_6','MMSE_12','MMSE_24']], pandas.DataFrame(categorical_labels)], axis=1)
Y_all=Y_Train
AccScores = []
AccDetails=[]
All_Predicts_class=[]
All_Truth_class=[]
All_Predicts_reg=[]
All_Truth_reg=[]
AllRegErrors = np.zeros(shape=(4,1),dtype='float16')
X_all=[MRI_X.values, PET_X.values, COG_X.values, CSF_X.values, RF_X.values]#
Y_all=Y_Train
All_RMSE=np.zeros(shape=(4,1),dtype='float16')
model = Model(inputs= [MRI_visible, PET_visible, COG_visible, CSF_visible, RF_visible], outputs=output) #
#keras.utils.plot_model(model,to_file='model-final.png', show_shapes=True)
OPTIMIZER_1=tensorflow.keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
OPTIMIZER_2=tensorflow.keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.save_weights('SavedInitialWeights.h5')
callback_stop = tensorflow.keras.callbacks.EarlyStopping(
monitor='val_loss', min_delta=0, patience=20, verbose=0,
mode='auto', baseline=None, restore_best_weights=False
)
max_epochs_Alternating = int(max_epochs / N_AlternatingControler)  # builtin int(); np.int is deprecated in NumPy
import matplotlib.pyplot as plt
for repeator in range(0,1):
#print('Repeat No: ', repeator+1)
# define n_splits-fold cross validation test harness
kfold = StratifiedKFold(n_splits, shuffle=True, random_state=repeator)
FoldCounter=0
for train, test in kfold.split(X_all[1], COG_Y.iloc[:,-1].values):
FoldCounter=FoldCounter+1
model.load_weights('SavedInitialWeights.h5')
X_train_here=[X_all[0][train], X_all[1][train], X_all[2][train], X_all[3][train], X_all[4][train]]#
print('---Repeat No: ', repeator+1, ' ---Fold No: ', FoldCounter)
# model.compile(loss=custom_loss, optimizer=OPTIMIZER_1)
# History = model.fit(X_train_here, Y_all.values[train],
# epochs= max_epochs_Alternating, batch_size=BatchSize, verbose=0)#250-250
model.compile(loss=custom_loss, optimizer=OPTIMIZER_2)
History = model.fit(X_train_here, Y_all.values[train], validation_split=0.1,
epochs= 2*max_epochs_Alternating, batch_size=BatchSize,
callbacks=[callback_stop], verbose=0)#250-250
plt.plot(History.history['loss'])
plt.plot(History.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.grid()
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('Fold_'+str(FoldCounter)+'_History.png')
plt.close()
# for iters in range(N_AlternatingControler):
# # Fit the model
# if np.random.rand() < 0.5:
# model.compile(loss=custom_loss, optimizer=OPTIMIZER_1)
# print(iters)
# else:
# model.compile(loss=custom_loss, optimizer=OPTIMIZER_2)
# History = model.fit(X_train_here, Y_all.values[train], epochs= max_epochs_Alternating, batch_size=BatchSize, verbose=0)#250-250
X_test_here=[X_all[0][test], X_all[1][test], X_all[2][test], X_all[3][test], X_all[4][test]]#
Y_Validation=model.predict(X_test_here)
MSE_0 = mean_squared_error(Y_all.iloc[test, 0], Y_Validation[:, 0])#/Y_Pred_MultiModal.shape[0]
MSE_6 = mean_squared_error(Y_all.iloc[test, 1], Y_Validation[:, 1])#/Y_Pred_MultiModal.shape[0]
MSE_12 = mean_squared_error(Y_all.iloc[test, 2], Y_Validation[:, 2])#/Y_Pred_MultiModal.shape[0]
MSE_24 = mean_squared_error(Y_all.iloc[test, 3], Y_Validation[:, 3])#/Y_Pred_MultiModal.shape[0]
All_RMSE[0]=math.sqrt(MSE_0)
All_RMSE[1]=math.sqrt(MSE_6)
All_RMSE[2]=math.sqrt(MSE_12)
All_RMSE[3]=math.sqrt(MSE_24)
print([math.sqrt(MSE_0), math.sqrt(MSE_6), math.sqrt(MSE_12), math.sqrt(MSE_24)])
AllRegErrors=np.append(AllRegErrors,All_RMSE,axis=1)
#rho1, pval1 = spearmanr(Y_Pred_MultiModal[:, 0], Y_all.iloc[:, 0])
##### Classification
All_Predicts_class.append(Y_Validation[:,-4:])
All_Predicts_reg.append(Y_Validation[:,0:4])
All_Truth_class.append(COG_Y.iloc[test,-1])
All_Truth_reg.append(Y_all.iloc[test, 0:4])
DX_pred = np.argmax(Y_Validation[:,-4:], axis=1)
DX_real = COG_Y.iloc[test,-1]
score = accuracy_score(DX_real, DX_pred)
print(score)
AccScores.append(score*100)
# target_names = ['class 0', 'class 1', 'class 2', 'class 3']
target_names = ['CN', 'MCI_nc', 'MCI_c', 'AD']
class_names = target_names
Details=classification_report(DX_real, DX_pred, target_names=target_names,output_dict=True)
print(classification_report(DX_real, DX_pred, target_names=target_names))
AccDetails.append(Details)
#print >> f1, classification_report(DX_real, DX_pred, target_names=target_names)
print('#########################################################################')
print('#########################################################################')
print(i, n)
print('Average Result:')
print('########')
print('Mean of RMSE : ', np.mean(AllRegErrors[:,1:],1))
print('Mean of RMSE ALL: ', np.mean(AllRegErrors[:,1:]))
print('Mean of accuracy : ',np.mean(AccScores))
print(' --------------------- ')
print('std of RMSE : ', np.std(AllRegErrors[:,1:],1))
print('std of RMSE ALL: ', np.std(AllRegErrors[:,1:]))
print('std of accuracy : ',np.std(AccScores))
AD_precision=[]; MCI_nc_precision=[]; MCI_c_precision=[]; CN_precision=[];
AD_recall=[]; MCI_nc_recall=[]; MCI_c_recall=[]; CN_recall=[]
AD_f1=[]; MCI_nc_f1=[]; MCI_c_f1=[]; CN_f1=[]
AD_support=[]; MCI_nc_support=[]; MCI_c_support=[]; CN_support=[]
for i in range(len(AccDetails)):
Details=AccDetails[i]
A=Details['AD']['precision']
AD_precision.append(A)
A=Details['MCI_c']['precision']
MCI_c_precision.append(A)
A=Details['MCI_nc']['precision']
MCI_nc_precision.append(A)
A=Details['CN']['precision']
CN_precision.append(A)
A=Details['AD']['recall']
AD_recall.append(A)
A=Details['MCI_c']['recall']
MCI_c_recall.append(A)
A=Details['MCI_nc']['recall']
MCI_nc_recall.append(A)
A=Details['CN']['recall']
CN_recall.append(A)
A=Details['AD']['f1-score']
AD_f1.append(A)
A=Details['MCI_c']['f1-score']
MCI_c_f1.append(A)
A=Details['MCI_nc']['f1-score']
MCI_nc_f1.append(A)
A=Details['CN']['f1-score']
CN_f1.append(A)
A=Details['AD']['support']
AD_support.append(A)
A=Details['MCI_c']['support']
MCI_c_support.append(A)
A=Details['MCI_nc']['support']
MCI_nc_support.append(A)
A=Details['CN']['support']
CN_support.append(A)
print(' --------------------- ')
print(' --------------------- ')
print('Mean of precision of AD : ', np.mean(AD_precision))
print('Mean of precision of MCI_c : ', np.mean(MCI_c_precision))
print('Mean of precision of MCI_nc : ', np.mean(MCI_nc_precision))
print('Mean of precision of CN : ', np.mean(CN_precision))
print(' --------------------- ')
print('std of precision of AD : ', np.std(AD_precision))
print('std of precision of MCI_c : ', np.std(MCI_c_precision))
print('std of precision of MCI_nc : ', np.std(MCI_nc_precision))
print('std of precision of CN : ', np.std(CN_precision))
print(' --------------------- ')
print(' --------------------- ')
print('Mean of recall of AD : ', np.mean(AD_recall))
print('Mean of recall of MCI_c : ', np.mean(MCI_c_recall))
print('Mean of recall of MCI_nc : ', np.mean(MCI_nc_recall))
print('Mean of recall of CN : ', np.mean(CN_recall))
print(' --------------------- ')
print('std of recall of AD : ', np.std(AD_recall))
print('std of recall of MCI_c : ', np.std(MCI_c_recall))
print('std of recall of MCI_nc : ', np.std(MCI_nc_recall))
print('std of recall of CN : ', np.std(CN_recall))
print(' --------------------- ')
print(' --------------------- ')
print('Mean of f1-score of AD : ', np.mean(AD_f1))
print('Mean of f1-score of MCI_c : ', np.mean(MCI_c_f1))
print('Mean of f1-score of MCI_nc : ', np.mean(MCI_nc_f1))
print('Mean of f1-score of CN : ', np.mean(CN_f1))
print(' --------------------- ')
print('std of f1-score of AD : ', np.std(AD_f1))
print('std of f1-score of MCI_c : ', np.std(MCI_c_f1))
print('std of f1-score of MCI_nc : ', np.std(MCI_nc_f1))
print('std of f1-score of CN : ', np.std(CN_f1))
print(' --------------------- ')
print(' --------------------- ')
print('Mean of support of AD : ', np.mean(AD_support))
print('Mean of support of MCI_c : ', np.mean(MCI_c_support))
print('Mean of support of MCI_nc : ', np.mean(MCI_nc_support))
print('Mean of support of CN : ', np.mean(CN_support))
print(' --------------------- ')
print('std of support of AD : ', np.std(AD_support))
print('std of support of MCI_c : ', np.std(MCI_c_support))
print('std of support of MCI_nc : ', np.std(MCI_nc_support))
print('std of support of CN : ', np.std(CN_support))
print('#########################################################################')
print('#########################################################################')
DataDict={"AccScores":AccScores,"AccDetails":AccDetails,"AllRegErrors":AllRegErrors
, "All_Predicts_class": All_Predicts_class , "All_Truth_class": All_Truth_class,
"All_Predicts_reg": All_Predicts_reg , "All_Truth_reg": All_Truth_reg}
import pickle
pickle.dump(DataDict,open("pkl_Results_Combined_2.pkl","wb"))
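# Added sketch (not part of the original script): reload the pickled results for
# later analysis; assumes the dump above succeeded.
with open("pkl_Results_Combined_2.pkl", "rb") as pkl_file:
    Reloaded = pickle.load(pkl_file)
print("Reloaded mean accuracy:", np.mean(Reloaded["AccScores"]))
print("Reloaded mean RMSE:", np.mean(Reloaded["AllRegErrors"][:, 1:]))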
09252c654e5dd6c2c427adfbd63c5afde450b576 | 1,641 | py | Python | sentiment_analysis.py | Submanifold/us-inauguration-speeches | d86221c47cc8b34363cc4ca9faaa4236436fc9c0 | ["MIT"] | null | null | null | sentiment_analysis.py | Submanifold/us-inauguration-speeches | d86221c47cc8b34363cc4ca9faaa4236436fc9c0 | ["MIT"] | null | null | null | sentiment_analysis.py | Submanifold/us-inauguration-speeches | d86221c47cc8b34363cc4ca9faaa4236436fc9c0 | ["MIT"] | 1 | 2019-08-12T11:40:28.000Z | 2019-08-12T11:40:28.000Z |
#!/usr/bin/env python3
#
# Calculates sentiment polarity scores over the progression of a speech
# and writes them to STDOUT. The output can be parsed by gnuplot.
#
# Original author: Bastian Rieck
from textblob import TextBlob
import os
import sys
"""
Calculates sentiments over the progression of a given speech. The
results of this function are scaled such that the total *time* of
the speech lies between [0,1].
"""
def make_sentiment_curve(text, title):
blob = TextBlob(text)
n = len(blob.sentences)
polarities = []
print("\"%s\"" % title)
for index, sentence in enumerate(blob.sentences):
polarity = sentence.sentiment.polarity
        t = index / (n - 1) if n > 1 else 0.0 # guard: a one-sentence speech would otherwise divide by zero
polarities.append(polarity)
print(t, polarity)
# Try to mitigate issues with floating point numbers; I am pretty sure
# that this should *not* be that relevant here, though.
    sum_polarities = sum(sorted(polarities))
mean_polarity = sum_polarities / n
print("\n")
print("\"%s\"" % title)
print("0.0 %f" % mean_polarity)
print("1.0 %f" % mean_polarity)
print("\n")
"""
Extracts a year and a name from a filename.
"""
def get_year_and_name(filename):
basename = os.path.basename(filename)
name = os.path.splitext(basename)[0]
name = name.replace("_", " ")
year = name[:4]
name = name[5:]
return year, name
"""
main
"""
if __name__ == "__main__":
for filename in sys.argv[1:]:
year, name = get_year_and_name(filename)
text = ""
title = "%s (%s)" % (name, year)
with open(filename) as f:
text = f.read()
make_sentiment_curve(text, title)
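# Added usage note (assumption): speech files are expected to be named like
# "1861_Abraham_Lincoln.txt", so get_year_and_name() returns ("1861", "Abraham Lincoln").
# A typical invocation (assumed) would be:
#     python3 sentiment_analysis.py speeches/*.txt > sentiment_curves.dat
# after which the blocks in sentiment_curves.dat can be plotted with gnuplot.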
092739ee8d9e52a0a47febefb10eeb7490145d9b | 8,174 | py | Python | EGEF_operation.py | zahraghh/Operation-Planning | dfa1fbbcce8d07544e96131f4e1f42c1846ce95b | ["MIT"] | null | null | null | EGEF_operation.py | zahraghh/Operation-Planning | dfa1fbbcce8d07544e96131f4e1f42c1846ce95b | ["MIT"] | null | null | null | EGEF_operation.py | zahraghh/Operation-Planning | dfa1fbbcce8d07544e96131f4e1f42c1846ce95b | ["MIT"] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import warnings
import pandas as pd
import scipy.stats as st
import statsmodels as sm
import seaborn as sns
import math
import collections
from collections import Counter
import statistics
import matplotlib
# By state level - Fuels emission factors - 1999 to 2017
HHV_Coal= 19.73 # Coke Coal HHV (mm BTU/ short ton) Source: Emission Factors for Greenhouse Gas Inventories
HHV_Gas= 1.033 # Natural gas HHV (mmBtu/ mcf) Source: Emission Factors for Greenhouse Gas Inventories
HHV_Pet= 0.145*42 # Crude Oil (close to distilled Oil) HHV (mmBtu/ barrel) Source: Emission Factors for Greenhouse Gas Inventories
# Input Files
emissions_regions = pd.read_csv('https://raw.githubusercontent.com/zahraghh/EmissionFactorElectricity/master/emission_annual_state.csv')
generation_regions = pd.read_csv('https://raw.githubusercontent.com/zahraghh/EmissionFactorElectricity/master/annual_generation_state.csv')
consumption_regions = pd.read_csv('https://raw.githubusercontent.com/zahraghh/EmissionFactorElectricity/master/consumption_annual_state.csv')
US_states= pd.read_csv('https://raw.githubusercontent.com/zahraghh/EmissionFactorElectricity/master/US_states.csv')
# Characteristics of Inputs
states= US_states['States']
emissions_regions_year= emissions_regions['Year']
consumption_regions_year= consumption_regions['YEAR']
generation_regions_year= generation_regions['YEAR']
emissions_regions_state= emissions_regions['State']
consumption_regions_state= consumption_regions['STATE']
generation_regions_state= generation_regions['STATE']
emissions_regions_type= emissions_regions['Producer Type']
consumption_regions_type= consumption_regions['TYPE OF PRODUCER']
generation_regions_type= generation_regions['TYPE OF PRODUCER']
emissions_regions_source= emissions_regions['Energy Source']
consumption_regions_source= consumption_regions['ENERGY SOURCE']
generation_regions_source= generation_regions['ENERGY SOURCE']
emissions_regions_CO2= emissions_regions['CO2'] # metric tons
emissions_regions_SO2= emissions_regions['SO2'] # metric tons
emissions_regions_NOx= emissions_regions['Nox'] # metric tons
consumption_regions_fuels= consumption_regions['CONSUMPTION for ELECTRICITY'] #short tons, barrels, Mcf
generation_regions_fuels= generation_regions['GENERATION (Megawatthours)'] #MWh generated
def EF(year, Fuel, emissions_regions_xxx, Electric_scale, HHV):
emission= []
emission_state = []
consumption= []
consumption_state= []
generation= []
generation_state= []
for k in range(50):
for i in range(len(emissions_regions)):
if emissions_regions_year[i]== year and emissions_regions_state[i]== states[k] and emissions_regions_type[i]== Electric_scale and emissions_regions_source[i]== Fuel:
emission.append(emissions_regions_xxx[i]*1000) # converting metric ton to kg CO2/SO2/NOx
emission_state.append(states[k])
for j in range(len(consumption_regions)):
if consumption_regions_year[j]== year and consumption_regions_state[j]==states[k] and consumption_regions_type[j]==Electric_scale and consumption_regions_source[j]==Fuel:
consumption.append(int(consumption_regions_fuels[j])*HHV) # converting original unit to mmBTU
consumption_state.append(states[k])
for m in range(len(generation_regions)):
if generation_regions_year[m]== year and generation_regions_state[m]==states[k] and generation_regions_type[m]==Electric_scale and generation_regions_source[m]==Fuel:
generation.append(int(generation_regions_fuels[m])) # MWh electricity generation from each fuel type
generation_state.append(states[k])
    dict_c={} # Dictionary map for consumption
    dict_e={} # Dictionary map for emissions
    dict_g={} # Dictionary map for generation
for c in range(len(consumption)):
dict_c[consumption_state[c]]= consumption[c]
for k in range(50):
if not states[k] in dict_c.keys():
dict_c[states[k]]=0
for e in range(len(emission)):
dict_e[emission_state[e]]= emission[e]
for k in range(50):
if not states[k] in dict_e.keys():
dict_e[states[k]]=0
for g in range(len(generation)):
dict_g[generation_state[g]]= generation[g]
for k in range(50):
if not states[k] in dict_g.keys():
dict_g[states[k]]=0
    EF_st = {k: dict_e[k] / dict_c[k] for k in dict_e.keys() & dict_c if dict_c[k]}
    GE_st = {k: dict_e[k] / dict_g[k] for k in dict_e.keys() & dict_g if dict_g[k]} # guard on generation (not consumption) to avoid division by zero
return dict_c, dict_e, dict_g, EF_st, GE_st ## mmBTU, kg CO2, MWh
EF_coal_results = EF(2017, 'Coal', emissions_regions_CO2, 'Total Electric Power Industry', HHV_Coal) #kg CO2/ mmBTU
EF_gas_results = EF(2017, 'Natural Gas', emissions_regions_CO2, 'Total Electric Power Industry', HHV_Gas) #kg CO2/ mmBTU
EF_pet_results = EF(2017, 'Petroleum', emissions_regions_CO2, 'Total Electric Power Industry', HHV_Pet) #kg CO2/ mmBTU
EF_coal_list=list(EF_coal_results[3].values()) #kg CO2/mm BTU
EF_gas_list=list(EF_gas_results[3].values()) #kg CO2/mm BTU
EF_pet_list=list(EF_pet_results[3].values()) #kg CO2/mm BTU
GE_coal_list=list(EF_coal_results[4].values()) #kg CO2/MWh
GE_gas_list=list(EF_gas_results[4].values()) #kg CO2/MWh
GE_pet_list=list(EF_pet_results[4].values()) #kg CO2/MWh
EF_coal_list = [i for i in EF_coal_list if 85 < i < 120]
EF_gas_list = [i for i in EF_gas_list if 43 < i < 63]
EF_pet_list = [i for i in EF_pet_list if 50 < i < 100]
def printinfo(list_EF):
return print( "/STD: ", round(statistics.stdev(list_EF),2),"/Mean: ",round(statistics.mean(list_EF),2),"/Median: ",round(statistics.median(list_EF),2),
"/Coef of variation %: ", round(statistics.stdev(list_EF)*100/statistics.mean(list_EF),2),
"/Relative Range: ", round((max(list_EF)-min(list_EF))/statistics.mean(list_EF),2))
#Electricity CO2 EF
bins=20
def fit_and_plot(dist,data):
params = dist.fit(data)
arg = params[:-2]
loc = params[-2]
scale = params[-1]
    x = np.linspace(min(data), 150, bins)
    bin_centers = 0.5*(x[1:] + x[:-1])
    y, x = np.histogram(data, bins=bins, density=True)
    x = (x + np.roll(x, -1))[:-1] / 2.0 # bin midpoints
    pdf = dist.pdf(bin_centers, loc=loc, scale=scale, *arg) # fitted density, kept for optional plotting
    return x, y, params, arg, loc, scale
num_simulations=1
num_reps=10000
coal_params=fit_and_plot(st.levy_stable, EF_coal_list)
gas_params=fit_and_plot(st.lognorm, EF_gas_list)
pet_params=fit_and_plot(st.johnsonsu, EF_pet_list)
def EGEF_state(state):
state_stats = []
#electricty_generation_total_state = generation_regions[generation_regions['STATE']==state][generation_regions['YEAR']==2017][generation_regions['TYPE OF PRODUCER']=='Total Electric Power Industry'][generation_regions['ENERGY SOURCE']=='Total']['GENERATION (Megawatthours)']
electricty_generation_total_state = generation_regions[
(generation_regions['STATE']==state) &
(generation_regions['YEAR']==2017) &
(generation_regions['TYPE OF PRODUCER']=='Total Electric Power Industry') &
(generation_regions['ENERGY SOURCE']=='Total')]['GENERATION (Megawatthours)']
for i in range(num_simulations):
# Choose random inputs for the uncertain inputs: Coal, Natural gas, Petroleum.
coal_EF_rd = st.levy_stable.rvs(alpha=coal_params[2][0], beta=coal_params[2][1], loc= coal_params[2][2] , scale= coal_params[2][3] , size=num_reps)
gas_EF_rd = st.lognorm.rvs(s=gas_params[2][0], loc= gas_params[2][1] , scale= gas_params[2][2] , size=num_reps)
pet_EF_rd = st.johnsonsu.rvs(a=pet_params[2][0], b=pet_params[2][1], loc= pet_params[2][2] , scale= pet_params[2][3] , size=num_reps)
        state_stats.append((coal_EF_rd*EF_coal_results[0][state] + gas_EF_rd*EF_gas_results[0][state] + pet_EF_rd*EF_pet_results[0][state])*2.20462/float(electricty_generation_total_state)) # EF_Electricity (lb/MWh), average distribution of fuels in the U.S.
data_new= state_stats
return data_new[0]
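# Added demo (a minimal sketch; "NY" is an assumed state code present in US_states.csv):
# draw Monte Carlo samples of the state's electricity emission factor and summarize them.
if __name__ == "__main__":
    samples = EGEF_state("NY") # lb CO2/MWh, num_reps random draws
    print("mean EF:", np.mean(samples))
    print("5th-95th percentile:", np.percentile(samples, [5, 95]))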
09292afd918a49c812addd31394b503bc8e8b012 | 16,757 | py | Python | Core_Functions.py | CadellVDH/gardinette | e8200a0f8369e4a6d5a5a73c11d04282e98a927f | ["MIT"] | 3 | 2020-10-31T23:19:37.000Z | 2021-04-06T06:57:50.000Z | Core_Functions.py | CadellVDH/gardinette | e8200a0f8369e4a6d5a5a73c11d04282e98a927f | ["MIT"] | null | null | null | Core_Functions.py | CadellVDH/gardinette | e8200a0f8369e4a6d5a5a73c11d04282e98a927f | ["MIT"] | 2 | 2020-11-01T01:10:16.000Z | 2020-11-07T23:01:31.000Z |
import os #tools for working with the CLI
import logging #needed for logging
import pigpio #needed for GPIO control
import time #needed for function timing
import threading #needed for OLED data continuous updating
import csv #needed for temporary data logging
import config as global_vars #import global variable initialization module
from pigpio_dht import DHT22 #temp and humidity sensor
from datetime import datetime #needed for control timing
from CalibrationAndDiagnostics.helpers import * #import helper functions and classes
from configparser import ConfigParser #explicit import: ConfigParser is used below (it may also be re-exported by helpers)
##Create a class for handling variable target values, including default target values
class target:
'This class creates and accesses the Target.ini file'
#Get current directory for target value file
PROJECT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
PATH = "%s/Data/Target.ini" % PROJECT_DIRECTORY
##Create an initialization function for creating a default pinout file
def __init__(self):
        if not os.path.isfile(target.PATH): #check if file already exists
self.Target = open(target.PATH, "w+") #create file if none exists
self.Target.close()
self.configfile = open(target.PATH, "w+")
self.Config = ConfigParser()
self.Config.add_section('Water')
self.Config.add_section('Soil')
self.Config.add_section('Light')
self.Config.add_section('Temp')
self.Config.add_section('Humidity')
self.Config.set('Light', 'Hours', '16') #set value of lighting hours in ini file
self.Config.set('Light', 'Time', '12:00') #set value of lighting start time in ini file
self.Config.set('Water', 'Water', '12:00') #set value of water start time in ini file
self.Config.set('Soil', 'Soil', '25') #set value of soil moisture in ini file
self.Config.set('Temp', 'Temp', '70') #set value of temperature in ini file
self.Config.set('Humidity', 'Humidity', '55') #set value of humidity in ini file
self.Config.write(self.configfile) #save ini file
self.configfile.close()
#Create a function for getting target values from the Target.ini file
#param - parameter to be adjusted (Water, Soil, Hours, etc)
#parent - config section to look in (Light, Water, Soil, etc)
def getTarget(self, param, parent=None):
self.Config = ConfigParser()
self.Config.read(target.PATH)
try:
if parent == None:
return self.Config.get(param, param) #return target based on Target.ini file
else:
return self.Config.get(parent, param) #return target based on Target.ini file
except Exception as e:
logging.error("Failed to get target value: %s" % e)
return None
#Create a function for setting values in the Target.ini file
#param - parameter to be adjusted (Water, Soil, Hours, etc)
#value - new target value to be added
#parent - config section to look in (Light, Water, Soil, etc)
def setTarget(self, param, value, parent=None):
self.Config = ConfigParser()
self.Config.read(target.PATH)
self.configfile = open(target.PATH, "w+")
try:
if parent == None:
self.Config.set(param, param, str(value)) #if param has no parent, param is the parent and also the section
else:
                self.Config.set(parent, param, str(value)) #otherwise, parent is the section
except Exception as e:
logging.error("Failed to set target value: %s" % e)
return 'Failed'
with open(target.PATH, 'w') as configfile: #open pinout.ini as file object
self.Config.write(configfile) #save ini file
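##Added usage sketch (illustrative only, not executed): reading and updating a target value
#    t = target()
#    hours = t.getTarget("Hours", parent="Light") #returns the stored string, e.g. "16"
#    t.setTarget("Hours", 18, parent="Light") #persists the new value to Target.ini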
##Create a class which displays key data periodically
class dataGlance(threading.Thread):
#Create a function to initialize threads and data variables
def __init__(self):
threading.Thread.__init__(self)
self.pins = pinout() #initialize pinout
self.oled = oled_utility(128, 32, self.pins.getAddr('OLED')) #initialize OLED display
#Create a function to run the thread
def run(self):
#Create a loop to loop through data to display
while global_vars.data_glance_exit_flag == False:
self.oled.write_center(global_vars.current_temp, title="Temp") #write temp
for i in range(0, 1000): #Create controlled delay which intermittently checks for exit flag
if global_vars.data_glance_exit_flag == False:
i = i + 1
time.sleep(0.01)
else:
break
self.oled.write_center(global_vars.current_humidity, title="Humidity") #write humidity
for i in range(0, 1000): #Create controlled delay which intermittently checks for exit flag
if global_vars.data_glance_exit_flag == False:
i = i + 1
time.sleep(0.01)
else:
break
self.oled.write_center(global_vars.current_soil, title="Soil") #write soil
for i in range(0, 1000): #Create controlled delay which intermittently checks for exit flag
if global_vars.data_glance_exit_flag == False:
i = i + 1
time.sleep(0.01)
else:
break
##Create a class which collects and stores data as fast as the sensors allow
class dataCollect(threading.Thread):
#Create a function to initialize thread and data variables
def __init__(self, TEMP, FLOAT):
threading.Thread.__init__(self)
self.FLOAT = FLOAT
#Initialize DHT 22
self.DHT_SENSOR = DHT22(TEMP)
#initialize pigpio
self.pi = pigpio.pi() #Initialize pigpio
#Attempt to initialize sensor data
try:
[global_vars.current_temp, global_vars.current_humidity] = getTempHumidity(self.DHT_SENSOR)
global_vars.current_soil = getSoilMoisture()
global_vars.current_float = getFloat(self.pi, self.FLOAT)
except Exception as e:
logging.error("Failed one or more sensor readings: %s" % e) #exception block to prevent total failure if any sensor fails a reading
#Reinitialize sensor with higher timeout
self.DHT_SENSOR = DHT22(TEMP, timeout_secs=5)
#Create a function to run the thread
def run(self):
timer = 0 #create a timer for logging
prev_light = global_vars.currently_lighting #store initial value of light
#temporary code to make a csv of sensor data
PROJECT_DIRECTORY = os.path.dirname(os.path.realpath(__file__)) #Get current directory for log files and for pin file
path = "%s/Data/SensorData.csv" % PROJECT_DIRECTORY
prev_log_time = int(time.strftime("%M")) #store the minute that data is logged
#Create a loop to constantly check and update the sensor data values
while True:
#Get current sensor values
try:
[global_vars.current_temp, global_vars.current_humidity] = getTempHumidity(self.DHT_SENSOR)
global_vars.current_soil = getSoilMoisture()
global_vars.current_float = getFloat(self.pi, self.FLOAT)
except Exception as e:
logging.error("Failed one or more sensor readings: %s" % e) #exception block to prevent total failure if any sensor fails a reading
#Check if it has been 5 minutes since last log
if int(time.strftime("%M")) >= prev_log_time + 5 or (prev_log_time >= 56 and int(time.strftime("%M")) >= 5-(60-prev_log_time) and int(time.strftime("%M")) < 10):
prev_log_time = int(time.strftime("%M")) #reset log time
events = [] #create empty list of events
            #check if a pump occurred, then reset pump flag if it did
if global_vars.pumped == True:
events.append("Pumped") #add "pumped" to events list
global_vars.pumped = False #reset pump flag
#check if lighting status changed
if global_vars.currently_lighting != prev_light:
#determine whether lights were turned on or off based on initial state
if prev_light == True:
events.append("Light Off")
else:
events.append("Light On")
prev_light = global_vars.currently_lighting #set previous lighting to the current value
data_row = [datetime.now(), global_vars.current_temp, global_vars.current_humidity, global_vars.current_soil]
data_row.extend(events)
#temporary code to write to csv
with open(path, mode='a') as data:
data_writer = csv.writer(data)
data_writer.writerow(data_row)
time.sleep(5) #give the sensors a 5 second rest
##Create a class which adjusts target parameters based on the OLED menu and stores the values
class targetAdjust(threading.Thread):
#Create a function to initialize the thread and target object
def __init__(self):
threading.Thread.__init__(self)
self.target = target() #create instance of target object
#Create function to run the thread, which allows the user to adjust each parameter and stores it to the Target.ini file
def run(self):
[self.user_choice, self.node] = target_select()
if self.user_choice != None: #if user selected a value
if self.node.parent.option == "Light": #If the parent is light, be sure to include it in the ini file update
self.target.setTarget(self.node.option, self.user_choice, parent="Light")
else: #otherwise include only the parameter and value
self.target.setTarget(self.node.option, self.user_choice)
time.sleep(1) #sleep 1 second to prevent user from entering into target adjustment mode again
##Create a class responsible for all aspects of actuator control
class actuatorControl(threading.Thread):
    #Create a function to initialize the thread and all necessary object instances
def __init__(self, pi, PUMP, LIGHT, FAN_ONE, FAN_TWO):
threading.Thread.__init__(self)
self.target = target() #create instance of target object
self.pi = pi
        #initialize all pin number variables
self.pump = PUMP
self.light = LIGHT
self.fan_one = FAN_ONE
self.fan_two = FAN_TWO
    #Create a function to calculate end time based on start time and hours
def endTime(self, start, hours):
minutes = int(60 * int(hours)) #calculate number of minutes in case of decimal hours
remaining_minutes = minutes % 60 #calculate number of non-whole hour minutes
whole_hours = (minutes-remaining_minutes) / 60 #calculate number of whole number hours
start_hour = int(start[0:2]) #extract starting hour
start_minute = int(start[3:5]) #extract starting minute
#first add the number of hours and minutes
end_hour = int(start_hour + whole_hours)
end_minute = int(start_minute + remaining_minutes)
#check if hours are over 23 or minutes are over 59 then subtract 24 and 60 respectively
if end_hour > 23:
end_hour = end_hour - 24
if end_minute > 59:
end_minute = end_minute - 60
#format the string appropriately
if end_hour < 10:
end_hour = "0%s" % end_hour #add 0 to beginning if < 10
if end_minute < 10:
end_minute = "0%s" % end_minute #add 0 to beginning if < 10
return "{}:{}".format(end_hour, end_minute) #return formatted string
#Create a function to run the thread
def run(self):
float_down = 0 #track how long float_sensor is down
        #Create an infinite loop for controlling the pump indefinitely
while True:
#LIGHT CONTROL
try:
current_time = time.strftime("%H:%M") #store current time
target_time = self.target.getTarget("Time", parent="Light") #store target time
target_hours = self.target.getTarget("Hours", parent="Light") #store number of hours to run
end_time = self.endTime(target_time, target_hours) #calculate end time
#turn light on if it passes checks necessary to be within time range
if current_time >= target_time and current_time < end_time:
self.pi.write(self.light, 1) #turn light on
global_vars.currently_lighting = True
elif current_time >= target_time and end_time<target_time:
self.pi.write(self.light, 1) #turn light on
global_vars.currently_lighting = True
elif current_time<end_time and end_time<target_time:
self.pi.write(self.light, 1) #turn light on
global_vars.currently_lighting = True
elif target_time == end_time:
self.pi.write(self.light, 1) #turn light on
global_vars.currently_lighting = True
else:
self.pi.write(self.light, 0) #turn light off otherwise
global_vars.currently_lighting = False
except Exception as e:
logging.error("Failed to control light, reattempting: %s" % e)
time.sleep(10)
#PUMP CONTROL
try:
current_time = time.strftime("%H:%M") #store current time
target_time = self.target.getTarget("Water") #store target time
#if it's time to water, begin other necessary checks
if current_time == target_time:
                    if global_vars.current_float != 0: #if the float sensor is up, it's fine to water
float_down = 0 #reset count of times float has been down
target_soil = self.target.getTarget("Soil") #get target soil moisture value
                        #run the pump until the timer hits 40 seconds or the current soil moisture exceeds the target
t = 0 #reset timer
while t <= 40 and global_vars.current_soil<int(target_soil):
                            global_vars.pumped = True #set pumped flag to true to indicate the pump occurred
self.pi.write(self.pump, 1) #run pump
t = t + 1 #increase timer
time.sleep(1) #1 second delay
self.pi.write(self.pump, 0) #turn pump back off
elif global_vars.current_float == 0 and float_down < 4: #continue pumping as long as pump counter is less than 4 (4 days)
float_down = float_down + 1 #increment counter for each watering
target_soil = self.target.getTarget("Soil") #get target soil moisture value
                        #run the pump until the timer hits 40 seconds or the current soil moisture exceeds the target
t = 0 #reset timer
while t <= 40 and global_vars.current_soil<int(target_soil):
                            global_vars.pumped = True #set pumped flag to true to indicate the pump occurred
self.pi.write(self.pump, 1) #run pump
t = t + 1 #increase timer
time.sleep(1) #1 second delay
self.pi.write(self.pump, 0) #turn pump back off
except Exception as e:
logging.error("Failed to control pump: %s" % e)
#FAN CONTROL
try:
target_humidity = int(self.target.getTarget("Humidity")) #get current target humidity
target_temp = int(self.target.getTarget("Temp")) #get current target temp
#If either temp or humidity is too high, turn the fans on (or if temp = 0, then turn fans on to be safe)
if global_vars.current_temp>target_temp or global_vars.current_humidity>target_humidity or global_vars.current_temp == 0:
self.pi.write(self.fan_one, 1)
self.pi.write(self.fan_two, 1)
                else: #otherwise make sure they're off
self.pi.write(self.fan_one, 0)
self.pi.write(self.fan_two, 0)
except Exception as e:
logging.error("Failed to control temp or humidity: %s" % e)
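##Added example (hedged, exercises only the time arithmetic in endTime()):
#a 16 hour photoperiod starting at "12:00" wraps past midnight and ends at "04:00",
#i.e. endTime("12:00", 16) returns "04:00" (960 minutes = 16 whole hours, 28:00 -> 04:00)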
092b8a66d28f45edb39d1f480fa99961ca8a29ee | 257 | py | Python | en/en.py | iremlaya/brainhack17 | 95e0374785dd7aa7856cac149e04c2949d3ee852 | ["MIT"] | 2 | 2017-06-04T10:01:49.000Z | 2017-10-23T23:40:09.000Z | en/en.py | iremlaya/brainhack17 | 95e0374785dd7aa7856cac149e04c2949d3ee852 | ["MIT"] | 3 | 2017-10-23T22:54:43.000Z | 2018-10-02T15:57:22.000Z | en/en.py | iremlaya/brainhack17 | 95e0374785dd7aa7856cac149e04c2949d3ee852 | ["MIT"] | 2 | 2018-10-02T16:00:23.000Z | 2018-10-02T18:05:27.000Z |
nums = list(map(int, input().split(" ")))
sums = []
for j in range(0, 5):
    total = 0
    for i in range(0, 4):
        total += nums[i]
    sums.append(total)
    temp = nums[0]
    del nums[0]
    nums.append(temp)
print("{} {}".format(min(sums), max(sums)))
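# Added note (hedged): the loop rotates nums once per pass, so each pass sums a
# different group of four of the five numbers; for input "1 2 3 4 5" it prints "10 14".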
092d7b160711ea95b44c682ad660b8ddb0e66742 | 352 | py | Python | src/test_instrumenter/if_expr.py | joeldentici/python_stepper | ab32c62d0d0333ad901d7329fb198c7a23988007 | ["MIT"] | 1 | 2020-11-29T20:00:39.000Z | 2020-11-29T20:00:39.000Z | src/test_instrumenter/if_expr.py | joeldentici/python_stepper | ab32c62d0d0333ad901d7329fb198c7a23988007 | ["MIT"] | null | null | null | src/test_instrumenter/if_expr.py | joeldentici/python_stepper | ab32c62d0d0333ad901d7329fb198c7a23988007 | ["MIT"] | null | null | null |
import instrumenter
import unittest
class TestIfExpr(unittest.TestCase):
def test_if_expr(self):
src = """
5 if x > 7 else 10
""".strip()
expected = """
stepper_lib.if_expr(x > 7, 5, 10)
""".strip()
actual = instrumenter.instrument(src, "ifexpr").strip()
self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
092e872c851914a08cabf8af551b3897a9d46816 | 1,394 | py | Python | Server/Python/src/dbs/dao/Oracle/Run/SummaryList.py | vkuznet/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | ["Apache-2.0"] | 8 | 2015-08-14T04:01:32.000Z | 2021-06-03T00:56:42.000Z | Server/Python/src/dbs/dao/Oracle/Run/SummaryList.py | yuyiguo/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | ["Apache-2.0"] | 162 | 2015-01-07T21:34:47.000Z | 2021-10-13T09:42:41.000Z | Server/Python/src/dbs/dao/Oracle/Run/SummaryList.py | yuyiguo/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | ["Apache-2.0"] | 16 | 2015-01-22T15:27:29.000Z | 2021-04-28T09:23:28.000Z |
"""
This module provides Run.SummaryList data access object
"""
from WMCore.Database.DBFormatter import DBFormatter
from dbs.utils.dbsExceptionHandler import dbsExceptionHandler
class SummaryList(DBFormatter):
def __init__(self, logger, dbi, owner=""):
DBFormatter.__init__(self, logger, dbi)
        self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
self.sql = """SELECT MAX(LUMI_SECTION_NUM) AS MAX_LUMI
FROM {owner}FILE_LUMIS FL""".format(owner=self.owner)
def execute(self, conn, dataset="", run_num=-1, transaction=False):
binds = dict(run_num=run_num)
wheresql = "WHERE RUN_NUM=:run_num"
if dataset:
joins = """JOIN {owner}FILES FS ON FS.FILE_ID=FL.FILE_ID
JOIN {owner}DATASETS DS ON FS.DATASET_ID=DS.DATASET_ID""".format(owner=self.owner)
wheresql = "{wheresql} AND DS.DATASET=:dataset".format(wheresql=wheresql)
sql = "{sql} {joins} {wheresql}".format(sql=self.sql, joins=joins, wheresql=wheresql)
binds.update(dataset=dataset)
else:
sql = "{sql} {wheresql}".format(sql=self.sql, wheresql=wheresql)
cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
result = []
for cursor in cursors:
result.extend(self.formatCursor(cursor, size=100))
return result
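# Added illustration (hedged): with a dataset bind and an assumed owner prefix
# "CMS_DBS3.", execute() composes SQL of the form
#   SELECT MAX(LUMI_SECTION_NUM) AS MAX_LUMI FROM CMS_DBS3.FILE_LUMIS FL
#   JOIN CMS_DBS3.FILES FS ON FS.FILE_ID=FL.FILE_ID
#   JOIN CMS_DBS3.DATASETS DS ON FS.DATASET_ID=DS.DATASET_ID
#   WHERE RUN_NUM=:run_num AND DS.DATASET=:dataset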
0933f309561e041149760fb500eeef98ca5f1483 | 4,650 | py | Python | PyGame/racey.py | esserafael/PythonStuff | 97900e48f989b64ccde02f5d676f7f823ec0eed7 | ["MIT"] | 1 | 2020-08-03T00:19:05.000Z | 2020-08-03T00:19:05.000Z | PyGame/racey.py | esserafael/PythonStuff | 97900e48f989b64ccde02f5d676f7f823ec0eed7 | ["MIT"] | 1 | 2021-08-23T20:43:21.000Z | 2021-08-23T20:43:21.000Z | PyGame/racey.py | esserafael/python-learning-stuff | 97900e48f989b64ccde02f5d676f7f823ec0eed7 | ["MIT"] | null | null | null |
import pygame
import time
import random
pygame.init()
# display
display_width = 1024
display_height = 768
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('Relâmpago Marquinhos')
# colors
black = (0,0,0)
white = (255,255,255)
red = (255,0,0)
# car
carImg = pygame.image.load('racecar.png')
car_width = carImg.get_width()
car_height = carImg.get_height()
# boundaries
display_x_boundary = display_width - car_width
display_y_boundary = display_height - car_height
clock = pygame.time.Clock()
def car(x,y):
gameDisplay.blit(carImg, (x,y))
def draw_block(block_x, block_y, block_width, block_height, color):
pygame.draw.rect(gameDisplay, color, [block_x, block_y, block_width, block_height])
def text_objects(text, font):
textSurface = font.render(text, True, black)
return textSurface, textSurface.get_rect()
def message_display(text):
largeText = pygame.font.Font('freesansbold.ttf',50)
TextSurf, TextRect = text_objects(text, largeText)
TextRect.center = (round(display_width/2),round(display_height/2))
gameDisplay.blit(TextSurf, TextRect)
pygame.display.update()
def explode():
message_display('Explosion!')
def game_loop():
car_x = round((display_width * 0.45))
car_y = round((display_height * 0.8))
car_x_change = 0
car_y_change = 0
car_speed = 10
block_width = 100
block_height = 100
block_x = random.randrange(0, display_width - block_width)
block_y = -block_height
block_speed = 7
gameExit = False
crashed = False
while not gameExit:
gameDisplay.fill(white)
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameExit = True
#print(event)
############################
if not crashed:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
car_x_change = -car_speed
elif event.key == pygame.K_RIGHT:
car_x_change = car_speed
elif event.key == pygame.K_UP:
car_y_change = -car_speed
elif event.key == pygame.K_DOWN:
car_y_change = car_speed
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
car_x_change = 0
if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
car_y_change = 0
#######################
car_x += car_x_change
car_y += car_y_change
block_y += block_speed
if car_x > display_x_boundary:
car_x = display_x_boundary
elif car_x < 0:
car_x = 0
if car_y > display_y_boundary:
car_y = display_y_boundary
elif car_y < 0:
car_y = 0
if car_y < block_y + block_height and car_y > block_y:
#print('y crossover')
if car_x > block_x and car_x < block_x + block_width or car_x + car_width > block_x and car_x + car_width < block_x + block_width:
#print('x crossover')
car_y = block_y + block_height
crashed = True
if car_y >= display_y_boundary:
explode()
if crashed:
            if -1 < car_x_change < 1:
car_x_change = 0
crashed = False
car_x_change = car_x_change / 1.05
car_y_change = round(car_speed / 1.1)
            #print(car_x_change)
            #print(car_speed)
# print(car_y_change)
car(car_x,car_y)
draw_block(block_x, block_y, block_width, block_height, black)
#print("Blk X: {} - {}; Blk Y: {} - {}".format(block_x, (block_x + block_width), block_y, (block_y + block_height)))
if block_y > display_height:
block_y = 0 - block_height
block_x = random.randrange(0, display_width - block_width)
#print("Car X: {} - {}; Car Y: {} - {}".format(car_x, (car_x + car_width), car_y, (car_y + car_height)))
#print(keys_disabled)
pygame.display.update()
clock.tick(60)
game_loop()
pygame.quit()
quit()
0934154c75f6f3525bf51eb76b65fd596ded22e6 | 9,521 | py | Python | src/tuco/base.py | eatfirst/python-tuco | 47e58a3ebaf3f7e8ac3e2ba18dfaf9ce993f82db | ["MIT"] | 8 | 2018-01-05T15:25:45.000Z | 2018-02-19T02:41:43.000Z | src/tuco/base.py | eatfirst/python-tuco | 47e58a3ebaf3f7e8ac3e2ba18dfaf9ce993f82db | ["MIT"] | 7 | 2018-01-04T15:51:32.000Z | 2021-04-28T21:54:57.000Z | src/tuco/base.py | eatfirst/python-tuco | 47e58a3ebaf3f7e8ac3e2ba18dfaf9ce993f82db | ["MIT"] | null | null | null |
"""Base classes to be used in FSM."""
import copy
from datetime import datetime
from typing import Dict, Iterator, List, Tuple, Type # noqa
import pytz
from tuco.exceptions import (
TucoAlreadyLockedError,
TucoEventNotFoundError,
TucoInvalidStateChangeError,
TucoInvalidStateHolderError,
)
from tuco.locks import MemoryLock
from tuco.locks.base import BaseLock # noqa
from tuco.meta import FSMBase
from tuco.properties import Event, FinalState, State, Timeout
__all__ = ("FSM",)
mockable_utcnow = datetime.utcnow # Easier to write tests
class FSM(metaclass=FSMBase):
"""Class that handle event transitions.
Your state machines should extend from this.
"""
#: The default initial state is "new" but can be overridden
initial_state = "new"
state_attribute = "current_state"
date_attribute = "current_state_date"
id_field = "id"
fatal_state = "fatal_error"
lock_class = MemoryLock # type: Type[BaseLock]
_states = None # type: Dict[str, State]
def __init__(self, container_object) -> None:
"""Initialize the container object with the initial state."""
self.container_object = container_object
for field in (self.state_attribute, self.date_attribute, self.id_field):
if not hasattr(container_object, field):
raise TucoInvalidStateHolderError(
"Required field {!r} not found inside {!r}.".format(field, container_object)
)
if self.current_state is None:
self.current_state = self.initial_state
self.lock = self.lock_class(self, self.id_field)
def __enter__(self) -> "FSM":
"""Lock the state machine."""
self.lock.lock()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""If TucoAlreadyLockedError did not throw, unlock the machine."""
if exc_type and issubclass(exc_type, TucoAlreadyLockedError):
return
self.lock.unlock()
def __repr__(self) -> str:
"""Basic representation."""
return "<{} - current_state {!r} with holder {} - ID {!r}>".format(
self.__class__.__name__,
self.current_state,
self.container_object.__class__.__name__,
getattr(self.container_object, self.id_field),
)
@property
def current_time(self) -> datetime:
"""Return utcnow and should be extended if you care about time zone or something else."""
return mockable_utcnow()
@property
def current_state_date(self) -> datetime:
"""Return current date stored in object."""
return getattr(self.container_object, self.date_attribute)
@property
def current_state(self) -> str:
"""Return the current state stored in object."""
return getattr(self.container_object, self.state_attribute)
@current_state.setter
def current_state(self, new_state) -> None:
"""Set a state on container object."""
call_on_change = bool(self.current_state)
old_state = copy.copy(self.container_object)
if new_state != self.fatal_state:
if not self.state_allowed(new_state):
raise TucoInvalidStateChangeError(
"Old state {!r}, new state {!r}.".format(self.current_state, new_state)
)
setattr(self.container_object, self.state_attribute, new_state)
setattr(self.container_object, self.date_attribute, self.current_time)
for command in self.current_state_instance.on_enter:
command(self.container_object)
else:
setattr(self.container_object, self.state_attribute, new_state)
setattr(self.container_object, self.date_attribute, self.current_time)
if call_on_change:
self._call_on_change(old_state, self.container_object)
def state_allowed(self, state_name) -> bool:
"""Check if the transition to the new state is allowed."""
if self.current_state is None and state_name == self.initial_state:
return True
if isinstance(self.current_state_instance, FinalState):
return False
if self.current_state_instance.timeout and self.current_state_instance.timeout.target_state == state_name:
return True
if any(event for event in self.possible_events if event.target_state == state_name):
return True
current_state = self.current_state_instance
if current_state.error and current_state.error.target_state == state_name:
return True
for event in current_state.events:
if event.error and event.error.target_state == state_name:
return True
return False
@property
def current_state_instance(self) -> State:
"""Return the current `State` instance."""
return self._states[self.current_state]
@property
def possible_events(self) -> List[Event]:
"""Return all possible events for the current state."""
return self.possible_events_from_state(self.current_state)
@classmethod
def possible_events_from_state(cls, state_name) -> List[Event]:
"""Return all possible events from a specific state.
:param state_name: State to check
"""
state = cls._states[state_name]
return getattr(state, "events", [])
def _get_event(self, event_name) -> Event:
"""Get an event inside current state based on it's name."""
for event in self.possible_events:
if event.event_name == event_name:
return event
raise TucoEventNotFoundError(
"Event {!r} not found in {!r} on current state {!r}".format(
event_name, [event.event_name for event in self.possible_events], self.current_state
)
)
def event_allowed(self, event_name) -> bool:
"""Check if is possible to run an event.
:param event_name: Event to check.
"""
try:
self._get_event(event_name)
except TucoEventNotFoundError:
return False
return True
def _trigger_error(self, event) -> None:
"""Search for an error handler inside event, and then inside state."""
if event.error:
error = event.error
else:
error = self._states[self.current_state].error
if not error:
return
for command in error.commands:
command(self.container_object)
self.current_state = error.target_state
def trigger(self, event_name, *args, **kwargs) -> bool:
"""Trigger an event and call its commands with specified arguments..
:param event_name: Event to execute.
"""
event = self._get_event(event_name)
for command in event.commands:
try:
return_value = command(self.container_object, *args, **kwargs)
except Exception as e:
self._call_on_error(e, event.target_state)
raise
if not return_value:
self._trigger_error(event)
return False
self.current_state = event.target_state
return True
def trigger_timeout(self) -> bool:
"""Trigger timeout if it's possible."""
timeout = self.current_state_instance.timeout
if not timeout:
return False
if datetime.utcnow().replace(tzinfo=pytz.UTC) < (self.current_state_date + timeout.timedelta):
return False
for command in timeout.commands:
try:
command(self.container_object)
except Exception as e:
self._call_on_error(e, timeout.target_state)
raise
self.current_state = timeout.target_state
return True
@classmethod
def get_all_states(cls) -> Dict[str, State]:
"""List all states for this state machine."""
return cls._states
@classmethod
def get_all_timeouts(cls) -> Iterator[Tuple[str, Timeout]]:
"""List all configured timeouts for this state machine."""
for state_name, state in cls._states.items():
if isinstance(state, FinalState) or not state.timeout:
continue
yield (state_name, state.timeout)
@classmethod
    def get_all_finals(cls) -> Iterator[str]:  # yields the *names* of final states
"""List all configured final states for this state machine."""
for state_name, state in cls._states.items():
if isinstance(state, FinalState):
yield state_name
def _call_on_change(self, old_state, new_state) -> None:
"""If on_change function exists, call it.
:param old_state: A shallow copy of the holder object.
:param new_state: The changed version of the object holder.
"""
function = getattr(self, "_on_change_event", None)
if function:
function(old_state, new_state)
def _call_on_error(self, exception, new_state) -> None:
"""If on_error function exists, call it."""
function = getattr(self, "_on_error_event", None)
if function:
function(self.current_state, new_state, exception)
@classmethod
def generate_graph(cls, file_format="svg") -> str:
"""Generate a SVG graph."""
from .graph_builder import generate_from_class
return generate_from_class(cls, file_format)
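# Added sketch (hedged): the container-object contract that __init__ enforces.
# Any state holder must expose the attributes named by id_field, state_attribute
# and date_attribute; the FSM subclass and event name below are assumptions,
# since tuco.properties' declaration syntax is not shown in this file.
#
#     class Order:
#         id = 1
#         current_state = None # FSM fills in initial_state ("new") on construction
#         current_state_date = None
#
#     with OrderFSM(Order()) as fsm: # __enter__ acquires the lock
#         fsm.trigger("pay") # runs the event's commands, then changes state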
093766ed7bbbb3c00312fb45947b1f3698212662 | 2,342 | py | Python | rest_framework_apidoc/apidoc.py | jespino/django-rest-framework-apidoc | 94e47811f96058bcfe33e37e433eda065096c053 | ["BSD-3-Clause"] | 3 | 2015-04-23T02:06:26.000Z | 2017-12-06T12:52:04.000Z | rest_framework_apidoc/apidoc.py | jespino/django-rest-framework-apidoc | 94e47811f96058bcfe33e37e433eda065096c053 | ["BSD-3-Clause"] | null | null | null | rest_framework_apidoc/apidoc.py | jespino/django-rest-framework-apidoc | 94e47811f96058bcfe33e37e433eda065096c053 | ["BSD-3-Clause"] | null | null | null |
from django.conf import settings
from rest_framework.settings import import_from_string
from .mixins import FileContentMixin, DocStringContentMixin, MarkupProcessMixin, NoProcessMixin, SafeProcessMixin
APIDOC_DEFAULT_DOCUMENTER_CLASSES = getattr(
settings,
'APIDOC_DEFAULT_DOCUMENTER_CLASSES',
['rest_framework_apidoc.apidoc.MDDocStringsDocumenter']
)
def get_view_description(view_cls, html=False, request=None):
documenters = []
if hasattr(view_cls, 'documenter_classes'):
for cls in view_cls.documenter_classes:
documenters.append(cls())
else:
for cls in APIDOC_DEFAULT_DOCUMENTER_CLASSES:
documenter_class = import_from_string(cls, "APIDOC_DEFAULT_DOCUMENTER_CLASS")
documenters.append(documenter_class())
for documenter in documenters:
description = documenter.get_description(view_cls, html, request)
if description:
return description
return ""
class Documenter(object):
def get_description(self, view_cls, html=True, request=None):
if html:
return self.process(self.get_content(view_cls, html, request))
        return self.get_content(view_cls, html, request) # pass the request through instead of discarding it
class RSTFilesDocumenter(Documenter, FileContentMixin, MarkupProcessMixin):
extension = ".rst"
markup = "restructuredtext"
class RSTDocStringsDocumenter(Documenter, DocStringContentMixin, MarkupProcessMixin):
markup = "restructuredtext"
class MDFilesDocumenter(Documenter, FileContentMixin, MarkupProcessMixin):
extension = ".md"
markup = "markdown"
class MDDocStringsDocumenter(Documenter, DocStringContentMixin, MarkupProcessMixin):
markup = "markdown"
class TextileFilesDocumenter(Documenter, FileContentMixin, MarkupProcessMixin):
extension = ".textile"
markup = "textile"
class TextileDocStringsDocumenter(Documenter, DocStringContentMixin, MarkupProcessMixin):
markup = "textile"
class TxtFilesDocumenter(Documenter, FileContentMixin, NoProcessMixin):
extension = ".txt"
class TxtDocStringsDocumenter(Documenter, DocStringContentMixin, NoProcessMixin):
pass
class HtmlFilesDocumenter(Documenter, FileContentMixin, SafeProcessMixin):
extension = ".html"
class HtmlDocStringsDocumenter(Documenter, DocStringContentMixin, SafeProcessMixin):
pass
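# Added usage sketch (the view class is hypothetical): attach documenters to a
# DRF view; get_view_description() returns the first non-empty description
# produced by the listed classes, falling back to the APIDOC default setting.
#
#     class MyViewSet(viewsets.ModelViewSet):
#         documenter_classes = [MDFilesDocumenter, MDDocStringsDocumenter]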
09382774afac9a4d300d0854f84c2849faef3aed | 541 | py | Python | malcolm/modules/ADAndor/parts/andordriverpart.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | ["Apache-2.0"] | null | null | null | malcolm/modules/ADAndor/parts/andordriverpart.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | ["Apache-2.0"] | null | null | null | malcolm/modules/ADAndor/parts/andordriverpart.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | ["Apache-2.0"] | null | null | null |
from malcolm.modules.ADCore.parts import ExposureDetectorDriverPart
class AndorDriverPart(ExposureDetectorDriverPart):
def setup_detector(self, child, completed_steps, steps_to_do, params=None):
fs = super(AndorDriverPart, self).setup_detector(
child, completed_steps, steps_to_do, params)
child.wait_all_futures(fs)
# Need to reset acquirePeriod as it's sometimes wrong
fs = child.acquirePeriod.put_value_async(
child.exposure.value + self.readout_time.value)
return fs
093956eeb27c748398dcb88a46fcaaac217e34f7 | 2,674 | py | Python | setup.py | addisonElliott/pyqt5AutoCompile | 5945a85f6230a97f3467c79441d87f5f9cdb478d | ["MIT"] | 8 | 2018-10-31T04:53:18.000Z | 2020-11-18T18:47:48.000Z | setup.py | addisonElliott/pyqt5AutoCompile | 5945a85f6230a97f3467c79441d87f5f9cdb478d | ["MIT"] | 24 | 2020-03-27T11:38:39.000Z | 2020-10-23T07:12:41.000Z | setup.py | addisonElliott/pyqt5AutoCompile | 5945a85f6230a97f3467c79441d87f5f9cdb478d | ["MIT"] | 3 | 2019-05-08T16:41:26.000Z | 2021-08-23T12:37:55.000Z |
import os
import re
from setuptools import setup
currentPath = os.path.abspath(os.path.dirname(__file__))
def find_version(filename):
with open(filename, 'r') as fh:
# Read first 2048 bytes, __version__ string will be within that
data = fh.read(2048)
match = re.search(r'^__version__ = [\'"]([\w\d.\-]*)[\'"]$', data, re.M)
if match:
return match.group(1)
raise RuntimeError('Unable to find version string.')
# Get the long description from the README file
with open(os.path.join(currentPath, 'README.md'), 'r') as f:
longDescription = f.read()
longDescription = '\n' + longDescription
REQUIREMENTS = {
'core': [
'PyQt5',
'click',
'pyyaml',
],
'test': [
'pytest',
'pytest-cov',
],
'dev': [
# 'requirement-for-development-purposes-only',
],
'doc': [
],
}
setup(name='pyqt5ac',
version=find_version('pyqt5ac.py'),
description='Python module to automatically compile UI and RC files in PyQt5 to Python files',
long_description=longDescription,
long_description_content_type='text/markdown',
author='Addison Elliott',
author_email='addison.elliott@gmail.com',
url='https://github.com/addisonElliott/pyqt5ac',
license='MIT License',
install_requires=REQUIREMENTS['core'],
extras_require={
**REQUIREMENTS,
# The 'dev' extra is the union of 'test' and 'doc', with an option
# to have explicit development dependencies listed.
'dev': [req
for extra in ['dev', 'test', 'doc']
for req in REQUIREMENTS.get(extra, [])],
# The 'all' extra is the union of all requirements.
'all': [req for reqs in REQUIREMENTS.values() for req in reqs],
},
python_requires='>=3',
py_modules=['pyqt5ac'],
entry_points={
'console_scripts': ['pyqt5ac = pyqt5ac:cli']
},
keywords='pyqt pyqt5 qt qt5 qt auto compile generate ui rc pyuic5 pyrcc5 resource designer creator automatic',
classifiers=[
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
],
project_urls={
'Source': 'https://github.com/addisonElliott/pyqt5ac',
'Tracker': 'https://github.com/addisonElliott/pyqt5ac/issues',
}
)
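# Added note (hedged): with the extras defined above, typical installs would be
#     pip install -e .[dev]     # core plus the union of test/doc/dev requirements
#     pip install pyqt5ac[all]  # core plus every optional requirement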
093974a11fcf0bc9652927c15386f72ab4614908 | 8,738 | py | Python | Progetto_dataset_adult/Utils/Analisi_variabili.py | pasqualefiore/Adult_dataset_analysis | c13b229e1f69df765fc2da5f0b64084816ad39c3 | ["Apache-2.0"] | null | null | null | Progetto_dataset_adult/Utils/Analisi_variabili.py | pasqualefiore/Adult_dataset_analysis | c13b229e1f69df765fc2da5f0b64084816ad39c3 | ["Apache-2.0"] | null | null | null | Progetto_dataset_adult/Utils/Analisi_variabili.py | pasqualefiore/Adult_dataset_analysis | c13b229e1f69df765fc2da5f0b64084816ad39c3 | ["Apache-2.0"] | null | null | null |
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import kendalltau,chi2_contingency, pearsonr
import pandas as pd
def plot_var_num(dataset,variabile):
""" Plot delle variabili numeriche
--------------
Parametri:
dataset: dataset di riferimento
variabile: variabile da plottare
--------------
Output
ritorna l'istogramma ed il boxplot della variabile
Se assegnata ad un elemento python ritorna i quartili, valore medio, massimo e medio
della distribuzione"""
descrizione = dataset[variabile].describe()
plt.figure(figsize = (14,8))
plt.subplot(1,2,1)
sns.histplot(dataset[variabile], color = "red")
plt.title("Istogramma della variabile {}".format(variabile))
plt.subplot(1,2,2)
sns.boxplot(dataset[variabile])
plt.title("Boxplot della variabile {}".format(variabile))
plt.show()
return descrizione
def dipendenza_correlazione(dataset, variabile1, variabile2, p_value_lv = 0.05):
""" Funzione che restituisce la dipendenza tra due variabili
---------------------
Parametri:
dataset:dataset di riferimento
variabile1, variabile2: stringa del nome delle variabili su cui attuare il test
p_value_lv: livello di significatività
---------------------
Output:
Date due variabili numeriche
dataframe contentente:
* il coefficiente di correlazione di Perason
* p_value associato
* Booleano che indica se il test è significativo per un livello pari a p_value_lv
Date due variabili categoriche
dataframe contentente:
* testchi2
* p_value associato
* Booleano che indica se il test è significativo per un livello pari a p_value_lv
Date due variabili mischiate
dataframe contenente
* coefficiente di correlazione di kendall
* p_value associato
* Booleano che indica se il test è significativo per un livello pari a p_value_lv
"""
if dataset[variabile1].dtypes == "int64" and dataset[variabile2].dtypes == "int64":
p_value = pearsonr(dataset[variabile1], dataset[variabile2])[1]
corr = pearsonr(dataset[variabile1], dataset[variabile2])[0]
data = pd.DataFrame(data = {"corr": [corr], "p_value": [p_value]}, index = ["Pearson"])
sign_0 = []
if data["p_value"][0] < p_value_lv:
sign_0.append(True)
else:
sign_0.append(False)
data["sign_{}".format(p_value_lv)] = sign_0
elif dataset[variabile1].dtypes == "object" and dataset[variabile2].dtypes == "object":
p_value = chi2_contingency(pd.crosstab(dataset[variabile1], dataset[variabile2]))[1]
data = pd.DataFrame(data = {"p_value": [p_value]}, index = ["chi2"])
sign_0 = []
if data["p_value"][0] < p_value_lv:
sign_0.append(True)
else:
sign_0.append(False)
data["sign_{}".format(p_value_lv)] = sign_0
else:
correlation = kendalltau(dataset[variabile1],dataset[variabile2])[0]
p_value = kendalltau(dataset[variabile1],dataset[variabile2])[1]
data = pd.DataFrame(data = {"correlation": [correlation],"p_value":[p_value]},
index = ["Kendall"])
sign_0 = []
if data["p_value"][0] < p_value_lv:
sign_0.append(True)
else:
sign_0.append(False)
data["sign_{}".format(p_value_lv)] = sign_0
return data
def analisi_variabili_categoriche(dataset, variabile1, variabile2, normalize = "index"):
""" Resituisce due grafici:
* il barplot della variabile1
* il barplot della variabile1 condizionata alla variabile2
* Se assegnata ad un elemento Python ritorna la quantità di
osservazioni per ciascuna categoria della variabile1
* Se assegnata ad un elemento Python ritorna la tabella di
contingenza tra la variabile1 e la variabile 2
----------------------------
Parametri:
dataset: dataset di riferimento
variabile1: stringa del nome della variabile per cui disegnare i grafici
variabile2: stringa del nome della variabile di condizionamento
normalize: stringa che permette di decidere come indicizzare la colonna
["index" per riga, "column" per colonna, "all" per entrambi, "False" nessuna
normalizzazione]
"""
conteggio = dataset[variabile1].value_counts()/len(dataset)
tabella = pd.crosstab(dataset[variabile1], dataset[variabile2], normalize = normalize)
plt.figure(figsize =(15,22))
plt.subplot(2,1,1)
sns.countplot(y = dataset[variabile1])
plt.title("""Barplot della variabile "{}" """.format(variabile1))
plt.subplot(2,1,2)
sns.countplot(y = dataset[variabile1], hue = dataset[variabile2])
plt.title("""Barplot della variabile "{}" condizionata alla variabile {} """.format(variabile1,variabile2))
return conteggio,tabella
def unificazione_categorie(dataset, variabile, categoria_da_trasf, trasformazione):
""" Unifica due o più categorie con la stessa stringa
------------------
Parameters:
dataset: dataset di riferimento
variabile: stringa della variabile per la quale avverrà il cambiamento delle categorie
categoria_da_trasf: lista della/e categorie sulla quale applicare il cambiamento
trasformazione: stringa che indica la categoria da attribuire
------------------
Output:
dataset trasformato """
for categoria in categoria_da_trasf:
# remap only within the requested column
dataset[variabile] = dataset[variabile].replace(categoria, trasformazione)
return dataset
def histplot_per_categorie(dataset, variabile1, variabile_divisione):
""" Plot che ritorna più istogrammi della variabile a cui siamo interessati
che suddividono il dataset nelle diverse categorie della variabile scelta
per la divisione
-----------------
Parametri:
dataset: dataset di riferimento
variabile1: stringa della variabile numerica per la quale siamo interessati
a conoscere la distribuzione
variabile_divisione: stringa della variabile per la quale siamo interessati
avvenga la suddivisione del dataset
-------------------
"""
lunghezza = len(dataset[variabile_divisione].value_counts().index)
plt.figure(figsize = (20,10))
for i in range(1,lunghezza+1):
plt.subplot(1, lunghezza,i)
data = dataset[dataset[variabile_divisione] == dataset[variabile_divisione].value_counts().index[i-1]]
sns.histplot(data[variabile1])
plt.title("Istogramma della variabile '{}', data la categoria {}".format(variabile1,
dataset[variabile_divisione].value_counts().index[i-1]))
def histplot_1_per_categorie(dataset, variabile1, variabile_divisione, x = 1, y = 0):
""" Plot che ritorna più istogrammi della variabile a cui siamo interessati
che suddividono il dataset nelle diverse categorie della variabile scelta
per la divisione
-----------------
Parametri:
dataset: dataset di riferimento
variabile1: stringa della variabile numerica per la quale siamo interessati
a conoscere la distribuzione
variabile_divisione: stringa della variabile per la quale siamo interessati
avvenga la suddivisione del dataset
x,y: definiscono la suddivisione dei plot per riga e colonna rispettivamente
(Nota: il prodotto tra x e y deve essere uguale o maggiore alle categorie della
variabile per la quale avviene lo split)
-------------------"""
if y == 0:
lunghezza = len(dataset[variabile_divisione].value_counts().index)
plt.figure(figsize = (20,10))
for i in range(1,lunghezza+1):
plt.subplot(1, lunghezza,i)
data = dataset[dataset[variabile_divisione] ==\
dataset[variabile_divisione].value_counts().index[i-1]]
sns.histplot(data[variabile1])
plt.title("Istogramma della variabile '{}', data la categoria {}".format(variabile1,
dataset[variabile_divisione].value_counts().index[i-1]))
else:
lunghezza = len(dataset[variabile_divisione].value_counts().index)
plt.figure(figsize = (20,10))
for i in range(1,lunghezza+1):
plt.subplot(x, y,i)
data = dataset[dataset[variabile_divisione] == \
dataset[variabile_divisione].value_counts().index[i-1]]
sns.histplot(data[variabile1])
plt.title("Istogramma della variabile '{}', data la categoria {}"\
.format(variabile1,dataset[variabile_divisione].value_counts().index[i-1]))
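The helpers above are meant to be imported and called one variable at a time; a minimal usage sketch follows (the CSV path and column names are assumptions for illustration, not taken from the repository):
import pandas as pd
adult = pd.read_csv("adult.csv")  # hypothetical path to the Adult dataset
descrizione = plot_var_num(adult, "age")  # histogram + boxplot; returns describe()
test = dipendenza_correlazione(adult, "age", "hours-per-week")  # two int64 columns -> Pearson branch
conteggio, tabella = analisi_variabili_categoriche(adult, "workclass", "income")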
| 42.009615
| 111
| 0.653353
| 1,015
| 8,738
| 5.529064
| 0.207882
| 0.027798
| 0.053457
| 0.048111
| 0.582145
| 0.484854
| 0.444761
| 0.428902
| 0.41536
| 0.396115
| 0
| 0.019029
| 0.23621
| 8,738
| 207
| 112
| 42.21256
| 0.821846
| 0.387961
| 0
| 0.456522
| 0
| 0
| 0.086807
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.043478
| 0
| 0.152174
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
093ee142adce433cf7aaa24dd679b5d6c4fc05b5
| 638
|
py
|
Python
|
shares/urls.py
|
ab1cd2eefre3/stock-trading-website
|
f19b3ff11c5348ed9758816dab0ea0b44ec027a9
|
[
"MIT"
] | null | null | null |
shares/urls.py
|
ab1cd2eefre3/stock-trading-website
|
f19b3ff11c5348ed9758816dab0ea0b44ec027a9
|
[
"MIT"
] | null | null | null |
shares/urls.py
|
ab1cd2eefre3/stock-trading-website
|
f19b3ff11c5348ed9758816dab0ea0b44ec027a9
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path("login", views.login_view, name="login"),
path("logout", views.logout_view, name="logout"),
path("register", views.register, name="register"),
path("", views.index, name="index"),
path("open", views.openPage, name="openPage"),
path("close", views.closePage, name="closePage"),
path("news", views.news, name="news"),
path("stocks", views.stocks, name="stocks"),
path("stocks/<str:symbol>", views.stockinfo, name="stockinfo"),
path("api/v1/open", views.open, name="open"),
path("api/v1/close", views.close, name="close")
]
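These routes assume a views module exposing one callable per name; a minimal sketch of what shares/views.py might look like (the function bodies here are assumptions, the real views in the repository will differ):
from django.http import HttpResponse, JsonResponse

def index(request):
    # "" routes here; render the portfolio overview
    return HttpResponse("portfolio overview")

def stockinfo(request, symbol):
    # "stocks/<str:symbol>" passes the captured symbol as a keyword argument
    return JsonResponse({"symbol": symbol})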
| 33.578947
| 67
| 0.647335
| 82
| 638
| 5.012195
| 0.292683
| 0.038929
| 0.043796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003663
| 0.144201
| 638
| 19
| 68
| 33.578947
| 0.749084
| 0
| 0
| 0
| 0
| 0
| 0.233542
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
094372a474d52adb03ce58de44449d7f0c3ddd56
| 295
|
py
|
Python
|
Intermediate/collections_namedtuple.py
|
BjornChrisnach/Python_6hour_course
|
0949387c2e423ed0ba7914db7c58af2f913bda1c
|
[
"MIT"
] | null | null | null |
Intermediate/collections_namedtuple.py
|
BjornChrisnach/Python_6hour_course
|
0949387c2e423ed0ba7914db7c58af2f913bda1c
|
[
"MIT"
] | null | null | null |
Intermediate/collections_namedtuple.py
|
BjornChrisnach/Python_6hour_course
|
0949387c2e423ed0ba7914db7c58af2f913bda1c
|
[
"MIT"
] | null | null | null |
# Collections namedtuple nr7
from collections import namedtuple
# Field names are given as a sequence of strings; passing a dict also works,
# but only its keys are used and the values are silently ignored.
Point = namedtuple("Point", ["x", "y", "z"])
newP = Point(3, 4, 5)
print(newP.x, newP.y, newP.z)
print(newP._fields)  # field names as a tuple: ('x', 'y', 'z')
newP = newP._replace(x=6)  # namedtuples are immutable; _replace returns a new instance
print(newP)
p2 = Point._make(["a", "b", "c"])  # build a Point from any iterable
print(p2)
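Two more conveniences worth knowing, not shown in the course snippet above (the defaults keyword exists since Python 3.7):
Point3 = namedtuple("Point3", ["x", "y", "z"], defaults=[0, 0, 0])
print(Point3(1))            # Point3(x=1, y=0, z=0) - trailing fields fall back to defaults
print(Point3(1)._asdict())  # {'x': 1, 'y': 0, 'z': 0}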
| 18.4375
| 53
| 0.661017
| 48
| 295
| 4
| 0.479167
| 0.140625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039526
| 0.142373
| 295
| 15
| 54
| 19.666667
| 0.719368
| 0.088136
| 0
| 0
| 0
| 0
| 0.041199
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.4
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
09440d91afca8dc4e9e6734e9c8588fc67d05760
| 2,879
|
py
|
Python
|
bootstrap_ranks.py
|
baprice/Genewise-Cohort-Integration
|
d1a7434f2a461f572fd85ed8bdeebcda268ef29c
|
[
"MIT"
] | null | null | null |
bootstrap_ranks.py
|
baprice/Genewise-Cohort-Integration
|
d1a7434f2a461f572fd85ed8bdeebcda268ef29c
|
[
"MIT"
] | null | null | null |
bootstrap_ranks.py
|
baprice/Genewise-Cohort-Integration
|
d1a7434f2a461f572fd85ed8bdeebcda268ef29c
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from jive.AJIVE import AJIVE
from jive.PCA import PCA
import warnings
import time, datetime
from random import shuffle
warnings.filterwarnings(action='once')
def getVarianceExplained(original, joint, individual, label):
from numpy.linalg import norm
joint_var = norm(joint)**2/norm(original)**2
individual_var = norm(individual)**2/norm(original)**2
residual_var = 1-joint_var-individual_var
return pd.DataFrame([residual_var, individual_var, joint_var], index=['Residual','Individual','Joint'], columns=[label])
def plotVarianceExplained(df, figsize=[10,6]):
var_plot = plt.figure(figsize=figsize, facecolor='w')
df.plot.bar(stacked=True, figsize=figsize, table=True)
plt.xticks([])
plt.tight_layout()
return var_plot
parser = argparse.ArgumentParser(description='Run AJIVE')
parser.add_argument('-a', required=True, type=str, help='input matrix (randomly split in half below)')
parser.add_argument('-ra', required=True, type=int, help='initial signal rank 1')
parser.add_argument('-rb', required=True, type=int, help='initial signal rank 2')
parser.add_argument('-n', required=True, type=str, help='name prefix')
parser.add_argument('-o', required=True, type=str, help='output files path')
args = parser.parse_args()
a_path = args.a
ra = args.ra
rb = args.rb
name_prefix = args.n
output_dir = Path(args.o)
#Create output directory if it doesn't exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
#Read in files
a = pd.read_csv(a_path, index_col=0)
a = a.sample(frac=1)
#Randomly split in half
a1 = a.iloc[:a.shape[0]//2]
a2 = a.iloc[a.shape[0]//2:]
a = a1.T
b = a2.T
#Run AJIVE
jive_start = time.time()
ajive = AJIVE(init_signal_ranks={'A': ra, 'B': rb})
ajive.fit(blocks={'A': a, 'B': b})
jive_end = time.time()
jive_time = str(datetime.timedelta(seconds=jive_end-jive_start))
print('AJIVE time: ' + jive_time)
#Diagnostic Plot
sns.set_context('notebook', font_scale=1)
diag_plot = plt.figure(0, figsize=[10,10])
ajive.plot_joint_diagnostic()
diag_plot.savefig(os.path.join(output_dir, name_prefix + '_diagnostic.png'))
#Save AJIVE matrices
a_joint_full = pd.DataFrame(ajive.blocks['A'].joint.full_, index=a.index, columns=a.columns)
a_individual_full = pd.DataFrame(ajive.blocks['A'].individual.full_, index=a.index, columns=a.columns)
b_joint_full = pd.DataFrame(ajive.blocks['B'].joint.full_, index=b.index, columns=b.columns)
b_individual_full = pd.DataFrame(ajive.blocks['B'].individual.full_, index=b.index, columns=b.columns)
#Variance Plot
plt_df = getVarianceExplained(a, a_joint_full, a_individual_full, 'A').join(getVarianceExplained(b, b_joint_full, b_individual_full, 'B')).T
plt_df.to_csv(os.path.join(output_dir, name_prefix + '_var_explained.csv'))
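The script is driven entirely by argparse, so a run reduces to a single invocation; a hypothetical example (file names and signal ranks are assumptions):
# python bootstrap_ranks.py -a expression_matrix.csv -ra 5 -rb 5 -n cohort1 -o results/
# writes results/cohort1_diagnostic.png and results/cohort1_var_explained.csv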
| 34.686747
| 140
| 0.740188
| 457
| 2,879
| 4.512035
| 0.299781
| 0.026188
| 0.041222
| 0.038797
| 0.238118
| 0.204656
| 0.125121
| 0.038797
| 0
| 0
| 0
| 0.010588
| 0.114276
| 2,879
| 82
| 141
| 35.109756
| 0.798039
| 0.046544
| 0
| 0
| 0
| 0
| 0.071585
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032787
| false
| 0
| 0.213115
| 0
| 0.278689
| 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
094aa257693558a2208948c81f0f7751ce318d41
| 736
|
py
|
Python
|
annoying_dogbone/code.py
|
Tobi-Swali/pypy-Gamejam
|
a6e9455802ae0ea460fd7e2b497d58f94ee35228
|
[
"MIT"
] | 1
|
2019-10-12T13:45:09.000Z
|
2019-10-12T13:45:09.000Z
|
annoying_dogbone/code.py
|
Tobi-Swali/pypy-Gamejam
|
a6e9455802ae0ea460fd7e2b497d58f94ee35228
|
[
"MIT"
] | 2
|
2019-10-13T17:50:42.000Z
|
2019-10-13T17:53:18.000Z
|
annoying_dogbone/code.py
|
Tobi-Swali/pypy-Gamejam
|
a6e9455802ae0ea460fd7e2b497d58f94ee35228
|
[
"MIT"
] | 3
|
2019-10-12T13:33:23.000Z
|
2019-10-12T17:15:45.000Z
|
# /bin/hacken GameJam 2019, f0wL
"""This example lights up the third NeoPixel while button A is being pressed, and lights up the
eighth NeoPixel while button B is being pressed."""
from adafruit_circuitplayground.express import cpx
import random
cpx.pixels.brightness = 0.1
while True:
# pick a pixel index in 1-9 (pixel 0 is never lit) and a random RGB colour
rnd = random.randrange(1,10,1)
rnd1 = random.randrange(0,255,1)
rnd2 = random.randrange(0,255,1)
rnd3 = random.randrange(0,255,1)
cpx.pixels[rnd] = (rnd1, rnd2, rnd3)
# bark only when the button matching the lit half is held; note that
# rnd == 5 satisfies neither branch, so no sound can play for that pixel
if rnd < 5:
if cpx.button_b:
cpx.play_file("dog.wav")
if rnd > 5:
if cpx.button_a:
cpx.play_file("dog.wav")
# clear all pixels before the next pass
cpx.pixels.fill((0,0,0))
| 26.285714
| 95
| 0.618207
| 109
| 736
| 4.12844
| 0.449541
| 0.133333
| 0.106667
| 0.126667
| 0.284444
| 0.075556
| 0
| 0
| 0
| 0
| 0
| 0.069418
| 0.275815
| 736
| 27
| 96
| 27.259259
| 0.774859
| 0.235054
| 0
| 0.111111
| 0
| 0
| 0.025135
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
094d11be09267ae59e086160b2251d8815fde678
| 22,234
|
py
|
Python
|
lib/id3c/cli/command/manifest.py
|
UWIT-IAM/uw-redcap-client
|
38a1eb426fa80697446df7a466a41e0305382606
|
[
"MIT"
] | 21
|
2019-04-19T22:45:22.000Z
|
2022-01-28T01:32:09.000Z
|
lib/id3c/cli/command/manifest.py
|
UWIT-IAM/uw-redcap-client
|
38a1eb426fa80697446df7a466a41e0305382606
|
[
"MIT"
] | 219
|
2019-04-19T21:42:24.000Z
|
2022-03-29T21:41:04.000Z
|
lib/id3c/cli/command/manifest.py
|
UWIT-IAM/uw-redcap-client
|
38a1eb426fa80697446df7a466a41e0305382606
|
[
"MIT"
] | 9
|
2020-03-11T20:07:26.000Z
|
2022-03-05T00:36:11.000Z
|
"""
Parse, diff, and upload sample manifests.
Manifests are listings of known samples with associated barcodes and other,
minimal metadata. They are usually produced by the lab processing collection
tubes and contain the link between encounter survey data and molecular biology
results.
The workflow for processing new or updated manifests is generally:
parse → diff (usually) → upload → etl
The first three correspond to subcommands below; the last to the "manifest"
subcommand of the "etl" command.
"""
import click
import fnmatch
import logging
import pandas
import re
import yaml
from functools import reduce
from deepdiff import DeepHash
from hashlib import sha1
from os import chdir
from os.path import dirname
from typing import Iterable, List, Optional, Set, Tuple, Union
from id3c.cli import cli
from id3c.cli.io import LocalOrRemoteFile, urlopen
from id3c.cli.io.google import *
from id3c.cli.io.pandas import read_excel
from id3c.db.session import DatabaseSession
from id3c.json import dump_ndjson, load_ndjson
from id3c.utils import format_doc
LOG = logging.getLogger(__name__)
PROVENANCE_KEY = "_provenance"
RESERVED_COLUMNS = {"sample", "collection", "date"}
@cli.group("manifest", help = __doc__)
def manifest():
pass
@manifest.command("parse")
@click.argument("workbook", metavar = "<filepath>")
@click.option("--sheet",
metavar = "<name>",
help = "Name of the workbook sheet to read",
required = True)
@click.option("--sample-column",
metavar = "<column>",
help = "Name of the single column containing sample barcodes. "
"Must match exactly; shell-style glob patterns are supported.",
required = False)
@click.option("--collection-column",
metavar = "<column>",
help = "Name of the single column containing collection barcodes. "
"Must match exactly; shell-style glob patterns are supported.",
required = False)
@click.option("--date-column",
metavar = "<column>",
help = "Name of the single column containing the sample collected date.",
required = False)
@click.option("--sample-type",
metavar = "<type>",
help = "The type of sample within this manifest. "
"Only applicable to samples from self-test kits.",
type=click.Choice(["utm", "rdt"]),
required = False)
@click.option("--extra-column", "extra_columns",
metavar = "<field>:<column>|<field>:{…}",
help = "Name of an additional <column> to extract into manifest record <field>. "
"Must match exactly; shell-style glob patterns are supported. "
"May be specified multiple times. "
"Option value is parsed as a YAML fragment, so additional options supported by the sibling command \"parse-with-config\" may be inlined for testing, but you're likely better off using a config file at that point.",
multiple = True)
@click.option("--row-filter",
metavar = "<query>",
help = "The pandas query to filter rows (using the python engine) in the manifest. "
"Column names refer to columns in the manifest itself. "
"Example: `corrective action`.notnull() and `corrective action`.str.lower().str.startswith(\"discard\") ",
required = False)
def parse(**kwargs):
"""
Parse a single manifest workbook sheet.
<filepath> must be a path or URL to an Excel workbook or Google Sheets
spreadsheet with at least one sheet in it, identified by name using the required option --sheet.
Supported URL schemes include http[s]:// and s3://, as well as others.
The --sample-column option specifies the name of the column
containing the sample barcode. The --collection-column option specifies
the name of the column containing the collection barcode. You must supply one
or both of those options.
The --date-column specifies the name of the column containing the sample collected date.
Other columns may be extracted into the manifest records as desired using the
--extra-column option.
The row-filter entry specifies a pandas query to filter
(using the python engine) rows in the manifest. Column names refer to columns
in the manifest itself.
Example: `corrective action`.notnull() and `corrective action`.str.lower().str.startswith("discard")
Manifest records are output to stdout as newline-delimited JSON records.
You will likely want to redirect stdout to a file.
"""
kwargs["extra_columns"] = [
(dst, yaml.safe_load(src))
for dst, src
in [arg.split(":", 1) for arg in kwargs["extra_columns"]]
]
manifest = _parse(**kwargs)
dump_ndjson(manifest)
@manifest.command("parse-using-config")
@click.argument("config_file",
metavar = "<config.yaml>",
type = click.File("r"))
def parse_using_config(config_file):
"""
Parse multiple manifest sheets specified by a config file.
<config.yaml> must be a file with at least one YAML document in it. Each
document corresponds closely to the command-line options taken by the
"parse" command (a sibling to this command). For example, the following
configuration contains two documents:
\b
---
workbook: OneDrive/SFS Prospective Samples 2018-2019.xlsx
sheet: HMC
sample_column: "Barcode ID*"
date_column: "Coll_date"
extra_columns:
collection:
name: "Collection ID*"
barcode: true
aliquots:
name: "Aliquot [ABC]"
multiple: true
date: "Collection date*"
aliquot_date: "Date aliquoted"
racks:
name: "Rack [ABC]*"
multiple: true
notes: "Notes"
\b
---
workbook: OneDrive/SFS Retrospective Samples 2018-2019.xlsx
sheet: HMC
sample_column: "Barcode ID*"
extra_columns:
aliquots:
name: "Aliquot [ABC]"
multiple: true
date: "Collection date*"
aliquot_date: "Date aliquoted"
racks:
name: "Rack [ABC]*"
multiple: true
test_results: "Test ResulTS"
...
A YAML document can also contain a list of workbooks that share the same
format:
\b
---
workbooks:
- s3://bucketname/seattleflu/bbi/2020_2021_sfs_aliquoting_01.xlsx
- s3://bucketname/seattleflu/bbi/2020_2021_sfs_aliquoting_02.xlsx
- s3://bucketname/seattleflu/bbi/2020_2021_sfs_aliquoting_03.xlsx
sheet: aliquoting
sample_column: sample_id
extra_columns:
barcode: sample_id
collection_date: collection_date
mrn: mrn
accession_no: accession
sample_origin: sample_origin
...
The sample_column entry specifies the name of the column
containing the sample barcode. The collection_column entry specifies
the name of the column containing the collection barcode. You must supply one
or both of those entries.
The date_column specifies the name of the column containing the sample collected date.
The row_filter entry specifies a pandas query to filter
(using the python engine) rows in the manifest. Column names refer to columns
in the manifest itself.
Example: `corrective action`.notnull() and `corrective action`.str.lower().str.startswith("discard")
The key: value pairs in "extra_columns" name destination record fields (as
the key) and source columns (as the value). For most source columns, a
simple string name (or shell-glob pattern) is enough. Other behaviour is
available by using a dictionary value.
To collect values from multiple source columns into one record field,
specify a dictionary like:
\b
field:
name: column_[abc]
multiple: true
To mark a field as containing unique barcodes, similar to the built-in
"sample_column" option, specify a dictionary like:
\b
field:
name: column
barcode: true
Barcode fields are checked for duplicates and any records containing a
duplicated value are dropped with a warning.
Relative paths in <config.yaml> are treated relative to the containing
directory of the configuration file itself.
All manifest records parsed are output to stdout as newline-delimited JSON
records. You will likely want to redirect stdout to a file.
"""
configs = list(yaml.safe_load_all(config_file))
if config_file.name != "<stdin>":
config_dir = dirname(config_file.name)
# dirname is the empty string if we're in the same directory as the
# config file.
if config_dir:
chdir(config_dir)
for config in configs:
kwargs_list = []
try:
workbooks = config.get("workbooks") or [config["workbook"]]
for workbook in workbooks:
kwargs = {
"workbook": workbook,
"sheet": config["sheet"],
"sample_column": config.get("sample_column"),
"collection_column": config.get("collection_column"),
"date_column": config.get("date_column"),
"extra_columns": list(config.get("extra_columns", {}).items()),
"sample_type": config.get("sample_type"),
"row_filter" : config.get("row_filter")
}
kwargs_list.append(kwargs)
except KeyError as key:
LOG.error(f"Required key «{key}» missing from config {config}")
raise key from None
for kwargs in kwargs_list:
dump_ndjson(_parse(**kwargs))
def _parse(*,
workbook,
sheet,
sample_column = None,
collection_column = None,
date_column = None,
extra_columns: List[Tuple[str, Union[str, dict]]] = [],
sample_type = None,
row_filter: Optional[str] = None):
"""
Internal function powering :func:`parse` and :func:`parse_using_config`.
"""
if not sample_column and not collection_column:
raise ValueError("You must specify the sample_column, the collection_column, or both.")
disallowed_extra_columns = {dst for dst, src in extra_columns} & RESERVED_COLUMNS
assert len(disallowed_extra_columns) == 0, \
f"A reserved column name has been configured in extra_columns: {disallowed_extra_columns}"
# Used to capture internal provenance metadata for data tracing
digest = None
# Determine if the workbook URL is for a Google Document and if so
# retrieve the Google Sheets file as an Excel spreadsheet. Otherwise,
# retrieve it using urlopen.
google_docs_document_id = extract_document_id_from_google_url(workbook)
if google_docs_document_id:
LOG.debug(f"Reading Google Sheets document «{workbook}»")
with export_file_from_google_drive(google_docs_document_id, GoogleDriveExportFormat.EXCEL) as file:
workbook_bytes = file.read()
etag = get_document_etag(google_docs_document_id)
digest = sha1(etag.encode()).hexdigest()
else:
LOG.debug(f"Reading Excel workbook «{workbook}»")
with urlopen(workbook, "rb") as file:
workbook_bytes = file.read()
digest = sha1(workbook_bytes).hexdigest()
LOG.debug(f"Parsing sheet «{sheet}» in workbook «{workbook}»")
# Read all columns as strings using our pandas wrapper
manifest = read_excel(workbook_bytes, sheet_name = sheet)
LOG.debug(f"Columns in manifest: {list(manifest.columns)}")
# Strip leading/trailing spaces from values and replace missing values and
# empty strings (possibly from stripping) with None so they are converted
# to null in JSON.
#
# Note that the two .replace() calls can't be combined because the first
# instance of NA → None will change the column dtype from string → object
# and render subsequent comparisons to NA invalid.
manifest = manifest.apply(
lambda column: (
column
.str.strip()
.replace({pandas.NA: ""})
.replace({"": None, "na": None})))
# If a filter query was provided filter the manifest rows
# using the python engine.
if row_filter:
manifest = manifest.query(row_filter, engine="python")
# Construct parsed manifest by copying columns from source to destination.
# This approach is used to allow the same source column to end up as
# multiple destination columns.
parsed_manifest = pandas.DataFrame()
column_map: List[Tuple[str, dict]] = []
if sample_column:
column_map += [("sample", {"name": sample_column, "barcode": True})]
if collection_column:
column_map += [("collection", {"name": collection_column, "barcode": True})]
if date_column:
column_map += [("date", {"name": date_column})]
column_map += [
(dst, src) if isinstance(src, dict) else (dst, {"name":src})
for dst, src
in extra_columns
if src]
for dst, src in column_map:
if src.get("multiple"):
parsed_manifest[dst] = select_columns(manifest, src["name"]).apply(list, axis="columns")
else:
parsed_manifest[dst] = select_column(manifest, src["name"])
# Set of columns names for barcodes
barcode_columns = {dst for dst, src in column_map if src.get("barcode")}
parsed_manifest = perform_qc(sample_column, collection_column, barcode_columns, parsed_manifest)
# Add sample type for kit related samples
if sample_type:
parsed_manifest["sample_type"] = sample_type
parsed_manifest[PROVENANCE_KEY] = list(
map(lambda index: {
"workbook": workbook,
"sha1sum": digest,
"sheet": sheet,
# Account for header row and convert from 0-based to 1-based indexing
"row": index + 2,
}, parsed_manifest.index))
# Return a standard list of dicts instead of a DataFrame
return parsed_manifest.to_dict(orient = "records")
@manifest.command("diff")
@click.argument("manifest_a",
metavar = "<manifest-a.ndjson>",
type = LocalOrRemoteFile("r"))
@click.argument("manifest_b",
metavar = "<manifest-b.ndjson>",
type = LocalOrRemoteFile("r"))
@format_doc(PROVENANCE_KEY = PROVENANCE_KEY)
def diff(manifest_a, manifest_b):
"""
Compare two manifests and output new or changed records.
<manifest-a.ndjson> and <manifest-b.ndjson> must be newline-delimited JSON
files produced by the "parse" or "parse-using-config" commands which are
siblings to this command.
Records in <manifest-b.ndjson> which do not appear in <manifest-a.ndjson>
will be output to stdout. The internal provenance-tracking field,
"{PROVENANCE_KEY}", is ignored for the purposes of comparison.
"""
manifest_a_hashes = {
deephash(record)
for record in load_ndjson(manifest_a) }
new_or_changed = (
record for record in load_ndjson(manifest_b)
if deephash(record) not in manifest_a_hashes )
dump_ndjson(new_or_changed)
@manifest.command("upload")
@click.argument("manifest_file",
metavar = "<manifest.ndjson>",
type = LocalOrRemoteFile("r"))
def upload(manifest_file):
"""
Upload manifest records into the database receiving area.
<manifest.ndjson> must be a newline-delimited JSON file produced by this
command's sibling commands.
Once records are uploaded, the manifest ETL routine will reconcile the
manifest records with known identifiers and existing samples.
"""
db = DatabaseSession()
try:
LOG.info(f"Copying sample manifest records from {manifest_file.path}")
row_count = db.copy_from_ndjson(("receiving", "manifest", "document"), manifest_file)
LOG.info(f"Received {row_count:,} manifest records")
LOG.info("Committing all changes")
db.commit()
except:
LOG.info("Rolling back all changes; the database will not be modified")
db.rollback()
raise
def select_column(table: pandas.DataFrame, name: str) -> pandas.Series:
"""
Select the single column matching *name* in *table*.
*table* must be a :class:`pandas.DataFrame`.
*name* must be a string, which may contain shell-style wildcards and
pattern matching.
Matching is performed case-insensitively. An `AssertionError` is raised if
no columns are found or if more than one column is found.
Returns a :class:`pandas.Series` column from *table*.
"""
matching = select_columns(table, name)
assert len(matching.columns) == 1, f"More than one column name matching «{name}»: {matching.columns}"
return matching[matching.columns[0]]
def select_columns(table: pandas.DataFrame, name: str) -> pandas.DataFrame:
"""
Select one or more columns matching *name* in *table*.
*table* must be a :class:`pandas.DataFrame`.
*name* must be a string, which may contain shell-style wildcards and
pattern matching.
Matching is performed case-insensitively. An `AssertionError` is raised if
no columns are found.
Returns a :class:`pandas.DataFrame` containing a subset of columns in
*table*.
"""
pattern = re.compile(fnmatch.translate(name), re.IGNORECASE)
matches = list(filter(pattern.match, table.columns.astype(str)))
assert matches, f"No column name matching «{name}» found; column names are: {list(table.columns)}"
return table[matches]
def perform_qc(sample_column: str, collection_column: str, barcode_columns: Set[str],
parsed_manifest: pandas.DataFrame) -> pandas.DataFrame:
"""
Perform quality control on the manifest data, dropping rows which violate
our standards for complete and accurate data.
"""
parsed_manifest = drop_missing_barcodes(sample_column, collection_column, parsed_manifest)
# Drop any rows that have duplicated barcodes
parsed_manifest = deduplicate_barcodes(parsed_manifest, barcode_columns)
return parsed_manifest
def drop_missing_barcodes(sample_column: str, collection_column: str,
parsed_manifest: pandas.DataFrame) -> pandas.DataFrame:
"""
Drop rows that have no data for the *sample_column* and/or the *collection_column*, depending
on which columns are configured. If both *sample_column* and *collection_column* are configured,
drop rows if both columns don't have data.
>>> drop_missing_barcodes(sample_column='sample', collection_column='collection', \
parsed_manifest=pandas.DataFrame([['aa', 'bb', 'foo'], [None, 'dd', 'bar'], \
['ee', None, 'baz'], [None, None, 'fizz']], \
columns=['sample', 'collection', 'other']))
sample collection other
0 aa bb foo
1 None dd bar
2 ee None baz
>>> drop_missing_barcodes(sample_column='sample', collection_column=None, \
parsed_manifest=pandas.DataFrame([['aa', 'bb', 'foo'], [None, 'dd', 'bar'], \
['ee', None, 'baz'], [None, None, 'fizz']], \
columns=['sample', 'collection', 'other']))
sample collection other
0 aa bb foo
2 ee None baz
>>> drop_missing_barcodes(sample_column=None, collection_column='collection', \
parsed_manifest=pandas.DataFrame([['aa', 'bb', 'foo'], [None, 'dd', 'bar'], \
['ee', None, 'baz'], [None, None, 'fizz']], \
columns=['sample', 'collection', 'other']))
sample collection other
0 aa bb foo
1 None dd bar
"""
if sample_column and collection_column:
parsed_manifest = parsed_manifest.dropna(subset = ["sample", "collection"], how='all')
elif sample_column:
parsed_manifest = parsed_manifest.dropna(subset = ["sample"])
elif collection_column:
parsed_manifest = parsed_manifest.dropna(subset = ["collection"])
return parsed_manifest
def deduplicate_barcodes(df: pandas.DataFrame, columns: Iterable) -> pandas.DataFrame:
"""
Check all barcode columns for duplicates and drops records that have
duplicated barcodes.
>>> deduplicate_barcodes(pandas.DataFrame([['aa', 'bb', 'foo'], ['aa', 'cc', 'bar']], \
columns=['sample', 'collection', 'other']), columns=['sample', 'collection'])
Empty DataFrame
Columns: [sample, collection, other]
Index: []
>>> deduplicate_barcodes(pandas.DataFrame([['aa', 'bb', 'foo'], ['aa', 'cc', 'bar']], \
columns=['sample', 'collection', 'other']), columns=['collection'])
sample collection other
0 aa bb foo
1 aa cc bar
>>> deduplicate_barcodes(pandas.DataFrame([['aa', 'bb', 'foo'], ['aa', 'cc', 'bar'], \
['bb', 'aa', 'baz']], columns=['sample', 'collection', 'other']), \
columns=['sample', 'collection'])
sample collection other
2 bb aa baz
"""
deduplicated = df
for column in columns:
# Drop null values so they don't get counted as duplicates
col = df[column].dropna()
# Find duplicates within column
duplicates = col[col.duplicated(keep=False)]
# If duplicates are found, drop rows with duplicate barcodes
if len(duplicates) > 0:
LOG.warning(f"Found duplicate barcodes in column «{column}»")
dup_barcodes = list(duplicates.unique())
LOG.warning(f"Duplicated barcodes: {dup_barcodes}")
LOG.warning(f"Dropping records with duplicate barcodes")
deduplicated_df = df[(~df[column].duplicated(keep=False)) \
| (df[column].isnull())][column].to_frame()
common_idx = deduplicated.index.intersection(deduplicated_df.index)
deduplicated = deduplicated.loc[common_idx]
return deduplicated
def deephash(record):
"""
Return a :class:`DeepHash` of the given manifest *record*, ignoring
the provenance information.
"""
return DeepHash(record, exclude_paths = {f"root['{PROVENANCE_KEY}']"})[record]
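The column selectors above lean on fnmatch for their shell-style patterns; a minimal, self-contained sketch of that matching behaviour (the column names are made up for illustration):
import fnmatch
import re
columns = ["Barcode ID*", "Aliquot A", "Aliquot B", "Notes"]
pattern = re.compile(fnmatch.translate("aliquot [ab]"), re.IGNORECASE)
print([c for c in columns if pattern.match(c)])  # ['Aliquot A', 'Aliquot B']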
| 36.509031
| 225
| 0.655707
| 2,775
| 22,234
| 5.164685
| 0.187748
| 0.026375
| 0.017583
| 0.013676
| 0.303865
| 0.28691
| 0.264513
| 0.248395
| 0.221811
| 0.201716
| 0
| 0.0047
| 0.244041
| 22,234
| 608
| 226
| 36.569079
| 0.846561
| 0.453854
| 0
| 0.111111
| 0
| 0.00823
| 0.23385
| 0.01409
| 0
| 0
| 0
| 0
| 0.012346
| 1
| 0.049383
| false
| 0.004115
| 0.078189
| 0
| 0.156379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|