# ===== tools/CH_constants.py (GENI-NSF/geni-ch, MIT) =====
#----------------------------------------------------------------------
# Copyright (c) 2011-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
# Constants for the CH (Federation Registry service)
# AKA Service Registry (SR) AKA Clearinghouse (CH)
# List of services provided by the CH server
services = ["SERVICE"]
# Dictionary of the types of services provided by the CH (name : code),
# i.e. the kinds of services that are advertised in the CH
SERVICE_AGGREGATE_MANAGER = 0
SERVICE_SLICE_AUTHORITY = 1
SERVICE_PROJECT_AUTHORITY = 2
SERVICE_MEMBER_AUTHORITY = 3
SERVICE_AUTHORIZATION_SERVICE = 4
SERVICE_LOGGING_SERVICE = 5
SERVICE_CREDENTIAL_STORE = 6
SERVICE_CERTIFICATE_AUTHORITY = 7
SERVICE_KEY_MANAGER = 8
SERVICE_WIMAX_SITE = 10
SERVICE_IRODS = 11
service_types = {
"AGGREGATE_MANAGER" : SERVICE_AGGREGATE_MANAGER,
"SLICE_AUTHORITY" : SERVICE_SLICE_AUTHORITY,
"PROJECT_AUTHORITY" : SERVICE_PROJECT_AUTHORITY,
"MEMBER_AUTHORITY" : SERVICE_MEMBER_AUTHORITY,
"AUTHORIZATION_SERVICE" : SERVICE_AUTHORIZATION_SERVICE,
"LOGGING_SERVICE" : SERVICE_LOGGING_SERVICE,
"CREDENTIAL_STORE" : SERVICE_CREDENTIAL_STORE,
"CERTIFICATE_AUTHORITY" : SERVICE_CERTIFICATE_AUTHORITY,
"KEY_MANAGER" : SERVICE_KEY_MANAGER,
"WIMAX_SITE" : SERVICE_WIMAX_SITE,
"IRODS" : SERVICE_IRODS
}
# Mapping from external to internal data schema
field_mapping = {
"_GENI_SERVICE_ID" : "id",
"SERVICE_URN": 'service_urn',
"SERVICE_URL": 'service_url',
"_GENI_SERVICE_CERT_FILENAME": 'service_cert',
"SERVICE_CERT": 'service_cert',
"SERVICE_NAME": 'service_name',
"SERVICE_DESCRIPTION": 'service_description',
"SERVICE_TYPE": "service_type",
"_GENI_SERVICE_SHORT_NAME": "short_name"
}
# The externally visible data schema for services
mandatory_fields = {
"SERVICE_URN": {"TYPE": "URN"},
"SERVICE_URL": {"TYPE": "URL"},
"SERVICE_CERT": {"TYPE": "CERTIFICATE"},
"SERVICE_NAME" : {"TYPE" : "STRING"},
"SERVICE_DESCRIPTION": {"TYPE" : "STRING"}
}
supplemental_fields = {
"_GENI_SERVICE_CERT_FILENAME": {"TYPE": "STRING", "OBJECT": "SERVICE"},
"_GENI_SERVICE_ID" : {"TYPE" : "INTEGER", "OBJECT": "SERVICE"},
"_GENI_SERVICE_ATTRIBUTES" : {"TYPE" : "DICTIONARY", "OBJECT" : "SERVICE"},
"_GENI_SERVICE_SHORT_NAME" : {"TYPE": "STRING", "OBJECT": "SERVICE"}
}
# Defined attributes on services.
# A dictionary: each attribute name maps to a dictionary with
# 'description', 'service_types' and 'acceptable_values' entries.
# 'service_types' is the list of service types to which this attribute
# applies; the tag is optional, and if not supplied the attribute is not
# restricted to particular service types.
# 'acceptable_values' is the list of acceptable values for this attribute;
# the tag is optional, and if not supplied the value is not restricted.
defined_attributes = {
"SPEAKS_FOR" : {
"description" : "Does this aggregate accept speaks-for credentials and options?",
"service_types" : [SERVICE_AGGREGATE_MANAGER],
"acceptable_values" : ['t', 'f']
},
"AM_API_VERSION" : {
"description" : "The version of the AM API supported by this aggregate",
"service_types" : [SERVICE_AGGREGATE_MANAGER],
"acceptable_values" : ['1', '2', '3']
}
}
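# Illustrative sketch (the helper name and its semantics are assumptions for
# demonstration, not part of the CH API): validate a proposed attribute value
# against the defined_attributes registry above.
def is_valid_attribute(attr_name, value, service_type):
    attr = defined_attributes.get(attr_name)
    if attr is None:
        return False  # unknown attribute
    if 'service_types' in attr and service_type not in attr['service_types']:
        return False  # attribute does not apply to this service type
    if 'acceptable_values' in attr and value not in attr['acceptable_values']:
        return False  # value outside the acceptable set
    return True
# e.g. is_valid_attribute('SPEAKS_FOR', 't', SERVICE_AGGREGATE_MANAGER) -> True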

# ===== Simulation-Chernobyl/simulation_chernobyl/graphics/logger.py (AllSafeCyberSecur1ty/Nuclear-Engineering, MIT) =====
import pygame
class Logger():
def __init__(self):
self.mem_text = "" # to store previous text
self.text1, self.text2, self.text3 = "", "", "" # text buffer
self.text_color1, self.text_color2, self.text_color3 = (0,0,0), (0,0,0), (0,0,0) # text color buffer
self.max_width = 100
# log displayed on surface during simulation
def gui_logger(self, surface, font):
surface.blit(font.render(str(self.text1), True, self.text_color1), (363, 669)) # text output line 1
surface.blit(font.render(str(self.text2), True, self.text_color2), (363, 684)) # text output line 2
surface.blit(font.render(str(self.text3), True, self.text_color3), (363, 699)) # text output line 3
# add new text to log
def log_add(self, text, text_color=(0,0,0)):
if text != self.mem_text: # if new text is sent:
            self.text3, self.text2, self.text1 = self.text2, self.text1, text # add it to the beginning, and shift the others
self.text_color3, self.text_color2, self.text_color1 = self.text_color2, self.text_color1, text_color # same for colors
if len(text) > self.max_width: # if text is larger than window: split it and shift again
self.text3, self.text2, self.text1 = self.text2, text[self.max_width : len(text)], text[0 : self.max_width]
self.text_color3, self.text_color2, self.text_color1 = self.text_color2, text_color, text_color # same for colors
self.mem_text = text # update mem
# load max text width
def gui_max_width(self, max_width):
self.max_width = max_width
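
# Minimal usage sketch (an assumption for illustration: a real simulation
# would create the display, font and main loop elsewhere; the window size
# and font below are placeholders):
if __name__ == '__main__':
    pygame.init()
    surface = pygame.display.set_mode((800, 720))
    font = pygame.font.SysFont(None, 14)
    log = Logger()
    log.gui_max_width(100)
    log.log_add("Simulation started", text_color=(0, 128, 0))
    log.gui_logger(surface, font)  # draw the three buffered log lines
    pygame.display.flip()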

# ===== ob/models.py (peterkuma/tjrapid, MIT) =====
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007-2012 Peter Kuma
import os
from datetime import date, datetime
import urllib.request, urllib.error, urllib.parse
import json
from django.utils import timezone
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from markdown import markdown
from textile import textile
from django.utils.safestring import mark_safe
from django.contrib.contenttypes.fields import GenericRelation
from django_attach.models import Attachment
from linguo.models import MultilingualModel
from linguo.managers import MultilingualManager
from django.urls import reverse
from django.utils.translation import get_language
from django.core.validators import MaxValueValidator
from main.models import Category
MARKUP_CHOICES = (
('markdown', 'Markdown'),
('textile', 'Textile'),
('html', 'HTML'),
)
MARKERS = (
'amenity/arts_centre',
'amenity/atm',
'amenity/bank',
'amenity/bar',
'amenity/bbq',
'amenity/bench',
'amenity/bicycle_parking',
'amenity/bicycle_repair_station',
'amenity/biergarten',
'amenity/boat_rental',
'amenity/bureau_de_change',
'amenity/bus_station',
'amenity/cafe',
'amenity/car_wash',
'amenity/casino',
'amenity/charging_station',
'amenity/cinema',
'amenity/community_centre',
'amenity/courthouse',
'amenity/dentist',
'amenity/doctors',
'amenity/drinking_water',
'amenity/emergency_phone',
'amenity/excrement_bags',
'amenity/fast_food',
'amenity/ferry',
'amenity/firestation',
'amenity/fountain',
'amenity/fuel',
'amenity/hospital',
'amenity/hunting_stand',
'amenity/ice_cream',
'amenity/internet_cafe',
'amenity/library',
'amenity/motorcycle_parking',
'amenity/nightclub',
'amenity/parking',
'amenity/parking_entrance_multistorey',
'amenity/parking_entrance_underground',
'amenity/parking_subtle',
'amenity/parking_tickets',
'amenity/pharmacy',
'amenity/place_of_worship',
'amenity/police',
'amenity/post_box',
'amenity/post_office',
'amenity/prison',
'amenity/pub',
'amenity/public_bath',
'amenity/public_bookcase',
'amenity/public_transport_tickets',
'amenity/recycling',
'amenity/rental_bicycle',
'amenity/rental_car',
'amenity/restaurant',
'amenity/shelter',
'amenity/shower',
'amenity/social_facility',
'amenity/taxi',
'amenity/telephone',
'amenity/theatre',
'amenity/toilets',
'amenity/town_hall',
'amenity/vehicle_inspection',
'amenity/veterinary',
'amenity/waste_basket',
'amenity/waste_disposal',
'barrier/cattle_grid',
'barrier/cycle_barrier',
'barrier/full-height_turnstile',
'barrier/gate',
'barrier/kissing_gate',
'barrier/lift_gate',
'barrier/motorcycle_barrier',
'barrier/stile',
'barrier/toll_booth',
'highway/bus_stop',
'highway/elevator',
'highway/ford',
'highway/traffic_light',
'historic/archaeological_site',
'historic/bust',
'historic/castle',
'historic/city_gate',
'historic/fort',
'historic/fortress',
'historic/manor',
'historic/memorial',
'historic/monument',
'historic/obelisk',
'historic/palace',
'historic/plaque',
'historic/shrine',
'historic/statue',
'historic/stone',
'leisure/amusement_arcade',
'leisure/beach_resort',
'leisure/bird_hide',
'leisure/bowling_alley',
'leisure/firepit',
'leisure/fishing',
'leisure/fitness',
'leisure/golf',
'leisure/miniature_golf',
'leisure/outdoor_seating',
'leisure/playground',
'leisure/sauna',
'leisure/slipway',
'leisure/water_park',
'man_made/bell_tower',
'man_made/chimney',
'man_made/communications_tower',
'man_made/crane',
'man_made/cross',
'man_made/lighthouse',
'man_made/mast',
'man_made/mast_communications',
'man_made/mast_lighting',
'man_made/power_tower',
'man_made/power_tower_small',
'man_made/storage_tank',
'man_made/telescope_dish',
'man_made/telescope_dome',
'man_made/tower_cantilever_communication',
'man_made/tower_cooling',
'man_made/tower_defensive',
'man_made/tower_dish',
'man_made/tower_dome',
'man_made/tower_generic',
'man_made/tower_lattice',
'man_made/tower_lattice_communication',
'man_made/tower_lattice_lighting',
'man_made/tower_lighting',
'man_made/tower_observation',
'man_made/water_tower',
'man_made/windmill',
'natural/cave',
'natural/peak',
'natural/saddle',
'office/consulate',
'office/embassy',
'religion/buddhist',
'religion/christian',
'religion/hinduist',
'religion/jewish',
'religion/muslim',
'religion/shintoist',
'religion/sikhist',
'religion/taoist',
'shop/alcohol',
'shop/art',
'shop/bag',
'shop/bakery',
'shop/beauty',
'shop/bed',
'shop/beverages',
'shop/bicycle',
'shop/bookmaker',
'shop/butcher',
'shop/car',
'shop/car_parts',
'shop/carpet',
'shop/car_repair',
'shop/charity',
'shop/chemist',
'shop/clothes',
'shop/coffee',
'shop/computer',
'shop/confectionery',
'shop/convenience',
'shop/copyshop',
'shop/dairy',
'shop/deli',
'shop/department_store',
'shop/diy',
'shop/electronics',
'shop/fabric',
'shop/florist',
'shop/furniture',
'shop/garden_centre',
'shop/gift',
'shop/greengrocer',
'shop/hairdresser',
'shop/hifi',
'shop/houseware',
'shop/interior_decoration',
'shop/jewelry',
'shop/laundry',
'shop/marketplace',
'shop/massage',
'shop/medical_supply',
'shop/mobile_phone',
'shop/music',
'shop/musical_instrument',
'shop/newsagent',
'shop/optician',
'shop/outdoor',
'shop/paint',
'shop/perfumery',
'shop/pet',
'shop/photo',
'shop/seafood',
'shop/second_hand',
'shop/shoes',
'shop/sports',
'shop/stationery',
'shop/supermarket',
'shop/tea',
'shop/ticket',
'shop/tobacco',
'shop/toys',
'shop/trade',
'shop/travel_agency',
'shop/tyres',
'shop/variety_store',
'shop/video',
'shop/video_games',
'tourism/alpinehut',
'tourism/apartment',
'tourism/artwork',
'tourism/audioguide',
'tourism/board',
'tourism/camping',
'tourism/caravan_park',
'tourism/chalet',
'tourism/guest_house',
'tourism/guidepost',
'tourism/hostel',
'tourism/hotel',
'tourism/information',
'tourism/map',
'tourism/motel',
'tourism/museum',
'tourism/office',
'tourism/picnic',
'tourism/terminal',
'tourism/viewpoint',
'tourism/wilderness_hut',
)
MARKERS = [(x, x) for x in MARKERS]
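# Django choice fields expect (value, label) pairs; reusing each marker id as
# its own label gives entries like ('amenity/atm', 'amenity/atm').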
class Member(models.Model):
first_name = models.CharField(_('first name'), max_length=50)
surname = models.CharField(_('surname'), max_length=50)
category = models.CharField(_('category'), max_length=5)
email = models.EmailField(_('e-mail'), blank=True)
	def __str__(self):  # __unicode__ is never called under Python 3
		return '%s %s' % (self.first_name, self.surname)
def email_special(self):
return self.email.replace('@', '[zavinac]')
class Meta:
ordering = ('category','surname')
verbose_name = _('member')
verbose_name_plural = _('members')
class Event(MultilingualModel):
title = models.CharField(_('title'), max_length=100)
name = models.SlugField(
_('name'),
unique=True,
help_text=_('Short name that will appear in the URL')
)
start_date = models.DateField(_('start date'))
end_date = models.DateField(_('end date'), null=True, blank=True)
location = models.CharField(_('location'), max_length=100)
latitude = models.FloatField(_('latitude'), null=True, blank=True)
longitude = models.FloatField(_('longitude'), null=True, blank=True)
map_zoom = models.PositiveIntegerField(_('map zoom'),
default=15,
validators=[MaxValueValidator(19),]
)
category = models.ForeignKey(Category,
verbose_name=_('category'),
on_delete=models.CASCADE,
)
markup = models.CharField(
_('markup'),
max_length=50,
choices=MARKUP_CHOICES,
default='markdown',
help_text=_('Documentation: <a href="https://en.wikipedia.org/wiki/Markdown">Markdown</a>, <a href="http://en.wikipedia.org/wiki/Textile_(markup_language)">Textile</a>')
)
head = models.TextField(
_('head'),
blank=True,
help_text=_('Add files and images below')
)
body = models.TextField(
_('body'),
blank=True,
help_text=_('Add files and images below')
)
attachments = GenericRelation(Attachment)
created = models.DateTimeField(_('created'),auto_now_add=True)
modified = models.DateTimeField(_('modified'),auto_now=True)
def get_absolute_url(self):
import ob.views
return reverse(ob.views.event, kwargs={
'lang': get_language(),
'category_name': Category.objects.get(name_en='orienteering').name,
'name': self.name,
})
def head_html(self):
if self.markup == 'markdown': return mark_safe(markdown(self.head))
elif self.markup == 'textile': return mark_safe(textile(self.head))
else: return mark_safe(self.head)
def body_html(self):
if self.markup == 'markdown': return mark_safe(markdown(self.body))
elif self.markup == 'textile': return mark_safe(textile(self.body))
else: return mark_safe(self.body)
def is_upcoming(self):
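		# operator precedence makes this read as:
		# (no end date and it starts today or later)
		# or (there is an end date and it is today or later)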
return self.end_date is None and self.start_date >= date.today() or \
self.end_date is not None and self.end_date >= date.today()
def larger_map_link(self):
import ob.views
return None if self.mappoint_set.count() == 0 else \
reverse(ob.views.event_map, kwargs={
'lang': get_language(),
'category_name': self.category.name,
'name': self.name,
})
objects = MultilingualManager()
class Meta:
ordering = ('-start_date',)
verbose_name = _('event')
verbose_name_plural = _('events')
translate = ('title', 'name', 'location', 'head', 'body')
class MapPoint(MultilingualModel):
title = models.CharField(_('title'), max_length=100)
latitude = models.FloatField(_('latitude'))
longitude = models.FloatField(_('longitude'))
marker = models.CharField(_('marker'),
null=True,
blank=True,
max_length=100,
choices=MARKERS,
)
event = models.ForeignKey(Event,
verbose_name=_('event'),
on_delete=models.CASCADE,
)
objects = MultilingualManager()
class Meta:
verbose_name = _('map point')
verbose_name_plural = _('map points')
translate = ('title',)

# ===== tests/test_hal_nav2.py (rdhyee/restnavigator, MIT) =====
'''Refactored tests from test_hal_nav.py'''
import json
import httpretty
import pytest
import conftest
import uritemplate
import restnavigator as RN
from restnavigator import exc
import restnavigator.halnav as HN
def uri_of(doc):
'''Pull out the url from a hal document'''
return doc['_links']['self']['href']
def link_to(doc):
'''Pull out the self link of a hal document'''
return doc['_links']['self']
def register_hal_page(doc, **kwargs):
status = kwargs.pop('status', 200)
method = kwargs.pop('method', 'GET')
content_type = kwargs.pop('content_type', 'application/hal+json')
def body_callback(request, url, headers):
'''We do a callback so the response body can be updated'''
headers2 = kwargs.pop('headers', headers)
return (
status,
headers2,
json.dumps(doc),
)
httpretty.HTTPretty.register_uri(
method,
body=body_callback,
content_type=content_type,
uri=uri_of(doc),
**kwargs
)
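# register_hal_page expects a dict shaped like a HAL document, e.g.
# {'_links': {'self': {'href': 'http://fakeuri.example/api/widget'}}, ...}
# (the 'widget' URI here is illustrative).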
@pytest.fixture
def page(index_page, curie_links, index_uri):
'''Returns a function that creates pages'''
def _page(name, number):
selflink = {
'href': index_uri + name + '/' + str(number),
'name': name + str(number),
}
nextlink = {
'href': index_uri + name + '/' + str(number + 1),
'name': name + str(number + 1),
}
doc = {
'_links': {
'self': selflink,
'curies': curie_links,
'next': nextlink
},
'name': name,
'number': number,
'data': conftest.random_sentence(),
}
register_hal_page(doc)
_page.registry.setdefault(name, []).append(doc)
return doc
_page.registry = {}
return _page
@pytest.yield_fixture
def http(request):
'''Enables httpretty and disables it after the test'''
httpretty.HTTPretty.enable()
yield httpretty.HTTPretty
httpretty.HTTPretty.disable()
httpretty.HTTPretty.reset()
@pytest.fixture
def index_uri():
'''Fixture for the root uri'''
return 'http://fakeuri.example/api/'
@pytest.fixture
def curie():
'''Returns the current curie string'''
return conftest.random_word(2).lower()
@pytest.fixture
def curify(curie):
def _curify(rel):
return curie + ':' + rel
return _curify
@pytest.fixture
def curie_links(curie, index_uri):
'''Returns a templated curie link'''
return [{
'name': curie,
'href': index_uri + 'rels/{rel}',
'templated': True,
}]
@pytest.fixture
def index_page(curie_links, index_uri, http):
'''Registers a basic index page that can be extended'''
doc = {
'_links': {
'curies': curie_links,
'self': {'href': index_uri},
},
'data': conftest.random_paragraphs(),
}
register_hal_page(doc)
return doc
@pytest.fixture
def N(index_uri, index_page):
'''A basic HALNavigator with the index_uri as root'''
return RN.Navigator.hal(index_uri)
class TestNavigator:
'''tests for halnav.Navigator'''
@pytest.fixture
def fake_session(self):
'''Creates a non functional fake session object'''
class FakeNonFuncSession:
headers = {'X-Custom': 'foo'}
return FakeNonFuncSession()
def test_custom_session(self, index_uri, fake_session):
N = RN.Navigator.hal(index_uri, session=fake_session)
N2 = RN.Navigator.hal(index_uri)
assert N._core.session is fake_session
assert N.headers is fake_session.headers
class TestPartialNavigator:
'''tests for halnav.PartialNavigator'''
@pytest.fixture
def rel(self, curify, name):
'''The link relation for the templated link'''
return curify(name)
@pytest.fixture(params=[set(['x']), set(['x', 'y']), set(['x', 'y', 'z'])])
def vars(self, request):
'''A set of random variables'''
return request.param
@pytest.fixture(params=[(0,0,0), (1,2,3)])
def values(self, request):
return dict(zip('xyz', request.param))
@pytest.fixture
def name(self):
'''The name of the templated resource'''
return conftest.random_word(5).lower() + 's'
@pytest.fixture
def post_template(self, name, index_uri, index_page, rel, vars):
'''Creates and registers a post templated link'''
href = "{index_uri}{name}/{{{varpath}}}".format(
index_uri=index_uri,
name=name,
varpath='}/{'.join(v for v in sorted(vars))
)
link = {
'href': href,
'title': 'Templated link for ' + name,
'templated': True,
}
index_page['_links'][rel] = link
return href
@pytest.fixture
def tpl_rel(self, name, curify):
return curify(name + '_tpl')
@pytest.fixture
def posts(self, rel, name, index_uri, index_page, page, tpl_rel):
'''Creates and registers some posts'''
resource0 = page(name, 0)
index_page['_links'][rel] = link_to(resource0)
index_page['_links'][tpl_rel] = {
'href': index_uri + name + '/{id}',
'title': 'Template for ' + name,
'templated': True,
}
register_hal_page(resource0)
last = resource0
for i in range(1, 5):
resource = page(name, i)
last['_links']['next'] = link_to(resource)
last = resource
register_hal_page(resource)
return page.registry[name][:]
@pytest.fixture
def template_partial(self, rel, index_page, N, post_template):
return N[rel]
def test_template_uri(self, template_partial, post_template):
assert template_partial.template_uri == post_template
def test_expand_uri(
self, vars, post_template, template_partial, values):
uri = template_partial.expand_uri(**values)
assert uri == uritemplate.expand(post_template, values)
def test_expand_link(
self, vars, post_template, template_partial, values):
link = template_partial.expand_link(**values)
assert not link.props.get('templated', False)
assert link.uri == uritemplate.expand(post_template, values)
def test_expand(self, vars, post_template, template_partial, values):
post1 = template_partial(**values)
assert not post1.fetched
assert post1.uri == uritemplate.expand(post_template, values)
def test_variables(self, template_partial, vars):
assert template_partial.variables == vars
@pytest.mark.parametrize('i', range(0, 5))
def test_valid_expansion(self, posts, name, N, tpl_rel, i):
partial = N[tpl_rel]
nav = partial(id=i)
nav.fetch()
assert nav.status == (200, 'OK')
assert nav.uri == uri_of(posts[i])
class TestHALNavGetItem:
'''Tests the __getitem__ method of HALNavigator '''
@pytest.fixture
def names(self):
namelist = [conftest.random_word().lower() for _ in range(3)]
def _names(i):
return namelist[i]
return _names
@pytest.fixture
def rels(self, names, curify):
def _rels(i):
return curify(names(i))
return _rels
@pytest.fixture
def resources(self, names, rels, index_page, index_uri, page):
last = index_page
for i in range(3):
new = page(names(i), i)
last['_links'][rels(i)] = {
'href': uri_of(new),
'title': "Page for " + names(i)
}
last = new
def test_fetch_behavior(self, N, resources, rels):
Na = N[rels(0)]
Nb = N[rels(0), rels(1)]
assert Na.fetched
assert not Nb.fetched
def test_sequence_equivalence(self, N, resources, rels):
Na = N[rels(0), rels(1), rels(2)]
Nb = N[rels(0)][rels(1)][rels(2)]
assert Na is Nb
@pytest.fixture
def link_resources(self, rels, names, index_page, page):
first = page(names(0), 1)
index_page['_links'][rels(0)] = link_to(first)
register_hal_page(first)
second1 = page(names(1), 1)
second2 = page(names(1), 2)
first['_links'][rels(1)] = [
{
'href': uri_of(second1),
'name': 'name_x',
},{
'href': uri_of(second2),
'name': 'name_y',
}
]
register_hal_page(second1)
register_hal_page(second2)
third_1 = page(names(2), 1)
third_2 = page(names(2), 2)
second1['_links'][rels(2)] = link_to(third_1)
second2['_links'][rels(2)] = link_to(third_2)
register_hal_page(third_1)
register_hal_page(third_2)
def test_linklist_in_sequence(self, N, link_resources, rels):
Nchained = N[rels(0), rels(1), 'name':'name_x', rels(2)]
Nfirst = N[rels(0)]
Nsecondlist = Nfirst[rels(1)]
Nsecond = Nsecondlist.get_by('name', 'name_x')
Nthird = Nsecond[rels(2)]
assert Nchained is Nthird
def test_linklist_index(self, N, link_resources, rels):
Nchained = N[rels(0), rels(1), 1, rels(2)]
Nfirst = N[rels(0)]
Nsecondlist = Nfirst[rels(1)]
Nsecond = Nsecondlist[1]
Nthird = Nsecond[rels(2)]
assert Nchained is Nthird
def test_bad_rel(self, N, link_resources, rels):
with pytest.raises(exc.OffTheRailsException):
N[rels(1)]
with pytest.raises(exc.OffTheRailsException):
N[rels(0), rels(0)]
def test_bad_name(self, N, link_resources, rels):
with pytest.raises(exc.OffTheRailsException):
N[rels(0), rels(1), 'name':'badname']
def test_bad_index(self, N, link_resources, rels):
with pytest.raises(exc.OffTheRailsException):
N[rels(0), rels(1), 100]
@pytest.fixture
def template_uri(self, index_uri):
return index_uri + 'tpl/{id}'
@pytest.fixture
def tpl_rel(self, curify):
return curify('tpl')
@pytest.fixture
def tpl_resources(self, page, tpl_rel, template_uri, index_page):
index_page['_links'][tpl_rel] = {
'href': template_uri,
'templated': True,
'title': 'Template link',
}
for i in range(3):
resource = page('tpl', i)
register_hal_page(resource)
return template_uri
def test_template_sequence(self, N, tpl_resources, tpl_rel):
Na = N[tpl_rel](id=0)
Nb = N[tpl_rel](id=1)
Nc = N[tpl_rel](id=2)
Na(), Nb(), Nc()
assert Na.status == (200, 'OK')
assert Nb.status == (200, 'OK')
assert Nc.status == (200, 'OK')
class TestEmbedded:
'''tests for embedded document features'''
@pytest.fixture
def blog_posts(self, http, page):
'''Posts are both linked and embedded'''
_posts = [page('post', x) for x in range(3)]
for post in _posts:
register_hal_page(post)
return _posts
@pytest.fixture
def comments(self, page):
'''Comments are embedded only and have no self link'''
comments = [page('comments', x) for x in range(3)]
for comment in comments:
del comment['_links']['self']
return comments
@pytest.fixture
def nested(self, page):
'''Nested are several layers deep embedded docs. They are not
linked to, but do have urls.
'''
nested = [page('nested', n) for n in range(3)]
for (nest1, nest2) in zip(nested[:-1], nested[1:]):
nest1['_embedded'] = {
'xx:nested': nest2
}
register_hal_page(nest1)
register_hal_page(nest2) # register remaining page
return nested
@pytest.fixture
def index(self, index_uri, comments, blog_posts, http, nested):
doc = {
'_links': {
'curies': [{
'name': 'xx',
'href': index_uri + 'rels/{rel}',
'templated': True,
}],
'self': {'href': index_uri},
'first': link_to(blog_posts[0]),
'xx:second': link_to(blog_posts[1]),
'xx:posts': [link_to(post) for post in blog_posts],
'xx:nested-links': [link_to(nest) for nest in nested],
'xx:non-embedded-nest': link_to(nested[0]),
},
'data': 'Some data here',
'_embedded': {
'xx:posts': blog_posts,
'xx:comments': comments,
}
}
register_hal_page(doc)
return doc
def test_comments_are_orphans(self, N, index):
'''Checks that all embedded documents that don't have self
links are OrphanHALNavigators
'''
comments = N['xx:comments']
for comment in comments:
assert comment.parent is N
def test_posts_arent_orphans(self, N, index):
posts = N['xx:posts']
for i, post in enumerate(posts):
href = index['_embedded']['xx:posts'][i]['_links']['self']['href']
assert post.uri == href
def test_length_accurate(self, N, index, comments):
assert len(N['xx:comments']) == len(comments)
def test_embedded_only_rel_in_navigator(self, N, index):
N.fetch()
assert 'xx:comments' in N
def test_embedded_only_rel_not_in_links(self, N, index):
assert 'xx:comments' not in N.links()
def test_embedded_only_rel_in_embedded(self, N, index):
assert 'xx:comments' in N.embedded()
def test_both_rel_in_navigator(self, N, index):
N.fetch()
assert 'xx:posts' in N
def test_both_rel_in_links(self, N, index):
assert 'xx:posts' in N.links()
def test_both_rel_in_embedded(self, N, index):
assert 'xx:posts' in N.embedded()
def test_embedded_default_curie(self, N, index):
N._core.default_curie = 'xx'
p1 = N['posts']
assert p1 is N['xx:posts']
def test_nested_works(self, N, index, nested):
nest1 = N['xx:non-embedded-nest']
nest2 = nest1['xx:nested']
nest3 = nest2['xx:nested']
nest3_chained = N['xx:non-embedded-nest', 'xx:nested', 'xx:nested']
assert nest3 is nest3_chained
def test_fetch_then_get_embedded(self, N, index):
# for this test, nested[0] is linked from index, but not
# embedded anywhere. nested[1] is embedded in nested[0], but
# is also its own resource. We want to ensure the same
# navigator object is used for both
nested1 = N['xx:nested-links'][1]
nested1.fetch()
assert N['xx:non-embedded-nest', 'xx:nested'] is nested1
def test_get_embedded_then_fetch(self, N, index):
# reverse order of previous test
nested1 = N['xx:non-embedded-nest', 'xx:nested']
nested1_linked = N['xx:nested-links'][1]
# Nothing we've done to nested1_linked should have resolved it
# except that we already saw it as an embedded doc.
assert nested1_linked.resolved
assert nested1 is nested1_linked
def test_cached_embedded_requests(self, N, index, http):
N.fetch()
main_nav_request = http.last_request
embedded = N.embedded()['xx:posts'][0]
# get the cached state of the embedded resource, no additional
# http requests should be made.
embedded()
assert main_nav_request is http.last_request
# if we explicitly refetch the embedded (not orphaned) Navigator, we
# expect that a new http request is being made.
embedded.fetch()
assert main_nav_request is not http.last_request
class TestCreate:
@pytest.fixture
def new_resource(self, page):
grelp = page('grelp', 0)
register_hal_page(grelp)
return grelp
@pytest.fixture(params=[302, 303, 202, 202, 303])
def post_status(self, request):
return request.param
@pytest.fixture
def hosts(self, page, http, post_status, new_resource):
host_page = page('hosts', 0)
register_hal_page(
host_page,
method='POST',
status=post_status,
location=uri_of(new_resource),
)
return host_page
@pytest.fixture
def index(self, hosts, index_uri):
doc = {
'_links': {
'self': {'href': index_uri},
'xx:create-hosts': link_to(hosts),
}
}
register_hal_page(doc)
return doc
def test_uses_post(self, N, index, http):
N['xx:create-hosts'].create({'name': 'foo'})
last_request_method = http.last_request.method
assert last_request_method == 'POST'
def test_content_type_json(self, N, index, http):
N['xx:create-hosts'].create({'name': 'foo'})
last_content_type = http.last_request.headers['Content-Type']
assert last_content_type == 'application/json'
def test_body_is_correct(self, N, index, http):
N['xx:create-hosts'].create({'name': 'foo'})
last_body = http.last_request.body
assert last_body == b'{"name": "foo"}'
def test_new_resource_uri_correct(
self, N, index, new_resource, post_status):
N2 = N['xx:create-hosts']
N3 = N2.create({'name': 'foo'})
if post_status == 202:
assert N3.parent.uri == N2.uri
assert N3.fetched
else:
assert N3.uri == uri_of(new_resource)
assert not N3.fetched
def test_headers_passed(self, N, index, http):
headers = {'X-Custom': 'foo'}
N['xx:create-hosts'].create({'name': 'foo'}, headers=headers)
custom_header = http.last_request.headers['X-Custom']
assert custom_header == 'foo'
def test_files_passed(self, N, index, http):
headers = {'X-Custom': 'foo'}
N['xx:create-hosts'].create(files={'file': ('filename', )}, headers=headers)
        custom_header = http.last_request.headers['X-Custom']
        assert custom_header == 'foo'
def test_empty_post(self, N, index):
# Just want to ensure no error is thrown
N['xx:create-hosts'].create()

# ===== pony_express/solution.py (ijkilchenko/google_code_jam, MIT) =====
import sys
def read(f):
with open(f) as file:
lines = file.readlines()
T = int(lines[0])
line = 1
for t in range(1, T+1):
N, Q, H, A, UV, line = _get_case(line, lines)
y = solve(N, Q, H, A, UV)
        print('Case #%i: %s' % (t, ' '.join('%0.6f' % v for v in y)))
def _get_case(line, lines):
N, Q = [int(s) for s in lines[line].split()]
H = []
for r in range(N):
row = [int(s) for s in lines[line+1+r].split()]
H.append(row)
line = line + 1 + r
A = []
for r in range(N):
row = [int(s) for s in lines[line+1+r].split()]
A.append(row)
line = line + 1 + r
UV = []
for r in range(Q):
row = [int(s) for s in lines[line+1+r].split()]
UV.append(row)
line = line + 1 + r
return N, Q, H, A, UV, line
def solve(N, Q, H, A, UV):
    # Standard approach for the "Pony Express" problem: all-pairs shortest
    # road distances first, then all-pairs shortest delivery times over
    # single-horse legs. H[i] = [endurance, speed] of the horse stabled in
    # city i; A[i][j] = -1 means there is no direct road from i to j.
    INF = float('inf')

    def floyd(M):
        # Floyd-Warshall, relaxing M in place
        for k in range(N):
            for i in range(N):
                for j in range(N):
                    if M[i][k] + M[k][j] < M[i][j]:
                        M[i][j] = M[i][k] + M[k][j]

    D = [[0 if i == j else (A[i][j] if A[i][j] > 0 else INF)
          for j in range(N)] for i in range(N)]
    floyd(D)
    # time for horse i to ride straight from i to j, if within its endurance
    T = [[D[i][j] / H[i][1] if D[i][j] <= H[i][0] else INF
          for j in range(N)] for i in range(N)]
    floyd(T)
    # one minimum time per (U, V) delivery query (cities are 1-indexed)
    return [T[u - 1][v - 1] for u, v in UV]
read('sample.in')
#read(sys.argv[1])
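# Expected input layout (assuming the standard Code Jam "Pony Express" format):
#   line 1: T (number of cases); then per case: "N Q", N lines "E_i S_i"
#   (each horse's endurance and speed), N lines of the N x N distance matrix
#   (-1 = no direct road), and Q lines "U V" (one delivery query each).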

# ===== corpus_process.py (Protostars/CSeg, MIT) =====
import re
import os
import pickle
import sys
from math import log
DICT_NAME = "dict.txt"
BI_GRAM_FILE = "bi_gram.txt"
HMM_PROB = "hmm_prob"
SMALL_PROB = 1e-200
# Match CJK characters, digits, letters, full-width characters, and +, #, &, ., %
re_ch = re.compile("([\u4E00-\u9FD5a-zA-Z0-9\uFF10-\uFF5A+#&\._%%]+)", re.U)
re_stop = re.compile("([。,]+)", re.U)
# Process the word-segmented corpus; build the dictionary and the 2-gram list.
# Corpus format: one sentence per line, words separated by spaces.
def process(input_file, output_path):
words = {}
bi_grams = {}
max_word_length = 0
m_w = ''
with open(input_file, 'r', encoding='utf8') as f:
for line in f:
line = line.strip()
if not line: continue
word_line = line.split()
last_word = '<BOS>'
for w in word_line:
if re_ch.match(w):
                    words[w] = words.get(w, 0) + 1  # tokens that don't match are symbols such as 、, etc.
if last_word:
bg = last_word + ' ' + w
bi_grams[bg] = bi_grams.get(bg, 0) + 1
last_word = w
if len(w) > max_word_length:
max_word_length = len(w)
m_w = w
elif re_stop.match(w):
if last_word:
bg = last_word + ' <EOS>'
bi_grams[bg] = bi_grams.get(bg, 0) + 1
last_word = '<BOS>'
if last_word:
bg = last_word + ' <EOS>'
bi_grams[bg] = bi_grams.get(bg, 0) + 1
print("字典大小:%d" % len(words))
print("最长词长度:%d %s" % (max_word_length,m_w))
with open(os.path.join(output_path, DICT_NAME), 'w', encoding='utf8') as f:
for k in sorted(words):
f.write("%s %d\n" % (k, words[k]))
print("2-gram 数量:%d" % len(bi_grams))
with open(os.path.join(output_path, BI_GRAM_FILE), 'w', encoding='utf8') as f:
for k in sorted(bi_grams):
f.write("%s %d\n" % (k, bi_grams[k]))
# Estimate the probabilities of the HMM model
def process_hmm(input_file, output_path):
line_count = 0
state_list = ['B', 'M', 'E', 'S']
A = {}
B = {}
Pi = {}
State_Count = {}
for s in state_list:
        A[s] = {t: 0. for t in state_list}  # transition probabilities
        B[s] = {}  # emission (observation) probabilities
        Pi[s] = 0.  # initial state probabilities
State_Count[s] = 0
    print('Estimating HMM probabilities...')
with open(input_file, 'r', encoding='utf8') as f:
for line in f:
line_count += 1
line = line.strip()
if not line: continue
word_list = line.split()
chars = ''.join(word_list)
states = []
for w in word_list:
if len(w) == 1: states.append('S')
else: states += ['B']+['M']*(len(w)-2)+['E']
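            # e.g. the words ["中国", "人民"] give chars "中国人民" and
            # states ['B', 'E', 'B', 'E']; single-character words map to 'S'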
assert len(chars) == len(states)
i = 0
for s in states:
State_Count[s] += 1
if i == 0:
Pi[s] += 1.
else:
A[states[i-1]][s] += 1.
B[s][chars[i]] = B[s].get(chars[i], 0) + 1.
i += 1
A = {k: {tk: log(max(tv/State_Count[k], SMALL_PROB)) for tk, tv in v.items()} for k, v in A.items()}
B = {k: {tk: log(max(tv/State_Count[k], SMALL_PROB)) for tk, tv in v.items()} for k, v in B.items()}
Pi = {k: log(max(v/line_count, SMALL_PROB)) for k, v in Pi.items()}
with open(os.path.join(output_path, HMM_PROB), 'wb') as f:
pickle.dump(A, f)
pickle.dump(B, f)
pickle.dump(Pi, f)
if __name__ == '__main__':
argv = sys.argv[1:]
if len(argv) < 2:
print('corpus_process.py <corpus_file> <out_dir>')
sys.exit()
else:
process(argv[0], argv[1])
process_hmm(argv[0], argv[1])
print("处理完成")

# ===== main.py (tvirus-01/scraping-seekingalpha, MIT) =====
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import requests
import time
import os
import csv
root_url = "https://seekingalpha.com"
query = "stock repurchase program"
url = "https://seekingalpha.com/search?q="+query.replcae(" ", "+")
chrome_driver_path = "/usr/lib/chromium-browser/chromedriver" #add your own driver path
opts = Options()
opts.add_argument("--headless")
opts.add_argument("--no-sandbox")
opts.add_argument("user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36")
driver = webdriver.Chrome(chrome_driver_path, options=opts)
driver.get(url)
time.sleep(5)
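# give the JavaScript-rendered search results time to load before parsing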
soup = BeautifulSoup(driver.page_source, 'lxml')
result_list = soup.find("div", {"id":"result_list"})
result_page = result_list.find("div", {"class":"result-pages"})
fields = ['Title', 'Link', 'MetaData', 'Summary']
csv_rows = []
for a in result_page.find_all("a"):
link = a['href']
new_url = url+link
driver.get(new_url)
time.sleep(5)
new_soup = BeautifulSoup(driver.page_source, 'lxml')
new_result_list = new_soup.find("div", {"id":"result_list"})
items = new_result_list.find_all("li")
for item in items:
item_link = item.find("div", {"class":"item-link"})
item_link_a = item_link.find("a")
item_meta = item.find("div", {"class":"item-metadata"})
item_summary = item.find("div", {"class":"item-summary"})
name = item_link_a.text.replace(" ", "").replace("\n", "")
link = root_url+item_link_a['href']
metadata = item_meta.text.replace(" ", "")
summary = item_summary.text
csv_rows.append([str(name), str(link), str(metadata), str(summary)])
with open("SeekingAlpha.csv", 'w') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(fields)
csvwriter.writerows(csv_rows)
print("Done") | 30.53125 | 153 | 0.680655 | 274 | 1,954 | 4.693431 | 0.390511 | 0.046656 | 0.037325 | 0.037325 | 0.139969 | 0.093313 | 0 | 0 | 0 | 0 | 0 | 0.01816 | 0.154555 | 1,954 | 64 | 154 | 30.53125 | 0.760291 | 0.012283 | 0 | 0.043478 | 0 | 0.021739 | 0.236269 | 0.031088 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.152174 | 0 | 0.152174 | 0.021739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |

# ===== falx/utils/generate_random_traces.py (Mestway/falx, BSD-2-Clause) =====
import eval_utils
import json
from falx.eval_interface import FalxEvalInterface
from falx.utils import table_utils
from timeit import default_timer as timer
import numpy as np
from pprint import pprint
from falx.visualization.chart import VisDesign, LayeredChart
from falx.visualization.matplotlib_chart import MatplotlibChart
import falx.visualization.visual_trace as visual_trace
np.random.seed(2019)
def get_mark_type(chart):
chart_obj = chart.to_vl_obj()
marks = [chart_obj['mark']] if "mark" in chart_obj else [layer["mark"] for layer in chart_obj["layer"]]
marks = [m if isinstance(m, (str,)) else m["type"] for m in marks]
return marks
def process_data(bid, num_samples_dict):
f_in = f"../../benchmarks/{bid}.json"
for k in [2, 3, 4]:
if bid in num_samples_dict[k]:
break
num_samples = k
with open(f_in, "r") as f:
data = json.load(f)
#print(data)
input_data = data["input_data"]
extra_consts = data["constants"] if "constants" in data else []
vis = VisDesign.load_from_vegalite(data["vl_spec"], data["output_data"])
full_trace = vis.eval()
partitioned = visual_trace.partition_trace(full_trace)
sample_trace = []
raw_sample_trace = []
raw_full_trace = []
for key in partitioned:
ty = "bar" if key in ["BarV","BarH"] else ("line" if key == "Line" else ("point" if key == "Point" else "area"))
traces = partitioned[key]
num_samples = int(np.ceil(num_samples / 2.0)) if ty == "line" or ty == "area" else num_samples
indexes = np.random.choice(list(range(len(traces))), num_samples)
samples = [traces[i] for i in indexes]
tr_table = visual_trace.trace_to_table(samples)
full_tr_table = visual_trace.trace_to_table(traces)
for tr in full_tr_table[key]:
raw_full_trace.append({"type": ty, "props": tr})
for tr in tr_table[key]:
raw_sample_trace.append({"type": ty, "props": tr})
if ty == "line":
kreplace = lambda x: "x" if x in ["x1", "x2"] else "y" if x in ["y1", "y2"] else x
sample_trace.append({"type": ty, "props": {kreplace(k):tr[k] for k in ["x1", "y1", "size", "color", "column"] if k in tr}})
sample_trace.append({"type": ty, "props": {kreplace(k):tr[k] for k in ["x2", "y2", "size", "color", "column"] if k in tr}})
elif ty == "bar":
kreplace = lambda x: "x" if x in ["x1"] else "y" if x in ["y1"] else x
sample_trace.append({"type": ty, "props": {kreplace(k):tr[k] for k in tr}})
elif ty == "point":
sample_trace.append({"type": ty, "props": tr})
elif ty == "area":
kreplace = lambda x: "x" if x in ["x1", "x2"] else ("y2" if x in ["yb1", "yb2"] else "y" if x in ["yt1", "yt2"] else x)
sample_trace.append({"type": ty, "props": {kreplace(k):tr[k] for k in ["x1", "yt1", "yb1", "color", "column"] if k in tr}})
sample_trace.append({"type": ty, "props": {kreplace(k):tr[k] for k in ["x2", "yt2", "yb2", "color", "column"] if k in tr}})
data["sample_trace"] = sample_trace
data["raw_sample_trace"] = raw_sample_trace
data["raw_full_trace"] = raw_full_trace
return data
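# Each element of sample_trace / raw_*_trace has the shape
# {"type": "bar" | "line" | "point" | "area", "props": {channel: value, ...}},
# e.g. {"type": "point", "props": {"x": 1, "y": 2, "color": "a"}}
# (the values shown are illustrative).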
if __name__ == '__main__':
benchmark_ids = [
"test_1", "test_2", "test_3", "test_4", "test_5", "test_6", "test_7",
"test_8", "test_9", "test_10", "test_11", "test_12", "test_13", "test_14",
"test_15", "test_16", "test_17", "test_18", "test_19", "test_20", "test_21",
"test_22", "test_23",
"001", "002", "003", "004", "005", "006", "007", "008", "009", "010",
"011", "012", "013", "014", "015", "016", "017", "018", "019", "020",
"021", "022", "023", "024", "025", "026", "027", "028", "029", "030",
"031", "032", "033", "034", "035", "036", "037", "038", "039", "040",
"041", "042", "043", "044", "045", "046", "047", "048", "049", "050",
"051", "052", "053", "054", "055", "056", "057", "058", "059", "060",
]
num_samples_dict = {
1: ['test_21', '050', '025', '058', '001', '011', 'test_7', '042', '032', '012', 'test_15', 'test_10', '023', 'test_1', '052', 'test_6', '035', '010', '006', '054', '051', 'test_14', '056', '024', '017', '053', '020', '033', '031', 'test_8', '047', '030', '029', 'test_2', 'test_11', 'test_13'],
2: ['test_21', '025', '050', '001', '011', '058', '012', '032', 'test_7', 'test_10', '010', '017', '023', '042', '052', 'test_15', '035', 'test_6', 'test_1', '006', '054', '051', 'test_14', '024', '053', '056', '009', '020', '033', 'test_8', '031', '047', '030', 'test_16', '029', 'test_2', '034', 'test_13', '014', '037', 'test_12', 'test_11', 'test_23'],
3: ['test_21', '015', '050', '001', '025', '011', '058', '012', '006', '032', 'test_7', '010', 'test_10', '017', '023', 'test_1', '042', 'test_6', 'test_15', '052', '035', '005', '045', '054', '051', 'test_14', '007', '038', '041', '022', '024', '053', '056', '020', '009', '033', 'test_22', '004', 'test_8', 'test_2', '031', 'test_13', '047', 'test_16', '029', '030', '034', '014', '037', 'test_12', 'test_11', 'test_23', '044'],
4: ['058', '015', 'test_21', '050', '006', '011', '032', 'test_7', '010', 'test_10', '012', '017', '025', 'test_14', '023', '042', 'test_15', 'test_1', '052', 'test_6', '005', '035', '045', '001', '051', '038', '007', '041', '022', '054', '016', '024', '056', '053', '009', '020', 'test_17', '033', '021', '008', '044', '031', '030', '047', 'test_16', 'test_22', '004', '029', 'test_13', '034', 'test_2', 'test_8', '014', 'test_11', 'test_12', '037', 'test_23']
}
#benchmark_ids = ["test_4"]
full_data = []
for i, bid in enumerate(benchmark_ids):
data = process_data(bid, num_samples_dict)
full_data.append(data)
print(json.dumps(full_data))

# ===== Python Advanced/3. Multidimensional Lists/Exercise/08. Bombs.py (a-shiro/SoftUni-Courses, MIT) =====
from collections import deque
def read_matrix(dimensions):
mtrx = []
for _ in range(dimensions):
        row = [int(n) for n in input().split(' ')]
        mtrx.append(row)
return mtrx
def get_cells(matrix):
cells_alive = 0
cells_sum = 0
for r in matrix:
for c in r:
if c > 0:
cells_alive += 1
cells_sum += c
return cells_alive, cells_sum
dimensions = int(input())
matrix = read_matrix(dimensions)
bombs = deque([[int(y) for y in x.split(",")] for x in input().split(' ')])
for b in range(len(bombs)):
bomb_row, bomb_col = bombs.popleft()
damage = matrix[bomb_row][bomb_col]
if matrix[bomb_row][bomb_col] > 0:
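        # scan the 3 x 3 window centred on the bomb cell (the bomb cell itself
        # included), staying inside the bounds of the square matrix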
for row in range(3):
for col in range(3):
if 0 <= bomb_row - 1 + row < len(matrix) and 0 <= bomb_col - 1 + col < len(matrix):
if matrix[bomb_row - 1 + row][bomb_col - 1 + col] > 0:
matrix[bomb_row - 1 + row][bomb_col - 1 + col] -= damage
alive, total = get_cells(matrix)
print(f'Alive cells: {alive}')
print(f'Sum: {total}')
for el in matrix:
print(' '.join(str(x) for x in el))

# ===== ndd/counts.py (simomarsili/ndd, BSD-3-Clause) =====
# -*- coding: utf-8 -*-
"""CountsDistribution class."""
import json
import logging
from collections.abc import Mapping, MappingView
from types import GeneratorType
import numpy
import ndd.fnsb
from ndd.exceptions import NddError
logger = logging.getLogger(__name__)
def unique(nk, sort=True):
"""Return nk, zk"""
counter = ndd.fnsb.counter
counter.fit(nk)
nk = counter.nk
zk = counter.zk
unique.counter = counter
# always return a copy
if sort:
ids = numpy.argsort(nk)
nk = nk[ids]
zk = zk[ids]
else:
nk = numpy.array(nk)
zk = numpy.array(zk)
return nk, zk
def as_counts_array(counts):
"""Convert input to counts array."""
if isinstance(counts, (Mapping, MappingView)):
return numpy.fromiter(counts.values(), dtype=int)
if isinstance(counts, (GeneratorType, map, filter)):
return numpy.fromiter(counts, dtype=int)
return numpy.asarray(counts)
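# For example (a quick sketch of the conversions above):
#   as_counts_array({'a': 3, 'b': 1})         -> array([3, 1])
#   as_counts_array(x * 2 for x in range(3))  -> array([0, 2, 4])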
def check_k(k):
"""
if k is an integer, just check
if an array set k = prod(k)
if None, return
Raises
------
NddError
If k is not valid (wrong type, negative, too large...)
"""
MAX_LOGK = 200 * numpy.log(2)
if k is None:
return k
try:
k = numpy.float64(k)
except ValueError:
raise NddError('%r is not a valid cardinality' % k)
if k.ndim:
# if k is a sequence, set k = prod(k)
if k.ndim > 1:
raise NddError('k must be a scalar or 1D array')
        logk = numpy.sum(numpy.log(k))  # sum of logs; passing a generator to numpy.sum is unreliable
if logk > MAX_LOGK:
# too large a number; backoff to n_bins?
# TODO: log warning
            raise NddError('k is too large (%e). '
                           'Must be < 2^200' % numpy.exp(logk))
k = numpy.prod(k)
else:
# if a scalar check size
if k <= 0:
raise NddError('k must be > 0 (%r)' % k)
if numpy.log(k) > MAX_LOGK:
            raise NddError('k is too large (%e). Must be < 2^200' % k)
if not k.is_integer():
raise NddError('k must be a whole number (got %r).' % k)
return k
class CountsDistribution:
"""
Contains counts data and statistics.
Parameters
----------
nk : array-like
Unique frequencies in a counts array.
zk : array_like, optional
Frequencies distribution or "multiplicities".
Must be len(zk) == len(nk).
k : int or array-like, optional
Alphabet size (the number of bins with non-zero probability).
Must be >= len(nk). A float is a valid input for whole numbers
(e.g. k=1.e3). If an array, set k = numpy.prod(k).
Default: k = sum(nk > 0)
"""
def __init__(self, *, nk=None, zk=None, k=None):
self.nk = None
self.k = None
self.zk = None
self._n = None
self._k1 = None
self.counts = None
if (nk is None) != (zk is None):
raise NddError('nk and zk should be passed together.')
if nk is not None:
self.nk = as_counts_array(nk)
self.zk = as_counts_array(zk)
self._n = numpy.sum(self.zk * self.nk)
self._k1 = numpy.sum(self.zk[self.nk > 0])
if k is not None:
self.k = check_k(k)
def __repr__(self):
return 'CountsDistribution(nk=%r, k=%r, zk=%r)' % (self.nk, self.k,
self.zk)
def __str__(self):
return json.dumps(
{
'nk': [int(x) for x in self.nk],
'k': self.k,
'zk': [int(x) for x in self.zk]
},
indent=4)
def fit(self, counts):
"""Fit nk, zk (multiplicities) from counts array."""
counts = as_counts_array(counts)
self.nk, self.zk = unique(counts)
self._n = numpy.sum(self.zk * self.nk)
self._k1 = numpy.sum(self.zk[self.nk > 0])
return self
@property
def normalized(self):
"""CountsDistribution are normalized."""
if self.nk is None:
return False
return (len(self.nk) == 1 and self.nk[0] == 0
and numpy.isclose(sum(self.nk), 1))
def random(self, k=1000, n=100):
"""Generate random counts and fit multiplicities."""
a = numpy.random.randint(k, size=n)
_, self.counts = numpy.unique(a, return_counts=1)
self.nk, self.zk = numpy.unique(self.counts, return_counts=1)
return self
@staticmethod
def sorted_are_equal(a, b):
"""True if sorted arrays are equal."""
def int_sort(x):
return sorted(x.astype(numpy.int32))
return int_sort(a) == int_sort(b)
def __eq__(self, other):
return (self.sorted_are_equal(self.nk, other.nk)
and self.sorted_are_equal(self.zk, other.zk))
@property
def n(self):
"""Number of samples"""
if self._n is None:
self._n = numpy.sum(self.zk * self.nk)
return self._n
@property
def k1(self):
"""Number of bins with counts > 0."""
if self._k1 is None:
self._k1 = numpy.sum(self.zk[self.nk > 0])
return self._k1
@property
def coincidences(self):
"""Number of coincidences."""
return self.n - self.k1
@property
def sampling_ratio(self):
"""The strongly undersampled regime is defined as ratio < 0.1"""
return self.coincidences / self.n
@property
def multiplicities(self):
"""Return counts and their frequencies as (counts, frequencies)."""
return self.nk, self.zk
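# A minimal usage sketch (assumes the compiled ndd.fnsb extension is available):
#   dist = CountsDistribution().fit([4, 4, 2, 2, 2, 1])
#   dist.multiplicities  # (nk, zk): the unique counts and their frequencies
#   dist.n               # 15 samples in total
#   dist.coincidences    # n - k1 = 15 - 6 = 9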
| 28.522613 | 75 | 0.555673 | 779 | 5,676 | 3.970475 | 0.215661 | 0.036857 | 0.023278 | 0.027158 | 0.137084 | 0.102166 | 0.079534 | 0.079534 | 0.071452 | 0.071452 | 0 | 0.013552 | 0.323996 | 5,676 | 198 | 76 | 28.666667 | 0.792546 | 0.21494 | 0 | 0.145161 | 0 | 0 | 0.061129 | 0.005833 | 0 | 0 | 0 | 0.005051 | 0 | 1 | 0.137097 | false | 0.008065 | 0.056452 | 0.032258 | 0.362903 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
745b6a06e7b05b2a82df687d97773686b7b6c978 | 1,999 | py | Python | events-svc/app/actions/event_actions.py | zerodevgroup/fastapi-referece-architecture | 185e4b3036799fec2044dd415022ba0760b5bb45 | [
"MIT"
] | null | null | null | events-svc/app/actions/event_actions.py | zerodevgroup/fastapi-referece-architecture | 185e4b3036799fec2044dd415022ba0760b5bb45 | [
"MIT"
] | null | null | null | events-svc/app/actions/event_actions.py | zerodevgroup/fastapi-referece-architecture | 185e4b3036799fec2044dd415022ba0760b5bb45 | [
"MIT"
] | null | null | null | import aiohttp
import base64
import os
from events import EventIn, EventOut, EventUpdate, PaymentIn  # PaymentIn is assumed to live with the other schemas
from fastapi.logger import logger
from db import events, database
STRIPE_CHARGES_URL = os.getenv("STRIPE_CHARGES_URL")
STRIPE_API_KEY = os.getenv("STRIPE_API_KEY")
async def add_event(payload: EventIn):
logger.debug(f"Service: Adding event with {payload}")
query = events.insert().values(**payload.dict())
return await database.execute(query=query)
async def get_all_events():
logger.debug(f"Service: Getting all events")
query = events.select()
return await database.fetch_all(query=query)
async def get_event(id):
logger.debug(f"Service: Getting event {id}")
    query = events.select().where(events.c.id == id)
return await database.fetch_one(query=query)
async def delete_event(id: int):
logger.debug(f"Service: Deleting event {id}")
    query = events.delete().where(events.c.id == id)
return await database.execute(query=query)
async def update_event(id: int, payload: EventIn):
logger.debug(f"Service: Updating event {id} with {payload}")
query = (
events
.update()
.where(events.c.id == id)
.values(**payload.dict())
)
return await database.execute(query=query)
async def add_stripe_payment(payload: PaymentIn):
logger.debug(f"Service: Adding stripe payment with {payload}")
# Convert amount to stripe (implied decimals)
stripeAmount = int(payload.amount * 100)
stripe_payload = {
"amount": stripeAmount,
"currency": payload.currency,
"source": payload.source,
"description": payload.description,
}
    auth_token = base64.b64encode(f"{STRIPE_API_KEY}:".encode()).decode()
    headers = {"Authorization": "Basic " + auth_token}
async with aiohttp.ClientSession() as session:
async with session.post(STRIPE_CHARGES_URL, data=stripe_payload, headers=headers) as resp:
return await resp.text()
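# Sketch of how a FastAPI router might wire up these actions (hypothetical;
# the actual route definitions live elsewhere in the service):
#
#   @router.post("/events", status_code=201)
#   async def create_event(payload: EventIn):
#       event_id = await add_event(payload)
#       return {**payload.dict(), "id": event_id}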
| 32.241935 | 98 | 0.69935 | 257 | 1,999 | 5.346304 | 0.315175 | 0.034935 | 0.052402 | 0.082969 | 0.291849 | 0.19869 | 0.150655 | 0.120815 | 0.088792 | 0.088792 | 0 | 0.005505 | 0.182091 | 1,999 | 61 | 99 | 32.770492 | 0.834862 | 0.021511 | 0 | 0.0625 | 0 | 0 | 0.15609 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.145833 | 0 | 0.270833 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
745d592616ed0c1497992e114b4112aa3cdca55c | 1,006 | py | Python | backend/server_flask.py | sigmarising/Multimedia-Homework-website | 38fdf0c2c539b2d02d4062c55982f1e9df5f5ef4 | [
"MIT"
] | null | null | null | backend/server_flask.py | sigmarising/Multimedia-Homework-website | 38fdf0c2c539b2d02d4062c55982f1e9df5f5ef4 | [
"MIT"
] | null | null | null | backend/server_flask.py | sigmarising/Multimedia-Homework-website | 38fdf0c2c539b2d02d4062c55982f1e9df5f5ef4 | [
"MIT"
] | null | null | null | import os
from flask import Flask, send_from_directory, request, jsonify
from werkzeug.utils import secure_filename
# Path settings
SERVER_PATH = os.path.abspath(os.path.dirname(__file__))
FTP_PATH = os.path.abspath(os.path.join(SERVER_PATH, 'ftp/'))
# Flask app settings
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
@app.route('/api/getFilesList', methods=['POST', 'GET'])
def api_get_files_list():
files_list = os.listdir(FTP_PATH)
files_list.sort()
return jsonify({"list": files_list})
@app.route('/api/uploadFile', methods=['POST', 'GET'])
def api_upload_file():
    file = request.files['file']
    # sanitize the client-supplied name so it cannot escape FTP_PATH
    file_path = os.path.abspath(os.path.join(FTP_PATH, secure_filename(file.filename)))
    file.save(file_path)
return jsonify({})
@app.route('/api/downloadFile/<path:filename>', methods=['POST', 'GET'])
def api_download_file(filename):
return send_from_directory(FTP_PATH, filename, as_attachment=True)
if __name__ == "__main__":
if not os.path.exists(FTP_PATH):
os.makedirs(FTP_PATH)
app.run(debug=True)
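# Example requests against a local run (assuming Flask's default port 5000):
#   curl http://localhost:5000/api/getFilesList
#   curl -F "file=@report.pdf" http://localhost:5000/api/uploadFile
#   curl -O http://localhost:5000/api/downloadFile/report.pdf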
| 27.189189 | 72 | 0.709742 | 146 | 1,006 | 4.59589 | 0.342466 | 0.062593 | 0.044709 | 0.076006 | 0.204173 | 0.114754 | 0.080477 | 0 | 0 | 0 | 0 | 0 | 0.131213 | 1,006 | 36 | 73 | 27.944444 | 0.767735 | 0.031809 | 0 | 0 | 0 | 0 | 0.122554 | 0.033986 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.083333 | 0.041667 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
745fc74246c6c357dfc1fd1dc49a06020766a990 | 1,240 | py | Python | Python/468.py | JWang169/LintCodeJava | b75b06fa1551f5e4d8a559ef64e1ac29db79c083 | [
"CNRI-Python"
] | 1 | 2020-12-10T05:36:15.000Z | 2020-12-10T05:36:15.000Z | Python/468.py | JWang169/LintCodeJava | b75b06fa1551f5e4d8a559ef64e1ac29db79c083 | [
"CNRI-Python"
] | null | null | null | Python/468.py | JWang169/LintCodeJava | b75b06fa1551f5e4d8a559ef64e1ac29db79c083 | [
"CNRI-Python"
] | 3 | 2020-04-06T05:55:08.000Z | 2021-08-29T14:26:54.000Z | class Solution:
def validIPAddress(self, IP: str) -> str:
# if not IP:
# return "Neither"
if len(IP.split('.')) == 4:
ips = IP.split('.')
return self.validIPv4(ips)
elif len(IP.split(':')) == 8:
ips = IP.split(':')
return self.validIPv6(ips)
return "Neither"
def validIPv4(self, ips):
for number in ips:
# no leading zero
if len(number) > 1 and number[0] == '0':
return "Neither"
# 0 to 255
if number.isdigit() and 0 <= int(number) <= 255:
continue
else:
return "Neither"
return "IPv4"
def validIPv6(self, ips):
hexSet = set(['a', 'b', 'c', 'd', 'e', 'f'])
for numbers in ips:
# empty group or too long
if len(numbers) == 0 or len(numbers) > 4:
return "Neither"
            for num in numbers:  # every character must be a hex digit
                if not num.isdigit() and num.lower() not in hexSet:
                    return "Neither"
return "IPv6"
| 28.837209 | 67 | 0.429032 | 132 | 1,240 | 4.030303 | 0.431818 | 0.146617 | 0.037594 | 0.06015 | 0.075188 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030882 | 0.451613 | 1,240 | 43 | 68 | 28.837209 | 0.751471 | 0.064516 | 0 | 0.178571 | 0 | 0 | 0.058874 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0 | 0 | 0.464286 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7461e3bcb8e9c632b051701ae54e04f11972850a | 1,158 | py | Python | buzzmobile/process/bearing/calculate_directions.py | gtagency/buzzmobile | 8f7215d35b3b7fe3c6ca6419f0123ba18e8aca0a | [
"MIT"
] | 25 | 2015-02-18T04:15:09.000Z | 2019-12-11T14:29:02.000Z | buzzmobile/process/bearing/calculate_directions.py | gtagency/buzzmobile | 8f7215d35b3b7fe3c6ca6419f0123ba18e8aca0a | [
"MIT"
] | 173 | 2016-09-16T05:34:27.000Z | 2017-11-15T07:31:53.000Z | buzzmobile/process/bearing/calculate_directions.py | gtagency/buzzmobile | 8f7215d35b3b7fe3c6ca6419f0123ba18e8aca0a | [
"MIT"
] | 4 | 2015-01-18T15:46:35.000Z | 2017-04-08T00:39:05.000Z | """Utilities for calculating directions and distances given coords."""
import math
EARTH_RADIUS = 6.3710088e6
def get_distance(fix1, fix2):
"""Calculates great-circle distance between two positions in meters."""
lat1 = math.radians(fix1.latitude)
lon1 = math.radians(fix1.longitude)
lat2 = math.radians(fix2.latitude)
lon2 = math.radians(fix2.longitude)
angle = (math.pow(math.sin((lat2 - lat1) / 2), 2)
+ math.cos(lat1) * math.cos(lat2)
* math.pow(math.sin((lon2 - lon1) / 2), 2))
unit_distance = 2 * math.atan2(math.sqrt(angle), math.sqrt(1 - angle))
return EARTH_RADIUS * unit_distance
def get_forward_angle(fix1, fix2):
"""Calculates forward azimuth between two positions in radians."""
lat1 = math.radians(fix1.latitude)
lon1 = math.radians(fix1.longitude)
lat2 = math.radians(fix2.latitude)
lon2 = math.radians(fix2.longitude)
y = math.sin(lon2 - lon1) * math.cos(lat2)
x = (math.cos(lat1) * math.sin(lat2)
- math.sin(lat1) * math.cos(lat2) * math.cos(lon2 - lon1))
angle = math.atan2(y, x)
return (angle + 2 * math.pi) % (2 * math.pi)
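# Quick sanity check (a sketch; any object with .latitude/.longitude works):
#   from collections import namedtuple
#   Fix = namedtuple('Fix', 'latitude longitude')
#   atlanta, athens = Fix(33.749, -84.388), Fix(33.951, -83.357)
#   get_distance(atlanta, athens)  # roughly 9.8e4 meters (~98 km)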
| 35.090909 | 75 | 0.648532 | 160 | 1,158 | 4.65 | 0.30625 | 0.11828 | 0.080645 | 0.056452 | 0.346774 | 0.295699 | 0.295699 | 0.295699 | 0.295699 | 0.295699 | 0 | 0.058824 | 0.207254 | 1,158 | 32 | 76 | 36.1875 | 0.751634 | 0.16494 | 0 | 0.363636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.045455 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
746ab1f26e2827ead488e3722b6ba0e4f14c48ca | 523 | py | Python | src/proto/player_state_proto.py | edwadli/splendor-ai | 0881924bf59539ba307592a2f546d56732359120 | [
"Apache-2.0"
] | 2 | 2020-02-02T06:01:23.000Z | 2021-09-21T02:49:29.000Z | src/proto/player_state_proto.py | edwadli/splendor-ai | 0881924bf59539ba307592a2f546d56732359120 | [
"Apache-2.0"
] | 3 | 2019-06-04T07:45:33.000Z | 2019-06-07T02:14:27.000Z | src/proto/player_state_proto.py | edwadli/splendor-ai | 0881924bf59539ba307592a2f546d56732359120 | [
"Apache-2.0"
] | 2 | 2019-05-31T06:52:58.000Z | 2019-06-05T19:45:16.000Z | """Data schema for a player's state."""
import collections
PlayerState = collections.namedtuple(
"PlayerState", [
# Dictionary of Gems held by player.
"gems",
# List of purchased DevelopmentCards.
"purchased_cards",
# List of reserved (non-hidden) DevelopmentCards.
"unhidden_reserved_cards",
# List of reserved (hidden) DevelopmentCards. Note that reserved cards are
# typically hidden when topdecked.
"hidden_reserved_cards",
# NobleTiles obtained.
"noble_tiles",
])
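# Example (sketch): a fresh player holding two rubies and nothing else.
#   state = PlayerState(gems={"ruby": 2}, purchased_cards=[],
#                       unhidden_reserved_cards=[], hidden_reserved_cards=[],
#                       noble_tiles=[])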
| 22.73913 | 78 | 0.695985 | 56 | 523 | 6.392857 | 0.607143 | 0.050279 | 0.061453 | 0.106145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.210325 | 523 | 22 | 79 | 23.772727 | 0.866828 | 0.535373 | 0 | 0 | 0 | 0 | 0.366379 | 0.189655 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
746ad17c2512eef00d948851110356b09d69b731 | 4,871 | py | Python | sheepdoge/config.py | mattjmcnaughton/sheepdoge | 9c028d6f51cb59afcaf25a5680f961ec7e25676b | [
"Apache-2.0"
] | 7 | 2018-03-18T07:25:10.000Z | 2022-01-28T17:35:08.000Z | sheepdoge/config.py | mattjmcnaughton/sheepdoge | 9c028d6f51cb59afcaf25a5680f961ec7e25676b | [
"Apache-2.0"
] | 15 | 2017-08-19T14:03:10.000Z | 2017-12-29T23:22:05.000Z | sheepdoge/config.py | mattjmcnaughton/sheepdoge | 9c028d6f51cb59afcaf25a5680f961ec7e25676b | [
"Apache-2.0"
] | null | null | null | from configparser import ConfigParser, NoOptionError
from typing import Dict # pylint: disable=unused-import
import os
from sheepdoge.exception import (
SheepdogeConfigurationAlreadyInitializedException,
SheepdogeConfigurationNotInitializedException,
)
DEFAULTS = {
"kennel_playbook_path": "kennel.yml",
"kennel_roles_path": ".kennel_roles",
"pupfile_path": "pupfile.yml",
"vault_password_file": None,
} # type: Dict[str, str]
class Config(object):
"""Config class for which there should only be one instance at anytime.
Additionally, we can only set the config values during initialization.
Multiple different classes can access this single instance at a time.
"""
_config = None # type: Config
def __init__(self, config_dict):
# type: (Dict[str, str]) -> None
self._config_dict = config_dict
@classmethod
def clear_config_singleton(cls):
# type: () -> None
"""Delete the current configuration singleton to allow the
initialization of a new one. This method is predominantly used
during test.
"""
cls._config = None
@classmethod
def get_config_singleton(cls):
# type: () -> Config
"""Return the current config singleton instance. We must initialize
the singleton before calling this method.
:return: The singleton instance.
"""
if cls._config is None:
            raise SheepdogeConfigurationNotInitializedException()
return cls._config
@classmethod
def initialize_config_singleton(
cls, config_file_contents=None, config_options=None
):
# type: (str, Dict[str, str]) -> None
"""Initialize the config singleton with the proper values. If we
specify no additional values during configuration, then the config
will contain all defaults. We can, in priority order, pass in the
contents of a *.cfg file and a dictionary of options. Typically we
derive this dictionary of options from the command line.
Finally, after setting all of the base configuration values,
we compute additional configuration values which are useful
throughout the program.
:param config_file_contents: The str contents of the .cfg file
containing kennel configuration.
:param config_options: The dict specifying the highest priority
configuration values.
"""
if cls._config is not None:
raise SheepdogeConfigurationAlreadyInitializedException()
config_dict = {} # type: Dict[str, str]
cls._set_config_default_values(config_dict)
if config_file_contents:
cls._set_config_file_values(config_dict, config_file_contents)
if config_options:
cls._set_config_option_values(config_dict, config_options)
cls._set_calculated_config_values(config_dict)
cls._config = cls(config_dict)
@classmethod
def _set_config_default_values(cls, config_dict):
# type: (Dict[str, str]) -> None
"""Set defaults for all views here - they will be overwritten in the
following steps if necessary.
"""
config_dict.update(DEFAULTS)
@classmethod
def _set_config_file_values(cls, config_dict, config_file_contents):
# type: (Dict[str, str], str) -> None
config_parser = ConfigParser()
config_parser.read_string(config_file_contents)
kennel_cfg_section = "kennel"
for currently_defined_key in config_dict.keys():
try:
config_file_value = config_parser.get(
kennel_cfg_section, currently_defined_key
)
config_dict[currently_defined_key] = config_file_value
except NoOptionError:
pass # If the value isn't specified, skip
@classmethod
def _set_config_option_values(cls, config_dict, config_options):
# type: (Dict[str, str], Dict[str, str]) -> None
config_dict.update(config_options)
@classmethod
def _set_calculated_config_values(cls, config_dict):
# type: (Dict[str, str]) -> None
pupfile_path = config_dict["pupfile_path"]
pupfile_dir = os.path.dirname(os.path.realpath(pupfile_path))
kennel_roles_path = config_dict["kennel_roles_path"]
abs_kennel_roles_dir = os.path.realpath(kennel_roles_path)
calculated_config = {
"abs_pupfile_dir": pupfile_dir,
"abs_kennel_roles_dir": abs_kennel_roles_dir,
}
config_dict.update(calculated_config)
def get(self, key):
# type: (str) -> str
"""Retrieve the value for the given configuration key.
:param key: One of the available configuration options.
"""
return self._config_dict[key]
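# Typical lifecycle (sketch):
#   Config.initialize_config_singleton(config_options={"pupfile_path": "pupfile.yml"})
#   config = Config.get_config_singleton()
#   config.get("abs_pupfile_dir")  # computed during initialization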
| 34.062937 | 76 | 0.667419 | 567 | 4,871 | 5.488536 | 0.268078 | 0.067481 | 0.02892 | 0.031491 | 0.092224 | 0.040488 | 0.032776 | 0.023779 | 0.023779 | 0 | 0 | 0 | 0.25929 | 4,871 | 142 | 77 | 34.302817 | 0.862528 | 0.353726 | 0 | 0.098592 | 0 | 0 | 0.059107 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126761 | false | 0.028169 | 0.056338 | 0 | 0.239437 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
746b61a512fe54bf6d58d51dee7f9f6d9529baf8 | 1,202 | py | Python | _argparse.py | zzyyyl/USTC-Helper | 6f16361bdead655b799da40e3ecd7758745c2568 | [
"MIT"
] | 3 | 2022-03-18T18:24:53.000Z | 2022-03-24T01:50:41.000Z | _argparse.py | zzyyyl/USTC-Helper | 6f16361bdead655b799da40e3ecd7758745c2568 | [
"MIT"
] | null | null | null | _argparse.py | zzyyyl/USTC-Helper | 6f16361bdead655b799da40e3ecd7758745c2568 | [
"MIT"
] | null | null | null | from argparse import ArgumentParser
from USTCHelper import config
import json
import base64
class ArgumentError(Exception):
def __init__(self, text):
self.text = text
def __str__(self):
return f"ArgumentError: {self.text}"
def ArgParser():
parser = ArgumentParser()
parser.add_argument("--daily", help="run your daily schedule", action='store_true')
parser.add_argument("-s", "--service", help="service to run", metavar="SERVICE", dest="service")
parser.add_argument("--silence", help="run in silence", action='store_true')
parser.add_argument("-u", "--username", help="your student ID", metavar="ID", dest="stuid")
parser.add_argument("--store-password", help="store password in config", action='store_true')
parser.add_argument("--config", help="config for services", metavar="CONF")
return parser
def ArgConflictCheck(args):
if args.daily:
if args.service:
raise ArgumentError("Conflict arguments: --daily, --service")
def ArgInit(args):
if args.config:
config["in-command"]["state"] = True
config["in-command"]["config"] = json.loads(base64.b64decode(args.config.encode()).decode('gbk'))
| 36.424242 | 105 | 0.6797 | 148 | 1,202 | 5.405405 | 0.412162 | 0.0675 | 0.1275 | 0.07875 | 0.12 | 0.12 | 0 | 0 | 0 | 0 | 0 | 0.005964 | 0.163062 | 1,202 | 32 | 106 | 37.5625 | 0.789264 | 0 | 0 | 0 | 0 | 0 | 0.270383 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.192308 | false | 0.038462 | 0.153846 | 0.038462 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
746fe9ce70247f8f0f948de126caec9e75bc4ccb | 1,378 | py | Python | app/api/client.py | cloudblue/processor-acronis-customer-centric | fbb022de150818e29cf6cb6205650893acb3613e | [
"Apache-2.0"
] | 3 | 2019-12-21T13:45:28.000Z | 2019-12-24T20:04:34.000Z | app/api/client.py | cloudblue/processor-acronis-customer-centric | fbb022de150818e29cf6cb6205650893acb3613e | [
"Apache-2.0"
] | null | null | null | app/api/client.py | cloudblue/processor-acronis-customer-centric | fbb022de150818e29cf6cb6205650893acb3613e | [
"Apache-2.0"
] | null | null | null | """
This file is property of the Ingram Micro Cloud Blue.
Copyright (c) 2019 Ingram Micro. All Rights Reserved.
"""
import json
from requests import api
from connect.logger import logger
from urllib.parse import urlencode, quote_plus
class Client:
@staticmethod
def send_request(verb, uri, config, body=None):
logger.error("REQUEST------------------->")
logger.error('Request: %s %s' % (verb, uri))
logger.debug(body)
options = {'url': uri, 'headers': {'Content-Type': config['Content-Type']}}
if 'bearer' in config:
options['headers']['Authorization'] = 'Bearer ' + config['bearer']
elif 'basic' in config:
options['headers']['Authorization'] = 'Basic ' + config['basic']
if body:
options['data'] = urlencode(body, quote_via=quote_plus) if config[
'Content-Type'] == 'application/x-www-form-urlencoded' else json.dumps(
body)
response = api.request(verb, **options)
        if 200 <= response.status_code < 300:  # any 2xx success response
logger.debug(str(response))
if response.content:
return response.json()
else:
logger.error('Response')
logger.error(str(response))
raise Exception(response.json()['error'])
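# Example call (a sketch; the config keys mirror what send_request reads):
#   Client.send_request('GET', 'https://api.example.com/items',
#                       {'Content-Type': 'application/json', 'bearer': '<token>'})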
| 33.609756 | 146 | 0.563135 | 146 | 1,378 | 5.280822 | 0.486301 | 0.057069 | 0.046693 | 0.057069 | 0.090791 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010341 | 0.298258 | 1,378 | 40 | 147 | 34.45 | 0.78697 | 0.077649 | 0 | 0 | 0 | 0 | 0.167854 | 0.047506 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.142857 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7470950ef82ea911ccd94deb0cfaffcdcd9405a5 | 4,048 | py | Python | venmo-sim/leiden/simulate_txs_on_leiden_clustered_venmo_graph.py | akatsarakis/tx_benchmarking | f8233e58bba3f4fb54d82273d7ca8631bae36ebc | [
"MIT"
] | 3 | 2020-07-07T17:08:41.000Z | 2022-01-10T19:25:46.000Z | venmo-sim/leiden/simulate_txs_on_leiden_clustered_venmo_graph.py | akatsarakis/tx_benchmarking | f8233e58bba3f4fb54d82273d7ca8631bae36ebc | [
"MIT"
] | null | null | null | venmo-sim/leiden/simulate_txs_on_leiden_clustered_venmo_graph.py | akatsarakis/tx_benchmarking | f8233e58bba3f4fb54d82273d7ca8631bae36ebc | [
"MIT"
] | null | null | null | import argparse
import csv
import re
# read arg (the num of shards, also the num of distributed nodes) from command line
def parse_args():
global ARGS
parser = argparse.ArgumentParser()
parser.add_argument('node_tot', type=int,
help='total number of distributed-nodes/shards in the system')
ARGS = parser.parse_args()
parse_args()
node_tot = ARGS.node_tot
# read clustering result of the graph, and bind them to distributed nodes
# (ensure the total vertex to each node is as equal as possible)
## first, read in total number of vertices and clusters
## from the first line of the input file
fp = open("./results/clustered_venmo_dataset_7024852.txt", "r") # , encoding='utf-8')
firstline_str = fp.readline()
firstline_pattern = re.compile(r"Clustering with (\d+) elements and (\d+) clusters")
firstline_match = firstline_pattern.match(firstline_str)
if not firstline_match:
raise Exception('Failed to identify total vertex number and cluster number in input file')
vertex_tot = eval(firstline_match.group(1))
cluster_tot = eval(firstline_match.group(2))
print('%d vertices, %d clusters, %d shards' % (vertex_tot, cluster_tot, node_tot))
## binding initialization
curline_str = fp.readline()
cur_cluster = -1
expected_node_size = vertex_tot / node_tot
vertex_cluster_no = [-1] * vertex_tot
vertex_node_no = [-1] * vertex_tot
cluster_size = [0] * cluster_tot
cluster_vertices = [[] for i in range(cluster_tot)] # '[[]] * cluster_tot' is wrong, because it is shallow copy
cluster_node_no = [-1] * cluster_tot
## read in each line and count
while curline_str:
if curline_str[0] == '[': # beginning of a new cluster
cur_cluster += 1
curline_str = curline_str[curline_str.find(']')+1:]
# remove the preceding '[ cluster_no]' of the string
strs = curline_str[:-1].split(',') # slice off newline character, and split by comma
for cur_str in strs:
if not cur_str:
continue # ignore empty substrings (in the case the last char is a comma)
# bind vertex to its cluster written in the input file
cur_vertex = eval(cur_str)
vertex_cluster_no[cur_vertex] = cur_cluster
cluster_vertices[cur_cluster].append(cur_vertex)
cluster_size[cur_cluster] += 1
curline_str = fp.readline() # next line
## bind clusters to distributed nodes according to counting result
cur_node = 0
cur_node_size = 0
for cur_cluster in range(cluster_tot):
if cur_node + 1 < node_tot \
and cur_node_size + cluster_size[cur_cluster] / 2 > expected_node_size \
and cur_node_size != 0:
# should bind to the next node, unless this cluster is the first cluster on the current node
cur_node += 1
cur_node_size = 0
cur_node_size += cluster_size[cur_cluster]
cluster_node_no[cur_cluster] = cur_node
for cur_vertex in cluster_vertices[cur_cluster]:
vertex_node_no[cur_vertex] = cur_node
fp.close()
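# Example of the greedy binding above (hypothetical numbers): with 10 vertices
# on 2 shards (expected_node_size = 5) and cluster sizes [4, 3, 3], cluster 0
# stays on shard 0; cluster 1 would overshoot (4 + 3/2 > 5), so it opens
# shard 1; cluster 2 follows it there, giving shard sizes [4, 6].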
print('-- Binding result:')
node_size = [0] * node_tot
for cur_vertex in range(vertex_tot):
node_size[vertex_node_no[cur_vertex]] += 1
for cur_node in range(node_tot):
print('Shard %d size:' % cur_node, node_size[cur_node])
# read txes and sort by time
def takeThird(elem):
    """Sort key: the tx date string; we assume dates in the dataset
    compare correctly in plain string order."""
    return elem[2]
fp = open("../venmo_dataset_normalized_shorted.csv", "r") # , encoding='utf-8')
csv_file = csv.reader(fp)
all_tx = []
for row in csv_file:
all_tx.append([int(row[0]), int(row[1]), row[2]])
# all_tx = all_tx[:4000000] # we only used the first 4000000 lines of txes
all_tx.sort(key=takeThird)
fp.close()
# simulate txs, calculate ratio of remote tx
tx_remote_cnt = 0
tx_cnt = 0
for tx in all_tx:
tx_cnt += 1
if vertex_node_no[tx[0]] != vertex_node_no[tx[1]]:
tx_remote_cnt += 1
vertex_node_no[tx[0]] = vertex_node_no[tx[1]]
print('-- Result:')
print('tx total:', tx_cnt)
print('remote:', tx_remote_cnt)
print('remote ratio:', tx_remote_cnt / tx_cnt)
| 34.896552 | 112 | 0.704792 | 639 | 4,048 | 4.242567 | 0.266041 | 0.033567 | 0.030985 | 0.020657 | 0.095906 | 0.04574 | 0.04574 | 0.022132 | 0.022132 | 0.022132 | 0 | 0.017099 | 0.190959 | 4,048 | 115 | 113 | 35.2 | 0.810687 | 0.272233 | 0 | 0.075 | 0 | 0 | 0.129287 | 0.037037 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.0375 | 0.0125 | 0.075 | 0.0875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7471a6cafdc6fccdf2cbf3b0bcfba1e4f01d5df3 | 4,363 | py | Python | custom_components/ampio/__init__.py | pszypowicz/ampio-hacc | 34a929cccc2b0e35f1f5d8aeafbf965bfc7dab6c | [
"MIT"
] | 2 | 2021-08-18T07:18:05.000Z | 2022-01-01T19:07:09.000Z | custom_components/ampio/__init__.py | pszypowicz/ampio-hacc | 34a929cccc2b0e35f1f5d8aeafbf965bfc7dab6c | [
"MIT"
] | null | null | null | custom_components/ampio/__init__.py | pszypowicz/ampio-hacc | 34a929cccc2b0e35f1f5d8aeafbf965bfc7dab6c | [
"MIT"
] | 1 | 2021-11-29T18:16:45.000Z | 2021-11-29T18:16:45.000Z | """Ampio Systems Platform."""
import asyncio
import json
import logging
from typing import Any, Dict, Optional
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_DEVICE,
CONF_DEVICE_CLASS,
CONF_FRIENDLY_NAME,
CONF_ICON,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_PROTOCOL,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import Event, callback
from homeassistant.helpers import (
config_validation as cv,
device_registry as dr,
event,
template,
)
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_registry import EntityRegistry, async_get_registry
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .client import AmpioAPI, async_setup_discovery
from .const import (
AMPIO_CONNECTED,
AMPIO_DISCOVERY_UPDATED,
AMPIO_MODULE_DISCOVERY_UPDATED,
COMPONENTS,
CONF_BROKER,
CONF_STATE_TOPIC,
CONF_UNIQUE_ID,
DATA_AMPIO,
DATA_AMPIO_API,
DATA_AMPIO_DISPATCHERS,
DATA_AMPIO_PLATFORM_LOADED,
PROTOCOL_311,
SIGNAL_ADD_ENTITIES,
)
from .models import AmpioModuleInfo
_LOGGER = logging.getLogger(__name__)
DOMAIN = "ampio"
VERSION_TOPIC_FROM = "ampio/from/info/version"
VERSION_TOPIC_TO = "ampio/to/info/version"
DISCOVERY_TOPIC_FROM = "ampio/from/can/dev/list"
DISCOVERY_TOPIC_TO = "ampio/to/can/dev/list"
ATTR_DEVICES = "devices"
CONF_KEEPALIVE = "keepalive"
PROTOCOL_31 = "3.1"
DEFAULT_PORT = 1883
DEFAULT_KEEPALIVE = 60
DEFAULT_PROTOCOL = PROTOCOL_311
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
vol.Schema(
{
vol.Optional(CONF_CLIENT_ID): cv.string,
vol.Optional(CONF_KEEPALIVE, default=DEFAULT_KEEPALIVE): vol.All(
vol.Coerce(int), vol.Range(min=15)
),
vol.Optional(CONF_BROKER): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PROTOCOL, default=DEFAULT_PROTOCOL): vol.All(
cv.string, vol.In([PROTOCOL_31, PROTOCOL_311])
),
},
),
)
},
extra=vol.ALLOW_EXTRA,
)
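# For illustration, async_setup_entry below runs the entry data through this
# schema, so data shaped roughly like the following would pass (a sketch):
#   CONFIG_SCHEMA({DOMAIN: {CONF_BROKER: "192.168.1.10", CONF_PORT: 1883,
#                           CONF_USERNAME: "ha", CONF_PASSWORD: "secret"}})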
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Stub to allow setting up this component.
Configuration through YAML is not supported at this time.
"""
return True
async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigEntry) -> bool:
"""Set up the Ampio component."""
ampio_data = hass.data.setdefault(DATA_AMPIO, {})
for component in COMPONENTS:
ampio_data.setdefault(component, [])
conf = CONFIG_SCHEMA({DOMAIN: dict(config_entry.data)})[DOMAIN]
ampio_data[DATA_AMPIO_API]: AmpioAPI = AmpioAPI(
hass, config_entry, conf,
)
ampio_data[DATA_AMPIO_DISPATCHERS] = []
ampio_data[DATA_AMPIO_PLATFORM_LOADED] = []
for component in COMPONENTS:
coro = hass.config_entries.async_forward_entry_setup(config_entry, component)
ampio_data[DATA_AMPIO_PLATFORM_LOADED].append(hass.async_create_task(coro))
await ampio_data[DATA_AMPIO_API].async_connect()
async def async_connected():
"""Start discovery on connected."""
await async_setup_discovery(hass, conf, config_entry)
async_dispatcher_connect(hass, AMPIO_CONNECTED, async_connected)
async def async_stop_ampio(_event: Event):
"""Stop MQTT component."""
await ampio_data[DATA_AMPIO_API].async_disconnect()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_ampio)
return True
async def async_unload_entry(hass, config_entry):
"""Unload ZHA config entry."""
dispatchers = hass.data[DATA_AMPIO].get(DATA_AMPIO_DISPATCHERS, [])
for unsub_dispatcher in dispatchers:
unsub_dispatcher()
for component in COMPONENTS:
await hass.config_entries.async_forward_entry_unload(config_entry, component)
return True
| 27.789809 | 88 | 0.698602 | 517 | 4,363 | 5.599613 | 0.270793 | 0.040415 | 0.036269 | 0.037306 | 0.121934 | 0.067012 | 0.021416 | 0 | 0 | 0 | 0 | 0.006741 | 0.217969 | 4,363 | 156 | 89 | 27.967949 | 0.841735 | 0.005272 | 0 | 0.078947 | 0 | 0 | 0.027337 | 0.021479 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.017544 | 0.140351 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
747472dbf6c760efb2300b9c6ffbf248dba2c191 | 6,477 | py | Python | pyformlang/rsa/recursive_automaton.py | IlyaEp/pyformlang | eef239844beff5e9da3be4a4a240440ece81c10b | [
"MIT"
] | 15 | 2020-06-25T14:38:27.000Z | 2022-03-09T17:55:07.000Z | pyformlang/rsa/recursive_automaton.py | IlyaEp/pyformlang | eef239844beff5e9da3be4a4a240440ece81c10b | [
"MIT"
] | 11 | 2020-09-23T09:48:35.000Z | 2021-08-24T08:37:47.000Z | pyformlang/rsa/recursive_automaton.py | IlyaEp/pyformlang | eef239844beff5e9da3be4a4a240440ece81c10b | [
"MIT"
] | 5 | 2020-03-08T19:00:17.000Z | 2021-08-15T12:38:05.000Z | """
Representation of a recursive automaton
"""
from typing import AbstractSet
from pyformlang.finite_automaton.finite_automaton import to_symbol
from pyformlang.finite_automaton.symbol import Symbol
from pyformlang.regular_expression import Regex
from pyformlang.cfg import CFG, Epsilon
from pyformlang.rsa.box import Box
def remove_repetition_of_nonterminals_from_productions(grammar_in_text: str):
""" Remove nonterminal repeats on the left side of the rule
For example:
grammar: S -> a S b
S -> a b
grammar after function execution: S -> a S b | a b
"""
productions = dict()
for production in grammar_in_text.splitlines():
if "->" not in production:
continue
head, body = production.split(" -> ")
if head in productions:
productions[head] += " | " + body
else:
productions[head] = body
grammar_new = str()
for nonterminal in productions:
grammar_new += f'{nonterminal} -> {productions[nonterminal]}\n'
return grammar_new[:-1]
class RecursiveAutomaton:
""" Represents a recursive automaton
This class represents a recursive automaton.
Parameters
----------
labels : set of :class:`~pyformlang.finite_automaton.Symbol`, optional
A finite set of labels for boxes
initial_label : :class:`~pyformlang.finite_automaton.Symbol`, optional
A start label for automaton
boxes : set of :class:`~pyformlang.rsa.Box`, optional
A finite set of boxes
"""
def __init__(self,
labels: AbstractSet[Symbol] = None,
initial_label: Symbol = None,
boxes: AbstractSet[Box] = None):
if labels is not None:
labels = {to_symbol(x) for x in labels}
self._labels = labels or set()
if initial_label is not None:
initial_label = to_symbol(initial_label)
if initial_label not in self._labels:
self._labels.add(initial_label)
self._initial_label = initial_label or Symbol("")
self._boxes = dict()
if boxes is not None:
for box in boxes:
self._boxes.update({to_symbol(box.label): box})
self._labels.add(box.label)
for label in self._labels:
box = self.get_box(label)
if box is None:
raise ValueError("RSA must have the same number of labels and DFAs")
def get_box(self, label: Symbol):
""" Box by label """
label = to_symbol(label)
if label in self._boxes:
return self._boxes[label]
return None
def add_box(self, new_box: Box):
""" Set a box
Parameters
-----------
new_box : :class:`~pyformlang.rsa.Box`
The new box
"""
self._boxes.update({new_box.label: new_box})
self._labels.add(to_symbol(new_box.label))
def get_number_of_boxes(self):
""" Size of set of boxes """
return len(self._boxes)
def change_initial_label(self, new_initial_label: Symbol):
""" Set an initial label
Parameters
-----------
new_initial_label : :class:`~pyformlang.finite_automaton.Symbol`
The new initial label
"""
new_initial_label = to_symbol(new_initial_label)
if new_initial_label not in self._labels:
raise ValueError("New initial label not in set of labels for boxes")
@property
def labels(self) -> set:
""" The set of labels """
return self._labels
@property
def boxes(self) -> dict:
""" The set of boxes """
return self._boxes
@property
def initial_label(self) -> Symbol:
""" The initial label """
return self._initial_label
@classmethod
def from_regex(cls, regex: Regex, initial_label: Symbol):
""" Create a recursive automaton from regular expression
Parameters
-----------
regex : :class:`~pyformlang.regular_expression.Regex`
The regular expression
initial_label : :class:`~pyformlang.finite_automaton.Symbol`
The initial label for the recursive automaton
Returns
-----------
rsa : :class:`~pyformlang.rsa.RecursiveAutomaton`
The new recursive automaton built from regular expression
"""
initial_label = to_symbol(initial_label)
box = Box(regex.to_epsilon_nfa().minimize(), initial_label)
return RecursiveAutomaton({initial_label}, initial_label, {box})
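    # Example (sketch):
    #   rsa = RecursiveAutomaton.from_regex(Regex("a* b"), Symbol("S"))
    #   rsa.get_number_of_boxes()  # 1 box, labelled S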
@classmethod
def from_cfg(cls, cfg: CFG):
""" Create a recursive automaton from context-free grammar
Parameters
-----------
cfg : :class:`~pyformlang.cfg.CFG`
The context-free grammar
Returns
-----------
rsa : :class:`~pyformlang.rsa.RecursiveAutomaton`
The new recursive automaton built from context-free grammar
"""
initial_label = to_symbol(cfg.start_symbol)
grammar_in_true_format = remove_repetition_of_nonterminals_from_productions(cfg.to_text())
boxes = set()
labels = set()
notation_for_epsilon = Epsilon().to_text()
for production in grammar_in_true_format.splitlines():
head, body = production.split(" -> ")
labels.add(to_symbol(head))
if body == "":
body = notation_for_epsilon
boxes.add(Box(Regex(body).to_epsilon_nfa().minimize(), to_symbol(head)))
return RecursiveAutomaton(labels, initial_label, boxes)
def is_equivalent_to(self, other):
""" Check whether two recursive automata are equivalent
Parameters
----------
other : :class:`~pyformlang.rsa.RecursiveAutomaton`
The input recursive automaton
Returns
----------
are_equivalent : bool
Whether the two recursive automata are equivalent or not
"""
if not isinstance(other, RecursiveAutomaton):
return False
if self._labels != other._labels:
return False
for label in self._labels:
box_1 = self.get_box(label)
box_2 = other.get_box(label)
if not box_1 == box_2:
return False
return True
def __eq__(self, other):
return self.is_equivalent_to(other)
| 28.915179 | 98 | 0.603366 | 742 | 6,477 | 5.080863 | 0.15903 | 0.098674 | 0.027851 | 0.041114 | 0.24244 | 0.163395 | 0.096021 | 0.068966 | 0.04191 | 0.04191 | 0 | 0.001102 | 0.299213 | 6,477 | 223 | 99 | 29.044843 | 0.829478 | 0.28578 | 0 | 0.14433 | 0 | 0 | 0.036984 | 0.006724 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134021 | false | 0 | 0.061856 | 0.010309 | 0.350515 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7475e81fcba007e37fae56c5bb3b0e881ffb33a4 | 17,080 | py | Python | py/ColladaMesh.py | dineshkummarc/Odin | f23719ff05a9d03cebd0d5c4c0179a98eff2d935 | [
"BSD-3-Clause"
] | 1 | 2019-04-22T16:33:55.000Z | 2019-04-22T16:33:55.000Z | py/ColladaMesh.py | dineshkummarc/Odin | f23719ff05a9d03cebd0d5c4c0179a98eff2d935 | [
"BSD-3-Clause"
] | null | null | null | py/ColladaMesh.py | dineshkummarc/Odin | f23719ff05a9d03cebd0d5c4c0179a98eff2d935 | [
"BSD-3-Clause"
] | null | null | null | from bisect import bisect
import ColladaMaterial
# Named array of points.
class PointArray():
def __init__(self, n, ofs):
self.name = n
self.points = []
self.stride = 1
self.offset = int(ofs)
def GetFirstChildElement(node):
for elem in node.childNodes:
if elem.nodeType == elem.ELEMENT_NODE:
return elem
return None
def GetChildElements(node):
elems = []
for elem in node.childNodes:
if elem.nodeType == elem.ELEMENT_NODE:
elems.append(elem)
return elems
def GetSourceArray(parent, srcId):
for src in parent.getElementsByTagName('source'):
if src.getAttribute('id') == srcId[1:]:
technique = src.getElementsByTagName('technique_common')[0]
accessor = GetFirstChildElement(technique)
sourceURL = accessor.getAttribute('source')
count = int(accessor.getAttribute('count'))
param = GetFirstChildElement(accessor)
paramType = param.getAttribute('type')
for node in GetChildElements(src):
if node.getAttribute('id') == sourceURL[1:]:
data = node.firstChild.data
data = data.strip()
data = data.replace('\n', ' ')
if paramType == 'name':
data = [str(s) for s in data.split(' ')]
elif paramType == 'float':
data = [float(s) for s in data.split(' ')]
elif paramType == 'float4x4':
data = [float(s) for s in data.split(' ')]
return data
return []
def GetChildArray(parent, tag, typecast):
for node in GetChildElements(parent):
if node.tagName == tag:
return [typecast(x) for x in node.firstChild.data.strip().split(' ')]
return []
class Skin:
def __init__(self, skin, numWeights, origPosMap):
self.bindShapeMatrix = GetChildArray(skin, 'bind_shape_matrix', float)
jointURL = ""
for joints in skin.getElementsByTagName('joints'):
for inp in joints.getElementsByTagName('input'):
semantic = inp.getAttribute('semantic')
sourceURL = inp.getAttribute('source')
if semantic == 'JOINT':
self.jointNames = GetSourceArray(skin, sourceURL)
jointURL = sourceURL
elif semantic == 'INV_BIND_MATRIX':
self.invBindMatrices = GetSourceArray(skin, sourceURL)
else:
print('Skipping input with unknown semantic ' + semantic)
for vertexWeights in skin.getElementsByTagName('vertex_weights'):
jointOffset = 0
weightOffset = 0
weightURL = ''
for inp in vertexWeights.getElementsByTagName('input'):
semantic = inp.getAttribute('semantic')
offset = int(inp.getAttribute('offset'))
sourceURL = inp.getAttribute('source')
if semantic == 'JOINT':
if sourceURL != jointURL:
print('TODO: multiple jointURLs specified, need to match up indices.')
jointOffset = offset
elif semantic == 'WEIGHT':
weightURL = sourceURL
weightOffset = offset
else:
print('Skipping input with unknown semantic ' + semantic)
weights = GetSourceArray(skin, weightURL)
vertexCount = GetChildArray(vertexWeights, 'vcount', int)
v = GetChildArray(vertexWeights, 'v', int)
vstride = max(jointOffset, weightOffset) + 1
self.vertexWeightCount = numWeights
self.vertexWeights = []
self.jointIndices = []
index = 0
for vc in vertexCount:
tempWeights = []
tempIndices = []
for c in range(vc):
tempWeights.append(weights[v[(index + c) * vstride + weightOffset]])
tempIndices.append(v[(index + c) * vstride + jointOffset])
                # sort (weight, index) pairs descending so the heaviest joints survive the cut
                temp = sorted(zip(tempWeights, tempIndices), reverse=True)
tempWeights = [s[0] for s in temp][:numWeights]
tempIndices = [s[1] for s in temp][:numWeights]
for n in range(len(tempWeights), numWeights):
tempWeights.append(0)
tempIndices.append(0)
weightSum = 0
for n in range(numWeights):
weightSum = weightSum + tempWeights[n]
for n in range(numWeights):
tempWeights[n] = tempWeights[n] / weightSum
self.vertexWeights.extend(tempWeights)
self.jointIndices.extend(tempIndices)
index = index + vc
#Expand vertex weights and joint indices according to origPosMap
newVertexWeights = []
newJointIndices = []
for i in range(len(origPosMap)):
origIndex = int(origPosMap[i])
for j in range(numWeights):
newVertexWeights.append(self.vertexWeights[origIndex * numWeights + j])
newJointIndices.append(self.jointIndices[origIndex * numWeights + j])
self.vertexWeights = newVertexWeights
self.jointIndices = newJointIndices
def Write(self, fileHandle):
fileHandle.write(' "bindShapeMatrix" : ')
fileHandle.write(str(self.bindShapeMatrix) + ",\n")
fileHandle.write(' "jointNames" : ')
fileHandle.write(str(self.jointNames).replace("'", '"') + ",\n")
fileHandle.write(' "invBindMatrices" : ')
fileHandle.write(str(self.invBindMatrices) + ",\n")
fileHandle.write(' "vertexWeights" : ')
fileHandle.write(str(self.vertexWeights) + ",\n")
fileHandle.write(' "jointIndices" : ')
fileHandle.write(str(self.jointIndices) + ",\n")
#TODO: passing in the doc is like begging for trouble, pass in the needed elements instead.
class Mesh:
def __init__(self, doc, node):
self.materialLUT = dict()
instanceMaterials = node.getElementsByTagName('instance_material')
for mat in instanceMaterials:
self.materialLUT[mat.getAttribute('symbol')] = mat.getAttribute('target')
geometry = None
instanceGeometryURL = ''
if node.tagName == 'instance_controller':
instanceControllerURL = node.getAttribute('url')
controllers = doc.getElementsByTagName('controller')
controller = None
for c in controllers:
if c.getAttribute('id') == instanceControllerURL[1:]:
controller = c
break
            if controller is None:
                print("Couldn't find the controller with id '" + instanceControllerURL + "', skipping")
                return
            skins = controller.getElementsByTagName('skin')
if len(skins) != 1:
print("Controller doesn't contain exactly one skin, skipping.")
return
            instanceGeometryURL = skins[0].getAttribute('source')
elif node.tagName == 'instance_geometry':
instanceGeometryURL = node.getAttribute('url')
if len(instanceGeometryURL) != 0:
if instanceGeometryURL[0] != '#':
print('Geometry URL pointing outside of this document, skipping.')
return
geometries = doc.getElementsByTagName('geometry')
geometry = None
for g in geometries:
if g.getAttribute('id') == instanceGeometryURL[1:]:
geometry = g
break
if geometry == None:
print("Couldn't find the geometry with id '" + instanceGeometryURL + "', skipping")
return
self.faces = []
self.materials = []
self.verts = []
geometryId = geometry.getAttribute("id")
self.uniqueVerts = dict()
self.sourceArrays = dict()
self.outFileName = geometry.getAttribute('name') + '.json'
self.skin = None
self.origPosMap = dict()
self.skinNode = None
# Check if there's a skin node for this mesh.
for controller in doc.getElementsByTagName("controller"):
if self.skinNode != None:
break
controllerId = controller.getAttribute("id")
for skin in controller.getElementsByTagName("skin"):
if skin.getAttribute("source")[1:] == geometryId:
self.skinNode = skin
break
for mesh in geometry.getElementsByTagName("mesh"):
# TODO: This assumes there's only one <mesh> per <geometry>, check spec.
# Only export normals and uv's if they're required by the material.
self.needsNormals = False
self.needsUV = False
# Get all the triangles and polygons in the mesh.
polygons = mesh.getElementsByTagName("polygons")
triangles = mesh.getElementsByTagName("triangles")
for tri in triangles:
polygons.append(tri)
# Get all the materials in the mesh.
self.BuildMaterials(doc, polygons)
# Create a list of all the sources
sourceList = self.BuildSourceList(mesh, polygons)
# Look up the source and pull the data.
for srcItem in sourceList:
sourceURL = srcItem[0]
offset = srcItem[1]
targetAttr = srcItem[2]
foundSource = False
for source in mesh.getElementsByTagName('source'):
if source.getAttribute('id') == sourceURL[1:]:
foundSource = True
                        if targetAttr not in self.sourceArrays:
self.sourceArrays[targetAttr] = []
self.GetSrcArray(source, targetAttr, offset)
break
if not foundSource:
print("Couldn't find matching source.")
break
# Get unique indices.
for polygon in polygons:
for p in polygon.getElementsByTagName("p"):
                    face = p.firstChild.data.strip().split(' ')
                    stride = len(face) // (int(polygon.getAttribute("count")) * 3)  # indices per vertex
for i in range(0, len(face), stride):
posArr = self.sourceArrays["vertexPositions"][0]
fIndex = int(face[i+posArr.offset])
px = posArr.points[fIndex*3]
py = posArr.points[fIndex*3+1]
pz = posArr.points[fIndex*3+2]
vert = (px,py,pz)
if self.needsNormals:
for nc in range(0, len(self.sourceArrays["vertexNormals"])):
normArr = self.sourceArrays["vertexNormals"][nc]
fIndex = int(face[i+normArr.offset])
nx = normArr.points[fIndex*3]
ny = normArr.points[fIndex*3+1]
nz = normArr.points[fIndex*3+2]
vert = vert + (nx,ny,nz)
if self.needsUV:
for tn in range(0, len(self.sourceArrays["vertexTextureCoords"])):
texArr = self.sourceArrays["vertexTextureCoords"][tn]
fIndex = int(face[i+texArr.offset])
u = texArr.points[fIndex*2]
v = texArr.points[fIndex*2+1]
vert = vert + (u,v)
index = self.GetUniqueVertexIndex(vert)
                        self.origPosMap[index] = face[i + posArr.offset]  # original position index, used when expanding skin weights
self.faces.append(index)
self.vertArrays = [None]*len(self.uniqueVerts)
        for vert, idx in self.uniqueVerts.items():
            self.vertArrays[idx] = vert
offs = 0
arr = PointArray("vertexPositions", 0)
for v in self.vertArrays:
arr.points.append(v[offs+0])
arr.points.append(v[offs+1])
arr.points.append(v[offs+2])
self.verts.append([arr])
offs += 3
if (self.needsNormals):
self.verts.append([])
for nc in range(0, len(self.sourceArrays["vertexNormals"])):
arr = PointArray("vertexNormals", 3)
for v in self.vertArrays:
arr.points.append(v[offs+0])
arr.points.append(v[offs+1])
arr.points.append(v[offs+2])
self.verts[-1].append(arr)
offs += 3
if (self.needsUV):
self.verts.append([])
for tn in range(0, len(self.sourceArrays["vertexTextureCoords"])):
arr = PointArray("vertexTextureCoords", offs)
for v in self.vertArrays:
arr.points.append(v[offs+0])
arr.points.append(v[offs+1])
self.verts[-1].append(arr)
offs += 2
# If there's a skin node set, create the skin.
if self.skinNode:
self.skin = Skin(self.skinNode, 4, self.origPosMap)
def BuildSourceList(self, mesh, polygons):
# Build a list of (sourceURL, offset, targetAttr) tuples to extract.
srcArray = []
for polygon in polygons:
for input in polygon.getElementsByTagName("input"):
semantic = input.getAttribute("semantic")
offset = input.getAttribute('offset')
sourceURL = input.getAttribute('source')
targetAttr = 'vertexPositions'
if semantic == 'NORMAL':
targetAttr = 'vertexNormals'
if not self.needsNormals:
continue
elif semantic == 'TEXCOORD':
targetAttr = 'vertexTextureCoords'
if not self.needsUV:
continue
# There's an extra level of indirection for vertex semantics.
if semantic == 'VERTEX':
for vertex in mesh.getElementsByTagName('vertices'):
for input in vertex.getElementsByTagName('input'):
sourceURL = input.getAttribute('source')
semantic = input.getAttribute('semantic')
if semantic == 'NORMAL':
if not self.needsNormals:
continue
targetAttr = 'vertexNormals'
elif semantic == 'POSITION':
targetAttr = 'vertexPositions'
if [sourceURL, offset, targetAttr] not in srcArray:
srcArray.append([sourceURL, offset, targetAttr])
else:
if [sourceURL, offset, targetAttr] not in srcArray:
srcArray.append([sourceURL, offset, targetAttr])
return srcArray
def BuildMaterials(self, doc, polygons):
# Get all the materials in the mesh.
gcount = 0
for polygon in polygons:
materialSymbol = polygon.getAttribute('material')
if materialSymbol == '':
continue
materialURL = self.materialLUT[materialSymbol]
material = None
for mat in doc.getElementsByTagName('material'):
if mat.getAttribute('id') == materialURL[1:]:
                    material = mat
break
if material == None:
print("Couldn't find material '" + materialURL + "'.")
instanceEffects = material.getElementsByTagName('instance_effect')
if len(instanceEffects) == 0:
print('No instance effects')
effectURL = instanceEffects[0].getAttribute('url')
if effectURL[0] != '#':
print('Effect URL points outside document.')
for fx in doc.getElementsByTagName("effect"):
fxId = fx.getAttribute('id')
if fxId == effectURL[1:]:
                    mat = ColladaMaterial.Material(fx, doc, self.skinNode is not None)
mat.count = gcount
self.materials.append(mat)
if mat.materialType != "matte":
self.needsNormals = True
if mat.HasTextureChannel():
self.needsUV = True
break
gcount += int(polygon.getAttribute("count")) * 3
def WriteToScene(self, fileHandle, indent, outFolder):
for i in range(indent):
fileHandle.write(' ')
fileHandle.write('{ "type" : "mesh", "file" : "' + outFolder + '/' + self.outFileName + '" }')
# Write the mesh as a JSON file.
def Write(self, outFolder):
print('Writing mesh ' + outFolder + '/' + self.outFileName)
fileHandle = open(outFolder + '/' + self.outFileName, 'w')
fileHandle.write('{\n')
fileHandle.write(' "materials" : \n [\n')
for m in range(len(self.materials)):
self.materials[m].Write(outFolder)
fileHandle.write(' { "file" : "' + outFolder + '/' + self.materials[m].name + '.json", "start" : ' + str(self.materials[m].count) + ' }')
if m != len(self.materials) - 1:
fileHandle.write(',')
fileHandle.write('\n')
fileHandle.write(' ],\n')
fileHandle.write(' "indices" : ')
fileHandle.write(str(self.faces))
fileHandle.write(',\n')
        if self.skin is not None:
self.skin.Write(fileHandle)
for pa in range(len(self.verts)):
fileHandle.write(' "' + self.verts[pa][0].name + '" : [')
for vsi in range(len(self.verts[pa])):
fileHandle.write(str(self.verts[pa][vsi].points).replace("'", ""))
if vsi != len(self.verts[pa]) - 1:
fileHandle.write(', ')
fileHandle.write(']')
if pa != len(self.verts) - 1:
fileHandle.write(',')
fileHandle.write('\n')
fileHandle.write('\n}')
fileHandle.close()
def GetUniqueVertexIndex(self, a):
if a not in self.uniqueVerts:
self.uniqueVerts[a] = len(self.uniqueVerts)
        return self.uniqueVerts[a]
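    # e.g. (sketch): the first call with vert = (0.0, 1.0, 0.0) returns 0, and a
    # second call with the same tuple returns 0 again instead of growing the
    # vertex list, which is how shared corners get deduplicated.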
# Pull a float_array from the Collada format and store it as a PointArray in the mesh.
def GetSrcArray(self, source, dstName, offset):
the_array = source.getElementsByTagName("float_array")[0];
arr = the_array.firstChild.data
arr = arr.strip();
arr = arr.replace('\n', ' ')
newArray = PointArray(dstName, offset)
newArray.points = [str(s) for s in arr.split(' ')]
s = source.getElementsByTagName("accessor")[0].getAttribute("stride")
if s == "":
newArray.stride = 1
else:
newArray.stride = int(s)
#print("Got source " + newArray.name + ", count " + str(len(newArray.points)) + ", stride " + str(newArray.stride) + ", offset " + str(newArray.offset))
self.sourceArrays[dstName].append(newArray)
| 37.955556 | 156 | 0.606382 | 1,809 | 17,080 | 5.709232 | 0.165837 | 0.040666 | 0.012393 | 0.012393 | 0.176414 | 0.136135 | 0.111348 | 0.097405 | 0.079009 | 0.060418 | 0 | 0.006349 | 0.271546 | 17,080 | 449 | 157 | 38.040089 | 0.823742 | 0.058782 | 0 | 0.215584 | 0 | 0 | 0.092551 | 0 | 0 | 0 | 0 | 0.002227 | 0 | 1 | 0.036364 | false | 0 | 0.005195 | 0 | 0.083117 | 0.031169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
747a9c5326878d11b9906f17374aa59d5eda3f27 | 5,958 | py | Python | nel-wikipedia/kb_creator.py | nadjet/explosion_projects | 74f09fc97befb429a6db9e3235619c3d7b27ea7a | [
"MIT"
] | 1 | 2020-07-18T19:19:54.000Z | 2020-07-18T19:19:54.000Z | nel-wikipedia/kb_creator.py | JaredDelora/projects | ad0fc26dbc05182a439ffe76a362d40c24e25066 | [
"MIT"
] | null | null | null | nel-wikipedia/kb_creator.py | JaredDelora/projects | ad0fc26dbc05182a439ffe76a362d40c24e25066 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import unicode_literals
import logging
from spacy.kb import KnowledgeBase
from train_descriptions import EntityEncoder
import wiki_io as io
logger = logging.getLogger(__name__)
def create_kb(
nlp,
max_entities_per_alias,
min_entity_freq,
min_occ,
entity_def_path,
entity_descr_path,
entity_alias_path,
entity_freq_path,
prior_prob_path,
entity_vector_length,
):
# Create the knowledge base from Wikidata entries
kb = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=entity_vector_length)
entity_list, filtered_title_to_id = _define_entities(nlp, kb, entity_def_path, entity_descr_path, min_entity_freq, entity_freq_path, entity_vector_length)
_define_aliases(kb, entity_alias_path, entity_list, filtered_title_to_id, max_entities_per_alias, min_occ, prior_prob_path)
return kb
def _define_entities(nlp, kb, entity_def_path, entity_descr_path, min_entity_freq, entity_freq_path, entity_vector_length):
# read the mappings from file
title_to_id = io.read_title_to_id(entity_def_path)
id_to_descr = io.read_id_to_descr(entity_descr_path)
# check the length of the nlp vectors
if "vectors" in nlp.meta and nlp.vocab.vectors.size:
input_dim = nlp.vocab.vectors_length
logger.info("Loaded pretrained vectors of size %s" % input_dim)
else:
raise ValueError(
"The `nlp` object should have access to pretrained word vectors, "
" cf. https://spacy.io/usage/models#languages."
)
logger.info("Filtering entities with fewer than {} mentions".format(min_entity_freq))
entity_frequencies = io.read_entity_to_count(entity_freq_path)
    # filter the entities in the KB by frequency, because there's just too much data (8M entities) otherwise
filtered_title_to_id, entity_list, description_list, frequency_list = get_filtered_entities(
title_to_id,
id_to_descr,
entity_frequencies,
min_entity_freq
)
logger.info("Kept {} entities from the set of {}".format(len(description_list), len(title_to_id.keys())))
logger.info("Training entity encoder")
encoder = EntityEncoder(nlp, input_dim, entity_vector_length)
encoder.train(description_list=description_list, to_print=True)
logger.info("Getting entity embeddings")
embeddings = encoder.apply_encoder(description_list)
logger.info("Adding {} entities".format(len(entity_list)))
kb.set_entities(
entity_list=entity_list, freq_list=frequency_list, vector_list=embeddings
)
return entity_list, filtered_title_to_id
def _define_aliases(kb, entity_alias_path, entity_list, filtered_title_to_id, max_entities_per_alias, min_occ, prior_prob_path):
logger.info("Adding aliases from Wikipedia and Wikidata")
_add_aliases(
kb,
entity_list=entity_list,
title_to_id=filtered_title_to_id,
max_entities_per_alias=max_entities_per_alias,
min_occ=min_occ,
prior_prob_path=prior_prob_path,
)
def get_filtered_entities(title_to_id, id_to_descr, entity_frequencies,
min_entity_freq: int = 10):
filtered_title_to_id = dict()
entity_list = []
description_list = []
frequency_list = []
for title, entity in title_to_id.items():
freq = entity_frequencies.get(title, 0)
desc = id_to_descr.get(entity, None)
if desc and freq > min_entity_freq:
entity_list.append(entity)
description_list.append(desc)
frequency_list.append(freq)
filtered_title_to_id[title] = entity
return filtered_title_to_id, entity_list, description_list, frequency_list
def _add_aliases(kb, entity_list, title_to_id, max_entities_per_alias, min_occ, prior_prob_path):
wp_titles = title_to_id.keys()
# adding aliases with prior probabilities
# we can read this file sequentially, it's sorted by alias, and then by count
logger.info("Adding WP aliases")
with prior_prob_path.open("r", encoding="utf8") as prior_file:
# skip header
prior_file.readline()
line = prior_file.readline()
previous_alias = None
total_count = 0
counts = []
entities = []
while line:
splits = line.replace("\n", "").split(sep="|")
new_alias = splits[0]
count = int(splits[1])
entity = splits[2]
if new_alias != previous_alias and previous_alias:
# done reading the previous alias --> output
if len(entities) > 0:
selected_entities = []
prior_probs = []
for ent_count, ent_string in zip(counts, entities):
if ent_string in wp_titles:
wd_id = title_to_id[ent_string]
p_entity_givenalias = ent_count / total_count
selected_entities.append(wd_id)
prior_probs.append(p_entity_givenalias)
if selected_entities:
try:
kb.add_alias(
alias=previous_alias,
entities=selected_entities,
probabilities=prior_probs,
)
except ValueError as e:
logger.error(e)
total_count = 0
counts = []
entities = []
total_count += count
if len(entities) < max_entities_per_alias and count >= min_occ:
counts.append(count)
entities.append(entity)
            previous_alias = new_alias
            line = prior_file.readline()
        # Note: the loop only flushes an alias when the *next* alias is read, so
        # whatever is accumulated for the final alias in the file is never
        # written to the KB.
def read_kb(nlp, kb_file):
kb = KnowledgeBase(vocab=nlp.vocab)
kb.load_bulk(kb_file)
return kb
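# Wiring sketch -- the paths, model name, and hyper-parameters below are
# illustrative assumptions, not values required by this module:
#
#   import spacy
#   from pathlib import Path
#   nlp = spacy.load("en_core_web_lg")
#   kb = create_kb(
#       nlp,
#       max_entities_per_alias=10,
#       min_entity_freq=20,
#       min_occ=5,
#       entity_def_path=Path("entity_defs.csv"),
#       entity_descr_path=Path("entity_descriptions.csv"),
#       entity_alias_path=Path("entity_aliases.csv"),
#       entity_freq_path=Path("entity_freq.csv"),
#       prior_prob_path=Path("prior_prob.csv"),
#       entity_vector_length=64,
#   )
#   kb.dump("kb")  # persist; load back later with read_kb(nlp, "kb")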
| 36.777778 | 158 | 0.648372 | 749 | 5,958 | 4.798398 | 0.230975 | 0.037006 | 0.047579 | 0.042571 | 0.293823 | 0.235392 | 0.195047 | 0.195047 | 0.185031 | 0.185031 | 0 | 0.002789 | 0.277946 | 5,958 | 161 | 159 | 37.006211 | 0.832636 | 0.06764 | 0 | 0.080645 | 0 | 0 | 0.066005 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048387 | false | 0 | 0.040323 | 0 | 0.120968 | 0.008065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
747becfe33166824b286845631f9101c61bac5a1 | 1,045 | py | Python | modulesExecution.py | rando3/leetcode-python | b13bb35fb3cdc9813c62944547d260be2f9cab02 | [
"MIT"
] | null | null | null | modulesExecution.py | rando3/leetcode-python | b13bb35fb3cdc9813c62944547d260be2f9cab02 | [
"MIT"
] | null | null | null | modulesExecution.py | rando3/leetcode-python | b13bb35fb3cdc9813c62944547d260be2f9cab02 | [
"MIT"
] | null | null | null | from collections import deque, defaultdict
GRAY, BLACK = 0, 1
class Graph:
def __init__(self, vertices):
self.graph = defaultdict(list) # dictionary containing adjacency List
self.V = vertices # No. of vertices
def addEdge(self, u, v):
''' Function to add an edge to graph '''
self.graph[u].append(v)
def topological(self):
order, enter, state = deque(), set(self.graph), {}
        def dfs(node):
            state[node] = GRAY  # mark as "in progress"
            for k in self.graph.get(node, ()):
                sk = state.get(k, None)
                if sk == GRAY:
                    # revisiting an in-progress node means the graph has a cycle
                    print("No valid ordering exists.")
                    return
                if sk == BLACK:
                    continue  # neighbour already fully processed
                enter.discard(k)
                dfs(k)
            order.appendleft(node)
            state[node] = BLACK  # mark as done
while enter:
dfs(enter.pop())
return order
if __name__ == "__main__":
g = Graph(3)
g.addEdge(1, 2)
g.addEdge(1, 3)
print(g.topological())
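    # Added example: with a cycle (3 -> 1 closes the loop) dfs prints the
    # "No valid ordering exists." warning, and the partially built order that
    # topological() still returns should not be trusted.
    g2 = Graph(3)
    g2.addEdge(1, 2)
    g2.addEdge(2, 3)
    g2.addEdge(3, 1)
    print(g2.topological())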
| 24.302326 | 78 | 0.504306 | 120 | 1,045 | 4.291667 | 0.525 | 0.069903 | 0.050485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01092 | 0.386603 | 1,045 | 42 | 79 | 24.880952 | 0.792512 | 0.083254 | 0 | 0 | 0 | 0 | 0.034737 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0 | 0.032258 | 0 | 0.258065 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
747ef7880794c3ef33f57ad1412aece6844cc7a6 | 4,661 | py | Python | facebook/messenger/hello-world/webhook.py | imsardine/learning | 925841ddd93d60c740a62e12d9f57ef15b6e0a20 | [
"MIT"
] | null | null | null | facebook/messenger/hello-world/webhook.py | imsardine/learning | 925841ddd93d60c740a62e12d9f57ef15b6e0a20 | [
"MIT"
] | null | null | null | facebook/messenger/hello-world/webhook.py | imsardine/learning | 925841ddd93d60c740a62e12d9f57ef15b6e0a20 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
from flask import Flask, request, abort, jsonify
import requests
SEND_API = 'https://graph.facebook.com/v2.6/me/messages'
WEBHOOK_VERIFY_TOKEN = os.environ['WEBHOOK_VERIFY_TOKEN']
PAGE_ACCESS_TOKEN = os.environ['PAGE_ACCESS_TOKEN']
APP_SECRET = os.environ['APP_SECRET']
app = Flask(__name__)
@app.route('/webhook', methods=['GET', 'POST'])
def webhook():
if request.method == 'GET': # GET for webhook verification
return verify_webhook()
# POST for message events
assert request.is_json
data = request.get_json()
if data['object'] == 'page':
for entry in data['entry']:
for event in entry['messaging']:
timestamp = event['timestamp']
sender_id = event['sender']['id']
recipient_id = event['recipient']['id']
if 'message' in event:
on_message_event(timestamp, sender_id, event['message'])
elif 'postback' in event:
on_postback_event(timestamp, sender_id, event['postback'])
else:
abort(400) # Unknown event
else:
abort(400) # Bad Request
return ''
def verify_webhook():
    query_params = request.args
    hub_mode = query_params.get('hub.mode')
    hub_verify_token = query_params.get('hub.verify_token')
    if hub_verify_token != WEBHOOK_VERIFY_TOKEN:
        abort(403)  # Forbidden
    elif hub_mode == 'subscribe':
        return query_params['hub.challenge']
    else:
        abort(400)  # Bad Request
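# Security sketch: APP_SECRET is loaded above but never used. Facebook signs
# each POST with an X-Hub-Signature header (HMAC-SHA1 of the raw body, keyed
# by the app secret); a production webhook would typically verify it like so
# (an assumption -- this helper is not wired into the routes here):
#
#   import hashlib, hmac
#   def is_valid_signature(req):
#       signature = req.headers.get('X-Hub-Signature', '')
#       expected = 'sha1=' + hmac.new(APP_SECRET.encode('utf-8'),
#                                     req.get_data(), hashlib.sha1).hexdigest()
#       return hmac.compare_digest(signature, expected)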
def on_message_event(timestamp, sender_id, message):
if 'text' in message:
handle_text_message(sender_id, message['text'])
elif 'attachments' in message:
pass
else:
abort(400)
def on_postback_event(timestamp, sender_id, postback):
payload = postback['payload']
send_text(sender_id, 'Thanks for selecting %s' % payload)
def send_text(recipient_id, text):
send_message(recipient_id, {'text': text})
def send_message(recipient_id, message):
params = {'access_token': PAGE_ACCESS_TOKEN}
data = {
'recipient': {
'id': recipient_id
},
'message': message,
}
resp = requests.post(SEND_API, params=params, json=data)
app.logger.info('Message posted: message = %s, response = %s', data, resp.json())
def handle_text_message(sender_id, text):
    if u'吃什麼' in text:  # "what should we eat?"
        send_text(sender_id, u'Judy 爸爸說:不知道')  # "Judy's dad says: no idea"
elif text == 'generic':
send_message(sender_id, demo_generic_template(sender_id, text))
else:
send_text(sender_id, text)
def demo_generic_template(sender_id, message):
return {
'attachment': {
'type': 'template', # structured message
'payload': {
'template_type': 'generic',
'elements': [
{
'title': 'rift',
'subtitle': 'Next-generation virtual reality',
'item_url': 'https://www.oculus.com/en-us/rift/',
'image_url': 'http://messengerdemo.parseapp.com/img/rift.png',
'buttons': [
{
'type': 'web_url',
'url': 'https://www.oculus.com/en-us/rift/',
'title': 'Open Web URL',
},
{
'type': 'postback',
'title': 'Call Postback',
'payload': 'Payload for first bubble'
}
],
},
{
'title': 'touch',
'subtitle': 'Your Hands, Now in VR',
'item_url': 'https://www.oculus.com/en-us/touch/',
'image_url': 'http://messengerdemo.parseapp.com/img/touch.png',
'buttons': [
{
'type': 'web_url',
'url': 'https://www.oculus.com/en-us/touch/',
'title': 'Open Web URL',
},
{
'type': 'postback',
'title': 'Call Postback',
'payload': 'Payload for second bubble'
}
],
},
],
}
}
}
if __name__ == '__main__':
app.run()
| 34.272059 | 87 | 0.488307 | 451 | 4,661 | 4.840355 | 0.286031 | 0.051306 | 0.045809 | 0.050389 | 0.288136 | 0.223546 | 0.165827 | 0.130096 | 0.096198 | 0.096198 | 0 | 0.005287 | 0.391332 | 4,661 | 135 | 88 | 34.525926 | 0.764188 | 0.027676 | 0 | 0.173913 | 0 | 0 | 0.221092 | 0 | 0 | 0 | 0 | 0 | 0.008696 | 1 | 0.069565 | false | 0.008696 | 0.026087 | 0.008696 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
747f3e4104e555a4cdac60485eb1931d3dd4f55a | 18,847 | py | Python | custom_components/midea_dehumidifier_lan/config_flow.py | drthanwho/homeassistant-midea-dehumidifier-lan | b22c609bf07fd540ffa3a3ff28aaa2a184578b69 | [
"MIT"
] | null | null | null | custom_components/midea_dehumidifier_lan/config_flow.py | drthanwho/homeassistant-midea-dehumidifier-lan | b22c609bf07fd540ffa3a3ff28aaa2a184578b69 | [
"MIT"
] | null | null | null | custom_components/midea_dehumidifier_lan/config_flow.py | drthanwho/homeassistant-midea-dehumidifier-lan | b22c609bf07fd540ffa3a3ff28aaa2a184578b69 | [
"MIT"
] | null | null | null | """Config flow for Midea Dehumidifier (Local) integration."""
from __future__ import annotations
import ipaddress
import logging
from typing import Any, Final
from homeassistant import data_entry_flow
from homeassistant.config_entries import ConfigFlow
from homeassistant.const import (
CONF_API_VERSION,
CONF_DEVICES,
CONF_ID,
CONF_IP_ADDRESS,
CONF_NAME,
CONF_PASSWORD,
CONF_TOKEN,
CONF_TYPE,
CONF_UNIQUE_ID,
CONF_USERNAME,
)
import voluptuous as vol
from midea_beautiful.appliance import AirConditionerAppliance, DehumidifierAppliance
from midea_beautiful.cloud import MideaCloud
from midea_beautiful.exceptions import (
AuthenticationError,
CloudAuthenticationError,
CloudError,
MideaError,
MideaNetworkError,
ProtocolError,
RetryLaterError,
)
from midea_beautiful.lan import LanDevice
from midea_beautiful.midea import DEFAULT_APP_ID, DEFAULT_APPKEY, SUPPORTED_APPS
from custom_components.midea_dehumidifier_lan import MideaClient
from .const import ( # pylint: disable=unused-import
CONF_ADVANCED_SETTINGS,
CONF_APPID,
CONF_APPKEY,
CONF_DETECT_AC_APPLIANCES,
CONF_MOBILE_APP,
CONF_BROADCAST_ADDRESS,
CONF_TOKEN_KEY,
CONF_USE_CLOUD,
CONF_WHAT_TO_DO,
CURRENT_CONFIG_VERSION,
DEFAULT_APP,
DEFAULT_PASSWORD,
DEFAULT_USERNAME,
DOMAIN,
IGNORED_IP_ADDRESS,
TAG_CAUSE,
TAG_ID,
TAG_NAME,
)
_LOGGER = logging.getLogger(__name__)
IGNORE = "IGNORE"
USE_CLOUD = "CLOUD"
LAN = "LAN"
def _unreachable_appliance_schema(
name: str,
):
return vol.Schema(
{
vol.Optional(CONF_WHAT_TO_DO, default=LAN): vol.In(
{
IGNORE: "Ignore appliance",
LAN: "Provide appliance's IPv4 address",
USE_CLOUD: "Use cloud API to poll devices",
}
),
vol.Optional(
CONF_IP_ADDRESS,
description={"suggested_value": IGNORED_IP_ADDRESS},
): str,
vol.Optional(CONF_NAME, default=name): str,
vol.Optional(CONF_TOKEN): str,
vol.Optional(CONF_TOKEN_KEY): str,
}
)
# pylint: disable=too-many-arguments
def _advanced_settings_schema(
username: str,
password: str,
appkey: str,
appid: int,
broadcast_address: str,
use_cloud: bool,
):
return vol.Schema(
{
vol.Required(CONF_USERNAME, default=username): str,
vol.Required(CONF_PASSWORD, default=password): str,
vol.Required(CONF_APPKEY, default=appkey): str,
vol.Required(CONF_APPID, default=appid): int,
vol.Optional(CONF_BROADCAST_ADDRESS, default=broadcast_address): str,
vol.Required(CONF_USE_CLOUD, default=use_cloud): bool,
vol.Required(CONF_DETECT_AC_APPLIANCES, default=False): bool,
}
)
def _reauth_schema(
password: str,
appkey: str,
appid: int,
):
return vol.Schema(
{
vol.Required(CONF_PASSWORD, default=password): str,
vol.Required(CONF_APPKEY, default=appkey): str,
vol.Required(CONF_APPID, default=appid): int,
}
)
def _user_schema(username: str, password: str, app: str):
return vol.Schema(
{
vol.Required(CONF_USERNAME, default=username): str,
vol.Required(CONF_PASSWORD, default=password): str,
vol.Optional(CONF_MOBILE_APP, default=app): vol.In(SUPPORTED_APPS.keys()),
vol.Required(CONF_ADVANCED_SETTINGS, default=False): bool,
}
)
class _FlowException(Exception):
def __init__(self, message, cause: str = None) -> None:
super().__init__()
self.message = message
self.cause = cause
# pylint: disable=too-many-instance-attributes
class MideaLocalConfigFlow(ConfigFlow, domain=DOMAIN):
"""
Configuration flow for Midea dehumidifiers on local network uses discovery based on
Midea cloud, so it first requires credentials for it.
If some appliances are registered in the cloud, but not discovered, configuration
flow will prompt for additional information.
"""
VERSION = CURRENT_CONFIG_VERSION
cloud: MideaCloud | None = None # type: ignore
appliance_idx = -1
appliances: list[LanDevice] = []
devices_conf: list[dict] = []
conf = {}
advanced_settings = False
client: Final = MideaClient()
error_cause: str = ""
errors: dict = {}
def _supported_appliance(self, appliance: LanDevice) -> bool:
"""Checks if appliance is supported by integration"""
aircon = False
if self.conf.get(CONF_DETECT_AC_APPLIANCES, False):
aircon = AirConditionerAppliance.supported(appliance.type)
return aircon or DehumidifierAppliance.supported(appliance.type)
def _validate_appliance(self, appliance: LanDevice, conf: dict):
"""
Validates that appliance configuration is correct and matches physical
device
"""
assert self.cloud
use_cloud = conf.get(CONF_USE_CLOUD, False)
if appliance.address == IGNORED_IP_ADDRESS or (
appliance.address is None and not use_cloud
):
_LOGGER.debug("Ignored appliance with id=%s", appliance.appliance_id)
return
try:
if use_cloud:
discovered = self.client.appliance_state(
cloud=self.cloud,
use_cloud=use_cloud,
appliance_id=appliance.appliance_id,
)
else:
try:
ipaddress.IPv4Address(appliance.address)
except Exception as ex:
raise _FlowException(
"invalid_ip_address", appliance.address
) from ex
discovered = self.client.appliance_state(
address=appliance.address,
cloud=self.cloud,
)
except ProtocolError as ex:
raise _FlowException("connection_error", str(ex)) from ex
except AuthenticationError as ex:
raise _FlowException("invalid_auth", str(ex)) from ex
except MideaNetworkError as ex:
raise _FlowException("cannot_connect", str(ex)) from ex
except MideaError as ex:
raise _FlowException("not_discovered", str(ex)) from ex
if discovered is None:
raise _FlowException("not_discovered", appliance.address)
appliance.update(discovered)
def _connect_and_discover(self: MideaLocalConfigFlow):
"""Validates that cloud credentials are valid and discovers local appliances"""
cloud = self.client.connect_to_cloud(
account=self.conf[CONF_USERNAME],
password=self.conf[CONF_PASSWORD],
appkey=self.conf[CONF_APPKEY],
appid=self.conf[CONF_APPID],
)
addresses = self.conf.get(CONF_BROADCAST_ADDRESS, [])
if isinstance(addresses, str):
addresses = [addresses]
if appliances := self.client.find_appliances(cloud, addresses=addresses):
self.devices_conf = [{} for _ in appliances]
else:
self.devices_conf = []
self.appliances = appliances
self.cloud = cloud
async def _validate_discovery_phase(self, user_input: dict[str, Any] | None):
assert user_input is not None
if self.advanced_settings:
assert self.conf is not None
self.conf[CONF_APPKEY] = user_input[CONF_APPKEY]
self.conf[CONF_APPID] = user_input[CONF_APPID]
if address := user_input.get(CONF_BROADCAST_ADDRESS):
try:
ipaddress.IPv4Address(address)
except Exception as ex:
raise _FlowException("invalid_ip_address", address) from ex
self.conf[CONF_BROADCAST_ADDRESS] = address
self.conf[CONF_USE_CLOUD] = user_input[CONF_USE_CLOUD]
self.conf[CONF_DETECT_AC_APPLIANCES] = user_input[CONF_DETECT_AC_APPLIANCES]
else:
self.conf = user_input
self.conf[CONF_USE_CLOUD] = False
self.conf[CONF_DETECT_AC_APPLIANCES] = False
app = user_input.get(CONF_MOBILE_APP, DEFAULT_APP)
self.conf.update(SUPPORTED_APPS.get(app, SUPPORTED_APPS[DEFAULT_APP]))
if user_input.get(CONF_ADVANCED_SETTINGS):
return await self.async_step_advanced_settings()
self.appliance_idx = -1
await self.hass.async_add_executor_job(self._connect_and_discover)
if self.conf[CONF_USE_CLOUD]:
for i, appliance in enumerate(self.appliances):
self.devices_conf[i][CONF_USE_CLOUD] = True
else:
for i, appliance in enumerate(self.appliances):
if self._supported_appliance(appliance):
if not appliance.address:
self.appliance_idx = i
break
if self.appliance_idx >= 0:
return await self.async_step_unreachable_appliance()
return await self._async_add_entry()
def _process_exception(self, ex: Exception):
if isinstance(ex, _FlowException):
self.error_cause = str(ex.cause)
self.errors["base"] = ex.message
elif isinstance(ex, CloudAuthenticationError):
self.error_cause = f"{ex.error_code} - {ex.message}"
self.errors["base"] = "invalid_auth"
elif isinstance(ex, CloudError):
self.error_cause = f"{ex.error_code} - {ex.message}"
self.errors["base"] = "midea_client"
elif isinstance(ex, RetryLaterError):
self.error_cause = f"{ex.error_code} - {ex.message}"
self.errors["base"] = "retry_later"
elif isinstance(ex, MideaError):
self.error_cause = f"{ex.message}"
self.errors["base"] = "midea_client"
else:
raise ex
async def _do_validate(self, user_input: dict[str, Any]):
try:
return await self._validate_discovery_phase(user_input)
except Exception as ex: # pylint: disable=broad-except
self._process_exception(ex)
return None
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> data_entry_flow.FlowResult:
self.advanced_settings = False
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
self.errors = {}
self.error_cause = ""
username = DEFAULT_USERNAME
password = DEFAULT_PASSWORD
app = DEFAULT_APP
if user_input is not None:
username = user_input.get(CONF_USERNAME, username)
password = user_input.get(CONF_PASSWORD, password)
app = user_input.get(CONF_MOBILE_APP, app)
res = await self._do_validate(user_input)
if res:
return res
return self.async_show_form(
step_id="user",
data_schema=_user_schema(username=username, password=password, app=app),
description_placeholders=self._placeholders(),
errors=self.errors,
)
async def async_step_advanced_settings(
self, user_input: dict[str, Any] | None = None
):
"""Step for managing advanced settings"""
self.errors = {}
self.error_cause = ""
self.advanced_settings = True
if user_input is not None:
res = await self._do_validate(user_input)
if res:
return res
else:
user_input = {}
username = user_input.get(
CONF_USERNAME, self.conf.get(CONF_USERNAME, DEFAULT_USERNAME)
)
password = user_input.get(
CONF_PASSWORD, self.conf.get(CONF_PASSWORD, DEFAULT_PASSWORD)
)
appkey = user_input.get(CONF_APPKEY, DEFAULT_APPKEY)
appid = user_input.get(CONF_APPID, DEFAULT_APP_ID)
broadcast_address = user_input.get(
CONF_BROADCAST_ADDRESS, self.conf.get(CONF_BROADCAST_ADDRESS, "")
)
use_cloud = user_input.get(CONF_USE_CLOUD, self.conf.get(CONF_USE_CLOUD, False))
return self.async_show_form(
step_id="advanced_settings",
data_schema=_advanced_settings_schema(
username=username,
password=password,
appkey=appkey,
appid=appid,
broadcast_address=broadcast_address,
use_cloud=use_cloud,
),
description_placeholders=self._placeholders(),
errors=self.errors,
)
async def async_step_unreachable_appliance(
self, user_input: dict[str, Any] | None = None
):
"""Manage the appliances that were not discovered automatically on LAN."""
errors: dict = {}
self.error_cause = ""
appliance = self.appliances[self.appliance_idx]
device_conf = self.devices_conf[self.appliance_idx]
if user_input is not None:
what_to_do = user_input.get(CONF_WHAT_TO_DO, LAN)
appliance.address = (
user_input.get(CONF_IP_ADDRESS, IGNORED_IP_ADDRESS)
if what_to_do == LAN
else IGNORED_IP_ADDRESS
)
appliance.name = user_input.get(CONF_NAME, appliance.name)
appliance.token = user_input.get(CONF_TOKEN, "")
appliance.key = user_input.get(CONF_TOKEN_KEY, "")
device_conf[CONF_USE_CLOUD] = what_to_do == USE_CLOUD
try:
await self.hass.async_add_executor_job(
self._validate_appliance,
appliance,
device_conf,
)
# Find next unreachable appliance
self.appliance_idx = self.appliance_idx + 1
                while self.appliance_idx < len(self.appliances):
                    # check the candidate at the advanced index, not the one just validated
                    if self._supported_appliance(self.appliances[self.appliance_idx]):
                        if self.appliances[self.appliance_idx].address is None:
                            return await self.async_step_unreachable_appliance()
                    self.appliance_idx = self.appliance_idx + 1
# If no unreachable appliances, create entry
if self.appliance_idx >= len(self.appliances):
return await self._async_add_entry()
appliance = self.appliances[self.appliance_idx]
except _FlowException as ex:
self.error_cause = str(ex.cause)
errors["base"] = ex.message
name = appliance.name
return self.async_show_form(
step_id="unreachable_appliance",
data_schema=_unreachable_appliance_schema(name),
description_placeholders=self._placeholders(appliance=appliance),
errors=errors,
)
def _placeholders(self, appliance: LanDevice = None):
placeholders = {
TAG_CAUSE: self.error_cause or "",
}
if appliance:
placeholders[TAG_ID] = appliance.unique_id
placeholders[TAG_NAME] = appliance.name
return placeholders
async def _async_add_entry(self):
assert self.conf is not None
for i, appliance in enumerate(self.appliances):
if not self._supported_appliance(appliance):
continue
if self.devices_conf[i].get(CONF_USE_CLOUD, False) or (
appliance.address and appliance.address != IGNORED_IP_ADDRESS
):
self.devices_conf[i].update(
{
CONF_IP_ADDRESS: appliance.address,
CONF_UNIQUE_ID: appliance.unique_id,
CONF_ID: appliance.appliance_id,
CONF_NAME: appliance.name,
CONF_TYPE: appliance.type,
CONF_TOKEN: appliance.token,
CONF_TOKEN_KEY: appliance.key,
CONF_API_VERSION: appliance.version,
}
)
self.conf[CONF_DEVICES] = self.devices_conf
existing_entry = await self.async_set_unique_id(self.conf[CONF_USERNAME])
if existing_entry:
self.hass.config_entries.async_update_entry(
entry=existing_entry,
data=self.conf,
)
# Reload the config entry otherwise devices will remain unavailable
self.hass.async_create_task(
self.hass.config_entries.async_reload(existing_entry.entry_id)
)
return self.async_abort(reason="reauth_successful")
if len(self.devices_conf) == 0:
return self.async_abort(reason="no_configured_devices")
return self.async_create_entry(
title="Midea Dehumidifiers",
data=self.conf,
)
async def async_step_reauth(self, config):
"""Handle reauthorization request from Abode."""
self.conf = {**config}
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(self, user_input: dict[str, Any] | None = None):
"""Handle reauthorization flow."""
self.errors = {}
username = self.conf.get(CONF_USERNAME, DEFAULT_USERNAME)
password = ""
appkey = self.conf.get(CONF_APPKEY, DEFAULT_APPKEY)
appid = self.conf.get(CONF_APPID, DEFAULT_APP_ID)
if user_input is not None:
password = user_input.get(CONF_PASSWORD, "")
appkey = user_input.get(CONF_APPKEY, DEFAULT_APPKEY)
appid = user_input.get(CONF_APPID, DEFAULT_APP_ID)
try:
self.client.connect_to_cloud(
account=username,
password=password,
appkey=appkey,
appid=appid,
)
except Exception as ex: # pylint: disable=broad-except
self._process_exception(ex)
else:
self.conf[CONF_USERNAME] = username
self.conf[CONF_PASSWORD] = password
self.conf[CONF_APPKEY] = appkey
self.conf[CONF_APPID] = appid
return await self._async_add_entry()
return self.async_show_form(
step_id="reauth_confirm",
data_schema=_reauth_schema(
password=password,
appkey=appkey,
appid=appid,
),
description_placeholders=self._placeholders(),
errors=self.errors,
)
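# Illustrative only (not part of this integration): Home Assistant drives the
# flow above through its config-entries manager, e.g. from a test harness:
#
#   result = await hass.config_entries.flow.async_init(
#       DOMAIN, context={"source": config_entries.SOURCE_USER}
#   )
#   # result["step_id"] == "user"; submit credentials to continue:
#   result = await hass.config_entries.flow.async_configure(
#       result["flow_id"],
#       {CONF_USERNAME: "user@example.com", CONF_PASSWORD: "secret"},
#   )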
| 36.314066 | 88 | 0.607205 | 2,029 | 18,847 | 5.375062 | 0.120749 | 0.033009 | 0.022006 | 0.029342 | 0.404915 | 0.30121 | 0.219879 | 0.161287 | 0.128461 | 0.108839 | 0 | 0.000694 | 0.312092 | 18,847 | 518 | 89 | 36.38417 | 0.840494 | 0.044729 | 0 | 0.284738 | 0 | 0 | 0.030973 | 0.003667 | 0 | 0 | 0 | 0 | 0.009112 | 1 | 0.022779 | false | 0.045558 | 0.034169 | 0.009112 | 0.143508 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74858fe3b513ef3751cf3b9d1d2671162ca352a5 | 1,136 | py | Python | setup.py | abuvanth/framework-identifier | 0619b1831bf29eaabdb8b7a0ef1abfd2328b39f9 | [
"MIT"
] | null | null | null | setup.py | abuvanth/framework-identifier | 0619b1831bf29eaabdb8b7a0ef1abfd2328b39f9 | [
"MIT"
] | null | null | null | setup.py | abuvanth/framework-identifier | 0619b1831bf29eaabdb8b7a0ef1abfd2328b39f9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools,os
with open("README.md", "r") as fh:
long_description = fh.read()
thelibFolder = os.path.dirname(os.path.realpath(__file__))
requirementPath = thelibFolder + '/requirements.txt'
install_requires = [] # Examples: ["gunicorn", "docutils>=0.3", "lxml==0.5a7"]
if os.path.isfile(requirementPath):
with open(requirementPath) as f:
install_requires = f.read().splitlines()
setuptools.setup(
name="wappalyze",
version="1.6",
author="Shaddy Garg",
author_email="shaddygarg1@gmail.com",
description="Framework Identifier tool",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/shaddygarg/framework-identifier",
packages=setuptools.find_packages(),
package_dir={'wappalyze': 'wappalyze'},
package_data={'wappalyze': ['apps.json']},
install_requires=install_requires,
scripts=['wapp'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
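# Typical usage (a convention, not enforced by this file): build/install with
#   pip install .
# after which the `wapp` console script from scripts=['wapp'] is on the PATH.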
| 34.424242 | 78 | 0.680458 | 127 | 1,136 | 5.944882 | 0.661417 | 0.07947 | 0.050331 | 0.07947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010493 | 0.161092 | 1,136 | 32 | 79 | 35.5 | 0.781742 | 0.085387 | 0 | 0 | 0 | 0 | 0.295367 | 0.02027 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.034483 | 0 | 0.034483 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74859fe61f59a2e68fef51e02980d620f6fd3fff | 4,089 | py | Python | convert.py | FlorianPoot/Faroad-PnP | aa31ce80f8a3888a903eac92827e7aef6c58dfe3 | [
"MIT"
] | null | null | null | convert.py | FlorianPoot/Faroad-PnP | aa31ce80f8a3888a903eac92827e7aef6c58dfe3 | [
"MIT"
] | null | null | null | convert.py | FlorianPoot/Faroad-PnP | aa31ce80f8a3888a903eac92827e7aef6c58dfe3 | [
"MIT"
] | null | null | null | import math
import sqlite3
import re
DATABASE_PATH = "line1.db"
class Convert:
model_altium = {"desc": 0, "designator": (1, 10), "position": (2, 3), "rotation": 9}
model_kicad = {"desc": 0, "designator": (1, 2), "position": (3, 4), "rotation": 5}
model_mnt = {"desc": 0, "designator": (4, 5), "position": (1, 2), "rotation": 3}
model_ultiboard = {"desc": 0, "designator": (1, 7), "position": (2, 3), "rotation": 4}
def __init__(self, path: str):
self.path = path
def parse(self) -> list:
"""Parse data from pick and place file"""
# TODO Work only for 4 types of file
# TODO Read already generated file
with open(self.path, "r") as file:
lines = file.readlines()
lines = [line.replace("\n", "") for line in lines] # Remove new line
if lines[0].split() == ['Designator', 'Footprint', 'Mid', 'X', 'Mid', 'Y',
'Ref', 'X', 'Ref', 'Y', 'Pad', 'X', 'Pad', 'Y', 'TB', 'Rotation', 'Comment']:
file_model = self.model_altium
lines = [[s.strip() for s in line.split(" ") if s] for line in lines]
lines = lines[2:] # Remove header
elif lines[0] == "Ref,Val,Package,PosX,PosY,Rot,Side\n":
file_model = self.model_kicad
lines = [line.replace('"', "") for line in lines]
lines = [line.split(",") for line in lines]
lines = lines[1:] # Remove header
elif lines[0] == "Ultiboard Information Export File":
file_model = self.model_ultiboard
lines = lines[9:] # Remove header
lines = [line.split() for line in lines]
elif self.path[-3:] == "mnt":
file_model = self.model_mnt
lines = [line.replace("-", " ") for line in lines]
lines = [line.split() for line in lines]
else:
raise ValueError("Unknown file model")
data = list()
for line in lines:
if len(line) > 0:
d = dict()
d["desc"] = "".join(re.findall("[a-zA-Z]", line[file_model["desc"]]))
digit = re.findall(r"\d+", line[file_model["desc"]])
if len(digit) > 0:
d["desc"] += digit[0].zfill(3)
d["designator"] = f"{line[int(file_model['designator'][0])]} {line[int(file_model['designator'][1])]}"
d["position"] = [float(re.findall(r"\d+\.\d+|\d+", line[i])[0]) for i in file_model["position"]]
d["rotation"] = float(line[file_model["rotation"]])
data.append(d)
return data
@staticmethod
def search(designator: str) -> list:
"""Look in database"""
matches = re.split("[_ :]", designator)
conn = sqlite3.connect(DATABASE_PATH)
cur = conn.cursor()
dat = cur.execute("SELECT * FROM chip_lib;").fetchall()
# chip_name = [d[2] for d in dat] # Select chip_name
        for match in matches:
            temp = list()
            for d in dat:
                if match.upper() in d[2].upper():
                    temp.append(d)
            if len(temp) > 0:
                dat = temp
return dat
@staticmethod
def panel_dimensions(points: list) -> tuple:
"""Get panel dimensions"""
x = [p[0] for p in points]
y = [p[1] for p in points]
x_min, y_min = min(x), min(y)
x_max, y_max = max(x), max(y)
return round(abs(x_max - x_min), 3), round(abs(y_max - y_min), 3)
@staticmethod
def rotate(origin: tuple, points: list, angle: int) -> list:
"""Rotate a list of points clockwise by a given angle around a given origin"""
new_pos = list()
angle = math.radians(-angle)
for p in points:
ox, oy = origin
px, py = p
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
new_pos.append([qx, qy])
return new_pos
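# Example with hypothetical coordinates: rotate two pads 90 degrees clockwise
# around the origin, then measure the bounding box of the result.
#
#   pts = [[10.0, 0.0], [20.0, 5.0]]
#   rotated = Convert.rotate((0.0, 0.0), pts, 90)
#   print(Convert.panel_dimensions(rotated))   # -> (width, height) tuple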
| 32.452381 | 118 | 0.512106 | 531 | 4,089 | 3.868173 | 0.278719 | 0.048199 | 0.035054 | 0.054528 | 0.133398 | 0.08666 | 0.070107 | 0.056475 | 0.056475 | 0.056475 | 0 | 0.018162 | 0.32673 | 4,089 | 125 | 119 | 32.712 | 0.727933 | 0.079237 | 0 | 0.0625 | 0 | 0.0125 | 0.127375 | 0.031041 | 0 | 0 | 0 | 0.008 | 0 | 1 | 0.0625 | false | 0 | 0.0375 | 0 | 0.2125 | 0.0125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7488b095e308da429d77f9667247659a77a3caeb | 10,490 | py | Python | src/device/Device.py | Electronya/PirBlaster | 804b86b2699183c40f2157cba0e151bec9d4725a | [
"MIT"
] | 1 | 2020-11-02T09:37:43.000Z | 2020-11-02T09:37:43.000Z | src/device/Device.py | Electronya/PirBlasterBackend | 804b86b2699183c40f2157cba0e151bec9d4725a | [
"MIT"
] | 1 | 2021-06-26T14:56:31.000Z | 2021-06-26T14:56:31.000Z | src/device/Device.py | Electronya/PirBlasterBackend | 804b86b2699183c40f2157cba0e151bec9d4725a | [
"MIT"
] | 1 | 2020-11-02T09:34:33.000Z | 2020-11-02T09:34:33.000Z | from logging import Logger
import paho.mqtt.client as mqtt
from ircodec.command import CommandSet
import os
from exceptions import CommandNotFound, \
CommandFileAccess
class Device():
# Constants
STATUS_TOPIC = 'status'
CMD_TOPIC = 'command'
RESULT_TOPIC = 'result'
ONLINE_MSG = 'ONLINE'
OFFLINE_MSG = 'OFFLINE'
SUCCESS_MSG = 'done'
ERROR_MSG = 'unsupported'
def __init__(self, logger, appConfig, devConfig, isNew=False):
"""
Constructor.
Params:
logger: The logger.
appConfig: The application configuration.
devConfig: The device configuration.
isNew: The flag indicating if the device is a new one,
or an existing commande set exists.
"""
self.config = devConfig
self.logger = logger.getLogger(f"{devConfig['location']}."
f"{devConfig['name']}")
if isNew:
self.logger.info('Creating new device')
name = self.config['commandSet']['model']
emitter = self.config['commandSet']['emitterGpio']
receiver = self.config['commandSet']['receiverGpio']
description = self.config['commandSet']['description']
self.commandSet = CommandSet(name, emitter_gpio=emitter,
receiver_gpio=receiver,
description=description)
else:
self.logger.info('Loading existing device')
manufacturer = self.config['commandSet']['manufacturer']
model = self.config['commandSet']['model']
try:
self.commandSet = CommandSet.load(os.path.join('./commandSets',
manufacturer, f"{model}."
f"json"))
except Exception:
raise CommandFileAccess('unable to access the command file.')
self.baseTopic = f"{self.config['topicPrefix']}/{self.config['location']}/{self.config['name']}/" # noqa: E501
self._initMqttClient(appConfig.getUserName(),
appConfig.getUserPassword(),
appConfig.getBrokerHostname(),
appConfig.getBrokerPort())
def _initMqttClient(self, userName, userPassword,
brokerIp, brokerPort):
"""
Initialize the MQTT client.
Params:
userName: The user name for connecting to the broker.
userPassword: The user password for connecting to the broker.
            brokerIp: The broker hostname or IP address.
brokerPort: The broker port.
"""
self.client = mqtt.Client(client_id=f"{self.config['location']}."
f"{self.config['name']}")
self.client.on_connect = self._on_connect
self.client.on_disconnect = self._on_disconnect
self.client.on_message = self._on_message
self.client.on_publish = self._on_publish
self.client.on_subscribe = self._on_subscribe
self.client.on_log = self._on_log
willTopic = self.baseTopic + self.STATUS_TOPIC
self.client.will_set(willTopic, self.OFFLINE_MSG,
self.config['lastWill']['qos'],
self.config['lastWill']['retain'])
self.client.username_pw_set(userName, userPassword)
# TODO: Implement switch for secure or not.
# self.client.tls_set()
# self.client.tls_insecure_set(True)
self.logger.info(f"Connecting to {brokerIp}:{brokerPort}")
self.logger.debug(f"Connecting as {userName} with password "
f"{userPassword}")
self.client.connect(brokerIp, port=brokerPort)
def _publishCmdResult(self, success):
"""
Publish a command result.
Params:
success: The flag indicating to send success
or fail result.
"""
resultTopic = self.baseTopic + self.RESULT_TOPIC
if success:
self.logger.info('Command sent')
self.client.publish(resultTopic, payload=self.SUCCESS_MSG)
else:
self.logger.warning('Command unsupported')
self.client.publish(resultTopic, payload=self.ERROR_MSG)
def _on_connect(self, client, usrData, flags, rc):
"""
The on connect callback.
Params:
client: The mqtt client.
usrData: User data.
flags: The connection flags.
rc: The connection result.
"""
self.logger.info('Connected')
self.logger.debug(f"rc {rc}")
statusTopic = self.baseTopic + self.STATUS_TOPIC
self.client.publish(statusTopic, payload=self.ONLINE_MSG,
qos=1, retain=True)
cmdTopic = self.baseTopic + self.CMD_TOPIC
self.client.subscribe(cmdTopic)
def _on_disconnect(self, client, usrData, rc):
"""
The on disconnect callback.
Params:
client: The mqtt client.
usrData: User data.
            rc: The disconnection result.
"""
self.logger.info('Disconnected')
self.logger.debug(f"rc {rc}")
def _on_message(self, client, usrData, msg):
"""
The on message callback.
Params:
client: The mqtt client.
usrData: User data.
msg: The message data.
"""
        result = True
receivedMsg = msg.payload.decode('utf-8')
self.logger.info(f"Message recieved {receivedMsg}")
try:
for i in range(0, 4):
self.logger.debug(f"Sending packet #{i}")
gap = self.config['commandSet']['packetGap']
self.commandSet.emit(receivedMsg, emit_gap=gap)
except KeyError as e:
self.logger.warning(str(e))
            result = False
        self._publishCmdResult(result)
def _on_publish(self, client, usrData, mid):
"""
The on publish callback.
Params:
client: The mqtt client.
usrData: User data.
mid: The message ID that have been published.
"""
self.logger.info('Message published')
self.logger.debug(f"mid {mid}")
def _on_subscribe(self, client, usrData, mid, grantedQoS):
"""
The on subscribe callback.
Params:
client: The mqtt client.
usrData: User data.
mid: The message ID that have been published.
grantedQoS: The granted QoS for the subcription.
"""
self.logger.info(f"Subscibed with QoS {grantedQoS}")
self.logger.debug(f"mid {mid}")
def _on_log(self, client, usrData, logLevel, logMsg):
"""
The on log callback.
Params:
client: The mqtt client.
usrData: User data.
logLevel: The level of the log message.
logMsg: The log message.
"""
switcher = {
mqtt.MQTT_LOG_INFO: self.logger.info,
mqtt.MQTT_LOG_NOTICE: self.logger.info,
mqtt.MQTT_LOG_WARNING: self.logger.warning,
mqtt.MQTT_LOG_ERR: self.logger.error,
mqtt.MQTT_LOG_DEBUG: self.logger.debug,
}
switcher[logLevel](logMsg)
def startLoop(self):
"""
Start the network loop.
"""
self.client.loop_start()
def stopLoop(self):
"""
Stop the network loop.
"""
self.client.loop_stop()
self.client.disconnect()
def getName(self):
"""
Get the device name.
Return:
The device name.
"""
return self.config['name']
def getLocation(self):
"""
Get the device location,
Return:
The device location.
"""
return self.config['location']
def getConfig(self):
"""
Get the device configuration.
Return:
The device configuration.
"""
self.logger.debug('Getting device config')
return self.config
def setConfig(self, config):
"""
Set the device configuration.
Params:
config: The device configuration.
"""
self.logger.debug(f"Setting device config to {config}")
self.config = config
def getCommandList(self):
"""
Get the device command list.
Return:
The device command list.
"""
self.logger.debug('Getting command list')
cmdSetJson = self.commandSet.to_json()
return cmdSetJson['commands'].keys()
def addCommand(self, command, description):
""""
Add a command to the device.
Params:
command: The command name.
description: The command description.
"""
self.logger.debug(f"Adding command {command} to command set")
self.commandSet.add(command, description=description)
def deleteCommand(self, command):
"""
Delete a command from the device.
Params:
command: The command name.
Raise:
CommandNotFound if the requested command is not supported.
"""
self.logger.debug(f"Deleting command {command} from command set")
try:
self.commandSet.remove(command)
except KeyError:
raise CommandNotFound(command)
def saveCommandSet(self):
"""
Save the device command set.
Raise:
CommandFileAccess if the save operation fail.
"""
try:
self.commandSet.save_as(os.path.join('./commandSets',
self.config['commandSet']['manufacturer'],
f"{self.config['commandSet']['model']}"
f".json"))
except Exception:
raise CommandFileAccess('unable to access the command file.')
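# Construction sketch -- the dict shape below is inferred from how Device reads
# devConfig and is an assumption, not a documented schema:
#
#   devConfig = {
#       'name': 'tv', 'location': 'livingroom', 'topicPrefix': 'pirblaster',
#       'lastWill': {'qos': 1, 'retain': True},
#       'commandSet': {'manufacturer': 'samsung', 'model': 'tv-xyz',
#                      'emitterGpio': 22, 'receiverGpio': 23,
#                      'description': 'living room TV', 'packetGap': 0.01},
#   }
#   device = Device(logger, appConfig, devConfig)
#   device.startLoop()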
| 33.301587 | 120 | 0.538418 | 1,012 | 10,490 | 5.504941 | 0.211462 | 0.052055 | 0.03231 | 0.025848 | 0.210734 | 0.202118 | 0.148627 | 0.122061 | 0.112368 | 0.095136 | 0 | 0.001053 | 0.366444 | 10,490 | 314 | 121 | 33.407643 | 0.837195 | 0.250906 | 0 | 0.097222 | 0 | 0 | 0.149661 | 0.029874 | 0 | 0 | 0 | 0.003185 | 0 | 1 | 0.131944 | false | 0.034722 | 0.034722 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
748b0cd71a8c5de3ca15b5e64c2515f58ee12902 | 5,459 | py | Python | hrssCalcbulk from folder of .fit files.py | Emberflurry/Bulk-HRSS-TSS-calc-from-Garmin-.fitfiles | d82f064ee0a97b2d826f434b5a21e8869c090e17 | [
"MIT"
] | 2 | 2021-08-21T14:05:45.000Z | 2022-03-26T02:22:02.000Z | hrssCalcbulk from folder of .fit files.py | Emberflurry/Bulk-HRSS-TSS-calc-from-Garmin-.fitfiles | d82f064ee0a97b2d826f434b5a21e8869c090e17 | [
"MIT"
] | 1 | 2021-07-19T17:45:13.000Z | 2021-07-19T19:18:07.000Z | hrssCalcbulk from folder of .fit files.py | Emberflurry/Bulk-HRSS-TSS-calc-from-Garmin-.fitfiles | d82f064ee0a97b2d826f434b5a21e8869c090e17 | [
"MIT"
] | null | null | null | import os
import datetime
from fitparse import *
import pandas as pd
import numpy as np
from tqdm import tqdm
from datetime import datetime
import re
# import matplotlib.pyplot as plt (not needed I guess)
directory = 'fitfiles' # may want to make this more flexible--ie: not just in the directory of the code...works for now tho and not bad.
#Note^: may need to rename, perhaps make easy tkinter interface for picking a local directory or have part of the program move the fit files to the project directory automatically
# if files need renaming INTEGRATE IN THE FUTURE!!!! ESPECIALLY WITH FULL GARMIN->OUTPUT WORKFLOW AUTO!!!
def fitfile_decapitalization():
rename_dict = {'FIT': 'fit'}
for filename in os.listdir(directory):
base_file, ext = os.path.splitext(filename)
ext = ext.replace('.','')
if ext in rename_dict:
new_ext = rename_dict[ext]
new_file = base_file + '.' + new_ext
old_path = os.path.join(directory, filename)
new_path = os.path.join(directory, new_file)
os.rename(old_path, new_path)
fitfile_decapitalization()
# HRSS Calc--PERSONAL INFO--REQUIRED FOR CALCULATIONS TO BE ACCURATE
lthr = 191.0 # heart rate(bpm) at lactate threshold
my_maxhr = 212 # max heart rate(bpm)
my_rhr = 50 # resting heart rate(bpm)
my_sex = "MALE"
eulersNum = 2.7182818 # duh
if my_sex == "MALE":
my_baseconstant = .64
my_yvalue = 1.92
else:
my_yvalue = 1.67
my_baseconstant = .86
# component calcs of the multi-part exponential HRSS equation:
my_hrrAtLT = ((lthr - my_rhr) / (my_maxhr - my_rhr))
sixtyatLTHR_SS = 60 * my_hrrAtLT * my_baseconstant * (
eulersNum ** (my_yvalue * my_hrrAtLT)) # aka "N" in relevant equations
N_ova_hundy = sixtyatLTHR_SS / 100
hundy_ova_N = 100 / sixtyatLTHR_SS
def load_workout(workout_file):
"""
Load fitfile and transforms
it into a pandas Dataframe.
Nan Values are replaced.
"""
fitfile = FitFile(workout_file)
# This is an ugly hack to avoid timing issues
while True:
try:
fitfile.messages
break
except KeyError:
continue
# Get all data messages that are of type "record"
workout = []
for record in fitfile.get_messages('record'):
r = {}
# Go through all the data entries in this record
for record_data in record:
r[record_data.name] = record_data.value
# add the record(s) to the workout file
workout.append(r)
# not used, don't remember why, but im not touching it.
"""workout_df = pd.DataFrame(workout)
workout_df.fillna(method='ffill', inplace=True)
workout_df.fillna(method='backfill', inplace=True)"""
# save as a df (specifically a numpy array)
workout = np.array(workout)
return workout
def get_date(workout_df):
# pass the workout df, returns the date
workout_date = workout_df['timestamp'][0].date()
return workout_date
def gett_date(string):
# splits a date that is input for future timestamp parsing
split = []
for i in re.split("-|T|:| ", string)[:-1]:
if (i[0] == '0'):
i = i[1:]
split.append(eval(i))
date = datetime(split[0], split[1], split[2], split[3], split[4], split[5])
return date
def difference_between_dates(date1, date2):
# parses timestamps (which are still stored as date data (haha)) for changes in time between recordings
secs = (date2 - date1).seconds
mins = (secs / 60)
return round(mins, 4) # NEW, ROUNDS TO 4 DP
# Loop through fitfile directory, load hr data, calculate HRSS
for filename in tqdm(os.listdir(directory)):
if filename.endswith('.fit'):
workout = load_workout((os.path.join(directory, filename)))
if 'heart_rate' in workout[0]:
# printing first 2 rows to manually check presence/forms
print(workout[0])
print(workout[1])
print(filename)
# for HRSS: form is SUM (Ti*HRRi*baseconst * e^(yval*HRRi) ) * 100/(60*HHRlt*basconst * e^(yval*HRRlt) )
# simplified: SUM (ATERM) * BTERM
# workflow is: calc aterm*bterm indiv, then sum
instantChT = [] # list of "instantaneous" changes in time
for i in range(len(workout) - 1):
# print(workout[i])
instantChT.append(difference_between_dates(workout[i]["timestamp"], workout[i + 1]["timestamp"]))
print(instantChT) # this works
instantHr = [] # list of (hopefully corresponding) instantaneous heart rate readings
for i in range(len(workout) - 1):
instantHr.append(workout[i]["heart_rate"])
print(instantHr)
HRRi = [] # list of instantaneous heart rate reserve values
for i in range(len(instantHr)):
HRRi.append((instantHr[i] - my_rhr) / (my_maxhr - my_rhr))
print(HRRi)
AtermBterm = [] # see simplified equation roughly 20 lines above
for i in range(len(instantChT)):
AtermBterm.append(
(instantChT[i] * HRRi[i] * my_baseconstant * (eulersNum ** (my_yvalue * HRRi[i]))) * hundy_ova_N)
print(AtermBterm)
print(sum(AtermBterm))
else:
print("issue w HR in: " + filename + " :_(...either lacking HR data or is mislabeled, i think.")
continue
| 35.914474 | 179 | 0.63235 | 732 | 5,459 | 4.621585 | 0.382514 | 0.018623 | 0.008868 | 0.013006 | 0.073603 | 0.023056 | 0.013006 | 0 | 0 | 0 | 0 | 0.01697 | 0.265983 | 5,459 | 151 | 180 | 36.152318 | 0.827302 | 0.328082 | 0 | 0.0625 | 0 | 0 | 0.046465 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052083 | false | 0 | 0.083333 | 0 | 0.177083 | 0.09375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
748c84a6a78daf32f1db550d0822cdb68bd66142 | 1,908 | py | Python | textquote.py | patmanteau/panflutist | 74e56fe6b6873015cfba766bb61277d6d81bbece | [
"BSD-3-Clause"
] | null | null | null | textquote.py | patmanteau/panflutist | 74e56fe6b6873015cfba766bb61277d6d81bbece | [
"BSD-3-Clause"
] | null | null | null | textquote.py | patmanteau/panflutist | 74e56fe6b6873015cfba766bb61277d6d81bbece | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
r"""
Panflute filter supporting \textquote and \foreigntextquote in LaTeX
Issues:
- Nested parens with pandoc-citeproc
Usage:
- Use Pandoc markdown bracketed Spans:
- [Ganz Gallien ist von den Römern besetzt]{.textquote cite="[vgl. @Goscinny_Asterix_1967, 1\psqq]"}
- [Toute la Gaule est occupée par les Romains]{.textquote lang="francais" punct="..." cite="[vgl. @Goscinny_Asterix_1967, 1\psqq]"}
- This filter will emit \{textquote/foreigntextquote}[<cite>][<punct>]{<text>} commands
"""
from jinja2tex import latex_env
import panflute as pf
QUOTE = latex_env.from_string(r"""
<%- if lang %>\foreigntextquote{<< lang >>}<% else %>\textquote<% endif -%>
<% if cite %>[{<< cite >>}]<% endif -%>
<% if punct %>[<< punct >>]<% endif -%>
{<< text >>}""")
def prepare(doc):
pass
def action(e, doc):
if not doc.format == 'latex':
return None
if isinstance(e, pf.Span) and 'textquote' in e.classes:
cite = e.attributes.get('cite')
if cite:
cite = pf.convert_text(cite,
extra_args=['--biblatex'],
input_format='markdown',
output_format='latex')
text = pf.convert_text(pf.Plain(e),
extra_args=['--biblatex'],
input_format='panflute',
output_format='latex')
values = {
'lang': e.attributes.get('lang'),
'cite': cite,
'punct': e.attributes.get('punct'),
'text': text
}
tex = QUOTE.render(values)
return pf.RawInline(tex, format='latex')
else:
return None
def finalize(doc):
pass
def main(doc=None):
return pf.run_filter(action, prepare=prepare, finalize=finalize, doc=doc)
if __name__ == '__main__':
main()
| 28.058824 | 135 | 0.559224 | 213 | 1,908 | 4.896714 | 0.441315 | 0.042186 | 0.040268 | 0.042186 | 0.113135 | 0.059444 | 0.059444 | 0 | 0 | 0 | 0 | 0.008154 | 0.292977 | 1,908 | 67 | 136 | 28.477612 | 0.765011 | 0.271488 | 0 | 0.2 | 0 | 0 | 0.196816 | 0.015919 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0.05 | 0.05 | 0.025 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
748cbeedaa4ac6fe4d023634af107928994f029c | 335 | py | Python | sandbox/gff/fixit.py | molecules/bio | 2a86a931162be47beca4d7bf73b2b3978f3ba402 | [
"MIT"
] | 28 | 2020-11-28T01:18:16.000Z | 2022-03-25T16:42:53.000Z | sandbox/gff/fixit.py | molecules/bio | 2a86a931162be47beca4d7bf73b2b3978f3ba402 | [
"MIT"
] | 3 | 2021-09-28T11:52:07.000Z | 2022-03-22T07:47:46.000Z | sandbox/gff/fixit.py | molecules/bio | 2a86a931162be47beca4d7bf73b2b3978f3ba402 | [
"MIT"
] | 8 | 2020-12-01T17:02:26.000Z | 2022-02-14T16:57:46.000Z | """
Adds a version number to the sequence id of a GFF file.
"""
import sys
ACC=sys.argv[1]
VER=sys.argv[2]
for line in sys.stdin:
line = line.strip()
elems = line.split()
if elems and elems[0] == ACC:
elems[0] = f'{elems[0]}.{VER}'
if line.startswith("#"):
print (line)
else:
print("\t".join(elems)) | 20.9375 | 46 | 0.570149 | 52 | 335 | 3.673077 | 0.615385 | 0.094241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01992 | 0.250746 | 335 | 16 | 47 | 20.9375 | 0.741036 | 0.137313 | 0 | 0 | 0 | 0 | 0.067376 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7492f98aa56400f8ae0d4988b98264efada3bc27 | 2,010 | py | Python | rstobj/directives/toc.py | MacHu-GWU/rstobj-project | 40601f08e9e7ee2df5c514af04b040f61f76ed78 | [
"MIT"
] | null | null | null | rstobj/directives/toc.py | MacHu-GWU/rstobj-project | 40601f08e9e7ee2df5c514af04b040f61f76ed78 | [
"MIT"
] | null | null | null | rstobj/directives/toc.py | MacHu-GWU/rstobj-project | 40601f08e9e7ee2df5c514af04b040f61f76ed78 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
table of content directive.
"""
import attr
from .base import Directive
@attr.s
class TableOfContent(Directive):
"""
``.. contents::`` directive.
parameter definition see here: http://docutils.sourceforge.net/docs/ref/rst/directives.html#table-of-contents
:param title: str, required.
:param depth: int, optional.
:param local: bool, optional.
:type backlinks: str
:param backlinks: optional. one of
:attr:`TableOfContent.BacklinksOptions`.
Example::
toc = TableOfContent(title="Table of Contents", depth=2)
toc.render()
Output::
.. contents:: Table of Contents
:depth: 2
"""
title = attr.ib(default=None) # type: str
depth = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
) # type: int
local = attr.ib(
default=False,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
) # type: bool
backlinks = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(str)),
)
meta_directive_keyword = "contents"
meta_not_none_fields = tuple()
class BacklinksOptions(object):
"""
``backlinks`` argument choices.
- ``TableOfContent.BacklinksOptions.entry``: ``"entry"``
- ``TableOfContent.BacklinksOptions.top``: ``"top"``
- ``TableOfContent.BacklinksOptions.none``: ``"none"``
"""
entry = "entry"
top = "top"
none = "none"
@backlinks.validator
def check_backlinks(self, attribute, value): # pragma: no cover
if value not in [None, "entry", "top", "none"]:
raise ValueError(
"TableOfContent.backlinks has to be one of 'entry', 'top', 'none'!"
)
@property
def arg(self):
if self.title is None:
return ""
else:
return self.title
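# Example combining the documented options (a sketch):
#
#   toc = TableOfContent(
#       title="Table of Contents",
#       depth=2,
#       backlinks=TableOfContent.BacklinksOptions.top,
#   )
#   toc.render()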
| 25.769231 | 113 | 0.595025 | 208 | 2,010 | 5.706731 | 0.399038 | 0.070767 | 0.043808 | 0.042965 | 0.203033 | 0.16765 | 0.16765 | 0.16765 | 0.121314 | 0.121314 | 0 | 0.002046 | 0.270647 | 2,010 | 77 | 114 | 26.103896 | 0.80764 | 0.389055 | 0 | 0.057143 | 0 | 0 | 0.088022 | 0.021779 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.057143 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7493ca43323db77c43ed2fc83412e3ad722c32a7 | 3,256 | py | Python | StaffPage.py | yurunyang1998/XUPT_- | 415c20550b54914b1206cfa3b83ddbe8260e7a6f | [
"MIT"
] | null | null | null | StaffPage.py | yurunyang1998/XUPT_- | 415c20550b54914b1206cfa3b83ddbe8260e7a6f | [
"MIT"
] | null | null | null | StaffPage.py | yurunyang1998/XUPT_- | 415c20550b54914b1206cfa3b83ddbe8260e7a6f | [
"MIT"
] | null | null | null |
from flask import *
import databaseModel
from functools import wraps
import time
bp = Blueprint("StaffPage", __name__, url_prefix='/StaffPage')
def login_required(func):
@wraps(func)
def wrapper(*args, **kwargs):
UserName = session.get('UserName')
if(UserName is not None):
g.UserName = UserName
return func(*args, **kwargs)
else:
return redirect(url_for("index"))
return wrapper
@bp.route("/OrderToStore",methods=["POST"])
@login_required
def OrderToStore():
try:
OrderNum = request.form["OrderNum"]
method = request.form["way"]
order = databaseModel.Orders.query.filter_by(OrderNum = OrderNum).first()
        if order is not None:
            if method == "inStore":  # check-in: the parcel enters this courier's station
                # add a row to HistoryOrders to record that this courier handled the parcel
                neworder_ = databaseModel.HistoryOrders(OrderNum=OrderNum, StaffNum=g.UserName)
                databaseModel.db.session.add(neworder_)
                # update StaffNum in OrderStaffs: the parcel now has a new carrier
                orderstaff_ = databaseModel.OrderStaffs.query.filter_by(OrderNum=OrderNum).first()
                orderstaff_.StaffNum = g.UserName
                databaseModel.db.session.commit()
            if method == "sigh":  # sign-off: the parcel is confirmed as delivered
                order_ = databaseModel.Orders.query.filter_by(OrderNum=OrderNum).first()
                order_.StagNum = 2  # mark the order as signed for
                # add a row to HistoryOrders to record that this courier handled the parcel
                neworder_ = databaseModel.HistoryOrders(OrderNum=OrderNum, StaffNum=g.UserName)
                databaseModel.db.session.add(neworder_)
                # reset StaffNum in OrderStaffs: the parcel is no longer being carried
                orderstaff_ = databaseModel.OrderStaffs.query.filter_by(OrderNum=OrderNum).first()
                orderstaff_.StaffNum = "0"
                databaseModel.db.session.commit()
except Exception as e:
return {"code":"0"}
return {"code":"200"}
@bp.route("/HistoryOrders",methods=["GET"])
@login_required
def HistoryOrders():
historyorders = databaseModel.HistoryOrders.query.filter_by(StaffNum= g.UserName).all()
jsondata = {}
for i,c in enumerate(historyorders):
ordernum_ = c.OrderNum
order = databaseModel.Orders.query.filter_by(OrderNum=ordernum_).first()
recvaddr_ = order.RecvAddr
        if order.StagNum == 1:
            orderstag_ = '已寄出'  # "shipped"
        if order.StagNum == 2:
            orderstag_ = "已签收"  # "signed for"
data = {"OrderNum":ordernum_,"RecvAddr":recvaddr_,"OrderStag":orderstag_}
jsondata[i] = data
return jsondata
@bp.route("/StaffInfo",methods=["POST"])
@login_required
def StaffInfo():
try:
staffinfo = databaseModel.Staffs.query.filter_by(UserName=g.UserName).first()
jsondata = {}
StaffName =staffinfo.StaffName
StaffNum = staffinfo.UserName
StaffTele = staffinfo.StaffTele
StaffIdCard = staffinfo.StaffIdCard
print(StaffIdCard,StaffName)
jsondata["code"] = "1"
jsondata["StaffName"]= StaffName
jsondata["StaffNum"] = StaffNum
jsondata["StaffTele"] = StaffTele
jsondata["StaffIdCard"] = StaffIdCard
except Exception as e:
current_app.logger.debug(e)
return {"code":"0"}
return jsondata
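# Wiring sketch (the enclosing Flask app is an assumption):
#
#   from flask import Flask
#   import StaffPage
#   app = Flask(__name__)
#   app.register_blueprint(StaffPage.bp)   # serves these routes under /StaffPage/...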
| 32.56 | 138 | 0.635749 | 312 | 3,256 | 6.519231 | 0.314103 | 0.06293 | 0.044739 | 0.051622 | 0.377581 | 0.333333 | 0.333333 | 0.333333 | 0.333333 | 0.247788 | 0 | 0.00409 | 0.249079 | 3,256 | 99 | 139 | 32.888889 | 0.827812 | 0.04914 | 0 | 0.276316 | 0 | 0 | 0.06252 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065789 | false | 0 | 0.052632 | 0 | 0.223684 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7493cdea568afe3db8cc36c7efb7d3a24d08c8e1 | 1,849 | py | Python | daily_problems/problem_101_to_200/problem_131.py | rrwt/daily-coding-challenge | b16fc365fd142ebab429e605cb146c8bb0bc97a2 | [
"MIT"
] | 1 | 2019-04-18T03:29:02.000Z | 2019-04-18T03:29:02.000Z | daily_problems/problem_101_to_200/problem_131.py | rrwt/daily-coding-challenge | b16fc365fd142ebab429e605cb146c8bb0bc97a2 | [
"MIT"
] | null | null | null | daily_problems/problem_101_to_200/problem_131.py | rrwt/daily-coding-challenge | b16fc365fd142ebab429e605cb146c8bb0bc97a2 | [
"MIT"
] | null | null | null | """
Given the head to a singly linked list, where each node also has a “random” pointer
that points to anywhere in the linked list, deep clone the list.
"""
from typing import Tuple
class Node:
def __init__(self, data: int) -> None:
self.data = data
self.next = None
self.random = None
def deep_clone_ll(ll_head: Node) -> Tuple[Node, Node]:
"""
Time Complexity: O(n)
Space Complexity: O(1)
"""
runner = ll_head
# get nodes of new linked list
while runner:
node = Node(runner.data)
node.next = runner.next
runner.next = node
runner = runner.next.next
# get random pointers of new ll
runner = ll_head
while runner:
        runner.next.random = runner.random.next if runner.random else None
runner = runner.next.next
# detach two lists and fix their next pointers
runner = ll_head
new_head = ll_head.next
while runner.next:
next_node = runner.next
if next_node.next:
runner.next = next_node.next
next_node.next = next_node.next.next
runner = runner.next
else:
runner.next = None
return ll_head, new_head
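# Optional sanity check (added sketch, not part of the original solution): walk the
# original and the clone in lock-step, asserting the clone shares no nodes with the
# original while mirroring its data and random structure.
# Usage: verify_clone(*deep_clone_ll(head)) on a freshly built list.
def verify_clone(original: Node, clone: Node) -> None:
    original_ids = set()
    runner = original
    while runner:
        original_ids.add(id(runner))
        runner = runner.next
    a, b = original, clone
    while a and b:
        assert id(b) not in original_ids, "clone shares a node with the original"
        assert a.data == b.data, "data mismatch"
        a_rand = a.random.data if a.random else None
        b_rand = b.random.data if b.random else None
        assert a_rand == b_rand, "random pointer mismatch"
        a, b = a.next, b.next
    assert a is None and b is None, "lists differ in length"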
if __name__ == "__main__":
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
head.next.next.next = Node(4)
head.next.next.next.next = Node(5)
head.random = head.next.next
head.next.random = head
head.next.next.random = head.next.next.next.next
head.next.next.next.random = head.next.next
head.next.next.next.next.random = head.next
original, copied = deep_clone_ll(head)
while original:
print("orig node:", original.data, ", random:", original.random.data)
print("copied node:", copied.data, ", random:", copied.random.data)
original = original.next
copied = copied.next
| 26.042254 | 83 | 0.622499 | 259 | 1,849 | 4.332046 | 0.254826 | 0.171123 | 0.096257 | 0.071301 | 0.171123 | 0.153298 | 0.039216 | 0.039216 | 0 | 0 | 0 | 0.004464 | 0.273121 | 1,849 | 70 | 84 | 26.414286 | 0.830357 | 0.161168 | 0 | 0.155556 | 0 | 0 | 0.031537 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.022222 | 0 | 0.111111 | 0.044444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
749446711702bf00c7f0f56b3ec4b6af7960f483 | 1,095 | py | Python | view/basev64/__init__.py | desktophzj/wolverine-tkinter | 8e499c877ba7f5da11e63e30ecb25d54f6f3b6e9 | [
"Apache-2.0"
] | null | null | null | view/basev64/__init__.py | desktophzj/wolverine-tkinter | 8e499c877ba7f5da11e63e30ecb25d54f6f3b6e9 | [
"Apache-2.0"
] | null | null | null | view/basev64/__init__.py | desktophzj/wolverine-tkinter | 8e499c877ba7f5da11e63e30ecb25d54f6f3b6e9 | [
"Apache-2.0"
] | null | null | null | import tkinter as tk
from tkinter import ttk
raw_data_label_y_location = 20
raw_data_entry_location = raw_data_label_y_location + 20
encode_button_location = raw_data_entry_location + 20
decode_button_location = encode_button_location + 20
base64_data_label_y_location = decode_button_location + 20
base64_data_entry_y_location = base64_data_label_y_location + 20
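# NOTE: the y-location constants above are currently unused; the place() calls
# below use hard-coded coordinates instead.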
class Base64:
def create_base64_tab(self, tab_control):
tab = ttk.Frame(tab_control)
# raw data
raw_data_label = tk.Label(tab, text="raw data")
raw_data_label.place(x=500, y=0)
raw_data_entry = tk.Entry(tab)
raw_data_entry.place(x=500, y=100)
encode_btn = tk.Button(tab, text="encode", width=5)
encode_btn.place(x=500, y=200)
decode_btn = tk.Button(tab, text="decode", width=5)
decode_btn.place(x=500, y=300)
# base64
base64_data_label = tk.Label(tab, text="base64 data")
base64_data_label.place(x=500, y=400)
base64_data_entry = tk.Entry(tab)
base64_data_entry.place(x=500, y=500)
return tab
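# Illustrative usage (added sketch, not part of the original module), assuming the
# tab is meant to be mounted in a ttk.Notebook:
if __name__ == "__main__":
    root = tk.Tk()
    notebook = ttk.Notebook(root)
    notebook.add(Base64().create_base64_tab(notebook), text="base64")
    notebook.pack(expand=True, fill="both")
    root.mainloop()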
| 33.181818 | 64 | 0.699543 | 172 | 1,095 | 4.116279 | 0.215116 | 0.09887 | 0.076271 | 0.084746 | 0.536723 | 0.237288 | 0 | 0 | 0 | 0 | 0 | 0.080645 | 0.207306 | 1,095 | 32 | 65 | 34.21875 | 0.735023 | 0.013699 | 0 | 0 | 0 | 0 | 0.028784 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.083333 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
749477a57d991a4da5d6d7c1a4757df77ab77a72 | 3,307 | py | Python | research/cv/csd/src/edsr_slim.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/cv/csd/src/edsr_slim.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/cv/csd/src/edsr_slim.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""edsr_slim.py"""
from src import common
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
class EDSR(nn.Cell):
"""[EDSR]
Args:
nn ([type]): [description]
"""
def __init__(self, args):
super(EDSR, self).__init__()
self.n_colors = args.n_colors
n_resblocks = args.n_resblocks
self.n_feats = args.n_feats
self.kernel_size = 3
scale = args.scale[0]
act = nn.ReLU()
self.rgb_range = args.rgb_range
self.sub_mean = common.MeanShift(self.rgb_range)
self.add_mean = common.MeanShift(self.rgb_range, sign=1)
self.head = common.conv(args.n_colors, self.n_feats, self.kernel_size, padding=self.kernel_size//2)
m_body = [
common.ResidualBlock(
self.n_feats, self.kernel_size, act=act, res_scale=args.res_scale
) for _ in range(n_resblocks)
]
self.body = nn.CellList(m_body)
self.body_conv = common.conv(self.n_feats, self.n_feats, self.kernel_size, padding=self.kernel_size//2)
self.upsampler = common.Upsampler(scale, self.n_feats)
self.tail_conv = common.conv(self.n_feats, args.n_colors, self.kernel_size, padding=self.kernel_size//2)
def construct(self, x, width_mult=Tensor(1.0)):
"""construct"""
width_mult = width_mult.asnumpy().item()
feature_width = int(self.n_feats * width_mult)
conv2d = ops.Conv2D(out_channel=feature_width, kernel_size=self.kernel_size, mode=1, pad_mode='pad',
pad=self.kernel_size // 2)
biasadd = ops.BiasAdd()
x = self.sub_mean(x)
weight = self.head.weight[:feature_width, :self.n_colors, :, :]
bias = self.head.bias[:feature_width]
x = conv2d(x, weight)
x = biasadd(x, bias)
residual = x
for block in self.body:
residual = block(residual, width_mult)
weight = self.body_conv.weight[:feature_width, :feature_width, :, :]
bias = self.body_conv.bias[:feature_width]
residual = conv2d(residual, weight)
residual = biasadd(residual, bias)
residual += x
x = self.upsampler(residual, width_mult)
weight = self.tail_conv.weight[:self.n_colors, :feature_width, :, :]
bias = self.tail_conv.bias[:self.n_colors]
conv2d = ops.Conv2D(out_channel=self.n_colors, kernel_size=self.kernel_size,
mode=1, pad_mode='pad', pad=self.kernel_size//2)
x = conv2d(x, weight)
x = biasadd(x, bias)
x = self.add_mean(x)
return x
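# Illustrative construction (added sketch, not from the original file): the field
# values below are assumptions showing which hyper-parameters EDSR reads from
# `args`; MindSpore and src.common must be importable for this to run.
if __name__ == "__main__":
    from types import SimpleNamespace
    demo_args = SimpleNamespace(n_colors=3, n_resblocks=16, n_feats=64,
                                scale=[2], rgb_range=255, res_scale=0.1)
    net = EDSR(demo_args)  # width_mult defaults to 1.0, i.e. the full-width network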
| 38.011494 | 112 | 0.629574 | 451 | 3,307 | 4.439024 | 0.292683 | 0.06993 | 0.083916 | 0.034965 | 0.286214 | 0.214785 | 0.147852 | 0.147852 | 0.102897 | 0.102897 | 0 | 0.010723 | 0.238585 | 3,307 | 86 | 113 | 38.453488 | 0.784353 | 0.213789 | 0 | 0.074074 | 0 | 0 | 0.002346 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.074074 | 0 | 0.148148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7494c2f962bcdfe2ec84fe9915ae2232069d6c59 | 6,317 | py | Python | 1_prepare_data/docker/code/make_tfRecords.py | gianpd/amazon-sagemaker-tensorflow-object-detection-api | 80c654a9767bb26389db468c7e6df3300f2debb4 | [
"MIT-0"
] | null | null | null | 1_prepare_data/docker/code/make_tfRecords.py | gianpd/amazon-sagemaker-tensorflow-object-detection-api | 80c654a9767bb26389db468c7e6df3300f2debb4 | [
"MIT-0"
] | null | null | null | 1_prepare_data/docker/code/make_tfRecords.py | gianpd/amazon-sagemaker-tensorflow-object-detection-api | 80c654a9767bb26389db468c7e6df3300f2debb4 | [
"MIT-0"
] | null | null | null | import os
import sys
import json
from pathlib import Path
import pandas as pd
import random
import tensorflow as tf
import io
import argparse
from PIL import Image
from collections import namedtuple
from object_detection.utils import dataset_util, label_map_util
import logging
logging.basicConfig(stream=sys.stdout, format='',
level=logging.INFO, datefmt=None)
logger = logging.getLogger('NJDD-prepare-data')
# Initiate argument parser
parser = argparse.ArgumentParser(
description="Sample TensorFlow json-to-TFRecord converter")
parser.add_argument("-json",
"--json_path",
help="Path to the input .json files.",
type=str)
# parser.add_argument("-subset",
# "--subset",
# help="Type of the subset: train, validation, test", type=str)
parser.add_argument("-l",
"--labels_path",
help="Path to the labels (.pbtxt) file.", type=str)
parser.add_argument("-o",
"--output_dir",
help="Path of the output dir for storing TFRecord (.record) file.", type=str)
parser.add_argument("-i",
"--image_dir",
help="Path to the folder where the input image files are stored. "
"Defaults to the same directory as JSON_DIR.",
type=str, default=None)
parser.add_argument("-c",
"--csv_path",
help="Path of output .csv file. If none provided, then no file will be "
"written.",
type=str, default=None)
args = parser.parse_args()
if args.image_dir is None:
    # --json_path points at a file, so default to its parent directory
    args.image_dir = os.path.dirname(args.json_path)
label_map = label_map_util.load_labelmap(args.labels_path)
label_map_dict = label_map_util.get_label_map_dict(label_map)
def bbox_dict_to_df(bbox_dict):
"""
This function assumes that the objects list contains one element (v['objects'][0])
"""
log_index = 'bbox_dict_to_df>'
df_ls = []
for k, v in bbox_dict.items():
filename = k
height = v['size']['height']
width = v['size']['width']
ym = v['objects'][0]['bbox'][0]
xm = v['objects'][0]['bbox'][1]
yM = v['objects'][0]['bbox'][2]
xM = v['objects'][0]['bbox'][3]
values = (filename, height, width, ym, xm, yM, xM, v['objects'][0]['name'])
df_ls.append(values)
logger.info(f'{log_index} Collected {len(df_ls)} objects')
df = pd.DataFrame(df_ls, columns=['fname', 'height', 'width', 'ym', 'xm', 'yM', 'xM', 'class'])
return df
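# Illustrative input shape for bbox_dict_to_df (an assumption inferred from the
# parsing above, not taken from the original docs):
# {
#     "img_001.jpg": {
#         "size": {"height": 480, "width": 640},
#         "objects": [{"name": "some_class", "bbox": [ymin, xmin, ymax, xmax]}]
#     }
# }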
def split_dataset(df, perc=0.9):
log_index = 'split_dataset>'
df = df.sample(frac=1).reset_index(drop=True)
num_train = int(perc * len(df))
df_train = df.iloc[0:num_train]
df_val = df.iloc[num_train:]
logger.info(f'{log_index} TRAINING EXAMPLES: {len(df_train)} - VALIDATION EXAMPLES: {len(df_val)}')
return df_train, df_val
def class_text_to_int(row_label):
return label_map_dict[row_label]
def split(df, group):
data = namedtuple('data', ['fname', 'object'])
gb = df.groupby(group)
return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]
def create_tf_example(group, path):
log_index = 'create_tf_example>'
with tf.io.gfile.GFile(os.path.join(path, '{}'.format(group.fname)), 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
width, height = image.size
    logger.info(f'{log_index} Retrieved image with size: {width, height} - (w,h)')
filename = group.fname.encode('utf8')
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
for _, row in group.object.iterrows():
xmins.append(row['xm'])
xmaxs.append(row['xM'])
ymins.append(row['ym'])
ymaxs.append(row['yM'])
classes_text.append(row['class'].encode('utf8'))
classes.append(class_text_to_int(row['class']))
logger.info(f'{log_index} Collected {len(xmins)} rows')
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(filename),
'image/source_id': dataset_util.bytes_feature(filename),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
def main():
log_index = 'main>'
logger.info(f'{log_index} Reading bbox at {args.json_path} ...')
with open(args.json_path, 'r') as f:
bbox_dict = json.load(f)
df = bbox_dict_to_df(bbox_dict)
df_train, df_val = split_dataset(df)
for df_s, subset in zip([df_train, df_val], ['train', 'val']):
logger.info(f'{log_index} Writing TFRecords for subset: {subset}')
writer = tf.io.TFRecordWriter(os.path.join(args.output_dir, f'dywidag_{subset}.records'))
path = Path(args.image_dir)
grouped = split(df_s, 'fname')
for group in grouped:
tf_example = create_tf_example(group, path)
writer.write(tf_example.SerializeToString())
writer.close()
logger.info(f'{log_index} Successfully created the TFRecord file: {args.output_dir}')
if args.csv_path is not None:
df.to_csv(args.csv_path, index=None)
logger.info(f'{log_index} Successfully created the CSV file: {args.csv_path}')
if __name__ == '__main__':
main() | 39.48125 | 104 | 0.614849 | 837 | 6,317 | 4.444444 | 0.253286 | 0.038441 | 0.023656 | 0.030108 | 0.214516 | 0.094086 | 0.03871 | 0.022043 | 0 | 0 | 0 | 0.004636 | 0.248852 | 6,317 | 160 | 105 | 39.48125 | 0.779347 | 0.040051 | 0 | 0.014925 | 0 | 0 | 0.228805 | 0.027068 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044776 | false | 0 | 0.097015 | 0.007463 | 0.179104 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74984e77e478676a74e3414a580cf3837bbf6e4c | 16,641 | py | Python | reinforcement_learning/q_learn.py | noderod/DARLMID | 5737dbe222ce5a5a847c1d0a8d1af64dda87e5b2 | [
"MIT"
] | null | null | null | reinforcement_learning/q_learn.py | noderod/DARLMID | 5737dbe222ce5a5a847c1d0a8d1af64dda87e5b2 | [
"MIT"
] | null | null | null | reinforcement_learning/q_learn.py | noderod/DARLMID | 5737dbe222ce5a5a847c1d0a8d1af64dda87e5b2 | [
"MIT"
] | null | null | null | """
SUMMARY
Reinforcement learning via q-learning on the provided data, using previous data if requested
"""
import argparse
import json
import random
import sys
import matplotlib.pyplot as plt
import numpy as np
import auxiliary as aux
from vehicle import Vehicle
# Sets seed for reproducibility
random.seed(0)
# Processes arguments
parser = argparse.ArgumentParser()
required_flags = parser.add_argument_group(title="Required")
required_flags.add_argument("--epochs",required=True, help="Number of epochs", type=int)
required_flags.add_argument("--explore-probability",required=True, help="Explore probability [0, 1]", type=float)
required_flags.add_argument("--learning-rate",required=True, help="Learning rate [0, 1]", type=float)
required_flags.add_argument("--discount-factor",required=True, help="Discount factor [0, 1]", type=float)
required_flags.add_argument("--data",required=True, help="JSON filepath to read Q, rewards matrices and other information", type=str)
required_flags.add_argument("--positive-demonstration", help="JSON filepath to read Q matrix updates after a number of positive demonstrations (already processed)", type=str)
required_flags.add_argument("--negative-demonstration", help="JSON filepath to read Q matrix updates after a number of negative demonstrations (already processed)", type=str)
required_flags.add_argument("--good-advice-decay", help="Training epochs good advice is remembered (50 by defaulr)", type=int)
required_flags.add_argument("--bad-advice-decay", help="Training epochs bad advice is remembered (5 by defaulr)", type=int)
required_flags.add_argument("--output", required=True, help="JSON filepath to output the results", type=str)
parser.add_argument("--show", help="Show output reward vs. epoch plot", action="store_true")
args = parser.parse_args()
p_exp = args.explore_probability
α = args.learning_rate
γ = args.discount_factor
assert (0 <= p_exp) and (p_exp <= 1), "Explore probability must be between 0 and 1"
assert (0 <= α) and (α <= 1), "Learning rate must be between 0 and 1"
assert (0 <= γ) and (γ <= 1), "Discount factor must be between 0 and 1"
good_advice_decay_epochs = 50
good_decay_ratio = 1/good_advice_decay_epochs
bad_advice_decay_epochs = 5
bad_decay_ratio = 1/bad_advice_decay_epochs
if args.good_advice_decay:
    assert args.good_advice_decay >= 0, "Good advice decay cannot be negative epochs"
    good_advice_decay_epochs = args.good_advice_decay
    good_decay_ratio = 1/good_advice_decay_epochs
if args.bad_advice_decay:
    assert args.bad_advice_decay >= 0, "Bad advice decay cannot be negative epochs"
    bad_advice_decay_epochs = args.bad_advice_decay
    bad_decay_ratio = 1/bad_advice_decay_epochs
#-----------------------------------------------------
# DATA PREPROCESSING
#-----------------------------------------------------
# Loads original data
with open(args.data, "r") as jf:
original_data = json.load(jf)
R = original_data["rewards matrix"]
Q = original_data["Q matrix"]
nx = original_data["nx"]
ny = original_data["ny"]
possible_speeds = original_data["possible speeds"]
speed_max = possible_speeds - 1
valid_positions = original_data["valid positions"]
orientations = [i for i in range(0, len(original_data["orientations"]))]
actions = [j for j in range(0, len(original_data["actions"]))]
num_actions = len(actions)
#-----------------------------------------------------
# NECESSARY VARIABLES
#-----------------------------------------------------
β_good = 0.2
β_bad = 0.2
ξ_0 = 1
δ_0 = 0
Φ_0 = 0
R_expert_good = 1
R_expert_bad = -1
# Sets the Φ(s, a), R^{expert}
# Always 0
Φ = np.zeros((nx, ny, len(orientations), possible_speeds, len(actions)))
R_expert = np.zeros((nx, ny, len(orientations), possible_speeds, len(actions)))
#-----------------------------------------------------
# ADVICE PROCESSING
#-----------------------------------------------------
# Stores advice actions
# "good":{"x, y, o, v":True, ...}
# "bad": {"x, y, o, v":True, ...}
advice_locations = {"good":{}, "bad":{}}
# From Useful Policy Invariant Shaping from Arbitrary Advice (Behboudian et al.)
# Utilizes positive demonstration data
# Positive intent -> Intentionally good demonstrations (although perhaps the user is incompetent)
if args.positive_demonstration:
# Retrieves demonstration data
with open(args.positive_demonstration, "r") as jf:
original_demonstration_data = json.load(jf)
action_sets_taken = original_demonstration_data["actions taken"]
# Simply take the data as is, modify the appropriate Q matrix value, adding +1 to the appropiate Q[s, a] location
for an_action_path in action_sets_taken:
# Goes step by step
for a_step in an_action_path:
step_x = a_step[0]
step_y = a_step[1]
step_o = a_step[2]
step_v = a_step[3]
step_a = a_step[4]
advice_locations["good"][aux.state_to_str(step_x, step_y, step_o, step_v)] = [good_advice_decay_epochs, step_a]
R_expert[step_x][step_y][step_o][step_v][step_a] = R_expert_good
# Utilizes negative demonstration data
# Negative intent -> Intentionally poor or misleading demonstrations
if args.negative_demonstration:
# Retrieves demonstration data
with open(args.negative_demonstration, "r") as jf:
original_demonstration_data = json.load(jf)
action_sets_taken = original_demonstration_data["actions taken"]
for an_action_path in action_sets_taken:
# Goes step by step
for a_step in an_action_path:
step_x = a_step[0]
step_y = a_step[1]
step_o = a_step[2]
step_v = a_step[3]
step_a = a_step[4]
advice_locations["bad"][aux.state_to_str(step_x, step_y, step_o, step_v)] = [bad_advice_decay_epochs, step_a]
R_expert[step_x][step_y][step_o][step_v][step_a] = R_expert_bad
#-----------------------------------------------------
# NECESSARY FUNCTIONS
#-----------------------------------------------------
# Tests with the current Q matrix
# Each epoch tests every valid starting location with a random orientation but always zero speed
# Each reward in the array is: max(Reward - steps, 0)
# Up to 100 steps can be used
# Returns an array containing rewards
max_testing_iterations = 100
def test_Q():
results = []
# Reshuffles the valid starting locations
random.shuffle(valid_positions)
# Goes through every valid position
for a_valid_position in valid_positions:
xloc, yloc = a_valid_position
starting_orientation = random.randint(0, 3)
tested_vehicle = Vehicle(nx, ny, xloc, yloc, starting_orientation, 0, speed_max, R)
reward_so_far = 0
for an_iteration in range(0, max_testing_iterations):
# Gets the current location
v_x = tested_vehicle.xloc
v_y = tested_vehicle.yloc
v_orientation = tested_vehicle.orientation_index
v_speed = tested_vehicle.speed
# Adds the penalty/reward corresponding to this location
reward_so_far += R[v_x][v_y]
# If this is a reward, obstacle, or outside the circuit (unless it is outside the borders) add the reward and then exit this iteration
if R[v_x][v_y] != -1:
break
# Chooses the action index with the maximum reward in Q
# If two actions have the same optimal Q-value, the first one will be chosen
Q_values_to_choose = Q[v_x][v_y][v_orientation][v_speed]
best_Q_value = max(Q_values_to_choose)
action_index = Q_values_to_choose.index(best_Q_value)
# Makes the vehicle attempt it
tested_vehicle.execute_action(action_index, modify_self=True, get_copy_there=False, get_end_location=False)
results.append(max(0, reward_so_far))
return results
# Trains starting with the current Q matrix, which is updated at each step
# Each epoch trains from every valid starting location with a random orientation but always zero speed
# Up to 100 steps can be used
# Does not return anything
max_training_iterations = 100
def train_Q():
# Reshuffles the valid starting locations
random.shuffle(valid_positions)
# Stores the good and bad advice states reached this round
good_advice_states_seen = {}
bad_advice_states_seen = {}
# Goes through every valid position
for a_valid_position in valid_positions:
xloc, yloc = a_valid_position
starting_orientation = random.randint(0, 3)
tested_vehicle = Vehicle(nx, ny, xloc, yloc, starting_orientation, 0, speed_max, R)
for an_iteration in range(0, max_training_iterations):
# Gets the current location
v_x = tested_vehicle.xloc
v_y = tested_vehicle.yloc
v_orientation = tested_vehicle.orientation_index
v_speed = tested_vehicle.speed
# If this is a reward, obstacle, or outside the circuit (unless it is outside the borders) then exit this iteration
if R[v_x][v_y] != -1:
break
# Gets a random probability
what_to_do = random.random()
# If below the explore probability, explore, choose an action at random
if what_to_do <= p_exp:
                chosen_action_index = random.randint(0, num_actions - 1)
given_reward = R[v_x][v_y]
expert_opinion_used = False
α_used = α
else:
# Chooses the action index with the maximum reward in Q
# If two actions have the same optimal Q-value, the first one will be chosen
Q_values_to_choose = Q[v_x][v_y][v_orientation][v_speed]
# Selects the best actions a priori
a_priori_best_Q_value = max(Q_values_to_choose)
a_priori_best_action = Q_values_to_choose.index(a_priori_best_Q_value)
# Checks if this state was considered good or bad
s_as_state = aux.state_to_str(v_x, v_y, v_orientation, v_speed)
if (s_as_state in advice_locations["good"]) and (advice_locations["good"][s_as_state][1] == a_priori_best_action) and (advice_locations["good"][s_as_state][0] > 0):
if s_as_state not in good_advice_states_seen:
good_advice_states_seen[s_as_state] = True
expert_opinion_used = True
advice_followed_times = good_advice_decay_epochs - advice_locations["good"][s_as_state][0]
decay_ratio = good_decay_ratio
α_used = 0.05
β_used = β_good
elif (s_as_state in advice_locations["bad"]) and (advice_locations["bad"][s_as_state][1] == a_priori_best_action) and (advice_locations["bad"][s_as_state][0] > 0):
if s_as_state not in bad_advice_states_seen:
bad_advice_states_seen[s_as_state] = True
expert_opinion_used = True
advice_followed_times = bad_advice_decay_epochs - advice_locations["bad"][s_as_state][0]
decay_ratio = bad_decay_ratio
α_used = 0.1
β_used = β_bad
else:
# Action not provided as advice
best_Q_value = a_priori_best_Q_value
chosen_action_index = a_priori_best_action
given_reward = R[v_x][v_y]
expert_opinion_used = False
α_used = α
if expert_opinion_used:
# Q(s, a) - ξ_t*Φ_t(s, a)
policies_to_choose = [0 for a in range(0, num_actions)]
# Stores Φ_t(s, a), Φ_t(s', a') values before the update
pu_Φ_t_sa = np.zeros((num_actions))
pu_Φ_t_snan = np.zeros((num_actions))
for an_action in range(0, num_actions):
# Gets the next location but does not move there yet if no expert was provided using a priori data
[_0, possible_next_sa] = tested_vehicle.execute_action(a_priori_best_action,
modify_self=False,
get_copy_there=False,
get_end_location=True)
sn_x = possible_next_sa[0][0]
sn_y = possible_next_sa[0][1]
sn_o = possible_next_sa[1]
sn_v = possible_next_sa[2]
Q_sn = Q[sn_x][sn_y][sn_o][sn_v]
sn_a = Q_sn.index(max(Q_sn))
# Φ_t(s, a)
Φ_t_sa = Φ[v_x][v_y][v_orientation][v_speed][an_action]
pu_Φ_t_sa[an_action] = Φ_t_sa
# Assumption to avoid BFS
# Φ_{t+1}(s', a') = Φ_t(s', a')
# Φ_t(s', a')
Φ_t_snan = Φ[sn_x][sn_y][sn_o][sn_v][sn_a]
pu_Φ_t_snan[an_action] = Φ_t_snan
# δ_t^Φ
δ_t_Φ = -R_expert[v_x][v_y][v_orientation][v_speed][an_action] + γ*Φ_t_snan - Φ_t_sa
# ξ_t
# Counts how many times this particular advice has been followed
ξ_t = 1 - advice_followed_times*decay_ratio
# generates the local policies to choose from
policies_to_choose[an_action] = Q_values_to_choose[an_action] - ξ_t*Φ_t_sa
# Generates Φ_{t+1}(s, a)
Φ[v_x][v_y][v_orientation][v_speed][an_action] = Φ_t_sa + β_used*δ_t_Φ
# Chooses the optimal policy action
chosen_action_index = policies_to_choose.index(max(policies_to_choose))
given_reward = R[v_x][v_y] + γ*pu_Φ_t_snan[chosen_action_index] - pu_Φ_t_sa[chosen_action_index]
# Makes the vehicle attempt the action
[_1, location_end] = tested_vehicle.execute_action(chosen_action_index,
modify_self=True,
get_copy_there=False,
get_end_location=True)
# Updates the Q matrix
# Q[s, a] = Q[s, a] + α*(R[s] + γ*max(Q[s', a'], a') - Q[s, a])
v_x_new = location_end[0][0]
v_y_new = location_end[0][1]
v_orientation_new = location_end[1]
v_speed_new = location_end[2]
Q_apostrophe_max = max(Q[v_x_new][v_y_new][v_orientation_new][v_speed_new])
Q_sa = Q[v_x][v_y][v_orientation][v_speed][chosen_action_index]
s_as_state = aux.state_to_str(v_x, v_y, v_orientation, v_speed)
Q[v_x][v_y][v_orientation][v_speed][chosen_action_index] = Q_sa + α_used*(given_reward + γ*Q_apostrophe_max - Q_sa)
# Marks certain states as seen this round
for a_good_seen_state in good_advice_states_seen:
# Good advice reward decays
advice_locations["good"][a_good_seen_state][0] -= 1
for a_bad_seen_state in bad_advice_states_seen:
# Bad advice reward rises
advice_locations["bad"][a_bad_seen_state][0] -= 1
#-----------------------------------------------------
# Q-LEARNING
#-----------------------------------------------------
# [[epoch index, RMS reward], ...]
epoch_rewards = []
for an_epoch in range(0, args.epochs):
# Tests
tested_rewards = test_Q()
# Calculates and appends the RMS reward to results
epoch_rewards.append([an_epoch, aux.RMS(tested_rewards)])
# Trains (unless it is the last epoch)
if an_epoch != (args.epochs - 1):
train_Q()
#-----------------------------------------------------
# OUTPUTS RESULTS
#-----------------------------------------------------
with open(args.output, "w") as jf:
jf.write(json.dumps({"Q matrix":Q, "epoch rewards":epoch_rewards}, indent=4))
#-----------------------------------------------------
# SHOWS PLOT WITH RESULTS IF REQUESTED
#-----------------------------------------------------
if not args.show:
sys.exit()
plt.figure()
epochs_used = []
rewards_obtained = []
for mt in range(0, len(epoch_rewards)):
epochs_used.append(epoch_rewards[mt][0])
rewards_obtained.append(epoch_rewards[mt][1])
plt.plot(epochs_used, rewards_obtained, "k-")
plt.xlabel("Epoch")
plt.ylabel("Reward")
plt.title("Reward vs. Epoch")
plt.show()
| 36.654185 | 180 | 0.607656 | 2,287 | 16,641 | 4.136423 | 0.143857 | 0.004651 | 0.004757 | 0.006342 | 0.50518 | 0.463953 | 0.427907 | 0.383932 | 0.346934 | 0.32093 | 0 | 0.010339 | 0.261823 | 16,641 | 453 | 181 | 36.735099 | 0.759769 | 0.241151 | 0 | 0.262443 | 0 | 0 | 0.089313 | 0.005507 | 0 | 0 | 0 | 0 | 0.022624 | 1 | 0.00905 | false | 0 | 0.036199 | 0 | 0.049774 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7499a0d47d4f28aa8fbfe508262da370434f71b2 | 12,260 | py | Python | src/sark110.py | EA4FRB/sark110-python | ff5ace7c0d71f9de48772cdb1cedb98f5c22df8a | [
"MIT"
] | 2 | 2019-09-03T18:43:53.000Z | 2019-11-06T18:26:23.000Z | src/sark110.py | EA4FRB/sark110-python | ff5ace7c0d71f9de48772cdb1cedb98f5c22df8a | [
"MIT"
] | 1 | 2020-04-03T22:59:18.000Z | 2020-04-29T16:17:02.000Z | src/sark110.py | EA4FRB/sark110-python | ff5ace7c0d71f9de48772cdb1cedb98f5c22df8a | [
"MIT"
] | 1 | 2019-09-05T16:40:55.000Z | 2019-09-05T16:40:55.000Z | # ---------------------------------------------------------
"""
This file is a part of the "SARK110 Antenna Vector Impedance Analyzer" software
MIT License
@author Copyright (c) 2020 Melchor Varela - EA4FRB
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# ---------------------------------------------------------
import os
import struct
import time
if os.name == 'nt':
import pywinusb.hid as hid
import threading
elif os.name == 'posix':
import hid
else:
raise ImportError("Error: no implementation for your platform ('{}') available".format(os.name))
SARK110_VENDOR_ID = 0x0483
SARK110_PRODUCT_ID = 0x5750
WAIT_HID_DATA_MS = 1000
class Sark110:
_handler = 0
_is_connect = 0
_max_freq = 0
_min_freq = 0
_dev_name = ""
_fw_version = ""
_fw_protocol = -1
@property
def fw_version(self) -> str:
return self._fw_version
@property
def fw_protocol(self) -> int:
return self._fw_protocol
@property
def dev_name(self) -> str:
return self._dev_name
@property
def max_freq(self) -> int:
return self._max_freq
@property
def min_freq(self) -> int:
return self._min_freq
@property
def is_connected(self) -> bool:
return self._is_connect
def __init__(self):
self._handler = 0
self._is_connect = 0
def open(self) -> int:
"""
Opens the device
:return: <0 err; >0 ok
"""
# Windows: pywinusb
if os.name == 'nt':
target_vendor_id = SARK110_VENDOR_ID
target_product_id = SARK110_PRODUCT_ID
hid_filter = hid.HidDeviceFilter(vendor_id=target_vendor_id, product_id=target_product_id)
try:
self._handler = hid_filter.get_devices()[0]
if not self._handler:
return -1
else:
self._handler.open()
self._handler.set_raw_data_handler(self._rx_handler)
return 1
except:
return -2
# Linux: hidapi
else:
self._handler = hid.device()
try:
self._handler.open(SARK110_VENDOR_ID, SARK110_PRODUCT_ID)
self._handler.set_nonblocking(0)
return 1
except IOError as ex:
return -1
def connect(self) -> int:
"""
Connect to the device and get its characteristics
:return: <0 err; >0 ok
"""
if not self._handler:
return -1
if self._cmd_version() < 0:
return -2
        self._is_connect = 1
return 1
def close(self):
"""
Closes the device
:return:
"""
if self._handler:
self._handler.close()
self._handler = 0
self._is_connect = 0
def measure(self, freq: int, rs: float, xs: float, cal=True, samples=1) -> int:
"""
Takes one measurement sample at the specified frequency
:param freq: frequency in hertz; 0 to turn-off the generator
:param cal: True to get OSL calibrated data; False to get uncalibrated data
:param samples: number of samples for averaging
        :param rs: output list; rs[0] receives the real part of the impedance
        :param xs: output list; xs[0] receives the imaginary part of the impedance
:return: <0 err; >0 ok
"""
if not self._is_connect:
return -1
snd = [0x0] * 19
snd[1] = 2
b = self._int2bytes(freq)
snd[2] = b[0]
snd[3] = b[1]
snd[4] = b[2]
snd[5] = b[3]
if cal:
snd[6] = 1
else:
snd[6] = 0
snd[7] = samples
rcv = self._send_rcv(snd)
if rcv[0] != 79:
return -2
b = bytearray([0, 0, 0, 0])
b[0] = rcv[1]
b[1] = rcv[2]
b[2] = rcv[3]
b[3] = rcv[4]
        rs[0] = struct.unpack('f', b)[0]
        b[0] = rcv[5]
        b[1] = rcv[6]
        b[2] = rcv[7]
        b[3] = rcv[8]
        xs[0] = struct.unpack('f', b)[0]
return 1
def buzzer(self, freq=0, duration=0) -> int:
"""
Sounds the sark110 buzzer.
:param freq: frequency in hertz
:param duration: duration in ms
:return: <0 err; >0 ok
"""
if not self._is_connect:
return -1
snd = [0x0] * 19
snd[1] = 20
b = self._short2bytes(freq)
snd[2] = b[0]
snd[3] = b[1]
b = self._short2bytes(duration)
snd[4] = b[0]
snd[5] = b[1]
rcv = self._send_rcv(snd)
if duration == 0:
time.sleep(.2)
else:
time.sleep(duration / 1000)
if rcv[0] == 79:
return 1
return -2
def reset(self) -> int:
"""
Resets the device
:return: <0 err; >0 ok
"""
if not self._is_connect:
return -1
snd = [0x0] * 19
snd[1] = 50
rcv = self._send_rcv(snd)
        if rcv[0] == 79:
return 1
return -2
def measure_ext(self, freq: int, step: int, rs: float, xs: float, cal=True, samples=1) -> int:
"""
Takes four measurement samples starting at the specified frequency and incremented at the specified step
Uses half float, so a bit less precise
:param freq: frequency in hertz; 0 to turn-off the generator
:param step: step in hertz
:param cal: True to get OSL calibrated data; False to get uncalibrated data
:param samples: number of samples for averaging
        :param rs: output list; receives the real parts of the impedance (four values)
        :param xs: output list; receives the imaginary parts of the impedance (four values)
:return: <0 err; >0 ok
"""
if not self._is_connect:
return -1
snd = [0x0] * 19
snd[1] = 12
b = self._int2bytes(freq)
snd[2] = b[0]
snd[3] = b[1]
snd[4] = b[2]
snd[5] = b[3]
b = self._int2bytes(step)
snd[8] = b[0]
snd[9] = b[1]
snd[10] = b[2]
snd[11] = b[3]
if cal:
snd[6] = 1
else:
snd[6] = 0
snd[7] = samples
rcv = self._send_rcv(snd)
if rcv[0] != 79:
return -2
rs[0] = self._half2float(rcv[1], rcv[2])
xs[0] = self._half2float(rcv[3], rcv[4])
rs[1] = self._half2float(rcv[5], rcv[6])
xs[1] = self._half2float(rcv[7], rcv[8])
rs[2] = self._half2float(rcv[9], rcv[10])
xs[2] = self._half2float(rcv[11], rcv[12])
rs[3] = self._half2float(rcv[13], rcv[14])
xs[3] = self._half2float(rcv[15], rcv[16])
return 1
# ---------------------------------------------------------
# Get version command: used to check the connection and dev params
def _cmd_version(self):
if not self._handler:
return -1
self._fw_protocol = 0
self._fw_version = ""
snd = [0x0] * 19
snd[1] = 1
rcv = self._send_rcv(snd)
if rcv[0] != 79:
return -2
self._fw_protocol = (rcv[2] << 8) & 0xFF00
self._fw_protocol += rcv[1] & 0xFF
ver = [0x0] * 15
ver[:] = rcv[3:]
# Identifies the device
if (self._fw_protocol & 0xff00) == 0x0100:
self._max_freq = 200000000
self._min_freq = 100000
self._dev_name = "sark110 (100k to 200M)"
elif (self._fw_protocol & 0xff00) == 0x0200:
self._max_freq = 230000000
self._min_freq = 10000
self._dev_name = "sark110 (10k to 230M)"
elif (self._fw_protocol & 0xff00) == 0x0300:
self._max_freq = 230000000
self._min_freq = 10000
self._dev_name = "sark110 mk1"
elif (self._fw_protocol & 0xff00) == 0x0a00:
self._max_freq = 1000000000
self._min_freq = 100000
self._dev_name = "sark110 ulm"
else:
self._max_freq = 230000000
self._min_freq = 100000
self._dev_name = "sark110"
# Converts version to str
for i in range(15):
if ver[i] == 0:
break
elif ver[i] == 46:
self._fw_version += "."
else:
self._fw_version += "%c" % (ver[i])
return 1
# ---------------------------------------------------------
# half float decompress
def _half2float(self, byte1, byte2):
hfs = (byte2 << 8) & 0xFF00
hfs += byte1 & 0xFF
temp = self.__half2float(hfs)
res_pack = struct.pack('I', temp)
return struct.unpack('f', res_pack)[0]
def __half2float(self, float16):
s = int((float16 >> 15) & 0x00000001) # sign
e = int((float16 >> 10) & 0x0000001f) # exponent
f = int(float16 & 0x000003ff) # fraction
if e == 0:
if f == 0:
return int(s << 31)
else:
while not (f & 0x00000400):
f = f << 1
e -= 1
e += 1
f &= ~0x00000400
# print(s,e,f)
elif e == 31:
if f == 0:
return int((s << 31) | 0x7f800000)
else:
return int((s << 31) | 0x7f800000 | (f << 13))
e = e + (127 - 15)
f = f << 13
return int((s << 31) | (e << 23) | f)
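    # Note (added): on Python 3.6+ the struct module supports the binary16 'e'
    # format, so this hand-rolled conversion is equivalent to:
    #   struct.unpack('<e', struct.pack('<H', float16))[0]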
# ---------------------------------------------------------
def _short2bytes(self, n):
"""
short to buffer array
:param n:
:return:
"""
b = bytearray([0, 0])
b[0] = n & 0xFF
n >>= 8
b[1] = n & 0xFF
return b
def _int2bytes(self, n):
"""
int to buffer array
:param n:
:return:
"""
b = bytearray([0, 0, 0, 0])
b[0] = n & 0xFF
n >>= 8
b[1] = n & 0xFF
n >>= 8
b[2] = n & 0xFF
n >>= 8
b[3] = n & 0xFF
return b
# ---------------------------------------------------------
def _send_rcv(self, snd):
# Windows: pywinusb
if os.name == 'nt':
try:
report = self._handler.find_output_reports()[0]
self.event.clear()
report.set_raw_data(snd)
report.send()
self.event.wait()
return _g_rcv[1:18]
except:
return [0] * 18
# Linux: hidapi
else:
try:
self._handler.write(snd)
return self._handler.read(18, WAIT_HID_DATA_MS)
except:
return [0] * 18
def _rx_handler(self, data):
"""
Handler called when a report is received
:param data:
:return:
"""
global _g_rcv
_g_rcv = data.copy()
self.event.set()
# ---------------------------------------------------------
_g_rcv = [0xff] * 19
if os.name == 'nt':
event = threading.Event()
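# Illustrative usage (added sketch, not part of the original module), assuming a
# SARK110 analyzer is attached over USB:
if __name__ == "__main__":
    dev = Sark110()
    if dev.open() > 0 and dev.connect() > 0:
        rs, xs = [0.0], [0.0]
        if dev.measure(14000000, rs, xs) > 0:  # one calibrated sample at 14 MHz
            print("Rs:", rs[0], "Xs:", xs[0])
        dev.close()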
| 29.757282 | 112 | 0.49633 | 1,508 | 12,260 | 3.899204 | 0.215517 | 0.029932 | 0.019048 | 0.011224 | 0.331463 | 0.292347 | 0.264456 | 0.233163 | 0.188605 | 0.185884 | 0 | 0.072789 | 0.370228 | 12,260 | 411 | 113 | 29.829684 | 0.688771 | 0.258075 | 0 | 0.431159 | 0 | 0 | 0.017615 | 0 | 0 | 0 | 0.022865 | 0 | 0 | 1 | 0.076087 | false | 0 | 0.025362 | 0.021739 | 0.278986 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
749bf7c085175f0c77d7d99fa53fad97cc10c478 | 2,295 | py | Python | Utkarsh1.py | utkarsh7236/SCILLA | e11e4d753823ad522a1b3168283b6e6ffe3ea393 | [
"Apache-2.0"
] | null | null | null | Utkarsh1.py | utkarsh7236/SCILLA | e11e4d753823ad522a1b3168283b6e6ffe3ea393 | [
"Apache-2.0"
] | null | null | null | Utkarsh1.py | utkarsh7236/SCILLA | e11e4d753823ad522a1b3168283b6e6ffe3ea393 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import sqlite3
path = '/Users/Utkarsh/PycharmProjects/SCILLA/Experiments/circuits.db'
def select_all_tasks(conn):
"""
Query all rows in the tasks table
:param conn: the Connection object
:return:
"""
cur = conn.cursor()
cur.execute("SELECT * FROM tasks")
rows = cur.fetchall()
for row in rows:
print(row)
def create_connection(db_file):
""" create a database connection to the SQLite database
specified by the db_file
:param db_file: database file
:return: Connection object or None
"""
conn = None
try:
conn = sqlite3.connect(db_file)
except sqlite3.Error as e:
print(e)
return conn
if __name__ == '__main__':
    data = create_connection(path)
    if data:
        print("Opened database successfully")
        select_all_tasks(data)
# import sqlite3
# from sqlite3 import Error
#
#
# def create_connection(db_file):
# """ create a database connection to the SQLite database
# specified by the db_file
# :param db_file: database file
# :return: Connection object or None
# """
# conn = None
# try:
# conn = sqlite3.connect(db_file)
# except Error as e:
# print(e)
#
# return conn
#
#
# def select_all_tasks(conn):
# """
# Query all rows in the tasks table
# :param conn: the Connection object
# :return:
# """
# cur = conn.cursor()
# cur.execute("SELECT * FROM tasks")
#
# rows = cur.fetchall()
#
# for row in rows:
# print(row)
#
#
# def select_task_by_priority(conn, priority):
# """
# Query tasks by priority
# :param conn: the Connection object
# :param priority:
# :return:
# """
# cur = conn.cursor()
# cur.execute("SELECT * FROM tasks WHERE priority=?", (priority,))
#
# rows = cur.fetchall()
#
# for row in rows:
# print(row)
#
#
# def main():
# database = '/Users/Utkarsh/PycharmProjects/SCILLA/Experiments/circuits.db'
#
# # create a database connection
# conn = create_connection(database)
# with conn:
# print("1. Query task by priority:")
# select_task_by_priority(conn, 1)
#
# print("2. Query all tasks")
# select_all_tasks(conn)
#
#
# if __name__ == '__main__':
# main() | 21.650943 | 80 | 0.608715 | 277 | 2,295 | 4.891697 | 0.227437 | 0.035424 | 0.041328 | 0.039852 | 0.729889 | 0.673801 | 0.673801 | 0.558672 | 0.558672 | 0.526199 | 0 | 0.00537 | 0.269717 | 2,295 | 106 | 81 | 21.650943 | 0.803103 | 0.680174 | 0 | 0 | 0 | 0 | 0.186495 | 0.098071 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.052632 | 0 | 0.210526 | 0.157895 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
749c75778c0228cfbbf366e87be5465e687bc993 | 1,839 | py | Python | inference.py | ShaneTian/Induction-Networks | 5785e13425e3c9020c8699402d546d018ec735bf | [
"Apache-2.0"
] | 5 | 2020-06-13T07:43:33.000Z | 2021-06-14T03:27:11.000Z | inference.py | ShaneTian/Induction-Networks | 5785e13425e3c9020c8699402d546d018ec735bf | [
"Apache-2.0"
] | 2 | 2020-06-15T04:53:18.000Z | 2020-10-24T12:37:27.000Z | inference.py | ShaneTian/Induction-Networks | 5785e13425e3c9020c8699402d546d018ec735bf | [
"Apache-2.0"
] | 1 | 2021-01-08T11:27:07.000Z | 2021-01-08T11:27:07.000Z | import argparse
import numpy as np
from paddle import fluid
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str,
help="Path of __model__ and __params__")
parser.add_argument("--use_cuda", action="store_true")
args = parser.parse_args()
print("Args:", args)
place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
# Load inference model
inference_program, feed_target_names, fetch_targets = load_model(args.model_path, exe)
print("Feed target names:", feed_target_names)
print("Fetch targets:", fetch_targets)
# A temp sample
B, N, K, Q = 4, 2, 5, 5
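    # (Assumed episode layout: B episodes per batch, N-way classification,
    # K support shots per class, Q query samples per class.)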
max_length = 512
totalQ = np.array([N * Q], dtype=np.int32)
support = np.random.randint(0, high=1000, size=[B, N, K, max_length])
support_len = np.random.randint(10, high=max_length, size=[B, N, K])
query = np.random.randint(0, high=1000, size=[B, N * Q, max_length])
query_len = np.random.randint(10, high=max_length, size=[B, N * Q])
# Run inference model
pred_label, = exe.run(inference_program,
feed={
feed_target_names[0]: totalQ,
feed_target_names[1]: support,
feed_target_names[2]: support_len,
feed_target_names[3]: query,
feed_target_names[4]: query_len
},
fetch_list=fetch_targets)
print("The predict label is:", pred_label) # [B, totalQ]
def load_model(model_path, exe):
[inference_program, feed_target_names, fetch_targets] = (
fluid.io.load_inference_model(dirname=model_path, executor=exe,
params_filename="__params__"))
return inference_program, feed_target_names, fetch_targets
if __name__ == "__main__":
main() | 35.365385 | 90 | 0.641109 | 248 | 1,839 | 4.455645 | 0.346774 | 0.090498 | 0.135747 | 0.070588 | 0.241629 | 0.241629 | 0.241629 | 0.124887 | 0.124887 | 0.070588 | 0 | 0.020759 | 0.240348 | 1,839 | 52 | 91 | 35.365385 | 0.770222 | 0.035889 | 0 | 0 | 0 | 0 | 0.079141 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.076923 | 0 | 0.153846 | 0.102564 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
749d83fb75da72d4575c40c6649383b7eedc9468 | 3,125 | py | Python | dynaban/pypot/callback.py | laukik-hase/imitation_of_human_arm_on_robotic_manipulator | 995beb1ab41597ca6cbecd0baecdef1ef13450f9 | [
"MIT"
] | 3 | 2021-11-13T16:54:31.000Z | 2021-11-13T20:50:18.000Z | dynaban/pypot/callback.py | laukik-hase/human_arm_imitation | 995beb1ab41597ca6cbecd0baecdef1ef13450f9 | [
"MIT"
] | null | null | null | dynaban/pypot/callback.py | laukik-hase/human_arm_imitation | 995beb1ab41597ca6cbecd0baecdef1ef13450f9 | [
"MIT"
] | null | null | null | import paho.mqtt.client as paho
import time
try:
    import Queue as queue  # Python 2
except ImportError:
    import queue  # Python 3
import json
import real_time_manipulator_math_utils
import pprint
pp = pprint.PrettyPrinter(indent=4)
# import rbdl
# import manip motion
def on_connect(client, userdata, flags, rc):
client.subscribe(topic, qos)
def on_message(client, userdata, message):
msg = message.payload.decode("utf-8")
q.put(msg)
# print("Received: ", msg)
broker = "test.mosquitto.org"
topic = "fyp/sensors"
qos = 0
client = paho.Client("client_001")
client.on_connect = on_connect
client.on_message = on_message
client.connect(broker)
client.loop_start()
q = queue.Queue()
JOINTS = 4
SPLINE = 1
WINDOWSIZE = 5
math_utils_obj = real_time_manipulator_math_utils.manipulator_math_utils(JOINTS)
timestamps = []
angles = []
[angles.append([]) for j in range(JOINTS)]
torques = []
[torques.append([]) for j in range(JOINTS)]
padded_angles = []
first_val = True
laukik_tatti = True
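# (Inferred from the loop below): first_val marks the first timestamp of a batching
# window, while laukik_tatti is a one-shot flag that skips the very first MQTT message.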
loop_flag = 1
while loop_flag == 1:
message = q.get()
msg = json.loads(message)
if laukik_tatti:
laukik_tatti = False
continue
if first_val:
init_timestamp = msg['timestamp']
first_val = False
else:
# collect till SPLINE
if(msg['timestamp'] - init_timestamp > SPLINE):
# print(timestamps)
init_timestamp = msg['timestamp']
# padding for smooth transition in moving average
for j in range(JOINTS):
if padded_angles == []:
angles[j] = [angles[j][0]]*(WINDOWSIZE-1) + angles[j]
else:
angles[j] = padded_angles[j] + angles[j]
# moving average with length similar to timestamps
print("raw angles")
pp.pprint(angles)
padded_angles = [ angles[j][-(WINDOWSIZE-1):] for j in range(JOINTS) ]
angles = math_utils_obj.real_time_moving_average(angles)
print("angles after moving avg")
pp.pprint(angles)
# torques = get torque from rbdl (timestamp, angles)
# convert angles to steps
transformation = [[1,0]]*JOINTS
angles = math_utils_obj.angles_to_steps(angles, transformation)
print("angles to steps")
pp.pprint(angles)
# call to get coeffs
angle_coeffs = math_utils_obj.calculate_coefficients_angles(timestamps, angles)
print("angles coefficients")
pp.pprint(angle_coeffs)
# set motion on manipulator
# empty
timestamps = []
angles = []
[angles.append([]) for j in range(JOINTS)]
torques = []
[torques.append([]) for j in range(JOINTS)]
timestamps.append(msg['timestamp'] - init_timestamp)
angles[0].append(msg['shoulder']['pitch'])
angles[1].append(msg['shoulder']['roll'])
angles[2].append(msg['shoulder']['yaw'])
angles[3].append(msg['elbow']['pitch'])
client.disconnect()
client.loop_stop(
) | 26.939655 | 91 | 0.5984 | 365 | 3,125 | 4.983562 | 0.312329 | 0.034634 | 0.019791 | 0.036284 | 0.173722 | 0.090159 | 0.090159 | 0.090159 | 0.090159 | 0.090159 | 0 | 0.009005 | 0.28928 | 3,125 | 116 | 92 | 26.939655 | 0.809996 | 0.10144 | 0 | 0.220779 | 0 | 0 | 0.069027 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025974 | false | 0 | 0.077922 | 0 | 0.103896 | 0.12987 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
749f2f672fb718a35a7272b4f9459c0ee34795d3 | 4,679 | py | Python | emwiki/article/views.py | etmula/emwiki | e162a9976688d543f2e3bd71634913c14cdb9d54 | [
"MIT"
] | 2 | 2019-08-02T01:08:13.000Z | 2020-11-17T12:47:02.000Z | emwiki/article/views.py | g063ff/emwiki | a379b2b2863e9059eac014183cb94c39670e11a0 | [
"MIT"
] | 101 | 2019-07-29T07:44:56.000Z | 2022-03-31T04:11:04.000Z | emwiki/article/views.py | g063ff/emwiki | a379b2b2863e9059eac014183cb94c39670e11a0 | [
"MIT"
] | 11 | 2019-07-24T02:37:35.000Z | 2021-12-09T05:24:31.000Z | import os
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core import serializers
from django.http import HttpResponse, JsonResponse, Http404
from django.shortcuts import render, get_object_or_404
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views import View
from django.views.generic.base import TemplateView
from .models import Article, Comment
class ArticleView(TemplateView):
template_name = 'article/index.html'
extra_context = {
'context_for_js': {
'article_html_base_uri': reverse_lazy('article:htmls'),
'comments_uri': reverse_lazy('article:comments'),
'bibs_uri': reverse_lazy('article:bibs'),
'names_uri': reverse_lazy('article:names'),
'search_uri': reverse_lazy('search:index')
}
}
def get(self, request, name_or_filename, *args, **kwargs):
response = super().get(request, *args, **kwargs)
# These context data cannot define in class field.
response.context_data['context_for_js']['article_base_uri'] = \
reverse('article:index', kwargs=dict(name_or_filename="temp")).replace('temp', '')
response.context_data['context_for_js']['is_authenticated'] = request.user.is_authenticated
response.context_data['context_for_js']['target'] = request.GET.get('target', 'article')
response.context_data['target'] = request.GET.get('target', 'article')
return response
class ArticleIndexView(View):
def get(self, request):
return JsonResponse({'index': [
dict(name=article.name) for article in Article.objects.all()
]})
class ArticleHtmlView(View):
def get(self, request, *args, **kwargs):
if 'article_name' in request.GET:
article = get_object_or_404(Article, name=request.GET.get('article_name'))
return render(request, article.template_url)
else:
raise Http404()
class BibView(View):
def get(self, request):
if 'article_name' in request.GET:
article_name = request.GET.get("article_name")
bib_file_path = os.path.join(settings.MML_FMBIBS_DIR, f'{article_name}.bib')
if os.path.exists(bib_file_path):
with open(bib_file_path, "r") as f:
bib_text = f.read()
else:
bib_text = f"{bib_file_path} not found"
return JsonResponse({"bib_text": bib_text})
class ProofView(View):
def get(self, request, article_name, proof_name):
return HttpResponse(
open(os.path.join(settings.MML_HTML_DIR, 'proofs',
article_name, proof_name)).read(),
content_type='application/xml'
)
class RefView(View):
def get(self, request, article_name, ref_name):
return HttpResponse(
open(os.path.join(settings.MML_HTML_DIR,
'refs', article_name, ref_name)).read(),
content_type='application/xml'
)
class CommentView(View):
def get(self, request, *args, **kwargs):
query = Comment.objects
if 'article_name' in request.GET:
query = query.filter(
article=Article.objects.get(
name=request.GET.get("article_name"))
)
if 'block' in request.GET:
query = query.filter(
block=request.GET.get('block')
)
if 'block_order' in request.GET:
query = query.filter(
block_order=int(request.GET.get("block_order"))
)
return HttpResponse(
serializers.serialize('json', query.all()), content_type='application/json'
)
@method_decorator(login_required)
def post(self, request):
article_name = request.POST.get('article_name', None)
block = request.POST.get('block', None)
block_order = request.POST.get("block_order", None)
text = request.POST.get('comment', None)
article = Article.objects.get(name=article_name)
if Comment.objects.filter(article=article, block=block, block_order=block_order).exists():
comment = Comment.objects.get(
article=article, block=block, block_order=block_order)
else:
comment = Comment(article=article, block=block,
block_order=block_order, text='')
comment.text = text
comment.save()
article.save_db2mizfile()
article.commit_mizfile(request.user.username)
return HttpResponse(status=201)
| 37.432 | 99 | 0.63069 | 546 | 4,679 | 5.223443 | 0.234432 | 0.065568 | 0.024544 | 0.041725 | 0.342216 | 0.301893 | 0.222651 | 0.084151 | 0.037868 | 0.037868 | 0 | 0.004587 | 0.254542 | 4,679 | 124 | 100 | 37.733871 | 0.813073 | 0.010259 | 0 | 0.173077 | 0 | 0 | 0.118168 | 0.004537 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.105769 | 0.028846 | 0.346154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
749f6d74baed280144d534ef8d72acec4e74fc71 | 3,131 | py | Python | kivy3/objects/lines.py | sb-blueprint/kivy3 | e5d6c5758b610503109eb2c788bcff4f3410bd06 | [
"MIT"
] | null | null | null | kivy3/objects/lines.py | sb-blueprint/kivy3 | e5d6c5758b610503109eb2c788bcff4f3410bd06 | [
"MIT"
] | null | null | null | kivy3/objects/lines.py | sb-blueprint/kivy3 | e5d6c5758b610503109eb2c788bcff4f3410bd06 | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2013 Niko Skrypnik
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from kivy.graphics import Mesh as KivyMesh
from kivy3 import Vector3
from kivy3.core.object3d import Object3D
DEFAULT_VERTEX_FORMAT = [
(b'v_pos', 3, 'float'),
(b'v_normal', 3, 'float'),
(b'v_tc0', 2, 'float')
]
DEFAULT_MESH_MODE = 'lines'
class Lines(Object3D):
def __init__(self, geometry, material, **kw):
super(Lines, self).__init__(**kw)
self.geometry = geometry
self.material = material
self.mtl = self.material # shortcut for material property
self.vertex_format = kw.pop('vertex_format', DEFAULT_VERTEX_FORMAT)
self.mesh_mode = kw.pop('mesh_mode', DEFAULT_MESH_MODE)
self.create_mesh()
def create_mesh(self):
""" Create real mesh object from the geometry and material """
vertices = []
indices = []
idx = 0
for line in self.geometry.lines:
for i, k in enumerate(['a', 'b']):
v_idx = getattr(line, k)
vertex = self.geometry.vertices[v_idx]
vertices.extend(vertex)
try:
normal = line.vertex_normals[i]
except IndexError:
normal = Vector3([0, 0, 0])
vertices.extend(normal)
try:
tex_coords = self.geometry.face_vertex_uvs[0][idx]
vertices.extend(tex_coords)
except IndexError:
vertices.extend([0, 0])
indices.append(idx)
idx += 1
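                # Kivy's Mesh uses unsigned 16-bit indices by default, hence the cap below.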
if idx >= 65535 - 1:
msg = 'Mesh must not contain more than 65535 indices, {} given'
raise ValueError(msg.format(idx + 1))
kw = dict(
vertices=vertices,
indices=indices,
fmt=self.vertex_format,
mode=self.mesh_mode
)
if self.material.map:
kw['texture'] = self.material.map
self._mesh = KivyMesh(**kw)
def custom_instructions(self):
yield self.material
yield self._mesh
| 36.406977 | 77 | 0.63526 | 404 | 3,131 | 4.831683 | 0.423267 | 0.045082 | 0.01332 | 0.008197 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015604 | 0.283615 | 3,131 | 85 | 78 | 36.835294 | 0.854659 | 0.372724 | 0 | 0.074074 | 0 | 0 | 0.06359 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.12963 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
749fd6335e4af9798ed3bf9db18e89282313e71e | 1,262 | py | Python | core/tools/pencil.py | INTJT/conway | 7d3165382b3a691163cae30e33db97c544c043a6 | [
"MIT"
] | null | null | null | core/tools/pencil.py | INTJT/conway | 7d3165382b3a691163cae30e33db97c544c043a6 | [
"MIT"
] | null | null | null | core/tools/pencil.py | INTJT/conway | 7d3165382b3a691163cae30e33db97c544c043a6 | [
"MIT"
] | null | null | null | from core.tools.tool import Tool
from core.editor import Editor
class Pencil(Tool):
def __init__(self, button, board):
super(Pencil, self).__init__(button, board)
self._changed = set()
Editor()["pre-pencil"] = board.copy()
self._fill = button
def _need_change(self, value):
return value != self._fill
def move(self, x, y):
editor = Editor()
radius = editor["radius"]
selection = editor["selection"]
pre_board = editor["pre-pencil"]
for i in range(-radius + 1, radius):
if y + i not in selection[1]:
continue
for j in range(-radius + 1, radius):
if x + j not in selection[0]:
continue
                if self._need_change(pre_board[y + i, x + j]) and (x + j, y + i) not in self._changed:
self._changed.add((x + j, y + i))
pre_board[y + i, x + j] = not pre_board[y + i, x + j]
def execute(self):
Editor()["pre-pencil"] = None
for point in self._changed:
self._board[point] = not self._board[point]
def revoke(self):
self.execute()
def need_save(self):
return len(self._changed) != 0
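
# --- Hedged usage sketch (added for illustration; not in the original file) ---
# Inferred from the accesses above: the Editor singleton must hold "radius",
# "selection" and "pre-pencil", and boards must support board[y, x] indexing
# plus .copy(); `board` is a hypothetical stand-in for such an object.
#
#   pencil = Pencil(button=True, board=board)   # True = draw live cells
#   pencil.move(x, y)                           # toggle cells on the preview
#   if pencil.need_save():
#       pencil.execute()                        # commit the toggles
#   # pencil.revoke() re-toggles the same cells, i.e. it undoes execute()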
| 25.24 | 102 | 0.532488 | 165 | 1,262 | 3.915152 | 0.278788 | 0.018576 | 0.069659 | 0.04644 | 0.123839 | 0.123839 | 0 | 0 | 0 | 0 | 0 | 0.006046 | 0.344691 | 1,262 | 49 | 103 | 25.755102 | 0.775091 | 0 | 0 | 0.0625 | 0 | 0 | 0.035658 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.0625 | 0.0625 | 0.34375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74a12178988d7dbec19f36ed51efbc1d85879c24 | 3,248 | py | Python | pinsey/thread/LikesBotThread.py | RailKill/Pinsey | 72a283e6c5683b27918b511d80e45c3af4e67539 | [
"MIT"
] | 3 | 2021-02-01T06:47:06.000Z | 2022-01-09T05:54:35.000Z | pinsey/thread/LikesBotThread.py | RailKill/Pinsey | 72a283e6c5683b27918b511d80e45c3af4e67539 | [
"MIT"
] | 4 | 2019-10-23T09:52:36.000Z | 2022-03-11T23:17:23.000Z | pinsey/thread/LikesBotThread.py | RailKill/Pinsey | 72a283e6c5683b27918b511d80e45c3af4e67539 | [
"MIT"
] | null | null | null | import logging
from pynder import errors
from random import randint
from PyQt5 import QtCore
class LikesBotThread(QtCore.QThread):
"""
    This is a QThread that runs in the background and reports its results
    through a PyQt signal. It emits the matches object on the signal named
    'data_downloaded'. To access the matches object, connect a slot to that
    signal, for example:
        instance = LikesBotThread(session, likes_handler)
        instance.data_downloaded.connect(yourMethod)
        instance.start()
    With the example above, yourMethod() will be called when the background
    thread has finished fetching the matches data. The matches object is
    passed in as the first parameter, so if you define your method as
    yourMethod(matches), the matches object will be available in 'matches'.
"""
data_downloaded = QtCore.pyqtSignal(object)
def __init__(self, session, likes_handler, decision_handler=None):
QtCore.QThread.__init__(self)
self.session = session
self.friends = session.get_fb_friends()
self.likes_handler = likes_handler
self.decision_handler = decision_handler
self.abort = False
self.logger = logging.getLogger(__name__)
def stop(self):
self.abort = True
def run(self):
while not self.abort:
if self.session.likes_remaining != 0:
nearby_users = self.session.nearby_users()
try:
user = next(nearby_users) # Iterate through generator object.
if self.decision_handler:
if not self.decision_handler.analyze(user, self.friends):
self.likes_handler.dislike_user(user, 'Bot')
continue
self.likes_handler.like_user(user, 'Bot')
self.logger.info(u'Liking ' + user.name + '.')
except StopIteration:
try:
# No more users to go through. Reset the distance filter to fetch the users again.
self.session.profile.distance_filter = self.session.profile.distance_filter
except errors.RequestError:
self.logger.error('Request timed out when trying to update distance filter in profile.')
except errors.RecsError:
self.logger.info('There are probably no more nearby users to fetch. '
'Increasing distance filter by 1 mile...')
self.session.profile.distance_filter += 1
self.sleep(randint(3, 5)) # Give it a break, 3 to 5 seconds between every swipe.
else:
try:
like_in_seconds = self.session.can_like_in
like_in_hours = like_in_seconds / 60 / 60
self.logger.info('Out of likes. Can like in: ' + str(like_in_seconds) + ' seconds (' +
str(like_in_hours) + ' hours).')
except errors.RequestError:
self.logger.info('Out of likes. Retrying in an hour...')
self.sleep(3600 * 6) # Out of likes, pausing for X hours.
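
# --- Hedged usage sketch (added for illustration; not in the original file) ---
# `session` is a pynder session; the handlers are application objects with the
# like_user/dislike_user and analyze methods called above. Note that run() as
# written never emits data_downloaded, so connecting a slot to it is optional.
#
#   bot = LikesBotThread(session, likes_handler, decision_handler)
#   bot.start()   # begins swiping in the background
#   ...
#   bot.stop()    # sets the abort flag; the loop exits on its next pass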
| 47.764706 | 112 | 0.591441 | 375 | 3,248 | 4.994667 | 0.4 | 0.046983 | 0.029899 | 0.041644 | 0.107848 | 0.025627 | 0 | 0 | 0 | 0 | 0 | 0.007889 | 0.336515 | 3,248 | 67 | 113 | 48.477612 | 0.861253 | 0.248768 | 0 | 0.106383 | 0 | 0 | 0.106672 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0 | 0.085106 | 0 | 0.191489 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74a2534ab9e9ba29bdec5c71f0832442026ad789 | 13,081 | py | Python | train.py | SkyLLine/Inception-v4-in-mindspore | 9a23261252baad27cbd7bc8028087e4bba48b318 | [
"MIT"
] | 1 | 2020-12-23T12:31:08.000Z | 2020-12-23T12:31:08.000Z | train.py | SkyLLine/Inception-v4-in-mindspore | 9a23261252baad27cbd7bc8028087e4bba48b318 | [
"MIT"
] | null | null | null | train.py | SkyLLine/Inception-v4-in-mindspore | 9a23261252baad27cbd7bc8028087e4bba48b318 | [
"MIT"
] | null | null | null | import mindspore as ms
import mindspore.nn as nn
import mindspore.ops.operations as operator
import os
from lr_generator import get_lr
from CrossEntropy import CrossEntropy
import argparse
from inception_A import inception_A
from inception_B import inception_B
import numpy as np
from inception_C import inception_C
from network import Stem
from reduction_A import reduction_A
from reduction_B import reduction_B
from reduction_C import reduction_C
import mindspore.dataset as ds
from mindspore import context
from mindspore import Tensor
from mindspore.parallel._auto_parallel_context import auto_parallel_context
from mindspore.nn.optim.momentum import Momentum
import os
import urllib.request
from urllib.parse import urlparse
import gzip
import argparse
import mindspore.dataset as ds
import mindspore.nn as nn
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
from mindspore.train import Model
from mindspore.common.initializer import TruncatedNormal
import mindspore.dataset.transforms.vision.c_transforms as CV
import mindspore.dataset.transforms.c_transforms as C
from mindspore.dataset.transforms.vision import Inter
from mindspore.nn.metrics import Accuracy
from mindspore.common import dtype as mstype
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.train.model import Model, ParallelMode
from config import config
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore.communication.management import init
import mindspore.nn as nn
import mindspore.common.initializer as weight_init
from dataloader import create_dataset
def unzipfile(gzip_path):
"""unzip dataset file
Args:
gzip_path: dataset file path
"""
    # context managers close both files (the output handle was never closed)
    with open(gzip_path.replace('.gz', ''), 'wb') as open_file:
        with gzip.GzipFile(gzip_path) as gz_file:
            open_file.write(gz_file.read())
def download_dataset():
"""Download the dataset from http://yann.lecun.com/exdb/mnist/."""
print("******Downloading the MNIST dataset******")
train_path = "./MNIST_Data/train/"
test_path = "./MNIST_Data/test/"
train_path_check = os.path.exists(train_path)
test_path_check = os.path.exists(test_path)
    if not train_path_check and not test_path_check:
os.makedirs(train_path)
os.makedirs(test_path)
train_url = {"http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz",
"http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz"}
test_url = {"http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz",
"http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz"}
for url in train_url:
url_parse = urlparse(url)
# split the file name from url
file_name = os.path.join(train_path, url_parse.path.split('/')[-1])
if not os.path.exists(file_name.replace('.gz', '')):
file = urllib.request.urlretrieve(url, file_name)
unzipfile(file_name)
os.remove(file_name)
for url in test_url:
url_parse = urlparse(url)
# split the file name from url
file_name = os.path.join(test_path, url_parse.path.split('/')[-1])
if not os.path.exists(file_name.replace('.gz', '')):
file = urllib.request.urlretrieve(url, file_name)
unzipfile(file_name)
os.remove(file_name)
# def create_dataset(data_path, batch_size=32, repeat_size=1,
# num_parallel_workers=1):
# """ create dataset for train or test
# Args:
# data_path: Data path
# batch_size: The number of data records in each group
# repeat_size: The number of replicated data records
# num_parallel_workers: The number of parallel workers
# """
# # define dataset
# mnist_ds = ds.MnistDataset(data_path)
# # define operation parameters
# resize_height, resize_width = 299, 299
# rescale = 1.0 / 255.0
# shift = 0.0
# rescale_nml = 1 / 0.3081
# shift_nml = -1 * 0.1307 / 0.3081
# # define map operations
# resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Resize images to (32, 32)
# rescale_nml_op = CV.Rescale(rescale_nml, shift_nml) # normalize images
# rescale_op = CV.Rescale(rescale, shift) # rescale images
# hwc2chw_op = CV.HWC2CHW() # change shape from (height, width, channel) to (channel, height, width) to fit network.
# type_cast_op = C.TypeCast(mstype.int32) # change data type of label to int32 to fit network
# # apply map operations on images
# mnist_ds = mnist_ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=num_parallel_workers)
# mnist_ds = mnist_ds.map(input_columns="image", operations=resize_op, num_parallel_workers=num_parallel_workers)
# mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_op, num_parallel_workers=num_parallel_workers)
# mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers)
# mnist_ds = mnist_ds.map(input_columns="image", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers)
# # apply DatasetOps
# buffer_size = 10000
# mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) # 10000 as in LeNet train script
# mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
# mnist_ds = mnist_ds.repeat(repeat_size)
# return mnist_ds
parser = argparse.ArgumentParser(description='Image classification')
parser.add_argument('--run_distribute', type=bool, default=True, help='Run distribute')
parser.add_argument('--device_num', type=int, default=8, help='Device num.')
parser.add_argument('--do_train', type=bool, default=True, help='Do train or not.')
parser.add_argument('--do_eval', type=bool, default=False, help='Do eval or not.')
parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
parser.add_argument('--data_url', default=None, help='Location of data.')
parser.add_argument('--train_url', default=None, help='Location of training outputs.')
opt = parser.parse_args()
dict = {}
i = 0
class InceptionV4(nn.Cell):
def __init__(self):
super().__init__()
self.Stem = Stem(3)
self.inception_A = inception_A(384)
self.reduction_A = reduction_A(384)
self.inception_B = inception_B(1024)
self.reduction_B = reduction_B(1024)
self.inception_C = inception_C(1536)
self.avgpool = nn.AvgPool2d(8)
        #### reshape to 2-D (done via Flatten in construct)
self.dropout = nn.Dropout(0.8)
self.linear = nn.Dense(1536, 1000)
def construct(self, x):
x = self.Stem(x)
x = self.inception_A(x)
x = self.inception_A(x)
x = self.inception_A(x)
x = self.inception_A(x)
x = self.reduction_A(x)
x = self.inception_B(x)
x = self.inception_B(x)
x = self.inception_B(x)
x = self.inception_B(x)
x = self.inception_B(x)
x = self.inception_B(x)
x = self.inception_B(x)
x = self.reduction_B(x)
x = self.inception_C(x)
x = self.inception_C(x)
x = self.inception_C(x)
x = self.avgpool(x)
x = self.dropout(x)
x = nn.Flatten()(x)
x = self.linear(x)
return x
def generate_inception_module(self, input_channels, output_channels, block_num, block):
if block == 1:
layers = nn.SequentialCell([inception_A(input_channels)])
for i in range(block_num):
layers = nn.SequentialCell(inception_A(input_channels), layers)
input_channels = output_channels
if block == 2:
layers = nn.SequentialCell([inception_B(input_channels)])
for i in range(block_num):
layers = nn.SequentialCell(inception_B(input_channels), layers)
input_channels = output_channels
if block == 3:
layers = nn.SequentialCell([inception_C(input_channels)])
for i in range(block_num):
layers = nn.SequentialCell(inception_C(input_channels), layers)
input_channels = output_channels
return layers
def train(opt):
# device_id = int(os.getenv('DEVICE_ID'))
#
context.set_context(mode=context.GRAPH_MODE, device_target="CPU", save_graphs=False)
# context.set_context(enable_task_sink=True, device_id=device_id)
# context.set_context(enable_loop_sink=True)
# context.set_context(enable_mem_reuse=True)
#
# if not opt.do_eval and opt.run_distribute:
# context.set_auto_parallel_context(device_num=opt.device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
# mirror_mean=True, parameter_broadcast=True)
# auto_parallel_context().set_all_reduce_fusion_split_indices([107, 160])
# init()
loss = CrossEntropy(smooth_factor=config.label_smooth_factor, num_classes=config.class_num)
mnist_path = "./MNIST_Data"
download_dataset()
dataset = create_dataset(os.path.join(mnist_path, "train"), 32, 1)
    net = InceptionV4()  # __init__ takes no arguments (see the class above)
# net = LeNet5()
stepsize = 32
lr = 0.01
optt = nn.Momentum(net.trainable_params(), lr, momentum=0.9)
config_ck = CheckpointConfig(save_checkpoint_steps=1875, keep_checkpoint_max=10)
    # save the network model and parameters for subsequent fine-tuning
ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ck)
# group layers into an object with training and evaluation features
net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')
model = Model(net, net_loss, optt, metrics={"Accuracy": Accuracy()})
model.train(config.epoch_size, dataset, callbacks=[ckpoint_cb, LossMonitor()], dataset_sink_mode=False)
#########################################
def weight_variable():
"""Weight initial."""
return TruncatedNormal(0.02)
def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
"""Conv layer weight initial."""
weight = weight_variable()
return nn.Conv2d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride, padding=padding,
weight_init=weight, has_bias=False, pad_mode="valid")
def fc_with_initialize(input_channels, out_channels):
"""Fc layer weight initial."""
weight = weight_variable()
bias = weight_variable()
return nn.Dense(input_channels, out_channels, weight, bias)
class LeNet5(nn.Cell):
"""Lenet network structure."""
# define the operator required
def __init__(self):
super(LeNet5, self).__init__()
self.conv1 = conv(1, 6, 5)
self.conv2 = conv(6, 16, 5)
self.fc1 = fc_with_initialize(16 * 5 * 5, 120)
self.fc2 = fc_with_initialize(120, 84)
self.fc3 = fc_with_initialize(84, 10)
self.relu = nn.ReLU()
self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
self.flatten = nn.Flatten()
# use the preceding operators to construct networks
def construct(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.max_pool2d(x)
x = self.conv2(x)
x = self.relu(x)
x = self.max_pool2d(x)
x = self.flatten(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
return x
def ans():
context.set_context(mode=context.GRAPH_MODE)
net = InceptionV4()
print("start")
ds = create_dataset('./dataset', True, config.epoch_size, config.batch_size)
lr = 0.01
optt = nn.Momentum(net.trainable_params(), lr, momentum=0.9)
config_ck = CheckpointConfig(save_checkpoint_steps=1875, keep_checkpoint_max=10)
    # save the network model and parameters for subsequent fine-tuning
ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ck)
# group layers into an object with training and evaluation features
net_loss = CrossEntropy(smooth_factor=config.label_smooth_factor, num_classes=config.class_num)
loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)
lr = Tensor(get_lr(global_step=0, lr_init=config.lr_init, lr_end=0.0, lr_max=config.lr_max,
warmup_epochs=config.warmup_epochs, total_epochs=config.epoch_size, steps_per_epoch=config.batch_size,
lr_decay_mode='cosine'))
optt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, config.momentum,
config.weight_decay, config.loss_scale)
model = Model(net, net_loss, optt, metrics={"Accuracy": Accuracy()})
model.train(config.epoch_size, ds, callbacks=[ckpoint_cb, LossMonitor()], dataset_sink_mode=False)
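
# Hedged usage note (added for illustration; not in the original script):
# ans() expects an image folder at ./dataset readable by
# dataloader.create_dataset, and a `config` module providing epoch_size,
# batch_size, class_num, label_smooth_factor, loss_scale, lr_init, lr_max,
# warmup_epochs, momentum and weight_decay. A plain `python train.py` then
# trains InceptionV4, saving checkpoints every 1875 steps under the
# (leftover) prefix "checkpoint_lenet".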
if __name__ == '__main__':
ans() | 39.759878 | 125 | 0.690467 | 1,776 | 13,081 | 4.866554 | 0.193131 | 0.007868 | 0.022215 | 0.024297 | 0.413514 | 0.385746 | 0.346292 | 0.292607 | 0.264144 | 0.25188 | 0 | 0.018462 | 0.200826 | 13,081 | 329 | 126 | 39.759878 | 0.808303 | 0.240272 | 0 | 0.306604 | 0 | 0.018868 | 0.066019 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0 | 0.216981 | 0 | 0.311321 | 0.009434 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74a4b609cbd07e7dfd15a960bb342cacfe14d3fb | 2,034 | py | Python | setup.py | Jumpscale/ays9 | 63bd414ff06372ba885c55eec528f427e63bcbe1 | [
"Apache-2.0"
] | 4 | 2017-06-07T08:10:06.000Z | 2017-11-10T02:20:38.000Z | setup.py | Jumpscale/ays9 | 63bd414ff06372ba885c55eec528f427e63bcbe1 | [
"Apache-2.0"
] | 242 | 2017-05-18T10:51:48.000Z | 2019-09-18T15:09:47.000Z | setup.py | Jumpscale/ays9 | 63bd414ff06372ba885c55eec528f427e63bcbe1 | [
"Apache-2.0"
] | 5 | 2017-06-16T15:43:25.000Z | 2017-09-29T12:48:06.000Z | from setuptools import setup, find_packages
from setuptools.command.install import install as _install
from setuptools.command.develop import develop as _develop
import os
def _post_install(libname, libpath):
from js9 import j
# add this plugin to the config
c = j.core.state.configGet('plugins', defval={})
c[libname] = libpath
j.core.state.configSet('plugins', c)
print("****:%s:%s" % (libname, libpath))
j.tools.jsloader.generate()
# not needed to do
# j.tools.jsloader.copyPyLibs()
class install(_install):
def run(self):
_install.run(self)
libname = self.config_vars['dist_name']
libpath = os.path.join(os.path.dirname(
os.path.abspath(__file__)), libname)
self.execute(_post_install, (libname, libpath),
msg="Running post install task")
class develop(_develop):
def run(self):
_develop.run(self)
libname = self.config_vars['dist_name']
libpath = os.path.join(os.path.dirname(
os.path.abspath(__file__)), libname)
self.execute(_post_install, (libname, libpath),
msg="Running post install task")
long_description = ""
try:
from pypandoc import convert
long_description = convert("README.md", 'rst')
except ImportError:
long_description = ""
setup(
name='JumpScale9AYS',
version='9.2.0',
description='Automation framework for cloud workloads ays lib',
long_description=long_description,
url='https://github.com/Jumpscale/ays9',
author='GreenItGlobe',
author_email='info@gig.tech',
license='Apache',
packages=find_packages(),
include_package_data=True,
install_requires=[
'JumpScale9>=9.2.0',
'JumpScale9Lib>=9.2.0',
'jsonschema>=2.6.0',
'python-jose==1.3.2',
'sanic>=0.5.4',
'aiohttp>=2.2.5'
],
cmdclass={
'install': install,
'develop': develop,
'developement': develop
},
scripts=['cmds/ays'],
)
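
# Hedged note (added for illustration; not in the original file): running
# `python setup.py install` (or `develop`) triggers the custom commands above,
# which register this package's path under the 'plugins' key of the JumpScale
# state config and then regenerate the jsloader.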
| 25.746835 | 67 | 0.629302 | 243 | 2,034 | 5.123457 | 0.45679 | 0.028916 | 0.043373 | 0.060241 | 0.24257 | 0.24257 | 0.24257 | 0.24257 | 0.24257 | 0.24257 | 0 | 0.016731 | 0.235988 | 2,034 | 78 | 68 | 26.076923 | 0.784427 | 0.037365 | 0 | 0.237288 | 0 | 0 | 0.187308 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050847 | false | 0 | 0.118644 | 0 | 0.20339 | 0.016949 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74a63aca3e7e9199c9e3c65398a87cb9cc5d927d | 12,246 | py | Python | nettruyen_downloader_script.py | quantrancse/nettruyen-downloader-script | 1ff5d67fc3a99009385fd3ab5976c582246a2783 | [
"MIT"
] | 4 | 2020-09-15T16:29:53.000Z | 2022-02-18T17:36:46.000Z | nettruyen_downloader_script.py | quantrancse/nettruyen-downloader-script | 1ff5d67fc3a99009385fd3ab5976c582246a2783 | [
"MIT"
] | null | null | null | nettruyen_downloader_script.py | quantrancse/nettruyen-downloader-script | 1ff5d67fc3a99009385fd3ab5976c582246a2783 | [
"MIT"
] | 4 | 2021-07-06T04:26:15.000Z | 2022-01-07T05:55:46.000Z | import argparse
import signal
import sys
import time
from concurrent.futures import ThreadPoolExecutor
from os import mkdir
from os.path import isdir
from urllib.parse import urlparse
import requests
from bs4 import BeautifulSoup
HEADERS = {
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
'DNT': '1',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.9'
}
class MangaInfo():
def __init__(self):
self.manga_url = ''
self.manga_name = ''
self.chapter_name_list = []
self.chapter_url_list = []
self.save_path = ''
self.list_of_download_chapter = []
class DownloadEngine():
def __init__(self):
self.stop_signal = 0
self.error403_signal = 0
def set_manga(self, manga):
self.current_manga = manga
self.image_formats = ['.jpg', '.jpeg', '.png', '.gif', '.tiff', '.bmp']
def stop_download(self, sig, frame):
self.stop_signal = 1
def run(self):
signal.signal(signal.SIGINT, self.stop_download)
self.crawl_chapter_data_list()
def crawl_chapter_data_list(self):
chapter_list = []
# Get each chapter info
for index in self.current_manga.list_of_download_chapter:
chapter_detail = {}
chapter_detail['chapter_url'] = self.current_manga.chapter_url_list[index]
chapter_detail['chapter_name'] = self.current_manga.chapter_name_list[index]
if ':' in chapter_detail['chapter_name']:
chapter_detail['chapter_name'] = chapter_detail['chapter_name'].split(':')[
0]
chapter_list.append(chapter_detail)
        # Skip chapters whose download directory already exists
chapter_list = [i_chapter for i_chapter in chapter_list if not isdir(
self.current_manga.save_path + '/' + i_chapter['chapter_name'])]
chapter_list = list(reversed(chapter_list))
if chapter_list:
# Create directory and start to download
index = 0
print('Start download ..... Press Ctrl+C to stop.')
for chapter_data in chapter_list:
if self.stop_signal:
break
chapter_dir_path = self.current_manga.save_path + \
'/' + chapter_data['chapter_name']
mkdir(chapter_dir_path.replace('\"', '').replace(
'\'', '').replace('?', '').replace('!', ''))
chapter_data['chapter_dir_path'] = chapter_dir_path
self.get_chapter_contents(chapter_data)
index += 1
print('Download Done')
sys.exit(0)
def get_image_urls(self, soup):
contents = []
for content_url in soup.find('div', class_='reading-detail box_doc').find_all('img'):
if content_url not in contents:
if any(img_fm in content_url['src'] for img_fm in self.image_formats):
img_url = content_url['src']
elif content_url.has_attr('data-original'):
img_url = content_url['data-original']
elif content_url.has_attr('data-cdn') and any(img_fm in content_url['data-cdn'] for img_fm in self.image_formats):
img_url = content_url['data-cdn']
else:
img_url = content_url['src']
contents.append(self.format_img_url(img_url))
return contents
def format_img_url(self, url):
return url.replace('//', 'http://')
def get_image_paths(self, chapter_dir_path, contents):
img_path_list = []
image_index = 1
for img_url in contents:
img_name = img_url.split('/')[-1]
if any(img_fm in img_name[-4:] for img_fm in self.image_formats):
img_path_name = chapter_dir_path + '/image_' + img_name
else:
img_path_name = chapter_dir_path + \
'/image_' + '{0:0=3d}'.format(image_index) + '.jpg'
img_path_list.append(img_path_name)
image_index += 1
return img_path_list
def get_chapter_contents(self, chapter_data):
try:
# Request chapter url
request = requests.get(
chapter_data['chapter_url'], headers=HEADERS, timeout=10)
soup = BeautifulSoup(request.text, 'html.parser')
# Get image url
contents = self.get_image_urls(soup)
# Get image name
img_path_list = self.get_image_paths(
chapter_data['chapter_dir_path'], contents)
image_data_list = list(
map(lambda x, y: (x, y), img_path_list, contents))
# Update Dialog
chapter_name = 'Downloading ' + \
chapter_data['chapter_name'] + ' .....'
print(chapter_name)
# Threading for download each image
with ThreadPoolExecutor(max_workers=20) as executor:
executor.map(self.download_image, image_data_list)
if self.error403_signal:
print(chapter_data['chapter_name'] +
': Can not download some images. Please check again!')
self.error403_signal = 0
except Exception:
print('Error get chapter info. Please try again later.')
print('Finish ' + chapter_data['chapter_name'])
def download_image(self, image_data_list):
if not self.stop_signal:
img_path_name, img_url = image_data_list
            # Retry a failed image download for up to 10 seconds
start = time.time()
timeout = 10
while True:
try:
img_data = requests.get(
img_url, headers=HEADERS, timeout=10)
if img_data.status_code == 403:
self.error403_signal = 1
else:
with open(img_path_name, 'wb') as handler:
handler.write(img_data.content)
break
except Exception:
if time.time() - start > timeout:
print('Error download image: ' + img_path_name)
break
print('Retry download image: ' + img_url)
time.sleep(1)
continue
class Bridge():
current_manga = MangaInfo()
def start_download(self, manga_url, from_chapter_input, to_chapter_input):
self.manga_url = manga_url
self.from_chapter_input = from_chapter_input
self.to_chapter_input = to_chapter_input
self.download_chapter()
def download_chapter(self):
if self.check_valid_url() and self.get_chapter_input():
manga_save_path = self.current_manga.manga_name
manga_save_path = manga_save_path.replace(
'\"', '').replace('\'', '').replace('?', '').replace('!', '')
if not isdir(manga_save_path):
mkdir(manga_save_path)
self.current_manga.save_path = manga_save_path
engine = DownloadEngine()
engine.set_manga(self.current_manga)
engine.run()
else:
return
def check_valid_url(self):
current_manga_url = self.manga_url
result = False
domain = urlparse(current_manga_url)
referer_header = '{uri.scheme}://{uri.netloc}/'.format(uri=domain)
HEADERS['Referer'] = referer_header
if not any(substr in current_manga_url for substr in ['nhattruyen', 'nettruyen']):
print('Invalid manga url. Please try again.')
return result
else:
try:
request = requests.get(
current_manga_url, headers=HEADERS, timeout=5)
soup = BeautifulSoup(request.text, 'html.parser')
if not soup.find('div', id='nt_listchapter'):
print('Invalid manga url. Please try again.')
else:
self.current_manga.manga_url = str(current_manga_url)
self.crawl_manga_home_page()
result = True
return result
except Exception:
print('Error getting manga page. Please try again.')
return False
def crawl_manga_home_page(self):
try:
request = requests.get(
self.current_manga.manga_url, headers=HEADERS, timeout=10)
soup = BeautifulSoup(request.text, 'html.parser')
self.current_manga.manga_name = soup.find(
'h1', class_='title-detail').text
self.current_manga.chapter_name_list = [
i.find('a').text for i in soup.find_all('div', class_='chapter')]
chapter_url_list = []
for chapter in soup.find('div', id='nt_listchapter').find('ul').find_all('a'):
chapter_url_list.append(chapter['href'])
self.current_manga.chapter_url_list = chapter_url_list
except Exception:
print('Error getting manga page. Please try again.')
def get_chapter_index(self, chapter_input):
index = None
if chapter_input == 'start_chapter':
index = 0
elif chapter_input == 'end_chapter':
index = len(self.current_manga.chapter_name_list) - 1
else:
for chapter in self.current_manga.chapter_name_list:
chapter_name = chapter.split()[1]
if ':' in chapter_name:
chapter_name = chapter_name[:-1]
if chapter_input == chapter_name:
index = self.current_manga.chapter_name_list.index(
chapter)
return index
def get_chapter_input(self):
from_chapter_index = self.get_chapter_index(
self.from_chapter_input)
to_chapter_index = self.get_chapter_index(self.to_chapter_input)
if from_chapter_index is not None and to_chapter_index is not None:
if from_chapter_index > to_chapter_index:
from_chapter_index, to_chapter_index = to_chapter_index, from_chapter_index
self.current_manga.list_of_download_chapter = list(
range(from_chapter_index, to_chapter_index + 1))
return True
else:
print('Invalid manga chapter input. Please try again.')
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('manga_url', type=str,
help='url to the manga homepage')
parser.add_argument('-a', '--all', action='store_true',
help='download/update all chapter')
parser.add_argument('-f', '--fromto', nargs=2, metavar=('from_chapter', 'to_chapter'),
help='download from one chapter to another chapter')
parser.add_argument('-c', '--chapter', nargs=1, metavar=('chapter'),
help='download one chapter')
args = parser.parse_args()
bridge = Bridge()
if not (args.all or args.fromto or args.chapter):
parser.error('No action requested, add --all or --fromto or --chapter')
elif args.all:
bridge.start_download(args.manga_url, 'start_chapter', 'end_chapter')
elif args.fromto:
bridge.start_download(
args.manga_url, args.fromto[0], args.fromto[1])
elif args.chapter:
bridge.start_download(
args.manga_url, args.chapter[0], args.chapter[0])
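
# --- Hedged usage examples (added for illustration; not in the original) ---
# Derived from the argparse definitions above; <manga_url> is a placeholder
# for a nettruyen/nhattruyen manga homepage URL.
#
#   python nettruyen_downloader_script.py <manga_url> --all
#   python nettruyen_downloader_script.py <manga_url> --fromto 10 20
#   python nettruyen_downloader_script.py <manga_url> --chapter 15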
| 38.87619 | 153 | 0.565981 | 1,400 | 12,246 | 4.683571 | 0.180714 | 0.043923 | 0.046363 | 0.024554 | 0.295715 | 0.230441 | 0.140003 | 0.077322 | 0.049108 | 0.049108 | 0 | 0.011773 | 0.33415 | 12,246 | 314 | 154 | 39 | 0.792372 | 0.020578 | 0 | 0.173387 | 0 | 0.008065 | 0.137532 | 0.013967 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068548 | false | 0 | 0.040323 | 0.004032 | 0.165323 | 0.052419 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74a8fdbcc478237889a10e97a2c4357eca96a428 | 3,017 | py | Python | tenff/__main__.py | rr-/10ff | 9be5cf03eda09f1fe0920d67d9e09d4886c34ef3 | [
"WTFPL"
] | 53 | 2017-09-30T16:05:39.000Z | 2022-02-04T08:19:26.000Z | tenff/__main__.py | rr-/10ff | 9be5cf03eda09f1fe0920d67d9e09d4886c34ef3 | [
"WTFPL"
] | 2 | 2017-12-08T10:07:42.000Z | 2020-05-20T21:46:27.000Z | tenff/__main__.py | rr-/10ff | 9be5cf03eda09f1fe0920d67d9e09d4886c34ef3 | [
"WTFPL"
] | 2 | 2019-12-18T04:23:10.000Z | 2020-09-25T10:31:04.000Z | """Main executable."""
import argparse
import asyncio
from tenff.game import GameSettings, run_game
from tenff.terminal import TerminalInputHandler
from tenff.util import CORPORA_PATH, get_corpus_path, parse_corpus
DEFAULT_TIME = 60
PROLOG = (
"A certain typing contest site spin-off in CLI, without all the "
"advertisements, tracking and 10 megabytes of AJAX crap."
)
class CustomHelpFormatter(argparse.HelpFormatter):
"""A HelpFormatter that uses concise syntax for short and long options
help.
"""
def _format_action_invocation(self, action: argparse.Action) -> str:
"""Format action invocation.
Example of the default argparse formatting:
-c CORPUS, --corpus CORPUS
Example of the concise formatting:
-c, --corpus CORPUS
"""
if not action.option_strings or action.nargs == 0:
return super()._format_action_invocation(action)
default = self._get_default_metavar_for_optional(action)
args_string = self._format_args(action, default)
return ", ".join(action.option_strings) + " " + args_string
def parse_args() -> argparse.Namespace:
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
prog="10ff", description=PROLOG, formatter_class=CustomHelpFormatter
)
parser.add_argument(
"-t",
"--time",
type=int,
default=DEFAULT_TIME,
help="how long to play the game for (in seconds)",
)
parser.add_argument(
"-c",
"--corpus",
type=str,
default="english",
help="path to the word list to play the game with",
)
parser.add_argument(
"-w",
"--width",
type=int,
default=80,
help="width of the terminal to play in",
)
parser.add_argument(
"-l", "--list", action="store_true", help="lists the built-in corpora"
)
parser.add_argument(
"-r",
"--rigorous-spaces",
action="store_true",
help="treat double space as an error",
)
return parser.parse_args()
def main() -> None:
"""Main program logic. Start the event loop, parse the CLI arguments and
run the game.
"""
loop = asyncio.new_event_loop()
args = parse_args()
if args.list:
for path in sorted(CORPORA_PATH.iterdir()):
if path.suffix == ".txt":
print(path.stem)
return
input_handler = TerminalInputHandler(loop)
with input_handler.enable_raw_terminal():
corpus_path = get_corpus_path(args.corpus)
corpus = parse_corpus(corpus_path)
settings = GameSettings(
corpus=corpus,
max_time=args.time,
rigorous_spaces=args.rigorous_spaces,
)
loop.run_until_complete(
run_game(
loop,
input_handler,
settings,
)
)
loop.close()
if __name__ == "__main__":
main()
| 26.699115 | 78 | 0.608552 | 341 | 3,017 | 5.202346 | 0.410557 | 0.040586 | 0.047914 | 0.019166 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004204 | 0.290355 | 3,017 | 112 | 79 | 26.9375 | 0.824381 | 0.121975 | 0 | 0.088608 | 0 | 0 | 0.152496 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037975 | false | 0 | 0.063291 | 0 | 0.164557 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74a90fbaab5a1695a359f7f2248d252b5d5d1dbb | 2,248 | py | Python | python-algorithm/leetcode/problem_76.py | isudox/nerd-algorithm | c1fbe153953cf3fc24395f75d102016fdf9ea0fa | [
"MIT"
] | 5 | 2017-06-11T09:19:34.000Z | 2019-01-16T16:58:31.000Z | python-algorithm/leetcode/problem_76.py | isudox/leetcode-solution | 60085e64deaf396a171367affc94b18114565c43 | [
"MIT"
] | 5 | 2020-03-22T13:53:54.000Z | 2020-03-23T08:49:35.000Z | python-algorithm/leetcode/problem_76.py | isudox/nerd-algorithm | c1fbe153953cf3fc24395f75d102016fdf9ea0fa | [
"MIT"
] | 1 | 2019-03-02T15:50:43.000Z | 2019-03-02T15:50:43.000Z | """76. Minimum Window Substring
https://leetcode.com/problems/minimum-window-substring/
Given a string S and a string T, find the minimum window in S which will
contain all the characters in T in complexity O(n).
Example:
Input: S = "ADOBECODEBANC", T = "ABC"
Output: "BANC"
Note:
If there is no such window in S that covers all characters in T, return the
empty string "".
If there is such window, you are guaranteed that there will always be only
one unique minimum window in S.
"""
class Solution:
def min_window(self, s: str, t: str) -> str:
def is_valid(d: dict):
for v in d.values():
if v > 0:
return False
return True
store = {}
for c in t:
if c not in store:
store[c] = 1
else:
store[c] = store[c] + 1
min_head = min_tail = 0
head = tail = -1
min_len = len(s) + 1
not_found = True
while head <= tail:
if not_found:
                # not found yet: advance the tail pointer.
if tail == len(s) - 1:
break
tail += 1
cur_char = s[tail]
if cur_char in store:
store[cur_char] = store[cur_char] - 1
if is_valid(store):
not_found = False
cur_len = tail - head
if cur_len < min_len:
min_head, min_tail, min_len = head, tail, cur_len
else:
                # already found: advance the head pointer.
head += 1
cur_char = s[head]
cur_len = tail - head
if cur_char in store:
store[cur_char] = store[cur_char] + 1
if not is_valid(store):
not_found = True
else:
if cur_len < min_len:
min_head, min_tail, min_len = head, tail, cur_len
else:
if cur_len < min_len:
min_head, min_tail, min_len = head, tail, cur_len
return s[min_head + 1:min_tail + 1]
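
# Minimal self-check (added for illustration; not part of the original
# solution), using the example from the docstring above.
if __name__ == "__main__":
    assert Solution().min_window("ADOBECODEBANC", "ABC") == "BANC"
    assert Solution().min_window("A", "B") == ""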
| 33.552239 | 77 | 0.479982 | 289 | 2,248 | 3.584775 | 0.276817 | 0.054054 | 0.03861 | 0.054054 | 0.299228 | 0.260618 | 0.233591 | 0.233591 | 0.233591 | 0.233591 | 0 | 0.012058 | 0.446619 | 2,248 | 66 | 78 | 34.060606 | 0.82074 | 0.25089 | 0 | 0.355556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74ab68465c97b9a682ec0c65d4118ab45c6835b2 | 4,138 | py | Python | inventory/urls.py | common1/newassetcms | 65eee3c2ed9dac4cc56bfff863a6cbaff9830d26 | [
"MIT"
] | null | null | null | inventory/urls.py | common1/newassetcms | 65eee3c2ed9dac4cc56bfff863a6cbaff9830d26 | [
"MIT"
] | 7 | 2020-06-05T20:43:46.000Z | 2022-01-13T01:14:21.000Z | inventory/urls.py | common1/newassetcms | 65eee3c2ed9dac4cc56bfff863a6cbaff9830d26 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
app_name = 'inventory'
urlpatterns = [
path('assettypes/',
views.AssetTypesIndexView.as_view(), name='index_assettypes'),
path('assettypes/create/',
views.AssetTypeCreateView.as_view(), name='create_assettype'),
path('assettypes/update/<int:pk>',
views.AssetTypeUpdateView.as_view(), name='update_assettype'),
path('assettypes/read/<int:pk>',
views.AssetTypeReadView.as_view(), name='read_assettype'),
path('assettypes/delete/<int:pk>',
views.AssetTypeDeleteView.as_view(), name='delete_assettype'),
path('featured/',
views.AssetsFeaturedIndexView.as_view(), name='index_featured_assets'),
path('featured/read/<int:pk>',
views.AssetFeaturedReadView.as_view(), name='read_featured_asset'),
path('assets/',
views.AssetsIndexView.as_view(), name='index_assets'),
path('assets/create/',
views.AssetCreateView.as_view(), name='create_asset'),
path('assets/update/<int:pk>',
views.AssetUpdateView.as_view(), name='update_asset'),
path('assets/read/<int:pk>',
views.AssetReadView.as_view(), name='read_asset'),
path('assets/delete/<int:pk>',
views.AssetDeleteView.as_view(), name='delete_asset'),
path('reservations/',
views.ReservationsIndexView.as_view(), name='index_reservations'),
path('reservations/create/',
views.ReservationCreateView.as_view(), name='create_reservation'),
path('reservations/update/<int:pk>',
views.ReservationUpdateView.as_view(), name='update_reservation'),
path('reservations/edit/<int:pk>',
views.ReservationEditView.as_view(), name='edit_reservation'),
path('reservations/read/<int:pk>',
views.ReservationReadView.as_view(), name='read_reservation'),
path('reservations/delete/<int:pk>',
views.ReservationDeleteView.as_view(), name='delete_reservation'),
path('reservations/addbasket/<int:pk>',
views.ReservationAddBasket.as_view(), name='add_basket_reservation'),
path('reservations/addbasket/',
views.add_basket, name='add_basket'),
path('reservations/clearreservedassets/',
views.clear_reserved_assets, name='clear_reserved_assets'),
path('reservations/deletereservedasset/',
views.delete_reserved_asset, name='delete_reserved_asset'),
path('reservedassets/',
views.ReservedAssetsIndexView.as_view(), name='index_reservedassets'),
path('reservedassets/create/',
views.ReservedAssetCreateView.as_view(), name='create_reservedasset'),
path('reservedassets/update/<int:pk>',
views.ReservedAssetUpdateView.as_view(), name='update_reservedasset'),
path('reservedassets/read/<int:pk>',
views.ReservedAssetReadView.as_view(), name='read_reservedasset'),
path('reservedassets/delete/<int:pk>',
views.ReservedAssetDeleteView.as_view(), name='delete_reservedasset'),
path('loanedassets/',
views.LoanedAssetsIndexView.as_view(), name='index_loanedassets'),
path('loanedassets/create/',
views.LoanedAssetCreateView.as_view(), name='create_loanedasset'),
path('loanedassets/update/<int:pk>',
views.LoanedAssetUpdateView.as_view(), name='update_loanedasset'),
path('loanedassets/read/<int:pk>',
views.LoanedAssetReadView.as_view(), name='read_loanedasset'),
path('loanedassets/delete/<int:pk>',
views.LoanedAssetDeleteView.as_view(), name='delete_loanedasset'),
path('returnedassets/',
views.ReturnedAssetsIndexView.as_view(), name='index_returnedassets'),
path('returnedassets/create/',
views.ReturnedAssetCreateView.as_view(), name='create_returnedasset'),
path('returnedassets/update/<int:pk>',
views.ReturnedAssetUpdateView.as_view(), name='update_returnedasset'),
path('returnedassets/read/<int:pk>',
views.ReturnedAssetReadView.as_view(), name='read_returnedasset'),
path('returnedassets/delete/<int:pk>',
views.ReturnedAssetDeleteView.as_view(), name='delete_returnedasset'),
]
| 45.977778 | 80 | 0.694538 | 421 | 4,138 | 6.631829 | 0.175772 | 0.073066 | 0.121777 | 0.037607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.149106 | 4,138 | 89 | 81 | 46.494382 | 0.792957 | 0 | 0 | 0 | 0 | 0 | 0.361044 | 0.182939 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.025316 | 0 | 0.025316 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77639b8b9fd4e3c9cd0c8b422d0c68a3e323eb36 | 239 | py | Python | Python/Transpose and Flatten/Solution.py | chessmastersan/HackerRank | 850319e6f79e7473afbb847d28edde7b2cdfc37d | [
"MIT"
] | 2 | 2019-08-07T19:58:20.000Z | 2019-08-27T00:06:09.000Z | Python/Transpose and Flatten/Solution.py | chessmastersan/HackerRank | 850319e6f79e7473afbb847d28edde7b2cdfc37d | [
"MIT"
] | 1 | 2020-06-11T19:09:48.000Z | 2020-06-11T19:09:48.000Z | Python/Transpose and Flatten/Solution.py | chessmastersan/HackerRank | 850319e6f79e7473afbb847d28edde7b2cdfc37d | [
"MIT"
] | 7 | 2019-08-27T00:06:11.000Z | 2021-12-11T10:01:45.000Z | #author SANKALP SAXENA
import numpy
size = input().split(" ")
n = int(size[0])
l = []
for i in range(0, n):
a = input().split(" ")
l.append(a)
arr = numpy.array(l, int)
trans = arr.transpose()
print(trans)
print(arr.flatten())
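# Hedged example session (added for illustration; not part of the submission):
# for the input
#   2 2
#   1 2
#   3 4
# the script prints the transpose [[1 3] [2 4]] followed by the flattened
# array [1 2 3 4].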
| 15.933333 | 26 | 0.606695 | 38 | 239 | 3.815789 | 0.631579 | 0.137931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010363 | 0.192469 | 239 | 14 | 27 | 17.071429 | 0.740933 | 0.087866 | 0 | 0 | 0 | 0 | 0.009302 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7763bc166d4d7ffcd7c15717edaf06b9a61c5b0b | 1,564 | py | Python | src/service.py | wisrovi/AWS_DIA_17NOV | bfe734197dd9658504ecea6a9f89162877d80fdf | [
"MIT"
] | null | null | null | src/service.py | wisrovi/AWS_DIA_17NOV | bfe734197dd9658504ecea6a9f89162877d80fdf | [
"MIT"
] | null | null | null | src/service.py | wisrovi/AWS_DIA_17NOV | bfe734197dd9658504ecea6a9f89162877d80fdf | [
"MIT"
] | null | null | null | from flask import Flask, app, jsonify, request, redirect, make_response
from leer_modelo import predecir
ALLOWED = ['png','jpg', 'jpeg', 'gif']
def evaluar_extension_archivo(filename):
tiene_punto = "." in filename
if tiene_punto:
        extension_archivo = filename.rsplit(".", 1)[1].lower()  # last extension, handles dotted names
if extension_archivo in ALLOWED:
return True
return False
nombres_parametros = {
"imagen":"file1"
}
html = """
<!doctype html>
<form method="POST" enctype="multipart/form-data">
<label for="fname">Elija su imagen a evaluar:</label>
<input type="file" id="fname" name="file1"><br><br>
<input type="submit" value="Evaluar con RNA">
</form>
"""
nombre_guardar_archivo = "recibido.jpg"
app = Flask(__name__)
@app.route("/RNA", methods=["POST", "GET"])
def recibir_archivo():
if request.method == "POST":
if nombres_parametros["imagen"] not in request.files:
redirect(request.url)
nombre_imagen_recibida = request.files["file1"]
if nombre_imagen_recibida.filename == "":
redirect(request.url)
if evaluar_extension_archivo(nombre_imagen_recibida.filename):
nombre_imagen_recibida.save(nombre_guardar_archivo)
# evaluacion por el modelo de RNA
rta = predecir(nombre_guardar_archivo)
return "La imagen recibida es un " + rta
return html
if __name__=="__main__":
#print(evaluar_extension_archivo("queso.jpg"))
app.run(host="0.0.0.0", port=2022, debug=True)
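
# Hedged usage example (added for illustration; not in the original file):
# with the server running, the endpoint can be exercised from the HTML form
# above or with curl (field name and port taken from the code):
#   curl -F "file1=@test.jpg" http://localhost:2022/RNA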
| 27.928571 | 71 | 0.642583 | 189 | 1,564 | 5.100529 | 0.470899 | 0.082988 | 0.082988 | 0.058091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010788 | 0.22954 | 1,564 | 55 | 72 | 28.436364 | 0.789212 | 0.048593 | 0 | 0.052632 | 0 | 0 | 0.232323 | 0.034343 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.052632 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77640065b3e76d426adcd9f96234a785912b54fb | 1,220 | py | Python | system/detection.py | syazanihussin/FLUX | 22a1885c9ff7ef82cd306e4c3544998a71027b5d | [
"MIT"
] | null | null | null | system/detection.py | syazanihussin/FLUX | 22a1885c9ff7ef82cd306e4c3544998a71027b5d | [
"MIT"
] | 1 | 2019-05-26T04:58:17.000Z | 2019-05-30T15:30:51.000Z | system/detection.py | syazanihussin/FLUX | 22a1885c9ff7ef82cd306e4c3544998a71027b5d | [
"MIT"
] | null | null | null | from interface import implements, Interface
from keras.models import load_model
class IDetection(Interface):
def detect_fake_news(self, type, news):
pass
class Detection(implements(IDetection)):
def detect_fake_news(self, type, news):
# load detection model
detection_model = self.load_detection_model(type)
# predict probability
probabilities = detection_model.predict(news)
        # get the probability for each associated class label
class_label, fake_prob, real_prob = self.get_class_label(probabilities)
return class_label, fake_prob, real_prob
def load_detection_model(self, type):
if(type == 'content'):
return load_model('./model/content_model.h5')
elif(type == 'stance'):
return load_model('./model/stance_model.h5')
    def get_class_label(self, probabilities):
        # scan the batch row by row; the last row's values are returned
        for probability in probabilities:
            fake_prob = probability[0]
            real_prob = probability[1]
            if fake_prob > real_prob:
                class_label = 'Fake'
            else:
                # ties count as 'Real', so class_label is always bound
                class_label = 'Real'
        return class_label, fake_prob, real_prob
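
# --- Hedged usage sketch (added for illustration; not in the original) ---
# `news` must already be preprocessed into the array shape the Keras models
# expect (tokenised/padded); that pipeline lives outside this module.
#
#   detector = Detection()
#   label, fake_p, real_p = detector.detect_fake_news('content', news)
#   print(label, fake_p, real_p)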
| 25.957447 | 79 | 0.64918 | 142 | 1,220 | 5.323944 | 0.274648 | 0.092593 | 0.074074 | 0.084656 | 0.195767 | 0.195767 | 0.161376 | 0 | 0 | 0 | 0 | 0.004484 | 0.268852 | 1,220 | 46 | 80 | 26.521739 | 0.843049 | 0.07377 | 0 | 0.16 | 0 | 0 | 0.060391 | 0.041741 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16 | false | 0.04 | 0.08 | 0 | 0.48 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77650de3eab4c78cafaa8bab4bb47104bd26bb30 | 2,926 | py | Python | Spark-Example-FlightsData/flights_example.py | kiat/met-cs777 | 405aca20c8db4350a7a0d04607ff05f3c4e97c2f | [
"BSD-3-Clause"
] | 32 | 2020-07-02T00:51:13.000Z | 2022-03-31T22:31:30.000Z | Flight-Example/flights_example.py | pvkothapalli/MET-CS777 | 6825bec99581b04f9bcc328cdaa698faa2af7313 | [
"BSD-3-Clause"
] | 2 | 2021-02-17T00:42:23.000Z | 2021-02-18T17:28:38.000Z | Flight-Example/flights_example.py | pvkothapalli/MET-CS777 | 6825bec99581b04f9bcc328cdaa698faa2af7313 | [
"BSD-3-Clause"
] | 47 | 2020-07-04T15:29:15.000Z | 2022-03-25T05:08:17.000Z |
# https://s3.amazonaws.com/metcs777/flights.csv.bz2
# s3n://metcs777/flights.csv.bz2
# lines = sc.textFile("file:///home/kia/Data/Collected-Datasets/flight-delays/flight-delays/flights.csv")
lines = sc.textFile("s3://metcs777/flights.csv.bz2")
# Removing the Header Line from CSV file
linesHeader = lines.first()
header = sc.parallelize([linesHeader])
linesWithOutHeader = lines.subtract(header)
# The data is about the flights from different airports which includes following attributes
#[u'YEAR,MONTH,DAY,DAY_OF_WEEK,AIRLINE,FLIGHT_NUMBER,TAIL_NUMBER,ORIGIN_AIRPORT,DESTINATION_AIRPORT,SCHEDULED_DEPARTURE,DEPARTURE_TIME,DEPARTURE_DELAY,TAXI_OUT,WHEELS_OFF,SCHEDULED_TIME,ELAPSED_TIME,AIR_TIME,DISTANCE,WHEELS_ON,TAXI_IN,SCHEDULED_ARRIVAL,ARRIVAL_TIME,ARRIVAL_DELAY,DIVERTED,CANCELLED,CANCELLATION_REASON,AIR_SYSTEM_DELAY,SECURITY_DELAY,AIRLINE_DELAY,LATE_AIRCRAFT_DELAY,WEATHER_DELAY']
flights = linesWithOutHeader.map(lambda x: x.split(','))
# YEAR,MONTH,DAY,DAY_OF_WEEK,AIRLINE,FLIGHT_NUMBER,TAIL_NUMBER,ORIGIN_AIRPORT,DESTINATION_AIRPORT,SCHEDULED_DEPARTURE,DEPARTURE_TIME,DEPARTURE_DELAY, CANCELLED
mainFlightsData = flights.map(lambda p: (p[0], p[1] , p[2] , p[3], p[4] , p[5] , p[6], p[7] , p[8] , p[9], p[10], p[11], p[24] ))
# number 6 is ORIGIN_AIRPORT
flightsFromBoston = mainFlightsData.filter(lambda p: True if p[7] == "BOS" else False )
# Get the total number of Flights from BOS
flightsFromBoston.count()
# 107847 flights from Logan Airport in Boston
# Find the subset of flights departing on the weekend.
weekEndFlights = flightsFromBoston.filter(lambda p: True if (int(p[3]) == 6 or int(p[3]) ==7) else False )
weekEndFlights.count()
# 26092
#Q1 Find a list of Origin Airports
#Q2 Find a list of (Origin, Destination) pairs
#Q3 Find the Origin airport which had the largest departure delay in the month of January
#Q4 Find out which carrier has the largest delay on Weekends.
#Q5 Which airport has the most cancellation of flights?
#Q6 Find the percent of flights cancelled for each carrier.
#Q7 Find the largest departure delay for each carrier
#Q8 Find the largest departure delay for each carrier for each month
#Q9 For each carrier find the average Departure delay
#Q10 For each carrier find the average Departure delay for each month
#Q11 Which date of year has the highest rate of flight cancellations?
# The cancellation rate is calculated by dividing the number of cancelled flights by the total number of flights.
#Q12 Calculate the number of flights to each destination state
# For each carrier, for which state do they have the largest average delay?
# You will need the airline and airport data sets for this question.
# AirLine dataset https://s3.amazonaws.com/metcs777/airlines.csv or s3://metcs777/airlines.csv
# Airport dataset https://s3.amazonaws.com/metcs777/airports.csv or s3://metcs777/airports.csv
# add your own questions.
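# A hedged sketch for Q1/Q2 (added for illustration; not in the original).
# In mainFlightsData, p[7] is ORIGIN_AIRPORT and p[8] is DESTINATION_AIRPORT:
# origins = mainFlightsData.map(lambda p: p[7]).distinct()
# pairs = mainFlightsData.map(lambda p: (p[7], p[8])).distinct()
# origins.take(10)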
| 31.12766 | 400 | 0.775803 | 449 | 2,926 | 4.973274 | 0.36971 | 0.025078 | 0.037618 | 0.025526 | 0.262875 | 0.21854 | 0.188088 | 0.188088 | 0.112853 | 0.112853 | 0 | 0.031127 | 0.132604 | 2,926 | 93 | 401 | 31.462366 | 0.8487 | 0.757348 | 0 | 0 | 0 | 0 | 0.049327 | 0.043348 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77678f260d8155b4fbe2ca23186e87d0ca0dde81 | 1,510 | py | Python | setup.py | farirat/rima | eb2165d972256c32d4859c3bf54c5e4b589d780f | [
"Apache-2.0"
] | 2 | 2016-05-07T10:09:29.000Z | 2016-06-10T05:36:20.000Z | setup.py | farirat/rima | eb2165d972256c32d4859c3bf54c5e4b589d780f | [
"Apache-2.0"
] | null | null | null | setup.py | farirat/rima | eb2165d972256c32d4859c3bf54c5e4b589d780f | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
import sys, os
version = '0.0'
setup(name='rima',
version=version,
description="Minimalist Python REST API Framework",
long_description="""\
Minimalist Python REST API Framework
""",
classifiers=[
"Development Status :: 1 - Planning",
"Environment :: Web Environment",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Internet",
"Topic :: Internet :: WWW/HTTP :: HTTP Servers",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Server",
], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='REST API',
author='Ghassen Telmoudi',
author_email='ghassen.telmoudi@gmail.com',
url='',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
"tornado",
"mongoengine",
],
entry_points="""
# -*- Entry points: -*-
""",
)
| 34.318182 | 82 | 0.575497 | 140 | 1,510 | 6.128571 | 0.628571 | 0.060606 | 0.055944 | 0.06993 | 0.156177 | 0.100233 | 0 | 0 | 0 | 0 | 0 | 0.006458 | 0.282119 | 1,510 | 43 | 83 | 35.116279 | 0.785055 | 0.065563 | 0 | 0.1 | 0 | 0 | 0.514915 | 0.018466 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.05 | 0 | 0.05 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7767ac0e64dfc02b7c55003a7ff7dca82f05c66d | 10,438 | py | Python | src/backend/aspen/workflows/nextstrain_run/export.py | chanzuckerberg/covidr | afe05d703d30ec18ac83944bfb551c313cb216c4 | [
"MIT"
] | 2 | 2021-01-13T18:37:20.000Z | 2021-01-16T02:17:38.000Z | src/backend/aspen/workflows/nextstrain_run/export.py | chanzuckerberg/covidr | afe05d703d30ec18ac83944bfb551c313cb216c4 | [
"MIT"
] | 16 | 2021-01-14T20:21:13.000Z | 2021-01-29T22:23:13.000Z | src/backend/aspen/workflows/nextstrain_run/export.py | chanzuckerberg/covidr | afe05d703d30ec18ac83944bfb551c313cb216c4 | [
"MIT"
] | null | null | null | import csv
import io
import json
from pathlib import Path
from typing import Any, Iterable, List, Mapping, MutableMapping, Set, Tuple
import click
from sqlalchemy import and_
from sqlalchemy.orm import aliased, joinedload, with_polymorphic
from aspen.config.config import Config
from aspen.database.connection import (
get_db_uri,
init_db,
session_scope,
SqlAlchemyInterface,
)
from aspen.database.models import (
AlignedGisaidDump,
Entity,
EntityType,
PathogenGenome,
PhyloRun,
PublicRepositoryType,
Sample,
UploadedPathogenGenome,
)
from aspen.database.models.workflow import Workflow
METADATA_CSV_FIELDS = [
"strain",
"virus",
"gisaid_epi_isl",
"genbank_accession",
"date",
"region",
"country",
"division",
"location",
"region_exposure",
"country_exposure",
"division_exposure",
"segment",
"length",
"host",
"age",
"sex",
"pango_lineage",
"GISAID_clade",
"originating_lab",
"submitting_lab",
"authors",
"url",
"title",
"paper_url",
"date_submitted",
]
@click.command("save")
@click.option("--phylo-run-id", type=int, required=True)
@click.option("sequences_fh", "--sequences", type=click.File("w"), required=False)
@click.option("selected_fh", "--selected", type=click.File("w"), required=False)
@click.option("metadata_fh", "--metadata", type=click.File("w"), required=False)
@click.option("builds_file_fh", "--builds-file", type=click.File("w"), required=True)
@click.option(
"county_sequences_fh", "--county-sequences", type=click.File("w"), required=False
)
@click.option(
"county_metadata_fh", "--county-metadata", type=click.File("w"), required=False
)
@click.option("--test", type=bool, is_flag=True)
def cli(
phylo_run_id: int,
sequences_fh: io.TextIOBase,
selected_fh: io.TextIOBase,
metadata_fh: io.TextIOBase,
builds_file_fh: io.TextIOBase,
county_sequences_fh: io.TextIOBase,
county_metadata_fh: io.TextIOBase,
test: bool,
):
if test:
print("Success!")
return
interface: SqlAlchemyInterface = init_db(get_db_uri(Config()))
with session_scope(interface) as session:
# this allows us to load the secondary tables of a polymorphic type. In this
# case, we want to load the inputs of a phylo run, provided the input is of type
# `PathogenGenome` and `AlignedGisaidDump`.
phylo_run_inputs = with_polymorphic(
Entity,
[PathogenGenome, AlignedGisaidDump],
flat=True,
)
phylo_run: PhyloRun = (
session.query(PhyloRun)
.filter(PhyloRun.workflow_id == phylo_run_id)
.options(
joinedload(PhyloRun.inputs.of_type(phylo_run_inputs)).undefer(
phylo_run_inputs.PathogenGenome.sequence
)
)
.one()
)
# If we're writing a file for all county-wide samples, generate it here.
if county_sequences_fh:
# Get all samples for the group
group = phylo_run.group
all_samples: Iterable[Sample] = (
session.query(Sample)
.filter(Sample.submitting_group_id == group.id)
.options(
joinedload(Sample.uploaded_pathogen_genome, innerjoin=True).undefer(
PathogenGenome.sequence
)
)
)
pathogen_genomes = [
sample.uploaded_pathogen_genome for sample in all_samples
]
# Write all those samples to the sequences/metadata files
write_sequences_files(
session, pathogen_genomes, county_sequences_fh, county_metadata_fh
)
# Populate builds.yaml file with values from the phylo_run template_args
# and write them to the filesystem
aspen_root = Path(__file__).parent.parent.parent.parent.parent
with (aspen_root / phylo_run.template_file_path).open("r") as build_template_fh:
build_template = build_template_fh.read()
template_args = (
phylo_run.template_args
if isinstance(phylo_run.template_args, Mapping)
else {}
)
builds_file_fh.write(build_template.format(**template_args))
# get all the children that are pathogen genomes
pathogen_genomes = [
inp for inp in phylo_run.inputs if isinstance(inp, PathogenGenome)
]
        # get the aligned gisaid run info; each phylo run is expected to have
        # exactly one such input.
        aligned_gisaid = next(
            inp for inp in phylo_run.inputs if isinstance(inp, AlignedGisaidDump)
        )
if sequences_fh:
write_sequences_files(session, pathogen_genomes, sequences_fh, metadata_fh)
if selected_fh:
write_includes_file(session, phylo_run, pathogen_genomes, selected_fh)
print(
json.dumps(
{
"bucket": aligned_gisaid.s3_bucket,
"metadata_key": aligned_gisaid.metadata_s3_key,
"sequences_key": aligned_gisaid.sequences_s3_key,
}
)
)
def write_includes_file(session, phylo_run, pathogen_genomes, selected_fh):
    # Collect the sample IDs of the inputted pathogen genomes that are uploaded pathogen genomes
sample_ids: List[int] = [
pathogen_genome.sample_id
for pathogen_genome in pathogen_genomes
if isinstance(pathogen_genome, UploadedPathogenGenome)
]
    # Write an includes file listing each sample's public identifier.
sample_query = session.query(Sample).filter(Sample.id.in_(sample_ids))
for sample in sample_query:
public_identifier = sample.public_identifier
        if public_identifier.lower().startswith("hcov-19/"):
            public_identifier = public_identifier[len("hcov-19/"):]
selected_fh.write(f"{public_identifier}\n")
for gisaid_id in phylo_run.gisaid_ids:
selected_fh.write(f"{gisaid_id}\n")
def write_sequences_files(session, pathogen_genomes, sequences_fh, metadata_fh):
    # Create a set of the inputted pathogen genomes that are uploaded pathogen genomes
uploaded_pathogen_genomes = {
pathogen_genome
for pathogen_genome in pathogen_genomes
if isinstance(pathogen_genome, UploadedPathogenGenome)
}
sample_ids = {
uploaded_pathogen_genome.sample_id
for uploaded_pathogen_genome in uploaded_pathogen_genomes
}
sample_id_to_sample: Mapping[int, Sample] = {
sample.id: sample
for sample in session.query(Sample).filter(Sample.id.in_(sample_ids))
}
accession_input_alias = aliased(Entity)
pathogen_genome_id_repository_type_to_accession_names: Mapping[
Tuple[int, PublicRepositoryType], str
] = {
(
accession.get_parents(PathogenGenome)[0].entity_id,
PublicRepositoryType.from_entity_type(accession.entity_type),
): accession.public_identifier
# We have overlap between aligned gisaid file & aspen data.
for accession in session.query(Entity)
.join(Entity.producing_workflow)
.join(accession_input_alias, Workflow.inputs)
.filter(
and_(
Entity.entity_type.in_(
(
EntityType.GISAID_REPOSITORY_SUBMISSION,
EntityType.GENBANK_REPOSITORY_SUBMISSION,
)
),
accession_input_alias.id.in_(
{pathogen_genome.entity_id for pathogen_genome in pathogen_genomes}
),
)
)
}
aspen_samples: Set[str] = set()
    metadata_writer = csv.DictWriter(metadata_fh, METADATA_CSV_FIELDS, delimiter="\t")
    metadata_writer.writeheader()
for pathogen_genome in pathogen_genomes:
# find the corresponding sample
if isinstance(pathogen_genome, UploadedPathogenGenome):
sample_id = pathogen_genome.sample_id
else:
raise ValueError("pathogen genome of unknown type")
sample = sample_id_to_sample[sample_id]
aspen_samples.add(sample.public_identifier)
sequence = "".join(
[
line
for line in pathogen_genome.sequence.splitlines()
if not (line.startswith(">") or line.startswith(";"))
]
)
        sequence = sequence.strip("Nn")  # trim leading/trailing runs of ambiguous bases
upload_date = None
if sample.sequencing_reads_collection is not None:
upload_date = sample.sequencing_reads_collection.upload_date.strftime(
"%Y-%m-%d"
)
elif sample.uploaded_pathogen_genome is not None:
upload_date = sample.uploaded_pathogen_genome.upload_date.strftime(
"%Y-%m-%d"
)
aspen_metadata_row: MutableMapping[str, Any] = {
"strain": sample.public_identifier,
"virus": "ncov",
"gisaid_epi_isl": pathogen_genome_id_repository_type_to_accession_names.get(
(pathogen_genome.entity_id, PublicRepositoryType.GISAID), ""
),
"genbank_accession": pathogen_genome_id_repository_type_to_accession_names.get(
(pathogen_genome.entity_id, PublicRepositoryType.GENBANK), ""
),
"date": sample.collection_date.strftime("%Y-%m-%d"),
"date_submitted": upload_date,
"region": sample.region.value,
"country": sample.country,
"division": sample.division,
"location": sample.location,
"region_exposure": sample.region.value,
"country_exposure": sample.country,
"division_exposure": sample.division,
"segment": "genome",
"length": len(sequence),
"host": "Human",
"age": "?",
"sex": "?",
"originating_lab": sample.sample_collected_by,
"submitting_lab": sample.submitting_group.name,
"authors": ", ".join(sample.authors),
"pango_lineage": sample.uploaded_pathogen_genome.pangolin_lineage,
}
        metadata_writer.writerow(aspen_metadata_row)
sequences_fh.write(f">{sample.public_identifier}\n")
sequences_fh.write(sequence)
sequences_fh.write("\n")
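# Example invocation (file names are illustrative):
#   save --phylo-run-id 12345 --builds-file builds.yaml \
#        --sequences sequences.fasta --metadata metadata.tsv --selected include.txt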
if __name__ == "__main__":
cli()
| 35.144781 | 91 | 0.627132 | 1,122 | 10,438 | 5.567736 | 0.213904 | 0.056027 | 0.024652 | 0.013446 | 0.245718 | 0.227789 | 0.191452 | 0.185369 | 0.171923 | 0.128382 | 0 | 0.001064 | 0.279651 | 10,438 | 296 | 92 | 35.263514 | 0.829765 | 0.079805 | 0 | 0.070313 | 0 | 0 | 0.087705 | 0.005214 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011719 | false | 0 | 0.046875 | 0 | 0.0625 | 0.007813 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77685ce53cb05d06d03bc4a12c6065a439cae421 | 485 | py | Python | dictlearn_gpu/utils.py | mukheshpugal/dictlearn_gpu | 79265ddc439e2a406adbe7e505b1b6d61fa50d25 | [
"MIT"
] | null | null | null | dictlearn_gpu/utils.py | mukheshpugal/dictlearn_gpu | 79265ddc439e2a406adbe7e505b1b6d61fa50d25 | [
"MIT"
] | null | null | null | dictlearn_gpu/utils.py | mukheshpugal/dictlearn_gpu | 79265ddc439e2a406adbe7e505b1b6d61fa50d25 | [
"MIT"
] | null | null | null | import cupy as cp
import numpy as np
def to_gpu(mat):
    """Return `mat` as a float32 CuPy array, passing existing CuPy arrays through unchanged."""
    if not isinstance(mat, cp.ndarray):
        return cp.asarray(mat, dtype=cp.float32)
    return mat
def dct_dict_1d(n_atoms, size):
    """Build an overcomplete 1-D DCT dictionary of shape (size, n_atoms) with unit-norm atoms."""
dct = np.zeros((size, n_atoms))
for k in range(n_atoms):
basis = np.cos(np.arange(size) * k * np.pi / n_atoms)
if k > 0:
basis = basis - np.mean(basis)
basis /= np.linalg.norm(basis)
dct[:, k] = basis
return dct
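# Minimal usage sketch (assumes a working CUDA device for the GPU step; the
# shapes below are illustrative only):
#   dictionary = dct_dict_1d(n_atoms=64, size=32)  # (32, 64) overcomplete DCT dictionary
#   d_gpu = to_gpu(dictionary)                     # float32 copy on the GPU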
| 22.045455 | 62 | 0.560825 | 75 | 485 | 3.533333 | 0.493333 | 0.090566 | 0.090566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012158 | 0.321649 | 485 | 21 | 63 | 23.095238 | 0.793313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7768c92d141b2480a0d707c158a88bcde5139d2f | 4,263 | py | Python | yaglm/opt/prox.py | ngierty/yaglm | 57e212b576af7c5c0f0d62f9d6ad2484b9e25f10 | [
"MIT"
] | 15 | 2021-10-19T15:00:12.000Z | 2022-03-24T22:24:54.000Z | yaglm/opt/prox.py | ngierty/yaglm | 57e212b576af7c5c0f0d62f9d6ad2484b9e25f10 | [
"MIT"
] | 13 | 2021-07-22T21:52:55.000Z | 2021-10-07T11:50:10.000Z | yaglm/opt/prox.py | ngierty/yaglm | 57e212b576af7c5c0f0d62f9d6ad2484b9e25f10 | [
"MIT"
] | 2 | 2021-11-12T18:45:12.000Z | 2021-12-14T14:57:42.000Z | import numpy as np
from yaglm.linalg_utils import euclid_norm
def soft_thresh(vec, thresh_vals):
"""
The soft thresholding operator.
Parameters
----------
vec: array-like
The values to threshold
thresh_vals: float, array-like
The thresholding values
Output
-------
    vec_thresh: array-like
        The entries of ``vec`` shrunk towards zero by ``thresh_vals``.
"""
return np.sign(vec) * np.fmax(abs(vec) - thresh_vals, 0)
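# Quick numerical check of the operator above (values illustrative):
#   soft_thresh(np.array([3.0, -0.5, 1.0]), 1.0) -> array([ 2., -0.,  0.])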
# TODO: is this useful? If not remove it
# def prox_ridge_lasso(x, lasso_pen_val=1, lasso_weights=None,
# ridge_pen_val=1, ridge_weights=None, step=1):
# """
# Evaluates the proximal operator of
# f(x; step) = lasso_mul * sum_j lasso_weights_j |x_j|
# + 0.5 * ridge_mul * sum_j lasso_weights_j x_j^2
# Parameters
# ----------
# x: array-like
# The value at which to evaluate the prox operator.
# lasso_pen_val: float
# The multiplicative penalty value for the lasso penalty.
# lasso_weights: None, array-like
# The (optional) variable weights for the lasso penalty.
# ridge_pen_val: float
# The multiplicative penalty value for the ridge penalty.
# ridge_weights: None, array-like
# The (optional) variable weights for the ridge penalty.
# step: float
# The step size.
# Output
# ------
# prox_val: array-like
# The proximal operator.
# """
# lasso_pen_val = lasso_pen_val * step
# ridge_pen_val = ridge_pen_val * step
# if lasso_weights is None:
# lasso_weights = np.ones_like(x)
# thresh = lasso_pen_val * np.array(lasso_weights)
# if ridge_weights is None:
# ridge_weights = np.ones_like(x)
# mult = ridge_pen_val * np.array(ridge_weights)
# mult = 1 / (1 + mult)
# return soft_thresh(x * mult, thresh * mult)
# TODO: is this useful? If not remove it.
# def prox_ridge_perturb(x, prox, ridge_pen_val=1, step=1):
# """
# Evaluates the proximal operator of
# f(x) + 0.5 * ridge_pen_val ||x||_2^2
# e.g. see Theorem 6.13 of (Beck, 2017).
# Parameters
# ----------
# x: array-like
# The value at which to evaluate the prox operator.
# prox: callable(x, step) -> array-like
# The proximal operator of f.
# ridge_pen_val: float
# The ridge penalty value.
# step: float
# The step size.
# Output
# ------
# prox_val: array-like
# The proximal operator.
# References
# ----------
# Beck, A., 2017. First-order methods in optimization. Society for Industrial and Applied Mathematics.
# """
# denom = ridge_pen_val * step + 1
# return prox(x / denom, step=step / denom)
def L2_prox(x, mult):
"""
    Computes the proximal operator of mult * ||x||_2 (block soft-thresholding).
"""
norm = euclid_norm(x)
if norm <= mult:
return np.zeros_like(x)
else:
return x * (1 - (mult / norm))
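# Example of the block soft-thresholding behaviour (values illustrative):
#   L2_prox(np.array([3.0, 4.0]), 1.0) -> array([2.4, 3.2])   (norm 5 shrunk by 1)
#   L2_prox(np.array([0.3, 0.4]), 1.0) -> array([0., 0.])     (norm 0.5 <= 1)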
def squared_l1_prox_pos(x, step=1, weights=None, check=False):
"""
    prox_{step * f}(x) for positive vectors x
where f(z) = (sum_i w_i |z_i|)^2
Parameters
----------
x: array-like
The vector to evaluate the prox at. Note this must be positive.
step: float
The prox step size.
weights: array-like
The (optional) positive weights.
check: bool
Whether or not to check that x is non-negative.
Output
------
p: array-like
The value of the proximal operator.
References
----------
Lin, M., Sun, D., Toh, K.C. and Yuan, Y., 2019. A dual Newton based preconditioned proximal point algorithm for exclusive lasso models. arXiv preprint arXiv:1902.00151.
"""
if check:
        assert np.all(x >= 0)
if weights is None:
weights = np.ones_like(x)
# get indices to sort x / weights in decreasing order
x_over_w = x / weights
decr_sort_idxs = np.argsort(x_over_w)[::-1]
x_sort = x[decr_sort_idxs]
weights_sort = weights[decr_sort_idxs]
# compute threshold value
s = np.cumsum(x_sort * weights_sort)
L = np.cumsum(weights_sort ** 2)
    alpha_bar = np.max(s / (1 + 2 * step * L))
thresh = 2 * step * alpha_bar * weights
# return soft thresholding
return np.maximum(x - thresh, 0)
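# Sanity check in the scalar case, where f(z) = z^2 and the prox is x / (1 + 2 * step):
#   squared_l1_prox_pos(np.array([1.0]), step=1) -> array([0.33333333])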
| 25.076471 | 172 | 0.604269 | 597 | 4,263 | 4.157454 | 0.268007 | 0.033844 | 0.058018 | 0.033844 | 0.311039 | 0.267123 | 0.257051 | 0.257051 | 0.239323 | 0.172442 | 0 | 0.017286 | 0.280788 | 4,263 | 169 | 173 | 25.224852 | 0.792237 | 0.726484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005917 | 0.041667 | 1 | 0.125 | false | 0 | 0.083333 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77692efc43ca99a211134cc384641108b1111a00 | 902 | py | Python | HW-1.py | SESCNCFUARTYOM/repforkeys | a20b14f16f3dd0df4f95b9927855b77bde7ef8e3 | [
"MIT"
] | null | null | null | HW-1.py | SESCNCFUARTYOM/repforkeys | a20b14f16f3dd0df4f95b9927855b77bde7ef8e3 | [
"MIT"
] | null | null | null | HW-1.py | SESCNCFUARTYOM/repforkeys | a20b14f16f3dd0df4f95b9927855b77bde7ef8e3 | [
"MIT"
] | null | null | null | n, i, m, g, gg = 0, 0, 0, 0, 0
n = int(input())
if n > 1983:
while n > 1983:
n -= 60
m = n - 1923
else:
while n < 1983:
n += 60
m = n - 1983
g = (m // 12) + 1  # colour index within the 60-year cycle
gg = m % 12        # zodiac animal index
result = ''
if g == 1:
    result = 'green'
elif g == 2:
    result = 'red'
elif g == 3:
    result = 'yellow'
elif g == 4:
    result = 'white'
elif g == 5:
    result = 'black'
elif g == 6:
    result = 'black'
if gg == 1:
    result += ' rat'
elif gg == 2:
    result += ' ox'
elif gg == 3:
    result += ' tiger'
elif gg == 4:
    result += ' rabbit'
elif gg == 5:
    result += ' dragon'
elif gg == 6:
    result += ' snake'
elif gg == 7:
    result += ' horse'
elif gg == 8:
    result += ' sheep'
elif gg == 9:
    result += ' monkey'
elif gg == 10:
    result += ' rooster'
elif gg == 11:
    result += ' dog'
elif gg == 0:
    result += ' pig'
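# e.g. the input 2021 gives m = 38, so the program prints "white ox"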
print(result) | 15.824561 | 30 | 0.473392 | 133 | 902 | 3.210526 | 0.345865 | 0.154567 | 0.021077 | 0.018735 | 0.070258 | 0.070258 | 0.070258 | 0 | 0 | 0 | 0 | 0.091681 | 0.347007 | 902 | 57 | 31 | 15.824561 | 0.633277 | 0 | 0 | 0.04 | 0 | 0 | 0.127353 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77694ac8b2dc5a16098243dd7f10b1803ad2cd33 | 1,961 | py | Python | App/workflow.py | dataminion/ScienceManager | 924e1de41aca56985d9e30750264eb995604f352 | [
"MIT"
] | null | null | null | App/workflow.py | dataminion/ScienceManager | 924e1de41aca56985d9e30750264eb995604f352 | [
"MIT"
] | null | null | null | App/workflow.py | dataminion/ScienceManager | 924e1de41aca56985d9e30750264eb995604f352 | [
"MIT"
] | null | null | null | #workflow
import sys
## Python libraries
import argparse
import logging
import logging.config
## Internal libraries
from service.configuration import Configuration
from service.job import Job
from model.object import Object
from model.connection import ConnectionSettings
from model.workflow import *
class Program(object):
""" A simple data structure to hold job parameters """
Log = logging
M = Object()
S = Object()
Args = Object()
M.Workflow = Workflow()
M.Tasks = []
S.Conf = Configuration(file='./var/cfg.yaml')
logging.config.dictConfig(S.Conf.get_logging())
def __init__(self):
pass
def main(self):
self._handle_arguments()
        program = self.Args.program
if program is not None:
self.M.conn = self.S.Conf.set_database_connection(ConnectionSettings())
self.S.job = Job(self.Log, self.M.conn)
            # look up the workflow definition for the requested program
            self.M.Workflow = self.S.job.get_job_details(self.M.Workflow, self.Args.program)
            # register a new batch for this run
            self.M.Workflow.batch_id = self.S.job.register_job(1, 1)
            # expand the workflow into its individual tasks
            self.M.Workflow.tasks = self.S.job.setup_job(self.M.Workflow.id)
            # run each task in turn
            self.S.job.process_job_items(self.M.Workflow.tasks)
def _handle_arguments(self):
self.Log.info('handling arguments')
        parser = argparse.ArgumentParser(description='Teradata "Science Manager" v0.1')
parser.add_argument('-p', '--program', help='set the program to be executed')
        parser.add_argument('-l', '--list', help='get a list of available programs')  # parsed but not yet handled in main()
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
parser.parse_args(namespace=self.Args)
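# Example invocation (the program name is hypothetical):
#   python workflow.py --program nightly_scoring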
class Error(Exception):
def __init__(self, m='Workflow error occurred.'):
self.message = m
def __str__(self):
return self.message
if __name__ == "__main__":
Program().main() | 30.169231 | 92 | 0.63743 | 249 | 1,961 | 4.86747 | 0.37751 | 0.037129 | 0.075083 | 0.021452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004046 | 0.243753 | 1,961 | 65 | 93 | 30.169231 | 0.813216 | 0.046405 | 0 | 0 | 0 | 0 | 0.095469 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106383 | false | 0.021277 | 0.191489 | 0.021277 | 0.446809 | 0.021277 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
776b5a586a307c515edd0602fb6ce773dfcc4ddd | 5,969 | py | Python | cloudify_aws/kms/tests/test_key.py | jrzeszutek/cloudify-aws-plugin | 59832b4ac5ddad496110085ed2e21dd36db5e9df | [
"Apache-2.0"
] | 13 | 2015-05-28T23:21:05.000Z | 2022-03-20T05:38:20.000Z | cloudify_aws/kms/tests/test_key.py | jrzeszutek/cloudify-aws-plugin | 59832b4ac5ddad496110085ed2e21dd36db5e9df | [
"Apache-2.0"
] | 49 | 2015-01-04T16:05:34.000Z | 2022-03-27T11:35:13.000Z | cloudify_aws/kms/tests/test_key.py | jrzeszutek/cloudify-aws-plugin | 59832b4ac5ddad496110085ed2e21dd36db5e9df | [
"Apache-2.0"
] | 41 | 2015-01-21T17:16:05.000Z | 2022-03-31T06:47:48.000Z | # Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Standard imports
import unittest
# Third party imports
from mock import MagicMock
# Local imports
from cloudify_aws.common.tests.test_base import CLIENT_CONFIG
from cloudify_aws.kms.tests.test_kms import TestKMS
from cloudify_aws.kms.resources import key
# Constants
KEY_TH = ['cloudify.nodes.Root',
'cloudify.nodes.aws.kms.CustomerMasterKey']
NODE_PROPERTIES = {
'use_external_resource': False,
'resource_config': {
"kwargs": {
"Description": "An example CMK.",
"Tags": [{
"TagKey": "Cloudify",
"TagValue": "Example"
}]
}
},
'client_config': CLIENT_CONFIG
}
RUNTIME_PROPERTIES = {
'resource_config': {}
}
RUNTIME_PROPERTIES_AFTER_CREATE = {
'aws_resource_arn': 'arn_id',
'aws_resource_id': 'key_id',
'resource_config': {}
}
class TestKMSKey(TestKMS):
def test_prepare(self):
self._prepare_check(
type_hierarchy=KEY_TH,
type_name='kms',
type_class=key
)
def test_create_raises_UnknownServiceError(self):
self._prepare_create_raises_UnknownServiceError(
type_hierarchy=KEY_TH,
type_name='kms',
type_class=key
)
def test_create(self):
_ctx = self._prepare_context(
KEY_TH, NODE_PROPERTIES
)
self.fake_client.create_key = MagicMock(return_value={
'KeyMetadata': {
'Arn': "arn_id",
'KeyId': 'key_id'
}
})
key.create(ctx=_ctx, resource_config=None, iface=None)
self.fake_boto.assert_called_with('kms', **CLIENT_CONFIG)
self.fake_client.create_key.assert_called_with(
Description='An example CMK.',
Tags=[{'TagKey': 'Cloudify', 'TagValue': 'Example'}]
)
self.assertEqual(
_ctx.instance.runtime_properties,
RUNTIME_PROPERTIES_AFTER_CREATE
)
def test_enable(self):
_ctx = self._prepare_context(
KEY_TH, NODE_PROPERTIES, RUNTIME_PROPERTIES_AFTER_CREATE
)
self.fake_client.schedule_key_deletion = MagicMock(return_value={})
key.enable(ctx=_ctx, resource_config=None, iface=None)
self.fake_boto.assert_called_with('kms', **CLIENT_CONFIG)
self.assertEqual(
_ctx.instance.runtime_properties,
RUNTIME_PROPERTIES_AFTER_CREATE
)
def test_disable(self):
_ctx = self._prepare_context(
KEY_TH, NODE_PROPERTIES, RUNTIME_PROPERTIES_AFTER_CREATE
)
self.fake_client.schedule_key_deletion = MagicMock(return_value={})
key.disable(ctx=_ctx, resource_config=None, iface=None)
self.fake_boto.assert_called_with('kms', **CLIENT_CONFIG)
self.assertEqual(
_ctx.instance.runtime_properties,
RUNTIME_PROPERTIES_AFTER_CREATE
)
def test_delete(self):
_ctx = self._prepare_context(
KEY_TH, NODE_PROPERTIES, RUNTIME_PROPERTIES_AFTER_CREATE
)
self.fake_client.schedule_key_deletion = MagicMock(return_value={})
key.delete(ctx=_ctx, resource_config=None, iface=None)
self.fake_boto.assert_called_with('kms', **CLIENT_CONFIG)
self.fake_client.schedule_key_deletion.assert_called_with(
KeyId='key_id'
)
self.assertEqual(
_ctx.instance.runtime_properties,
RUNTIME_PROPERTIES_AFTER_CREATE
)
def test_KMSKey_status(self):
test_instance = key.KMSKey("ctx_node", resource_id='queue_id',
client=self.fake_client, logger=None)
self.assertEqual(test_instance.status, None)
def test_KMSKey_properties(self):
test_instance = key.KMSKey("ctx_node", resource_id='queue_id',
client=self.fake_client, logger=None)
self.assertEqual(test_instance.properties, None)
def test_KMSKey_properties_with_key(self):
test_instance = key.KMSKey("ctx_node", resource_id='queue_id',
client=self.fake_client, logger=None)
self.fake_client.describe_key = MagicMock(
return_value={'KeyMetadata': 'z'}
)
self.assertEqual(test_instance.properties, 'z')
def test_KMSKey_enable(self):
test_instance = key.KMSKey("ctx_node", resource_id='queue_id',
client=self.fake_client, logger=None)
self.fake_client.enable_key = MagicMock(
return_value={'KeyMetadata': 'y'}
)
self.assertEqual(
test_instance.enable({'a': 'b'}),
{'KeyMetadata': 'y'}
)
self.fake_client.enable_key.assert_called_with(a='b')
def test_KMSKey_disable(self):
test_instance = key.KMSKey("ctx_node", resource_id='queue_id',
client=self.fake_client, logger=None)
self.fake_client.disable_key = MagicMock(
return_value={'KeyMetadata': 'y'}
)
self.assertEqual(
test_instance.disable({'a': 'b'}),
{'KeyMetadata': 'y'}
)
self.fake_client.disable_key.assert_called_with(a='b')
if __name__ == '__main__':
unittest.main()
| 28.42381 | 75 | 0.626906 | 669 | 5,969 | 5.273543 | 0.22571 | 0.045351 | 0.063492 | 0.063492 | 0.637188 | 0.573696 | 0.556406 | 0.540533 | 0.540533 | 0.496315 | 0 | 0.001847 | 0.27425 | 5,969 | 209 | 76 | 28.559809 | 0.812558 | 0.108393 | 0 | 0.367647 | 0 | 0 | 0.094268 | 0.011501 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.080882 | false | 0 | 0.036765 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
776c1f07ea108fe03cb9a7205323ee4e16411b3f | 10,184 | py | Python | terraref/laser3d/laser3d.py | terraref/ply2las | 90c791c64e2d94effa7d155228bf306e2f3fa29c | [
"BSD-3-Clause"
] | 1 | 2018-09-27T05:45:56.000Z | 2018-09-27T05:45:56.000Z | terraref/laser3d/laser3d.py | terraref/laser3d | 90c791c64e2d94effa7d155228bf306e2f3fa29c | [
"BSD-3-Clause"
] | 3 | 2018-05-16T19:40:26.000Z | 2018-12-14T20:02:53.000Z | terraref/laser3d/laser3d.py | terraref/ply2las | 90c791c64e2d94effa7d155228bf306e2f3fa29c | [
"BSD-3-Clause"
] | 1 | 2018-07-31T13:34:13.000Z | 2018-07-31T13:34:13.000Z | import subprocess
import numpy
import os
import laspy
from osgeo import gdal
from plyfile import PlyData, PlyElement
import matplotlib.pyplot as plt
# rpy2 is imported lazily inside tif_fit_rleafangle so the rest of this module
# works without an R installation
from terrautils.formats import create_geotiff
from terrautils.spatial import scanalyzer_to_mac
def ply_to_array(inp, md, utm):
"""Read PLY files into a numpy matrix.
:param inp: list of input PLY files or single file path
:param md: metadata for the PLY files
:param utm: True to return coordinates to UTM, False to return gantry fixed coordinates
:return: tuple of (x_points, y_points, z_points, utm_bounds)
"""
if not isinstance(inp, list):
inp = [inp]
scandist = float(md['sensor_variable_metadata']['scan_distance_mm'])/1000.0
scan_dir = int(md['sensor_variable_metadata']['scan_direction'])
pco = md['sensor_variable_metadata']['point_cloud_origin_m']['east']
# Create concatenated list of vertices to generate one merged LAS file
first = True
for plyf in inp:
if plyf.find("west") > -1:
curr_side = "west"
cambox = [2.070, 2.726, 1.135]
else:
curr_side = "east"
cambox = [2.070, 0.306, 1.135]
plydata = PlyData.read(plyf)
merged_x = plydata['vertex']['x']
merged_y = plydata['vertex']['y']
merged_z = plydata['vertex']['z']
# Attempt fix using math from terrautils.spatial.calculate_gps_bounds
fix_x = merged_x + cambox[0] + 0.082
if scan_dir == 0:
fix_y = merged_y + float(2.0*float(cambox[1])) - scandist/2.0 + (
-0.354 if curr_side == 'east' else -4.363)
utm_x, utm_y = scanalyzer_to_mac(
(fix_x * 0.001) + pco['x'],
(fix_y * 0.001) + pco['y']/2.0 - 0.1
)
else:
fix_y = merged_y + float(2.0*float(cambox[1])) - scandist/2.0 + (
4.2 if curr_side == 'east' else -3.43)
utm_x, utm_y = scanalyzer_to_mac(
(fix_x * 0.001) + pco['x'],
(fix_y * 0.001) + pco['y']/2.0 + 0.4
)
fix_z = merged_z + cambox[2]
utm_z = (fix_z * 0.001)+ pco['z']
# Create matrix of fixed gantry coords for TIF, but min/max of UTM coords for georeferencing
if first:
if utm:
x_pts = utm_x
y_pts = utm_y
else:
x_pts = fix_x
y_pts = fix_y
z_pts = utm_z
min_x_utm = numpy.min(utm_x)
min_y_utm = numpy.min(utm_y)
max_x_utm = numpy.max(utm_x)
max_y_utm = numpy.max(utm_y)
first = False
else:
if utm:
x_pts = numpy.concatenate([x_pts, utm_x])
y_pts = numpy.concatenate([y_pts, utm_y])
else:
x_pts = numpy.concatenate([x_pts, fix_x])
y_pts = numpy.concatenate([y_pts, fix_y])
z_pts = numpy.concatenate([z_pts, utm_z])
min_x_utm2 = numpy.min(utm_x)
min_y_utm2 = numpy.min(utm_y)
max_x_utm2 = numpy.max(utm_x)
max_y_utm2 = numpy.max(utm_y)
            min_x_utm = min(min_x_utm, min_x_utm2)
            min_y_utm = min(min_y_utm, min_y_utm2)
            max_x_utm = max(max_x_utm, max_x_utm2)
            max_y_utm = max(max_y_utm, max_y_utm2)
bounds = (min_y_utm, max_y_utm, min_x_utm, max_x_utm)
return (x_pts, y_pts, z_pts, bounds)
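# Minimal usage sketch (the file name is illustrative; `md` is the cleaned sensor
# metadata dict whose keys are read above):
#   x_pts, y_pts, z_pts, bounds = ply_to_array(["scan__west_0.ply"], md, utm=True)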
def generate_las_from_ply(inp, out, md, utm=True):
"""Read PLY file to array and write that array to an LAS file.
:param inp: list of input PLY files or single file path
:param out: output LAS file
:param md: metadata for the PLY files
:param utm: True to return coordinates to UTM, False to return gantry fixed coordinates
"""
(x_pts, y_pts, z_pts, bounds) = ply_to_array(inp, md, utm)
# Create header and populate with scale and offset
w = laspy.base.Writer(out, 'w', laspy.header.Header())
w.header.offset = [numpy.floor(numpy.min(y_pts)),
numpy.floor(numpy.min(x_pts)),
numpy.floor(numpy.min(z_pts))]
if utm:
w.header.scale = [.000001, .000001, .000001]
else:
w.header.scale = [1, 1, .000001]
w.set_x(y_pts, True)
w.set_y(x_pts, True)
w.set_z(z_pts, True)
w.set_header_property("x_max", numpy.max(y_pts))
w.set_header_property("x_min", numpy.min(y_pts))
w.set_header_property("y_max", numpy.max(x_pts))
w.set_header_property("y_min", numpy.min(x_pts))
w.set_header_property("z_max", numpy.max(z_pts))
w.set_header_property("z_min", numpy.min(z_pts))
w.close()
return bounds
def generate_tif_from_ply(inp, out, md, mode='max'):
"""
Create a raster (e.g. Digital Surface Map) from LAS pointcloud.
:param inp: input LAS file
:param out: output TIF file
:param md: metadata for the PLY files
:param mode: max | min | mean | idx | count | stdev (https://pdal.io/stages/writers.gdal.html)
"""
pdal_dtm = out.replace(".tif", "_dtm.json")
las_raw = out.replace(".tif", "_temp.las")
tif_raw = out.replace(".tif", "unreferenced.tif")
bounds = generate_las_from_ply(inp, las_raw, md, False)
if not os.path.exists(tif_raw):
# Generate a temporary JSON file with PDAL pipeline for conversion to TIF and execute it
with open(pdal_dtm, 'w') as dtm:
dtm.write("""{
"pipeline": [
"%s",
{
"filename":"%s",
"output_type":"%s",
"resolution": 1,
"type": "writers.gdal"
}
]
}""" % (las_raw, tif_raw, mode))
# "gdalopts": "t_srs=epsg:32612"
cmd = 'pdal pipeline %s' % pdal_dtm
subprocess.call([cmd], shell=True)
os.remove(las_raw)
# Georeference the unreferenced TIF file according to PLY UTM bounds
ds = gdal.Open(tif_raw)
px = ds.GetRasterBand(1).ReadAsArray()
#if scan_dir == 0:
# px = numpy.rot90(px, 2)
# x = numpy.fliplr(px)
create_geotiff(px, bounds, out, asfloat=True)
os.remove(tif_raw)
def las_to_height(in_file, out_file=None):
"""Return a tuple of (height histogram, max height) from an LAS file."""
number_of_bins = 500
height_hist = numpy.zeros(number_of_bins)
las_handle = laspy.file.File(in_file)
zData = las_handle.Z
    if zData.size == 0:
        return height_hist, 0
    max_height = numpy.max(zData)
    height_hist = numpy.histogram(zData, bins=range(-1, number_of_bins), density=False)[0]
if out_file:
out = open(out_file, 'w')
out.write("bin,height_cm,count\n")
for i in range(len(height_hist)):
out.write("%s,%s,%s\n" % (i+1, "%s-%s" % (i, i+1), height_hist[i]))
out.close()
return height_hist, max_height
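# Example (the paths are illustrative):
#   hist, max_height = las_to_height("scan.las", out_file="height_histogram.csv")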
def load_tif_vector(heightmap_tif):
"""Load heightmap geotiff into a vector for other methods."""
f = gdal.Open(heightmap_tif)
vector = numpy.array(f.GetRasterBand(1).ReadAsArray())
vector[vector == -9999.] = numpy.nan
return vector
def tif_sample(geotiff, sample_num=1000, vector=None):
"""Return random sampling of heightmap values.
vector: Use already-loaded vector instead of reloading."""
    if vector is None:
vector = load_tif_vector(geotiff)
return numpy.random.choice(vector[~numpy.isnan(vector)], sample_num)
def tif_mean(geotiff, vector=None):
"""Get average of geotiff values.
vector: Use already-loaded vector instead of reloading."""
    if vector is None:
vector = load_tif_vector(geotiff)
return numpy.nanmean(vector)
def tif_var(geotiff, vector=None):
"""Get variance of geotiff values.
vector: Use already-loaded vector instead of reloading."""
    if vector is None:
vector = load_tif_vector(geotiff)
return numpy.nanvar(vector)
def tif_hist(geotiff, save=False, vector=None):
"""Get histogram of geotiff values.
save: False, or a path to .png file.
vector: Use already-loaded vector instead of reloading.
"""
    if vector is None:
vector = load_tif_vector(geotiff)
newv = numpy.concatenate(vector, axis=0)
    plt.hist(newv[~numpy.isnan(newv)], 50, density=True, facecolor='green', alpha=0.75)
plt.xlabel('Geotiff value')
plt.ylabel('Probability')
plt.title('Histogram of Geotiff')
if save:
plt.savefig(save)
plt.close()
else:
plt.show()
def tif_fit_rleafangle(geotiff):
    """Use R (via rpy2) to fit a two-parameter beta leaf-angle distribution."""
    # lazy import: only this function needs an R installation with RLeafAngle
    from rpy2.robjects import r, pandas2ri, numpy2ri
    f = gdal.Open(geotiff)
vector = numpy.concatenate(numpy.array(f.GetRasterBand(1).ReadAsArray()), axis=0)
vector[vector == -9999.] = numpy.nan
newvector = vector[~numpy.isnan(vector)]
rstring = """
function(angles){
n <- length(angles)
betapara <- RLeafAngle::computeBeta(angles)
result <- data.frame(rbind(
c(trait = 'leaf_angle_twoparbeta',
beta1 = betapara[1],
beta2 = betapara[2],
mean = betapara[1]/(betapara[1]+betapara[2]),
variance = betapara[1]*betapara[2]/(betapara[1]+betapara[2])/(betapara[1]+betapara[2])/(betapara[1]+betapara[2]+1),
n = n)))
return(result)
}
"""
numpy2ri.activate()
rfunc = r(rstring)
r_df = rfunc(newvector)
newdf = pandas2ri.ri2py(r_df)
return newdf
def tif_fit_pyleafangle(geotiff):
"""Use Python to fit leaf angle."""
f = gdal.Open(geotiff)
vector = numpy.concatenate(numpy.array(f.GetRasterBand(1).ReadAsArray()), axis=0)
vector[vector == -9999.] = numpy.nan
newvector = vector[~numpy.isnan(vector)]
    xbar = numpy.mean(newvector)
    xvar = numpy.var(newvector)
    # beta parameters from the sample mean and variance
    alpha = (((1 - xbar) / xvar - 1) / xbar) * (xbar ** 2)
    beta = alpha * (1 / xbar - 1)
return ('leaf_angle_twoparbeta', alpha, beta, xbar, xvar)
| 34.174497 | 129 | 0.600255 | 1,463 | 10,184 | 3.993165 | 0.197539 | 0.008216 | 0.01027 | 0.018487 | 0.365457 | 0.304519 | 0.238959 | 0.219445 | 0.219445 | 0.212256 | 0 | 0.02728 | 0.276512 | 10,184 | 297 | 130 | 34.289562 | 0.765608 | 0.192655 | 0 | 0.164179 | 0 | 0.004975 | 0.147333 | 0.038667 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054726 | false | 0 | 0.044776 | 0 | 0.149254 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77724149191f0e80307f99dd8a0656415ea312e7 | 559 | py | Python | conflictgateway/social/jobsupdate.py | sebastianlees/Conflict-Gateway | e875abb48ad4d51db90983a35a6c7bd47a54d5e9 | [
"MIT"
] | null | null | null | conflictgateway/social/jobsupdate.py | sebastianlees/Conflict-Gateway | e875abb48ad4d51db90983a35a6c7bd47a54d5e9 | [
"MIT"
] | null | null | null | conflictgateway/social/jobsupdate.py | sebastianlees/Conflict-Gateway | e875abb48ad4d51db90983a35a6c7bd47a54d5e9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from twython import Twython
import random
APP_KEY = 'APP KEY' # Customer Key here
APP_SECRET = 'APP SECRET' # Customer secret here
OAUTH_TOKEN = 'OAUTH TOKEN' # Access Token here
OAUTH_TOKEN_SECRET = 'OAUTH TOKEN SECRET' # Access Token Secret here
# NOTE: the jobs count is randomly generated here rather than pulled from a real source
jobsnumber = random.randint(50, 100)
twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
twitter.update_status(status="Weekly jobs update: " + str(jobsnumber) + " Mediation & Conflict Resolution jobs & scholarships added... http://www.conflictgateway.com/jobs") | 39.928571 | 172 | 0.763864 | 77 | 559 | 5.402597 | 0.454545 | 0.144231 | 0.115385 | 0.096154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010352 | 0.135957 | 559 | 14 | 172 | 39.928571 | 0.850932 | 0.182469 | 0 | 0 | 0 | 0 | 0.359823 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
777390888167fc8811e643a135d220bdf95baecc | 3,457 | py | Python | experiment_utils/initialization_utils.py | HanGuo97/experiment-utils | ff1288d4ab89af90e5c8c486de4dd7673a99079b | [
"MIT"
] | null | null | null | experiment_utils/initialization_utils.py | HanGuo97/experiment-utils | ff1288d4ab89af90e5c8c486de4dd7673a99079b | [
"MIT"
] | null | null | null | experiment_utils/initialization_utils.py | HanGuo97/experiment-utils | ff1288d4ab89af90e5c8c486de4dd7673a99079b | [
"MIT"
] | null | null | null | import os
import click
from git import Repo
from absl import logging
from datetime import datetime
from collections import namedtuple
from typing import Tuple, Optional
from . import wandb_utils
logging.set_verbosity(logging.INFO)
ExperimentConfig = namedtuple(
"ExperimentConfig", (
"project_name",
"experiment_tag",
"experiment_name",
"experiment_description",
"experiment_dir",
"repo"))
CONFIG: Optional[ExperimentConfig] = None
LOG_STRING = click.style("Experiment", fg="blue", bold=True)
def interactive_initialize(
base_experiment_dir: str=".",
default_project_name: Optional[str]=None,
default_experiment_tag: Optional[str]=None,
default_experiment_description: Optional[str]=None,
initialize_wandb: bool=True
) -> ExperimentConfig:
if not isinstance(base_experiment_dir, str):
raise ValueError("`base_experiment_dir` must be String")
# Set file-scope configuration
global CONFIG
# Query user inputs
project_name = click.prompt(
"Please Enter The Project Name",
type=str, default=default_project_name)
experiment_tag = click.prompt(
"Please Enter The Experiment Name",
type=str, default=default_experiment_tag)
experiment_description = click.prompt(
"Please Enter The Experiment Description",
type=str, default=default_experiment_description)
    # Project name should be in PascalCase
    project_name = to_PascalCase(project_name)
    # Experiment tag should be in PascalCase
    experiment_tag = to_PascalCase(experiment_tag)
    # Experiment name also encodes the date and current Git state
experiment_name, git_repo = _get_experiment_name(tag=experiment_tag)
# Experiment log dir
experiment_dir = os.path.join(base_experiment_dir, experiment_name)
CONFIG = ExperimentConfig(
repo=git_repo,
project_name=project_name,
experiment_tag=experiment_tag,
experiment_dir=experiment_dir,
experiment_name=experiment_name,
experiment_description=experiment_description)
_print_config()
# Confirm is the directory already exists
if os.path.exists(CONFIG.experiment_dir):
click.confirm(click.style(
"Experiment Dir already exists, continue?", fg="red"), abort=True)
# Confirm
click.confirm("Do you want to continue?", abort=True)
if initialize_wandb:
wandb_utils.wandb_initialize(
project_name=project_name,
experiment_name=experiment_name,
experiment_tag=experiment_tag,
experiment_notes=experiment_description)
return CONFIG
def _get_experiment_name(tag: Optional[str]=None) -> Tuple[str, Repo]:
"""Get the experiment name based on Git status and time"""
repo = Repo("./")
date = datetime.now()
name = (
f"{date.year}{date.month}{date.day}_"
f"BRANCH_{repo.active_branch}_"
f"COMMIT_{repo.head.commit.hexsha[:5]}_"
f"TAG_{tag}")
return name, repo
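# e.g. "20220312_BRANCH_main_COMMIT_ab12c_TAG_Baseline" (branch, commit and tag
# shown here are illustrative)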
def _print_config() -> None:
if CONFIG is None:
return
# Maximum field lengths
max_length = max(map(len, CONFIG._fields)) + 1
for i in range(len(CONFIG)):
click.echo(f"{LOG_STRING} "
f"{CONFIG._fields[i]: <{max_length}}: "
f"{CONFIG[i]}")
def to_PascalCase(s: str) -> str:
return "".join(x for x in s.title() if not x.isspace())
| 29.29661 | 78 | 0.676598 | 413 | 3,457 | 5.445521 | 0.2954 | 0.058693 | 0.081814 | 0.032014 | 0.249 | 0.066696 | 0 | 0 | 0 | 0 | 0 | 0.000751 | 0.22939 | 3,457 | 117 | 79 | 29.547009 | 0.843468 | 0.087938 | 0 | 0.075 | 0 | 0 | 0.154508 | 0.045237 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0.0125 | 0.2 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7775682dcc6dc253e667940b371c684ba9d3f2b3 | 1,295 | py | Python | Kiraro/Kiraro_Voice/Say.py | NotJakeR/Kiraro-Discord-Bot | de4f765eedd92cedcb7c61b0444e8f69e905f9e6 | [
"Apache-2.0"
] | null | null | null | Kiraro/Kiraro_Voice/Say.py | NotJakeR/Kiraro-Discord-Bot | de4f765eedd92cedcb7c61b0444e8f69e905f9e6 | [
"Apache-2.0"
] | null | null | null | Kiraro/Kiraro_Voice/Say.py | NotJakeR/Kiraro-Discord-Bot | de4f765eedd92cedcb7c61b0444e8f69e905f9e6 | [
"Apache-2.0"
] | 1 | 2021-01-25T19:06:17.000Z | 2021-01-25T19:06:17.000Z | from Kiraro import bot
from discord.ext import commands
from discord.errors import ClientException
from Kiraro.Kiraro_Voice import idk, queue
import discord
import gtts
queue_list = {}
@bot.command()
async def say(ctx, *, word):
boolean, voice = await idk(ctx)
if boolean:
tts = gtts.gTTS(word)
tts.save("Voice_Files/TTS.wav")
try:
voice.play(discord.FFmpegPCMAudio("Voice_Files/TTS.wav")) #, after=lambda e: queue(ctx, queue_list, voice))
voice.source = discord.PCMVolumeTransformer(voice.source)
        except ClientException:
            # already playing: queue the phrase for this guild instead
            queue_list.setdefault(ctx.guild.id, []).append(word)
print(queue_list)
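# Invoked in chat as "<prefix>say <text>"; the prefix depends on how the bot object
# in Kiraro is configured.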
@say.error
async def say_error(ctx, error):
if isinstance(error, discord.HTTPException):
await ctx.send("Something went wrong, try again later")
elif isinstance(error, commands.MissingRequiredArgument):
embed = discord.Embed(
title="Say",
description="To use the Say command just add the text",
color=discord.Color.blue()
)
embed.set_author(name=ctx.author, icon_url=ctx.author.avatar_url)
embed.add_field(name="Usage", value="Say `message` ")
await ctx.send(embed=embed)
else:
print(F"Say Error {error}") | 33.205128 | 119 | 0.658687 | 167 | 1,295 | 5.035928 | 0.45509 | 0.042806 | 0.026159 | 0.03805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.23166 | 1,295 | 39 | 120 | 33.205128 | 0.845226 | 0.037066 | 0 | 0 | 0 | 0 | 0.123496 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7777e8520a12559d29d6241cbcb36e71e0940302 | 7,056 | py | Python | simple_estimator.py | carlos9310/models | d5038337a42544b95d6bf97e40099ef140399b89 | [
"Apache-2.0"
] | 2 | 2020-09-14T02:15:17.000Z | 2021-09-28T07:08:32.000Z | simple_estimator.py | carlos9310/models | d5038337a42544b95d6bf97e40099ef140399b89 | [
"Apache-2.0"
] | null | null | null | simple_estimator.py | carlos9310/models | d5038337a42544b95d6bf97e40099ef140399b89 | [
"Apache-2.0"
] | 1 | 2022-01-04T06:55:54.000Z | 2022-01-04T06:55:54.000Z | # -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
ROOT_PATH = '/tmp/census_data'
TRAIN_PATH = '/tmp/census_data/adult.data'
EVAL_PATH = '/tmp/census_data/adult.test'
PREDICT_PATH = '/content/models/official/r1/wide_deep/census_test.csv'
MODEL_PATH = '/tmp/adult_model'
EXPORT_PATH = '/tmp/adult_export_model'
_CSV_COLUMNS = [
'age', 'workclass', 'fnlwgt', 'education', 'education_num',
'marital_status', 'occupation', 'relationship', 'race', 'gender',
'capital_gain', 'capital_loss', 'hours_per_week', 'native_country',
'income_bracket'
]
_CSV_COLUMN_DEFAULTS = [[0], [''], [0], [''], [0], [''], [''], [''], [''], [''],
[0], [0], [0], [''], ['']]
_HASH_BUCKET_SIZE = 1000
_NUM_EXAMPLES = {
'train': 32561,
'validation': 16281,
}
def build_model_columns():
"""Builds a set of wide and deep feature columns."""
# Continuous variable columns
age = tf.feature_column.numeric_column('age')
education_num = tf.feature_column.numeric_column('education_num')
capital_gain = tf.feature_column.numeric_column('capital_gain')
capital_loss = tf.feature_column.numeric_column('capital_loss')
hours_per_week = tf.feature_column.numeric_column('hours_per_week')
education = tf.feature_column.categorical_column_with_vocabulary_list(
'education', [
'Bachelors', 'HS-grad', '11th', 'Masters', '9th', 'Some-college',
'Assoc-acdm', 'Assoc-voc', '7th-8th', 'Doctorate', 'Prof-school',
'5th-6th', '10th', '1st-4th', 'Preschool', '12th'])
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
'marital_status', [
'Married-civ-spouse', 'Divorced', 'Married-spouse-absent',
'Never-married', 'Separated', 'Married-AF-spouse', 'Widowed'])
relationship = tf.feature_column.categorical_column_with_vocabulary_list(
'relationship', [
'Husband', 'Not-in-family', 'Wife', 'Own-child', 'Unmarried',
'Other-relative'])
workclass = tf.feature_column.categorical_column_with_vocabulary_list(
'workclass', [
'Self-emp-not-inc', 'Private', 'State-gov', 'Federal-gov',
'Local-gov', '?', 'Self-emp-inc', 'Without-pay', 'Never-worked'])
# To show an example of hashing:
occupation = tf.feature_column.categorical_column_with_hash_bucket(
'occupation', hash_bucket_size=_HASH_BUCKET_SIZE)
# Transformations.
age_buckets = tf.feature_column.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
# Wide columns and deep columns.
base_columns = [
education, marital_status, relationship, workclass, occupation,
age_buckets,
]
crossed_columns = [
tf.feature_column.crossed_column(
['education', 'occupation'], hash_bucket_size=_HASH_BUCKET_SIZE),
tf.feature_column.crossed_column(
[age_buckets, 'education', 'occupation'],
hash_bucket_size=_HASH_BUCKET_SIZE),
]
wide_columns = base_columns + crossed_columns
deep_columns = [
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
tf.feature_column.indicator_column(workclass),
tf.feature_column.indicator_column(education),
tf.feature_column.indicator_column(marital_status),
tf.feature_column.indicator_column(relationship),
# To show an example of embedding
tf.feature_column.embedding_column(occupation, dimension=8),
]
return wide_columns, deep_columns
def input_fn(data_path, shuffle, num_epochs, batch_size):
"""Generate an input function for the Estimator."""
def parse_csv(value):
tf.logging.info('Parsing {}'.format(data_path))
columns = tf.decode_csv(value, record_defaults=_CSV_COLUMN_DEFAULTS)
features = dict(zip(_CSV_COLUMNS, columns))
labels = features.pop('income_bracket')
tf.logging.info(f'labels:{labels}')
classes = tf.equal(labels, '>50K') # binary classification
return features, classes
# Extract lines from input files using the Dataset API.
dataset = tf.data.TextLineDataset(data_path)
if shuffle:
dataset = dataset.shuffle(buffer_size=_NUM_EXAMPLES['train'])
dataset = dataset.map(parse_csv, num_parallel_calls=5)
# We call repeat after shuffling, rather than before, to prevent separate
# epochs from blending together.
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
return dataset
# estimator.train() can be called repeatedly; model state is persisted in model_dir
def run():
wide_columns, deep_columns = build_model_columns()
# os.system('rm -rf {}'.format(MODEL_PATH))
config = tf.estimator.RunConfig(save_checkpoints_steps=100)
estimator = tf.estimator.DNNLinearCombinedClassifier(model_dir=MODEL_PATH,
linear_feature_columns=wide_columns,
linear_optimizer=tf.train.FtrlOptimizer(learning_rate=0.01),
dnn_feature_columns=deep_columns,
dnn_hidden_units=[256, 64, 32, 16],
dnn_optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
config=config)
# Linear model.
# estimator = tf.estimator.LinearClassifier(feature_columns=wide_columns, n_classes=2,
# optimizer=tf.train.FtrlOptimizer(learning_rate=0.03))
# Train the model.
estimator.train(
input_fn=lambda: input_fn(data_path=TRAIN_PATH, shuffle=True, num_epochs=40, batch_size=100), steps=2000)
"""
steps: 最大训练次数,模型训练次数由训练样本数量、num_epochs、batch_size共同决定,通过steps可以提前停止训练
"""
# Evaluate the model.
eval_result = estimator.evaluate(
input_fn=lambda: input_fn(data_path=EVAL_PATH, shuffle=False, num_epochs=1, batch_size=40))
print('Test set accuracy:', eval_result)
# Predict.
pred_dict = estimator.predict(
input_fn=lambda: input_fn(data_path=PREDICT_PATH, shuffle=False, num_epochs=1, batch_size=40))
for pred_res in pred_dict:
print(pred_res['probabilities'][1])
columns = wide_columns + deep_columns
feature_spec = tf.feature_column.make_parse_example_spec(feature_columns=columns)
serving_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)
estimator.export_savedmodel(EXPORT_PATH, serving_input_fn)
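# The exported SavedModel can be inspected with the TensorFlow CLI, e.g.
# (the path is illustrative):
#   saved_model_cli show --dir /tmp/adult_export_model/<timestamp> --all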
if __name__ == '__main__':
run() | 40.090909 | 118 | 0.639172 | 794 | 7,056 | 5.367758 | 0.338791 | 0.040122 | 0.06687 | 0.025809 | 0.266072 | 0.186298 | 0.163069 | 0.104176 | 0.017363 | 0 | 0 | 0.018438 | 0.24674 | 7,056 | 176 | 119 | 40.090909 | 0.783443 | 0.10771 | 0 | 0.016807 | 0 | 0 | 0.152771 | 0.025129 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033613 | false | 0 | 0.05042 | 0 | 0.109244 | 0.02521 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7778e854b1769ba9428d3a4d3170d1002d65dedd | 2,385 | py | Python | examples/run_multi_functions_parallel.py | rishavpramanik/mealpy | d4a4d5810f15837764e4ee61517350fef3dc92b3 | [
"MIT"
] | null | null | null | examples/run_multi_functions_parallel.py | rishavpramanik/mealpy | d4a4d5810f15837764e4ee61517350fef3dc92b3 | [
"MIT"
] | null | null | null | examples/run_multi_functions_parallel.py | rishavpramanik/mealpy | d4a4d5810f15837764e4ee61517350fef3dc92b3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Created by "Thieu" at 10:26, 02/03/2022 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
import concurrent.futures as parallel
from functools import partial
from opfunu.cec_basic import cec2014_nobias
from pandas import DataFrame
from mealpy.evolutionary_based.DE import BaseDE
from os import getcwd, path, makedirs
PATH_RESULTS = "history/results/"
check_dir = f"{getcwd()}/{PATH_RESULTS}"
if not path.exists(check_dir):
makedirs(check_dir)
model_name = "DE"
n_dims = 30
func_names = ["F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9", "F10", "F11", "F12", "F13", "F14", "F15", "F16", "F17", "F18", "F19"]
def find_minimum(function_name, n_dims):
print(f"Start running: {function_name}")
problem = {
"fit_func": getattr(cec2014_nobias, function_name),
"lb": [-100, ] * n_dims,
"ub": [100, ] * n_dims,
"minmax": "min",
"verbose": True,
}
model = BaseDE(problem, epoch=10, pop_size=50, wf=0.8, cr=0.9, name=model_name, fit_name=function_name)
_, best_fitness = model.solve()
print(f"Finish function: {function_name}")
return {
"func_name": function_name,
"best_fit": [best_fitness],
"error": model.history.list_global_best_fit
}
if __name__ == '__main__':
## Run model
best_fit_full = {}
best_fit_columns = []
error_full = {}
error_columns = []
with parallel.ProcessPoolExecutor() as executor:
results = executor.map(partial(find_minimum, n_dims=n_dims), func_names)
for result in results:
error_full[result["func_name"]] = result["error"]
error_columns.append(result["func_name"])
best_fit_full[result["func_name"]] = result["best_fit"]
best_fit_columns.append(result["func_name"])
df_err = DataFrame(error_full, columns=error_columns)
df_err.to_csv(f"{PATH_RESULTS}{n_dims}D_{model_name}_error.csv", header=True, index=False)
df_fit = DataFrame(best_fit_full, columns=best_fit_columns)
df_fit.to_csv(f"{PATH_RESULTS}{n_dims}D_{model_name}_best_fit.csv", header=True, index=False)
| 36.692308 | 137 | 0.6 | 301 | 2,385 | 4.468439 | 0.438538 | 0.052045 | 0.041636 | 0.02974 | 0.157621 | 0.047584 | 0.047584 | 0.047584 | 0.047584 | 0.047584 | 0 | 0.040243 | 0.239413 | 2,385 | 64 | 138 | 37.265625 | 0.701213 | 0.165199 | 0 | 0 | 0 | 0 | 0.178931 | 0.060484 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021739 | false | 0 | 0.130435 | 0 | 0.173913 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77798b6704167fd268a08dbc9c3f502cb2811a76 | 9,876 | py | Python | table_sorter.py | manawesome326/table-sorter | 53c6b0cdcf6b70216e1bf0a69bdd6b193cf64604 | [
"Unlicense"
] | null | null | null | table_sorter.py | manawesome326/table-sorter | 53c6b0cdcf6b70216e1bf0a69bdd6b193cf64604 | [
"Unlicense"
] | null | null | null | table_sorter.py | manawesome326/table-sorter | 53c6b0cdcf6b70216e1bf0a69bdd6b193cf64604 | [
"Unlicense"
] | null | null | null | import random
import math
import time
import copy
allstudents = []
class Student:
def __init__(self,name,friends,robot=False):
self.name = name
self.__name__ = name
self.robot = robot
        self.friends = friends
self.currenttable = 0
allstudents.append(self)
################################################################
#ignore anything above this line
#example students
#add new ones by pasting more below here with this exact syntax
#make sure to get the names right!
#the first name is the person's name. The stuff in the curly brackets is the other people,
#and how much this person wants to be in a group with them.
#For example here greg has given jordan a rating of -1, and alexandria a rating of -0.99
#I might be able to help you convert other formats into this if you need it. I admit it's not the most convenient.
Student("greg",{"jordan":-1,"alexandria":-0.99})
Student("jordan",{"greg":1,"alexandria":100})
Student("alexandria",{"greg":0.2,"jordan":1})
#and so on
#students are allowed to set a preference for themselves, but this has no effect.
#Students do not need to give a preference to every user - unknowns default to...
value_of_unknown_people = 0 #If you want people to meet new people, set this higher! Lower if being friends is useful.
#This is a completely untested feature. 0 is probably a good value. Anything more than 20% of the range people are setting preferences in is probably very silly.
#Preferences can be any number, including negatives and decimals.
#Use of precise decimals prevents some minor weirdness that (might) worsen your results
#So ask your respondents to be as precise as they like in their rankings!
#Failing that, set the below value to True to run some code to jitter the values a little...
jitter = False
#Negative numbers are treated differently by the code - ask people to only use them on people they really do not want to share a group with!
#student generator
#used to generate people ("robots") at random
#I use this for testing, but I don't think you'll need it? Best just leave it on 0.
robots_to_add = 0
total_tables = 20  # set to the number of tables, of course
t_pop = 5 #and how many people go on each table
#note: the program will crash if there isn't enough room on the tables!
#However, it is fine to have too many tables for the person count!
goes = 10 #Adjust this value upwards if the program finishes too fast!
#A good 5 minute run will help find the best groups, especially if there's a lot of people.
#On the other hand, if it seems to be taking a very long time, lower the value and accept that things won't quite be perfect.
#No matter how many attempts you take and thus what result you get, it'll be very unlikely that any singular swap of people will improve the rating of it.
#no more config options are below this line.
#################################################################
for i in range(robots_to_add):
Student("robot " + str(i),{},True)
tables = []
for i in range(total_tables):
tables.append([].copy())
count = 0
ghostly_hatred = {}
for thing in allstudents:
if thing.robot:
for i in range(10):
this = random.choice(allstudents)
thing.friends[this] = random.randint(-10,10)
print(thing.name + " thinks " + this.name + " is worth " + str(thing.friends[this]))
#only used for the robot people
else:
tempdict = {}
        for key in thing.friends.keys():
            friend = [x for x in allstudents if x.name == key][0]
            tempdict[friend] = thing.friends[key]
            print(thing.name + " thinks " + friend.name + " is worth " + str(tempdict[friend]))
thing.friends = tempdict
tables[math.floor(count/t_pop)].append(thing)
thing.currenttable = math.floor(count/t_pop)
count += 1
ghostly_hatred[thing] = -0.1 #everybody slightly dislikes ghosts. Actually, I don't know what this does, I wrote this code ages ago.
for table in tables:
while len(table) < t_pop:
table.append(Student("a ghost!",ghostly_hatred))
#ghosts are added to tables that aren't full.
#I didn't consider having one big group instead of one small one,
#so if you need that, probably just have your extra people pick new groups on their own
for table in tables:
print("Table " + str(tables.index(table)) + ": ")
for person in table:
print(person.name)
#this section does something which probably leads to better results:
#if person A gives person B a positive rating, but person B gives person A a negative rating,
#Person A's rating of person B is set to half of person B's rating of person A.
#The effect of this is that a malicious person who wishes to be on the same table as somebody who wants to avoid them
#is less likely to succeed in this. But they won't be separated as strongly as two people who *both* hate each other!
for student in allstudents:
for friend in student.friends.keys():
try:
if (student.friends[friend] < 0) and (friend.friends[student] >= 0):
friend.friends[student] = student.friends[friend]/2
print(friend.name + " has decided " + student.name + " is actually only worth " + str(friend.friends[student]))
except KeyError:
friend.friends[student] = student.friends[friend]/2
print(friend.name + " has never heard of " + student.name + ", but now dislikes them with a value of " + str(friend.friends[student]))
if jitter:
for student in allstudents:
for friend in student.friends.keys():
student.friends[friend] = student.friends[friend] + random.uniform(0.00001,0.00002)
def score_eval(tables):
score = 0
for table in tables:
tablescore = 0
for student in table:
#print(student.name)
for partner in table:
try:
score += student.friends[partner]
tablescore += student.friends[partner]
except KeyError:
score += value_of_unknown_people
tablescore += value_of_unknown_people
return score
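# Worked example: with the three sample students above on one table (and before the
# dislike adjustment), score_eval adds (-1) + (-0.99) + 1 + 100 + 0.2 + 1 = 100.21;
# ratings of yourself or of unknown people fall through to value_of_unknown_people.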
print("Basic happiness: " + str(score_eval(tables)))
initial_tables = copy.deepcopy(tables)
def test(no_leeching, randomer):
world_record = score_eval(initial_tables)
give_up = 0
record_breaks = 0
maximum_swaps = int(t_pop*t_pop*(math.factorial(total_tables)/(math.factorial(total_tables-2)*2)))
print("maximum should be " +str(maximum_swaps))
maximum_boredom = maximum_swaps*2
for i in range(goes):
print("starting attempt " + str(i))
tables = copy.deepcopy(initial_tables)
give_up = 0
#randoming
if randomer:
            for _ in range(maximum_swaps*4):
swap_table_1 = random.choice(tables)
swap_table_2 = random.choice(tables)
while swap_table_2 == swap_table_1:
swap_table_2 = random.choice(tables)
swap_student_1 = random.randint(0,len(swap_table_1)-1)
swap_student_2 = random.randint(0,len(swap_table_2)-1)
swap_table_1[swap_student_1], swap_table_2[swap_student_2] = swap_table_2[swap_student_2], swap_table_1[swap_student_1]
print("starting happiness: " + str(score_eval(tables)))
boredom = 0
attempts = []
while True:
while True:
swap_table_1 = random.choice(tables)
swap_table_2 = random.choice(tables)
while swap_table_2 == swap_table_1:
swap_table_2 = random.choice(tables)
swap_student_1 = random.randint(0,len(swap_table_1)-1)
swap_student_2 = random.randint(0,len(swap_table_2)-1)
if not (swap_table_1[swap_student_1].name + swap_table_2[swap_student_2].name in attempts):
break
current_score = score_eval(tables)
if no_leeching:
current_s_1 = score_eval([swap_table_1])
current_s_2 = score_eval([swap_table_2])
swap_table_1[swap_student_1], swap_table_2[swap_student_2] = swap_table_2[swap_student_2], swap_table_1[swap_student_1]
new_score = score_eval(tables)
if no_leeching:
new_s_1 = score_eval([swap_table_1])
new_s_2 = score_eval([swap_table_2])
if (new_score < current_score) or (no_leeching and ((new_s_1 < current_s_1) or (new_s_2 < current_s_2))):
attempts.append(swap_table_1[swap_student_1].name + swap_table_2[swap_student_2].name)
swap_table_1[swap_student_1], swap_table_2[swap_student_2] = swap_table_2[swap_student_2], swap_table_1[swap_student_1]
#SEND EM BACK
#print(current_score)
give_up += 1
else:
give_up = 0
#attempts = []
#print("swap!" + str(random.random()))
#print(maximum_boredom-boredom)
if new_score == current_score:
#boredom += 1
#if boredom > maximum_boredom:
# print("What!")
# break
pass
else:
boredom = 0
attempts = []
if len(attempts) > 999999:
print(len(attempts))
if len(attempts) >= maximum_swaps:
break
if current_score > world_record:
world_record = current_score
record_breaks += 1
print("Record broken!")
#time.sleep(0.5)
elif current_score == world_record:
print("Record found again!")
else:
print("no record broken!")
#time.sleep(0.5)
print(current_score)
print("Total happiness: " + str(world_record))
print("Average happiness: " + str(world_record/len(allstudents)))
for table in tables:
print("Group " + str(tables.index(table)) + ": ")
print("Happiness: " + str(score_eval([table])))
if score_eval([table]) < 0:
print("This group has a negative score! They probably won't have a lot of fun. You probably shouldn't be seeing this; maybe try running the program again?")
for person in table:
print(person.name)
print("Results for no leeching:")
test(True, True)
print("Results for yes leeching")
test(False, True)
#In "no leeching" trials, a swap that improves the rating of one table at the expense of another is not allowed.
#I'm unsure as to whether this actually improves the results you get. Thus why the program gives you results both without and with it.
print("Type \"yes\" and hit enter to leave this program. This will likely vanish your results, so copy them somewhere first! ")
while True:
if input("> ")[0] == 'y':
break
| 37.984615 | 163 | 0.714662 | 1,594 | 9,876 | 4.297365 | 0.230238 | 0.044672 | 0.026277 | 0.022482 | 0.228759 | 0.197226 | 0.190511 | 0.159854 | 0.159854 | 0.159854 | 0 | 0.018907 | 0.175273 | 9,876 | 259 | 164 | 38.131274 | 0.822099 | 0.352471 | 0 | 0.319018 | 0 | 0.01227 | 0.11308 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018405 | false | 0.006135 | 0.02454 | 0 | 0.055215 | 0.147239 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
777b5e5839004ea6dfd6b25eec2bff8b3f5e00ac | 1,045 | py | Python | unit_test/test_obj_avoidance.py | riven314/capstone_dash_interface | 5eab25f4c15ad09aa889554820231175b0a3ed28 | [
"CC0-1.0"
] | 1 | 2019-12-10T14:59:12.000Z | 2019-12-10T14:59:12.000Z | unit_test/test_obj_avoidance.py | riven314/capstone_dash_interface | 5eab25f4c15ad09aa889554820231175b0a3ed28 | [
"CC0-1.0"
] | null | null | null | unit_test/test_obj_avoidance.py | riven314/capstone_dash_interface | 5eab25f4c15ad09aa889554820231175b0a3ed28 | [
"CC0-1.0"
] | 1 | 2020-01-01T12:24:51.000Z | 2020-01-01T12:24:51.000Z | import os
import sys
import time
PATH = os.path.join(os.getcwd(), '..')
sys.path.append(PATH)
import cv2
import numpy as np
import matplotlib.pyplot as plt
from obj_avoidance import run_avoidance
# for reference
label_dict = {1: 'wall', 2: 'floor', 3: 'plant', 4: 'ceiling', 5: 'furniture', 6: 'person', 7: 'door', 8: 'objects'}
# read in image
D1_IMG_PATH = os.path.join(os.getcwd(), '..', 'test_cases', 'test_obj_avoid_resize_d1.png')
SEG_IDX_PATH = os.path.join(os.getcwd(), '..', 'test_cases', 'test_obj_avoid_pred_idx.png')
d1_img = cv2.imread(D1_IMG_PATH, cv2.IMREAD_GRAYSCALE)
seg_idx = cv2.imread(SEG_IDX_PATH, cv2.IMREAD_GRAYSCALE)
for i in range(5):
start = time.time()
obj_tup, obj_img = run_avoidance(d1_img, seg_idx, depth_threshold = 8, visible_width = 90)
end = time.time()
print('obj_tup = {}'.format(obj_tup))
print('runtime: {}'.format(end - start))
rgb_img = cv2.cvtColor(obj_img, cv2.COLOR_GRAY2RGB)
plt.imshow(obj_img)
plt.show()
plt.imshow(rgb_img)
plt.show()
| 30.735294 | 117 | 0.683254 | 168 | 1,045 | 4.017857 | 0.440476 | 0.02963 | 0.044444 | 0.062222 | 0.16 | 0.16 | 0.127407 | 0.127407 | 0.127407 | 0.127407 | 0 | 0.028441 | 0.158852 | 1,045 | 33 | 118 | 31.666667 | 0.739477 | 0.029665 | 0 | 0.08 | 0 | 0 | 0.154555 | 0.056295 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.28 | 0 | 0.28 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
777d83516dc3cad54a0ce07bf96a195097e68974 | 1,519 | py | Python | demoapp/cabinet_structure/settings.py | pythoninner/myfistpython | c1f52d8a3a284a89c0f1e33615067e8845aa1617 | [
"MIT"
] | 17 | 2015-12-10T02:09:07.000Z | 2018-06-25T06:46:59.000Z | demoapp/cabinet_structure/settings.py | pythoninner/myfistpython | c1f52d8a3a284a89c0f1e33615067e8845aa1617 | [
"MIT"
] | 6 | 2015-12-09T08:09:52.000Z | 2016-01-11T06:53:10.000Z | demoapp/cabinet_structure/settings.py | pythoninner/myfistpython | c1f52d8a3a284a89c0f1e33615067e8845aa1617 | [
"MIT"
] | 9 | 2015-12-10T09:04:00.000Z | 2019-07-12T13:33:25.000Z | import logging
from django.conf import settings
logger = logging.getLogger(__name__)
class VerifySettings(object):
__settings_name__ = 'MAX_CABINET_ROWS_NUM'
__error_msg_dict__ = {'no_attribute': ("MAX_CABINET_ROWS_NUM must be specified in "
"your Django settings file"),
'no_int': ("MAX_CABINET_ROWS_NUM "
"must be specified integer"),
'min_cells': ("cabinet_cells "
"is bigger than MAX_CABINET_ROWS_NUM")}
def __init__(self, cabinet_cells):
self.cabinet_cells = cabinet_cells
@property
def settings_max_cabinet_num(self):
return getattr(
settings, self.__settings_name__, 6
)
def verify_cabinet_num_type(self):
if not isinstance(self.settings_max_cabinet_num, int):
log_msg = self.__error_msg_dict__['no_int']
logger.info(log_msg)
raise TypeError(log_msg)
else:
return self.settings_max_cabinet_num
def verify_cabinet_cells_gt_max_cabinet_num(self):
if self.cabinet_cells < self.settings_max_cabinet_num:
raise TypeError(self.__error_msg_dict__['min_cells'])
return self.verify_cabinet_num_type()
class SettingsConfig(VerifySettings):
def verify(self):
max_rows_num = super(SettingsConfig, self).verify_cabinet_cells_gt_max_cabinet_num()
return max_rows_num
| 33.755556 | 92 | 0.631995 | 175 | 1,519 | 4.942857 | 0.308571 | 0.115607 | 0.090173 | 0.078613 | 0.236994 | 0.150289 | 0.150289 | 0 | 0 | 0 | 0 | 0.000935 | 0.296248 | 1,519 | 44 | 93 | 34.522727 | 0.808232 | 0 | 0 | 0 | 0 | 0 | 0.147465 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151515 | false | 0 | 0.060606 | 0.030303 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
777dd8bf196474b662d087b28f11dea01ef85e30 | 756 | py | Python | l_04_list_and_dictionaries/dictionaries/ex_08_filter_base.py | VasAtanasov/SoftUni-Python-Fundamentals | 471d0537dd6e5c8b61ede92b7673c0d67e2964fd | [
"MIT"
] | 1 | 2019-06-05T11:16:08.000Z | 2019-06-05T11:16:08.000Z | l_04_list_and_dictionaries/dictionaries/ex_08_filter_base.py | VasAtanasov/SoftUni-Python-Fundamentals | 471d0537dd6e5c8b61ede92b7673c0d67e2964fd | [
"MIT"
] | null | null | null | l_04_list_and_dictionaries/dictionaries/ex_08_filter_base.py | VasAtanasov/SoftUni-Python-Fundamentals | 471d0537dd6e5c8b61ede92b7673c0d67e2964fd | [
"MIT"
] | null | null | null | def check_type(text):
try:
if float(text) == int(float(text)):
return {'Age': int(float(text))}
else:
return {'Salary': float(text)}
except ValueError:
return {'Position': text}
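# Illustrative behaviour of check_type (hypothetical inputs):
# check_type('25') -> {'Age': 25}
# check_type('1250.50') -> {'Salary': 1250.5}
# check_type('Manager') -> {'Position': 'Manager'}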
employees_data = []
while True:
in_line = input()
if 'filter base' == in_line:
break
[name, value] = filter(None, in_line.split(" -> "))
employees_data.append({name: check_type(value)})
criteria = input()
separator = '=' * 20
result = ''
for entry in range(len(employees_data)):
for name, data in employees_data[entry].items():
if criteria in data:
result += f'Name: {name}\n{criteria}: {data[criteria]}\n{separator}\n'
print(result)
| 23.625 | 82 | 0.589947 | 95 | 756 | 4.6 | 0.442105 | 0.12357 | 0.08238 | 0.077803 | 0.12357 | 0.12357 | 0 | 0 | 0 | 0 | 0 | 0.003515 | 0.247355 | 756 | 31 | 83 | 24.387097 | 0.764499 | 0 | 0 | 0 | 0 | 0 | 0.119048 | 0.041005 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0 | 0 | 0.173913 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
777ef7869d9e99660684cbd42f3a06660212884f | 905 | py | Python | install_release.py | NunoEdgarGFlowHub/poptorch | 2e69b81c7c94b522d9f57cc53d31be562f5e3749 | [
"MIT"
] | null | null | null | install_release.py | NunoEdgarGFlowHub/poptorch | 2e69b81c7c94b522d9f57cc53d31be562f5e3749 | [
"MIT"
] | null | null | null | install_release.py | NunoEdgarGFlowHub/poptorch | 2e69b81c7c94b522d9f57cc53d31be562f5e3749 | [
"MIT"
] | null | null | null | # Copyright (c) 2018 Graphcore Ltd. All rights reserved.
# This script is run by the release agent to create a release of PopTorch
def install_release(release_utils, release_id, snapshot_id, version_str):
# Tag must contain the string 'poptorch' to keep it unique.
tag = "{}-poptorch".format(version_str)
release_utils.log.info('Tagging poptorch release ' + tag)
# Create the release on the document server.
release_utils.create_document_release(snapshot_id)
# Tag the view repository with the release.
release_utils.tag_view_repo(
'ssh://git@phabricator.sourcevertex.net/diffusion/' \
+ 'POPONNXVIEW/poponnxview.git',
snapshot_id,
release_id,
tag)
# Increment the point version number.
release_utils.increment_version_point(
'ssh://git@phabricator.sourcevertex.net/diffusion/' \
+ 'POPTORCH/poptorch.git')
| 34.807692 | 73 | 0.707182 | 115 | 905 | 5.4 | 0.486957 | 0.096618 | 0.061192 | 0.093398 | 0.132045 | 0.132045 | 0 | 0 | 0 | 0 | 0 | 0.00554 | 0.20221 | 905 | 25 | 74 | 36.2 | 0.854571 | 0.337017 | 0 | 0.153846 | 0 | 0 | 0.306914 | 0.246206 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
778181d3874c190067f24ee03aa82a2d0d031e77 | 1,667 | py | Python | ciscosparkapi/tests/conftest.py | Futuramistic/Bot | e22672e9d627faf3d9393feb04d214cb62dec98d | [
"MIT"
] | null | null | null | ciscosparkapi/tests/conftest.py | Futuramistic/Bot | e22672e9d627faf3d9393feb04d214cb62dec98d | [
"MIT"
] | 1 | 2021-06-01T21:52:12.000Z | 2021-06-01T21:52:12.000Z | ciscosparkapi/tests/conftest.py | Futuramistic/Bot | e22672e9d627faf3d9393feb04d214cb62dec98d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""pytest configuration and top-level fixtures."""
__author__ = "Chris Lunsford"
__author_email__ = "chrlunsf@cisco.com"
__copyright__ = "Copyright (c) 2016-2018 Cisco and/or its affiliates."
__license__ = "MIT"
import os
import string
import tempfile
import pytest
from tests.utils import download_file
pytest_plugins = [
'tests.test_ciscosparkapi',
'tests.api.test_memberships',
'tests.api.test_messages',
'tests.api.test_people',
'tests.api.test_rooms',
'tests.api.test_teammemberships',
'tests.api.test_teams',
'tests.api.test_webhooks',
'tests.api.test_organizations',
'tests.api.test_licenses',
'tests.api.test_roles',
]
TEST_DOMAIN = "cmlccie.com"
TEST_FILE_URL = "https://developer.ciscospark.com/images/logo_spark_lg@256.png"
email_template = string.Template("test${number}@" + TEST_DOMAIN)
# Helper Functions
def new_email_generator():
i = 50
while True:
email_address = email_template.substitute(number=i)
i += 1
yield email_address
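# e.g. the first value yielded is 'test50@cmlccie.com', given the template above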
# pytest Fixtures
@pytest.fixture("session")
def temp_directory():
directory_abs_path = tempfile.mkdtemp()
yield directory_abs_path
os.rmdir(directory_abs_path)
@pytest.fixture("session")
def local_file(temp_directory):
file = download_file(TEST_FILE_URL, temp_directory)
yield file
os.remove(file)
@pytest.fixture(scope="session")
def get_new_email_address():
generator = new_email_generator()
def inner_function():
return next(generator)
return inner_function
| 21.101266 | 80 | 0.675465 | 200 | 1,667 | 5.32 | 0.455 | 0.075188 | 0.112782 | 0.043233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011442 | 0.213557 | 1,667 | 78 | 81 | 21.371795 | 0.800153 | 0.059988 | 0 | 0.042553 | 0 | 0 | 0.305199 | 0.133693 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106383 | false | 0 | 0.106383 | 0.021277 | 0.255319 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77842d9f499b49e55088705f1121160416001ae6 | 1,550 | py | Python | nalu.py | chauhanjatin10/NeuralArithmeticLogicUnits | c8a71e4bb99b7f5bdb5c1b85a75376b0322d6853 | [
"MIT"
] | null | null | null | nalu.py | chauhanjatin10/NeuralArithmeticLogicUnits | c8a71e4bb99b7f5bdb5c1b85a75376b0322d6853 | [
"MIT"
] | null | null | null | nalu.py | chauhanjatin10/NeuralArithmeticLogicUnits | c8a71e4bb99b7f5bdb5c1b85a75376b0322d6853 | [
"MIT"
] | null | null | null | import math
import torch
import torch.nn as nn
import torch.nn.functional as Func
import torch.nn.init as init
from nac import NeuralAccumulatorCell
from torch.nn.parameter import Parameter
class NeuralArithmeticLogicCell(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.eps = 1e-8
self.W_hat = Parameter(torch.Tensor(out_dim, in_dim))
self.M_hat = Parameter(torch.Tensor(out_dim, in_dim))
self.nac = NeuralAccumulatorCell(in_dim, out_dim)
self.G = Parameter(torch.Tensor(out_dim, in_dim))
self.register_parameter('bias', None)
init.kaiming_uniform_(self.G, a=math.sqrt(5))
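# NALU (Trask et al., 2018): y = g * a + (1 - g) * m, where a = NAC(x) handles
# addition/subtraction, m = exp(NAC(log(|x| + eps))) handles multiplication/division,
# and the gate g = sigmoid(Gx) interpolates between the two paths.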
def forward(self, inputs):
self.out_nac = self.nac(inputs)
self.g = torch.sigmoid(Func.linear(inputs, self.G, self.bias))  # F.sigmoid is deprecated
self.add_part = self.out_nac * self.g
self.log_part = torch.log(torch.abs(inputs) + self.eps)
self.m_part = torch.exp(self.nac(self.log_part))
self.mul_part = (1-self.g)*self.m_part
self.output = self.add_part + self.mul_part
return self.output
class NALU_mutiple_cells(nn.Module):
def __init__(self, num_layers, in_dim, hidden_dim,out_dim):
super().__init__()
self.num_layers = num_layers
self.in_dim = in_dim
self.out_dim = out_dim
self.hidden_dim = hidden_dim
layers = []
for i in range(num_layers):
layers.append(NeuralArithmeticLogicCell(
hidden_dim if i>0 else in_dim,
hidden_dim if i<num_layers-1 else out_dim))
self.model = nn.Sequential(*layers)
def forward(self, inputs):
return self.model(inputs)
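# Minimal usage sketch (illustrative; shapes and sizes are arbitrary, class name kept as defined above):
# model = NALU_mutiple_cells(num_layers=2, in_dim=2, hidden_dim=8, out_dim=1)
# out = model(torch.randn(16, 2))  # -> tensor of shape (16, 1)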
| 28.703704 | 63 | 0.740645 | 257 | 1,550 | 4.214008 | 0.252918 | 0.050785 | 0.041551 | 0.055402 | 0.237304 | 0.202216 | 0.165282 | 0.165282 | 0.132964 | 0.062789 | 0 | 0.004508 | 0.14129 | 1,550 | 53 | 64 | 29.245283 | 0.809166 | 0 | 0 | 0.186047 | 0 | 0 | 0.002581 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093023 | false | 0 | 0.162791 | 0.023256 | 0.348837 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
778cfa1ba927b0196782d749a97b1b4c8af54073 | 5,170 | py | Python | src/vivarium/examples/disease_model/population.py | ihmeuw/vivarium | 77393d2e84ff2351c926f65b33272b7225cf9628 | [
"BSD-3-Clause"
] | 41 | 2017-07-14T03:39:06.000Z | 2022-03-20T05:36:33.000Z | src/vivarium/examples/disease_model/population.py | ihmeuw/vivarium | 77393d2e84ff2351c926f65b33272b7225cf9628 | [
"BSD-3-Clause"
] | 26 | 2017-08-08T22:13:44.000Z | 2021-08-18T00:14:54.000Z | src/vivarium/examples/disease_model/population.py | ihmeuw/vivarium | 77393d2e84ff2351c926f65b33272b7225cf9628 | [
"BSD-3-Clause"
] | 8 | 2017-08-03T17:15:39.000Z | 2021-09-30T21:57:50.000Z | import pandas as pd
from vivarium.framework.engine import Builder
from vivarium.framework.population import SimulantData
from vivarium.framework.event import Event
class BasePopulation:
"""Generates a base population with a uniform distribution of age and sex.
Attributes
----------
configuration_defaults :
A set of default configuration values for this component. These can be
overwritten in the simulation model specification or by providing
override values when constructing an interactive simulation.
"""
configuration_defaults = {
'population': {
# The range of ages to be generated in the initial population
'age_start': 0,
'age_end': 100,
# Note: There is also a 'population_size' key.
},
}
def __init__(self):
self.name = 'base_population'
# noinspection PyAttributeOutsideInit
def setup(self, builder: Builder):
"""Performs this component's simulation setup.
The ``setup`` method is automatically called by the simulation
framework. The framework passes in a ``builder`` object which
provides access to a variety of framework subsystems and metadata.
Parameters
----------
builder :
Access to simulation tools and subsystems.
"""
self.config = builder.configuration
self.with_common_random_numbers = bool(self.config.randomness.key_columns)
self.register = builder.randomness.register_simulants
if (self.with_common_random_numbers
and not ['entrance_time', 'age'] == self.config.randomness.key_columns):
raise ValueError("If running with CRN, you must specify ['entrance_time', 'age'] as"
"the randomness key columns.")
self.age_randomness = builder.randomness.get_stream('age_initialization',
for_initialization=self.with_common_random_numbers)
self.sex_randomness = builder.randomness.get_stream('sex_initialization')
columns_created = ['age', 'sex', 'alive', 'entrance_time']
builder.population.initializes_simulants(self.on_initialize_simulants,
creates_columns=columns_created)
self.population_view = builder.population.get_view(columns_created)
builder.event.register_listener('time_step', self.age_simulants)
def on_initialize_simulants(self, pop_data: SimulantData):
"""Called by the simulation whenever new simulants are added.
This component is responsible for creating and filling four columns
in the population state table:
'age' :
The age of the simulant in fractional years.
'sex' :
The sex of the simulant. One of {'Male', 'Female'}
'alive' :
Whether or not the simulant is alive. One of {'alive', 'dead'}
'entrance_time' :
The time that the simulant entered the simulation. The 'birthday'
for simulants that enter as newborns. A `pandas.Timestamp`.
Parameters
----------
pop_data :
A record containing the index of the new simulants, the
start of the time step the simulants are added on, the width
of the time step, and the age boundaries for the simulants to
generate.
"""
age_start = self.config.population.age_start
age_end = self.config.population.age_end
if age_start == age_end:
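# degenerate configured range: spread ages uniformly over the simulant
# creation window, converted from a pandas Timedelta to fractional years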
age_window = pop_data.creation_window / pd.Timedelta(days=365)
else:
age_window = age_end - age_start
age_draw = self.age_randomness.get_draw(pop_data.index)
age = age_start + age_draw * age_window
if self.with_common_random_numbers:
population = pd.DataFrame({'entrance_time': pop_data.creation_time,
'age': age.values}, index=pop_data.index)
self.register(population)
population['sex'] = self.sex_randomness.choice(pop_data.index, ['Male', 'Female'])
population['alive'] = 'alive'
else:
population = pd.DataFrame(
{'age': age.values,
'sex': self.sex_randomness.choice(pop_data.index, ['Male', 'Female']),
'alive': pd.Series('alive', index=pop_data.index),
'entrance_time': pop_data.creation_time},
index=pop_data.index)
self.population_view.update(population)
def age_simulants(self, event: Event):
"""Updates simulant age on every time step.
Parameters
----------
event :
An event object emitted by the simulation containing an index
representing the simulants affected by the event and timing
information.
"""
population = self.population_view.get(event.index, query="alive == 'alive'")
population['age'] += event.step_size / pd.Timedelta(days=365)
self.population_view.update(population)
| 39.769231 | 111 | 0.623985 | 580 | 5,170 | 5.408621 | 0.293103 | 0.024546 | 0.022952 | 0.025502 | 0.161938 | 0.068856 | 0.030602 | 0.030602 | 0.030602 | 0.030602 | 0 | 0.002727 | 0.290716 | 5,170 | 129 | 112 | 40.077519 | 0.852741 | 0.339458 | 0 | 0.071429 | 0 | 0 | 0.102041 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
778df52cd98d358aeeaa3cd2f864bdb6647f52ff | 5,390 | py | Python | python3/ssm/requester.py | renardbebe/ssm-rotation-sdk-python | cffe573f5462d0b93be9b5fb110970ade98f1642 | [
"Apache-2.0"
] | null | null | null | python3/ssm/requester.py | renardbebe/ssm-rotation-sdk-python | cffe573f5462d0b93be9b5fb110970ade98f1642 | [
"Apache-2.0"
] | null | null | null | python3/ssm/requester.py | renardbebe/ssm-rotation-sdk-python | cffe573f5462d0b93be9b5fb110970ade98f1642 | [
"Apache-2.0"
] | 1 | 2021-07-21T02:46:58.000Z | 2021-07-21T02:46:58.000Z | import logging
from threading import Timer
from tencentcloud.ssm.v20190923 import models, ssm_client
from tencentcloud.common.profile import client_profile
from tencentcloud.common import credential
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
class Error:
"""自定义错误类
"""
def __init__(self, message=None):
"""
:param message: error message
:type message: str
"""
if message is None:
self.message = None
else:
self.message = message
class LoopTimer(Timer):
"""定时器类
"""
def __init__(self, interval, function, args, kwargs):
Timer.__init__(self, interval, function, args, kwargs)
def run(self):
"""每隔指定时间调用一次函数
"""
while True:
self.finished.wait(self.interval)
if self.finished.is_set():
self.finished.set()
break
self.function(*self.args, **self.kwargs)
class DbAccount:
"""DB 账号信息类
"""
def __init__(self, params=None):
"""
:param user_name: user name
:type user_name: str
:param password: password
:type password: str
"""
if params is None:
self.user_name = None
self.password = None
else:
self.user_name = params['user_name'] if 'user_name' in params else None
self.password = params['password'] if 'password' in params else None
class SsmAccount:
"""SSM 账号信息类
"""
def __init__(self, params=None):
"""
:param secret_id: secret ID, used to identify the caller (similar to a user name)
:type secret_id: str
:param secret_key: secret key, used to authenticate the caller (similar to a password)
:type secret_key: str
:param url: SSM service endpoint
:type url: str
:param region: region
:type region: str
"""
if params is None:
self.secret_id = None # string `yaml:"secret_id"`
self.secret_key = None # string `yaml:"secret_key"`
self.url = None # string `yaml:"url"`
self.region = None # string `yaml:"region"`
else:
self.secret_id = params['secret_id'] if 'secret_id' in params else None
self.secret_key = params[
'secret_key'] if 'secret_key' in params else None
self.url = params['url'] if 'url' in params else None
self.region = params['region'] if 'region' in params else None
def __get_client(secret_id, secret_key, url, region):
"""创建 SSM 客户端实例
:param secret_key: 密钥ID
:type secret_key: str
:param url: SSM 服务地址
:type url: str
:param region: 地域
:type region: str
:rtype :client: SSM 客户端实例
:rtype :error: 异常报错信息
"""
cred = credential.Credential(secret_id, secret_key)
http_profile = client_profile.HttpProfile()
http_profile.reqMethod = "POST"
if url and len(url) != 0:
http_profile.endpoint = url
# client configuration
cpf = client_profile.ClientProfile()
cpf.httpProfile = http_profile
client, err = None, None
try:
# create the SSM client object
client = ssm_client.SsmClient(cred, region, cpf)
except TencentCloudSDKException as e:
err = Error(str(e.args[0]))
return client, err
def __get_current_product_secret_value(secret_name, ssm_acc):
"""获取当前云产品凭据内容
:param secret_name: 凭据名称
:type secret_name: str
:param ssm_acc: SSM 账号信息
:type ssm_acc: SsmAccount class
:rtype :str: 凭据内容
:rtype :error: 异常报错信息
"""
print("get value for secret_name=%s" % secret_name)
# print("get_client: ", ssm_acc.secret_id, ssm_acc.secret_key, ssm_acc.url, ssm_acc.region)
client, err = __get_client(ssm_acc.secret_id, ssm_acc.secret_key, ssm_acc.url,
ssm_acc.region)
if err:
logging.error("create ssm client error: ", err.message)
return None, Error("create ssm HTTP client error: %s" % err.message)
# fetch the secret value
request = models.GetSecretValueRequest()
request.SecretName = secret_name
request.VersionId = "SSM_Current" # hard-code
rsp = None
try:
rsp = client.GetSecretValue(request)
except TencentCloudSDKException as e:
err = Error(str(e.args[0]))
print("ssm GetSecretValue error: " + err.message)
if err:
logging.error("ssm GetSecretValue error: " + err.message)
return None, Error("ssm GetSecretValue error: " + err.message)
return rsp.SecretString, None
def get_current_account(secret_name, ssm_acc):
"""获取当前账号信息
:param secret_name: 凭据名称
:type secret_name: str
:param ssm_acc: SSM 账号信息
:type ssm_acc: SsmAccount class
:rtype :DbAccount: 账号信息
:rtype :error: 异常报错信息
"""
# fetch the secret value corresponding to secret_name
secret_value, err = __get_current_product_secret_value(secret_name, ssm_acc)
if err:
logging.error("failed to GetSecretValue, err=" + err.message)
return None, err
# secret_value is a JSON-formatted string such as: {"UserName":"test_user","Password":"test_pwd"}
print("secret value: ", secret_value)
if len(secret_value) == 0:
return None, Error("no valid account info found because secret value is empty")
current_user_and_password = json.loads(secret_value)  # safer than eval for untrusted strings
account = DbAccount({'user_name': current_user_and_password["UserName"],
'password': current_user_and_password["Password"]})
return account, None
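# Minimal usage sketch (illustrative; all credentials and names below are placeholders):
# ssm_acc = SsmAccount({'secret_id': 'AKID...', 'secret_key': '...',
#                       'url': 'ssm.tencentcloudapi.com', 'region': 'ap-guangzhou'})
# account, err = get_current_account('my-db-secret', ssm_acc)  # account.user_name / account.password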
| 29.779006 | 101 | 0.625974 | 663 | 5,390 | 4.897436 | 0.209653 | 0.027718 | 0.022174 | 0.029566 | 0.310748 | 0.276255 | 0.23283 | 0.18725 | 0.18725 | 0.160148 | 0 | 0.003064 | 0.273469 | 5,390 | 180 | 102 | 29.944444 | 0.826098 | 0.227087 | 0 | 0.181818 | 0 | 0 | 0.099741 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.045455 | 0.068182 | 0 | 0.284091 | 0.034091 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
778e86e22df74cae729a35ebbd9cf3422b98571c | 5,159 | py | Python | bdd100k/eval/lane_test.py | siyliepfl/bdd100k | f38e9b5cd4e21f7a47822734ffa4d397f64bf04a | [
"BSD-3-Clause"
] | null | null | null | bdd100k/eval/lane_test.py | siyliepfl/bdd100k | f38e9b5cd4e21f7a47822734ffa4d397f64bf04a | [
"BSD-3-Clause"
] | null | null | null | bdd100k/eval/lane_test.py | siyliepfl/bdd100k | f38e9b5cd4e21f7a47822734ffa4d397f64bf04a | [
"BSD-3-Clause"
] | null | null | null | """Test cases for lane.py."""
import os
import unittest
from typing import Dict
import numpy as np
from .lane import (
eval_lane_per_threshold,
evaluate_lane_marking,
get_foreground,
get_lane_class,
sub_task_funcs,
)
class TestGetLaneClass(unittest.TestCase):
"""Test cases for the lane specific channel extraction."""
def test_partialled_classes(self) -> None:
"""Check the function that partial get_lane_class."""
for num in range(255):
byte = np.array(num, dtype=np.uint8)
if num & 8:
self.assertTrue(get_lane_class(byte, 1, 3, 1))
else:
self.assertTrue(get_lane_class(byte, 0, 3, 1))
self.assertTrue(get_foreground(byte))
if num & (1 << 5):
self.assertTrue(sub_task_funcs["direction"](byte, 1))
else:
self.assertTrue(sub_task_funcs["direction"](byte, 0))
if num & (1 << 4):
self.assertTrue(sub_task_funcs["style"](byte, 1))
else:
self.assertTrue(sub_task_funcs["style"](byte, 0))
class TestEvalLanePerThreshold(unittest.TestCase):
"""Test cases for the per image per threshold lane marking evaluation."""
def test_two_parallel_lines(self) -> None:
"""Check the correctness of the function in general cases."""
a = np.zeros((10, 10), dtype=bool)  # plain bool: the np.bool alias was removed in NumPy >= 1.24
b = np.zeros((10, 10), dtype=bool)
a[3, 3:7] = True
b[7, 3:7] = True
for radius in [1, 2, 3]:
self.assertAlmostEqual(eval_lane_per_threshold(a, b, radius), 0.0)
for radius in [4, 5, 6]:
self.assertAlmostEqual(eval_lane_per_threshold(a, b, radius), 1.0)
def test_two_vertical_lines(self) -> None:
"""Check the correctness of the function in general cases."""
a = np.zeros((10, 10), dtype=bool)
b = np.zeros((10, 10), dtype=bool)
a[3, 3:6] = True
b[5:8, 7] = True
self.assertAlmostEqual(eval_lane_per_threshold(a, b, 2), 0.0)
self.assertAlmostEqual(eval_lane_per_threshold(a, b, 3), 1 / 3)
self.assertAlmostEqual(eval_lane_per_threshold(a, b, 4), 2 / 3)
self.assertAlmostEqual(eval_lane_per_threshold(a, b, 5), 1.0)
def test_two_vertical_border_lines(self) -> None:
"""Check the correctness of the function in general cases."""
a = np.zeros((10, 10), dtype=bool)
b = np.zeros((10, 10), dtype=bool)
a[1:6, 1:4] = True
b[4:7, 3:8] = True
self.assertAlmostEqual(eval_lane_per_threshold(a, b, 2), 0.0)
self.assertAlmostEqual(eval_lane_per_threshold(a, b, 3), 0.4)
self.assertAlmostEqual(eval_lane_per_threshold(a, b, 4), 0.70588235)
self.assertAlmostEqual(eval_lane_per_threshold(a, b, 5), 1.0)
class TestEvaluateLaneMarking(unittest.TestCase):
"""Test cases for the evaluate_lane_marking function."""
def test_mock_cases(self) -> None:
"""Check the peformance of the mock case."""
cur_dir = os.path.dirname(os.path.abspath(__file__))
gt_dir = "{}/testcases/lane/gts".format(cur_dir)
res_dir = "{}/testcases/lane/res".format(cur_dir)
f_scores = evaluate_lane_marking(gt_dir, res_dir, bound_ths=[1, 2])
gt_f_scores: Dict[str, float] = {
"1.0_direction_parallel": 79.46877879291574,
"2.0_direction_parallel": 87.61816039690531,
"1.0_direction_vertical": 58.9375575858315,
"2.0_direction_vertical": 75.23632079381062,
"1.0_direction_avg": 100.0,
"2.0_direction_avg": 100.0,
"1.0_style_solid": 79.46877879291574,
"2.0_style_solid": 87.61816039690531,
"1.0_style_dashed": 58.9375575858315,
"2.0_style_dashed": 75.23632079381062,
"1.0_style_avg": 100.0,
"2.0_style_avg": 100.0,
"1.0_category_crosswalk": 88.24432582570225,
"2.0_category_crosswalk": 93.82889258902341,
"1.0_category_double_other": 99.01265721381078,
"2.0_category_double_other": 100.0,
"1.0_category_double_white": 100.0,
"2.0_category_double_white": 100.0,
"1.0_category_double_yellow": 100.0,
"2.0_category_double_yellow": 100.0,
"1.0_category_road_curb": 75.0,
"2.0_category_road_curb": 75.16008049762166,
"1.0_category_single_other": 59.173962031069706,
"2.0_category_single_other": 75.48380881221992,
"1.0_category_single_white": 100.0,
"2.0_category_single_white": 100.0,
"1.0_category_single_yellow": 89.27983318704442,
"2.0_category_single_yellow": 99.98725140234575,
"1.0_category_avg": 83.48815417369305,
"2.0_category_avg": 100.0,
"1.0_total_avg": 94.496051391231,
"2.0_total_avg": 100.0,
"average": 97.2480256956155,
}
for key, val in gt_f_scores.items():
self.assertAlmostEqual(val, f_scores[key])
if __name__ == "__main__":
unittest.main()
| 39.381679 | 78 | 0.611165 | 705 | 5,159 | 4.221277 | 0.212766 | 0.012769 | 0.040659 | 0.073925 | 0.480175 | 0.43918 | 0.319556 | 0.289987 | 0.266465 | 0.218078 | 0 | 0.137976 | 0.262454 | 5,159 | 130 | 79 | 39.684615 | 0.644152 | 0.087226 | 0 | 0.128713 | 0 | 0 | 0.159768 | 0.111945 | 0 | 0 | 0 | 0 | 0.178218 | 1 | 0.049505 | false | 0 | 0.049505 | 0 | 0.128713 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
778eae6f766166711785d06fe1c129980226b180 | 6,178 | py | Python | draw.py | squeezeday/raspberry-pi-e-ink-display | 97a16f7bd58ba6d6c800864c945a961870e823ed | [
"MIT"
] | 5 | 2019-09-10T09:57:59.000Z | 2021-08-17T18:10:21.000Z | draw.py | squeezeday/raspberry-pi-e-ink-display | 97a16f7bd58ba6d6c800864c945a961870e823ed | [
"MIT"
] | 5 | 2021-03-19T03:32:34.000Z | 2022-03-11T23:58:49.000Z | draw.py | squeezeday/raspberry-pi-e-ink-display | 97a16f7bd58ba6d6c800864c945a961870e823ed | [
"MIT"
] | null | null | null | from PIL import Image, ImageDraw, ImageFont
from urllib.request import Request, urlopen
from urllib.parse import quote, unquote
import json
import pytz
import os
from calendarhelper import getCaldavEvents, calendarEvent
from datetime import datetime, date, timedelta, tzinfo, timezone
from tzlocal import get_localzone
from dotenv import load_dotenv
load_dotenv()
import locale
locale.setlocale(locale.LC_ALL, os.getenv('LOCALE'))
home_assistant_base_url = os.getenv('HOME_ASSISTANT_BASE_URL')
home_assistant_access_token = os.getenv('HOME_ASSISTANT_ACCESS_TOKEN')
caldav_url = os.getenv('CALDAV_URL')
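# note: height is read from DISPLAY_WIDTH and width from DISPLAY_HEIGHT,
# presumably because the e-ink panel is driven rotated (portrait layout)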
display_height = int(os.getenv('DISPLAY_WIDTH'))
display_width = int(os.getenv('DISPLAY_HEIGHT'))
localtimezone = get_localzone()
# cheat sheet https://cdn.materialdesignicons.com/4.3.95/
weather_icons = {
'cloudy': '%EF%96%90',
'fog': '%EF%96%91',
'hail': '%EF%96%92',
'hurricane': '%EF%A2%97',
'lightning': '%EF%96%93',
'lightning-rainy': '%EF%99%BD',
'night': '%EF%96%94',
'partlycloudy': '%EF%96%95',
'pouring': '%EF%96%96',
'rainy': '%EF%96%97',
'snowy': '%EF%96%98',
'snowy-rainy': '%EF%99%BE',
'sunny': "%EF%96%99",
'sunset-down': '%EF%96%9B',
'sunset-up': '%EF%96%9C',
'windy': '%EF%96%9D',
'thermometer': '\uF50F',
'humidity': '\uF58E',
'sunset': '\uF59B',
'sunrise': '\uF59C'
}
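# icon values such as '%EF%96%90' are percent-encoded UTF-8 for Material Design Icons
# private-use glyphs; urllib's unquote() decodes them to a single character and leaves
# the already-decoded '\uF...' entries untouched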
def get_ha_sensor_state(state):
try:
req = Request(home_assistant_base_url + state)
req.add_header('Authorization', 'Bearer ' + home_assistant_access_token)
content = urlopen(req).read()
sensor_data = json.loads(content.decode("utf-8"))
return sensor_data
except Exception as e:
print("Error reading " + state + ": " + str(e))
return None
def create_image():
# init black/white image
black_image = Image.new('1', (display_width, display_height), 255)
draw_black = ImageDraw.Draw(black_image)
# init fonts
fontForecastToday = ImageFont.truetype('materialdesignicons-webfont.ttf', 48)
fontForecast = ImageFont.truetype('materialdesignicons-webfont.ttf', 32)
fontThermometer = ImageFont.truetype('SourceSansPro-Bold.ttf', 36)
fontSun = ImageFont.truetype('SourceSansPro-Bold.ttf', 24)
fontEventToday = ImageFont.truetype('SourceSansPro-Regular.ttf', 26)
fontEvent = ImageFont.truetype('SourceSansPro-Regular.ttf', 24)
fontDateToday = ImageFont.truetype('SourceSansPro-Bold.ttf', 40)
fontDate = ImageFont.truetype('SourceSansPro-Bold.ttf', 26)
now = datetime.now().astimezone(localtimezone)
# get weather forecast
weather_data = get_ha_sensor_state('states/weather.smhi_home') # or states/weather.dark_sky
# get sunrise/sunset
sun_data = get_ha_sensor_state('states/sun.sun')['attributes']
# get sensor data
outdoor_sensor = get_ha_sensor_state('states/sensor.outdoor_2')
# get calendar events
events = getCaldavEvents(caldav_url)
# draw today
msg = now.strftime('%A %-d/%-m')
text_w, text_h = draw_black.textsize(msg, font = fontDateToday)
draw_black.text((10, 10), msg, font = fontDateToday, fill = 0)
# draw today's forecast
if weather_data is not None:
draw_black.text((245, 10), unquote(weather_icons[weather_data['attributes']['forecast'][0]['condition']]), font = fontForecastToday, fill = 0)
draw_black.text((295, 10), str(weather_data['attributes']['forecast'][0]['temperature']) + ' °C' , font = fontThermometer, fill = 0)
# draw current outdoor temp
if outdoor_sensor is not None:
str_current_outdoor_temp = str(outdoor_sensor["state"] + ' ' + outdoor_sensor['attributes']['unit_of_measurement'])
text_w, text_h = draw_black.textsize(str_current_outdoor_temp, font = fontThermometer)
current_outdoor_temp_y = display_height - 10 - text_h
draw_black.text((35, current_outdoor_temp_y), str_current_outdoor_temp, font = fontThermometer, fill = 0)
draw_black.text((0, current_outdoor_temp_y+10), unquote(weather_icons[outdoor_sensor['attributes']['icon'][4:]]), font = fontForecast, fill = 0)
# draw sunrise/sunset hours
if sun_data is not None:
sunrise = pytz.utc.localize(datetime.strptime(sun_data['next_rising'][:-6], '%Y-%m-%dT%H:%M:%S'))
sunset = pytz.utc.localize(datetime.strptime(sun_data['next_setting'][:-6], '%Y-%m-%dT%H:%M:%S'))
draw_black.text((200, current_outdoor_temp_y+10), sunrise.astimezone(localtimezone).strftime("%H:%M",), font = fontSun, fill = 0)
draw_black.text((170, current_outdoor_temp_y+10), unquote(weather_icons['sunrise']), font = fontForecast, fill = 0)
draw_black.text((300, current_outdoor_temp_y+10), sunset.astimezone(localtimezone).strftime("%H:%M",), font = fontSun, fill = 0)
draw_black.text((270, current_outdoor_temp_y+10), unquote(weather_icons['sunset']), font = fontForecast, fill = 0)
max_y = display_height - 80
y = 30
date = now
dateutc = datetime(now.year, now.month, now.day, 0,0,0,0, timezone.utc) # FIXME: make timezone naive
for i in range(7):
if y > max_y:
break
# draw day and forecast
if i != 0:
draw_black.text((10, y), date.strftime("%A %-d/%-m"), font = fontDate, fill = 0)
if weather_data is not None:
draw_black.text((280, y), unquote(weather_icons[weather_data['attributes']['forecast'][i]['condition']]), font = fontForecast, fill = 0)
draw_black.text((320, y), str(weather_data['attributes']['forecast'][i]['temperature']) + ' °C' , font = fontSun, fill = 0)
y+=30
# draw events for the day
if events is not None:
for ev in events:
eventStart = ev.datetimestart
eventEnd = ev.datetimeend
if y > max_y:
break
if not ev.date == date.date() and not eventStart <= dateutc < eventEnd:
continue
if not ev.allday:
row = "{} {}".format(eventStart.astimezone(localtimezone).strftime("%H:%M",), ev.summary)
else:
row = ev.summary
if i == 0:
font = fontEventToday
else:
font = fontEvent
draw_black.text((10,y), row, font = font, fill = 0)
y += 24
# increment
y += 12
date = date + timedelta(days=1)
dateutc = dateutc + timedelta(days=1)
return black_image
| 37.442424 | 148 | 0.683555 | 846 | 6,178 | 4.833333 | 0.288416 | 0.037417 | 0.04133 | 0.023967 | 0.340915 | 0.186843 | 0.154561 | 0.097334 | 0.047444 | 0.030325 | 0 | 0.03505 | 0.164131 | 6,178 | 164 | 149 | 37.670732 | 0.75639 | 0.058271 | 0 | 0.065574 | 0 | 0 | 0.175659 | 0.051198 | 0 | 0 | 0 | 0.006098 | 0 | 1 | 0.016393 | false | 0 | 0.090164 | 0 | 0.131148 | 0.008197 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77918e9deea502e2876228cdb462978e97466087 | 2,570 | py | Python | bingads/v13/bulk/entities/audiences/bulk_remarketing_list.py | pawelulita/BingAds-Python-SDK | e7b5a618e87a43d0a5e2c79d9aa4626e208797bd | [
"MIT"
] | 86 | 2016-02-29T03:24:28.000Z | 2022-03-29T09:30:21.000Z | bingads/v13/bulk/entities/audiences/bulk_remarketing_list.py | pawelulita/BingAds-Python-SDK | e7b5a618e87a43d0a5e2c79d9aa4626e208797bd | [
"MIT"
] | 135 | 2016-04-12T13:31:28.000Z | 2022-03-29T02:18:51.000Z | bingads/v13/bulk/entities/audiences/bulk_remarketing_list.py | pawelulita/BingAds-Python-SDK | e7b5a618e87a43d0a5e2c79d9aa4626e208797bd | [
"MIT"
] | 154 | 2016-04-08T04:11:27.000Z | 2022-03-29T21:21:07.000Z | from bingads.v13.bulk.entities import *
from bingads.service_client import _CAMPAIGN_OBJECT_FACTORY_V13
from bingads.v13.internal.bulk.entities.single_record_bulk_entity import _SingleRecordBulkEntity
from bingads.v13.internal.bulk.mappings import _SimpleBulkMapping
from bingads.v13.internal.bulk.string_table import _StringTable
from bingads.v13.internal.extensions import *
from .bulk_audience import BulkAudience
class BulkRemarketingList(BulkAudience):
""" Represents an Remarketing List that can be read or written in a bulk file.
This class exposes the :attr:`remarketing_list` property that can be read and written as fields of the
Remarketing List record in a bulk file.
For more information, see Remarketing List at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self,
remarketing_list=None,
status=None,):
super(BulkRemarketingList, self).__init__(audience = remarketing_list, status = status)
_MAPPINGS = [
_SimpleBulkMapping(
_StringTable.TagId,
field_to_csv=lambda c: bulk_str(c.remarketing_list.TagId),
csv_to_field=lambda c, v: setattr(c.remarketing_list, 'TagId', int(v) if v else None)
),
_SimpleBulkMapping(
_StringTable.RemarketingRule,
field_to_csv=lambda c: field_to_csv_RemarketingRule(c.remarketing_list),
csv_to_field=lambda c, v: csv_to_field_RemarketingRule(c.remarketing_list, v)
),
]
@property
def remarketing_list(self):
""" Defines a Remarketing List """
return self._audience
@remarketing_list.setter
def remarketing_list(self, remarketing_list):
self._audience = remarketing_list
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self.remarketing_list, 'remarketing_list')
super(BulkRemarketingList, self).process_mappings_to_row_values(row_values, exclude_readonly_data)
self.convert_to_values(row_values, BulkRemarketingList._MAPPINGS)
def process_mappings_from_row_values(self, row_values):
self.remarketing_list = _CAMPAIGN_OBJECT_FACTORY_V13.create('RemarketingList')
super(BulkRemarketingList, self).process_mappings_from_row_values(row_values)
row_values.convert_to_entity(self, BulkRemarketingList._MAPPINGS)
| 39.538462 | 106 | 0.731518 | 303 | 2,570 | 5.881188 | 0.326733 | 0.159933 | 0.039282 | 0.049383 | 0.220539 | 0.056117 | 0 | 0 | 0 | 0 | 0 | 0.009602 | 0.189494 | 2,570 | 64 | 107 | 40.15625 | 0.845895 | 0.180934 | 0 | 0.105263 | 0 | 0 | 0.017535 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0 | 0.184211 | 0 | 0.394737 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77936e8ca10f6e684602ec0fcab350ddfe22ac60 | 24,154 | py | Python | src/daft_exprt/extract_features.py | ishine/ubisoft-laforge-daft-exprt | a576691c8c42988f813183efcea43c1677abe17a | [
"Apache-2.0"
] | 33 | 2021-09-17T18:32:23.000Z | 2022-03-01T21:05:08.000Z | src/daft_exprt/extract_features.py | ishine/ubisoft-laforge-daft-exprt | a576691c8c42988f813183efcea43c1677abe17a | [
"Apache-2.0"
] | 5 | 2021-12-07T04:23:04.000Z | 2022-03-15T07:37:13.000Z | src/daft_exprt/extract_features.py | ishine/ubisoft-laforge-daft-exprt | a576691c8c42988f813183efcea43c1677abe17a | [
"Apache-2.0"
] | 7 | 2021-09-16T02:24:02.000Z | 2022-01-11T07:48:19.000Z | import json
import logging
import logging.handlers
import os
import re
import subprocess
import types
import uuid
import librosa
import numpy as np
import torch
from shutil import rmtree
from librosa.filters import mel as librosa_mel_fn
from scipy.io import wavfile
from daft_exprt.symbols import ascii, eos, punctuation, SIL_WORD_SYMBOL, whitespace
from daft_exprt.utils import launch_multi_process
_logger = logging.getLogger(__name__)
FILE_ROOT = os.path.dirname(os.path.realpath(__file__))
TMP_DIR = os.path.join(FILE_ROOT, 'tmp')
FEATURES_HPARAMS = ['centered', 'cutoff', 'f0_interval', 'filter_length', 'hop_length',
'language', 'mel_fmax', 'mel_fmin', 'min_clipping', 'max_f0', 'min_f0',
'n_mel_channels', 'order', 'sampling_rate', 'symbols', 'uv_cost', 'uv_interval']
def check_features_config_used(features_dir, hparams):
''' Check current config is the same than the one used in features directory
'''
# hyper-params that are important for feature extraction
same_config = True
for root, _, file_names in os.walk(os.path.normpath(features_dir)):
# extract config files
configs = [x for x in file_names if x.endswith('.json')]
if len(configs) != 0:
# get previous config
with open(os.path.join(root, configs[0])) as f:
data = f.read()
config = json.loads(data)
hparams_prev = types.SimpleNamespace(**config)
# compare params
for param in FEATURES_HPARAMS:
if getattr(hparams, param) != getattr(hparams_prev, param):
same_config = False
_logger.warning(f'Parameter "{param}" is different in "{root}" -- '
f'Was {getattr(hparams_prev, param)} and now is {getattr(hparams, param)}')
return same_config
def get_min_phone_duration(lines, min_phone_dur=1000.):
''' Extract shortest phone duration in the current .markers file
'''
# iterate over phones
for line in lines:
line = line.strip().split(sep='\t')
# extract phone duration
begin, end = float(line[0]), float(line[1])
if end - begin < min_phone_dur:
min_phone_dur = end - begin
return min_phone_dur
def duration_to_integer(float_durations, hparams, nb_samples=None):
''' Convert phoneme float durations to integer frame durations
'''
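# Worked example (illustrative hparams: sampling_rate=22050, filter_length=1024,
# hop_length=256, centered=False): frame centers sit at samples 512 + 256*i, so a
# phone spanning 0.0-0.5 s (samples 0..11025) within a longer utterance is assigned
# the 42 frames i = 0..41.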
# estimate number of samples in audio
if nb_samples is None:
# get total duration of audio
# float_durations = [[phone_begin, phone_end], ...]
total_duration = sum([(x[1] - x[0]) for x in float_durations])
# convert in number of samples
nb_samples = int(total_duration * hparams.sampling_rate)
# get nb spectrogram frames
# ignore padding for the moment
nb_frames = 1 + int((nb_samples - hparams.filter_length) / hparams.hop_length)
# get spectrogram frames index
frames_idx = [int(hparams.filter_length / 2) + hparams.hop_length * i for i in range(nb_frames)]
# compute number of frames per phoneme
curr_frame = 1
int_durations = []
while curr_frame <= nb_frames:
# extract phoneme duration
begin, end = float_durations.pop(0)
if begin != end:
# convert to sample idx
begin, end = int(begin * hparams.sampling_rate), int(end * hparams.sampling_rate)
# get corresponding frames
nb_phone_frames = len([idx for idx in frames_idx if begin < idx <= end])
int_durations.append(nb_phone_frames)
curr_frame += nb_phone_frames
else: # we should not have 0 durations
raise ValueError
# add edge frames if padding is on
if hparams.centered:
nb_edge_frames = int(hparams.filter_length / 2 / hparams.hop_length)
# left padding
int_durations[0] += nb_edge_frames
# right padding
if len(float_durations) != 0: # correspond to last phoneme
int_durations.append(nb_edge_frames)
else:
int_durations[-1] += nb_edge_frames
return int_durations
def update_markers(file_name, lines, sentence, sent_begin, int_durations, hparams, logger):
''' Update markers:
- change timings to start from 0
- add punctuation or whitespace at word boundaries
- add EOS token at end of sentence
- add int durations
'''
# characters to consider in the sentence
if hparams.language == 'english':
all_chars = ascii + punctuation
else:
raise NotImplementedError()
'''
match words in the sentence with the ones in markers lines
Sentence: ,THAT's, an example'! ' of a sentence. . .'
Markers words: that s an example <sil> of a sentence
'''
# split sentence:
# [',', "that's", ',', 'an', "example'", '!', "'", 'of', 'a', 'sentence', '.', '.', '.', "'"]
sent_words = re.findall(f"[\w']+|[{punctuation}]", sentence.lower().strip())
# remove characters that are not letters or punctuation:
# [',', "that's", ',', 'an', "example'", '!', 'of', 'a', 'sentence', '.', '.', '.']
sent_words = [x for x in sent_words if len(re.sub(f'[^{all_chars}]', '', x)) != 0]
# be sure to begin the sentence with a word and not a punctuation
# ["that's", ',', 'an', "example'", '!', 'of', 'a', 'sentence', '.', '.', '.']
while sent_words[0] in punctuation:
sent_words.pop(0)
# keep only one punctuation type at the end
# ["that's", ',', 'an', "example'", '!', 'of', 'a', 'sentence']
punctuation_end = None
while sent_words[-1] in punctuation:
punctuation_end = sent_words.pop(-1)
# split markers lines -- [[begin, end, phone, word, word_idx], ....]
markers = [line.strip().split(sep='\t') for line in lines]
# extract markers words
# they are no '<sil>' at beginning and end of sentence because we trimmed the audio
# ['that', 's', 'an', example'', '<sil>', 'of', 'a', 'sentence']
words_idx = [marker[4] for marker in markers]
lines_idx = [words_idx.index(word_idx) for word_idx in list(dict.fromkeys(words_idx).keys())]
marker_words = [markers[line_idx][3] for line_idx in lines_idx]
# update markers with word boundaries
sent_words_copy, markers_old = sent_words.copy(), markers.copy()
markers, word_idx, word_error = [], 0, False
while len(sent_words) != 0:
# extract word in .lab sentence and .markers file
sent_word = sent_words.pop(0)
marker_word, marker_word_idx = markers_old[0][3], markers_old[0][4]
if marker_word != sent_word:
# we should have the same words
# generally the issue comes from the symbol '
# e.g. example' vs example or that's vs [that, s]
regex_word = re.findall(f"[\w]+|[{punctuation}]", sent_word)
if len(regex_word) == 1: # ['example']
sent_word = regex_word[0]
else: # ['that', 's']
sent_words = regex_word + sent_words
sent_word = sent_words.pop(0)
if marker_word != sent_word:
# cannot fix the mismatch between words
word_error = True
logger.warning(f'Correspondance issue between words in the .lab sentence and those in .markers file -- '
f'File name: {file_name} -- Sentence: {sent_words_copy} -- '
f'Markers: {marker_words} -- Problematic words: {sent_word} -- {marker_word}')
break
# retrieve all markers lines that correspond to the word
while len(markers_old) != 0 and markers_old[0][4] == marker_word_idx:
begin, end, phone, word, _ = markers_old.pop(0)
begin = f'{float(begin) - sent_begin:.3f}'
end = f'{float(end) - sent_begin:.3f}'
int_dur = str(int_durations.pop(0))
markers.append([begin, end, int_dur, phone, word, str(word_idx)])
# at this point we pass to the next word
# we must add a word boundary between two consecutive words
word_idx += 1
if len(sent_words) != 0:
word_bound = sent_words.pop(0) if sent_words[0] in punctuation else whitespace
# check if a silence marker is associated to the word boundary
if markers_old[0][3] == SIL_WORD_SYMBOL:
begin, end, _, _, _ = markers_old.pop(0)
begin = f'{float(begin) - sent_begin:.3f}'
end = f'{float(end) - sent_begin:.3f}'
int_dur = str(int_durations.pop(0))
markers.append([begin, end, int_dur, word_bound, word_bound, str(word_idx)])
else:
end_prev = markers[-1][1]
markers.append([end_prev, end_prev, str(0), word_bound, word_bound, str(word_idx)])
word_idx += 1
if not word_error:
# add end punctuation if there is one
if punctuation_end is not None:
end_prev = markers[-1][1]
markers.append([end_prev, end_prev, str(0), punctuation_end, punctuation_end, str(word_idx)])
word_idx += 1
# add EOS token
end_prev = markers[-1][1]
markers.append([end_prev, end_prev, str(0), eos, eos, str(word_idx)])
# check everything is correct
assert(len(sent_words) == len(markers_old) == len(int_durations) == 0), \
logger.error(f'File name: {file_name} -- length mismatch between lists: ({sent_words}, {markers_old}, {int_durations})')
return markers
else:
return None
def extract_pitch(wav, fs, hparams):
''' Extract pitch frames from audio using REAPER binary
Convert pitch to log scale and set unvoiced values to 0.
'''
# REAPER asks for int16 audios
# audio is in float32
wav = wav * 32768.0
wav = wav.astype('int16')
# save audio file locally
rand_name = str(uuid.uuid4())
out_dir = os.path.join(TMP_DIR, 'reaper')
os.makedirs(out_dir, exist_ok=True)
wav_file = os.path.join(out_dir, f'{rand_name}.wav')
wavfile.write(wav_file, fs, wav)
# extract pitch values
f0_file = wav_file.replace('.wav', '.f0')
process = ['reaper', '-i', f'{wav_file}',
'-a', '-f', f'{f0_file}',
'-e', f'{hparams.f0_interval}',
'-m', f'{hparams.min_f0}',
'-x', f'{hparams.max_f0}',
'-u', f'{hparams.uv_interval}',
'-w', f'{hparams.uv_cost}']
with open(os.devnull, 'wb') as devnull:
subprocess.check_call(process, stdout=devnull, stderr=subprocess.STDOUT)
# read PCM file
with open(f0_file, 'rb') as f:
buf = f.read()
pitch = np.frombuffer(buf, dtype='int16')
# extract unvoiced indexes
pitch = np.copy(pitch)
uv_idxs = np.where(pitch <= 0.)[0]
# put to log scale
pitch[uv_idxs] = 1000.
pitch = np.log(pitch)
# set unvoiced values to 0.
pitch[uv_idxs] = 0.
# extract pitch for each mel-spec frame
pitch_frames = pitch[::hparams.hop_length]
# edge case
if len(pitch) % hparams.hop_length == 0:
pitch_frames = np.append(pitch_frames, pitch[-1])
# delete files
os.remove(wav_file)
os.remove(f0_file)
return pitch_frames
def get_symbols_pitch(pitch, markers):
''' Compute mean pitch per symbol
pitch = NumPy array of shape (nb_mel_spec_frames, )
markers = [[begin, end, int_dur, symbol, word, word_idx], ...]
'''
idx = 0
symbols_pitch = []
for marker in markers:
# number of mel-spec frames assigned to the symbol
int_dur = int(marker[2])
if int_dur != 0:
# ignore unvoiced values
symbol_pitch = pitch[idx: idx + int_dur]
symbol_pitch = symbol_pitch[symbol_pitch > 0.]
# compute mean pitch for voiced values
if len(symbol_pitch) != 0:
symbols_pitch.append(f'{np.mean(symbol_pitch):.3f}\n')
else:
symbols_pitch.append(f'{0.:.3f}\n')
idx += int_dur
else:
symbols_pitch.append(f'{0.:.3f}\n')
return symbols_pitch
def extract_energy(mel_spec):
''' Extract energy of each mel-spec frame
mel_spec = NumPy array of shape (nb_mel_spec_channels, nb_mel_spec_frames)
'''
energy = np.linalg.norm(mel_spec, axis=0)
return energy
def get_symbols_energy(energy, markers):
''' Compute mean energy per symbol
energy = NumPy array of shape (nb_mel_spec_frames, )
markers = [[begin, end, int_dur, symbol, word, word_idx], ...]
'''
idx = 0
symbols_energy = []
for marker in markers:
# number of mel-spec frames assigned to the symbol
int_dur = int(marker[2])
if int_dur != 0:
# compute mean energy
symbol_energy = energy[idx: idx + int_dur]
symbol_energy = np.mean(symbol_energy)
symbols_energy.append(f'{symbol_energy:.3f}\n')
idx += int_dur
else:
symbols_energy.append(f'{0.:.3f}\n')
return symbols_energy
def mel_spectrogram_HiFi(wav, hparams):
''' Mel-Spectrogram extraction as it is performed by HiFi-GAN
'''
# convert to PyTorch float tensor
wav = torch.FloatTensor(wav) # (T, )
# extract hparams
fmin = hparams.mel_fmin
fmax = hparams.mel_fmax
center = hparams.centered
hop_size = hparams.hop_length
n_fft = hparams.filter_length
num_mels = hparams.n_mel_channels
sampling_rate = hparams.sampling_rate
min_clipping = hparams.min_clipping
# get mel filter bank
mel_filter_bank = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) # (n_mels, 1 + n_fft/2)
mel_filter_bank = torch.from_numpy(mel_filter_bank).float() # (n_mels, 1 + n_fft/2)
# build hann window
hann_window = torch.hann_window(n_fft)
# extract amplitude spectrogram
spec = torch.stft(wav, n_fft, hop_length=hop_size, win_length=n_fft, window=hann_window,
center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))
# convert to mels and pass to log
mel_spec = torch.matmul(mel_filter_bank, spec)
mel_spec = torch.log(torch.clamp(mel_spec, min=min_clipping))
# transform to numpy array
mel_spec = mel_spec.squeeze().numpy()
return mel_spec
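# Usage sketch (illustrative; 'sample.wav' is a placeholder and hparams carries the
# fields listed in FEATURES_HPARAMS):
# wav, _ = librosa.load('sample.wav', sr=hparams.sampling_rate)
# mel = mel_spectrogram_HiFi(rescale_wav_to_float32(wav), hparams)
# mel.shape -> (hparams.n_mel_channels, nb_frames)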
def rescale_wav_to_float32(x):
''' Rescale audio array between -1.f and 1.f based on the current format
'''
# convert
if x.dtype == 'int16':
y = x / 32768.0
elif x.dtype == 'int32':
y = x / 2147483648.0
elif x.dtype == 'uint8':
y = ((x / 255.0) - 0.5)*2
elif x.dtype == 'float32' or x.dtype == 'float64':
y = x
else:
raise TypeError(f"could not normalize wav, unsupported sample type {x.dtype}")
# check amplitude is correct
y = y.astype('float32')
max_ampl = np.max(np.abs(y))
if max_ampl > 1.0:
pass # the error should be raised but librosa returns values bigger than 1 sometimes
# raise ValueError(f'float32 wav contains samples not in the range [-1., 1.] -- '
# f'max amplitude: {max_ampl}')
return y
def _extract_features(files, features_dir, hparams, log_queue):
''' Extract mel-spectrogram and markers with int duration
'''
# create logger from logging queue
qh = logging.handlers.QueueHandler(log_queue)
root = logging.getLogger()
if not root.hasHandlers():
root.setLevel(logging.INFO)
root.addHandler(qh)
logger = logging.getLogger(f"worker{str(uuid.uuid4())}")
# check files exist
markers_file, wav_file = files
assert(os.path.isfile(markers_file)), logger.error(f'There is no such file: {markers_file}')
assert(os.path.isfile(wav_file)), logger.error(f'There is no such file: {wav_file}')
# read markers lines
with open(markers_file, 'r', encoding='utf-8') as f:
lines = f.readlines()
# check min phone duration is coherent
# min phone duration must be >= filter_length // 2
# in order to have at least one mel-spec frame attributed to the phone
min_phone_dur = get_min_phone_duration(lines)
fft_length = hparams.filter_length / hparams.sampling_rate
assert(min_phone_dur > fft_length / 2), \
logger.error(f'Min phone duration = {min_phone_dur} -- filter_length / 2 = {fft_length / 2}')
# extract sentence duration
# leading and tailing silences have been removed in markers.py script
sent_begin = float(lines[0].strip().split(sep='\t')[0])
sent_end = float(lines[-1].strip().split(sep='\t')[1])
sent_dur = sent_end - sent_begin
# ignore audio if length is inferior to min wav duration
if sent_dur >= hparams.minimum_wav_duration / 1000:
# read wav file to range [-1, 1] in np.float32
wav, fs = librosa.load(wav_file, sr=hparams.sampling_rate)
wav = rescale_wav_to_float32(wav)
# remove leading and tailing silences
wav = wav[int(sent_begin * fs): int(sent_end * fs)]
# extract mel-spectrogram
mel_spec = mel_spectrogram_HiFi(wav, hparams)
# get number of mel-spec frames
nb_mel_spec_frames = mel_spec.shape[1]
# convert phoneme durations to integer frame durations
float_durations = [[float(x[0]) - sent_begin, float(x[1]) - sent_begin]
for x in [line.strip().split(sep='\t') for line in lines]]
int_durations = duration_to_integer(float_durations, hparams, nb_samples=len(wav))
assert(len(int_durations) == len(lines)), logger.error(f'{markers_file} -- ({len(int_durations)}, {len(lines)})')
assert(sum(int_durations) == nb_mel_spec_frames), logger.error(f'{markers_file} -- ({sum(int_durations)}, {nb_mel_spec_frames})')
assert(0 not in int_durations), logger.error(f'{markers_file} -- {int_durations}')
# update markers:
# change timings to start from 0
# add punctuation or whitespace at word boundaries
# add EOS token at end of sentence
# add int durations
markers_dir = os.path.dirname(markers_file)
file_name = os.path.basename(markers_file).replace('.markers', '')
sentence_file = os.path.join(markers_dir, f'{file_name}.lab')
assert(os.path.isfile(sentence_file)), logger.error(f'There is no such file: {sentence_file}')
with open(sentence_file, 'r', encoding='utf-8') as f:
sentence = f.readline()
markers = update_markers(file_name, lines, sentence, sent_begin, int_durations, hparams, logger)
if markers is not None:
# save mel-spectrogram -- (n_mel_channels, T)
np.save(os.path.join(features_dir, f'{file_name}.npy'), mel_spec)
# save markers
# each line has the format: [begin, end, int_dur, symbol, word, word_idx]
markers_file = os.path.join(features_dir, f'{file_name}.markers')
with open(markers_file, 'w', encoding='utf-8') as f:
f.writelines(['\t'.join(x) + '\n' for x in markers])
# extract energy for each mel-spec frame
mel_spec = np.exp(mel_spec) # remove log
frames_energy = extract_energy(mel_spec)
# save frames energy values
energy_file = os.path.join(features_dir, f'{file_name}.frames_nrg')
with open(energy_file, 'w', encoding='utf-8') as f:
for val in frames_energy:
f.write(f'{val:.3f}\n')
# extract energy on the symbol level
# we use average energy value per symbol
symbols_energy = get_symbols_energy(frames_energy, markers)
# save symbols energy
energy_file = os.path.join(features_dir, f'{file_name}.symbols_nrg')
with open(energy_file, 'w', encoding='utf-8') as f:
f.writelines(symbols_energy)
# extract log pitch for each mel-spec frame
frames_pitch = extract_pitch(wav, fs, hparams)
assert(len(frames_pitch) == nb_mel_spec_frames), logger.error(f'{markers_file} -- ({len(frames_pitch)}, {nb_mel_spec_frames})')
# save frames pitch values
pitch_file = os.path.join(features_dir, f'{file_name}.frames_f0')
with open(pitch_file, 'w', encoding='utf-8') as f:
for val in frames_pitch:
f.write(f'{val:.3f}\n')
# extract pitch on the symbol level
# we use average pitch value per symbol
symbols_pitch = get_symbols_pitch(frames_pitch, markers)
# save symbols pitch values
pitch_file = os.path.join(features_dir, f'{file_name}.symbols_f0')
with open(pitch_file, 'w', encoding='utf-8') as f:
f.writelines(symbols_pitch)
else:
        logger.warning(f'Ignoring {wav_file} -- audio is shorter than {hparams.minimum_wav_duration / 1000}s after trimming')
def get_files_for_features_extraction(line, markers_dir, log_queue):
''' Return file name if .markers file exists
'''
    # check if a markers file exists for the corresponding line
line = line.strip().split(sep='|') # [file_name, text]
file_name = line[0].strip()
markers = os.path.join(markers_dir, f'{file_name}.markers')
if os.path.isfile(markers):
return file_name
else:
return None
def extract_features(dataset_dir, features_dir, hparams, n_jobs):
''' Extract features for training
'''
# iterate over speakers
_logger.info('--' * 30)
_logger.info('Extracting Features'.upper())
_logger.info('--' * 30)
for speaker in hparams.speakers:
_logger.info(f'Speaker: "{speaker}"')
# check wavs and markers dir exist
wavs_dir = os.path.join(dataset_dir, speaker, 'wavs')
markers_dir = os.path.join(dataset_dir, speaker, 'align')
assert(os.path.isdir(wavs_dir)), _logger.error(f'There is no such directory: {wavs_dir}')
assert(os.path.isdir(markers_dir)), _logger.error(f'There is no such directory: {markers_dir}')
        # check the metadata file exists
spk_features_dir = os.path.join(features_dir, speaker)
metadata = os.path.join(spk_features_dir, 'metadata.csv')
assert(os.path.isfile(metadata)), _logger.error(f'There is no such file: {metadata}')
# get all files that can be used for features extraction
with open(metadata, 'r', encoding='utf-8') as f:
lines = f.readlines()
file_names = launch_multi_process(iterable=lines, func=get_files_for_features_extraction,
n_jobs=n_jobs, markers_dir=markers_dir, timer_verbose=False)
file_names = [x for x in file_names if x is not None]
# check current files that exist in features dir
        # avoid reprocessing files already handled by a previous feature extraction run
curr_files = [x.replace('.symbols_f0', '').strip() for x in os.listdir(spk_features_dir) if x.endswith('.symbols_f0')]
missing_files = [x for x in file_names if x not in curr_files]
_logger.info(f'{len(curr_files)} files already processed. {len(missing_files)} new files need to be processed')
# extract features
files = [(os.path.join(markers_dir, f'{x}.markers'), os.path.join(wavs_dir, f'{x}.wav')) for x in missing_files]
launch_multi_process(iterable=files, func=_extract_features, n_jobs=n_jobs,
features_dir=spk_features_dir, hparams=hparams)
# save config used to perform features extraction
hparams.save_hyper_params(os.path.join(spk_features_dir, 'config.json'))
_logger.info('')
# remove tmp directory
rmtree(TMP_DIR, ignore_errors=True)
| 43.599278 | 139 | 0.620187 | 3,283 | 24,154 | 4.379836 | 0.140725 | 0.015578 | 0.013214 | 0.008346 | 0.28479 | 0.229432 | 0.210029 | 0.17915 | 0.139161 | 0.103832 | 0 | 0.012451 | 0.265132 | 24,154 | 553 | 140 | 43.678119 | 0.797634 | 0.230811 | 0 | 0.155689 | 0 | 0.002994 | 0.12365 | 0.022482 | 0 | 0 | 0 | 0 | 0.035928 | 1 | 0.038922 | false | 0.002994 | 0.047904 | 0 | 0.125749 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7794c9a15b5585aa2606eff1839893cdafad353f | 4,076 | py | Python | python/cuspatial/cuspatial/geometry/geoseries.py | AyodeAwe/cuspatial | 77971ac91a24228bc46cf461c0ac7b6f2ed78e44 | [
"Apache-2.0"
] | 347 | 2019-08-29T12:39:02.000Z | 2022-03-28T14:55:34.000Z | python/cuspatial/cuspatial/geometry/geoseries.py | AyodeAwe/cuspatial | 77971ac91a24228bc46cf461c0ac7b6f2ed78e44 | [
"Apache-2.0"
] | 425 | 2019-08-24T23:27:46.000Z | 2022-03-31T20:07:18.000Z | python/cuspatial/cuspatial/geometry/geoseries.py | AyodeAwe/cuspatial | 77971ac91a24228bc46cf461c0ac7b6f2ed78e44 | [
"Apache-2.0"
] | 109 | 2019-08-14T22:49:56.000Z | 2022-02-24T19:54:42.000Z | # Copyright (c) 2020-2021, NVIDIA CORPORATION
from typing import TypeVar, Union
import geopandas as gpd
import pandas as pd
from geopandas.geoseries import GeoSeries as gpGeoSeries
import cudf
from cuspatial.geometry.geoarrowbuffers import GeoArrowBuffers
from cuspatial.geometry.geocolumn import GeoColumn, GeoMeta
from cuspatial.io.geopandas_adapter import GeoPandasAdapter
T = TypeVar("T", bound="GeoSeries")
class GeoSeries(cudf.Series):
"""
cuspatial.GeoSeries enables GPU-backed storage and computation of
shapely-like objects. Our goal is to give feature parity with GeoPandas.
At this time, only from_geopandas and to_geopandas are directly supported.
cuspatial GIS, indexing, and trajectory functions depend on the arrays
stored in the `GeoArrowBuffers` object, accessible with the `points`,
`multipoints`, `lines`, and `polygons` accessors.
>>> cuseries.points
xy:
0 -1.0
1 0.0
dtype: float64
"""
def __init__(
self,
data: Union[gpd.GeoSeries],
index: Union[cudf.Index, pd.Index] = None,
dtype=None,
name=None,
nan_as_null=True,
):
# Condition index
if isinstance(data, (gpGeoSeries, GeoSeries)):
if index is None:
index = data.index
if index is None:
index = cudf.RangeIndex(0, len(data))
# Condition data
if isinstance(data, pd.Series):
data = gpGeoSeries(data)
# Create column
if isinstance(data, GeoColumn):
column = data
elif isinstance(data, GeoSeries):
column = data._column
elif isinstance(data, gpGeoSeries):
adapter = GeoPandasAdapter(data)
buffers = GeoArrowBuffers(adapter.get_geoarrow_host_buffers())
pandas_meta = GeoMeta(adapter.get_geopandas_meta())
column = GeoColumn(buffers, pandas_meta)
else:
raise TypeError(
f"Incompatible object passed to GeoSeries ctor {type(data)}"
)
super().__init__(column, index, dtype, name, nan_as_null)
@property
def _geocolumn(self):
"""
The GeoColumn object keeps a reference to a `GeoArrowBuffers` object,
        which contains all of the geometry coordinates and offsets for this
`GeoSeries`.
"""
return self._column
@_geocolumn.setter
def _geocolumn(self, value):
if not isinstance(value, GeoColumn):
raise TypeError
self._column = value
@property
def points(self):
"""
Access the `PointsArray` of the underlying `GeoArrowBuffers`.
"""
return self._geocolumn.points
@property
def multipoints(self):
"""
Access the `MultiPointArray` of the underlying `GeoArrowBuffers`.
"""
return self._geocolumn.multipoints
@property
def lines(self):
"""
Access the `LineArray` of the underlying `GeoArrowBuffers`.
"""
return self._geocolumn.lines
@property
def polygons(self):
"""
Access the `PolygonArray` of the underlying `GeoArrowBuffers`.
"""
return self._geocolumn.polygons
def __repr__(self):
# TODO: Implement Iloc with slices so that we can use `Series.__repr__`
return self.to_pandas().__repr__()
def to_geopandas(self, nullable=False):
"""
Returns a new GeoPandas GeoSeries object from the coordinates in
the cuspatial GeoSeries.
"""
if nullable is True:
raise ValueError("GeoSeries doesn't support <NA> yet")
host_column = self._geocolumn.to_host()
output = [host_column[i].to_shapely() for i in range(len(host_column))]
return gpGeoSeries(output, index=self.index.to_pandas())
def to_pandas(self):
"""
Treats to_pandas and to_geopandas as the same call, which improves
compatibility with pandas.
"""
return self.to_geopandas()
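if __name__ == "__main__":
    # Illustrative round-trip sketch (not part of the library); requires a
    # CUDA-capable GPU with cudf/cuspatial installed. The `xy` accessor on
    # `points` is an assumption based on the class docstring above.
    from shapely.geometry import Point

    host = gpd.GeoSeries([Point(-1.0, 0.0), Point(2.0, 3.0)])
    cuseries = GeoSeries(host)
    print(cuseries.points.xy)       # device-side coordinate buffer
    print(cuseries.to_geopandas())  # back to a host geopandas GeoSeries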
| 31.114504 | 79 | 0.629539 | 449 | 4,076 | 5.587973 | 0.35412 | 0.0279 | 0.020725 | 0.047828 | 0.092467 | 0.078119 | 0.078119 | 0 | 0 | 0 | 0 | 0.00585 | 0.287046 | 4,076 | 130 | 80 | 31.353846 | 0.857536 | 0.300294 | 0 | 0.101449 | 0 | 0 | 0.038861 | 0 | 0 | 0 | 0 | 0.007692 | 0 | 1 | 0.144928 | false | 0.014493 | 0.115942 | 0.014493 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77a214f5777df7a0c6f55cff48bff29b3f5f4ff6 | 1,961 | py | Python | capture/command_scripts/stream_handler.py | CenturyLink/ExpertDHCP | 4dbcd36da7468b9a95a7869df19172fe890cefd2 | [
"MIT"
] | 1 | 2022-03-08T00:38:33.000Z | 2022-03-08T00:38:33.000Z | capture/command_scripts/stream_handler.py | aaronlumen/ExpertDHCP | 4dbcd36da7468b9a95a7869df19172fe890cefd2 | [
"MIT"
] | null | null | null | capture/command_scripts/stream_handler.py | aaronlumen/ExpertDHCP | 4dbcd36da7468b9a95a7869df19172fe890cefd2 | [
"MIT"
] | 2 | 2022-02-11T17:13:48.000Z | 2022-03-08T00:36:03.000Z | import subprocess
import shlex
import re
import json
class StreamHandler(object):
"""
    A singleton class that wraps the dhcpdump stream and yields its
    stdout messages, parsed as JSON, in generator fashion
"""
__instance = None
def __init__(self,
dhcp_dump_command="sudo {} -i {}",
dump_path='/usr/bin/dhcpdump_json',
dump_if='eno1',
msg_sep_regex=r'(-{5,})'):
self.dhcp_dump_command = dhcp_dump_command.format(dump_path, dump_if)
self.message_seperator_pattern = re.compile(msg_sep_regex)
self.last_field = None
if StreamHandler.__instance is not None:
print("Instance already exists for singleton class StreamHandler")
raise Exception("This class is a singleton!")
else:
print("Created instance of class StreamHandler")
StreamHandler.__instance = self
def generate_dump(self):
try:
print(shlex.split(self.dhcp_dump_command))
dhcp_dump_stream_process = subprocess.Popen(shlex.split(self.dhcp_dump_command),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# print("Killable PID dhcpdump",dhcp_dump_stream_process.pid)
for msg in dhcp_dump_stream_process.stdout:
print('-' * 100)
print(msg)
print('-' * 100)
yield json.loads(msg)
except Exception as excp:
import sys, os
print("some exception occured in stream handler")
print(excp)
exc_type, exc_obj, exc_tb = sys.exc_info()
print('%s, %s, %s', exc_type, exc_obj, exc_tb)
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('%s, %s, %s', str(exc_type), str(fname),
str(exc_tb.tb_lineno))
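if __name__ == "__main__":
    # Hypothetical usage sketch: assumes the dhcpdump_json binary exists at
    # the default path, the default interface is present, and the process has
    # sudo privileges.
    handler = StreamHandler()
    for message in handler.generate_dump():
        print(message)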
| 36.314815 | 92 | 0.562978 | 225 | 1,961 | 4.657778 | 0.422222 | 0.061069 | 0.071565 | 0.072519 | 0.123092 | 0.123092 | 0 | 0 | 0 | 0 | 0 | 0.007015 | 0.345742 | 1,961 | 53 | 93 | 37 | 0.809821 | 0.074452 | 0 | 0.05 | 0 | 0 | 0.128707 | 0.012311 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.125 | 0 | 0.225 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77a4ff6d26d09b8ed1fd64033110e651fd78bc73 | 16,460 | py | Python | sudoku_solve2.py | alfille/sudoku_count | 871794378c866f024acfc495d20f45478f3a0b99 | [
"MIT"
] | null | null | null | sudoku_solve2.py | alfille/sudoku_count | 871794378c866f024acfc495d20f45478f3a0b99 | [
"MIT"
] | null | null | null | sudoku_solve2.py | alfille/sudoku_count | 871794378c866f024acfc495d20f45478f3a0b99 | [
"MIT"
] | null | null | null | import sys
import tkinter as tk
import tkinter.font as tkfont
import tkinter.filedialog as tkfile
import tkinter.messagebox as tkmessage
import argparse
import ctypes
import platform
import signal
def signal_handler(signal, frame):
print("\nForced end\n")
sys.exit(0)
class Persist(tk.Frame):
SUBSIZE = 3
X = False
Window = False
Debug = False
Fsize = 14
GameStatus="None"
Data = None
solve_lib = None
s_lib={}
Lib={}
mode = "normal"
Legal=True
@classmethod
def LibSet(cls):
for i in range(2,7):
cls.Lib[i] = False
@classmethod
def LibUse(cls):
if not cls.Lib[cls.SUBSIZE]:
# Shared C library
lib_base = "./" #location
lib_base += "sudoku2_lib" # base name
lib_base += str(cls.SUBSIZE*cls.SUBSIZE)
# get the right filename
if platform.uname()[0] == "Windows":
lib_base += ".dll"
if platform.uname()[0] == "Linux":
lib_base += ".so"
else:
lib_base += ".dylib"
# load library
cls.s_lib[cls.SUBSIZE] = ctypes.cdll.LoadLibrary(lib_base)
cls.Lib[cls.SUBSIZE] = True
cls.solve_lib = cls.s_lib[cls.SUBSIZE]
class Sudoku(tk.Frame):
color=["dark blue","yellow"]
solution = False
def __init__(self, master=None):
super().__init__(master)
self.SIZE = Persist.SUBSIZE * Persist.SUBSIZE
self.TOTALSIZE = self.SIZE * self.SIZE
self.master = master
self.option_setup()
self.Widget()
self.Menu()
if ( Persist.Data ):
self.SetData()
else:
self.BadData()
self.win.update()
self.set_win_sizes()
def set_win_sizes( self ):
# needed for popup
self.tilex=self.but[0][0].winfo_width()
self.tiley=self.but[0][0].winfo_height()
self.popx = self.tilex*Persist.SUBSIZE
self.popy = self.tiley*Persist.SUBSIZE
self.winx = self.win.winfo_screenwidth()
self.winy = self.win.winfo_screenheight()
def UnRed( self ):
if self.solution:
# need to clear old solution
for i in range(self.SIZE):
for j in range(self.SIZE):
self.but[i][j].configure(fg="black")
self.solution = False
def set_square(self,i,j,n):
self.but[i][j].configure(text=n)
self.popup_done( i , j )
self.UnRed()
def popup_done( self, i, j ):
self.pop.destroy()
self.but[i][j].configure(relief="raised")
self.Status()
def Status( self ):
self.status.configure(text="Edit mode")
if ( Persist.GameStatus == "Unique" ):
self.status.configure( text=["Unsolvable","Unique","Not unique"][self.Unique()] )
def popup_force_done( self, i, j, force ):
self.pop.destroy()
self.but[i][j].configure(relief="raised")
self.Status()
self.Popup(i,j,not force)
def Popup( self,i,j,force):
self.but[i][j].configure(relief="sunken")
self.pop=tk.Toplevel()
show = False
if Persist.Legal:
goodlist = self.Available(i,j)
else:
goodlist = [x+1 for x in range(self.SIZE)]
t = [0 for n in range(self.SIZE+1)]
# figure location
self.win.update()
x = self.but[i][j].winfo_rootx()+self.tilex
y = self.but[i][j].winfo_rooty()
if x+self.popx > self.winx:
x -= self.tilex+self.popx
if x<0:
x=0
if y+self.popy > self.winy:
y = self.winy - self.popy
self.pop.geometry('+%d+%d' % (x,y) )
for si in range(Persist.SUBSIZE):
for sj in range(Persist.SUBSIZE):
n = si*Persist.SUBSIZE+sj+1
t[n]=tk.Button(self.pop,text=str(n),borderwidth=3,height=1,width=1,font=self.font, state='normal' if (n in goodlist) or not force else 'disabled', command=lambda i=i,j=j,n=str(n): self.set_square(i,j,n))
t[n].grid(row=si+1,column=sj)
if 0 in goodlist:
tk.Button(self.pop,text="force" if force else "unforce",borderwidth=3,height=1,font=self.font,command=lambda i=i,j=j: self.popup_force_done(i,j,force)).grid(row=0,columnspan=Persist.SUBSIZE,sticky="EW")
tk.Button(self.pop,text="Clear",borderwidth=3,height=1,font=self.font,command=lambda i=i,j=j,n=" ": self.set_square(i,j,n)).grid(columnspan=Persist.SUBSIZE,sticky="EW")
tk.Button(self.pop,text="Back",borderwidth=3,height=1,font=self.font,command=lambda i=i,j=j: self.popup_done(i,j)).grid(columnspan=Persist.SUBSIZE,sticky="EW")
self.pop.grab_set()
def Clear(self):
self.status.configure(text="Clearing...")
for i in range(self.SIZE):
for j in range(self.SIZE):
self.but[i][j].configure(text=" ")
self.Status()
self.UnRed()
def Solve(self):
self.status.configure(text="Solving...")
self.master.update()
arr = (ctypes.c_int * self.TOTALSIZE)(-1)
k = 0
self.solution = True
for i in range(self.SIZE):
for j in range(self.SIZE):
arr[k] = -1 # default blank
t = self.but[i][j].cget('text')
if t != " ":
arr[k] = int(t) # 1-based values for squares
self.but[i][j].configure(fg="red")
else:
arr[k] = 0
k += 1
x = 1 if Persist.X else 0
w = 1 if Persist.Window else 0
d = 1 if Persist.Debug else 0
sol = Persist.solve_lib.Solve(x,w,d,arr)
while True:
if sol == 0:
self.status.configure(text="Not solvable")
for i in range(self.SIZE):
for j in range(self.SIZE):
if self.but[i][j].cget('fg') != 'red':
self.but[i][j].configure(text=" ")
self.master.update()
break
if sol == 1:
self.status.configure(text="Successfully solved")
k = 0
for i in range(self.SIZE):
for j in range(self.SIZE):
if arr[k] > 0 :
self.but[i][j].configure(text=str(arr[k])) # 1-based text values
if self.but[i][j].cget('fg') == 'blue':
self.but[i][j].configure(fg='black')
else:
self.but[i][j].configure(text=" ")
k += 1
self.master.update()
break
if sol < 0:
self.status.configure(text="<"+str(-sol)+"> Still solving...")
k = 0
for i in range(self.SIZE):
for j in range(self.SIZE):
if self.but[i][j].cget('fg') != 'red':
self.but[i][j].configure(fg='blue')
if arr[k] > 0 :
self.but[i][j].configure(text=str(arr[k])) # 1-based text values
else:
self.but[i][j].configure(text=" ")
k += 1
self.master.update()
sol = Persist.solve_lib.Resume()
def Test( self ):
if not self.just_test():
tkmessage.showinfo("Position test","Not valid")
def Available(self,testi,testj):
arr = (ctypes.c_int * self.TOTALSIZE)(-1)
ret = (ctypes.c_int * self.SIZE)(-1)
k = 0
for i in range(self.SIZE):
for j in range(self.SIZE):
t = self.but[i][j].cget('text')
if i==testi and j==testj: # blank tested location
arr[k] = 0
elif t != " ":
arr[k] = int(t) # 1-based values for squares
self.but[i][j].configure(fg="red")
else:
arr[k] = 0
k += 1
x = 1 if Persist.X else 0
w = 1 if Persist.Window else 0
d = 1 if Persist.Debug else 0
Persist.solve_lib.TestAvailable(x,w,d,testi, testj, arr,ret)
return ret
def Unique(self):
arr = (ctypes.c_int * self.TOTALSIZE)(-1)
k = 0
for i in range(self.SIZE):
for j in range(self.SIZE):
t = self.but[i][j].cget('text')
if t != " ":
arr[k] = int(t) # 1-based values for squares
else:
arr[k] = 0
k += 1
x = 1 if Persist.X else 0
w = 1 if Persist.Window else 0
d = 1 if Persist.Debug else 0
return Persist.solve_lib.TestUnique(x,w,d, arr)
def just_test(self):
arr = (ctypes.c_int * self.TOTALSIZE)(-1)
k = 0
for i in range(self.SIZE):
for j in range(self.SIZE):
t = self.but[i][j].cget('text')
if t != " ":
arr[k] = int(t) # 1-based values for squares
self.but[i][j].configure(fg="red")
else:
arr[k] = 0
k += 1
x = 1 if Persist.X else 0
w = 1 if Persist.Window else 0
d = 1 if Persist.Debug else 0
return (Persist.solve_lib.Test(x,w,d,arr)==1)
def Quit(self):
sys.exit()
self.master.quit()
def Widget(self):
self.win = tk.Frame(self.master,borderwidth=2,relief="flat",background="white")
self.win.pack(side="top")
self.buttons=tk.Frame(self.master,borderwidth=2,relief="flat",background="white")
if Persist.SUBSIZE > 2:
tk.Label(self.buttons,text=" {0}x{0} ".format(self.SIZE),relief="sunken",anchor="c",font=self.font).pack(side="left",fill=tk.Y)
tk.Button(self.buttons,text="Solve",command=self.Solve,font=self.font).pack(side="left")
tk.Button(self.buttons,text="Clear",command=self.Clear,font=self.font).pack(side="left")
tk.Button(self.buttons,text="Exit",command=self.Quit,font=self.font).pack(side="left")
self.status = tk.Label(self.buttons,text="Edit mode",relief="sunken",anchor="e")
self.status.pack(side="left",fill="both",expand=1)
self.buttons.pack(side="bottom",fill=tk.X)
self.but = [[0 for i in range(self.SIZE)] for j in range(self.SIZE)]
for si in range(Persist.SUBSIZE):
for sj in range(Persist.SUBSIZE):
                f = tk.Frame(self.win,background=self.color[(si+sj)%2],borderwidth=2,relief="flat")
f.grid(row=si,column=sj)
for ssi in range(Persist.SUBSIZE):
for ssj in range(Persist.SUBSIZE):
i = si*Persist.SUBSIZE+ssi
j = sj*Persist.SUBSIZE+ssj
self.but[i][j] = tk.Button(f,text=" ",borderwidth=3,height=1,width=1,font=self.font,command=lambda i=i,j=j: self.Popup(i,j,True))
self.but[i][j].grid(row=ssi, column=ssj)
if Persist.X and ((i==j) or (i == self.SIZE-j-1)):
self.but[i][j].configure(background="light yellow")
if Persist.Window:
if (i % (Persist.SUBSIZE+1) > 0) and (j % (Persist.SUBSIZE+1) > 0):
self.but[i][j].configure(background="aquamarine")
if Persist.X and ((i==j) or (i == self.SIZE-j-1)):
self.but[i][j].configure(background="pale green")
self.Status()
def BadData( self ):
for i in range(self.SIZE):
for j in range(self.SIZE):
self.but[i][j].configure(text=str(1+(i+j)%self.SIZE))
def SetData( self ):
for i in range(self.SIZE):
for j in range(self.SIZE):
self.but[i][j].configure(fg='black')
s = str(Persist.Data[i][j])
if s == '0':
s = ' '
self.but[i][j].configure(text=s) # 1-based text values
Persist.Data = None
def about(self):
print("Sudoku Solve by Paul Alfille 2020")
    def Size(self):
        # compare the chosen value, not the IntVar object itself
        if self.ss_choose.get() != Persist.SUBSIZE:
            Persist.SUBSIZE = self.ss_choose.get()
            self.master.destroy()
def Option(self):
if Persist.X != self.X.get():
Persist.X = self.X.get()
self.master.destroy()
if Persist.Window != self.Window.get():
Persist.Window = self.Window.get()
self.master.destroy()
Persist.Debug = self.Debug.get()
Persist.Legal = self.Legal.get()
def option_setup(self):
# match with Option(), set in Menu()
self.font = tkfont.Font(weight="bold",size=Persist.Fsize)
#self.pack()
self.X = tk.BooleanVar()
self.X.set(Persist.X)
self.Window = tk.BooleanVar()
self.Window.set(Persist.Window)
self.Debug = tk.BooleanVar()
self.Debug.set(Persist.Debug)
self.Legal = tk.BooleanVar()
self.Legal.set(Persist.Legal)
def fsize( self, f ):
if ( f != Persist.Fsize ) :
Persist.Fsize = f
self.master.destroy()
def set_status( self, s ):
Persist.GameStatus = s
self.Status()
def Menu(self):
self.menu = tk.Menu(self.master,tearoff=0)
self.filemenu = tk.Menu(self.menu,tearoff=0)
self.menu.add_cascade(label="File",menu=self.filemenu,font=self.font)
self.filemenu.add_command(label="Load",command=self.Load,font=self.font)
self.filemenu.add_command(label="Save",command=self.Save,font=self.font)
self.filemenu.add_command(label="Solve",command=self.Solve,font=self.font)
self.filemenu.add_command(label="Test",command=self.Test,font=self.font)
self.filemenu.add_command(label="Clear",command=self.Clear,font=self.font)
self.filemenu.add_command(label="Exit",command=self.Quit,font=self.font)
self.sizemenu = tk.Menu(self.menu,tearoff=0)
self.menu.add_cascade(label="Size",menu=self.sizemenu,font=self.font)
self.ss_choose = tk.IntVar()
        self.ss_choose.set(Persist.SUBSIZE)
for ss in range(2,7):
self.sizemenu.add_radiobutton(label=("> " if ss == Persist.SUBSIZE else "")+str(ss*ss)+"x"+str(ss*ss), value=ss, variable=self.ss_choose, command=self.Size,font=self.font)
self.optmenu = tk.Menu(self.menu,tearoff=0)
self.menu.add_cascade(label="Options",menu=self.optmenu,font=self.font)
self.optmenu.add_checkbutton(label="X pattern",onvalue=True,offvalue=False,variable=self.X,font=self.font,command=self.Option)
self.optmenu.add_checkbutton(label="Window pane",onvalue=True,offvalue=False,variable=self.Window,font=self.font,command=self.Option)
self.optmenu.add_checkbutton(label="Debugging data",onvalue=True,offvalue=False,variable=self.Debug,font=self.font,command=self.Option)
self.optmenu.add_checkbutton(label="Legal choices",onvalue=True,offvalue=False,variable=self.Legal,font=self.font,command=self.Option)
self.fontmenu = tk.Menu(self.optmenu,tearoff=0)
self.optmenu.add_cascade(label="Font size",menu=self.fontmenu,font=self.font)
for ff in [6,8,10,14,18,22,26]:
self.fontmenu.add_command(label=("> " if ff==Persist.Fsize else "")+str(ff),font=self.font, command=lambda ff=ff: self.fsize(ff))
self.statusmenu = tk.Menu(self.optmenu,tearoff=0)
self.optmenu.add_cascade(label="Game state status",menu=self.statusmenu,font=self.font)
for ss in ["None","Unique"]:
self.statusmenu.add_command(label=("> " if ss==Persist.GameStatus else "")+ss, font=self.font, command=lambda ss=ss: self.set_status(ss))
self.helpmenu = tk.Menu(self.menu,tearoff=0)
self.menu.add_cascade(label="Help",menu=self.helpmenu,font=self.font)
self.helpmenu.add_command(label="About",command=self.about,font=self.font)
self.master.config(menu=self.menu)
def Load( self ):
Lfile = tkfile.askopenfile(mode="r",filetypes=[("Comma-separated-values","*.csv"),("All files","*.*")],title="Load a sudoku",parent=self.master)
if Lfile:
try:
i = 0
Window = False
X = False
for line in Lfile:
if '#' in line:
[line,comment]=line.split('#')
if "=" in comment:
[var,val] = line.split("=")
else:
[var,val] = [comment,"true"]
if var == "window":
Window = (val=="true")
if var == "X":
X = (val=="true")
if ',' in line:
v = line.split(',')
if i == 0 :
#first line, check size
Lsize = len(v)
if Lsize not in [4,9,16,25,36]:
tkmessage.showerror("File error","Not a recognized size")
Persist.Data=None
break
Persist.Data = [[0 for i in range(self.SIZE)] for j in range(self.SIZE)]
else:
if len(v) != Lsize:
tkmessage.showerror("File error","Value lists not the same size")
Persist.Data=None
break
Persist.Data[i] = [int(x) if x.isnumeric() else 0 for x in list(map(lambda s:s.strip(),v)) ]
if max(Persist.Data[i]) > Lsize or min(Persist.Data[i]) < 0 :
tkmessage.showerror("File error","Value out of range")
Persist.Data=None
break
i += 1
if i == Lsize:
#done
Persist.X = X
Persist.Window = Window
Persist.SUBSIZE = [x*x for x in range(7)].index(Lsize)
self.master.destroy()
Lfile.close()
except UnicodeDecodeError:
tkmessage.showerror("Unreadable","File contains unreadable characters")
Persist.Data=None
return
def Save( self ):
filename = tkfile.asksaveasfilename(filetypes=[("Comma-separated-values","*.csv"),("All files","*.*")],title="Save this sudoku board",parent=self.master)
if filename:
with open(filename,'w') as Sfile:
if Persist.Window:
Sfile.write("# window\n")
if Persist.X:
Sfile.write("# X\n")
Sfile.write("\n".join([",".join([self.but[i][j].cget("text") for j in range(self.SIZE)]) for i in range(self.SIZE)])+"\n")
def Libs():
# returns a dict
s_lib={}
for ss in range(2,7):
# Shared C library
lib_base = "./" #location
lib_base += "sudoku2_lib" # base name
lib_base += str(ss*ss)
# get the right filename
if platform.uname()[0] == "Windows":
lib_base += ".dll"
if platform.uname()[0] == "Linux":
lib_base += ".so"
else:
lib_base += ".dylib"
# load library
global solve_lib
s_lib[ss] = ctypes.cdll.LoadLibrary(lib_base)
return s_lib
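# Sketch of the C interface the ctypes calls above assume (reconstructed from
# usage in this file, not taken from the project's actual headers). Each
# per-size shared library is expected to export, roughly:
#
#   int  Solve(int x, int w, int d, int *board);   /* 0 = unsolvable, 1 = done, <0 = still solving */
#   int  Resume(void);
#   int  Test(int x, int w, int d, int *board);    /* 1 if the position is valid */
#   int  TestUnique(int x, int w, int d, int *board);  /* 0 = none, 1 = unique, 2 = not unique */
#   void TestAvailable(int x, int w, int d, int row, int col, int *board, int *out);
#
# Boards are flattened row-major, with 0 for a blank square and 1-based digits otherwise.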
def main(args):
# keyboard interrupt
signal.signal(signal.SIGINT, signal_handler)
# set up library dist
#global solve_lib
#s_lib = Libs()
Persist.LibSet()
while True:
# load library
Persist.LibUse()
Sudoku(master=tk.Tk()).mainloop()
if __name__ == "__main__":
# execute only if run as a script
sys.exit(main(sys.argv))
| 30.881801 | 207 | 0.646355 | 2,634 | 16,460 | 3.998481 | 0.129841 | 0.010444 | 0.025826 | 0.029054 | 0.50883 | 0.43496 | 0.396316 | 0.352165 | 0.321401 | 0.312856 | 0 | 0.011716 | 0.18068 | 16,460 | 532 | 208 | 30.93985 | 0.769242 | 0.036817 | 0 | 0.372685 | 0 | 0 | 0.064223 | 0.002781 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.020833 | 0 | 0.143519 | 0.00463 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77a51fcecb6daeb1528498330dad731bde24172c | 1,549 | py | Python | Text_to_speech_GAN/waveFiles.py | scotty110/ANN-code | 05ae6094dfe98c1c9fd0feb87ffb0c0c5206502a | [
"MIT"
] | null | null | null | Text_to_speech_GAN/waveFiles.py | scotty110/ANN-code | 05ae6094dfe98c1c9fd0feb87ffb0c0c5206502a | [
"MIT"
] | null | null | null | Text_to_speech_GAN/waveFiles.py | scotty110/ANN-code | 05ae6094dfe98c1c9fd0feb87ffb0c0c5206502a | [
"MIT"
] | null | null | null | import scipy.io.wavfile as siow
import scipy.signal as ssr
import matplotlib.pyplot as plt
import numpy as np
import math
def groupNumpy(to_group_array, interval, debug=True):
'''
    Breaks a numpy array into an array of sub-arrays, each <interval> samples long.
    Inputs:
        to_group_array - stereo array we wish to break down into sub-intervals
        interval - number of samples per sub-interval
    Outputs:
        3D array of shape [ceil(len/interval), interval, 2]
'''
n = math.ceil( len(to_group_array)/(interval*1.0) )
if(debug):
print("array lenth: ", len(to_group_array))
print("N: ", n)
    # Make 3D output array: (chunks, interval, channels); assumes stereo input
    output_array = np.zeros((n, interval, 2))
    for i in range(0, n, 1):
        chunk = to_group_array[(i * interval):((i + 1) * interval)]
        output_array[i, :len(chunk)] = chunk  # zero-pad the final partial chunk
return output_array
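# Quick shape check (hypothetical numbers): 1000 stereo samples grouped into
# 320-sample chunks gives ceil(1000/320) = 4 chunks, the last one zero-padded:
#   groupNumpy(np.zeros((1000, 2)), 320).shape == (4, 320, 2)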
filename = "hello.wav"
audio_tuple = siow.read(filename)
audio_array = audio_tuple[1]
print("Sample rate: ", audio_tuple[0])
#Assuming sample rate is 16 khz, want to break into 20 ms chucks
grouped_array = groupNumpy(audio_array, 320)
#print("differenc: ", grouped_array[1])
#print("true segment: ", audio_array[320:2*320])
print("Difference: ", np.sum(grouped_array[1]-audio_array[320:2*320]) )
#2D sound is multi channel sound
'''
#Plots look about the same (probably are), so assuming down sampling works for now
plt.plot( audio_new, color="orange" )
plt.ylabel('sound wave')
plt.show()
'''
'''
Used as a outline for code:
https://medium.com/@ageitgey/machine-learning-is-fun-part-6-how-to-do-speech-recognition-with-deep-learning-28293c162f7a
''' | 28.163636 | 120 | 0.712718 | 247 | 1,549 | 4.37247 | 0.489879 | 0.032407 | 0.055556 | 0.037037 | 0.07037 | 0.038889 | 0 | 0 | 0 | 0 | 0 | 0.033742 | 0.158167 | 1,549 | 55 | 121 | 28.163636 | 0.794479 | 0.297611 | 0 | 0 | 0 | 0 | 0.067295 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.25 | 0 | 0.35 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77a746913532d4c25ca26f69e46343bc6271ae0e | 790 | py | Python | Tools/Individual Tools/Big Build Calculator.py | TheUncannyScrub/PythonMinecraftTools | 1ff5cf53f195cff41c33dc39a461ee94c6edf510 | [
"MIT"
] | null | null | null | Tools/Individual Tools/Big Build Calculator.py | TheUncannyScrub/PythonMinecraftTools | 1ff5cf53f195cff41c33dc39a461ee94c6edf510 | [
"MIT"
] | null | null | null | Tools/Individual Tools/Big Build Calculator.py | TheUncannyScrub/PythonMinecraftTools | 1ff5cf53f195cff41c33dc39a461ee94c6edf510 | [
"MIT"
] | null | null | null | import os
import sys
import random
import math
from time import *
import decimal
print('Minecraft Resource Calculator')
print('Enter the individual items and the calculator will')
print('tell you how many chest or stacks it is!')
print('*Only works for items that stack up to 64*')
sleep(1)
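# Conversion factors used below: a stack holds 64 items, a single chest holds
# 27 stacks (27 * 64 = 1728 items), and a double chest holds 54 stacks (3456).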
while True:
try:
print('Enter an amount of individual items')
numinput = float(input('Here: '))
stacks = (numinput) / 64
chests = (numinput) / 1728
dubchest = (numinput) / 3456
print(round(stacks,2), "Stack(s)")
print(round(chests,2), "Chest(s)")
print(round(dubchest,2), "Double Chest(s)")
break
except:
print('You must enter a number!')
sleep(2)
input('Press ENTER to exit') | 25.483871 | 60 | 0.616456 | 105 | 790 | 4.638095 | 0.590476 | 0.061602 | 0.045175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029463 | 0.26962 | 790 | 31 | 61 | 25.483871 | 0.814558 | 0 | 0 | 0 | 0 | 0 | 0.362681 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0.346154 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77ac836929fce76c6241d61d0546b8b5bdacebd5 | 10,283 | py | Python | data/GoogleTrans/preprocess.py | wasiahmad/GATE | 1e48504a3641f00265a271a19eb6b6449fdc33bd | [
"MIT"
] | 24 | 2020-12-07T10:22:40.000Z | 2022-03-31T09:24:13.000Z | data/GoogleTrans/preprocess.py | wasiahmad/GATE | 1e48504a3641f00265a271a19eb6b6449fdc33bd | [
"MIT"
] | 15 | 2021-03-22T04:52:57.000Z | 2022-01-01T18:32:31.000Z | data/GoogleTrans/preprocess.py | wasiahmad/GATE | 1e48504a3641f00265a271a19eb6b6449fdc33bd | [
"MIT"
] | 8 | 2021-03-04T05:09:42.000Z | 2022-01-25T12:59:19.000Z | import json
import os
from udpipe import Model
from conllu import parse
from collections import OrderedDict
model_map = {
'en': 'udpipe/english-ewt-ud-2.5-191206.udpipe',
'zh': 'udpipe/chinese-gsd-ud-2.5-191206.udpipe',
'ar': 'udpipe/arabic-padt-ud-2.5-191206.udpipe'
}
def find_span(offsets, begin_offset, end_offset):
"""Match token offsets with the char begin/end offsets of the answer."""
start = [i for i, tok in enumerate(offsets) if tok[0] == begin_offset]
if len(start) == 0:
start = [i for i, tok in enumerate(offsets) if tok[0] < begin_offset < tok[1]]
end = [i for i, tok in enumerate(offsets) if tok[1] == end_offset]
if len(end) == 0:
end = [i for i, tok in enumerate(offsets) if tok[0] < end_offset < tok[1]]
assert (len(start) <= 1)
assert (len(end) <= 1)
if len(start) == 1 and len(end) == 1:
return start[0], end[0]
return False
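# Worked example (hypothetical offsets): with token offsets
# [(0, 3), (4, 9), (10, 14)], find_span(offsets, 4, 9) returns (1, 1);
# find_span(offsets, 5, 9) also resolves the start to token 1 because
# 4 < 5 < 9 places the begin offset inside that token's range.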
def load_conllu(conllu_text):
conllu_data = []
sentences = parse(conllu_text)
for idx, sentence in enumerate(sentences):
tokens, upos, head, deprel, offset = [], [], [], [], []
reserved_offsets = []
for widx, word in enumerate(sentence):
if isinstance(word['id'], tuple):
# multi-word token, e.g., word['id'] = (4, '-', 5)
assert len(word['id']) == 3
indices = word['misc']['TokenRange'].split(':')
reserved_offsets.append([int(indices[0]), int(indices[1])])
else:
tokens.append(word['form'])
upos.append(word['upostag'])
head.append(word['head'])
deprel.append(word['deprel'])
if word['misc'] is not None:
# single-word token
indices = word['misc']['TokenRange'].split(':')
offset.append([int(indices[0]), int(indices[1])])
elif len(reserved_offsets) > 0:
offset.append(reserved_offsets.pop())
else:
offset.append([-1, -1])
assert len(tokens) == len(offset)
sent_obj = OrderedDict([
('token', tokens),
('stanford_pos', upos),
('stanford_head', head),
('stanford_deprel', deprel),
('offset', offset)
])
conllu_data.append(sent_obj)
return conllu_data
def preprocess(srcfile):
with open(srcfile) as f:
data = json.load(f)
confusing = 0
returned_data = []
for ex in data:
if ex['parallel'].count('<b>') > 1:
confusing += 1
continue
elif ex['parallel'].count('</b>') > 1:
confusing += 1
continue
elif ex['parallel'].count('<i>') > 1:
confusing += 1
continue
elif ex['parallel'].count('</i>') > 1:
confusing += 1
continue
parallel_sent = ex['parallel']
subj_start = parallel_sent.find('<b>')
subj_end = parallel_sent.find('</b>')
obj_start = parallel_sent.find('<i>')
obj_end = parallel_sent.find('</i>')
if subj_start > subj_end:
confusing += 1
continue
elif obj_start > obj_end:
confusing += 1
continue
if subj_end < obj_start:
            # subj is to the left of obj
position = ['subj_start', 'subj_end', 'obj_start', 'obj_end']
elif obj_end < subj_start:
            # obj is to the left of subj
position = ['obj_start', 'obj_end', 'subj_start', 'subj_end']
elif subj_start < obj_start < subj_end:
position = ['subj_start', 'obj_start']
if obj_end < subj_end:
position += ['obj_end', 'subj_end']
else:
position += ['subj_end', 'obj_end']
elif obj_start < subj_start < obj_end:
position = ['obj_start', 'subj_start']
if subj_end < obj_end:
position += ['subj_end', 'obj_end']
else:
position += ['obj_end', 'subj_end']
else:
raise ValueError()
# print(parallel_sent)
for item in position:
if item == 'subj_start':
real_ss = parallel_sent.find('<b>')
parallel_sent = parallel_sent.replace('<b>', '')
elif item == 'subj_end':
real_se = parallel_sent.find('</b>')
parallel_sent = parallel_sent.replace('</b>', '')
elif item == 'obj_start':
real_os = parallel_sent.find('<i>')
parallel_sent = parallel_sent.replace('<i>', '')
elif item == 'obj_end':
real_oe = parallel_sent.find('</i>')
parallel_sent = parallel_sent.replace('</i>', '')
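        # Worked example (hypothetical): for "<b>Bush</b> met <i>Putin</i>",
        # the tags are stripped left to right, giving real_ss = 0, real_se = 4,
        # real_os = 9, real_oe = 14 -- character offsets into the clean text
        # "Bush met Putin".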
if real_ss == real_se:
confusing += 1
continue
elif real_os == real_oe:
confusing += 1
continue
ex['parallel'] = {
'sentence': parallel_sent,
'subj_pos': [real_ss, real_se],
'obj_pos': [real_os, real_oe],
'source': ex['parallel']
}
returned_data.append(ex)
# print(parallel_sent)
# print(position)
# print(real_ss, real_se, real_os, real_oe)
# print()
print('Out of %d examples, %d are dropped!' % (len(data), confusing))
return returned_data
def get_conllu_text(text, model):
sentences = model.tokenize(text, 'ranges;presegmented')
total_words = 0
for s in sentences:
total_words += len(s.words)
model.tag(s)
model.parse(s)
conllu = model.write(sentences, "conllu")
return conllu
def convert_char_to_word_indices(parallel_data, tgtfile, lang):
model = Model(model_map[lang])
skipped = 0
for ex in parallel_data:
trans_sent = ex['parallel']['sentence']
conllu_text = get_conllu_text(trans_sent, model)
conll_ex = load_conllu(conllu_text)
assert len(conll_ex) == 1
conll_ex = conll_ex[0]
subj_start_char, subj_end_char = ex['parallel']['subj_pos']
obj_start_char, obj_end_char = ex['parallel']['obj_pos']
subj_start_end = find_span(conll_ex['offset'], subj_start_char, subj_end_char)
obj_start_end = find_span(conll_ex['offset'], obj_start_char, obj_end_char)
if not subj_start_end:
print(conll_ex['token'])
print(conll_ex['offset'])
print(trans_sent[subj_start_char:subj_end_char])
print(subj_start_char, subj_end_char)
skipped += 1
continue
if not obj_start_end:
print(conll_ex['token'])
print(conll_ex['offset'])
print(trans_sent[obj_start_char:obj_end_char])
print(obj_start_char, obj_end_char)
skipped += 1
continue
ex['source'] = ex['sentence']
ex['parallel'].pop('subj_pos')
ex['parallel'].pop('obj_pos')
ex.pop('sentence')
ex.pop('subj')
ex.pop('obj')
ex.pop('token')
ex['translation'] = ex['parallel']['source']
ex.pop('parallel')
ex['token'] = conll_ex['token']
ex['stanford_pos'] = conll_ex['stanford_pos']
ex['stanford_head'] = conll_ex['stanford_head']
ex['stanford_deprel'] = conll_ex['stanford_deprel']
ex['stanford_ner'] = ["O"] * len(ex['token'])
ex['subj_start'] = subj_start_end[0]
ex['subj_end'] = subj_start_end[1]
ex['obj_start'] = obj_start_end[0]
ex['obj_end'] = obj_start_end[1]
ex['subj_type'] = ex['subj_type']
ex['obj_type'] = ex['obj_type']
ex['relation'] = ex['relation']
if skipped > 0:
print('%d examples are skipped since we cannot resolve their character indices.' % skipped)
with open(tgtfile, 'w') as fw:
json.dump(parallel_data, fw, sort_keys=True, indent=4)
def filter_source_examples(selected_ids, src_file, tgt_file):
selected_data = []
with open(src_file) as f:
data = json.load(f)
data_dict = {ex['id']: ex for ex in data}
for idx in selected_ids:
selected_data.append(data_dict[idx])
assert len(selected_data) == len(selected_ids), \
'{} != {}'.format(len(selected_data), len(selected_ids))
with open(tgt_file, 'w') as fw:
json.dump(selected_data, fw, sort_keys=True, indent=4)
if __name__ == '__main__':
new_data = preprocess('ace_event/en_test_zh.json')
convert_char_to_word_indices(new_data,
'ace_event/Chinese/test_parallel.json',
'zh')
selected_ids = [ex['id'] for ex in new_data]
filter_source_examples(selected_ids,
'../data/ace_event/English/test.json',
'ace_event/Chinese/test_source.json')
new_data = preprocess('ace_relation/en_test_zh.json')
convert_char_to_word_indices(new_data,
'ace_relation/Chinese/test_parallel.json',
'zh')
selected_ids = [ex['id'] for ex in new_data]
filter_source_examples(selected_ids,
'../data/ace_relation/English/test.json',
'ace_relation/Chinese/test_source.json')
new_data = preprocess('ace_event/en_test_ar.json')
convert_char_to_word_indices(new_data,
'ace_event/Arabic/test_parallel.json',
'ar')
selected_ids = [ex['id'] for ex in new_data]
filter_source_examples(selected_ids,
'../data/ace_event/English/test.json',
'ace_event/Arabic/test_source.json')
new_data = preprocess('ace_relation/en_test_ar.json')
convert_char_to_word_indices(new_data,
'ace_relation/Arabic/test_parallel.json',
'ar')
selected_ids = [ex['id'] for ex in new_data]
filter_source_examples(selected_ids,
'../data/ace_relation/English/test.json',
'ace_relation/Arabic/test_source.json')
| 36.207746 | 99 | 0.550812 | 1,254 | 10,283 | 4.263955 | 0.135566 | 0.044885 | 0.026931 | 0.020572 | 0.43127 | 0.372732 | 0.312138 | 0.275856 | 0.266879 | 0.266879 | 0 | 0.010541 | 0.31732 | 10,283 | 283 | 100 | 36.335689 | 0.75114 | 0.030341 | 0 | 0.242553 | 0 | 0 | 0.166014 | 0.065984 | 0 | 0 | 0 | 0 | 0.025532 | 1 | 0.025532 | false | 0 | 0.021277 | 0 | 0.068085 | 0.042553 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77ae2c52823c3ed666b8da7d385a9528b00e5196 | 4,034 | py | Python | bricklayer/util/parallel_fetch.py | loganwang007/bricklayer | 531dd4acaf20574a9d2f7f0adf68789888288157 | [
"Apache-2.0"
] | null | null | null | bricklayer/util/parallel_fetch.py | loganwang007/bricklayer | 531dd4acaf20574a9d2f7f0adf68789888288157 | [
"Apache-2.0"
] | null | null | null | bricklayer/util/parallel_fetch.py | loganwang007/bricklayer | 531dd4acaf20574a9d2f7f0adf68789888288157 | [
"Apache-2.0"
] | null | null | null | """
Module to distribute the S3 download over a spark cluster
Useful when the data is highly partitioned and unable to be loaded by standard methods
Results end up in a table
Usage:
```
from parallel_fetch import DbricksParallelFetch
# define the aws_bucket and output_dir for the s3_fetch to start
aws_bucket = "service-trips"
output_dir = "/tmp/"
# define the target df that holds the S3 object paths to fetch
df = Spark.createDataFrame()
# export the fetched contents dataframe
output_df = DbricksParallelFetch.download_file(df, aws_bucket, output_dir, path_column)
```
"""
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import partial
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql import DataFrame
import logging
import os
import boto3
import csv
class DbricksParallelFetch:
@staticmethod
def download_file(df: DataFrame, aws_bucket: str, output_dir: str, path_column: str, max_workers: int = 32):
"""encapsulate the pandas udf function as a static method
Args:
df (DataFrame): target dataframe
aws_bucket (str): aws bucket stored all the small files
output_dir (str): temporary output dir
path_column (str): path column in the target dataframe
max_workers (int): number of processors
Returns:
[DataFrame]: [output dataframe with downloaded content]
"""
@pandas_udf('string', PandasUDFType.SCALAR)
def s3_fetch(paths):
def download_one_file(bucket: str, output: str, client: boto3.client, s3_file: str):
"""
Download a single file from S3
Args:
bucket (str): S3 bucket where images are hosted
output (str): Dir to store the images
client (boto3.client): S3 client
s3_file (str): S3 object name
"""
client.download_file(
Bucket=bucket, Key=s3_file,
Filename=os.path.join(output, s3_file.replace('/', '_'))
)
files_to_download = paths
# Creating only one session and one client
session = boto3.Session()
client = session.client("s3")
# The client is shared between threads
func = partial(download_one_file, aws_bucket, output_dir, client)
# List for storing possible failed downloads to retry later
failed_downloads = []
with ThreadPoolExecutor(max_workers) as executor:
# Using a dict for preserving the downloaded file for each future
# to store it as a failure if we need that
futures = {
executor.submit(func, file_to_download):
file_to_download for file_to_download in files_to_download
}
for future in as_completed(futures):
if future.exception():
failed_downloads.append(futures[future])
if len(failed_downloads) > 0:
with open(
os.path.join(output_dir, "failed_downloads.csv"), "w", newline=""
) as csvfile:
writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
writer.writerow(failed_downloads)
def read_file_and_return_contents(path):
try:
with open(output_dir + path.replace('/', '_'), 'r') as file:
logging.info(f"Read {file} and return its value")
return file.read()
except FileNotFoundError:
logging.warning("Messages is failed to download from s3")
return None
return paths.apply(read_file_and_return_contents)
return df.withColumn('downloaded_content', s3_fetch(path_column))
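if __name__ == "__main__":
    # Hypothetical smoke test: needs an active Spark session plus AWS
    # credentials with read access; the bucket, key, and column names below
    # are made up for illustration.
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()
    paths_df = spark.createDataFrame([("2021/trip_0001.json",)], ["s3_path"])
    result = DbricksParallelFetch.download_file(paths_df, "service-trips", "/tmp/", "s3_path")
    result.show(truncate=False)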
| 42.020833 | 112 | 0.595439 | 458 | 4,034 | 5.09607 | 0.368996 | 0.034704 | 0.01671 | 0.021851 | 0.021422 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007845 | 0.336391 | 4,034 | 95 | 113 | 42.463158 | 0.864027 | 0.341101 | 0 | 0 | 0 | 0 | 0.050982 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0 | 0.170213 | 0 | 0.361702 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77af594f90cdfff28c32483019a29f1ca17a171f | 621 | py | Python | algorithms/mergesort.py | MatheusRV/Analysis-of-Algorithms-Course | 4c1f4ccb8349b894653128be7e38d3045b0f5c13 | [
"MIT"
] | null | null | null | algorithms/mergesort.py | MatheusRV/Analysis-of-Algorithms-Course | 4c1f4ccb8349b894653128be7e38d3045b0f5c13 | [
"MIT"
] | null | null | null | algorithms/mergesort.py | MatheusRV/Analysis-of-Algorithms-Course | 4c1f4ccb8349b894653128be7e38d3045b0f5c13 | [
"MIT"
] | null | null | null | def merge(left, right):
"""Merge sort merging function."""
merged_array=[]
while left or right:
if not left:
merged_array.append(right.pop())
elif (not right) or left[-1] > right[-1]:
merged_array.append(left.pop())
else:
merged_array.append(right.pop())
merged_array.reverse()
return merged_array
def merge_sort(array, size):
"""Merge sort algorithm implementation."""
if size < 2: # base case
return array
else:
# divide array in half and merge sort recursively
half = size // 2
left = merge_sort(array[:half], half)
        right = merge_sort(array[half:], size - half)  # right half may be larger for odd sizes
return merge(left, right)
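if __name__ == "__main__":
    # Quick demonstration (not part of the original course files).
    data = [5, 2, 9, 1, 5, 6]
    print(merge_sort(data, len(data)))  # -> [1, 2, 5, 5, 6, 9]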
| 25.875 | 51 | 0.690821 | 91 | 621 | 4.615385 | 0.340659 | 0.128571 | 0.121429 | 0.104762 | 0.22381 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007797 | 0.173913 | 621 | 23 | 52 | 27 | 0.810916 | 0.199678 | 0 | 0.210526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77b62d71a2e2a81d7bb14aa77dd472363e132b1c | 593 | py | Python | deploy/env_var.py | john850512/TranslatedPudding | 64e5f2b5fe389c598daa15ff78dbc5ede3e0799e | [
"MIT"
] | 3 | 2019-05-20T06:36:14.000Z | 2020-05-15T03:58:16.000Z | deploy/env_var.py | john850512/TranslatedPudding | 64e5f2b5fe389c598daa15ff78dbc5ede3e0799e | [
"MIT"
] | null | null | null | deploy/env_var.py | john850512/TranslatedPudding | 64e5f2b5fe389c598daa15ff78dbc5ede3e0799e | [
"MIT"
] | null | null | null | CHANNEL_ACCESS_TOKEN = r'YOUR CHANNEL ACCESS TOKEN'
CHANNEL_SECRET = r'YOUR CHANNEL SECRET'
STR_CURRENT_STATUS_RESPOND = ("目前狀態:【{current_status}】\n" +
"1.輸入『吃下翻譯布丁』開啟功能\n" +
"2.輸入『布丁消化完了』關閉功能"
)
STR_CURRENT_STATUS = r'翻譯布丁'
STR_ACTIVATE_BOT = r'吃下翻譯布丁'
STR_DEACTIVATE_BOT = r'布丁消化完了'
STR_ACTIVATE_BOT_RESPOND = r'吃下翻譯布丁之後,發現全身似乎充滿神奇的力量!'
STR_DUPLICATE_ACTIVATE_BOT_RESPOND = r'已經吃過一次了,不可以太貪心!'
STR_DEACTIVATE_BOT_RESPOND = r'翻譯布丁被消化的差不多了,神奇的力量漸漸退去'
STR_DUPLICATE_DEACTIVATE_BOT_RESPOND = r'沒有布丁可以消化,QQ' | 45.615385 | 59 | 0.686341 | 80 | 593 | 4.8 | 0.425 | 0.104167 | 0.114583 | 0.098958 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00431 | 0.217538 | 593 | 13 | 60 | 45.615385 | 0.814655 | 0 | 0 | 0 | 0 | 0 | 0.319865 | 0.117845 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77b773bb110aacc218f4ca773acae2c751ddf7a7 | 691 | py | Python | src/duet/sv_calling.py | yekaizhou/duet | ab49323992d3a5f6c21a10bd7114525a31fb0b8b | [
"BSD-3-Clause"
] | 7 | 2021-08-12T08:26:18.000Z | 2022-02-23T18:13:53.000Z | src/duet/sv_calling.py | yekaizhou/duet | ab49323992d3a5f6c21a10bd7114525a31fb0b8b | [
"BSD-3-Clause"
] | null | null | null | src/duet/sv_calling.py | yekaizhou/duet | ab49323992d3a5f6c21a10bd7114525a31fb0b8b | [
"BSD-3-Clause"
] | null | null | null | # coding=utf-8
import logging
import os
import time
def sv_calling(home, ref_path, aln_path, cls_thres, svlen_thres):
lines = '*************************'
logging.info(lines + ' SV CALLING STARTED ' + lines)
starttime = time.time()
sv_calling_home = home + '/sv_calling/'
os.system('mkdir ' + sv_calling_home)
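    # The assembled shell command looks roughly like this (hypothetical values):
    #   svim alignment <home>/sv_calling/ aln.bam ref.fa --min_sv_size 50 \
    #       --read_names --minimum_depth 0 --minimum_score 0 --cluster_max_distance 0.3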
os.system('svim alignment ' + sv_calling_home + ' ' + aln_path + ' ' + ref_path + ' --min_sv_size ' + \
str(svlen_thres) + ' --read_names --minimum_depth 0 --minimum_score 0 --cluster_max_distance ' + \
str(cls_thres))
logging.info(lines + ' SV CALLING COMPLETED IN ' + str(round(time.time() - starttime, 3)) + 's ' + lines) | 43.1875 | 112 | 0.615051 | 90 | 691 | 4.466667 | 0.466667 | 0.156716 | 0.129353 | 0.089552 | 0.124378 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007353 | 0.212735 | 691 | 16 | 113 | 43.1875 | 0.731618 | 0.017366 | 0 | 0 | 0 | 0 | 0.287611 | 0.069322 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77b834ef3b47095a3ef4e2276e9ea11ba4bc2b31 | 622 | py | Python | python/08/a4988.py | matsujirushi/raspi_parts_kouryaku | 35cd6f34d21c5e3160636671175fa8d5aff2d4dc | [
"Apache-2.0"
] | 6 | 2022-03-05T02:36:57.000Z | 2022-03-12T12:31:27.000Z | python/08/a4988.py | matsujirushi/raspi_parts_kouryaku | 35cd6f34d21c5e3160636671175fa8d5aff2d4dc | [
"Apache-2.0"
] | null | null | null | python/08/a4988.py | matsujirushi/raspi_parts_kouryaku | 35cd6f34d21c5e3160636671175fa8d5aff2d4dc | [
"Apache-2.0"
] | null | null | null | import pigpio
import time
ROTATION = 2           # full revolutions to perform
STEP_PER_ROTATE = 200  # full steps per revolution (assumes a 1.8 deg/step motor)
MICROSTEP = 16         # 1/16 microstepping (MS1 = MS2 = MS3 = HIGH on the A4988)

# BCM GPIO pin assignments for the A4988 driver
ENABLE = 10
MS1 = 9
MS2 = 11
MS3 = 5
RESET = 6
SLEEP = 13
STEP = 19
DIR = 26

WAIT = 0.005           # seconds per full STEP pulse (two WAIT/2 half-periods)
pi = pigpio.pi()
for pin in [ENABLE, MS1, MS2, MS3, RESET, SLEEP, STEP, DIR]:
pi.set_mode(pin, pigpio.OUTPUT)
pi.write(RESET, 0)   # hold the driver in reset while configuring
pi.write(SLEEP, 1)   # take the driver out of sleep
pi.write(MS1, 1)     # MS1..MS3 all HIGH selects 1/16 microstep mode
pi.write(MS2, 1)
pi.write(MS3, 1)
pi.write(DIR, 0)     # rotation direction
pi.write(STEP, 0)
pi.write(ENABLE, 0)  # ENABLE is active-low: 0 enables the motor outputs
time.sleep(0.001)
pi.write(RESET, 1)   # release reset; the driver is ready to step
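# Worked numbers: 200 steps * 16 microsteps * 2 rotations = 6400 pulses;
# at WAIT = 0.005 s per pulse the loop below runs for about 32 seconds.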
for i in range(STEP_PER_ROTATE * MICROSTEP * ROTATION):
pi.write(STEP, 1)
time.sleep(WAIT / 2)
pi.write(STEP, 0)
time.sleep(WAIT / 2)
| 14.136364 | 60 | 0.651125 | 116 | 622 | 3.448276 | 0.353448 | 0.1925 | 0.08 | 0.06 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.098394 | 0.199357 | 622 | 43 | 61 | 14.465116 | 0.704819 | 0 | 0 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77b8deb8a471712792ddcd411f464a17a0cb2bf4 | 1,917 | py | Python | tools/c7n_gcp/c7n_gcp/resources/dataflow.py | vkubyshko/cloud-custodian | e5e3a0f8b5c85adcbec212d780b453047fb6f4d1 | [
"Apache-2.0"
] | 2,415 | 2018-12-04T00:37:58.000Z | 2022-03-31T12:28:56.000Z | tools/c7n_gcp/c7n_gcp/resources/dataflow.py | vkubyshko/cloud-custodian | e5e3a0f8b5c85adcbec212d780b453047fb6f4d1 | [
"Apache-2.0"
] | 3,272 | 2018-12-03T23:58:17.000Z | 2022-03-31T21:15:32.000Z | tools/c7n_gcp/c7n_gcp/resources/dataflow.py | staxio/cloud-custodian | 24ed5d8f09bc37ff76184aae97a1ef577a69a41b | [
"Apache-2.0"
] | 773 | 2018-12-06T09:43:23.000Z | 2022-03-30T20:44:43.000Z | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import jmespath
from googleapiclient.errors import HttpError
from c7n_gcp.provider import resources
from c7n_gcp.query import QueryResourceManager, TypeInfo
@resources.register('dataflow-job')
class DataflowJob(QueryResourceManager):
"""GCP resource: https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs
"""
class resource_type(TypeInfo):
service = 'dataflow'
version = 'v1b3'
component = 'projects.jobs'
enum_spec = ('aggregated', 'jobs[]', None)
scope_key = 'projectId'
name = id = 'name'
get_requires_event = True
default_report_fields = [
'name', 'currentState', 'createTime', 'location']
permissions = ('dataflow.jobs.list',)
@staticmethod
def get(client, event):
return client.execute_command(
'get', {
'projectId': jmespath.search('resource.labels.project_id', event),
'jobId': jmespath.search('protoPayload.request.job_id', event)
}
)
def resources(self, query=None):
query_filter = 'ACTIVE'
if self.data.get('query'):
query_filter = self.data['query'][0].get('filter', 'ACTIVE')
return super(DataflowJob, self).resources(query={'filter': query_filter})
def augment(self, resources):
client = self.get_client()
results = []
for r in resources:
ref = {
'jobId': r['id'],
'projectId': r['projectId'],
'view': 'JOB_VIEW_ALL'
}
try:
results.append(
client.execute_query(
'get', verb_arguments=ref))
except HttpError:
results.append(r)
return results
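# Illustrative Cloud Custodian policy exercising this resource (not shipped
# with the module):
#
#   policies:
#     - name: dataflow-active-jobs
#       resource: gcp.dataflow-job
#       query:
#         - filter: ACTIVE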
| 32.491525 | 93 | 0.571205 | 186 | 1,917 | 5.77957 | 0.510753 | 0.04093 | 0.018605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006818 | 0.311424 | 1,917 | 58 | 94 | 33.051724 | 0.807576 | 0.087115 | 0 | 0 | 0 | 0 | 0.15261 | 0.030407 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065217 | false | 0 | 0.086957 | 0.021739 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77b987eaf14f75d36f539722f833433781e4d25e | 12,777 | py | Python | evolution/robot.py | gpatsiaouras/Robot-Simulator | 4676bb342ca2a1ebcabfb4834f42da61610bacdf | [
"MIT"
] | null | null | null | evolution/robot.py | gpatsiaouras/Robot-Simulator | 4676bb342ca2a1ebcabfb4834f42da61610bacdf | [
"MIT"
] | 1 | 2022-03-12T00:58:01.000Z | 2022-03-12T00:58:01.000Z | evolution/robot.py | gpatsiaouras/Robot-Simulator | 4676bb342ca2a1ebcabfb4834f42da61610bacdf | [
"MIT"
] | null | null | null | import numpy as np
from math import hypot as hyp
class Robot:
def __init__(self, diameter, initial_theta, initial_position):
# Robot specifications
self.diameter = diameter
self.radius = int(diameter / 2)
self.position = initial_position
# Rotation is in rads
self.theta = initial_theta
# Velocity of wheel in pixes/time
self.MAX_SPEED = 10
self.MIN_SPEED = -10
self.left_wheel_velocity = 0
self.right_wheel_velocity = 0
# Sensors
self.sensors_values = []
self.sensors_parameters = np.zeros((12, 2))
self.init_sensors()
# obstacles for sensor values
self.obstacles_coords = []
self.obstacles_parameters = np.zeros((4, 2))
# Collision Data (for evolution algorithm)
self.collisions = 0
def reset(self, theta, position):
self.theta = theta
self.position = position
self.update_sensor_values()
self.collisions = 0
self.left_wheel_velocity = 0
self.right_wheel_velocity = 0
def update_sensor_values(self):
count = 0
for angle in range(0, 360, 30):
# sensor origin coords
self.sensors_coords[count, 0] = self.position[0] + self.radius * np.cos(self.theta + np.radians(angle))
self.sensors_coords[count, 1] = self.position[1] + self.radius * np.sin(self.theta + np.radians(angle))
# sensor tips coords
self.sensors_coords[count, 2] = self.position[0] + self.sens_radius * np.cos(self.theta + np.radians(angle))
self.sensors_coords[count, 3] = self.position[1] + self.sens_radius * np.sin(self.theta + np.radians(angle))
self.sensors_values[count] = hyp(self.sensors_coords[count, 2] - self.sensors_coords[count, 0],
self.sensors_coords[count, 3] - self.sensors_coords[count, 1])
# sensors functions parameters
# slope a
self.sensors_parameters[count, 0] = (self.sensors_coords[count, 3] - self.sensors_coords[count, 1]) / \
(self.sensors_coords[count, 2] - self.sensors_coords[count, 0])
# intercept b
self.sensors_parameters[count, 1] = self.sensors_coords[count, 1] - (
self.sensors_parameters[count, 0] * self.sensors_coords[count, 0])
count = count + 1
def move(self):
# Store old position before applying kinematics
old_position = [self.position[0], self.position[1]]
if self.left_wheel_velocity != self.right_wheel_velocity:
            # Calculate ω (angular velocity) and update the robot's rotation
angular_velocity = (self.left_wheel_velocity - self.right_wheel_velocity) / self.diameter
# Keep theta from exploding
self.theta %= 2 * np.pi
R = (self.diameter / 2) * (self.left_wheel_velocity + self.right_wheel_velocity) / (
self.left_wheel_velocity - self.right_wheel_velocity)
ICCx = self.position[0] - R * np.sin(self.theta)
ICCy = self.position[1] + R * np.cos(self.theta)
# np.matrix is deprecated, so build the rotation matrix as a plain ndarray
matrix_a = np.array([[np.cos(angular_velocity), -np.sin(angular_velocity), 0],
[np.sin(angular_velocity), np.cos(angular_velocity), 0], [0, 0, 1]])
vector_a = np.array([self.position[0] - ICCx, self.position[1] - ICCy, self.theta])
vector_b = np.array([ICCx, ICCy, angular_velocity])
new_pos_rot = matrix_a.dot(vector_a) + vector_b
self.position = [new_pos_rot[0], new_pos_rot[1]]
self.theta = new_pos_rot[2]
elif self.right_wheel_velocity != 0:
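# equal non-zero wheel speeds: the robot translates straight along its heading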
self.position[0] = self.position[0] + (self.right_wheel_velocity * np.cos(self.theta))
self.position[1] = self.position[1] + (self.right_wheel_velocity * np.sin(self.theta))
# Check if the new move caused a collision
if self.check_collision():
# Undo the move
self.position = old_position
# Move according to collision handling algorithm
self.move_with_wall()
# Increment collisions counter
self.collisions += 1
# update sensors
self.update_sensor_values()
def move_with_wall(self):
cap_hor = 1
cap_ver = 1
velocity_hor = 0
velocity_ver = 0
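# sliding collision response: keep the velocity component parallel to
# the wall and zero the perpendicular one; cap_hor / cap_ver act as
# axis masks for vertical and horizontal walls respectively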
for obstacle_id in range(len(self.obstacles_parameters)):
# print("Obstacle {0}: {1}".format(obstacle_id, self.obstacles_parameters[obstacle_id][0]))
# Vertical Obstacle
if np.isinf(self.obstacles_parameters[obstacle_id][0]):
distance = np.abs(self.position[0] - self.obstacles_coords[obstacle_id][0])
# bug fix: the original wrapped this comparison in a one-element
# list, which is always truthy; use the bare boolean instead
is_inside_the_limits_of_the_line = (
self.obstacles_coords[obstacle_id][3] < self.position[1] < self.obstacles_coords[obstacle_id][1])
# Horizontal Obstacle
elif self.obstacles_parameters[obstacle_id][0] == 0:
distance = np.abs(self.position[1] - self.obstacles_coords[obstacle_id][1])
# same bug fix as above: bare boolean, not a one-element list
is_inside_the_limits_of_the_line = (
self.obstacles_coords[obstacle_id][2] > self.position[0] > self.obstacles_coords[obstacle_id][0])
else:
distance = np.abs(-self.obstacles_parameters[obstacle_id][0] * self.position[0] + self.position[1] -
self.obstacles_parameters[obstacle_id][1]) / \
np.sqrt((-self.obstacles_parameters[obstacle_id][0]) ** 2 + 1)
is_inside_the_limits_of_the_line = False
if is_inside_the_limits_of_the_line and distance <= self.radius + 10:
velocity_hor = np.cos(self.theta) * (self.right_wheel_velocity + self.left_wheel_velocity) / 2
velocity_ver = np.sin(self.theta) * (self.right_wheel_velocity + self.left_wheel_velocity) / 2
if self.obstacles_parameters[obstacle_id][0] == 0:
cap_ver = 0
if np.isinf(self.obstacles_parameters[obstacle_id][0]):
cap_hor = 0
velocity_hor = velocity_hor * cap_hor
velocity_ver = velocity_ver * cap_ver
self.position[0] = self.position[0] + velocity_hor
self.position[1] = self.position[1] + velocity_ver
def increment_left_wheel(self):
if self.left_wheel_velocity + 1 <= self.MAX_SPEED:
self.left_wheel_velocity += 1
def decrement_left_wheel(self):
if self.left_wheel_velocity - 1 >= self.MIN_SPEED:
self.left_wheel_velocity -= 1
def increment_right_wheel(self):
if self.right_wheel_velocity + 1 <= self.MAX_SPEED:
self.right_wheel_velocity += 1
def decrement_right_wheel(self):
if self.right_wheel_velocity - 1 >= self.MIN_SPEED:
self.right_wheel_velocity -= 1
def increment_both_wheels(self):
self.increment_left_wheel()
self.increment_right_wheel()
def decrement_both_wheels(self):
self.decrement_left_wheel()
self.decrement_right_wheel()
def stop_motors(self):
self.right_wheel_velocity = 0
self.left_wheel_velocity = 0
def init_sensors(self):
# 12 sensors around the perimeter, 30 degrees apart;
# sensor 0 points straight ahead of the robot.
# A reading equal to the sensing range (sens_radius - radius,
# i.e. 100 pixels here) means nothing is in reach.
self.sensors_values = [0 for i in range(12)]
self.sensors_coords = np.zeros((12, 4))
# sensing range: 100 pixels beyond the robot body
self.sens_radius = 100 + self.radius
self.update_sensor_values()
def set_obstacles(self, obstacles_coords, obstacles_params):
self.obstacles_coords = obstacles_coords
self.obstacles_parameters = obstacles_params
def check_sensors(self):
for sensor_id in range(len(self.sensors_coords)):
for obstacle_id in range(len(self.obstacles_coords)):
intersection_point = self.get_intersecting_point(self.sensors_coords[sensor_id],
self.obstacles_coords[obstacle_id])
if intersection_point:
self.sensors_values[sensor_id] = np.sqrt(
(intersection_point[0] - self.sensors_coords[sensor_id, 0]) ** 2 + (
intersection_point[1] - self.sensors_coords[sensor_id, 1]) ** 2)
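# pull the drawn sensor tip in to the hit point only when the hit is
# within the 100-pixel sensing range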
if self.sensors_values[sensor_id] < 100:
self.sensors_coords[sensor_id, 2] = intersection_point[0]
self.sensors_coords[sensor_id, 3] = intersection_point[1]
def check_collision(self):
for obstacle_id in range(len(self.obstacles_parameters)):
if np.isinf(self.obstacles_parameters[obstacle_id][0]):
distance = np.abs(self.position[0] - self.obstacles_coords[obstacle_id][0])
# renamed from the misleading is_not_in_range: the flag is True when
# the robot lies within the wall segment's extent
is_in_range = (min(self.obstacles_coords[obstacle_id][1], self.obstacles_coords[obstacle_id][3])
<= self.position[1] <=
max(self.obstacles_coords[obstacle_id][1], self.obstacles_coords[obstacle_id][3]))
elif self.obstacles_parameters[obstacle_id][0] == 0:
distance = np.abs(self.position[1] - self.obstacles_coords[obstacle_id][1])
is_in_range = (min(self.obstacles_coords[obstacle_id][0], self.obstacles_coords[obstacle_id][2])
<= self.position[0] <=
max(self.obstacles_coords[obstacle_id][0], self.obstacles_coords[obstacle_id][2]))
else:
distance = np.abs(-self.obstacles_parameters[obstacle_id][0] * self.position[0] + self.position[1] -
self.obstacles_parameters[obstacle_id][1]) / \
np.sqrt((-self.obstacles_parameters[obstacle_id][0]) ** 2 + 1)
is_in_range = True
if is_in_range and distance <= self.radius:
return True
return False
def get_intersecting_point(self, line1, line2):
"""Return the intersection point if the given line segments intersect, otherwise False."""
line1_p1 = [line1[0], line1[1]]
line1_p2 = [line1[2], line1[3]]
line2_p1 = [line2[0], line2[1]]
line2_p2 = [line2[2], line2[3]]
# Check if a line intersection is possible within range
if ((line1_p1[0] > line2_p1[0] and line1_p1[0] > line2_p2[0] and line1_p2[0] > line2_p1[0] and line1_p2[0] >
line2_p2[0]) or
(line1_p1[0] < line2_p1[0] and line1_p1[0] < line2_p2[0] and line1_p2[0] < line2_p1[0] and line1_p2[0] <
line2_p2[0]) or
(line1_p1[1] > line2_p1[1] and line1_p1[1] > line2_p2[1] and line1_p2[1] > line2_p1[1] and line1_p2[1] >
line2_p2[1]) or
(line1_p1[1] < line2_p1[1] and line1_p1[1] < line2_p2[1] and line1_p2[1] < line2_p1[1] and line1_p2[1] <
line2_p2[1])):
return False
# Get axis differences
diffX = (line1_p1[0] - line1_p2[0], line2_p1[0] - line2_p2[0])
diffY = (line1_p1[1] - line1_p2[1], line2_p1[1] - line2_p2[1])
# Get intersection
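# (Cramer's rule: d is the determinant of the two direction vectors;
# d == 0 means the lines are parallel, so there is no intersection)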
d = np.linalg.det([diffX, diffY])
if d == 0:
return False
det = (np.linalg.det([line1_p1, line1_p2]), np.linalg.det([line2_p1, line2_p2]))
x = np.linalg.det([det, diffX]) / d
y = np.linalg.det([det, diffY]) / d
# Check if it is within range
margin = 0.0001
if (x < min(line1_p1[0], line1_p2[0]) - margin or
x > max(line1_p1[0], line1_p2[0]) + margin or
y < min(line1_p1[1], line1_p2[1]) - margin or
y > max(line1_p1[1], line1_p2[1]) + margin or
x < min(line2_p1[0], line2_p2[0]) - margin or
x > max(line2_p1[0], line2_p2[0]) + margin or
y < min(line2_p1[1], line2_p2[1]) - margin or
y > max(line2_p1[1], line2_p2[1]) + margin):
return False
return x, y
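# A minimal usage sketch (hypothetical arena values, not part of the
# original project): drive the robot inside a 400x400 walled box.
#
# walls = np.array([[0, 400, 0, 0],        # left wall (vertical)
#                   [400, 400, 400, 0],    # right wall (vertical)
#                   [0, 0, 400, 0],        # bottom wall (horizontal)
#                   [0, 400, 400, 400]],   # top wall (horizontal)
#                  dtype=float)
# params = np.array([[np.inf, 0.0], [np.inf, 0.0], [0.0, 0.0], [0.0, 400.0]])
# robot = Robot(diameter=30, initial_theta=0.0, initial_position=[200.0, 200.0])
# robot.set_obstacles(walls, params)
# robot.increment_both_wheels()
# for _ in range(50):
#     robot.move()
#     robot.check_sensors()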
| 46.126354 | 122 | 0.591688 | 1,652 | 12,777 | 4.340194 | 0.110169 | 0.068898 | 0.055649 | 0.052162 | 0.590516 | 0.518271 | 0.461506 | 0.422873 | 0.36848 | 0.295676 | 0 | 0.04381 | 0.301479 | 12,777 | 276 | 123 | 46.293478 | 0.759552 | 0.089614 | 0 | 0.243523 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088083 | false | 0 | 0.010363 | 0 | 0.134715 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77b9b6901e4cb4106030a4e8cd51ce978af0e6bd | 2,613 | py | Python | backblaze/tests/blocking/test_file.py | WardPearce/aiob2 | 3dcff9c3aa7612ce7b43375fca379c1358121a4a | [
"MIT"
] | null | null | null | backblaze/tests/blocking/test_file.py | WardPearce/aiob2 | 3dcff9c3aa7612ce7b43375fca379c1358121a4a | [
"MIT"
] | null | null | null | backblaze/tests/blocking/test_file.py | WardPearce/aiob2 | 3dcff9c3aa7612ce7b43375fca379c1358121a4a | [
"MIT"
] | 1 | 2019-07-16T03:38:49.000Z | 2019-07-16T03:38:49.000Z | import unittest
from uuid import uuid4
from os import path
from .client import CLIENT
from ...settings import (
BucketSettings,
UploadSettings,
PartSettings,
CopyFileSettings
)
from ...models.file import FileModel, PartModel
from ...bucket.blocking import BlockingFile
class TestBlockingFile(unittest.TestCase):
def test_file(self):
_, bucket = CLIENT.create_bucket(BucketSettings(
"file test {}".format(uuid4())
))
local_path = path.join(
path.dirname(path.realpath(__file__)),
"../test_file.png"
)
with open(local_path, "rb") as f:
data = f.read()
file_data, file = bucket.upload(
UploadSettings(
name="ウィーブ.png"
),
data=data
)
self.assertIsInstance(file_data, FileModel)
self.assertIsInstance(file, BlockingFile)
self.assertIsInstance(file.download(), bytes)
copy_data, copy_file = file.copy(CopyFileSettings(
"copied file.png"
))
self.assertIsInstance(copy_data, FileModel)
self.assertIsInstance(copy_file, BlockingFile)
copy_file.delete()
file.delete(
file_data.file_name
)
local_path = path.join(
path.dirname(path.realpath(__file__)),
"../parts_test"
)
details, file = bucket.create_part(PartSettings(
"test part.png"
))
parts = file.parts()
data = b""
with open(local_path, "rb") as f:
data = f.read()
chunk_size = 5000000
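# Backblaze B2 requires every part of a large file except the last to
# be at least 5 MB (5,000,000 bytes), hence this chunk size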
for chunk in range(0, len(data), chunk_size):
parts.data(data[chunk:chunk + chunk_size])
for part, _ in file.parts().list():
self.assertIsInstance(part, PartModel)
parts.finish()
file.delete(details.file_name)
details, file = bucket.create_part(PartSettings(
"test part upload.png"
))
parts = file.parts()
parts.file(local_path)
parts.finish()
file.delete(details.file_name)
data, file = bucket.upload_file(
UploadSettings("test part.bin"),
local_path
)
file.delete(data.file_name)
local_path = path.join(
path.dirname(path.realpath(__file__)),
"../test_file.png"
)
data, file = bucket.upload_file(
UploadSettings("test file upload.png"),
local_path
)
file.delete(data.file_name)
bucket.delete()
| 22.921053 | 58 | 0.565251 | 268 | 2,613 | 5.335821 | 0.246269 | 0.05035 | 0.027273 | 0.035664 | 0.377622 | 0.377622 | 0.377622 | 0.233566 | 0.167832 | 0.167832 | 0 | 0.005708 | 0.329506 | 2,613 | 113 | 59 | 23.123894 | 0.810502 | 0 | 0 | 0.37037 | 0 | 0 | 0.057405 | 0 | 0 | 0 | 0 | 0 | 0.074074 | 1 | 0.012346 | false | 0 | 0.08642 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77bb0202fbcb60692c326fef16e93e7f15657559 | 616 | py | Python | tests/test_dataset.py | kilsenp/triplet-reid-pytorch | 0cd2c2ac638d22745483b3d12ccb5c160f0bf3c3 | [
"MIT"
] | 41 | 2018-06-21T09:36:23.000Z | 2021-05-07T22:06:25.000Z | tests/test_dataset.py | kilsen512/triplet-reid-pytorch | 0cd2c2ac638d22745483b3d12ccb5c160f0bf3c3 | [
"MIT"
] | 3 | 2018-06-20T10:35:32.000Z | 2020-11-17T20:16:03.000Z | tests/test_dataset.py | kilsen512/triplet-reid-pytorch | 0cd2c2ac638d22745483b3d12ccb5c160f0bf3c3 | [
"MIT"
] | 10 | 2018-05-18T07:34:56.000Z | 2021-01-16T00:02:12.000Z | import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import unittest
from csv_dataset import make_dataset
class TestMarket(unittest.TestCase):
def test_make_dataset(self):
csv_file = "~/Projects/cupsizes/data/market1501_train.csv"
data_dir = "~/Projects/triplet-reid-pytorch/datasets/Market-1501"
limit = 200
data = make_dataset(csv_file, data_dir, limit)
self.assertEqual(len(data), limit)
if __name__ == "__main__":
unittest.main()
| 30.8 | 97 | 0.600649 | 68 | 616 | 5.132353 | 0.558824 | 0.063037 | 0.08596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025522 | 0.300325 | 616 | 19 | 98 | 32.421053 | 0.784223 | 0 | 0 | 0 | 0 | 0 | 0.170455 | 0.157468 | 0 | 0 | 0 | 0 | 0.071429 | 1 | 0.071429 | false | 0 | 0.285714 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77bb5640e0e164a57f7b3d1cb8fe5afab8675900 | 3,695 | py | Python | help/forms.py | pincoin/rakmai | d9daa399aff50712a86b2dec9d94e622237b25b0 | [
"MIT"
] | 11 | 2018-04-02T16:36:19.000Z | 2019-07-10T05:54:58.000Z | help/forms.py | pincoin/rakmai | d9daa399aff50712a86b2dec9d94e622237b25b0 | [
"MIT"
] | 22 | 2019-01-01T20:40:21.000Z | 2022-02-10T08:06:39.000Z | help/forms.py | pincoin/rakmai | d9daa399aff50712a86b2dec9d94e622237b25b0 | [
"MIT"
] | 4 | 2019-03-12T14:24:37.000Z | 2022-01-07T16:20:22.000Z | from crispy_forms.helper import (
FormHelper, Layout
)
from crispy_forms.layout import (
HTML, Fieldset, Submit
)
from django import forms
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from shop import models
class FaqMessageAdminForm(forms.ModelForm):
class Meta:
model = models.FaqMessage
fields = ('category', 'title', 'content', 'store', 'position')
class NoticeMessageAdminForm(forms.ModelForm):
class Meta:
model = models.NoticeMessage
fields = ('category', 'title', 'content', 'store')
class CustomerQuestionForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.store_code = kwargs.pop('store_code', 'default')
self.page = kwargs.pop('page', 1)
super(CustomerQuestionForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.include_media = False
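# include_media=False stops crispy-forms from auto-injecting widget
# CSS/JS here; asset loading is assumed to be handled by the templates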
self.helper.form_class = 'form'
self.helper.layout = Layout(
Fieldset(
'', # Hide the legend of fieldset (HTML tag)
'category',
'title',
'content',
),
HTML('''
<button type="submit" class="btn btn-block btn-lg btn-primary my-2">
<i class="fas fa-pencil-alt"></i> {}
</button>
<hr>
<a href="{}?page={}" class="btn btn-block btn-lg btn-outline-secondary my-2">
<i class="fas fa-list"></i> {}
</a>
'''.format(_('Write'), reverse('help:question-list', args=(self.store_code,)), self.page, _('List'))),
)
class Meta:
model = models.CustomerQuestion
fields = (
'category', 'title', 'content', # 'owner', 'store'
)
class TestimonialsForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.store_code = kwargs.pop('store_code', 'default')
self.page = kwargs.pop('page', 1)
super(TestimonialsForm, self).__init__(*args, **kwargs)
self.fields['title'].help_text = False
self.helper = FormHelper()
self.helper.include_media = False
self.helper.form_class = 'form'
self.helper.layout = Layout(
Fieldset(
'', # Hide the legend of fieldset (HTML tag)
'title',
'content',
),
HTML('''
<button type="submit" class="btn btn-block btn-lg btn-primary my-2">
<i class="fas fa-pencil-alt"></i> {}
</button>
<hr>
<a href="{}?page={}" class="btn btn-block btn-lg btn-outline-secondary my-2">
<i class="fas fa-list"></i> {}
</a>
'''.format(_('Write'), reverse('help:testimonials-list', args=(self.store_code,)), self.page, _('List'))),
)
class Meta:
model = models.Testimonials
fields = (
'title', 'content', # 'owner', 'store'
)
class TestimonialsAnswerForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.store_code = kwargs.pop('store_code', 'default')
self.testimonial = kwargs.pop('testimonial', 0)
super(TestimonialsAnswerForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_action = reverse('help:testimonials-answer', args=(self.store_code, self.testimonial))
self.helper.add_input(Submit('submit', _('Post Answer'), css_class='btn btn-lg btn-block btn-primary'))
self.helper.form_method = 'POST'
class Meta:
model = models.TestimonialsAnswer
fields = ['content']
| 31.853448 | 118 | 0.568065 | 394 | 3,695 | 5.19797 | 0.238579 | 0.058594 | 0.041016 | 0.048828 | 0.638672 | 0.561035 | 0.527832 | 0.527832 | 0.527832 | 0.495605 | 0 | 0.002654 | 0.286062 | 3,695 | 115 | 119 | 32.130435 | 0.773692 | 0.030041 | 0 | 0.555556 | 0 | 0.044444 | 0.286952 | 0.024588 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.066667 | 0 | 0.211111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77c4ac6e0829dc7178a6b601892640733f30e784 | 2,736 | py | Python | hmrc_sdes/tests/test_client.py | uktrade/tamato | 4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca | [
"MIT"
] | 14 | 2020-03-25T11:11:29.000Z | 2022-03-08T20:41:33.000Z | hmrc_sdes/tests/test_client.py | uktrade/tamato | 4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca | [
"MIT"
] | 352 | 2020-03-25T10:42:09.000Z | 2022-03-30T15:32:26.000Z | hmrc_sdes/tests/test_client.py | uktrade/tamato | 4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca | [
"MIT"
] | 3 | 2020-08-06T12:22:41.000Z | 2022-01-16T11:51:12.000Z | import json
import os
from hashlib import md5
from unittest.mock import Mock
import dotenv  # missing import: dotenv.read_dotenv is used below (django-dotenv package)
import pytest
from common.tests import factories
from hmrc_sdes.api_client import HmrcSdesClient
pytestmark = pytest.mark.django_db
def test_sdes_client(responses):
responses.add(
responses.POST,
url="https://test-api.service.hmrc.gov.uk/oauth/token",
json={
"access_token": "access_token",
"token_type": "bearer",
"expires_in": 3600,
"refresh_token": "refresh_token",
"scope": "write:transfer-complete write:transfer-ready",
},
)
client = HmrcSdesClient()
assert len(responses.calls) == 1
responses.add(
responses.POST,
url="https://test-api.service.hmrc.gov.uk/organisations/notification/files/transfer/ready/test-srn",
)
upload = factories.UploadFactory.build(
correlation_id="test-correlation-id",
file=Mock(size=1),
checksum="test-checksum",
)
client.notify_transfer_ready(upload)
request = responses.calls[1].request
hmrc_json = "application/vnd.hmrc.1.0+json"
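# HMRC APIs version their payloads via the media type, so both the
# Accept and Content-Type headers must carry application/vnd.hmrc.1.0+json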
assert request.headers["Accept"] == hmrc_json
assert request.headers["Content-Type"] == f"{hmrc_json}; charset=UTF-8"
assert json.loads(responses.calls[1].request.body) == {
"informationType": "EDM",
"correlationID": upload.correlation_id,
"file": {
"fileName": upload.filename,
"fileSize": upload.file.size,
"checksum": upload.checksum,
"checksumAlgorithm": "MD5",
},
}
@pytest.mark.hmrc_live_api
def test_api_call(responses, settings):
responses.add_passthru(settings.HMRC["base_url"])
# reload settings from env, overriding test settings
dotenv.read_dotenv(os.path.join(settings.BASE_DIR, ".env"))
settings.HMRC["client_id"] = os.environ.get("HMRC_API_CLIENT_ID")
settings.HMRC["client_secret"] = os.environ.get("HMRC_API_CLIENT_SECRET")
settings.HMRC["service_reference_number"] = os.environ.get(
"HMRC_API_SERVICE_REFERENCE_NUMBER"
)
# fetches OAuth2 access token on instantiation
client = HmrcSdesClient()
assert client.get_session().token is not None
# check fraud prevention headers
result = client.get(
f"{client.base_url}/test/fraud-prevention-headers/validate",
).json()
assert result.get("errors") is None
# generate a dummy upload of an empty file with a valid checksum
upload = factories.UploadFactory()
upload.file = Mock(size=0)
upload.checksum = md5("".encode("utf-8")).hexdigest()
response = client.notify_transfer_ready(upload)
assert response.status_code == 204 # no data on success
| 30.065934 | 108 | 0.666667 | 328 | 2,736 | 5.42378 | 0.396341 | 0.02923 | 0.025295 | 0.026981 | 0.136594 | 0.091062 | 0.062957 | 0.062957 | 0.062957 | 0.062957 | 0 | 0.009259 | 0.210526 | 2,736 | 90 | 109 | 30.4 | 0.814352 | 0.076023 | 0 | 0.090909 | 0 | 0.015152 | 0.256939 | 0.074148 | 0 | 0 | 0 | 0 | 0.106061 | 1 | 0.030303 | false | 0.015152 | 0.106061 | 0 | 0.136364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |