hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringdate 2015-01-01 00:00:47 2022-03-31 23:42:18 ⌀ | max_issues_repo_issues_event_max_datetime stringdate 2015-01-01 17:43:30 2022-03-31 23:59:58 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fec502d06e7f6b4c2323778a4f480e3ca87b83f7 | 243 | py | Python | appointment_booking/appointment_booking/doctype/visitor_appointment/tasks.py | smarty-india/appointment_booking | 781b8883b749d78d543b21f39f9c1a12f16033ae | [
"MIT"
] | 1 | 2021-02-10T05:13:29.000Z | 2021-02-10T05:13:29.000Z | appointment_booking/appointment_booking/doctype/visitor_appointment/tasks.py | smarty-india/appointment_booking | 781b8883b749d78d543b21f39f9c1a12f16033ae | [
"MIT"
] | null | null | null | appointment_booking/appointment_booking/doctype/visitor_appointment/tasks.py | smarty-india/appointment_booking | 781b8883b749d78d543b21f39f9c1a12f16033ae | [
"MIT"
] | 7 | 2020-09-23T13:10:29.000Z | 2021-12-28T19:03:34.000Z | import frappe
def set_appointment_as_pending():
    """Scheduled task: mark past-dated Visitor Appointments as 'Pending'.

    Only rows with a non-null appointment_date strictly before today are
    touched, and appointments already in a terminal state ('Closed' or
    'Cancelled') are left alone.
    """
    frappe.db.sql("""update `tabVisitor Appointment` set `status`='Pending'
        where appointment_date is not null
        and appointment_date < CURDATE()
        and `status` not in ('Closed', 'Cancelled')""")
fec6c828f7c2c56e87c8344597efe1d8c44178c3 | 986 | py | Python | hood/urls.py | virginiah894/Hood-alert | 9c00ca7e4bec3d8c46ff4b9b74f2f770f1c60873 | [
"MIT"
] | 1 | 2020-03-10T18:01:51.000Z | 2020-03-10T18:01:51.000Z | hood/urls.py | virginiah894/Hood-alert | 9c00ca7e4bec3d8c46ff4b9b74f2f770f1c60873 | [
"MIT"
] | 4 | 2020-06-06T01:09:13.000Z | 2021-09-08T01:36:28.000Z | hood/urls.py | virginiah894/Hood-alert | 9c00ca7e4bec3d8c46ff4b9b74f2f770f1c60873 | [
"MIT"
] | null | null | null | from django.urls import path , include
from . import views
from django.conf import settings
from django.conf.urls.static import static
# Route table for the hood app; the `name=` values are used by {% url %}
# lookups in templates and by reverse() in views/models.
urlpatterns = [
    path('', views.home,name='home'),
    path('profile/', views.profile , name = 'profile'),
    path('update_profile/',views.update_profile,name='update'),
    path('updates/', views.updates, name='updates'),
    path('new/update', views.new_update, name = 'newUpdate'),
    path('posts', views.post, name='post'),
    path('new/post', views.new_post, name='newPost'),
    path('health', views.hosy, name='hosy'),
    path('search', views.search_results, name = 'search_results'),
    path('adminst', views.administration, name='admin'),
    path('business', views.local_biz, name='biz'),
    path('new/business', views.new_biz, name='newBiz'),
    path('create/profile',views.create_profile, name='createProfile'),
]
# Serve user-uploaded media directly from Django in development only;
# production should serve MEDIA_ROOT from the web server instead.
if settings.DEBUG:
    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
fec70c2989068076b5623aeccec1da14a757918e | 962 | py | Python | base/client/TargetTracker.py | marlamade/generals-bot | b485e416a2c4fc307e7d015ecdb70e278c4c1417 | [
"MIT"
] | null | null | null | base/client/TargetTracker.py | marlamade/generals-bot | b485e416a2c4fc307e7d015ecdb70e278c4c1417 | [
"MIT"
] | null | null | null | base/client/TargetTracker.py | marlamade/generals-bot | b485e416a2c4fc307e7d015ecdb70e278c4c1417 | [
"MIT"
] | null | null | null | from typing import List
from .tile import Tile
class TargetTracker(list):
    """List-backed collection of candidate tiles to attack or explore.

    Fresh candidates are appended at the tail; get_target() consumes from
    the tail and throws the whole list away once it is considered stale.
    """

    def __init__(self, *args, **kwargs):
        list.__init__(self, *args, **kwargs)
        # Turn number of the most recent update_list() call.
        self.turn_last_updated: int = 0

    def update_list(self, target_list: List[Tile], current_turn):
        """Append a batch of candidate tiles and record when they arrived."""
        for tile in target_list:
            self.append(tile)
        self.turn_last_updated = current_turn

    def get_target(self, turn):
        """Return the newest usable target, or None when none remain.

        The entire list is discarded when it is stale relative to *turn*.
        Tiles flagged as city/mountain/basic/general are popped off the
        tail until a usable candidate is found.
        """
        is_stale = turn > 1.5 * self.turn_last_updated + 30
        if is_stale:
            # Too old to trust; drop every queued candidate.
            del self[:]
        while self:
            candidate = self[-1]
            unusable = (candidate.is_city or candidate.is_mountain
                        or candidate.is_basic or candidate.is_general)
            if not unusable:
                return candidate
            self.pop()
        return None
| 29.151515 | 92 | 0.591476 |
fec8bbb3f41ea8513300db1174bf26c5ac72fcf6 | 7,546 | py | Python | chatbrick/brick/shortener.py | BluehackRano/cb-wh | ecf11100ad83df71eac9d56f6abbd59ceeda9d83 | [
"MIT"
] | null | null | null | chatbrick/brick/shortener.py | BluehackRano/cb-wh | ecf11100ad83df71eac9d56f6abbd59ceeda9d83 | [
"MIT"
] | null | null | null | chatbrick/brick/shortener.py | BluehackRano/cb-wh | ecf11100ad83df71eac9d56f6abbd59ceeda9d83 | [
"MIT"
] | 1 | 2019-03-05T06:50:11.000Z | 2019-03-05T06:50:11.000Z | import logging
import time
import blueforge.apis.telegram as tg
import requests
import urllib.parse
import json
from blueforge.apis.facebook import Message, ImageAttachment, QuickReply, QuickReplyTextItem, TemplateAttachment, \
GenericTemplate, Element, PostBackButton, ButtonTemplate, UrlButton
logger = logging.getLogger(__name__)

# Cover image sent when this brick's conversation starts.
BRICK_DEFAULT_IMAGE = 'https://www.chatbrick.io/api/static/brick/img_brick_20_001.png'
class Shortener(object):
    """Chatbrick brick that shortens URLs via Naver's shorturl API.

    Drives the same three-step conversation ('get_started' -> 'short' ->
    'final') for both the Facebook Messenger and Telegram front ends.
    """

    def __init__(self, fb, brick_db):
        # fb: send-API client (used for both platforms below);
        # brick_db: per-conversation state store (save/get/delete).
        self.brick_db = brick_db
        self.fb = fb

    @staticmethod
    async def get_short_url(client_id, client_secret, long_url):
        """Call Naver's shorturl endpoint and return the decoded JSON payload.

        NOTE(review): the local name 'face_detection' looks like a copy/paste
        leftover from another brick, and requests.get() blocks the event loop
        inside this coroutine -- consider renaming and an async HTTP client.
        """
        face_detection = requests.get(
            'https://openapi.naver.com/v1/util/shorturl?%s' % urllib.parse.urlencode({
                'url': long_url
            }),
            headers={
                'Content-Type': 'application/json',
                'X-Naver-Client-Id': client_id,
                'X-Naver-Client-Secret': client_secret,
            }
        )
        return face_detection.json()

    async def facebook(self, command):
        """Handle one step of the Facebook Messenger conversation."""
        if command == 'get_started':
            # Intro card plus a card carrying the 'shorten' postback button.
            send_message = [
                Message(
                    attachment=TemplateAttachment(
                        payload=GenericTemplate(
                            elements=[
                                Element(image_url=BRICK_DEFAULT_IMAGE,
                                        title='URL 줄이기 서비스',
                                        subtitle='Naver Developers에서 제공하는 "URL 줄이기 서비스"에요.')
                            ]
                        )
                    )
                ),
                Message(
                    attachment=TemplateAttachment(
                        payload=GenericTemplate(
                            elements=[
                                Element(
                                    image_url='https://www.chatbrick.io/api/static/brick/img_brick_23_002.png',
                                    title='URL 줄이기',
                                    subtitle='너무 길었던 URL을 줄여드려요.',
                                    buttons=[
                                        PostBackButton(
                                            title='줄이기',
                                            payload='brick|shortener|short'
                                        )
                                    ]
                                )
                            ]
                        )
                    )
                )
            ]
            await self.fb.send_messages(send_message)
        elif command == 'short':
            # Begin collecting the URL to shorten from the user.
            await self.brick_db.save()
        elif command == 'final':
            # The collected URL is the first stored value for this session.
            input_data = await self.brick_db.get()
            url = input_data['store'][0]['value']
            result = await Shortener.get_short_url(input_data['data']['client_id'],
                                                   input_data['data']['client_secret'], url)
            if result.get('errorCode', False):
                send_message = [
                    Message(
                        text='에러가 발생했습니다.\n다시 시도해주세요.'
                    )
                ]
                logger.error(result)
            else:
                send_message = [
                    Message(
                        text='줄여진 URL 결과에요.'
                    ),
                    Message(
                        text='%s' % result['result'].get('url', ''),
                        quick_replies=QuickReply(
                            quick_reply_items=[
                                QuickReplyTextItem(
                                    title='다른 URL 줄이기',
                                    payload='brick|shortener|short'
                                )
                            ]
                        )
                    )
                ]
            await self.fb.send_messages(send_message)
            # Conversation finished: clear the per-session state.
            await self.brick_db.delete()
        return None

    async def telegram(self, command):
        """Handle one step of the Telegram conversation.

        NOTE(review): self.fb is reused as the Telegram send client here --
        confirm the injected client matches the platform.
        """
        if command == 'get_started':
            send_message = [
                tg.SendPhoto(
                    photo=BRICK_DEFAULT_IMAGE
                ),
                tg.SendMessage(
                    text='Naver Developers에서 제공하는 "URL 줄이기 서비스"에요.',
                    reply_markup=tg.MarkUpContainer(
                        inline_keyboard=[
                            [
                                tg.CallbackButton(
                                    text='URL 줄이기',
                                    callback_data='BRICK|shortener|short'
                                )
                            ]
                        ]
                    )
                )
            ]
            await self.fb.send_messages(send_message)
        elif command == 'short':
            await self.brick_db.save()
        elif command == 'final':
            input_data = await self.brick_db.get()
            url = input_data['store'][0]['value']
            result = await Shortener.get_short_url(input_data['data']['client_id'],
                                                   input_data['data']['client_secret'], url)
            if result.get('errorCode', False):
                send_message = [
                    tg.SendMessage(
                        text='에러가 발생했습니다.\n다시 시도해주세요.'
                    )
                ]
                logger.error(result)
            else:
                send_message = [
                    tg.SendMessage(
                        text='줄여진 URL 결과에요.'
                    ),
                    tg.SendMessage(
                        text='%s' % result['result'].get('url', ''),
                        reply_markup=tg.MarkUpContainer(
                            inline_keyboard=[
                                [
                                    tg.CallbackButton(
                                        text='다른 URL 줄이기',
                                        callback_data='BRICK|shortener|short'
                                    )
                                ]
                            ]
                        )
                    )
                ]
            await self.fb.send_messages(send_message)
            await self.brick_db.delete()
        return None
| 37.542289 | 115 | 0.373178 |
fec8fbc55d1af1209c9e7e098a82c13f771956eb | 1,195 | py | Python | ask/qa/models.py | nikitabray/web | ef2e1a6ed2e917b0398622c488be2f222742b882 | [
"Unlicense"
] | null | null | null | ask/qa/models.py | nikitabray/web | ef2e1a6ed2e917b0398622c488be2f222742b882 | [
"Unlicense"
] | null | null | null | ask/qa/models.py | nikitabray/web | ef2e1a6ed2e917b0398622c488be2f222742b882 | [
"Unlicense"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
# Create your models here.
class QuestionManager(models.Manager):
    """Manager exposing the two orderings used by the question list views."""

    def new(self):
        # Newest questions first (descending primary key).
        return self.order_by('-id')

    def popular(self):
        # Highest-rated questions first.
        return self.order_by('-rating')
class Question(models.Model):
    """A user-submitted question with a rating and per-user likes."""

    objects = QuestionManager()  # adds the .new() / .popular() orderings
    title = models.CharField(max_length=255)
    text = models.TextField(blank=True)
    added_at = models.DateTimeField(auto_now_add=True)
    rating = models.IntegerField(default=0)
    # NOTE(review): default=1 assumes a User with pk=1 always exists -- confirm.
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='question_user', default=1)
    likes = models.ManyToManyField(User, related_name='question_like_user')

    class Meta:
        verbose_name = 'Question'
        ordering = ['-id']  # newest first by default

    def get_absolute_url(self):
        # Reverse the 'question' detail route for this instance.
        return reverse('question', kwargs={'question_id': self.pk})
class Answer(models.Model):
    """An answer attached to a Question."""

    text = models.TextField()
    added_at = models.DateTimeField(auto_now_add=True)
    # Deleting a question cascades to its answers.
    question = models.ForeignKey(
        Question,
        on_delete=models.CASCADE,
    )
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='answer_user')
| 31.447368 | 103 | 0.708787 |
fec96362f67167dcf46b5bbb0c6f46d9d1526eeb | 368 | py | Python | 6/max_average_subarray1.py | IronCore864/leetcode | a62a4cdde9814ae48997176debcaad537f7ad01f | [
"Apache-2.0"
] | 4 | 2018-03-07T02:56:03.000Z | 2021-06-15T05:43:31.000Z | 6/max_average_subarray1.py | IronCore864/leetcode | a62a4cdde9814ae48997176debcaad537f7ad01f | [
"Apache-2.0"
] | null | null | null | 6/max_average_subarray1.py | IronCore864/leetcode | a62a4cdde9814ae48997176debcaad537f7ad01f | [
"Apache-2.0"
] | 1 | 2021-09-02T12:05:15.000Z | 2021-09-02T12:05:15.000Z | class Solution:
def findMaxAverage(self, nums: List[int], k: int) -> float:
pre_sum = sum(nums[0:k])
max_sum = pre_sum
for i in range(len(nums)-k):
next_sum = pre_sum - nums[i] + nums[i + k]
if next_sum > max_sum:
max_sum = next_sum
pre_sum = next_sum
return max_sum/k
| 26.285714 | 63 | 0.516304 |
fec9f02854eb9eb4fafaedb66ec68d2f2a2ba154 | 152 | py | Python | meuCursoEmVideo/mundo1/ex008.py | FelipeSilveiraL/EstudoPython | 8dc6cb70415badd180a1375da68f9dc9cb8fc8df | [
"MIT"
] | null | null | null | meuCursoEmVideo/mundo1/ex008.py | FelipeSilveiraL/EstudoPython | 8dc6cb70415badd180a1375da68f9dc9cb8fc8df | [
"MIT"
] | null | null | null | meuCursoEmVideo/mundo1/ex008.py | FelipeSilveiraL/EstudoPython | 8dc6cb70415badd180a1375da68f9dc9cb8fc8df | [
"MIT"
] | null | null | null | n = float(input("informe um medida em metros: "));
# Convert the metre reading (n, read above) to centimetres and millimetres.
cm = n * 100
mm = n * 1000
print('A medida {}M é correspondente a {}Cm e {}Mm'.format(n, cm, mm))
fecbabb08af60d46436a84bbcfcf8d984bfc2f0d | 301 | py | Python | import_descendants/test_example/__init__.py | ZumatechLtd/import-descendants | ad3dd65ae74dd98ae1eec68fad3b1fa775a5d74f | [
"Unlicense"
] | null | null | null | import_descendants/test_example/__init__.py | ZumatechLtd/import-descendants | ad3dd65ae74dd98ae1eec68fad3b1fa775a5d74f | [
"Unlicense"
] | null | null | null | import_descendants/test_example/__init__.py | ZumatechLtd/import-descendants | ad3dd65ae74dd98ae1eec68fad3b1fa775a5d74f | [
"Unlicense"
] | 1 | 2020-03-23T13:59:40.000Z | 2020-03-23T13:59:40.000Z | # -*- coding: utf-8 -*-
# (c) 2013 Bright Interactive Limited. All rights reserved.
# http://www.bright-interactive.com | info@bright-interactive.com
from import_descendants import import_descendants
import sys
# Import every descendant module/subpackage of this package into its namespace.
this_module = sys.modules[__name__]
import_descendants(this_module, globals(), locals())
| 33.444444 | 65 | 0.774086 |
feccebf8b7f5ab31a62544c1a696cbcf12f4d112 | 1,264 | py | Python | DelibeRating/DelibeRating/env/Lib/site-packages/tests/test_widgets.py | Severose/DelibeRating | 5d227f35c071477ce3fd6fbf3ab13a44d13f6e08 | [
"MIT"
] | 1 | 2018-11-01T15:05:12.000Z | 2018-11-01T15:05:12.000Z | DelibeRating/DelibeRating/env/Lib/site-packages/tests/test_widgets.py | Severose/DelibeRating | 5d227f35c071477ce3fd6fbf3ab13a44d13f6e08 | [
"MIT"
] | null | null | null | DelibeRating/DelibeRating/env/Lib/site-packages/tests/test_widgets.py | Severose/DelibeRating | 5d227f35c071477ce3fd6fbf3ab13a44d13f6e08 | [
"MIT"
] | null | null | null | import pytest
from tempus_dominus import widgets
def test_datepicker_format_localized(settings):
    """With localization on, DatePicker uses moment's locale-aware date token."""
    settings.TEMPUS_DOMINUS_LOCALIZE = True
    widget = widgets.DatePicker()
    assert widget.get_js_format() == 'L'
def test_datepicker_format_nonlocalized(settings):
    """With localization off, DatePicker uses a fixed ISO-style date format."""
    settings.TEMPUS_DOMINUS_LOCALIZE = False
    widget = widgets.DatePicker()
    assert widget.get_js_format() == 'YYYY-MM-DD'
def test_timepicker_format_localized(settings):
    """With localization on, TimePicker uses moment's locale time-with-seconds token."""
    settings.TEMPUS_DOMINUS_LOCALIZE = True
    widget = widgets.TimePicker()
    assert widget.get_js_format() == 'LTS'
def test_timepicker_format_nonlocalized(settings):
    """With localization off, TimePicker uses a fixed 24-hour time format."""
    settings.TEMPUS_DOMINUS_LOCALIZE = False
    widget = widgets.TimePicker()
    assert widget.get_js_format() == 'HH:mm:ss'
def test_datetimepicker_format_localized(settings):
    """With localization on, DateTimePicker combines the locale date and time tokens."""
    settings.TEMPUS_DOMINUS_LOCALIZE = True
    widget = widgets.DateTimePicker()
    assert widget.get_js_format() == 'L LTS'
def test_datetimepicker_format_nonlocalized(settings):
    """With localization off, DateTimePicker uses a fixed ISO-style datetime format."""
    settings.TEMPUS_DOMINUS_LOCALIZE = False
    widget = widgets.DateTimePicker()
    assert widget.get_js_format() == 'YYYY-MM-DD HH:mm:ss'
def test_get_js_format_error():
    """The mixin's get_js_format is abstract and must raise when not overridden."""
    with pytest.raises(NotImplementedError):
        widgets.TempusDominusMixin().get_js_format()
| 28.088889 | 58 | 0.761867 |
fece96dc896e75a634255768c6898114b3c6f1c0 | 9,568 | py | Python | maps/foliumMaps.py | selinerguncu/Yelp-Spatial-Analysis | befbcb927ef225bda9ffaea0fd41a88344f9693c | [
"MIT"
] | null | null | null | maps/foliumMaps.py | selinerguncu/Yelp-Spatial-Analysis | befbcb927ef225bda9ffaea0fd41a88344f9693c | [
"MIT"
] | null | null | null | maps/foliumMaps.py | selinerguncu/Yelp-Spatial-Analysis | befbcb927ef225bda9ffaea0fd41a88344f9693c | [
"MIT"
] | null | null | null | import folium
from folium import plugins
import numpy as np
import sqlite3 as sqlite
import os
import sys
import pandas as pd
#extract data from yelp DB and clean it:
# NOTE(review): absolute, machine-specific DB path and a module-level shared
# sqlite connection -- fine for a single-user script, unsafe for concurrent use.
DB_PATH = "/Users/selinerguncu/Desktop/PythonProjects/Fun Projects/Yelp/data/yelpCleanDB.sqlite"
conn = sqlite.connect(DB_PATH)
#######################################
############ organize data ############
#######################################
def organizeData(mapParameters):
    """Query CleanBusinessData for the businesses matching the map filters.

    mapParameters: dict with 'business' (query_category), 'region', 'price'
    and 'rating' keys. Returns a pandas DataFrame; when it is small enough
    to map (<= 1860 rows), missing longitude/latitude values are backfilled
    from the query_* columns.

    BUGFIXES vs the original:
    - SQL was built with '%r' around string values, producing doubly-quoted,
      broken WHERE clauses for the region filters.
    - The 'San Francisco' branch mixed a '?' placeholder with %-formatting,
      raising TypeError ("not all arguments converted").
    - The SELECT listed query_latitude twice and never selected
      query_longitude, so the backfill loop below raised KeyError.
    All values now go through sqlite3 qmark parameter binding, which also
    removes the SQL-injection risk of string interpolation.
    """
    business = str(mapParameters['business'])
    region = str(mapParameters['region'])
    price = str(mapParameters['price'])
    rating = float(mapParameters['rating'])
    print('mapParameters', mapParameters)

    sql = ("SELECT longitude, latitude, query_longitude, query_latitude, "
           "query_category, query_price, city, zip_code, price, rating, "
           "review_count, region "
           "FROM CleanBusinessData "
           "WHERE query_category = ? AND price = ? AND rating = ?")
    params = [business, price, rating]

    # Per-region filter, preserving the original branch semantics.
    if region == 'Bay Area':
        sql += " AND city != ?"
        params.append('San Francisco')
    elif region == 'Peninsula':
        sql += " AND city != ? AND city != ? AND city != ?"
        params.extend(['San Francisco', 'San Francisco - Downtown',
                       'San Francisco - Outer'])
    elif region == 'San Francisco':
        sql += " AND city = ?"
        params.append('San Francisco')
    elif region == 'Downtown SF':
        sql += " AND city = ?"
        params.append('San Francisco - Downtown')
    elif region == 'Outer SF':
        sql += " AND city = ?"
        params.append('San Francisco - Outer')
    elif region == 'East Bay':
        sql += " AND region = ?"
        params.append('eastBay')
    elif region == 'North Bay':
        sql += " AND region = ?"
        params.append('northBay')
    else:
        # Fallback: filter directly on the stored region value.
        sql += " AND region = ?"
        params.append(region)

    coordinates = pd.read_sql_query(sql, conn, params=params)

    if len(coordinates) <= 1860:
        # Backfill missing coordinates from the query_* columns so every
        # row can be placed on the map.
        for i in range(len(coordinates)):
            if coordinates["longitude"][i] is None:
                coordinates.loc[i, "longitude"] = coordinates["query_longitude"][i]
            if coordinates["latitude"][i] is None:
                coordinates.loc[i, "latitude"] = coordinates["query_latitude"][i]
    return coordinates
# else:
# print("Too many data points; cannot be mapped!")
#######################################
##### visualize the coordinates #######
#######################################
def makeMarkerMap(coordinates):
    """Drop one clickable marker per business and save the map as HTML.

    coordinates: DataFrame with latitude/longitude/rating/review_count/price
    columns (as returned by organizeData). Each marker's popup shows the
    rating, review count ("popularity") and price tier.
    """
    print('coordinates', len(coordinates))
    # Centre the map on the mean business location.
    meanlat = np.mean(coordinates['latitude'])
    meanlon = np.mean(coordinates['longitude'])
    #Initialize map
    mapa = folium.Map(location=[meanlat, meanlon],
                      tiles='Cartodb Positron', zoom_start=10)
    # add markers
    for i in range(len(coordinates)):
        # create popup on click
        html="""
        Rating: {}<br>
        Popularity: {}<br>
        Price: {}<br>
        """
        html = html.format(coordinates["rating"][i],\
                           coordinates["review_count"][i],\
                           coordinates["price"][i])
        # NOTE(review): folium's documented popup wrappers are folium.IFrame /
        # branca.element.Div; confirm folium.Div exists in the pinned version.
        iframe = folium.Div(html=html, width=150, height=100) #element yok
        popup = folium.Popup(iframe, max_width=2650)
        # add marker to map
        folium.Marker(tuple([coordinates['latitude'][i],coordinates['longitude'][i]]), popup=popup,).add_to(mapa)
    return mapa.save("/Users/selinerguncu/Desktop/PythonProjects/Fun Projects/Yelp/yelp/static/foliumMarkers.html")
#######################################
####### cluster nearby points #########
#######################################
def makeClusterMap(coordinates):
    """Render the businesses as clustered markers and save the map as HTML."""
    from folium.plugins import MarkerCluster  # for marker clusters

    # Centre the map on the mean of all business coordinates.
    center_lat = np.mean(coordinates['latitude'])
    center_lon = np.mean(coordinates['longitude'])
    cluster_map = folium.Map(location=[center_lat, center_lon],
                             tiles='Cartodb Positron', zoom_start=10)

    # One (lat, lon) tuple per business row.
    points = [
        (coordinates["latitude"][i], coordinates["longitude"][i])
        for i in range(len(coordinates))
    ]
    cluster_map.add_child(MarkerCluster(locations=points))
    return cluster_map.save("/Users/selinerguncu/Desktop/PythonProjects/Fun Projects/Yelp/yelp/static/foliumCluster.html")
#######################################
####### generate a heat map ###########
#######################################
def makeHeatmapMap(coordinates):
    """Render the businesses as a heat map and save the map as HTML.

    At most the first 1090 rows are plotted (max len is 1090 for the
    Heat Map plugin).
    """
    from folium.plugins import HeatMap

    center_lat = np.mean(coordinates['latitude'])
    center_lon = np.mean(coordinates['longitude'])
    heat_map = folium.Map(location=[center_lat, center_lon],
                          tiles='Cartodb Positron', zoom_start=10)

    limit = min(len(coordinates), 1090)
    points = [
        (coordinates["latitude"][i], coordinates["longitude"][i])
        for i in range(limit)
    ]
    heat_map.add_child(HeatMap(points))
    return heat_map.save("/Users/selinerguncu/Desktop/PythonProjects/Fun Projects/Yelp/yelp/static/foliumHeatmap.html")
# saving the map as an image doesnt seem to work
# import os
# import time
# from selenium import webdriver
# from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# # for different tiles: https://github.com/python-visualization/folium
# delay=5
# fn='foliumHeatmap.html'
# tmpurl='file:///Users/selinerguncu/Desktop/PythonProjects/Fun%20Projects/Yelp%20Project/Simulation/foliumHeatmap.html'.format(path=os.getcwd(),mapfile=fn)
# mapa.save(fn)
# firefox_capabilities = DesiredCapabilities.FIREFOX
# firefox_capabilities['marionette'] = True
# browser = webdriver.Firefox(capabilities=firefox_capabilities, executable_path='/Users/selinerguncu/Downloads/geckodriver')
# browser.get(tmpurl)
# #Give the map tiles some time to load
# time.sleep(delay)
# browser.save_screenshot('mynewmap.png')
# browser.quit()
| 44.502326 | 302 | 0.666074 |
fecede72453f312f65abb3c7e2bbaa8b798ac96a | 352 | py | Python | telethon/client/telegramclient.py | chrizrobert/Telethon | 99711457213a2bb1a844830a3c57536c5fa9b1c2 | [
"MIT"
] | 1 | 2018-10-07T08:31:49.000Z | 2018-10-07T08:31:49.000Z | telethon/client/telegramclient.py | chrizrobert/Telethon | 99711457213a2bb1a844830a3c57536c5fa9b1c2 | [
"MIT"
] | null | null | null | telethon/client/telegramclient.py | chrizrobert/Telethon | 99711457213a2bb1a844830a3c57536c5fa9b1c2 | [
"MIT"
] | 1 | 2018-09-05T14:59:27.000Z | 2018-09-05T14:59:27.000Z | from . import (
UpdateMethods, AuthMethods, DownloadMethods, DialogMethods,
ChatMethods, MessageMethods, UploadMethods, MessageParseMethods,
UserMethods
)
class TelegramClient(
    UpdateMethods, AuthMethods, DownloadMethods, DialogMethods,
    ChatMethods, MessageMethods, UploadMethods, MessageParseMethods,
    UserMethods
):
    """Composite client: all behaviour comes from the mixin base classes."""
    pass
| 25.142857 | 68 | 0.775568 |
fecf4c8aeffd0ce28d05065c07b1a272ca60037e | 1,529 | py | Python | great_expectations/data_context/data_context/explorer_data_context.py | andyjessen/great_expectations | 74f7f2aa7b51144f34156ed49490dae4edaa5cb7 | [
"Apache-2.0"
] | null | null | null | great_expectations/data_context/data_context/explorer_data_context.py | andyjessen/great_expectations | 74f7f2aa7b51144f34156ed49490dae4edaa5cb7 | [
"Apache-2.0"
] | null | null | null | great_expectations/data_context/data_context/explorer_data_context.py | andyjessen/great_expectations | 74f7f2aa7b51144f34156ed49490dae4edaa5cb7 | [
"Apache-2.0"
] | null | null | null | import logging
from ruamel.yaml import YAML
from great_expectations.data_context.data_context.data_context import DataContext
logger = logging.getLogger(__name__)

# Module-wide ruamel YAML emitter: block style with 2-space mappings and
# 4/2 sequence indentation.
yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
yaml.default_flow_style = False
class ExplorerDataContext(DataContext):
    """DataContext variant for notebooks: optionally wraps validation
    return objects in ipython widgets via the ExpectationExplorer."""

    def __init__(self, context_root_dir=None, expectation_explorer=True) -> None:
        """
        expectation_explorer: If True, load the expectation explorer manager, which will modify GE return objects \
        to include ipython notebook widgets.
        """
        super().__init__(context_root_dir)
        self._expectation_explorer = expectation_explorer
        if not expectation_explorer:
            return
        # Imported lazily so non-notebook usage never pays for the widget stack.
        from great_expectations.jupyter_ux.expectation_explorer import (
            ExpectationExplorer,
        )
        self._expectation_explorer_manager = ExpectationExplorer()

    def update_return_obj(self, data_asset, return_obj):
        """Helper called by data_asset.

        Args:
            data_asset: The data_asset whose validation produced the current return object
            return_obj: the return object to update

        Returns:
            return_obj: the return object, potentially changed into a widget by the configured expectation explorer
        """
        if not self._expectation_explorer:
            return return_obj
        return self._expectation_explorer_manager.create_expectation_widget(
            data_asset, return_obj
        )
| 33.23913 | 119 | 0.688685 |
fecf532f1524b2d286c4ac2038b09f2f317636bc | 406 | py | Python | rio_cogeo/errors.py | vincentsarago/rio-cogeo | a758c7befa394568daa7d926c331b5489753a694 | [
"BSD-3-Clause"
] | 159 | 2019-02-12T18:22:30.000Z | 2022-03-23T18:49:47.000Z | rio_cogeo/errors.py | vincentsarago/rio-cogeo | a758c7befa394568daa7d926c331b5489753a694 | [
"BSD-3-Clause"
] | 121 | 2019-01-28T18:00:18.000Z | 2022-03-31T17:54:42.000Z | rio_cogeo/errors.py | vincentsarago/rio-cogeo | a758c7befa394568daa7d926c331b5489753a694 | [
"BSD-3-Clause"
] | 27 | 2019-02-12T23:52:33.000Z | 2022-03-07T14:40:24.000Z | """Rio-Cogeo Errors and Warnings."""
# Emitted when the selected compression scheme discards image data.
class LossyCompression(UserWarning):
    """Rio-cogeo module Lossy compression warning."""
# Emitted when the internal tile/block size does not suit the raster size.
class IncompatibleBlockRasterSize(UserWarning):
    """Rio-cogeo module incompatible raster block/size warning."""
# Root of the rio-cogeo exception hierarchy; catch this for any package error.
class RioCogeoError(Exception):
    """Base exception class."""
# Raised when mutually exclusive or invalid option combinations are supplied.
class IncompatibleOptions(RioCogeoError):
    """Rio-cogeo module incompatible options."""
| 22.555556 | 66 | 0.738916 |
fecfe168fd1f83e2b06ca1bb819712b3c0b0b0b9 | 293 | py | Python | src/songbook/console/_update.py | kipyin/- | 5d372c7d987e6a1da380197c1b990def0d240298 | [
"MIT"
] | 1 | 2021-01-03T10:40:28.000Z | 2021-01-03T10:40:28.000Z | src/songbook/console/_update.py | kipyin/- | 5d372c7d987e6a1da380197c1b990def0d240298 | [
"MIT"
] | null | null | null | src/songbook/console/_update.py | kipyin/- | 5d372c7d987e6a1da380197c1b990def0d240298 | [
"MIT"
] | 1 | 2021-01-03T10:40:29.000Z | 2021-01-03T10:40:29.000Z | import click
@click.group()
def update():
    # Root "update" command group; the subcommands below register on it.
    # (Comment, not docstring, so the CLI help text stays unchanged.)
    pass
@update.command("song")
def _update_song():
pass
@update.command("arrangement")
def _update_arrangement():
pass
@update.command("worship")
def _update_worship():
pass
@update.command("hymn")
def _update_hymn():
pass
| 10.851852 | 30 | 0.675768 |
fecfe7347f543cbcfbae4629f1a3340b7de24b39 | 1,367 | py | Python | util/rmcompile.py | likwoka/ak | e6ac14e202e5a0d8f1b57e3e1a5c5a1ed9ecc14b | [
"Apache-2.0"
] | null | null | null | util/rmcompile.py | likwoka/ak | e6ac14e202e5a0d8f1b57e3e1a5c5a1ed9ecc14b | [
"Apache-2.0"
] | null | null | null | util/rmcompile.py | likwoka/ak | e6ac14e202e5a0d8f1b57e3e1a5c5a1ed9ecc14b | [
"Apache-2.0"
] | null | null | null | '''
Copyright (c) Alex Li 2003. All rights reserved.
'''
__version__ = '0.1'
# NOTE(review): assigning __file__ shadows the module's real path attribute;
# it is only used to render the usage text below.
__file__ = 'rmcompile.py'
import os, getopt, sys
# Compiled-file extensions (Quixote .ptlc and CPython .pyc) to delete.
EXTLIST = ['.ptlc', '.pyc']
def remove(extlist, dirname, files):
    """os.path.walk callback: delete files in *dirname* whose extension is in *extlist*."""
    for filename in files:
        extension = os.path.splitext(filename)[1]
        if extension in extlist:
            os.remove(os.path.join(dirname, filename))
class UsageError(Exception):
    """Raised for bad command-line usage; carries the message main() prints."""
    def __init__(self, msg=''):
        self.msg = msg
# Help text shown for -h/--help; %s is filled with the (shadowed) __file__ name.
usage = '''\nUsage: $python %s [OPTION] dir
Remove all .pyc and .ptlc files in the directory recursively.
Options:
-h, --help display this message\n''' % __file__
def main(argv=None):
    """Entry point: parse options, then recursively delete compiled files.

    Returns a process exit code (2 on usage errors). Written in Python 2
    syntax ('except X, e', 'print >>') -- this module cannot run on Python 3.
    """
    if argv is None:
        argv = sys.argv
    try:
        try:
            opts, args = getopt.getopt(argv[1:], "h", ["help"])
        except getopt.error, msg:
            raise UsageError(msg)
        for opt, arg in opts:
            if opt in ('-h', '--help'):
                raise UsageError(usage)
        if len(args) != 1:
            raise UsageError('E: Wrong number of argument.')
        #LOGIC STARTS HERE
        global EXTLIST
        dir = args[0]
        # Walk the tree, calling remove() in each directory with EXTLIST.
        os.path.walk(dir, remove, EXTLIST)
    except UsageError, err:
        print >> sys.stderr, err.msg
        print >> sys.stderr, 'For help use --help.'
        return 2
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
    sys.exit(main())
| 21.030769 | 64 | 0.547184 |
fed030e5255f1c16fe14660b2bdc69ee621a5da4 | 706 | py | Python | app/integrations/opsgenie.py | cds-snc/sre-bot | b34cdaba357fccbcdbaac1e1ac70ebbe408d7316 | [
"MIT"
] | null | null | null | app/integrations/opsgenie.py | cds-snc/sre-bot | b34cdaba357fccbcdbaac1e1ac70ebbe408d7316 | [
"MIT"
] | 12 | 2022-02-21T18:57:07.000Z | 2022-03-31T03:06:48.000Z | app/integrations/opsgenie.py | cds-snc/sre-bot | b34cdaba357fccbcdbaac1e1ac70ebbe408d7316 | [
"MIT"
] | null | null | null | import json
import os
from urllib.request import Request, urlopen
# Opsgenie API key; read from the environment so it never lands in source control.
OPSGENIE_KEY = os.getenv("OPSGENIE_KEY", None)
def get_on_call_users(schedule):
    """Return the participant names currently on call for *schedule*.

    Any parsing failure (malformed JSON, missing keys) yields an empty list;
    HTTP errors from api_get_request still propagate, as before.
    """
    body = api_get_request(
        f"https://api.opsgenie.com/v2/schedules/{schedule}/on-calls",
        {"name": "GenieKey", "token": OPSGENIE_KEY},
    )
    try:
        payload = json.loads(body)
        return [participant["name"] for participant in payload["data"]["onCallParticipants"]]
    except Exception:
        return []
def api_get_request(url, auth):
    """GET *url* with an Opsgenie-style Authorization header; return the body as text.

    auth: dict with 'name' (scheme, e.g. "GenieKey") and 'token' keys.

    BUGFIX: the original never closed the HTTP response; the context manager
    releases the connection even if read() raises.
    """
    req = Request(url)
    req.add_header("Authorization", f"{auth['name']} {auth['token']}")
    with urlopen(req) as response:  # nosec - Scheme is hardcoded to https
        return response.read().decode("utf-8")
| 28.24 | 81 | 0.651558 |
fed05ac1dfedd9e75b62b9d7eec9b45bc5c84bcd | 366 | py | Python | observatorio/dados/migrations/0007_auto_20201007_1720.py | guerrasao/Observatorio-Socioeconomico-da-COVID-19 | 15457859092a41e539e57af6cc1bc875f3fbdf93 | [
"MIT"
] | null | null | null | observatorio/dados/migrations/0007_auto_20201007_1720.py | guerrasao/Observatorio-Socioeconomico-da-COVID-19 | 15457859092a41e539e57af6cc1bc875f3fbdf93 | [
"MIT"
] | null | null | null | observatorio/dados/migrations/0007_auto_20201007_1720.py | guerrasao/Observatorio-Socioeconomico-da-COVID-19 | 15457859092a41e539e57af6cc1bc875f3fbdf93 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-10-07 20:20
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: add a composite uniqueness constraint so
    each (grafico, variavel) pair appears at most once in VariaveisGrafico."""
    # Must be applied on top of the previous auto-generated migration.
    dependencies = [
        ('dados', '0006_auto_20201007_1630'),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name='variaveisgrafico',
            unique_together={('grafico', 'variavel')},
        ),
    ]
| 20.333333 | 54 | 0.612022 |
fed3744cb0d9a7b7d5b538e2e8bb1083ab7dd9b2 | 688 | py | Python | Part 1 - Data Preprocessing/data_preprocessing.py | Tatvam/Machine-Learning | a18d3f541d99a8fb0cfbe89df358a11d3121b4f5 | [
"MIT"
] | null | null | null | Part 1 - Data Preprocessing/data_preprocessing.py | Tatvam/Machine-Learning | a18d3f541d99a8fb0cfbe89df358a11d3121b4f5 | [
"MIT"
] | null | null | null | Part 1 - Data Preprocessing/data_preprocessing.py | Tatvam/Machine-Learning | a18d3f541d99a8fb0cfbe89df358a11d3121b4f5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 8 15:15:29 2018
@author: tatvam
importing the libraries
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# import the dataset
dataset = pd.read_csv("Data.csv")
# Features: every column except the last; target: the 4th column (index 3).
X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, 3].values
# Splitting the data into training set and test set
# (80/20 split, fixed seed for reproducibility)
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test = train_test_split(X,Y, test_size = 0.2, random_state = 0)
"""# feature scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)"""
fed3cd8321c318f2dc707c9994a2ee0cad04c478 | 785 | py | Python | qiniu_ufop/management/commands/createproject.py | Xavier-Lam/qiniu-ufop | 02c6119c69637cb39e2b73a915e68b77afa07fe3 | [
"MIT"
] | 5 | 2019-06-10T12:53:41.000Z | 2020-12-06T02:57:37.000Z | qiniu_ufop/management/commands/createproject.py | Xavier-Lam/qiniu-ufop | 02c6119c69637cb39e2b73a915e68b77afa07fe3 | [
"MIT"
] | null | null | null | qiniu_ufop/management/commands/createproject.py | Xavier-Lam/qiniu-ufop | 02c6119c69637cb39e2b73a915e68b77afa07fe3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from distutils.dir_util import copy_tree
from kombu.utils.objects import cached_property
import qiniu_ufop
from ..base import BaseCommand
class Command(BaseCommand):
    """Create a new project from the bundled ``project`` template."""

    def execute(self, args, unknown):
        """Copy the packaged template into *args.dir* (created if missing)
        and pin the current qiniu_ufop version in requirements.txt.

        *unknown* (extra CLI tokens) is accepted but unused here.
        """
        src = os.path.join(os.path.dirname(qiniu_ufop.__file__), "project")
        dst = args.dir
        if args.dir and not os.path.exists(dst):
            os.makedirs(dst)
        copy_tree(src, dst)
        requirements = os.path.join(dst, "requirements.txt")
        with open(requirements, "a") as f:
            # write() with an explicit newline instead of writelines():
            # writelines() adds no separators, so the pin could fuse with a
            # template line lacking a trailing newline, and the file was left
            # without a final newline.
            f.write("qiniu_ufop==%s\n" % qiniu_ufop.__version__)

    def add_arguments(self):
        """Register the optional positional ``dir`` argument (default: "")."""
        self.parser.add_argument("dir", default="", nargs="?", help="文件夹")
| 28.035714 | 75 | 0.657325 |
fed4560e0eada1a8875a46b508b9927cb620d08a | 8,991 | py | Python | jenkinsapi_tests/unittests/test_nodes.py | kkpattern/jenkinsapi | 6b0091c5f44e4473c0a3d5addbfdc416bc6515ca | [
"MIT"
] | 556 | 2016-07-27T03:42:48.000Z | 2022-03-31T15:05:19.000Z | jenkinsapi_tests/unittests/test_nodes.py | kkpattern/jenkinsapi | 6b0091c5f44e4473c0a3d5addbfdc416bc6515ca | [
"MIT"
] | 366 | 2016-07-24T02:51:45.000Z | 2022-03-24T17:02:45.000Z | jenkinsapi_tests/unittests/test_nodes.py | kkpattern/jenkinsapi | 6b0091c5f44e4473c0a3d5addbfdc416bc6515ca | [
"MIT"
] | 308 | 2016-08-01T03:35:45.000Z | 2022-03-31T01:06:57.000Z | import pytest
from jenkinsapi.jenkins import Jenkins
from jenkinsapi.nodes import Nodes
from jenkinsapi.node import Node
DATA0 = {
'assignedLabels': [{}],
'description': None,
'jobs': [],
'mode': 'NORMAL',
'nodeDescription': 'the master Jenkins node',
'nodeName': '',
'numExecutors': 2,
'overallLoad': {},
'primaryView': {'name': 'All', 'url': 'http://halob:8080/'},
'quietingDown': False,
'slaveAgentPort': 0,
'unlabeledLoad': {},
'useCrumbs': False,
'useSecurity': False,
'views': [
{'name': 'All', 'url': 'http://halob:8080/'},
{'name': 'FodFanFo', 'url': 'http://halob:8080/view/FodFanFo/'}
]
}
DATA1 = {
'busyExecutors': 0,
'computer': [
{
'actions': [],
'displayName': 'master',
'executors': [{}, {}],
'icon': 'computer.png',
'idle': True,
'jnlpAgent': False,
'launchSupported': True,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': 'Linux (amd64)',
'hudson.node_monitors.ClockMonitor': {'diff': 0},
'hudson.node_monitors.DiskSpaceMonitor': {
'path': '/var/lib/jenkins',
'size': 671924924416
},
'hudson.node_monitors.ResponseTimeMonitor': {'average': 0},
'hudson.node_monitors.SwapSpaceMonitor': {
'availablePhysicalMemory': 3174686720,
'availableSwapSpace': 17163087872,
'totalPhysicalMemory': 16810180608,
'totalSwapSpace': 17163087872
},
'hudson.node_monitors.TemporarySpaceMonitor': {
'path': '/tmp',
'size': 671924924416
}
},
'numExecutors': 2,
'offline': False,
'offlineCause': None,
'oneOffExecutors': [],
'temporarilyOffline': False
},
{
'actions': [],
'displayName': 'bobnit',
'executors': [{}],
'icon': 'computer-x.png',
'idle': True,
'jnlpAgent': False,
'launchSupported': True,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': 'Linux (amd64)',
'hudson.node_monitors.ClockMonitor': {'diff': 4261},
'hudson.node_monitors.DiskSpaceMonitor': {
'path': '/home/sal/jenkins',
'size': 169784860672
},
'hudson.node_monitors.ResponseTimeMonitor': {'average': 29},
'hudson.node_monitors.SwapSpaceMonitor': {
'availablePhysicalMemory': 4570710016,
'availableSwapSpace': 12195983360,
'totalPhysicalMemory': 8374497280,
'totalSwapSpace': 12195983360
},
'hudson.node_monitors.TemporarySpaceMonitor': {
'path': '/tmp',
'size': 249737277440
}
},
'numExecutors': 1,
'offline': True,
'offlineCause': {},
'oneOffExecutors': [],
'temporarilyOffline': False
},
{
'actions': [],
'displayName': 'halob',
'executors': [{}],
'icon': 'computer-x.png',
'idle': True,
'jnlpAgent': True,
'launchSupported': False,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': None,
'hudson.node_monitors.ClockMonitor': None,
'hudson.node_monitors.DiskSpaceMonitor': None,
'hudson.node_monitors.ResponseTimeMonitor': None,
'hudson.node_monitors.SwapSpaceMonitor': None,
'hudson.node_monitors.TemporarySpaceMonitor': None
},
'numExecutors': 1,
'offline': True,
'offlineCause': None,
'oneOffExecutors': [],
'temporarilyOffline': False
}
],
'displayName': 'nodes',
'totalExecutors': 2
}
DATA2 = {
'actions': [],
'displayName': 'master',
'executors': [{}, {}],
'icon': 'computer.png',
'idle': True,
'jnlpAgent': False,
'launchSupported': True,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': 'Linux (amd64)',
'hudson.node_monitors.ClockMonitor': {'diff': 0},
'hudson.node_monitors.DiskSpaceMonitor': {
'path': '/var/lib/jenkins',
'size': 671942561792
},
'hudson.node_monitors.ResponseTimeMonitor': {'average': 0},
'hudson.node_monitors.SwapSpaceMonitor': {
'availablePhysicalMemory': 2989916160,
'availableSwapSpace': 17163087872,
'totalPhysicalMemory': 16810180608,
'totalSwapSpace': 17163087872
},
'hudson.node_monitors.TemporarySpaceMonitor': {
'path': '/tmp',
'size': 671942561792
}
},
'numExecutors': 2,
'offline': False,
'offlineCause': None,
'oneOffExecutors': [],
'temporarilyOffline': False
}
DATA3 = {
'actions': [],
'displayName': 'halob',
'executors': [{}],
'icon': 'computer-x.png',
'idle': True,
'jnlpAgent': True,
'launchSupported': False,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': None,
'hudson.node_monitors.ClockMonitor': None,
'hudson.node_monitors.DiskSpaceMonitor': None,
'hudson.node_monitors.ResponseTimeMonitor': None,
'hudson.node_monitors.SwapSpaceMonitor': None,
'hudson.node_monitors.TemporarySpaceMonitor': None},
'numExecutors': 1,
'offline': True,
'offlineCause': None,
'oneOffExecutors': [],
'temporarilyOffline': False
}
@pytest.fixture(scope='function')
def nodes(monkeypatch):
def fake_jenkins_poll(cls, tree=None): # pylint: disable=unused-argument
return DATA0
monkeypatch.setattr(Jenkins, '_poll', fake_jenkins_poll)
def fake_nodes_poll(cls, tree=None): # pylint: disable=unused-argument
return DATA1
monkeypatch.setattr(Nodes, '_poll', fake_nodes_poll)
jenkins = Jenkins('http://foo:8080')
return jenkins.get_nodes()
def fake_node_poll(self, tree=None): # pylint: disable=unused-argument
"""
Fakes a poll of data by returning the correct section of the DATA1 test block.
"""
for node_poll in DATA1['computer']:
if node_poll['displayName'] == self.name:
return node_poll
return DATA2
def test_repr(nodes):
# Can we produce a repr string for this object
repr(nodes)
def test_baseurl(nodes):
assert nodes.baseurl == 'http://foo:8080/computer'
def test_get_master_node(nodes, monkeypatch):
monkeypatch.setattr(Node, '_poll', fake_node_poll)
node = nodes['master']
assert isinstance(node, Node)
def test_get_nonmaster_node(nodes, monkeypatch):
monkeypatch.setattr(Node, '_poll', fake_node_poll)
node = nodes['halob']
assert isinstance(node, Node)
def test_iterkeys(nodes):
expected_names = set(['master', 'bobnit', 'halob'])
actual_names = set([n for n in nodes.iterkeys()])
assert actual_names == expected_names
def test_keys(nodes):
expected_names = set(['master', 'bobnit', 'halob'])
actual_names = set(nodes.keys())
assert actual_names == expected_names
def items_test_case(nodes_method, monkeypatch):
monkeypatch.setattr(Node, '_poll', fake_node_poll)
expected_names = set(['master', 'bobnit', 'halob'])
actual_names = set()
for name, node in nodes_method():
assert name == node.name
assert isinstance(node, Node)
actual_names.add(name)
assert actual_names == expected_names
def test_iteritems(nodes, monkeypatch):
items_test_case(nodes.iteritems, monkeypatch)
def test_items(nodes, monkeypatch):
items_test_case(nodes.items, monkeypatch)
def values_test_case(nodes_method, monkeypatch):
monkeypatch.setattr(Node, '_poll', fake_node_poll)
expected_names = set(['master', 'bobnit', 'halob'])
actual_names = set()
for node in nodes_method():
assert isinstance(node, Node)
actual_names.add(node.name)
assert actual_names == expected_names
def test_itervalues(nodes, monkeypatch):
values_test_case(nodes.itervalues, monkeypatch)
def test_values(nodes, monkeypatch):
values_test_case(nodes.values, monkeypatch)
| 30.686007 | 82 | 0.571015 |
fed6388f5baf349f9563436e423b3f0bfd27a9e9 | 790 | py | Python | message_gen/legacy/messages/ClientGetCloudHostResponse.py | zadjii/nebula | 50c4ec019c9f7eb15fe105a6c53a8a12880e281c | [
"MIT"
] | 2 | 2020-04-15T11:20:59.000Z | 2021-05-12T13:01:36.000Z | message_gen/legacy/messages/ClientGetCloudHostResponse.py | zadjii/nebula | 50c4ec019c9f7eb15fe105a6c53a8a12880e281c | [
"MIT"
] | 1 | 2018-06-05T04:48:56.000Z | 2018-06-05T04:48:56.000Z | message_gen/legacy/messages/ClientGetCloudHostResponse.py | zadjii/nebula | 50c4ec019c9f7eb15fe105a6c53a8a12880e281c | [
"MIT"
] | 1 | 2018-08-15T06:45:46.000Z | 2018-08-15T06:45:46.000Z | from messages.SessionMessage import SessionMessage
from msg_codes import CLIENT_GET_CLOUD_HOST_RESPONSE as CLIENT_GET_CLOUD_HOST_RESPONSE
__author__ = 'Mike'
class ClientGetCloudHostResponse(SessionMessage):
    """Response message carrying the network endpoint of a cloud host:
    canonical name, IP, TCP port and websocket port."""

    def __init__(self, session_id=None, cname=None, ip=None, port=None, wsport=None):
        super(ClientGetCloudHostResponse, self).__init__(session_id)
        self.type = CLIENT_GET_CLOUD_HOST_RESPONSE
        self.cname, self.ip, self.port, self.wsport = cname, ip, port, wsport

    @staticmethod
    def deserialize(json_dict):
        """Rebuild a message from its JSON dict; raises KeyError on a
        missing field (same contract as the original explicit lookups)."""
        msg = SessionMessage.deserialize(json_dict)
        for field in ('cname', 'ip', 'port', 'wsport'):
            setattr(msg, field, json_dict[field])
        return msg
| 34.347826 | 86 | 0.698734 |
fed71aa40e24235555d670228f89196c28a60884 | 8,072 | py | Python | research/route_diversity/timeline_from_csv.py | jweckstr/journey-diversity-scripts | 7b754c5f47a77ee1d630a0b26d8ec5cf6be202ae | [
"MIT"
] | null | null | null | research/route_diversity/timeline_from_csv.py | jweckstr/journey-diversity-scripts | 7b754c5f47a77ee1d630a0b26d8ec5cf6be202ae | [
"MIT"
] | null | null | null | research/route_diversity/timeline_from_csv.py | jweckstr/journey-diversity-scripts | 7b754c5f47a77ee1d630a0b26d8ec5cf6be202ae | [
"MIT"
] | null | null | null | """
PSEUDOCODE:
Load csv to pandas
csv will be of form: city, event type, event name, year, theme_A, theme_B, theme_C...
City can contain multiple cities, separated by TBD?
Check min and max year
Open figure,
Deal with events in same year, offset a little bit?
For city in cities:tle
for event in events
"""
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from collections import OrderedDict
from numpy import cos, sin, deg2rad, arange
from matplotlib import gridspec
from pylab import Circle
def clean_years(year):
    """Normalize a raw spreadsheet year cell.

    Strings are truncated to their first four characters (dropping trailing
    annotations such as "2005?" or "2005-06"); the literal "?" marker is
    passed through unchanged; everything else is coerced to int.
    """
    if isinstance(year, str):
        year = year[:4]
        if year == "?":
            return year
    return int(year)
def split_to_separate_rows(df, column, split_key):
    """Explode rows whose *column* holds *split_key*-joined values.

    A row like city="A/B" becomes two rows (city="A", city="B"), each
    duplicating the remaining columns of the original row. Returns a new
    DataFrame; *df* itself is not modified.
    """
    exploded = df[column].str.split(split_key, expand=True).stack()
    source_rows = exploded.index.get_level_values(0)
    result = df.loc[source_rows].copy()
    result[column] = exploded.values
    return result
def slot_location(n_slots, which_slot):
    """Return the (x, y) offset for marker *which_slot* of *n_slots* that
    share one (city, year) cell.

    A single marker sits at the centre; multiple markers are spread evenly
    on a circle of radius ``offset`` (module-level constant).
    """
    if n_slots == 1:
        return (0, 0)
    step = 360 / n_slots
    ring = [
        (offset * sin(deg2rad(step * k)), offset * cos(deg2rad(step * k)))
        for k in range(n_slots)
    ]
    return ring[which_slot]
# Input CSVs (absolute paths on the author's machine).
base_path = "/home/clepe/route_diversity/data/plannord_tables/"
themes_path = base_path + "themes.csv"
events_path = base_path + "events.csv"
# Layout constants for the timeline figure (data-coordinate units).
year_length = 1
city_height = 1
size = 0.1
theme_length = 0.5
theme_width = 1
offset = 0.15
event_offset = 0.15
start_year = 2000
end_year = 2024
# Marker color per coarse document category.
color_dict = {"Land use or infrastructure planning": "#66c2a5",
              "Service level analysis or definitions": "#fc8d62",
              "PTN plan or comparison": "#8da0cb",
              "PT strategy": "#e78ac3",
              "Transport system plan or strategy": "#a6d854",
              'Other': "k"}
# Maps fine-grained document types onto the coarse categories above.
type_dict = {"Conference procedings": "Other",
             'PTS whitepaper': "Other",
             'Replies from hearing': "Other",
             'PT authority strategy': "Other",
             'PTS white paper': "Other",
             'PT "product characterization"': "Other",
             'Other': "Other",
             "Infrastructure analysis or plan": "Land use or infrastructure planning",
             "Master planning": "Land use or infrastructure planning",
             "PT service level analysis": "Service level analysis or definitions",
             "PT service level definitions": "Service level analysis or definitions",
             "PTN comparison": "PTN plan or comparison",
             "PTS plan": "PTN plan or comparison",
             "PTS strategy": "PT strategy",
             "Transport system plan": "Transport system plan or strategy",
             "Transport system strategy": "Transport system plan or strategy"}
# Vertical offset and line color per event type, so overlapping event
# lines for one city stay distinguishable.
event_offsets = {"LRT/tram": event_offset,
                 "BHLS or large route overhaul": 0,
                 "BRT/superbus": -1 * event_offset}
event_colors = {"LRT/tram": "g",
                "BHLS or large route overhaul": "#0042FF",
                "BRT/superbus": "#001C6E"}
# Angle (degrees, clockwise from vertical) of each theme "spoke" drawn
# from a document marker.
theme_angles = {"through_routes": 0, "network_simplicity": 120, "trunk_network": 240}
# Load both tables and drop rows with no year at all.
themes_df = pd.read_csv(themes_path)
events_df = pd.read_csv(events_path)
themes_df = themes_df[pd.notnull(themes_df['year'])]
events_df = events_df[pd.notnull(events_df['year'])]
themes_df["year"] = themes_df.apply(lambda x: clean_years(x.year), axis=1)
events_df["year"] = events_df.apply(lambda x: clean_years(x.year), axis=1)
# One row per city for documents tagged with several cities ("A/B").
themes_df = split_to_separate_rows(themes_df, "city", "/")
# Shorten twin-city names so the axis labels fit.
themes_df.loc[themes_df['city'] == "Fredrikstad-Sarpsborg", 'city'] = "F:stad-S:borg"
events_df.loc[events_df['city'] == "Fredrikstad-Sarpsborg", 'city'] = "F:stad-S:borg"
themes_df.loc[themes_df['city'] == "Porsgrunn-Skien", 'city'] = "P:grunn-Skien"
events_df.loc[events_df['city'] == "Porsgrunn-Skien", 'city'] = "P:grunn-Skien"
# Count how many markers land on each (city, year) cell so slot_location
# can fan them out.
# NOTE(review): DataFrame.append was removed in pandas 2.0; this line needs
# pd.concat([...]) on modern pandas — confirm the pinned pandas version.
city_year_slots = {}
for i, row in themes_df[["city", "year"]].append(events_df[["city", "year"]]).iterrows():
    if (row.city, row.year) in city_year_slots.keys():
        city_year_slots[(row.city, row.year)] += 1
    else:
        city_year_slots[(row.city, row.year)] = 1
# Next free slot index per (city, year); advanced while plotting.
city_year_cur_slot = {key: 0 for key, value in city_year_slots.items()}
cities = [x for x in set(themes_df.city.dropna().tolist()) if "/" not in x]
cities.sort(reverse=True)
# Collapse fine-grained document types to the coarse categories.
themes_df["type"] = themes_df.apply(lambda row: type_dict[row.type], axis=1)
types = [x for x in set(themes_df.type.dropna().tolist())]
# --- Main timeline figure -------------------------------------------------
fig = plt.figure()
ax1 = plt.subplot(111)
#gs = gridspec.GridSpec(1, 2, width_ratios=[1, 9])
#ax1 = plt.subplot(gs[1])
#ax2 = plt.subplot(gs[0], sharey=ax1)
"""
gs1 = gridspec.GridSpec(3, 3)
gs1.update(right=.7, wspace=0.05)
ax1 = plt.subplot(gs1[:-1, :])
ax2 = plt.subplot(gs1[-1, :-1])
ax3 = plt.subplot(gs1[-1, -1])
"""
groups = themes_df.groupby('type')
# Events: a horizontal line from the event year to the right edge,
# vertically offset/colored per event type.
for i, row in events_df.iterrows():
    e_offset = event_offsets[row.type]
    c = event_colors[row.type]
    y = city_height * cities.index(row.city) + e_offset
    x = row.year
    ax1.plot([row.year, end_year+1], [y, y], c=c, marker='o', label=row.type, zorder=2, markersize=3)
# Documents: one marker per row, fanned out when several share a
# (city, year) cell, with theme "spokes" radiating from the marker.
for name, group in groups:
    for i, row in group.iterrows():
        n_slots = city_year_slots[(row.city, row.year)]
        cur_slot = city_year_cur_slot[(row.city, row.year)]
        city_year_cur_slot[(row.city, row.year)] += 1
        slot_offset = slot_location(n_slots, cur_slot)
        y = city_height * cities.index(row.city) + slot_offset[0]
        x = row.year + slot_offset[1]
        if row.year < start_year:
            continue
        #circle = Circle((x, y), color=color_dict[name], radius=size, label=name, zorder=5)
        ax1.scatter(x, y, color=color_dict[name], s=5, label=name, zorder=5) #add_patch(circle)
        for theme, angle in theme_angles.items():
            if pd.notnull(row[theme]):
                ax1.plot([x, x + theme_length * sin(deg2rad(angle))], [y, y + theme_length * cos(deg2rad(angle))],
                         c=color_dict[name], zorder=10, linewidth=theme_width)
# De-duplicate legend entries (each label was added once per artist).
handles, labels = ax1.get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
#ax1.legend(by_label.values(), by_label.keys())
# TODO: add year for GTFS feed as vertical line
#ax2 = fig.add_subplot(121, sharey=ax1)
# City labels at the right edge plus a faint baseline per city.
for city in cities:
    y = city_height * cities.index(city)
    x = end_year
    ax1.text(x, y, city, horizontalalignment='left', verticalalignment='center', fontsize=10) #, bbox=dict(boxstyle="square", facecolor='white', alpha=0.5, edgecolor='white'))
    ax1.plot([start_year-1, end_year+1], [y, y], c="grey", alpha=0.5, linewidth=0.1, zorder=1)
ax1.xaxis.set_major_locator(MaxNLocator(integer=True))
ax1.set_yticks([])
ax1.set_yticklabels([])
#ax2.axis('off')
ax1.set_xlim(start_year, end_year)
ax1.set_aspect("equal")
plt.xticks(arange(start_year, end_year, 5))
plt.savefig(base_path+'timeline.pdf', format="pdf", dpi=300, bbox_inches='tight')
# --- Legend in its own figure --------------------------------------------
fig = plt.figure()
ax2 = plt.subplot(111)
ax2.legend(by_label.values(), by_label.keys(), loc='center', #bbox_to_anchor=(0.5, -0.05),
           fancybox=True, shadow=True, ncol=2)
ax2.axis('off')
plt.savefig(base_path+'legend.pdf', format="pdf", dpi=300, bbox_inches='tight')
#plt.show()
# create legend for themes in a separate figure
fig = plt.figure()
ax3 = plt.subplot(111)
x = 0
y = 0
circle = Circle((x, y), color="black", radius=size, zorder=5)
ax3.add_patch(circle)
# Draw one labelled spoke per theme around a reference marker.
for theme, angle in theme_angles.items():
    x1 = x + theme_length * sin(deg2rad(angle))
    y1 = y + theme_length * cos(deg2rad(angle))
    x2 = x + theme_length * sin(deg2rad(angle)) * 1.2
    y2 = y + theme_length * cos(deg2rad(angle)) * 1.2
    ax3.annotate(theme.capitalize().replace("_", " "), (x1, y1), (x2, y2), horizontalalignment='center',
                 verticalalignment='center', color="red", zorder=10, size=15)
    ax3.plot([x, x1], [y, y1], c="black",
             linewidth=10*theme_width, zorder=1)
ax3.set_aspect("equal")
ax3.axis('off')
plt.savefig(base_path+'timeline_themes.pdf', format="pdf", dpi=300, bbox_inches='tight')
| 35.559471 | 175 | 0.650768 |
fed7cf7a07873e74fd5bc50796d61484b796fe97 | 2,012 | py | Python | bevm/db.py | sorawit/bevm | 850b2d64fc12dae92d9cdaf8b4c48b90cc0d05d6 | [
"MIT"
] | 1 | 2021-09-15T10:16:46.000Z | 2021-09-15T10:16:46.000Z | bevm/db.py | sorawit/bevm | 850b2d64fc12dae92d9cdaf8b4c48b90cc0d05d6 | [
"MIT"
] | null | null | null | bevm/db.py | sorawit/bevm | 850b2d64fc12dae92d9cdaf8b4c48b90cc0d05d6 | [
"MIT"
] | null | null | null | from eth.db.atomic import AtomicDB
from eth.db.backends.level import LevelDB
from eth.db.account import AccountDB
from rlp.sedes import big_endian_int
from bevm.block import Block
from bevm.action import rlp_decode_action
ACTION_COUNT = b'BEVM:ACTION_COUNT'
def block_key(blockno):
return 'BEVM:BLOCK_KEY_{}'.format(blockno).encode()
class DB:
    """Key-value persistence layer for blocks and actions.

    Blocks and actions are stored RLP-encoded under their hash; blocks are
    additionally indexed by block number, and a single big-endian counter
    (ACTION_COUNT) tracks how many actions have been applied.
    """
    def __init__(self, db_path=None):
        # LevelDB on disk when a path is given, otherwise an in-memory store.
        self.db = LevelDB(db_path) if db_path is not None else AtomicDB()
        if not self.db.exists(ACTION_COUNT):
            # First run: persist the default (genesis) block under its hash,
            # index it as block 0, and initialise the action counter to 0.
            block0 = Block.default()
            block0hash = block0.hash()
            self.db.set(block0hash, block0.rlp_encode())
            self.db.set(block_key(0), block0hash)
            self.db.set(ACTION_COUNT, big_endian_int.serialize(0))
    @property
    def action_count(self):
        """Number of actions applied so far (read from the persisted counter)."""
        return big_endian_int.deserialize(self.db.get(ACTION_COUNT))
    @property
    def block_count(self):
        """Alias: one block is appended per action, so the counts coincide."""
        return self.action_count
    def push_block(self, block, action):
        """Persist *action* and *block* and index the block as the next one.

        The block must commit to the action via its ``action_hash``; the
        counter is bumped last so a matching index entry always exists.
        """
        action_count = self.action_count
        actionhash = action.hash()
        blockhash = block.hash()
        # Sanity check: the block must reference exactly this action.
        assert block.action_hash == actionhash
        self.db.set(actionhash, action.rlp_encode())
        self.db.set(blockhash, block.rlp_encode())
        self.db.set(block_key(action_count + 1), blockhash)
        self.db.set(ACTION_COUNT, big_endian_int.serialize(action_count + 1))
    def get_state_db_by_root(self, state_root):
        """Return an AccountDB view over the given state root."""
        return AccountDB(self.db, state_root)
    def get_latest_state_db(self):
        """Return the AccountDB at the latest block's state root."""
        return self.get_state_db_by_root(self.get_latest_block().state_root)
    def get_latest_block(self):
        """Return the most recently pushed block."""
        return self.get_block_by_blockno(self.action_count)
    def get_block_by_blockno(self, blockno):
        """Resolve a block number to its hash, then load the block."""
        return self.get_block_by_hash(self.db.get(block_key(blockno)))
    def get_block_by_hash(self, blockhash):
        """Load and RLP-decode the block stored under *blockhash*."""
        return Block.rlp_decode(self.db.get(blockhash))
    def get_action_by_hash(self, actionhash):
        """Load and RLP-decode the action stored under *actionhash*."""
        return rlp_decode_action(self.db.get(actionhash))
| 32.983607 | 77 | 0.700795 |
fed896e00f41aed0c3e19962de5fce02825adb90 | 2,408 | py | Python | api/ops/tasks/detection/core/detectionTypes/valueThreshold.py | LeiSoft/CueObserve | cc5254df7d0cb817a8b3ec427f5cb54a1d420f7e | [
"Apache-2.0"
] | 149 | 2021-07-16T13:37:30.000Z | 2022-03-21T10:13:15.000Z | api/ops/tasks/detection/core/detectionTypes/valueThreshold.py | LeiSoft/CueObserve | cc5254df7d0cb817a8b3ec427f5cb54a1d420f7e | [
"Apache-2.0"
] | 61 | 2021-07-15T06:39:05.000Z | 2021-12-27T06:58:10.000Z | api/ops/tasks/detection/core/detectionTypes/valueThreshold.py | LeiSoft/CueObserve | cc5254df7d0cb817a8b3ec427f5cb54a1d420f7e | [
"Apache-2.0"
] | 22 | 2021-07-19T07:20:49.000Z | 2022-03-21T10:13:16.000Z | import dateutil.parser as dp
from dateutil.relativedelta import relativedelta
import pandas as pd, datetime as dt
def checkLatestAnomaly(df, operationCheckStr):
    """Return details of the most recent anomalous point in *df*.

    Anomalous rows are those with ``anomaly == 15``. Returns a dict with the
    human-readable check description, the value, and the timestamp in both
    ISO-8601 and epoch-milliseconds form; returns {} when no row is flagged.
    """
    anomalies = df[df["anomaly"] == 15]
    if anomalies.empty:
        return {}
    last_row = anomalies.iloc[-1]
    # Parse the timestamp once instead of twice as before.
    anomaly_time = dp.parse(last_row["ds"])
    return {
        "operationCheck": operationCheckStr,
        "value": float(last_row["y"]),
        "anomalyTimeISO": anomaly_time.isoformat(),
        "anomalyTime": anomaly_time.timestamp() * 1000,
    }


def valueThresholdDetect(df, granularity, operator, value1, value2):
    """Flag rows of *df* whose value crosses a static threshold.

    Args:
        df: frame with ``ds`` (timestamps) and ``y`` (values); mutated in
            place (``ds`` is normalised to ISO strings), as before.
        granularity: "day" keeps the last 45 points, anything else 24*7.
        value2: second bound for (!)between, or the literal string "null".

    Returns:
        {"anomalyData": {"actual": [...]}, "anomalyLatest": {...}} where
        flagged rows carry anomaly == 15 and normal rows anomaly == 1.
    """
    value1 = int(value1)
    lowerVal = value1
    upperVal = value1
    if value2 != "null":
        value2 = int(value2)
        lowerVal = min(value1, value2)
        upperVal = max(value1, value2)

    operationStrDict = {
        "greater": f'greater than {value1}',
        "lesser": f'lesser than {value1}',
        "!greater": f'not greater than {value1}',
        "!lesser": f'not lesser than {value1}',
        "between": f'between {lowerVal} and {upperVal}',
        "!between": f'not between {lowerVal} and {upperVal}'
    }
    # Vectorized predicates replace the original `eval` on code strings
    # (same arithmetic: (condition) * 14 + 1 -> 15 for flagged, 1 otherwise).
    operationFuncDict = {
        "greater": lambda y: (y > value1) * 14 + 1,
        "lesser": lambda y: (y < value1) * 14 + 1,
        "!greater": lambda y: (y <= value1) * 14 + 1,
        "!lesser": lambda y: (y >= value1) * 14 + 1,
        "between": lambda y: ((y >= lowerVal) & (y <= upperVal)) * 14 + 1,
        "!between": lambda y: ((y < lowerVal) | (y > upperVal)) * 14 + 1,
    }

    today = dt.datetime.now()
    df["ds"] = pd.to_datetime(df["ds"])
    df = df.sort_values("ds")
    df["ds"] = df["ds"].apply(lambda date: date.isoformat()[:19])
    # Drop today's (incomplete) data; ISO strings compare chronologically.
    todayISO = today.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None).isoformat()[:19]
    df = df[df["ds"] < todayISO]
    df["anomaly"] = operationFuncDict[operator](df["y"])
    anomalyLatest = checkLatestAnomaly(df, operationStrDict[operator])
    df = df[["ds", "y", "anomaly"]]
    numActual = 45 if granularity == "day" else 24 * 7
    output = {
        "anomalyData": {
            "actual": df[-numActual:].to_dict("records")
        },
        "anomalyLatest": anomalyLatest
    }
    return output
return output | 35.411765 | 101 | 0.572674 |
fed8e9ad56ccf5ea28b13fbec8dee05b0037dc77 | 343 | py | Python | src/chapter8/exercise6.py | group7BSE1/BSE-2021 | 2553b12e5fd5d1015af4746bcf84a8ee7c1cb8e0 | [
"MIT"
] | null | null | null | src/chapter8/exercise6.py | group7BSE1/BSE-2021 | 2553b12e5fd5d1015af4746bcf84a8ee7c1cb8e0 | [
"MIT"
] | null | null | null | src/chapter8/exercise6.py | group7BSE1/BSE-2021 | 2553b12e5fd5d1015af4746bcf84a8ee7c1cb8e0 | [
"MIT"
] | 1 | 2021-04-07T14:49:04.000Z | 2021-04-07T14:49:04.000Z | list = []
while True:
number = 0.0
input_num = input('Enter a number: ')
if input_num == 'done':
break
try:
number = float(input_num)
except:
print('Invalid input')
quit()
list.append(input_num)
if list:
print('Maximum: ', max(list) or None)
print('Minimum: ', min(list) or None) | 22.866667 | 41 | 0.559767 |
fed8fa9a87db15241481aa01020912d1d1d9aa17 | 91 | py | Python | client/const.py | math2001/nine43 | 7749dc63b9717a6ee4ddc1723d6c59e16046fc01 | [
"MIT"
] | null | null | null | client/const.py | math2001/nine43 | 7749dc63b9717a6ee4ddc1723d6c59e16046fc01 | [
"MIT"
] | 3 | 2019-04-27T06:34:34.000Z | 2019-04-27T21:29:31.000Z | client/const.py | math2001/nine43 | 7749dc63b9717a6ee4ddc1723d6c59e16046fc01 | [
"MIT"
] | null | null | null | MONO = "FiraMono-Medium"
PORT = 9999
ISSUES = "https://github.com/math2001/nine43/issues"
| 18.2 | 52 | 0.725275 |
fed91e7ac94b5be8280a7f183dba3afc80ab32c6 | 484 | py | Python | zipencrypt/__init__.py | norcuni/zipencrypt | 897f03d05f5b2881e915ed346d0498f58abf3ac8 | [
"MIT"
] | 5 | 2018-06-05T18:57:10.000Z | 2020-12-04T10:08:31.000Z | zipencrypt/__init__.py | norcuni/zipencrypt | 897f03d05f5b2881e915ed346d0498f58abf3ac8 | [
"MIT"
] | 2 | 2018-11-07T02:53:40.000Z | 2019-10-30T20:48:40.000Z | zipencrypt/__init__.py | devthat/zipencrypt | 897f03d05f5b2881e915ed346d0498f58abf3ac8 | [
"MIT"
] | null | null | null | import sys
PY2 = sys.version_info[0] == 2
if PY2:
from .zipencrypt2 import ZipFile
from zipfile import BadZipfile, error, ZIP_STORED, ZIP_DEFLATED, \
is_zipfile, ZipInfo, PyZipFile, LargeZipFile
__all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED",
"is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
else:
from .zipencrypt3 import __all__ as zipencrypt3_all
from .zipencrypt3 import *
__all__ = zipencrypt3_all
| 32.266667 | 79 | 0.692149 |
fed9bd2808591485831ae3b90b08dc959af84228 | 19 | py | Python | deprecated/origin_stgcn_repo/feeder/__init__.py | fserracant/mmskeleton | 44008bdef3dd6354a17c220fac8bcd8cd08ed201 | [
"Apache-2.0"
] | 2,302 | 2018-01-23T11:18:30.000Z | 2022-03-31T12:24:55.000Z | deprecated/origin_stgcn_repo/feeder/__init__.py | fserracant/mmskeleton | 44008bdef3dd6354a17c220fac8bcd8cd08ed201 | [
"Apache-2.0"
] | 246 | 2019-08-24T15:36:11.000Z | 2022-03-23T06:57:02.000Z | deprecated/origin_stgcn_repo/feeder/__init__.py | fserracant/mmskeleton | 44008bdef3dd6354a17c220fac8bcd8cd08ed201 | [
"Apache-2.0"
] | 651 | 2018-01-24T00:56:54.000Z | 2022-03-25T23:42:53.000Z | from . import tools | 19 | 19 | 0.789474 |
feda36d66368a5ba3e059121a70717771426dc48 | 138 | py | Python | nifs/retrieve/rawdata/__init__.py | nifs-software/nifs-retrieve | 4ff9d70a1d2301d7b5762162586388ae67046ad2 | [
"MIT"
] | null | null | null | nifs/retrieve/rawdata/__init__.py | nifs-software/nifs-retrieve | 4ff9d70a1d2301d7b5762162586388ae67046ad2 | [
"MIT"
] | 2 | 2021-12-16T04:50:00.000Z | 2021-12-22T11:55:01.000Z | nifs/retrieve/rawdata/__init__.py | nifs-software/nifs-retrieve | 4ff9d70a1d2301d7b5762162586388ae67046ad2 | [
"MIT"
] | null | null | null | from .rawdata import RawData
from .timedata import TimeData
from .voltdata import VoltData
__all__ = ["RawData", "TimeData", "VoltData"]
| 23 | 45 | 0.768116 |
fedb6c7eea105f52852855900c26c30796b4a06e | 5,654 | py | Python | preprocess/sketch_generation.py | code-gen/exploration | c83d79745df9566c5f1a82e581008e0984fcc319 | [
"MIT"
] | null | null | null | preprocess/sketch_generation.py | code-gen/exploration | c83d79745df9566c5f1a82e581008e0984fcc319 | [
"MIT"
] | 1 | 2019-05-11T14:49:58.000Z | 2019-05-24T15:02:54.000Z | preprocess/sketch_generation.py | code-gen/exploration | c83d79745df9566c5f1a82e581008e0984fcc319 | [
"MIT"
] | null | null | null | """
Sketch (similar to Coarse-to-Fine)
- keep Python keywords as is
- strip off arguments and variable names
- substitute tokens with types: `NUMBER`, `STRING`
- specialize `NAME` token:
- for functions: `FUNC#<num_args>`
# Examples
x = 1 if True else 0
NAME = NUMBER if True else NUMBER
result = SomeFunc(1, 2, 'y', arg)
NAME = FUNC#4 ( NUMBER , NUMBER , STRING , NAME )
result = [x for x in DoWork(xs) if x % 2 == 0]
NAME = [ NAME for NAME in FUNC#1 ( NAME ) if NAME % NUMBER == NUMBER ]
"""
import ast
import builtins
import io
import sys
import token
from collections import defaultdict
from tokenize import TokenInfo, tokenize
import astpretty
from termcolor import colored
class ASTVisitor(ast.NodeVisitor):
    """Collects the arity of every call found while walking an AST."""

    def __init__(self):
        # Maps callable name -> argument count (positional + keyword).
        self.functions = {}

    @staticmethod
    def name_by_type(node):
        """Best-effort extraction of a callable's name from its AST node."""
        if isinstance(node, ast.Attribute):
            return node.attr
        if isinstance(node, ast.Name):
            return node.id
        if isinstance(node, ast.Subscript):
            try:
                return node.slice.value.id
            except AttributeError:
                return node.slice.value
        return None

    def visit_Call(self, node: ast.Call):
        callee = node.func
        if isinstance(callee, ast.Call):
            # Chained call like f()(...): record the inner call instead.
            self.visit_Call(callee)
        else:
            name = self.name_by_type(callee)
            arity = len(node.args)
            if hasattr(node, 'keywords'):
                arity += len(node.keywords)
            self.functions[name] = arity
            # Recurse into arguments so nested calls are counted too.
            for arg in node.args:
                if isinstance(arg, ast.Call):
                    self.visit_Call(arg)
                else:
                    self.generic_visit(arg)
class SketchVocab:
    """Placeholder tokens emitted in sketches instead of concrete
    identifiers and literals (see the module docstring for examples)."""
    # Generic identifier placeholder.
    NAME_ID = "NAME"
    # Function-call placeholder; rendered as "FUNC#<num_args>".
    FUNC_ID = "FUNC"
    # Literal placeholders.
    STR_LITERAL_ID = "STRING"
    NUM_LITERAL_ID = "NUMBER"
    # RESERVED_ID = "<reserved>"
    # ACCESSOR_ID = "<accessor>"
    # ASSIGN_ID = "<assign>"
    # ARITHMETIC_ID = "<arithmetic>"
    # OP_ID = "<op>"
class Sketch:
    """Token-level sketch of a Python snippet.

    Keywords and operators are kept verbatim; other identifiers become
    NAME (or FUNC#<arity> for known calls) and literals become
    STRING/NUMBER placeholders. Call generate() after construction.
    """
    def __init__(self, code_snippet: str, verbose=False):
        self.code_snippet = code_snippet
        # Each dict maps token text -> list of column offsets where it occurs.
        self.names = defaultdict(lambda: [])
        self.keywords = defaultdict(lambda: [])
        self.literals = defaultdict(lambda: [])
        self.operators = defaultdict(lambda: [])
        # Sketch tokens in source order; filled by generate().
        self.ordered = []
        if verbose:
            print(colored(" * tokenizing [%s]" % code_snippet, 'yellow'))
        self.tok_list = list(tokenize(io.BytesIO(self.code_snippet.encode('utf-8')).readline))
        # AST pass collects per-function arities so calls become FUNC#<n>.
        self.ast_visitor = ASTVisitor()
        self.ast = None
        try:
            # NOTE(review): NodeVisitor.visit returns None here, so self.ast
            # stays None even on success; only ast_visitor.functions is used.
            self.ast = self.ast_visitor.visit(ast.parse(self.code_snippet))
        except SyntaxError:
            if verbose:
                print(colored(" * skipping ast generation for [%s]" % code_snippet, 'red'))
    def refine_name(self, tok: TokenInfo):
        """Append the sketch token for a NAME: keywords stay verbatim,
        known calls become FUNC#<arity>, everything else becomes NAME."""
        if self.is_reserved_keyword(tok.string):
            self.keywords[tok.string].append(tok.start[1])
            self.ordered.append(tok.string)
        else:
            self.names[tok.string].append(tok.start[1])
            if tok.string in self.ast_visitor.functions:
                self.ordered.append(SketchVocab.FUNC_ID + "#%d" % self.ast_visitor.functions[tok.string])
            else:
                self.ordered.append(SketchVocab.NAME_ID)
    def generate(self):
        """Walk the token stream and build the ordered sketch; returns self."""
        for tok in self.tok_list:
            tok_type = token.tok_name[tok.type]
            if tok_type == 'NAME':
                self.refine_name(tok)
            elif tok_type == 'STRING':
                self.literals[tok.string].append(tok.start[1])
                self.ordered.append(SketchVocab.STR_LITERAL_ID)
            elif tok_type == 'NUMBER':
                self.literals[tok.string].append(tok.start[1])
                self.ordered.append(SketchVocab.NUM_LITERAL_ID)
            elif tok_type == 'OP':
                self.operators[tok.string].append(tok.start[1])
                self.ordered.append(tok.string)
            else:
                # Anything else should be a structural token we can ignore.
                assert tok_type in ['ENCODING', 'NEWLINE', 'ENDMARKER', 'ERRORTOKEN'], "%s" % tok_type
        return self
    def details(self):
        """Debug summary of the collected token categories."""
        return "names: %s\nkeywords: %s\nliterals: %s\noperators: %s" % (
            str(list(self.names.keys())),
            str(list(self.keywords.keys())),
            str(list(self.literals.keys())),
            str(list(self.operators.keys()))
        )
    def split(self, delim=' '):
        """Return the sketch as a list of tokens (string split on *delim*)."""
        return str(self).split(delim)
    def __str__(self):
        return ' '.join(self.ordered)
    def __repr__(self):
        return str(self)
    def __len__(self):
        return len(self.ordered)
    @staticmethod
    def is_reserved_keyword(name):
        """True if *name* is a Python builtin or keyword (kept verbatim)."""
        RESERVED_KEYWORDS = set(dir(builtins) + [
            "and", "assert", "break", "class", "continue", "def", "del", "elif",
            "else", "except", "exec", "finally", "for", "from", "global", "if",
            "import", "in", "is", "lambda", "not", "or", "pass", "print", "raise",
            "return", "try", "while", "yield", "None", "self"
        ]) # len = 182
        return name in RESERVED_KEYWORDS
def main():
    """CLI entry point: pretty-print the AST of the snippet passed as the
    first command-line argument, then print its generated sketch."""
    snippet = sys.argv[1]
    astpretty.pprint(ast.parse(snippet).body[0], indent=' ' * 4)
    print(Sketch(snippet, verbose=True).generate())


if __name__ == '__main__':
    main()
| 29.447917 | 105 | 0.579413 |
fedbf772bab9d4ac688fa0669b5207dce247b24c | 8,538 | py | Python | LPBv2/tests/game/test_player.py | TierynnB/LeaguePyBot | 2e96230b9dc24d185ddc0c6086d79f7d01e7a643 | [
"MIT"
] | 45 | 2020-11-28T04:45:45.000Z | 2022-03-31T05:53:37.000Z | LPBv2/tests/game/test_player.py | TierynnB/LeaguePyBot | 2e96230b9dc24d185ddc0c6086d79f7d01e7a643 | [
"MIT"
] | 13 | 2021-01-15T00:50:10.000Z | 2022-02-02T15:16:49.000Z | LPBv2/tests/game/test_player.py | TierynnB/LeaguePyBot | 2e96230b9dc24d185ddc0c6086d79f7d01e7a643 | [
"MIT"
] | 14 | 2020-12-21T10:03:31.000Z | 2021-11-22T04:03:03.000Z | import pytest
from LPBv2.common import (
InventoryItem,
PlayerInfo,
PlayerScore,
PlayerStats,
TeamMember,
MinimapZone,
merge_dicts,
)
from LPBv2.game import Player
# Captured payload mimicking the League of Legends live-client API player
# object (Japanese locale strings); shared fixture for the update tests below.
update_data = {
    "abilities": {
        "E": {
            "abilityLevel": 0,
            "displayName": "\u9b42\u306e\u8a66\u7df4",
            "id": "IllaoiE",
            "rawDescription": "GeneratedTip_Spell_IllaoiE_Description",
            "rawDisplayName": "GeneratedTip_Spell_IllaoiE_DisplayName",
        },
        "Passive": {
            "displayName": "\u65e7\u795e\u306e\u9810\u8a00\u8005",
            "id": "IllaoiPassive",
            "rawDescription": "GeneratedTip_Passive_IllaoiPassive_Description",
            "rawDisplayName": "GeneratedTip_Passive_IllaoiPassive_DisplayName",
        },
        "Q": {
            "abilityLevel": 0,
            "displayName": "\u89e6\u624b\u306e\u9244\u69cc",
            "id": "IllaoiQ",
            "rawDescription": "GeneratedTip_Spell_IllaoiQ_Description",
            "rawDisplayName": "GeneratedTip_Spell_IllaoiQ_DisplayName",
        },
        "R": {
            "abilityLevel": 0,
            "displayName": "\u4fe1\u4ef0\u9707",
            "id": "IllaoiR",
            "rawDescription": "GeneratedTip_Spell_IllaoiR_Description",
            "rawDisplayName": "GeneratedTip_Spell_IllaoiR_DisplayName",
        },
        "W": {
            "abilityLevel": 0,
            "displayName": "\u904e\u9177\u306a\u308b\u6559\u8a13",
            "id": "IllaoiW",
            "rawDescription": "GeneratedTip_Spell_IllaoiW_Description",
            "rawDisplayName": "GeneratedTip_Spell_IllaoiW_DisplayName",
        },
    },
    # Source of the values asserted in test_player_update_stats (maxHealth).
    "championStats": {
        "abilityHaste": 0.0,
        "abilityPower": 0.0,
        "armor": 41.0,
        "armorPenetrationFlat": 0.0,
        "armorPenetrationPercent": 1.0,
        "attackDamage": 73.4000015258789,
        "attackRange": 125.0,
        "attackSpeed": 0.5709999799728394,
        "bonusArmorPenetrationPercent": 1.0,
        "bonusMagicPenetrationPercent": 1.0,
        "cooldownReduction": 0.0,
        "critChance": 0.0,
        "critDamage": 175.0,
        "currentHealth": 601.0,
        "healthRegenRate": 1.899999976158142,
        "lifeSteal": 0.0,
        "magicLethality": 0.0,
        "magicPenetrationFlat": 0.0,
        "magicPenetrationPercent": 1.0,
        "magicResist": 32.0,
        "maxHealth": 601.0,
        "moveSpeed": 340.0,
        "physicalLethality": 0.0,
        "resourceMax": 300.0,
        "resourceRegenRate": 1.5,
        "resourceType": "MANA",
        "resourceValue": 300.0,
        "spellVamp": 0.0,
        "tenacity": 0.0,
    },
    "currentGold": 888.6270751953125,
    "level": 1,
    "summonerName": "Supername",
    "championName": "\u30a4\u30e9\u30aa\u30a4",
    "isBot": False,
    "isDead": False,
    # First item (itemID 3854) is asserted in test_player_update_inventory.
    "items": [
        {
            "canUse": False,
            "consumable": False,
            "count": 1,
            "displayName": "\u92fc\u306e\u30b7\u30e7\u30eb\u30c0\u30fc\u30ac\u30fc\u30c9",
            "itemID": 3854,
            "price": 400,
            "rawDescription": "GeneratedTip_Item_3854_Description",
            "rawDisplayName": "Item_3854_Name",
            "slot": 0,
        },
        {
            "canUse": False,
            "consumable": False,
            "count": 1,
            "displayName": "\u30d7\u30ec\u30fc\u30c8 \u30b9\u30c1\u30fc\u30eb\u30ad\u30e3\u30c3\u30d7",
            "itemID": 3047,
            "price": 500,
            "rawDescription": "GeneratedTip_Item_3047_Description",
            "rawDisplayName": "Item_3047_Name",
            "slot": 1,
        },
        {
            "canUse": False,
            "consumable": False,
            "count": 1,
            "displayName": "\u30ad\u30f3\u30c9\u30eb\u30b8\u30a7\u30e0",
            "itemID": 3067,
            "price": 400,
            "rawDescription": "GeneratedTip_Item_3067_Description",
            "rawDisplayName": "Item_3067_Name",
            "slot": 2,
        },
        {
            "canUse": True,
            "consumable": False,
            "count": 1,
            "displayName": "\u30b9\u30c6\u30eb\u30b9 \u30ef\u30fc\u30c9",
            "itemID": 3340,
            "price": 0,
            "rawDescription": "GeneratedTip_Item_3340_Description",
            "rawDisplayName": "Item_3340_Name",
            "slot": 6,
        },
    ],
    "position": "",
    "rawChampionName": "game_character_displayname_Illaoi",
    "respawnTimer": 0.0,
    "runes": {
        "keystone": {
            "displayName": "\u4e0d\u6b7b\u8005\u306e\u63e1\u6483",
            "id": 8437,
            "rawDescription": "perk_tooltip_GraspOfTheUndying",
            "rawDisplayName": "perk_displayname_GraspOfTheUndying",
        },
        "primaryRuneTree": {
            "displayName": "\u4e0d\u6ec5",
            "id": 8400,
            "rawDescription": "perkstyle_tooltip_7204",
            "rawDisplayName": "perkstyle_displayname_7204",
        },
        "secondaryRuneTree": {
            "displayName": "\u9b54\u9053",
            "id": 8200,
            "rawDescription": "perkstyle_tooltip_7202",
            "rawDisplayName": "perkstyle_displayname_7202",
        },
    },
    # creepScore 100 is asserted in test_player_update_score.
    "scores": {
        "assists": 0,
        "creepScore": 100,
        "deaths": 0,
        "kills": 0,
        "wardScore": 0.0,
    },
    "skinID": 0,
    "summonerSpells": {
        "summonerSpellOne": {
            "displayName": "\u30af\u30ec\u30f3\u30ba",
            "rawDescription": "GeneratedTip_SummonerSpell_SummonerBoost_Description",
            "rawDisplayName": "GeneratedTip_SummonerSpell_SummonerBoost_DisplayName",
        },
        "summonerSpellTwo": {
            "displayName": "\u30a4\u30b0\u30be\u30fc\u30b9\u30c8",
            "rawDescription": "GeneratedTip_SummonerSpell_SummonerExhaust_Description",
            "rawDisplayName": "GeneratedTip_SummonerSpell_SummonerExhaust_DisplayName",
        },
    },
    "team": "ORDER",
}
# Minimal minimap fixtures for test_player_update_location().
test_zone = MinimapZone(x=90, y=90, name="TestZone")
test_member = TeamMember(x=100, y=100, zone=test_zone)
@pytest.fixture
def get_player():
    # Fresh Player instance per test so no state leaks between tests.
    return Player()
def test_player_init(get_player):
    # A freshly-constructed Player must expose typed, empty sub-structures.
    assert get_player
    assert isinstance(get_player.info, PlayerInfo)
    assert isinstance(get_player.stats, PlayerStats)
    assert isinstance(get_player.score, PlayerScore)
    assert isinstance(get_player.inventory, list)
    assert isinstance(get_player.location, str)
    assert isinstance(get_player, Player)
@pytest.mark.asyncio
async def test_player_update_info(get_player):
    # update_info() copies summonerName/level from the raw payload.
    await get_player.update_info(update_data)
    assert get_player.info.name == "Supername"
    assert get_player.info.level == 1
    assert isinstance(get_player.info, PlayerInfo)
@pytest.mark.asyncio
async def test_player_update_stats(get_player):
    # update_stats() maps the championStats section onto PlayerStats.
    await get_player.update_stats(update_data)
    assert get_player.stats.maxHealth == 601.0
    assert isinstance(get_player.stats, PlayerStats)
@pytest.mark.asyncio
async def test_player_update_score(get_player):
    # update_score() maps the scores section onto PlayerScore.
    await get_player.update_score(update_data)
    assert get_player.score.creepScore == 100
    assert isinstance(get_player.score, PlayerScore)
@pytest.mark.asyncio
async def test_player_update_inventory(get_player):
    # update_inventory() turns the items section into InventoryItem objects,
    # preserving payload order (first item is itemID 3854).
    await get_player.update_inventory(update_data)
    assert isinstance(get_player.inventory, list)
    assert len(get_player.inventory) > 0
    assert isinstance(get_player.inventory[0], InventoryItem)
    assert get_player.inventory[0].itemID == 3854
@pytest.mark.asyncio
async def test_player_update_location(get_player):
    # update_location() copies x/y/zone from a TeamMember onto PlayerInfo.
    await get_player.update_location(test_member)
    assert get_player.info.x == 100
    assert get_player.info.y == 100
    assert get_player.info.zone == test_zone
    assert isinstance(get_player.info.zone, MinimapZone)
    assert isinstance(get_player.info, PlayerInfo)
@pytest.mark.asyncio
async def test_player_update(get_player):
    # update() must aggregate all the partial updates verified above
    # (info, stats, score and inventory) from one payload.
    await get_player.update(update_data)
    assert get_player.info.name == "Supername"
    assert get_player.info.level == 1
    assert isinstance(get_player.info, PlayerInfo)
    assert get_player.stats.maxHealth == 601.0
    assert isinstance(get_player.stats, PlayerStats)
    assert get_player.score.creepScore == 100
    assert isinstance(get_player.score, PlayerScore)
    assert isinstance(get_player.inventory, list)
    assert len(get_player.inventory) > 0
    assert isinstance(get_player.inventory[0], InventoryItem)
    assert get_player.inventory[0].itemID == 3854
| 33.093023 | 103 | 0.613844 |
fedcf036c6fb8965eea9548fe948c1a18ef9db31 | 785 | py | Python | seiketsu/users/schema.py | tychota/seiketsu | 2b5280365b9de44cd84ac65ed74981b30be5cc76 | [
"MIT"
] | null | null | null | seiketsu/users/schema.py | tychota/seiketsu | 2b5280365b9de44cd84ac65ed74981b30be5cc76 | [
"MIT"
] | null | null | null | seiketsu/users/schema.py | tychota/seiketsu | 2b5280365b9de44cd84ac65ed74981b30be5cc76 | [
"MIT"
] | null | null | null | # cookbook/ingredients/schema.py
import graphene
from graphene_django_extras import DjangoObjectField, DjangoFilterPaginateListField, LimitOffsetGraphqlPagination
from .types import UserType
from .mutations import UserSerializerMutation
from .subscriptions import UserSubscription
class Query(graphene.ObjectType):
    # Read-only entry points: single-user lookup and a filterable,
    # limit/offset-paginated list of all users.
    user = DjangoObjectField(UserType, description='Single User query')
    all_users = DjangoFilterPaginateListField(UserType, pagination=LimitOffsetGraphqlPagination())
class Mutation(graphene.ObjectType):
    # CRUD mutations generated from the user serializer mutation class.
    user_create = UserSerializerMutation.CreateField()
    user_delete = UserSerializerMutation.DeleteField()
    user_update = UserSerializerMutation.UpdateField()
class Subscription(graphene.ObjectType):
    # Pushes user create/update/delete events to subscribed clients.
    user_subscription = UserSubscription.Field()
| 34.130435 | 113 | 0.831847 |
fedd8583c4097da76284324d87da760d236bb283 | 1,026 | py | Python | app/__init__.py | alineayumi/desafio-ton-API-REST | cf9f88adc4f7de6060f2c3f2c31147077c311ce9 | [
"MIT"
] | null | null | null | app/__init__.py | alineayumi/desafio-ton-API-REST | cf9f88adc4f7de6060f2c3f2c31147077c311ce9 | [
"MIT"
] | null | null | null | app/__init__.py | alineayumi/desafio-ton-API-REST | cf9f88adc4f7de6060f2c3f2c31147077c311ce9 | [
"MIT"
] | null | null | null | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask.logging import default_handler
from flask_request_id_header.middleware import RequestID
from app.resources.encoders import CustomJSONEncoder
from app.resources.logger import formatter
from flask_jwt import JWT
# Shared SQLAlchemy instance; bound to the app inside create_app().
db = SQLAlchemy()
def create_app(config):
    """Application factory: build and configure the Flask app from *config*."""
    # Imported here (not at module level) to avoid circular imports.
    from .mock_api import setup_blueprint as mock_api_blueprint
    from .new_api import setup_blueprint as new_api_blueprint
    default_handler.setFormatter(formatter)
    # create application instance
    app = Flask(__name__)
    app.json_encoder = CustomJSONEncoder
    # config from object
    app.config.from_object(config)
    db.init_app(app)
    RequestID(app)
    from app.resources.authentication import identity, authenticate
    # JWT registers its auth endpoints/handlers on the app as a side effect;
    # the local variable itself is intentionally unused.
    jwt = JWT(app=app, authentication_handler=authenticate, identity_handler=identity)
    # Repeat this for the other modules.
    app.register_blueprint(mock_api_blueprint())
    app.register_blueprint(new_api_blueprint())
    return app
| 26.307692 | 86 | 0.789474 |
fee0850f728247adf6624bff53382da94eff6965 | 1,199 | py | Python | tests/test_negate_with_undo.py | robobeaver6/hier_config | efd413ef709d462effe8bfd11ef0520c1d62eb33 | [
"MIT"
] | null | null | null | tests/test_negate_with_undo.py | robobeaver6/hier_config | efd413ef709d462effe8bfd11ef0520c1d62eb33 | [
"MIT"
] | null | null | null | tests/test_negate_with_undo.py | robobeaver6/hier_config | efd413ef709d462effe8bfd11ef0520c1d62eb33 | [
"MIT"
] | null | null | null | import unittest
import tempfile
import os
import yaml
import types
from hier_config import HConfig
from hier_config.host import Host
class TestNegateWithUndo(unittest.TestCase):
    """Remediation test for Comware-style negation ('undo' instead of 'no')."""
    @classmethod
    def setUpClass(cls):
        # Host whose hier_config options (loaded from YAML) declare that
        # negation is spelled 'undo' on this platform.
        cls.os = 'comware5'
        cls.options_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'files',
            'test_options_negate_with_undo.yml',
        )
        # Running config and intended (compiled) config invert each other,
        # so the remediation must 'undo' one line and re-add the other.
        cls.running_cfg = 'test_for_undo\nundo test_for_redo\n'
        cls.compiled_cfg = 'undo test_for_undo\ntest_for_redo\n'
        cls.remediation = 'undo test_for_undo\ntest_for_redo\n'
        with open(cls.options_file) as f:
            cls.options = yaml.safe_load(f.read())
        cls.host_a = Host('example1.rtr', cls.os, cls.options)
    def test_merge(self):
        # Configs are passed as inline strings (load_file=False), then the
        # generated remediation is compared against the expected 'undo' form.
        self.host_a.load_config_from(config_type="running", name=self.running_cfg, load_file=False)
        self.host_a.load_config_from(config_type="compiled", name=self.compiled_cfg, load_file=False)
        self.host_a.load_remediation()
        self.assertEqual(self.remediation, self.host_a.facts['remediation_config_raw'])
if __name__ == "__main__":
    unittest.main(failfast=True)
| 30.74359 | 101 | 0.692244 |
fee18a5b11572b38d902059c0db310b2cf42cd2d | 6,984 | py | Python | code/gauss_legendre.py | MarkusLohmayer/master-thesis-code | b107d1b582064daf9ad4414e1c9f332ef0be8660 | [
"MIT"
] | 1 | 2020-11-14T15:56:07.000Z | 2020-11-14T15:56:07.000Z | code/gauss_legendre.py | MarkusLohmayer/master-thesis-code | b107d1b582064daf9ad4414e1c9f332ef0be8660 | [
"MIT"
] | null | null | null | code/gauss_legendre.py | MarkusLohmayer/master-thesis-code | b107d1b582064daf9ad4414e1c9f332ef0be8660 | [
"MIT"
] | null | null | null | """Gauss-Legendre collocation methods for port-Hamiltonian systems"""
import sympy
import numpy
import math
from newton import newton_raphson, DidNotConvergeError
from symbolic import eval_expr
def butcher(s):
    """Compute the Butcher tableau for a Gauss-Legendre collocation method.
    Parameters
    ----------
    s : int
        Number of stages of the collocation method.
        The resulting method is of order 2s.
    Returns
    -------
    a : numpy.ndarray
        Coefficients a_{ij}, i.e. the j-th Lagrange polynomial integrated on (0, c_i).
    b : numpy.ndarray
        Coefficients b_j, i.e. the i-th Lagrange polynomial integrated on (0, 1).
    c : numpy.ndarray
        Coefficients c_i, i.e. the collocation points.
    """
    from sympy.abc import tau, x
    # shifted Legendre polynomial of order s (Rodrigues-type formula,
    # unnormalized: d^s/dx^s [x^s (x-1)^s])
    P = (x ** s * (x - 1) ** s).diff(x, s)
    # roots of P, sorted ascending -> collocation points in (0, 1)
    C = sympy.solve(P)
    C.sort()
    c = numpy.array([float(c_i) for c_i in C])
    # Lagrange basis polynomials at nodes C (l_i(C_j) = delta_ij)
    L = []
    for i in range(s):
        l = 1
        for j in range(s):
            if j != i:
                l = (l * (tau - C[j]) / (C[i] - C[j])).simplify()
        L.append(l)
    # integrals of Lagrange polynomials; done symbolically, converted to
    # float only at the end for accuracy
    A = [[sympy.integrate(l, (tau, 0, c_i)) for l in L] for c_i in C]
    a = numpy.array([[float(a_ij) for a_ij in row] for row in A])
    B = [sympy.integrate(l, (tau, 0, 1)) for l in L]
    b = numpy.array([float(b_j) for b_j in B])
    return a, b, c
def gauss_legendre(
    x,
    xdot,
    x_0,
    t_f,
    dt,
    s=1,
    functionals={},
    params={},
    tol=1e-9,
    logger=None,
    constraints=[],
):
    """Integrate a port-Hamiltonian system in time
    based on a Gauss-Legendre collocation method.
    Parameters
    ----------
    x : sympy.Matrix
        vector of symbols for state-space coordinates
    xdot : List[sympy.Expr]
        The right hand sides of the differential equations
        which have to hold at each collocation point.
    x_0 : numpy.ndarray
        Initial conditions.
    t_f : float
        Length of time interval.
    dt : float
        Desired time step.
    s : int
        Number of stages of the collocation method.
        The resulting method is of order 2s.
    functionals : Dict[sympy.Symbol, sympy.Expr]
        Functionals on which xdot may depend.
    params : Dict[sympy.Symbol, Union[sympy.Expr, float]]
        Parameters on which the system may depend.
    tol : float
        Convergence tolerance passed to the Newton-Raphson solver.
    logger : Optional[Logger]
        Logger object which is passed through to Newton-Raphson solver.
    constraints : List[sympy.Expr]
        Additional algebraic equations which have to hold
        at each collocation point.
    Returns
    -------
    (time, solution) : Tuple[numpy.ndarray, numpy.ndarray]
        Time instants of shape (K + 1,) and the state at each instant of
        shape (K + 1, N).  On non-convergence the arrays are returned with
        the remaining entries left uninitialized.
    NOTE(review): the mutable default arguments are never mutated here,
    so they are harmless, but None defaults would be more idiomatic.
    """
    # number of steps (NOTE: requires t_f >= dt, otherwise K == 0)
    K = int(t_f // dt)
    # accurate time step
    dt = t_f / K
    # dimension of state space
    N = len(x)
    # Butcher tableau (multiplied with time step)
    a, b, c = butcher(s)
    a *= dt
    b *= dt
    c *= dt
    # generate code for evaluating residuals vector and Jacobian matrix
    code = _generate_code(x, xdot, N, a, s, functionals, params, constraints)
    # print(code)
    # return None, None
    ldict = {}
    exec(code, None, ldict)
    compute_residuals = ldict["compute_residuals"]
    compute_jacobian = ldict["compute_jacobian"]
    del code, ldict
    # array for storing time at every step
    time = numpy.empty(K + 1, dtype=float)
    time[0] = t_0 = 0.0
    # array for storing the state at every step
    solution = numpy.empty((K + 1, N), dtype=float)
    solution[0] = x_0
    # flows / unknowns (reused at every step; the previous step's solution
    # serves as warm start for the next Newton iteration)
    f = numpy.zeros(s * N, dtype=float)
    fmat = f.view()
    fmat.shape = (s, N)
    # residuals vector (reused at every step)
    residuals = numpy.empty(s * (N + len(constraints)), dtype=float)
    # jacobian matrix (reused at every step)
    jacobian = numpy.empty((s * (N + len(constraints)), s * N), dtype=float)
    for k in range(1, K + 1):
        try:
            # solve the nonlinear collocation equations for the flows f
            newton_raphson(
                f,
                residuals,
                lambda residuals, unknowns: compute_residuals(residuals, unknowns, x_0),
                jacobian,
                lambda jacobian, unknowns: compute_jacobian(jacobian, unknowns, x_0),
                tol=tol,
                iterations=500,
                logger=logger,
            )
        except DidNotConvergeError:
            print(f"Did not converge at step {k}.")
            break
        time[k] = t_0 = t_0 + dt
        # residuals are f + xdot, i.e. f == -xdot at the collocation points,
        # so the update subtracts the b-weighted flows
        solution[k] = x_0 = x_0 - b @ fmat
    return time, solution
def _generate_code(x, xdot, N, a, s, functionals, params, constraints):
    """Generate code for the two methods compute_residuals and compute_jacobian.

    Produces Python source defining
        compute_residuals(residuals, f, o)
        compute_jacobian(jacobian, f, o)
    where f is the flattened flow vector (reshaped to (s, N) inside) and
    o is the state at the previous time step.  Both functions write their
    result into the preallocated first argument.  Parameters in *params*
    are substituted symbolically, so the emitted code is fully numeric.
    """
    # dynamics (functionals substituted in)
    xdot = [eval_expr(f, functionals) for f in xdot]
    # algebraic constraints
    constraints = [eval_expr(c, functionals) for c in constraints]
    # symbols for Butcher coefficients a_{ij} multiplied by time step h
    asym = [[sympy.Symbol(f"a{i}{j}") for j in range(s)] for i in range(s)]
    # symbols for old state; symbol names are chosen to *print* as valid
    # Python index expressions (e.g. "o[0]") in the generated code
    osym = [sympy.Symbol(f"o[{n}]") for n in range(N)]
    # symbols for unknowns (flow vector)
    fsym = [[sympy.Symbol(f"f[{i},{n}]") for n in range(N)] for i in range(s)]
    # polynomial approximation of the numerical solution at the collocation points
    xc = [
        [
            (x[n], osym[n] - sum(asym[i][j] * fsym[j][n] for j in range(s)))
            for n in range(N)
        ]
        for i in range(s)
    ]
    # expressions for the residuals vector: f + xdot at each collocation
    # point, followed by the constraint residuals
    residuals = [
        fsym[i][n] + xdot[n].subs(xc[i]) for i in range(s) for n in range(N)
    ] + [c.subs(xc[i]) for c in constraints for i in range(s)]
    # expressions for the Jacobian matrix (derivatives w.r.t. all flows)
    jacobian = [[residual.diff(d) for r in fsym for d in r] for residual in residuals]
    printer = sympy.printing.lambdarepr.PythonCodePrinter()
    dim = s * N + s * len(constraints)
    code = "def compute_residuals(residuals, f, o):\n"
    code += f"\tf = f.view()\n\tf.shape = ({s}, {N})\n"
    code += "".join(f"\ta{i}{j} = {a[i,j]}\n" for i in range(s) for j in range(s))
    # code += "".join(f"\t{symbol} = {printer.doprint(value)}\n" for symbol, value in params.items())
    for i in range(dim):
        code += f"\tresiduals[{i}] = {printer.doprint(eval_expr(residuals[i], params=params).evalf())}\n"
        # code += f"\tresiduals[{i}] = {printer.doprint(residuals[i])}\n"
    code += "\n\ndef compute_jacobian(jacobian, f, o):\n"
    code += f"\tf = f.view()\n\tf.shape = ({s}, {N})\n"
    code += "".join(f"\ta{i}{j} = {a[i,j]}\n" for i in range(s) for j in range(s))
    # code += "".join(f"\t{symbol} = {printer.doprint(value)}\n" for symbol, value in params.items())
    for i in range(dim):
        for j in range(s * N):
            code += f"\tjacobian[{i},{j}] = {printer.doprint(eval_expr(jacobian[i][j], params=params).evalf())}\n"
            # code += f"\tjacobian[{i},{j}] = {printer.doprint(jacobian[i][j])}\n"
    return code
| 31.459459 | 114 | 0.593643 |
fee2dd08a38899ceea87863c92dafc29503606c4 | 525 | py | Python | feeds/rss_feed.py | godwinaden/movie_api_server | 1b467bd91d0a5a9a2f0a2a9fc921b3a4f5c04217 | [
"MIT"
] | null | null | null | feeds/rss_feed.py | godwinaden/movie_api_server | 1b467bd91d0a5a9a2f0a2a9fc921b3a4f5c04217 | [
"MIT"
] | null | null | null | feeds/rss_feed.py | godwinaden/movie_api_server | 1b467bd91d0a5a9a2f0a2a9fc921b3a4f5c04217 | [
"MIT"
] | null | null | null | from sql_app.repositories.movie_repository import MovieRepo
from feedgenerator import RssFeed
from sqlalchemy.orm import Session
class LatestRssFeed(RssFeed):
title: str
price: float
description: str
@staticmethod
def items(db: Session):
return MovieRepo.fetch_all(db)
@staticmethod
def item_title(item):
return item.title
@staticmethod
def item_price(item):
return item.price
@staticmethod
def item_description(item):
return item.description
| 20.192308 | 59 | 0.704762 |
fee307cf09fb64ad8f6da891a9a28954c9a3eeae | 3,026 | py | Python | teraserver/python/opentera/db/models/TeraDeviceParticipant.py | introlab/opentera | bfc4de672c9de40b7c9a659be2138731e7ee4e94 | [
"Apache-2.0"
] | 10 | 2020-03-16T14:46:06.000Z | 2022-02-11T16:07:38.000Z | teraserver/python/opentera/db/models/TeraDeviceParticipant.py | introlab/opentera | bfc4de672c9de40b7c9a659be2138731e7ee4e94 | [
"Apache-2.0"
] | 114 | 2019-09-16T13:02:50.000Z | 2022-03-22T19:17:36.000Z | teraserver/python/opentera/db/models/TeraDeviceParticipant.py | introlab/opentera | bfc4de672c9de40b7c9a659be2138731e7ee4e94 | [
"Apache-2.0"
] | null | null | null | from opentera.db.Base import db, BaseModel
class TeraDeviceParticipant(db.Model, BaseModel):
    """Association model linking a TeraDevice to a TeraParticipant."""
    __tablename__ = 't_devices_participants'
    id_device_participant = db.Column(db.Integer, db.Sequence('id_device_participant_sequence'), primary_key=True,
                                      autoincrement=True)
    id_device = db.Column(db.Integer, db.ForeignKey("t_devices.id_device"), nullable=False)
    id_participant = db.Column(db.Integer, db.ForeignKey("t_participants.id_participant", ondelete='cascade'),
                               nullable=False)

    device_participant_participant = db.relationship("TeraParticipant")
    device_participant_device = db.relationship("TeraDevice")

    def to_json(self, ignore_fields=None, minimal=False):
        """Serialize to a dict, always hiding the relationship attributes.

        Bug fix: the previous signature used a mutable default argument
        (ignore_fields=[]) and extended it in place, so ignored fields
        accumulated across calls and caller-provided lists were mutated.
        A fresh copy is used instead.
        """
        ignore_fields = [] if ignore_fields is None else list(ignore_fields)
        ignore_fields.extend(['device_participant_participant', 'device_participant_device'])
        if minimal:
            # No additional fields hidden in minimal mode (placeholder).
            ignore_fields.extend([])
        rval = super().to_json(ignore_fields=ignore_fields)
        return rval

    @staticmethod
    def create_defaults(test=False):
        """Populate test associations between known devices and participants."""
        if test:
            from opentera.db.models.TeraParticipant import TeraParticipant
            from opentera.db.models.TeraDevice import TeraDevice
            participant1 = TeraParticipant.get_participant_by_id(1)
            participant2 = TeraParticipant.get_participant_by_id(2)
            device1 = TeraDevice.get_device_by_name('Apple Watch #W05P1')
            device2 = TeraDevice.get_device_by_name('Kit Télé #1')
            device3 = TeraDevice.get_device_by_name('Robot A')

            dev_participant = TeraDeviceParticipant()
            dev_participant.device_participant_device = device1
            dev_participant.device_participant_participant = participant1
            db.session.add(dev_participant)

            dev_participant = TeraDeviceParticipant()
            dev_participant.device_participant_device = device1
            dev_participant.device_participant_participant = participant2
            db.session.add(dev_participant)

            dev_participant = TeraDeviceParticipant()
            dev_participant.device_participant_device = device2
            dev_participant.device_participant_participant = participant2
            db.session.add(dev_participant)
            db.session.commit()

    @staticmethod
    def get_device_participant_by_id(device_participant_id: int):
        """Return the association row with the given primary key, or None."""
        return TeraDeviceParticipant.query.filter_by(id_device_participant=device_participant_id).first()

    @staticmethod
    def query_devices_for_participant(participant_id: int):
        """Return all association rows for a participant."""
        return TeraDeviceParticipant.query.filter_by(id_participant=participant_id).all()

    @staticmethod
    def query_participants_for_device(device_id: int):
        """Return all association rows for a device."""
        return TeraDeviceParticipant.query.filter_by(id_device=device_id).all()

    @staticmethod
    def query_device_participant_for_participant_device(device_id: int, participant_id: int):
        """Return the association row for a (device, participant) pair, or None."""
        return TeraDeviceParticipant.query.filter_by(id_device=device_id, id_participant=participant_id).first()
| 44.5 | 114 | 0.718771 |
fee39b66b3b2ef9dd7dd901d2d89a2d3c684442c | 11,043 | py | Python | leetcode_python/Linked_list/split-linked-list-in-parts.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Linked_list/split-linked-list-in-parts.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Linked_list/split-linked-list-in-parts.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | """
725. Split Linked List in Parts
Medium
Given the head of a singly linked list and an integer k, split the linked list into k consecutive linked list parts.
The length of each part should be as equal as possible: no two parts should have a size differing by more than one. This may lead to some parts being null.
The parts should be in the order of occurrence in the input list, and parts occurring earlier should always have a size greater than or equal to parts occurring later.
Return an array of the k parts.
Example 1:
Input: head = [1,2,3], k = 5
Output: [[1],[2],[3],[],[]]
Explanation:
The first element output[0] has output[0].val = 1, output[0].next = null.
The last element output[4] is null, but its string representation as a ListNode is [].
Example 2:
Input: head = [1,2,3,4,5,6,7,8,9,10], k = 3
Output: [[1,2,3,4],[5,6,7],[8,9,10]]
Explanation:
The input has been split into consecutive parts with size difference at most 1, and earlier parts are a larger size than the later parts.
Constraints:
The number of nodes in the list is in the range [0, 1000].
0 <= Node.val <= 1000
1 <= k <= 50
"""
# V0
# IDEA : LINKED LIST OP + mod op
class Solution(object):
    def splitListToParts(self, head, k):
        """Split the list in place into k parts; sizes differ by at most 1."""
        # NO need to deal with edge case !!!
        # get linked list length
        _len = 0
        _head = cur = head
        while _head:
            _len += 1
            _head = _head.next
        # init res
        res = [None] * k
        ### NOTE : we loop over k
        for i in range(k):
            """
            2 cases
            case 1) i < (_len % k) : there is "remainder" ((_len % k)), so we need to add extra 1
                -> _cnt_elem = (_len // k) + 1
            case 2) i == (_len % k) : there is NO "remainder"
                -> _cnt_elem = (_len // k)
            """
            # NOTE THIS !!!
            _cnt_elem = (_len // k) + (1 if i < (_len % k) else 0)
            ### NOTE : we loop over _cnt_elem (length of each "split" linkedlist)
            for j in range(_cnt_elem):
                """
                3 cases
                1) j == 0 (begin of sub linked list)
                2) j == _cnt_elem - 1 (end of sub linked list)
                3) 0 < j < _cnt_elem - 1 (middle within sub linked list)
                """
                # NOTE THIS !!!
                # NOTE we need keep if - else in BELOW ORDER !!
                # -> j == 0, j == _cnt_elem - 1, else
                if j == 0:
                    res[i] = cur
                ### NOTE this !!! :
                # -> IF (but not elif)
                # -> since we also need to deal with j == 0 and j == _cnt_elem - 1 case
                if j == _cnt_elem - 1: # note this !!!
                    # get next first
                    tmp = cur.next
                    # point cur.next to None
                    cur.next = None
                    # move cur to next (tmp) for op in next i (for i in range(k))
                    cur = tmp
                else:
                    cur = cur.next
        #print ("res = " + str(res))
        return res
# V0'
class Solution(object):
    def splitListToParts(self, head, k):
        """Split the list in place into k parts whose sizes differ by <= 1.

        Fix: removed a leftover debug print of the result list (the V0
        variant above carries the same statement commented out).
        """
        # NO need to deal with edge case !!!
        # get len
        root = cur = head
        _len = 0
        while root:
            root = root.next
            _len += 1
        res = [None] * k
        for i in range(k):
            # first (_len % k) parts carry one extra node
            tmp_cnt = (_len // k) + (1 if i < (_len % k) else 0)
            for j in range(tmp_cnt):
                # j == 0 : this node starts the i-th part
                if j == 0:
                    res[i] = cur
                # IF (not elif): a 1-element part is both first and last
                if j == tmp_cnt-1:
                    _next = cur.next
                    cur.next = None
                    cur = _next
                # 0 < j < tmp_cnt
                else:
                    cur = cur.next
        return res
# V0'
# IDEA : LINKED LIST OP
class Solution:
    def splitListToParts(self, root, k):
        """Split the list in place; part i gets len//k (+1 for i < len%k) nodes."""
        def get_length(root):
            # count nodes reachable from root
            ans = 0
            while root is not None:
                root = root.next
                ans += 1
            return ans
        ans = [None]*k
        cur = root
        length = get_length(root)
        for i in range(k):
            no_elems = (length // k) + (1 if i < (length % k) else 0)
            for j in range(no_elems):
                if j == 0:
                    # first node of this part
                    ans[i] = cur
                if j == no_elems - 1:
                    # last node: detach the part and move on
                    temp = cur.next
                    cur.next = None
                    cur = temp
                else:
                    cur = cur.next
        return ans
# V1
# https://leetcode.com/problems/split-linked-list-in-parts/discuss/109284/Elegant-Python-with-Explanation-45ms
class Solution(object):
    def splitListToParts(self, root, k):
        """Reuse res: first as part sizes, then overwritten with part heads."""
        # Count the length of the linked list
        curr, length = root, 0
        while curr:
            curr, length = curr.next, length + 1
        # Determine the length of each chunk
        chunk_size, longer_chunks = length // k, length % k
        res = [chunk_size + 1] * longer_chunks + [chunk_size] * (k - longer_chunks)
        # Split up the list: cut the link behind the previous chunk, record
        # the current head, then advance num nodes
        prev, curr = None, root
        for index, num in enumerate(res):
            if prev:
                prev.next = None
            res[index] = curr
            for i in range(num):
                prev, curr = curr, curr.next
        return res
### Test case : dev
# V1'
# https://leetcode.com/problems/split-linked-list-in-parts/discuss/139360/Simple-pythonic-solution.-Beats-100
def get_length(root):
    """Count the nodes reachable from *root* (0 for an empty list)."""
    count = 0
    node = root
    while node is not None:
        count += 1
        node = node.next
    return count
class Solution:
    def splitListToParts(self, root, k):
        """Same scheme as V0'': uses the module-level get_length() helper."""
        ans = [None]*k
        cur = root
        length = get_length(root)
        for i in range(k):
            # first (length % k) parts get one extra node
            no_elems = (length // k) + (1 if i < (length % k) else 0)
            for j in range(no_elems):
                if j == 0:
                    # first node of this part
                    ans[i] = cur
                if j == no_elems - 1:
                    # last node: detach and advance
                    temp = cur.next
                    cur.next = None
                    cur = temp
                else:
                    cur = cur.next
        return ans
# V1''
# https://leetcode.com/problems/split-linked-list-in-parts/discuss/237516/python-solution-beat-100
class Solution:
    def splitListToParts(self, root: 'ListNode', k: 'int') -> 'List[ListNode]':
        """Walk to the end of each part with an `end` pointer, then cut."""
        # n = length, p walks the list
        n, p, res = 0, root, []
        while p:
            n, p = n + 1, p.next
        # a = base part size, m = number of parts that get one extra node
        a, m, start = n // k, n % k, root
        for _ in range(k):
            if not start: res.append(None)
            else:
                end = start
                # advance to the last node of this part
                for _ in range(a + (1 if m else 0) - 1):
                    end = end.next
                if m > 0: m -= 1
                res.append(start)
                start = end.next
                end.next = None
        return res
# V1'''
# http://bookshadow.com/weblog/2017/11/13/leetcode-split-linked-list-in-parts/
class Solution(object):
    def splitNum(self, m, n):
        """Split integer m into n part sizes differing by at most one.

        Bug fix: the original computed `m / n`, which is *float* division
        on Python 3 and made this return float part sizes; divmod keeps
        integers and matches the intended Python 2 floor semantics.
        """
        q, r = divmod(m, n)
        if r > 0: return [q + 1] * r + [q] * (n - r)
        if r < 0: return [q] * (n + r) + [q - 1] * -r
        return [q] * n

    def listLength(self, root):
        """Return the number of nodes in the list starting at root."""
        ans = 0
        while root:
            ans += 1
            root = root.next
        return ans

    def splitListToParts(self, root, k):
        """
        :type root: ListNode
        :type k: int
        :rtype: List[ListNode]
        """
        ans = []
        s = self.listLength(root)
        for p in self.splitNum(s, k):
            if not p:
                # empty part
                ans.append(None)
                continue
            node = root
            # advance to the last node of this part
            for x in range(p - 1):
                node = node.next
            ans.append(root)
            if root:
                root = node.next
                node.next = None
        return ans
# V1''''
# https://blog.csdn.net/fuxuemingzhu/article/details/79543931
class Solution(object):
    def splitListToParts(self, root, k):
        """Build k *new* lists by copying node values.

        Bug fix: the original advanced `root` (and decremented `rem`)
        after *every* part, even when no remainder node was consumed.
        That skipped one input node per non-remainder part and crashed
        with AttributeError once the input ran out early (e.g. 10 nodes,
        k=3).  The advance now happens only when a remainder node is
        actually copied.

        :type root: ListNode
        :type k: int
        :rtype: List[ListNode]
        """
        nodes = []
        counts = 0
        each = root
        while each:
            counts += 1
            each = each.next
        # base part size and number of parts carrying one extra node
        num, rem = divmod(counts, k)
        for i in range(k):
            head = ListNode(0)  # dummy head for this part
            each = head
            for j in range(num):
                node = ListNode(root.val)
                each.next = node
                each = each.next
                root = root.next
            if rem and root:
                # this part gets one extra (remainder) node
                each.next = ListNode(root.val)
                root = root.next
                rem -= 1
            nodes.append(head.next)
        return nodes
# V1'''''
# https://leetcode.com/problems/split-linked-list-in-parts/solution/
# IDEA : CREATE NEW LISTS
# time complexity : O(N+K)
# space complexity : O(max(N, K))
class Solution(object):
    def splitListToParts(self, root, k):
        """Official approach #1: copy nodes into k brand-new lists."""
        # count length; 1001 bounds the loop per problem constraints
        cur = root
        for N in range(1001):
            if not cur: break
            cur = cur.next
        width, remainder = divmod(N, k)

        ans = []
        cur = root
        for i in range(k):
            head = write = ListNode(None)  # dummy head for this part
            # first `remainder` parts get width + 1 nodes
            for j in range(width + (i < remainder)):
                # chained assignment: link the new node, then move `write`
                write.next = write = ListNode(cur.val)
                if cur: cur = cur.next
            ans.append(head.next)
        return ans
# V1''''''
# https://leetcode.com/problems/split-linked-list-in-parts/solution/
# IDEA : SPLIT INPUT LIST
# time complexity : O(N+K)
# space complexity : O(K)
class Solution(object):
    def splitListToParts(self, root, k):
        """Official approach #2: cut the existing list in place."""
        # count length; 1001 bounds the loop per problem constraints
        cur = root
        for N in range(1001):
            if not cur: break
            cur = cur.next
        width, remainder = divmod(N, k)

        ans = []
        cur = root
        for i in range(k):
            head = cur
            # advance to the last node of this part
            for j in range(width + (i < remainder) - 1):
                if cur: cur = cur.next
            if cur:
                # detach this part and step to the next part's head
                cur.next, cur = None, cur.next
            ans.append(head)
        return ans
# V2
# Time:  O(n + k)
# Space: O(1)
class Solution(object):
    def splitListToParts(self, root, k):
        """Split the list in place into k parts whose sizes differ by <= 1.

        Bug fix: the original read `curr.__next__` (a Python-2-to-3
        auto-conversion artifact); list nodes expose `next`, as every
        other solution in this file assumes.
        """
        n = 0
        curr = root
        while curr:
            curr = curr.next
            n += 1
        width, remainder = divmod(n, k)

        result = []
        curr = root
        for i in range(k):
            head = curr
            # advance to the last node of this part
            for j in range(width-1+int(i < remainder)):
                if curr:
                    curr = curr.next
            if curr:
                # detach this part and step to the next part's head
                curr.next, curr = None, curr.next
            result.append(head)
        return result
fee526d6327eadfd2a1c6fc5732f854eab5a5bb2 | 1,645 | py | Python | carl/charts.py | zaratec/carl | 9d655c2cb75d90ddc6b2d101073248a2fc3c252e | [
"MIT"
] | null | null | null | carl/charts.py | zaratec/carl | 9d655c2cb75d90ddc6b2d101073248a2fc3c252e | [
"MIT"
] | null | null | null | carl/charts.py | zaratec/carl | 9d655c2cb75d90ddc6b2d101073248a2fc3c252e | [
"MIT"
] | 1 | 2020-11-19T23:41:28.000Z | 2020-11-19T23:41:28.000Z | import numpy as np
import matplotlib.pyplot as plt
import matplotlib
"""
def ecdf(sorted_views):
for view, data in sorted_views.iteritems():
yvals = np.arange(len(data))/float(len(data))
plt.plot(data, yvals, label=view)
plt.grid(True)
plt.xlabel('jaccard')
plt.ylabel('CDF')
lgnd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
plt.savefig("ecdf.png", bbox_extra_artists=(lgnd, ), bbox_inches='tight')
clear()
"""
#def ecdf_polished(sorted_views):
def ecdf(sorted_views):
    """Plot the empirical CDF of the Jaccard index for a subset of views.

    Only the "priv", "netloc" and "path" views are drawn, relabelled for
    presentation; the figure is saved to "ecdf.png" and the matplotlib
    state is reset afterwards. `sorted_views` maps a view name to its
    (already sorted) sequence of Jaccard values.
    """
    view_to_label = {
        "priv": "root domain",
        "netloc": "fqdn",
        "path": "full path"}
    # BUG FIX: .items() instead of the Python-2-only .iteritems(), so the
    # code also runs under Python 3 (same behaviour on Python 2).
    for view, data in sorted_views.items():
        if view in view_to_label:
            # Empirical CDF: the i-th smallest value gets height i/n.
            yvals = np.arange(len(data))/float(len(data))
            plt.plot(data, yvals, label=view_to_label[view])
    matplotlib.rcParams.update({'font.size': 22})
    plt.grid(True)
    plt.xlabel('jaccard index')
    plt.ylabel('CDF')
    lgnd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.show()
    plt.savefig("ecdf.png", bbox_extra_artists=(lgnd, ), bbox_inches='tight')
    clear()
def density(sorted_views):
    """Plot each view's Jaccard values against their rank and save "stack.png".

    `sorted_views` maps a view name to a sequence of Jaccard values; one
    curve is drawn per view, then the matplotlib state is reset.
    """
    # BUG FIX: .items() instead of the Python-2-only .iteritems(), so the
    # code also runs under Python 3 (same behaviour on Python 2).
    for view, data in sorted_views.items():
        xvals = range(len(data))
        plt.plot(xvals, data, label=view)
    plt.grid(True)
    plt.xlabel('site')
    plt.ylabel('jaccard')
    lgnd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.show()
    plt.savefig("stack.png", bbox_extra_artists=(lgnd, ), bbox_inches='tight')
    clear()
def clear():
    """Reset matplotlib state so the next chart starts from a blank figure."""
    # Same three calls, in the same order, as the original cleanup.
    for reset in (plt.clf, plt.cla, plt.close):
        reset()
| 25.703125 | 78 | 0.6231 |
fee57ff8598ad386cc6460807e129b503a56f217 | 1,740 | py | Python | tests/stimuli/test_flashed_images.py | balefebvre/pystim | ae51d8a4b478da6dec44b296407099c6257fa3fa | [
"MIT"
] | null | null | null | tests/stimuli/test_flashed_images.py | balefebvre/pystim | ae51d8a4b478da6dec44b296407099c6257fa3fa | [
"MIT"
] | null | null | null | tests/stimuli/test_flashed_images.py | balefebvre/pystim | ae51d8a4b478da6dec44b296407099c6257fa3fa | [
"MIT"
] | null | null | null | import pystim
# NOTE(review): exploratory/scratch test script — the placeholder paths and
# the condition_nb / display_nb / trial_nb / condition_trial_nb variables
# below are never defined, so running this file as-is will raise.
bin_path = None # TODO correct.
vec_path = None # TODO correct.
trials_path = None # TODO correct.
stimulus = pystim.stimuli.flashed_images.load(bin_path, vec_path, trials_path)
# Basic stimulus metadata.
print(stimulus.nb_frames)
print(stimulus.nb_diplays)  # NOTE(review): 'nb_diplays' looks like a typo for 'nb_displays' — confirm against pystim's API
print(stimulus.nb_trials)
print(stimulus.nb_conditions)
print(stimulus.condition_nbs)
print(stimulus.condition_nbs_sequence)
# print(stimulus.nb_repetitions) # ill-defined?
# The calls below sketch the intended accessor API (their arguments are
# undefined in this script — see the note at the top).
print(stimulus.get_nb_repetitions(condition_nb))
print(stimulus.get_frame(display_nb))
print(stimulus.get_frame_by_display_nb(display_nb))
print(stimulus.get_nb_displays(trial_nb))
print(stimulus.get_display_nbs(trial_nb))
print(stimulus.get_nb_displays(condition_nb, condition_trial_nb))
print(stimulus.get_display_nbs(condition_nb, condition_trial_nb))
# A condition is a set of parameters plus one (or a sequence of) binary frames.
# TODO the stimulus must support generation.
# TODO the stimulus must allow verifying its own integrity.
# TODO the stimulus must facilitate analysis.
stimulus.get_trial_display_extend(trial_nb)
stimulus.get_trial_display_extend(condition_nb, condition_trial_nb)
stimulus.get_trial_display_extends(condition_nb)
condition = stimulus.get_condition(condition_nb) # a condition -> several trials, several displays
trial = stimulus.get_trial(trial_nb) # a trial -> one condition, several displays
display = stimulus.get_display(display_nb) # a display -> one trial, one condition
stimulus.get_display_nbs_extent(trial_nb)
stimulus.get_time_extent(trial_nb)
# NOTE(review): 'response' is also undefined here — presumably a recorded
# response object from elsewhere in the test suite; confirm.
psr = response.get_peristimulus_responses(stimulus.get_trial_display_extends(condition_nb))
# Analysis.
# 1. For each recording:
#    a. Visualize the discharge (firing) rate over time (for each neuron).
fee65bcaf5d8cc11fa9804e94169f7ab6dcff8da | 427 | py | Python | test/test_google.py | kcather/Legacy | dcf92aa7d5d4213736e3018ce4b0eb945d80afb7 | [
"MIT"
] | null | null | null | test/test_google.py | kcather/Legacy | dcf92aa7d5d4213736e3018ce4b0eb945d80afb7 | [
"MIT"
] | null | null | null | test/test_google.py | kcather/Legacy | dcf92aa7d5d4213736e3018ce4b0eb945d80afb7 | [
"MIT"
] | null | null | null | #### neeed to make sure google still work for sure
# this may have to run on non-python devs' boxes, try/catch an install of the requests lib to be SURE
try:
    import requests
except ImportError:
    # BUG FIX: os.sys() does not exist — os.system() is the call that runs
    # a shell command. Also narrowed the bare `except:` to ImportError so
    # unrelated errors are not swallowed.
    import os
    os.system('easy_install pip')
    os.system('pip install requests')
    import requests

#r = requests.get('http://www.google.com/')
r = requests.get('http://google.com')
# BUG FIX: the condition used '=' (assignment), a SyntaxError; '==' compares.
if r.status_code == 200:
    # print() call form works under both Python 2 and Python 3
    # (the original `print "..."` statement is Python-2 only).
    print("yep, it still there")
| 25.117647 | 101 | 0.683841 |
fee67822f155f266cc796b6f601f1860ad8b8823 | 4,760 | py | Python | examples/Kane1985/Chapter5/Ex10.10.py | nouiz/pydy | 20c8ca9fc521208ae2144b5b453c14ed4a22a0ec | [
"BSD-3-Clause"
] | 298 | 2015-01-31T11:43:22.000Z | 2022-03-15T02:18:21.000Z | examples/Kane1985/Chapter5/Ex10.10.py | nouiz/pydy | 20c8ca9fc521208ae2144b5b453c14ed4a22a0ec | [
"BSD-3-Clause"
] | 359 | 2015-01-17T16:56:42.000Z | 2022-02-08T05:27:08.000Z | examples/Kane1985/Chapter5/Ex10.10.py | nouiz/pydy | 20c8ca9fc521208ae2144b5b453c14ed4a22a0ec | [
"BSD-3-Clause"
] | 109 | 2015-02-03T13:02:45.000Z | 2021-12-21T12:57:21.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 10.10 from Kane 1985."""
from __future__ import division
from sympy import expand, solve, symbols, sin, cos, S
from sympy.physics.mechanics import ReferenceFrame, RigidBody, Point
from sympy.physics.mechanics import dot, dynamicsymbols, inertia, msprint
from util import generalized_active_forces, partial_velocities
from util import potential_energy
# Define generalized coordinates, speeds, and constants:
# q0..q2 are the joint coordinates, u1..u3 the generalized speeds used by
# Kane's method; the remaining symbols are lengths, mass-centre offsets,
# central inertia scalars, masses and gravity.
q0, q1, q2 = dynamicsymbols('q0:3')
q0d, q1d, q2d = dynamicsymbols('q0:3', level=1)
u1, u2, u3 = dynamicsymbols('u1:4')
LA, LB, LP = symbols('LA LB LP')
p1, p2, p3 = symbols('p1:4')
A1, A2, A3 = symbols('A1:4')
B1, B2, B3 = symbols('B1:4')
C1, C2, C3 = symbols('C1:4')
D11, D22, D33, D12, D23, D31 = symbols('D11 D22 D33 D12 D23 D31')
g, mA, mB, mC, mD, t = symbols('g mA mB mC mD t')
TA_star, TB_star, TC_star, TD_star = symbols('TA* TB* TC* TD*')
## --- reference frames ---
# E is the inertial frame; A rotates about E.x by q0, B about A.y by q1;
# C and D are fixed in B (zero-angle orientations).
E = ReferenceFrame('E')
A = E.orientnew('A', 'Axis', [q0, E.x])
B = A.orientnew('B', 'Axis', [q1, A.y])
C = B.orientnew('C', 'Axis', [0, B.x])
D = C.orientnew('D', 'Axis', [0, C.x])
## --- points and their velocities ---
pO = Point('O')
pA_star = pO.locatenew('A*', LA * A.z)
pP = pO.locatenew('P', LP * A.z)
pB_star = pP.locatenew('B*', LB * B.z)
pC_star = pB_star.locatenew('C*', q2 * B.z)
pD_star = pC_star.locatenew('D*', p1 * B.x + p2 * B.y + p3 * B.z)
pO.set_vel(E, 0) # Point O is fixed in Reference Frame E
pA_star.v2pt_theory(pO, E, A) # Point A* is fixed in Reference Frame A
pP.v2pt_theory(pO, E, A) # Point P is fixed in Reference Frame A
pB_star.v2pt_theory(pP, E, B) # Point B* is fixed in Reference Frame B
# Point C* is moving in Reference Frame B
pC_star.set_vel(B, pC_star.pos_from(pB_star).diff(t, B))
pC_star.v1pt_theory(pB_star, E, B)
pD_star.set_vel(B, pC_star.vel(B)) # Point D* is fixed rel to Point C* in B
pD_star.v1pt_theory(pB_star, E, B) # Point D* is moving in Reference Frame B
# --- define central inertias and rigid bodies ---
IA = inertia(A, A1, A2, A3)
IB = inertia(B, B1, B2, B3)
IC = inertia(B, C1, C2, C3)
ID = inertia(B, D11, D22, D33, D12, D23, D31)
# inertia[0] is defined to be the central inertia for each rigid body
rbA = RigidBody('rbA', pA_star, A, mA, (IA, pA_star))
rbB = RigidBody('rbB', pB_star, B, mB, (IB, pB_star))
rbC = RigidBody('rbC', pC_star, C, mC, (IC, pC_star))
rbD = RigidBody('rbD', pD_star, D, mD, (ID, pD_star))
bodies = [rbA, rbB, rbC, rbD]
## --- generalized speeds ---
# Kinematic differential equations relating u's to the qdots; solving them
# lets us express the energies purely in terms of the generalized speeds.
kde = [u1 - dot(A.ang_vel_in(E), A.x),
       u2 - dot(B.ang_vel_in(A), B.y),
       u3 - dot(pC_star.vel(B), B.z)]
kde_map = solve(kde, [q0d, q1d, q2d])
for k, v in kde_map.items():
    kde_map[k.diff(t)] = v.diff(t)
# kinetic energy of robot arm E
K = sum(rb.kinetic_energy(E) for rb in bodies).subs(kde_map)
print('K = {0}'.format(msprint(K)))
# find potential energy contribution of the set of gravitational forces
forces = [(pA_star, -mA*g*E.x), (pB_star, -mB*g*E.x),
          (pC_star, -mC*g*E.x), (pD_star, -mD*g*E.x)]
## --- define partial velocities ---
partials = partial_velocities([f[0] for f in forces],
                              [u1, u2, u3], E, kde_map)
## -- calculate generalized active forces ---
Fr, _ = generalized_active_forces(partials, forces)
V = potential_energy(Fr, [q0, q1, q2], [u1, u2, u3], kde_map)
#print('V = {0}'.format(msprint(V)))
# Fix the integration constant and the arbitrary alpha terms so V matches
# the book's form.
print('\nSetting C = g*mD*p1, α1, α2, α3 = 0')
V = V.subs(dict(zip(symbols('C α1 α2 α3'), [g*mD*p1, 0, 0, 0] )))
print('V = {0}'.format(msprint(V)))
# Intermediate Z quantities transcribed from the book's solution, used only
# to build the expected K and V expressions below.
Z1 = u1 * cos(q1)
Z2 = u1 * sin(q1)
Z3 = -Z2 * u2
Z4 = Z1 * u2
Z5 = -LA * u1
Z6 = -(LP + LB*cos(q1))
Z7 = u2 * LB
Z8 = Z6 * u1
Z9 = LB + q2
Z10 = Z6 - q2*cos(q1)
Z11 = u2 * Z9
Z12 = Z10 * u1
Z13 = -sin(q1) * p2
Z14 = Z9 + p3
Z15 = Z10 + sin(q1)*p1 - cos(q1)*p3
Z16 = cos(q1) * p2
Z17 = Z13*u1 + Z14*u2
Z18 = Z15 * u1
Z19 = Z16*u1 - u2*p1 + u3
Z20 = u1 * Z5
Z21 = LB * sin(q1) * u2
Z22 = -Z2 * Z8
Z23 = Z21*u1 + Z2*Z7
Z24 = Z1*Z8 - u2*Z7
Z25 = Z21 - u3*cos(q1) + q2*sin(q1)*u2
Z26 = 2*u2*u3 - Z2*Z12
Z27 = Z25*u1 + Z2*Z11 - Z1*u3
Z28 = Z1*Z12 - u2*Z11
Z29 = -Z16 * u2
Z30 = Z25 + u2*(cos(q1)*p1 + sin(q1)*p3)
Z31 = Z13 * u2
Z32 = Z29*u1 + u2*(u3 + Z19) - Z2*Z18
Z33 = Z30*u1 + Z2*Z17 - Z1*Z19
Z34 = Z31*u1 + Z1*Z18 - u2*Z17
K_expected = S(1)/2*(A1*u1**2 + (B1 + C1)*Z1**2 + (B2 + C2)*u2**2 +
                     (B3 + C3)*Z2**2 + Z1*(D11*Z1 + D12*u2 + D31*Z2) +
                     u2*(D12*Z1 + D22*u2 + D23*Z2) +
                     Z2*(D31*Z1 + D23*u2 + D33*Z2) + mA*Z5**2 +
                     mB*(Z7**2 + Z8**2) + mC*(Z11**2 + Z12**2 + u3**2) +
                     mD*(Z17**2 + Z18**2 + Z19**2))
V_expected = g*((mB*LB + mC*Z9 + mD*Z14)*sin(q1) + mD*p1*cos(q1))
# Self-check: the derived energies must match the book's expressions exactly.
assert expand(K - K_expected) == 0
assert expand(V - V_expected) == 0
| 33.521127 | 76 | 0.602731 |
fee67e3507fde627d604b24556de9fa5e1ddebf0 | 1,179 | py | Python | src/test/test_pairwiseView.py | SensorDX/rainqc | d957705e0f1e2e05b3bf23c5b6fd77a135ac69cd | [
"Apache-2.0"
] | 1 | 2022-02-16T01:24:17.000Z | 2022-02-16T01:24:17.000Z | src/test/test_pairwiseView.py | SensorDX/rainqc | d957705e0f1e2e05b3bf23c5b6fd77a135ac69cd | [
"Apache-2.0"
] | null | null | null | src/test/test_pairwiseView.py | SensorDX/rainqc | d957705e0f1e2e05b3bf23c5b6fd77a135ac69cd | [
"Apache-2.0"
] | null | null | null | from unittest import TestCase
from src.view import PairwiseView
import numpy as np
class TestPairwiseView(TestCase):
def setUp(self):
self.num_stations = 4
self.n = 200
self.stations = np.random.randn(self.n, self.num_stations)
self.pv = PairwiseView(variable='pr')
def test_make_view(self):
## Slicing in this way preserves the dimension of the array
#ref: https://stackoverflow.com/questions/3551242/numpy-index-slice-without-losing-dimension-information
X = self.pv.make_view(self.stations[:, 0:1:], [self.stations[:, 1:2:]]).x
self.assertEqual(X.shape[0], self.n)
y = self.pv.make_view(self.stations[:,0:1:],[self.stations[:, 1:2:],
self.stations[:, 2:3:],self.stations[:, 3:4:]]).x
self.assertEqual(y.shape, (self.n, self.num_stations-1))
self.assertIsNone(self.pv.label)
## Test multiple pairwise _views
for i in range(self.num_stations-1):
vw = self.pv.make_view(self.stations[:, 0:1:],
[self.stations[:, (i+1):(i+2):]]).x
self.assertEqual(vw.shape[0], self.n )
| 35.727273 | 112 | 0.603053 |
feea04b5b8f70213610fd5b8726978dd6e62c7f1 | 1,013 | py | Python | bmi.py | blorincz1/bmi-tool | b49e66bac422ab1fe411642937bd0679862b7042 | [
"MIT"
] | null | null | null | bmi.py | blorincz1/bmi-tool | b49e66bac422ab1fe411642937bd0679862b7042 | [
"MIT"
] | null | null | null | bmi.py | blorincz1/bmi-tool | b49e66bac422ab1fe411642937bd0679862b7042 | [
"MIT"
] | null | null | null | # prompt user to enter how much they weigh in pounds
def bmi_category_message(bmi):
    """Return the user-facing message for a given (rounded) BMI value.

    BUG FIX: the original elif chain used strict '>' comparisons on both
    sides, so BMI values of exactly 25 or 30 fell through to a
    "something went wrong" branch. The ranges below are gap-free:
    <= 18.5 underweight, < 25 normal, < 30 overweight, else obese.
    Also fixes the "underwewight" typo in the output.
    """
    if bmi <= 18.5:
        return f"Oh no, your BMI is {bmi} which means you are underweight. Eat some food!"
    if bmi < 25:
        return f"Congratulations! Your BMI is {bmi} which means you are in the normal range. Keep up the good work!"
    if bmi < 30:
        return f"Uh oh, your BMI is {bmi} which means you are overweight. Make healthy choices and exercise!"
    return f"Oh boy, your BMI is {bmi} which means you are obese. GO SEE YOUR DOCTOR~"


def main():
    """Prompt for weight/height, compute BMI and print the category message."""
    # prompt user to enter how much they weigh in pounds
    weight = int(input("How much do you weigh (in pounds)? "))
    # prompt user to enter their height in inches
    height = int(input("What is your height (in inches)? "))
    # convert weight to kilograms
    weight_in_kg = weight / 2.2
    # convert height to meters (inches -> cm -> m)
    height_in_meter = height * 2.54 / 100
    # BMI = kg / m^2, rounded to one decimal
    bmi = round(weight_in_kg / (height_in_meter ** 2), 1)
    print(bmi_category_message(bmi))


if __name__ == "__main__":
    main()
| 31.65625 | 115 | 0.664363 |
feee07121fe76d5736e52eb5411adc869715e8db | 7,031 | py | Python | day92021.py | GeirOwe/adventOfCode | fee1420cb8ecce8b7aaf9d48472364be191ca2a2 | [
"MIT"
] | 1 | 2021-12-20T11:10:59.000Z | 2021-12-20T11:10:59.000Z | day92021.py | GeirOwe/adventOfCode | fee1420cb8ecce8b7aaf9d48472364be191ca2a2 | [
"MIT"
] | null | null | null | day92021.py | GeirOwe/adventOfCode | fee1420cb8ecce8b7aaf9d48472364be191ca2a2 | [
"MIT"
] | 1 | 2021-12-02T14:40:12.000Z | 2021-12-02T14:40:12.000Z | # Day9 - 2021 Advent of code
# source: https://adventofcode.com/2021/day/9
import os
import numpy as np
def clear_console():
    """Clear the terminal and print the Day 9 banner."""
    os.system('clear')
    for line in ('< .... AoC 2021 Day 9, part 1 .... >', ''):
        print(line)
def find_low_points(the_map, numOfRows, numOfCols):
    """Return the heights of every low point on the height map.

    A low point is a cell strictly lower than all of its in-bounds
    orthogonal neighbours (left/right/up/down). This generic neighbour
    check replaces the original's nine hand-written corner/edge/interior
    cases, and it also works for 1-row or 1-column maps, where the
    original unconditionally indexed the row above/below out of bounds.

    Args:
        the_map: 2-D numpy array of heights, shape (numOfRows, numOfCols).
        numOfRows: number of rows in the_map.
        numOfCols: number of columns in the_map.

    Returns:
        List of low-point heights in row-major scan order (duplicates kept).
    """
    low_points_list = []
    for row in range(numOfRows):
        for col in range(numOfCols):
            height = the_map[row, col]
            # Collect only the neighbours that are inside the map.
            neighbors = []
            if col > 0:
                neighbors.append(the_map[row, col - 1])
            if col < numOfCols - 1:
                neighbors.append(the_map[row, col + 1])
            if row > 0:
                neighbors.append(the_map[row - 1, col])
            if row < numOfRows - 1:
                neighbors.append(the_map[row + 1, col])
            # Strictly lower than every existing neighbour -> low point.
            if all(n > height for n in neighbors):
                low_points_list.append(height)
    return low_points_list
def summarize_risk(low_points_list):
    """Total risk of the given low points.

    The risk level of a low point is 1 plus its height.
    """
    return sum(height + 1 for height in low_points_list)
def process_the_data(the_map, numOfRows, numOfCols):
    """Locate all low points on the map and return their summed risk level."""
    # The low points are the locations lower than all adjacent locations.
    low_points = find_low_points(the_map, numOfRows, numOfCols)
    print('\nthe low points -> ', low_points, '\n')
    # Answer to part 1: sum of (height + 1) over every low point.
    return summarize_risk(low_points)
def build_map(theData):
    """Turn a list of equal-length digit strings into a 2-D numpy height map.

    Returns:
        (the_map, numOfRows, numOfCols) where the_map has shape
        (numOfRows, numOfCols).
    """
    numOfRows = len(theData)
    numOfCols = 0
    digits = []
    for row in theData:
        # Every row is expected to have the same width; the last row's
        # length is what the reshape below uses (as in the original).
        numOfCols = len(row)
        digits.extend(int(ch) for ch in row)
    # A flat list reshaped into (rows, cols) keeps later indexing simple.
    the_map = np.array(digits, dtype="int").reshape(numOfRows, numOfCols)
    return the_map, numOfRows, numOfCols
def get_the_data():
    """Read the puzzle input and return it as a list of stripped lines."""
    #read the test puzzle input
    #theData = open('day92021_test_puzzle_input.txt', 'r')
    # read the puzzle input; BUG FIX: the original open() never closed the
    # file handle — 'with' guarantees it is released.
    with open('day92021_puzzle_input.txt', 'r') as theData:
        # move data into a list - read each line and remove the lineshift
        data_list = [element.strip() for element in theData]
    return data_list
def start_the_engine():
    """Solve part 1: read the input, build the height map, and print the
    summed risk level of all low points."""
    #get the data and read them into a list
    theData = get_the_data()
    the_map, numOfRows, numOfCols = build_map(theData)
    #process the data and return the answer
    valueX = process_the_data(the_map, numOfRows, numOfCols)
    # Next, you need to find the largest basins. The size of a basin is the number
    # of locations within the basin, including the low point.
    # Find the three largest basins and multiply their sizes together.
    #find adjacent cells:
    #def adj_finder(matrix, position):
    #adj = []
    #for dx in range(-1, 2):
    #    for dy in range(-1, 2):
    #        rangeX = range(0, matrix.shape[0]) # X bounds
    #        rangeY = range(0, matrix.shape[1]) # Y bounds
    #
    #        (newX, newY) = (position[0]+dx, position[1]+dy) # adjacent cell
    #
    #        if (newX in rangeX) and (newY in rangeY) and (dx, dy) != (0, 0):
    #            adj.append((newX, newY))
    #
    #return adj
    # NOTE(review): the commented-out adj_finder sketch above is unused
    # scaffolding for part 2 (basins); kept as-is.
    print('\nthe sum of the risk levels of all low points -> ', valueX,'\n')
    return
#let's start: run the solution only when executed as a script
if __name__ == '__main__':
    clear_console()
    start_the_engine()
feee0df189f0b37958204462a48904755aa19b63 | 7,420 | py | Python | cogs/Console.py | KhangOP/PaladinsAssistantBot | 9b705dc688610ba52909f0b0e152d8684006c6a6 | [
"MIT"
] | null | null | null | cogs/Console.py | KhangOP/PaladinsAssistantBot | 9b705dc688610ba52909f0b0e152d8684006c6a6 | [
"MIT"
] | null | null | null | cogs/Console.py | KhangOP/PaladinsAssistantBot | 9b705dc688610ba52909f0b0e152d8684006c6a6 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from datetime import date, datetime
# Class handles commands related to console players
# Class handles commands related to console players
class ConsoleCommands(commands.Cog, name="Console Commands"):
    """Console Commands"""

    # Maps a lower-cased platform name to the Hi-Rez portal id the API expects.
    PORTAL_IDS = {"xbox": "10", "ps4": "9", "switch": "22"}

    def __init__(self, bot):
        # bot exposes paladinsAPI, which the commands below query.
        self.bot = bot

    @staticmethod
    def _days_since_login(player):
        """Whole days since the player's last login (0 or less means today)."""
        current_date = date.today()
        current_time = datetime.min.time()
        today = datetime.combine(current_date, current_time)
        return (today - player.lastLoginDatetime).days

    # Returns a list of embeds of console players so they can store their Paladins id's in the bot
    # (no docstring on purpose: discord.py surfaces command docstrings as
    # user-visible help text, and the original command had none)
    @commands.command(name='console', pass_context=True, ignore_extra=False, aliases=["Console"])
    @commands.cooldown(3, 30, commands.BucketType.user)
    async def console(self, ctx, player_name, platform: str):
        async with ctx.channel.typing():
            portal_id = self.PORTAL_IDS.get(platform.lower())
            if portal_id is None:
                await ctx.send("```Invalid platform name. Valid platform names are:\n1. Xbox\n2. PS4\n3. Switch```")
                return None

            players = self.bot.paladinsAPI.searchPlayers(player_name)

            # Hi-Rez endpoint down. BUG FIX: this must be checked before the
            # generic emptiness check below — `if not players` is also true
            # for None, which made this branch unreachable in the original.
            if players is None:
                await ctx.send("A Hi-Rez endpoint is down meaning this command won't work. "
                               "Please don't try again for a while and give Hi-Rez a few hours to get the "
                               "endpoint online again.")
                return None
            if not players:
                await ctx.send("Found `0` players with the name `{}`.".format(player_name))
                return None

            # Keep only players on the requested platform whose name matches
            # case-insensitively.
            players = [player for player in players if player.playerName.lower() == player_name.lower() and
                       player['portal_id'] == portal_id]
            num_players = len(players)
            if num_players > 20:  # Too many players...we must match case exactly
                await ctx.send("Found `{}` players with the name `{}`. Switching to case sensitive mode..."
                               .format(num_players, player_name))
                players = [player for player in players if player.playerName == player_name and
                           player['portal_id'] == portal_id]
                num_players = len(players)

            await ctx.send("Found `{}` players with the name `{}`."
                           .format(num_players, player_name))

            if num_players > 20:
                await ctx.send("```There are too many players with the name {}:\n\nPlease look on PaladinsGuru to "
                               "find the Player ID```https://paladins.guru/search?term={}&type=Player"
                               .format(player_name, player_name))
                return None

            # Fetch full profiles; keep only accounts seen in the last 90 days.
            # (Removed the original's `ss` string accumulator, which was
            # built but never used.)
            recent_player = []
            for player in players:
                player = self.bot.paladinsAPI.getPlayer(player=player.playerId)
                # only add players seen in the last 90 days
                if self._days_since_login(player) <= 90:
                    recent_player.append(player)

            await ctx.send("Found `{}` recent player(s) `(seen in the last 90 days)`".format(len(recent_player)))

            # One embed per recent account; the description carries the
            # player id the user needs for the bot's store command.
            for player in recent_player:
                days = self._days_since_login(player)
                last_seen = "Today" if days <= 0 else "{} days ago".format(days)

                embed = discord.Embed(
                    title=player.playerName,
                    description="↓↓↓ Player ID ↓↓↓```fix\n{}```".format(player.playerId),
                    colour=discord.colour.Color.dark_teal(),
                )
                embed.add_field(name='Last Seen:', value=last_seen, inline=True)
                embed.add_field(name='Account Level:', value=player.accountLevel, inline=True)
                embed.add_field(name='Hours Played:', value=player.hoursPlayed, inline=True)
                embed.add_field(name='Account Created:', value=player.createdDatetime, inline=True)
                await ctx.send(embed=embed)

    # Returns an embed of how to format a console name
    # (static help embed; again no docstring so help output is unchanged)
    @commands.command(name='console_name')
    async def usage(self, ctx):
        embed = discord.Embed(
            title="How to format your console name in PaladinsAssistant.",
            colour=discord.Color.dark_teal(),
            description="\u200b"
        )
        embed.add_field(name="To use a console name you must provide your name and platform surrounded in quotes.",
                        value="So for example a console player with the name `zombie killer` who plays on the "
                              "`Switch` would type their name as follows in the stats command.\n\n"
                              "`>>stats \"Zombie Killer Switch\"`\n\u200b", inline=False)
        embed.add_field(
            name="Now if you want to make your life easier I would recommend storing/linking your name to the "
                 "PaladinsAssistant.",
            value="You can do this by using the `>>console` command to look up your Paladins `player_id` and then"
                  "using the `>>store` command by doing `>>store your_player_id`. Then in commands you can just use "
                  "the word `me` in place of your console name and platform.\n\u200b", inline=False)
        embed.add_field(name="Below are the 3 steps (`with a picture`) of what you need to do if you are directed"
                             " to use Guru's site to find a console `player_id from the console command.`",
                        value="```md\n"
                              "1. Use the link generated from the command or go to https://paladins.guru/ and type "
                              "in the console player's name and then search.\n"
                              "2. Locate the account that you want and click on the name.\n"
                              "3. Then copy the number right before the player name.\n"
                              "4. Congrats you now have the console's players magical number.\n```", inline=False)
        embed.set_thumbnail(
            url="https://raw.githubusercontent.com/EthanHicks1/PaladinsAssistantBot/master/assets/Androxus.png")
        embed.set_image(
            url="https://raw.githubusercontent.com/EthanHicks1/PaladinsAssistantBot/master/assets/Console.png")
        embed.set_footer(text="If you still have questions feel free to message me @ FeistyJalapeno#9045. "
                              "I am a very busy but will try to respond when I can.")
        await ctx.send(embed=embed)
# Add this class to the cog list
def setup(bot):
    """Entry point used by discord.py's load_extension to register this cog."""
    console_cog = ConsoleCommands(bot)
    bot.add_cog(console_cog)
| 51.172414 | 119 | 0.568329 |
feeebbc5a748ddb1157bf558ba36f40a432ef1a6 | 666 | py | Python | documentation/demonstrations/abfFromWks.py | swharden/PyOriginTools | 536fb8e11234ffdc27e26b1800e0358179ca7d26 | [
"MIT"
] | 11 | 2018-04-22T20:34:53.000Z | 2022-03-12T12:02:47.000Z | documentation/demonstrations/abfFromWks.py | swharden/PyOriginTools | 536fb8e11234ffdc27e26b1800e0358179ca7d26 | [
"MIT"
] | 3 | 2018-01-11T14:54:46.000Z | 2018-04-26T13:45:18.000Z | documentation/demonstrations/abfFromWks.py | swharden/PyOriginTools | 536fb8e11234ffdc27e26b1800e0358179ca7d26 | [
"MIT"
] | 3 | 2019-05-14T13:36:14.000Z | 2020-09-02T16:13:57.000Z | R"""
try to get the worksheet name from a worksheet
run -pyf C:\Users\swharden\Documents\GitHub\PyOriginTools\documentation\demonstrations\abfFromWks.py
"""
import sys
if False:
    # this code block will NEVER actually run
    # (kept only so the IDE indexes the parent folders for autocomplete)
    sys.path.append('../') # helps my IDE autocomplete
    sys.path.append('../../') # helps my IDE autocomplete
    sys.path.append('../../../') # helps my IDE autocomplete
import PyOriginTools as OR
import PyOrigin
if __name__=="__main__":
    # Ask PyOriginTools for the active workbook / worksheet names.
    bookName,sheetName=OR.activeBookAndSheet()
    # Look up the workbook page object by its book name and show its
    # first entry.
    worksheetPage=PyOrigin.WorksheetPages(bookName)
    print(worksheetPage[0])
    # for item in worksheetPage:
    # print(item)
    print("DONE")
feef852c484bcfaf650545d694c36f762735f100 | 803 | py | Python | geniza/corpus/migrations/0018_document_doctype_help_link.py | kmcelwee/geniza | 0e59134e35357d4f80d85bf1e423edbc29d1edfb | [
"Apache-2.0"
] | null | null | null | geniza/corpus/migrations/0018_document_doctype_help_link.py | kmcelwee/geniza | 0e59134e35357d4f80d85bf1e423edbc29d1edfb | [
"Apache-2.0"
] | 5 | 2020-09-22T17:35:24.000Z | 2020-09-22T19:45:46.000Z | geniza/corpus/migrations/0018_document_doctype_help_link.py | kmcelwee/geniza | 0e59134e35357d4f80d85bf1e423edbc29d1edfb | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1 on 2021-08-19 15:49
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: updates the help_text on Document.doctype
    so the admin links to the PGP Document Type Guide. Code left exactly
    as Django generated it."""

    dependencies = [
        ("corpus", "0017_secondary_lang_allow_unknown"),
    ]
    operations = [
        migrations.AlterField(
            model_name="document",
            name="doctype",
            field=models.ForeignKey(
                blank=True,
                help_text='Refer to <a href="https://docs.google.com/document/d/1FHr1iS_JD5h-y5O1rv5JNNw1OqEVQFb-vSTGr3hoiF4/edit" target="_blank">PGP Document Type Guide</a>',
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="corpus.documenttype",
                verbose_name="Type",
            ),
        ),
    ]
| 29.740741 | 176 | 0.595268 |
fef0f2eca41493ff175b1ce22f370a3502ed826a | 50 | py | Python | rubin_sim/scheduler/features/__init__.py | RileyWClarke/flarubin | eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a | [
"MIT"
] | null | null | null | rubin_sim/scheduler/features/__init__.py | RileyWClarke/flarubin | eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a | [
"MIT"
] | null | null | null | rubin_sim/scheduler/features/__init__.py | RileyWClarke/flarubin | eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a | [
"MIT"
] | null | null | null | from .features import *
from .conditions import *
| 16.666667 | 25 | 0.76 |
fef10be702d297731f0eada02c3e9a2ec0107a0f | 5,932 | py | Python | traj_er/t2vec_experience/classify_exp/tested_feature_extractor.py | lzzppp/DERT | e1f9ee2489f76e2ed741d6637fd2b1e8bb225fb6 | [
"MIT"
] | 7 | 2020-08-21T02:19:15.000Z | 2021-12-30T02:02:40.000Z | traj_er/t2vec_experience/classify_exp/tested_feature_extractor.py | lzzppp/DERT | e1f9ee2489f76e2ed741d6637fd2b1e8bb225fb6 | [
"MIT"
] | 1 | 2021-04-21T13:50:53.000Z | 2021-04-25T02:34:48.000Z | traj_er/t2vec_experience/classify_exp/tested_feature_extractor.py | lzzppp/DERT | e1f9ee2489f76e2ed741d6637fd2b1e8bb225fb6 | [
"MIT"
] | 1 | 2020-12-02T07:15:13.000Z | 2020-12-02T07:15:13.000Z | import numpy as np
import h5py
from datetime import datetime
from geopy.distance import distance
import argparse
import pickle
import json
import os
class TestedFeatureExtractor:
    """Extracts a fixed-length spatio-temporal feature vector per trajectory
    from an HDF5 trajectory file and saves the stacked result as a .npy array.

    Which features are emitted is controlled by `selected_feature`;
    continuous features are scaled into [-1, 1] using the caps passed in
    `norm_param`.
    """
    # NOTE(review): appears unused — instances read self.norm_driving_time
    # (set in _set_norm_param) instead; confirm before removing.
    driving_time_norm = 1
    def __init__(self, selected_feature, norm_param):
        # selected_feature: iterable of feature names to emit, e.g.
        #   'time_of_day', 'day_of_week', 'trip_time', 'avg_speed', 'max_speed'.
        # norm_param: mapping with 'driving_time', 'driving_distance' and
        #   'speed' normalization caps.
        self.selected_feature = selected_feature
        self._set_norm_param(norm_param)
    def _set_norm_param(self, norm_param):
        """Cache the normalization caps for the continuous features."""
        self.norm_driving_time = norm_param["driving_time"]
        self.norm_driving_distance = norm_param["driving_distance"]
        self.norm_speed = norm_param["speed"]
    # def _set_param_dims(self):
    #     dim = 0
    #     self.norm_feature_dim = {}
    #     if 'time_of_day' in self.selected_feature:
    #         dim += 12
    #     if 'day_week' in self.selected_feature:
    #         dim += 7
    #     for feature in ['trip_time']:
    def extract_from_h5(self, h5_path, save_path, number='all'):
        """Extract features for 'all' (or the first `number`) trajectories
        stored in the HDF5 file at h5_path and save the array to save_path.

        Raises:
            Exception: if `number` is neither 'all' nor an int.
        """
        f = h5py.File(h5_path, 'r')
        traj_nums = f.attrs['traj_nums']
        func = self.spatial_temporal_features_func(f)
        if number == 'all':
            out = np.array(list(map(func, range(traj_nums))))
        elif isinstance(number, int):
            out = np.array(list(map(func, range(number))))
        else:
            raise Exception("number of needed trajectories should be set properly")
        f.close()
        np.save(save_path, out)
    def spatial_temporal_features_func(self, f):
        """Return a closure mapping a trajectory id to its feature list.

        The closure reads 'trips/<tid>' ((lon, lat) points) and
        'timestamps/<tid>' (unix times — assumed seconds, TODO confirm)
        from the open h5py file `f`.
        """
        def norma_traj(point):
            """If traj point in normal range. (lon, lat)"""
            return point[0] >= -180 and point[0] <= 180 and point[1] >= -90 and point[1] <= 90
        def func(tid):
            trajs = np.array(f['trips/%d' % tid])
            times = np.array(f['timestamps/%d' % tid])
            # Drop points outside valid lon/lat bounds.
            trajs = np.array(list(filter(norma_traj, trajs)))
            # time of day, day of week
            out_feature = []
            if 'time_of_day' in self.selected_feature or 'day_of_week' in self.selected_feature:
                day_hour, date_week = self.unix_to_weekday_and_hour(times[0])
                if 'time_of_day' in self.selected_feature:
                    out_feature += self.one_hot(day_hour, 12)
                if 'day_of_week' in self.selected_feature:
                    out_feature += self.one_hot(date_week, 7)
            if 'trip_time' in self.selected_feature:
                out_feature.append(self._normalize(times[-1] - times[0], self.norm_driving_time))
            # NOTE(review): 'drving_time' looks like a typo that never
            # matches any selected feature — confirm intended name.
            if 'avg_speed' in self.selected_feature or 'max_speed' in self.selected_feature or 'drving_time' in self.selected_feature:
                out_feature += self.driving_feature(trajs, times, len(trajs) == 0)
            return out_feature
        return func
    def driving_feature(self, trips, times, abnormal=False):
        """Distance and speed features for one trip.

        When `abnormal` is True (trip had no valid points) each selected
        feature is emitted as a 0.0 placeholder instead.
        """
        # Pairwise distances (meters) between consecutive points.
        distances = [coord_distance(coords) for coords in zip(trips[1:], trips[:-1])]
        seg_times = times[1:] - times[:-1]
        # Guard against zero-duration segments.
        speeds = [distances[i] / seg_times[i] if seg_times[i] != 0.0 else 0.0 for i in range(len(distances))]
        # NOTE(review): np.mean/np.max over an empty `speeds` (single-point
        # trip) would fail, but only len == 0 trips are flagged abnormal —
        # confirm upstream guarantees at least two valid points otherwise.
        out_feature = []
        # NOTE(review): __main__ selects 'trip_distance' while this checks
        # 'driving_distance' — one of the two names is likely wrong, so the
        # distance feature is currently never emitted.
        if 'driving_distance' in self.selected_feature:
            if not abnormal:
                out_feature.append(self._normalize(sum(distances), self.norm_driving_distance))
            else:
                out_feature.append(0.0)
        if 'avg_speed' in self.selected_feature:
            if not abnormal:
                out_feature.append(self._normalize(np.mean(speeds), self.norm_speed))
            else:
                out_feature.append(0.0)
        if 'max_speed' in self.selected_feature:
            if not abnormal:
                out_feature.append(self._normalize(np.max(speeds), self.norm_speed))
            else:
                out_feature.append(0.0)
        return out_feature
    def unix_to_weekday_and_hour(self, unix_time):
        """Get hour and day of the week
        For hour of day, it will be divided to 12 parts
        Return:
            [day_part, day_of_week]
        """
        # Local time of the host running extraction (naive datetime).
        date = datetime.fromtimestamp(unix_time)
        return [date.hour // 2, date.weekday()]
    def one_hot(self, id, len):
        """Return a `len`-long 0/1 list with a single 1 at index `id`.
        (NOTE(review): parameters shadow the builtins id/len.)"""
        return [0 if i != id else 1 for i in range(len)]
    def _normalize(self, value, max_value):
        """Map a non-negative value into [-1, 1], clamping at max_value."""
        # In this case, all value will be above 0, and I want to normalize them to -1,1
        # normalize all column at once will be more efficient
        if value > max_value:
            return 1.0
        else:
            return value / max_value * 2 - 1.0
def coord_distance(coords):
    """Return the distance in meters between two points.

    geopy.distance.distance expects (lat, lon) input, while this dataset
    stores points as (lon, lat), so the components are swapped here.
    """
    start, end = coords
    return distance((start[1], start[0]), (end[1], end[0])).meters
def get_saved_path(city_name, train_or_test):
    """Build the output path '<root>/<city>_<split>' for extracted features."""
    data_root = '/data3/zhuzheng/trajecotry/feature'
    filename = '_'.join((city_name, train_or_test))
    return os.path.join(data_root, filename)
if __name__ == "__main__":
    # Fix: argument parsing previously ran at import time (module level), which
    # breaks importing this module from elsewhere; `args` is only used here.
    parser = argparse.ArgumentParser(description="extral trajectory's temporal related feature")
    parser.add_argument("-region_name", type=str, default="region_porto_top100", help="")
    args = parser.parse_args()

    # Features to extract for both the train and test splits.
    selected_feature = ['time_of_day', 'day_of_week', 'avg_speed', 'max_speed', 'trip_distance', 'trip_time']
    with open('../hyper-parameters.json', 'r') as f:
        hyper_param = json.loads(f.read())
    with open('normalize_param.json', 'r') as f:
        norm_param = json.loads(f.read())
    feature_extractor = TestedFeatureExtractor(selected_feature, norm_param[args.region_name])
    train_h5_path = hyper_param[args.region_name]['filepath']
    test_h5_path = hyper_param[args.region_name]['testpath']
    feature_extractor.extract_from_h5(train_h5_path, get_saved_path(hyper_param[args.region_name]['cityname'], 'train'))
    feature_extractor.extract_from_h5(test_h5_path, get_saved_path(hyper_param[args.region_name]['cityname'], 'test'))
| 39.546667 | 134 | 0.630142 |
fef114610ec0d475191a1220ffe83885004935bc | 2,545 | py | Python | psystem/plot.py | ranocha/Dispersive-wave-error-growth-notebooks | cffe67961db325291a02258118d3c7261fcce788 | [
"MIT"
] | null | null | null | psystem/plot.py | ranocha/Dispersive-wave-error-growth-notebooks | cffe67961db325291a02258118d3c7261fcce788 | [
"MIT"
] | null | null | null | psystem/plot.py | ranocha/Dispersive-wave-error-growth-notebooks | cffe67961db325291a02258118d3c7261fcce788 | [
"MIT"
] | null | null | null | from clawpack.petclaw.solution import Solution
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as pl
from matplotlib import rc
import numpy as np
import os
def plot_q(frame,
           file_prefix='claw',
           path='./_output/',
           xShift=0.0,
           xlimits=None,
           ylimits=None,
           name=None,
           plot_strain=True,
           plot_path='',
           ylabel='$\sigma$',
           plot_title=True,
           follow=False,
           follow_window=50,
           X=None, Eps=None, Vel=None, Sigma=None):
    """Load frame *frame* of a petclaw solution, plot strain/stress, save a PNG.

    When *follow* is True the x-window tracks the stress maximum with a
    half-width of *follow_window*.  If *X* is given, raw arrays are appended to
    the X/Eps/Vel/Sigma accumulator lists (assumed to be lists — TODO confirm).
    Returns (x, eps, sigma).
    """
    import sys
    sys.path.append('.')
    import psystem
    sol=Solution(frame,file_format='petsc',read_aux=False,path=path,file_prefix=file_prefix)
    x=sol.state.grid.x.centers
    eps=sol.state.q[0,:]
    # get stress from the strain field via the psystem helper
    sigma = psystem.stress(sol.state)
    # zero-pad the frame number to 4 digits for the output file name
    if frame < 10:
        str_frame = "000"+str(frame)
    elif frame < 100:
        str_frame = "00"+str(frame)
    elif frame < 1000:
        str_frame = "0"+str(frame)
    else:
        str_frame = str(frame)
    # create the figure and plot the solution (strain thin red, stress thick black)
    pl.figure(figsize=(15,5))
    if plot_strain:
        pl.plot(x+xShift,eps,'-r',lw=1)
    pl.plot(x+xShift,sigma,'-k',lw=3)
    # format the plot
    if plot_title:
        pl.title("t= "+str(sol.state.t),fontsize=25)
    #
    if ylabel is not None:
        pl.ylabel(ylabel,fontsize=30)
    #
    pl.xticks(size=25); pl.yticks(size=25)
    # a follow window overrides any caller-supplied xlimits
    if follow is True:
        amax = sigma.argmax()
        xmax = x[amax]
        xlimits = [0,0]
        xlimits[0] = xmax-follow_window
        xlimits[1] = xmax+follow_window
    if xlimits is not None:
        xlim=[xlimits[0], xlimits[1]]
    else:
        xlim=[np.min(x),np.max(x)]
    if ylimits is not None:
        ylim=[ylimits[0], ylimits[1]]
    else:
        ylim=[np.min(sigma),np.max(sigma)]
    pl.tight_layout()
    # note: xShift is applied to the axis limits as well as the plotted data
    pl.axis([xlim[0]+xShift,xlim[1]+xShift,ylim[0],ylim[1]])
    pl.gca().ticklabel_format(useOffset=False)
    if name is None:
        if follow:
            pl.savefig('./_plots_follow'+plot_path+'/sigma_'+str_frame+'.png',bbox_inches="tight")
        else:
            pl.savefig('./_plots'+plot_path+'/sigma_'+str_frame+'.png',bbox_inches="tight")
    else:
        pl.savefig(name+'.png',bbox_inches="tight")
    #
    pl.close()
    # save the data into the caller-provided accumulators
    if X is not None:
        X.append(x)
        Eps.append(eps)
        Vel.append(sol.state.q[1,:])
        Sigma.append(sigma)
    #
    return x,eps,sigma
#
| 27.074468 | 98 | 0.574853 |
fef15a29a302098c87559c64e7c95311ad1af7bc | 2,285 | py | Python | deepl/layers/utils.py | akamnev/deepl | 392c757e21dec7bdd72cb0f71298389ef0d13968 | [
"MIT"
] | 1 | 2020-06-08T14:06:36.000Z | 2020-06-08T14:06:36.000Z | deepl/layers/utils.py | akamnev/deepl | 392c757e21dec7bdd72cb0f71298389ef0d13968 | [
"MIT"
] | null | null | null | deepl/layers/utils.py | akamnev/deepl | 392c757e21dec7bdd72cb0f71298389ef0d13968 | [
"MIT"
] | null | null | null | import torch
from typing import List
def get_min_value(tensor):
    """Return a large negative masking constant appropriate for *tensor*'s dtype.

    -1e4 for float16 (stays finite in half precision), -1e9 for float32.

    Raises:
        ValueError: for any other dtype.
    """
    dtype = tensor.dtype
    if dtype == torch.float16:
        return -1e4
    if dtype == torch.float32:
        return -1e9
    raise ValueError("{} not recognized. `dtype` "
                     "should be set to either `torch.float32` "
                     "or `torch.float16`".format(dtype))
def get_attention_mask(input_ids):
    """Build a right-padded binary attention mask for a batch of sequences.

    Each row has 1.0 over the sequence's real tokens and 0.0 padding up to the
    batch's longest sequence.
    """
    longest = max(len(seq) for seq in input_ids)
    return [
        [1.0] * len(seq) + [0.0] * (longest - len(seq))
        for seq in input_ids
    ]
def get_vector_attention_mask(input_ids):
    """Build an attention mask with one extra leading slot per sequence.

    Like get_attention_mask, but each row carries len(seq) + 1 ones (the extra
    position precedes the tokens), so every row has max_length + 1 entries.
    """
    longest = max(len(seq) for seq in input_ids)
    return [
        [1.0] * (len(seq) + 1) + [0.0] * (longest - len(seq))
        for seq in input_ids
    ]
def prune_input_sequence(input_ids, max_length):
    """Truncate every sequence to at most *max_length* tokens.

    Sequences that already fit are passed through as the same objects (no
    copy), matching the original behavior.
    """
    return [
        seq if len(seq) <= max_length else seq[:max_length]
        for seq in input_ids
    ]
def kl_div(mu, sigma):
    """
    KL-divergence between a diagonal multivariate normal,
    and a standard normal distribution (with zero mean and unit variance)

    Note: averaged (not summed) over dimensions, via torch.mean.
    """
    variance = sigma * sigma
    return 0.5 * torch.mean(mu * mu + variance - torch.log(variance) - 1.0)
def kld_gaussian(mu, log_sigma, nu=0.0, rho=1.0):
    """
    KL-divergence between a diagonal multivariate normal,
    and a standard normal distribution

    Parameters: mu/log_sigma describe the posterior; nu/rho the reference
    distribution (defaults give a standard normal).
    """
    device = mu.device
    nu = torch.as_tensor(nu, device=device)
    rho = torch.as_tensor(rho, device=device)
    # log of (sigma / rho)^2, computed in log space
    delta_variance = 2.0 * (log_sigma - torch.log(rho))
    variance_term = torch.sum(torch.exp(delta_variance) - delta_variance)
    # NOTE(review): divides by rho, not rho**2 — consistent only if rho is a
    # variance, yet delta_variance above treats rho as a std dev; confirm which.
    mean_term = torch.sum((mu - nu) ** 2 / rho)
    # NOTE(review): the closed form subtracts the dimensionality k, but this
    # subtracts a constant 1.0 regardless of mu's size — verify intent.
    return 0.5 * (mean_term + variance_term - 1.0)
def rand_epanechnikov_trig(shape: List[int], device: torch.device, dtype: torch.dtype = torch.float32):
    """Draw samples from the Epanechnikov kernel on [-1, 1].

    Uses the closed-form inverse CDF via the triple-angle identity; see
    https://stats.stackexchange.com/questions/6643/what-is-the-closed-form-solution-for-the-inverse-cdf-for-epanechnikov
    """
    uniform = torch.rand(shape, dtype=dtype, device=device)
    # Inverse-CDF transform: map U(0,1) -> Epanechnikov on [-1, 1].
    return 2 * torch.sin(torch.asin(2 * uniform - 1) / 3)
| 31.736111 | 122 | 0.617068 |
fef388e9c0a8cc5d31503d18e82095b931d385f7 | 13,762 | py | Python | main.py | ooshyun/filterdesign | 59dbea191b8cd44aa9f2d02d3787b5805d486ae2 | [
"MIT"
] | 1 | 2021-12-27T00:38:32.000Z | 2021-12-27T00:38:32.000Z | main.py | ooshyun/FilterDesign | 7162ccad8e1ae8aebca370da56be56603b9e8b24 | [
"MIT"
] | null | null | null | main.py | ooshyun/FilterDesign | 7162ccad8e1ae8aebca370da56be56603b9e8b24 | [
"MIT"
] | null | null | null | import os
import json
import numpy as np
from numpy import log10, pi, sqrt
import scipy.io.wavfile as wav
from scipy.fftpack import *
from src import (
FilterAnalyzePlot,
WaveProcessor,
ParametricEqualizer,
GraphicalEqualizer,
cvt_char2num,
maker_logger,
DEBUG,
)
if DEBUG:
    # NOTE(review): PRINTER is referenced unconditionally later in this module
    # (e.g. parallel_equalizer_wav_process and __main__); if DEBUG is falsy
    # those references raise NameError — confirm DEBUG is always truthy here.
    PRINTER = maker_logger()
LIBRARY_PATH = "./"  # Set this to the library (project) root before running.
def filter_plot():
    """Demo: build one biquad of each type at fc = 1033.59375 Hz and plot the
    peaking filter's frequency/phase/pole-zero response."""
    from src import lowpass, highpass, bandpass, notch, peaking, shelf, allpass
    data_path = os.path.join(LIBRARY_PATH, "test/data/wav/")
    file_name = "White Noise.wav"
    result_path = ""
    infile_path = os.path.join(data_path, file_name)
    # Only fs is actually needed; data/result_path are unused here.
    fs, data = wav.read(infile_path)
    # FFT bin grid (unused below; kept as a reference for choosing fc on a bin).
    fft_size = 256
    fft_band = np.arange(1, fft_size / 2 + 1) * fs / fft_size
    # fc_band = np.arange(30, 22060, 10)
    fc_band = np.array([100, 1000, 2000, 3000, 5000])
    ploter = FilterAnalyzePlot(sample_rate=fs)
    """Plot the several filters
    """
    # 1033.59375 Hz = bin 6 of a 256-point FFT at 44.1 kHz (44100/256 * 6).
    fc = 1033.59375
    gain = 6
    Q = 1 / np.sqrt(2)
    # NOTE(review): the plot is labeled "Shelf Filter" but peak_filter is
    # plotted below — confirm which was intended.
    name = "Shelf Filter"
    # One of each supported filter type; only peak_filter is used below.
    lowpass_filter = lowpass(Wn=2 * fc / fs, Q=Q)
    highpass_filter = highpass(Wn=2 * fc / fs, Q=Q)
    bandpass_filter = bandpass(Wn=2 * fc / fs, Q=Q)
    notch_filter = notch(Wn=2 * fc / fs, Q=Q)
    peak_filter = peaking(Wn=2 * fc / fs, Q=Q, dBgain=gain)
    shelf_filter = shelf(Wn=2 * fc / fs, Q=Q, dBgain=gain)
    allpass_filter = allpass(Wn=2 * fc / fs, Q=Q)
    ploter.filters = peak_filter
    ploter.plot(type=["freq", "phase", "pole"], save_path=None, name=name)
def filter_process():
    """Comparison between time domain and frequency domain using WavProcessor class
    """
    from src import peaking, shelf
    data_path = LIBRARY_PATH + "/test/data/wav/"
    file_name = "White Noise.wav"
    outfile_path = LIBRARY_PATH + "/test/result/wav/"
    infile_path = os.path.join(data_path, file_name)
    fs, data = wav.read(infile_path)
    gain = 6
    # fc lands exactly on FFT bin 6 (44100/256 * 6), so the frequency-domain
    # pass below can boost a single bin and match the time-domain filter.
    fc = 1033.59375
    # time-domain pass: one peaking biquad applied sample-by-sample
    wave_processor = WaveProcessor(wavfile_path=infile_path)
    outfile_name = "White Noise_peak_time_domain.wav"
    peak_filter = peaking(Wn=2 * fc / fs, Q=1 / np.sqrt(2), dBgain=gain)
    wave_processor.filter_time_domain_list = peak_filter
    wave_processor.run(savefile_path=outfile_path + outfile_name)
    if len(wave_processor.time_filter_time) != 0:
        # mean per-block processing time for the time-domain path
        print(
            sum(wave_processor.time_filter_time) / len(wave_processor.time_filter_time)
        )
    # frequency-domain pass: unity gain everywhere except the fc bin
    wave_processor = WaveProcessor(wavfile_path=infile_path)
    outfile_name = "White Noise_peaking_freq_domain.wav"
    fft_size = 256  # it should be designed before running
    fft_band = np.arange(1, fft_size // 2 + 1) * fs / fft_size
    coeff_frequency = np.ones(shape=(fft_size // 2 + 1,))
    # +gain dB converted to a linear factor at the single matching bin
    coeff_frequency[np.argwhere(fft_band == fc)] = 10 ** (gain / 20)
    wave_processor.filter_freq_domain_list = coeff_frequency
    wave_processor.run(savefile_path=outfile_path + outfile_name)
    if len(wave_processor.time_filter_freq) != 0:
        # mean per-block processing time for the frequency-domain path
        print(
            sum(wave_processor.time_filter_freq) / len(wave_processor.time_filter_freq)
        )
def serial_equalizer_plot():
    """Test frequency response for IIR filter cascade
    """
    from src import peaking
    data_path = LIBRARY_PATH + "/test/data/wav/"
    infile_path = os.path.join(data_path, "White Noise.wav")
    # The wav is read only for its sample rate.
    fs, _ = wav.read(infile_path)
    ploter = FilterAnalyzePlot()
    parametric_filter = ParametricEqualizer(fs)
    # Cascade three +6 dB peaking filters (Q=4) by appending coefficients.
    fc_band = np.array([1000, 4000, 8000])
    for f in fc_band:
        peak_filter = peaking(Wn=2 * f / fs, dBgain=6, Q=4)
        parametric_filter.coeff = peak_filter
    ploter.filters = parametric_filter
    ploter.plot(type=["freq", "phase", "pole"])
def serial_equalizer_process():
    """Test processing to wav for IIR filter cascade
    """
    from src import peaking
    data_path = LIBRARY_PATH + "/test/data/wav/"
    result_path = LIBRARY_PATH + "/test/result/wav/"
    infile_path = os.path.join(data_path, "White Noise.wav")
    fs, _ = wav.read(infile_path)
    wave_processor = WaveProcessor(wavfile_path=infile_path)
    # Cascade three +12 dB peaking filters, appended one (b, a) pair at a time.
    fc_band = np.array([1000, 4000, 8000])
    for f in fc_band:
        peak_filter = peaking(Wn=2 * f / fs, dBgain=12, Q=4)
        b, a = peak_filter
        wave_processor.filter_time_domain_list = b, a
    # wave_processor.graphical_equalizer = True
    # NOTE(review): the output name says 250_2000_8000 but fc_band is
    # 1000/4000/8000 — confirm the intended file name.
    wave_processor.run(
        savefile_path=result_path + "/whitenoise_3peak_250_2000_8000.wav"
    )
    # Report mean per-block processing time for whichever path was exercised.
    if len(wave_processor.time_filter_freq) != 0:
        print(
            sum(wave_processor.time_filter_freq) / len(wave_processor.time_filter_freq)
        )
    if len(wave_processor.time_filter_time) != 0:
        print(
            sum(wave_processor.time_filter_time) / len(wave_processor.time_filter_time)
        )
def generator_test_vector_grahpical_equalizer():
    """Generate test vectors for the parallel-structure (graphical) equalizer.

    Writes two test cases to test/data/json/test_graphical_equalizer.json:
      "1": 31 band centre frequencies with 5 gain curves, and
      "2": the same data interleaved with band-edge frequencies (62 points),
           where each gain value is duplicated and shifted one slot so edges
           and centres share the gain of their band.

    Fixes: removed the dead locals `f_bandwidth` and `f_lowerband`, which were
    computed but never read anywhere in the function.
    """
    sample_rate = 44100
    # Case "1": 1/3-octave band centre frequencies (Hz).
    cutoff_frequency = np.array(
        (20, 25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400,
         500, 630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000,
         6300, 8000, 10000, 12500, 16000, 20000)
    )
    # Five gain curves (dB), one row per test case.
    num_case = 5
    test_gain_list = np.zeros(shape=(num_case, len(cutoff_frequency)))
    # case 1: hand-picked mixture of boosts and cuts
    test_gain_list[0, :] = np.array(
        [12, 12, 10, 8, 4, 1, 0.5, 0, 0, 6, 6, 12, 6, 6, -12, 12, -12,
         -12, -12, -12, 0, 0, 0, 0, -3, -6, -9, -12, 0, 0, 0]
    )
    # case 2: alternating +12 / -12 dB
    test_gain_list[1, 0::2] = 12
    test_gain_list[1, 1::2] = -12
    # case 3: a single +12 dB boost at 2 kHz
    test_gain_list[2, np.where(cutoff_frequency == 2000)] = 12
    # case 4: flat +12 dB across all bands
    test_gain_list[3, :] = np.ones_like(cutoff_frequency) * 12
    # case 5: +12 dB on every third band
    test_gain_list[4, 0::3] = 0
    test_gain_list[4, 1::3] = 0
    test_gain_list[4, 2::3] = 12
    # Case "2": upper band-edge frequencies paired with the centres.
    f_upperband = np.array(
        [22.4, 28.2, 35.5, 44.7, 56.2, 70.8, 89.1, 112, 141, 178, 224,
         282, 355, 447, 562, 708, 891, 1120, 1410, 1780, 2240, 2820,
         3550, 4470, 5620, 7080, 8910, 11200, 14100, 17800, 22050]
    )
    # Interleave (lower edge, centre) pairs column-wise -> 62-point frequency axis.
    cutoff_frequency_bandwidth = np.zeros((2, len(cutoff_frequency)))
    cutoff_frequency_bandwidth[0, :] = np.append(10, f_upperband[:-1])
    cutoff_frequency_bandwidth[1, :] = cutoff_frequency
    cutoff_frequency_bandwidth = cutoff_frequency_bandwidth.reshape(
        (cutoff_frequency_bandwidth.shape[0] * cutoff_frequency_bandwidth.shape[1],),
        order="F",
    )
    # Duplicate each gain for its edge+centre pair, then shift by one slot so
    # the leading edge carries 0 dB.
    test_gain_bandwidth_list = np.zeros(
        shape=(num_case, cutoff_frequency_bandwidth.shape[0])
    )
    for id_test_gain, test_gain in enumerate(test_gain_list):
        buf_test_gain = np.zeros((2, len(cutoff_frequency)))
        buf_test_gain[0, :] = test_gain
        buf_test_gain[1, :] = test_gain
        buf_test_gain = buf_test_gain.reshape(
            (buf_test_gain.shape[0] * buf_test_gain.shape[1],), order="F"
        )
        buf_test_gain[1:] = buf_test_gain[:-1]
        buf_test_gain[0] = 0
        test_gain_bandwidth_list[id_test_gain, :] = buf_test_gain[:]
    # Serialize as plain lists for JSON.
    cutoff_frequency = cutoff_frequency.tolist()
    test_gain_list = test_gain_list.tolist()
    cutoff_frequency_bandwidth = cutoff_frequency_bandwidth.tolist()
    test_gain_bandwidth_list = test_gain_bandwidth_list.tolist()
    test_vector_graphical_equalizer = json.dumps(
        {
            "1": {
                "sample_rate": sample_rate,
                "cutoff_frequency": cutoff_frequency,
                "test_gain": test_gain_list,
            },
            "2": {
                "sample_rate": sample_rate,
                "cutoff_frequency": cutoff_frequency_bandwidth,
                "test_gain": test_gain_bandwidth_list,
            },
        },
        indent=4,
    )
    with open(LIBRARY_PATH + "/test/data/json/test_graphical_equalizer.json", "w") as f:
        f.write(test_vector_graphical_equalizer)
f.write(test_vector_graphical_equalizer)
def parallel_equalizer_plot():
    """Plot the frequency response of graphical-EQ test case "2" (gain curve 1)
    and dump the designed coefficients to a text file."""
    with open(LIBRARY_PATH + "/test/data/json/test_graphical_equalizer.json", "r") as f:
        test_case = json.load(f)
    fs, fc, gain = (
        test_case["2"]["sample_rate"],
        test_case["2"]["cutoff_frequency"],
        test_case["2"]["test_gain"][1],
    )
    fs = int(fs)
    fc = np.array(fc)
    gain = np.array(gain)
    eq = GraphicalEqualizer(fs, fc, gain)
    w, h = eq.freqz(show=True)
    # `file` shadows a builtin name (Py2's file); harmless but worth renaming.
    file = "/test/data/txt/test_graphical_equalizer.txt"
    eq.write_to_file(f"{LIBRARY_PATH}/{file}")
def parallel_equalizer_wav_process():
    """Design the graphical EQ for test case "2", dump coefficients to text,
    re-parse them, and run the parallel filter bank over the white-noise wav."""
    with open(LIBRARY_PATH + "/test/data/json/test_graphical_equalizer.json", "r") as f:
        test_case = json.load(f)
    fs, fc, gain = (
        test_case["2"]["sample_rate"],
        test_case["2"]["cutoff_frequency"],
        test_case["2"]["test_gain"][1],
    )
    fs = int(fs)
    fc = np.array(fc)
    gain = np.array(gain)
    eq = GraphicalEqualizer(fs, fc, gain)
    # w, h = eq.freqz(show=True)
    txt_file = "/test/data/txt/test_graphical_equalizer.txt"
    eq.write_to_file(f"{LIBRARY_PATH}/{txt_file}")
    """Test wav file processing of parallel structure of iir filter
    """
    data_path = LIBRARY_PATH + "/test/data/wav/"
    result_path = LIBRARY_PATH + "/test/result/wav/"
    wav_file = "White Noise.wav"
    out_file = "White Noise_graphical_equalizer.wav"
    infile_path = os.path.join(data_path, wav_file)
    outfile_path = os.path.join(result_path, out_file)
    # Parse the coefficient dump: one space-separated row per filter section,
    # last row is the bias term.  NOTE(review): this file handle is never
    # closed — consider a `with` block.
    coeff_text = open(f"{LIBRARY_PATH}/{txt_file}").read()
    coeff_text = coeff_text.split("\n")[:-1]
    coeff_text = [text.split(" ") for text in coeff_text]
    cvt_char2num(coeff_text)
    coeff_text, bias = np.array(coeff_text[:-1]), np.array(coeff_text[-1])
    wave_processor = WaveProcessor(wavfile_path=infile_path)
    wave_processor.graphical_equalizer = True
    wave_processor.filter_time_domain_list = coeff_text
    wave_processor.bias = bias
    outresult_path = outfile_path
    # PRINTER is only defined when DEBUG is truthy (module top) — presumably
    # always the case here; confirm.
    PRINTER.info(f"target file {outresult_path} is processing......")
    wave_processor.run(savefile_path=outresult_path)
def analyze_filter():
    """Walk through the three analyzer modes: a single biquad, a serial
    (parametric) cascade, and a parallel (graphical) equalizer.

    The bare triple-quoted strings below are no-op statements used as section
    markers in the original code.
    """
    from src import highpass, notch
    fs = 44100
    # """ Custom filter analysis"""
    ploter = FilterAnalyzePlot(sample_rate=44100)
    fc = 1000
    filter_custom = highpass(Wn=2 * fc / fs, Q=1 / np.sqrt(2))
    ploter.filters = filter_custom
    ploter.plot(type=["freq", "phase", "pole"])
    del filter_custom
    """ Parametric filter analysis, serial structure"""
    ploter = FilterAnalyzePlot()
    fc = np.array([500, 4000])
    peq = ParametricEqualizer(fs)
    # Two notch filters appended to the serial chain.
    filter_custom = notch(Wn=2 * fc[0] / fs, Q=1 / np.sqrt(2))
    peq.coeff = filter_custom
    filter_custom = notch(Wn=2 * fc[1] / fs, Q=1 / np.sqrt(2))
    peq.coeff = filter_custom
    ploter.filters = peq
    ploter.plot(type=["freq", "phase", "pole"])
    del peq
    """ Graphical filter analysis, parallel structure"""
    with open(LIBRARY_PATH + "/test/data/json/test_graphical_equalizer.json", "r") as f:
        test_case = json.load(f)
    # fs/fc/gain are rebound here from test case "1" (gain curve 0).
    fs, fc, gain = (
        test_case["1"]["sample_rate"],
        test_case["1"]["cutoff_frequency"],
        test_case["1"]["test_gain"][0],
    )
    fs = int(fs)
    fc = np.array(fc)
    gain = np.array(gain)
    ploter = FilterAnalyzePlot()
    geq = GraphicalEqualizer(fs, fc, gain)
    ploter.filters = geq
    ploter.plot(type=["freq", "phase", "pole"])
    del geq
    del ploter
if __name__ == "__main__":
    # PRINTER is only bound when DEBUG is truthy at module top — presumably
    # always true when running this demo; confirm.
    PRINTER.info("Hello Digital Signal Processing World!")
    # The bare strings below are no-op section markers.
    """Single filter design"""
    filter_plot()
    filter_process()
    """Serial structure of filters design"""
    serial_equalizer_plot()
    serial_equalizer_process()
    """Parallel structure of filters design"""
    generator_test_vector_grahpical_equalizer()
    parallel_equalizer_plot()
    parallel_equalizer_wav_process()
    """ Analyze filter"""
    analyze_filter()
    pass
| 26.113852 | 88 | 0.575861 |
fef4b3fa8786cd370700430b9b9414a5a831d2bf | 3,322 | py | Python | time_transfer.py | EternityNull/alfred_scripts-TimeTransfer | d7c24c977d174d0b71b9903193ce8225a5538c7c | [
"MIT"
] | null | null | null | time_transfer.py | EternityNull/alfred_scripts-TimeTransfer | d7c24c977d174d0b71b9903193ce8225a5538c7c | [
"MIT"
] | null | null | null | time_transfer.py | EternityNull/alfred_scripts-TimeTransfer | d7c24c977d174d0b71b9903193ce8225a5538c7c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import re
import json
from datetime import datetime
from alfred import *
# Regexes classifying the raw query so judge_input() knows which conversion
# to apply.  10 digits = second timestamp, 13 = millisecond timestamp,
# 14 digits = compact datetime, dashed/colon form = long datetime.
TIMESTAMP_SEC_RE = r'^\d{10}$'  # 1643372599
TIMESTAMP_MSEC_RE = r'^\d{13}$'  # 1643372599000
# 2022-01-28 10:00:00
DATETIME_LONG_STR = r'^[1-9]\d{3}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$'
DATETIME_SHORT_STR = r'^[1-9]\d{13}$'  # 20220128100000 (14 digits)
def judge_input(input_arg: str):
    """Convert between timestamps and date strings; the direction is inferred
    from the input's format.  Returns an Alfred row dict, or exits(1) when the
    input matches no known format."""
    date_now = input_datetime = datetime.now()
    title_to_display = result_to_display = str()
    # Branch order matters: the 13-digit millisecond pattern must be tested
    # before the 14-digit compact-datetime pattern to classify lengths cleanly.
    if re.match(TIMESTAMP_SEC_RE, input_arg):
        # 10-digit seconds timestamp -> local datetime string
        input_datetime = datetime.fromtimestamp(float(input_arg))
        result_to_display = input_datetime.strftime("%Y-%m-%d %H:%M:%S")
        title_to_display = "日期时间: %s" % result_to_display
    elif re.match(TIMESTAMP_MSEC_RE, input_arg):
        # 13-digit milliseconds timestamp -> local datetime string
        input_datetime = datetime.fromtimestamp(float(int(input_arg) / 1000))
        result_to_display = input_datetime.strftime("%Y-%m-%d %H:%M:%S")
        title_to_display = "日期时间: %s" % result_to_display
    elif re.match(DATETIME_SHORT_STR, input_arg):
        # 14-digit compact datetime -> seconds timestamp (int)
        input_datetime = datetime.strptime(input_arg, "%Y%m%d%H%M%S")
        result_to_display = int(input_datetime.timestamp())
        title_to_display = "时间戳: %s" % result_to_display
    elif re.match(DATETIME_LONG_STR, input_arg):
        # dashed/colon datetime -> seconds timestamp (int)
        input_datetime = datetime.strptime(input_arg, "%Y-%m-%d %H:%M:%S")
        result_to_display = int(input_datetime.timestamp())
        title_to_display = "时间戳: %s" % result_to_display
    else:
        # Unrecognized input aborts the whole script.
        exit(1)
    # Subtitle shows the offset from now as days plus the sub-day remainder
    # (.seconds is 0..86399, shown alongside .days on purpose).
    prefix = "前"
    diff_days = (date_now - input_datetime).days
    diff_secs = (date_now - input_datetime).seconds
    if date_now < input_datetime:
        prefix = "后"
        diff_days = (input_datetime - date_now).days
        diff_secs = (input_datetime - date_now).seconds
    subtitle_to_display = "距离当前时间 %s [%s] 天 + [%s] 秒" % (
        prefix,
        diff_days,
        diff_secs)
    # Alfred comes from the star import of the local alfred module — assumed;
    # confirm its constructor signature is (title, subtitle, arg).
    return Alfred(title_to_display, subtitle_to_display, result_to_display).__dict__
def judge_now():
    """Build Alfred result rows showing the current time in several formats."""
    date_now = datetime.now()
    display_list = list()
    # Long date format: YYYY-MM-DD HH:MM:SS
    display_list.append(
        Alfred(
            title=date_now.strftime("%Y-%m-%d %H:%M:%S"),
            subtitle='Long日期格式',
            arg=date_now.strftime("%Y-%m-%d %H:%M:%S")
        ).__dict__
    )
    # "Traditional" slash-separated format.
    # NOTE(review): the time part uses "%H/%M/%S" (slashes) — looks like a
    # typo for "%H:%M:%S"; confirm the intended display.
    display_list.append(
        Alfred(
            title=date_now.strftime("%Y/%m/%d %H/%M/%S"),
            subtitle='传统日期格式',
            arg=date_now.strftime("%Y/%m/%d %H/%M/%S")
        ).__dict__
    )
    # Compact format: YYYYMMDDHHMMSS
    display_list.append(
        Alfred(
            title=date_now.strftime("%Y%m%d%H%M%S"),
            subtitle='Short日期格式',
            arg=date_now.strftime("%Y%m%d%H%M%S")
        ).__dict__
    )
    # Unix timestamp (second precision); passed as int — presumably Alfred
    # stringifies it downstream (TODO confirm).
    display_list.append(
        Alfred(
            title=int(date_now.timestamp()),
            subtitle='秒级时间戳',
            arg=int(date_now.timestamp())
        ).__dict__
    )
    return display_list
if __name__ == '__main__':
    # Accept at most two argv tokens (e.g. a date and a time); reject more.
    input_args = sys.argv[1:]
    if len(input_args) > 2:
        exit(1)
    input_arg = ' '.join(input_args)
    alfred_result = list()
    if input_arg == 'now':
        alfred_result.extend(judge_now())
    else:
        alfred_result.append(judge_input(input_arg))
    # Alfred Script Filter JSON is consumed from stdout.
    print(json.dumps({"items": alfred_result}))
| 27.454545 | 84 | 0.609874 |
fef4d3e2153fde18995213ace718d0a7d41c56ac | 55 | py | Python | test.py | SquarerFive/ursina | 8d2a86a702a96fe2d3d3b608b87e755bf28cb2ae | [
"MIT"
] | null | null | null | test.py | SquarerFive/ursina | 8d2a86a702a96fe2d3d3b608b87e755bf28cb2ae | [
"MIT"
] | null | null | null | test.py | SquarerFive/ursina | 8d2a86a702a96fe2d3d3b608b87e755bf28cb2ae | [
"MIT"
] | null | null | null | import ursina
app = ursina.Ursina(init_showbase=True)
| 13.75 | 39 | 0.8 |
fef5faa5a487c2ba4ddeb8aafe0c3838370c774b | 14,598 | py | Python | ravager/bot/commands/admin_interface.py | CoolFool/Ravager | 3d647115689dc23a160255221aaa493f879406a5 | [
"MIT"
] | null | null | null | ravager/bot/commands/admin_interface.py | CoolFool/Ravager | 3d647115689dc23a160255221aaa493f879406a5 | [
"MIT"
] | 1 | 2022-03-15T06:55:48.000Z | 2022-03-15T15:38:20.000Z | ravager/bot/commands/admin_interface.py | CoolFool/Ravager | 3d647115689dc23a160255221aaa493f879406a5 | [
"MIT"
] | 2 | 2022-02-09T21:30:57.000Z | 2022-03-15T06:19:57.000Z | import logging
from functools import wraps
import psutil
from telegram import InlineKeyboardMarkup, InlineKeyboardButton, ForceReply, ParseMode
from telegram.ext import CommandHandler, CallbackQueryHandler, MessageHandler, Filters
from ravager.bot.helpers.constants import *
from ravager.bot.helpers.timeout import ConversationTimeout
from ravager.config import MAX_TASKS_PER_USER, STORAGE_TIME, STORAGE_SIZE, GROUP_PASSWORD, USER_PASSWORD, ALLOWLIST, \
DOWNLOAD_DIR, LOGS_DIR,HEROKU_APP,HEROKU_API_TOKEN
from ravager.database.helpers.structs import UserStruct
from ravager.database.users import UserData
from ravager.helpers.humanize import humanize
logger = logging.getLogger(__file__)

# Conversation states for the admin-panel ConversationHandler.
HANDLE_ADMIN_PANEL, LIMITS_PANEL, FILTERS_PANEL, SYS_INFO_PANEL, LOGS_HANDLER = range(5)

# Markdown message templates (backslash line-continuations keep each one a
# single string literal; "\n" sequences are literal newlines in the message).
limits_panel_text = "*Limits Configuration:*\
\nDownload storage size: *{}* GB\
\nDownload storage time: *{}* Hrs\n"
filter_panel_text = "*Filters and User Configuration:*\
\nFilters Enabled: *{}*\nGroup chat password: *{}*\
\nPrivate chat password: *{}*"
sys_info_text = "*System Information*\
\n*Cpu Usage Percent:* {}%\
\n*Used Ram:* {} {}\
\n*Available Ram:* {} {}\
\n*Network Ingress:* {} {}\
\n*Network Egress:* {} {}\
\n*Total Disk Space:* {} {}\
\n*Total Disk Space Available: *{} {}"
class AdminInterface:
    """Inline-keyboard admin panel for the Telegram bot.

    Exposes a ConversationHandler with four submenus: limits, filters/users,
    system info and log retrieval.  Callback data is "<menu>|<option>".

    Fixes applied:
      * `_restricted` now actually applies functools.wraps (the original
        called `wraps(handlers)` and discarded the result).
      * `get_logs` error path used `Message.reply_text(chat_id=...)`, which
        takes no `chat_id` kwarg and raised TypeError; it now sends via the
        bot like the rest of the module.
      * Log files are opened with context managers, fixing leaked handles.
    """

    def __init__(self):
        self.end_selection = ConversationTimeout.end_selection
        self.selection_timeout = ConversationTimeout.selection_timeout
        self.user = UserStruct()

    def _restricted(handlers):
        """Class-body decorator: reject callers whose DB record is missing or
        not flagged as admin (used as ``@_restricted`` on handler methods)."""
        @wraps(handlers)  # fix: original called wraps() without applying it
        def wrapper(self, update, context, *args, **kwargs):
            user_id = update.effective_user.id
            user = UserStruct()
            user.user_id = user_id
            user = UserData(user=user).get_user()
            if user is not None and bool(user.is_admin):
                return handlers(self, update, context, *args, **kwargs)
            update.message.reply_text(text="Unauthorized user", quote=True)
            logger.error("Unauthorized access denied for {}.".format(user_id))
            return -1
        return wrapper

    @staticmethod
    def admin_panel():
        """Top-level admin menu keyboard."""
        admin_panel = [[InlineKeyboardButton(text="Limits", callback_data="admin|admin_limits"),
                        InlineKeyboardButton(text="Filters", callback_data="admin|admin_filters")],
                       [InlineKeyboardButton(text="Sys Info", callback_data="admin|admin_sys_info"),
                        InlineKeyboardButton(text="Close", callback_data="admin|close")]]
        return InlineKeyboardMarkup(admin_panel)

    @staticmethod
    def admin_interface_filters():
        """Filters submenu keyboard."""
        filters_panel = [[InlineKeyboardButton(text="Revoke Access", callback_data="filters|revoke_user"),
                          InlineKeyboardButton(text="Add Admin", callback_data="filters|add_admin")],
                         [InlineKeyboardButton(text="Back", callback_data="admin|admin_main"),
                          InlineKeyboardButton(text="Close", callback_data="admin|close")]]
        return InlineKeyboardMarkup(filters_panel)

    @staticmethod
    def admin_interface_limts():
        """Limits submenu keyboard (name kept for callers; "limts" is a typo)."""
        limits_panel = [[InlineKeyboardButton(text="Back", callback_data="admin|admin_main"),
                         InlineKeyboardButton(text="Close", callback_data="admin|close")]]
        return InlineKeyboardMarkup(limits_panel)

    @staticmethod
    def admin_interface_sys_info():
        """System-info submenu keyboard."""
        sys_info_panel = [[InlineKeyboardButton(text="System Info", callback_data="sys_info|sys_info"),
                           InlineKeyboardButton(text="Logs", callback_data="sys_info|logs")],
                          [InlineKeyboardButton(text="Back", callback_data="admin|admin_main"),
                           InlineKeyboardButton(text="Close", callback_data="admin|close")]]
        return InlineKeyboardMarkup(sys_info_panel)

    @staticmethod
    def toggle_panel(back_menu):
        """Enable/disable keyboard.  NOTE(review): Enable/Disable carry empty
        callback_data, so they are inert as shipped — confirm intent."""
        toggle_panel = [[InlineKeyboardButton(text="Enable", callback_data=""),
                         InlineKeyboardButton(text="Disable", callback_data=""),
                         InlineKeyboardButton(text="Back", callback_data="{}|".format(back_menu))]]
        return InlineKeyboardMarkup(toggle_panel)

    @staticmethod
    def last_step_btns(prev_menu):
        """Back / back-to-main keyboard; *prev_menu* is the full callback string."""
        last_step_panel = [[InlineKeyboardButton(text="Back", callback_data="{}".format(prev_menu)),
                            InlineKeyboardButton(text="Back to main menu", callback_data="admin|admin_main")]]
        return InlineKeyboardMarkup(last_step_panel)

    def handle_admin_panel(self, update, context):
        """Route top-level "admin|*" callbacks to the matching submenu state."""
        callback_data = update.callback_query.data
        callback_data = callback_data.split("|")
        selection_option = callback_data[1]
        if selection_option == "admin_main":
            update.callback_query.edit_message_text(text="Admin Panel", reply_markup=self.admin_panel())
            return HANDLE_ADMIN_PANEL
        if selection_option == "admin_limits":
            download_storage_time_threshold = STORAGE_TIME
            download_storage_size_threshold = STORAGE_SIZE
            stats = limits_panel_text.format(download_storage_size_threshold, download_storage_time_threshold)
            update.callback_query.edit_message_text(text=stats, reply_markup=self.admin_interface_limts(),
                                                    parse_mode=ParseMode.MARKDOWN)
            return LIMITS_PANEL
        if selection_option == "admin_filters":
            group_passwd = GROUP_PASSWORD
            private_passwd = USER_PASSWORD
            allowlist_enabled = str(ALLOWLIST)
            text = filter_panel_text.format(allowlist_enabled, group_passwd, private_passwd)
            update.callback_query.edit_message_text(text=text, reply_markup=self.admin_interface_filters(),
                                                    parse_mode=ParseMode.MARKDOWN)
            return FILTERS_PANEL
        if selection_option == "admin_sys_info":
            update.callback_query.edit_message_text(text="Sys Health", reply_markup=self.admin_interface_sys_info())
            return SYS_INFO_PANEL
        if selection_option == "close":
            update.callback_query.edit_message_text(text="Admin Interface closed")
            return -1

    def filters_options(self, update, context):
        """Handle the Filters submenu: prompt for a username/ID via ForceReply."""
        chat_id = update.effective_chat.id
        callback_data = update.callback_query.data
        callback_data = callback_data.split("|")
        selection_option = callback_data[1]
        if selection_option == "revoke_user":
            text = "*Revoke user's access from bot*\nSend user's username or user id"
            update.callback_query.edit_message_text(text=text, parse_mode=ParseMode.MARKDOWN,
                                                    reply_markup=self.last_step_btns(prev_menu="admin|admin_filters"))
            context.bot.send_message(chat_id=chat_id, text="Username or User ID", parse_mode=ParseMode.MARKDOWN,
                                     reply_markup=ForceReply())
        if selection_option == "add_admin":
            # NOTE(review): this prompt still says "Revoke user's access" for
            # the Add Admin action — confirm the intended wording.
            text = "*Revoke user's access from bot*\nSend user's username or user id"
            update.callback_query.edit_message_text(text=text, parse_mode=ParseMode.MARKDOWN,
                                                    reply_markup=self.last_step_btns(prev_menu="admin|admin_filters"))
            context.bot.send_message(chat_id=chat_id, text="Username or User ID", parse_mode=ParseMode.MARKDOWN,
                                     reply_markup=ForceReply())
        return

    @staticmethod
    def get_logs(update, context):
        """Send all three log files to the requesting user as documents."""
        user_id = update.effective_user.id
        try:
            # Context managers fix the original's leaked file handles.
            with open("{}/ravager.log".format(LOGS_DIR), "rb") as log_file:
                context.bot.sendDocument(chat_id=user_id, document=log_file)
            with open("{}/celery.log".format(LOGS_DIR), "rb") as log_file:
                context.bot.sendDocument(chat_id=user_id, document=log_file)
            with open("{}/aria2.log".format(LOGS_DIR), "rb") as log_file:
                context.bot.sendDocument(chat_id=user_id, document=log_file)
        except Exception as e:
            # fix: Message.reply_text() takes no chat_id kwarg (TypeError);
            # send via the bot like the rest of this module does.
            context.bot.send_message(chat_id=user_id, text=str(e))
            logger.error(e)

    @staticmethod
    def logs_panel():
        """Log-selection keyboard for the Logs submenu."""
        logs_panel = [[InlineKeyboardButton(text="Aria logs", callback_data="sys_info_logs|aria_logs"),
                       InlineKeyboardButton(text="Celery logs", callback_data="sys_info_logs|celery_logs"),
                       InlineKeyboardButton(text="Ravager logs", callback_data="sys_info_logs|ravager_logs")],
                      [InlineKeyboardButton(text="Back", callback_data="admin|admin_sys_info"),
                       InlineKeyboardButton(text="Back to main menu", callback_data="admin|admin_main")]]
        reply_markup = InlineKeyboardMarkup(logs_panel)
        return reply_markup

    def system_options(self, update, context):
        """Handle the Sys Info submenu: render host metrics or open the logs menu."""
        callback_data = update.callback_query.data
        callback_data = callback_data.split("|")
        selection_option = callback_data[1]
        if selection_option == "sys_info":
            # NOTE(review): cpu_percent is called twice (warm-up?); only the
            # second reading is used.
            psutil.cpu_percent(interval=0.1)
            cpu_percent = psutil.cpu_percent(interval=0.1)
            mem = psutil.virtual_memory()
            disk_usage = psutil.disk_usage(str(DOWNLOAD_DIR))
            net = psutil.net_io_counters(pernic=False, nowrap=True)
            used_mem = humanize(mem.used)
            available_mem = humanize(mem.available)
            bytes_sent = humanize(net.bytes_sent)
            bytes_recvd = humanize(net.bytes_recv)
            total_disk_space = humanize(disk_usage.total)
            total_free_space = humanize(disk_usage.free)
            text = sys_info_text.format(cpu_percent, used_mem.size, used_mem.unit, available_mem.size,
                                        available_mem.unit, bytes_recvd.size, bytes_recvd.unit,
                                        bytes_sent.size, bytes_sent.unit, total_disk_space.size,
                                        total_disk_space.unit, total_free_space.size,
                                        total_free_space.unit)
            update.callback_query.edit_message_text(text=text, parse_mode=ParseMode.MARKDOWN,
                                                    reply_markup=self.last_step_btns(prev_menu="admin|admin_sys_info"))
            return SYS_INFO_PANEL
        if selection_option == "logs":
            update.callback_query.edit_message_text(text="*Get yo logs*", parse_mode=ParseMode.MARKDOWN,
                                                    reply_markup=self.logs_panel())
            return LOGS_HANDLER

    def logs_handler(self, update, context):
        """Send the selected log file as a document; stay in the logs state."""
        callback_data = update.callback_query.data
        callback_data = callback_data.split("|")
        selection_option = callback_data[1]
        try:
            # Context managers fix the original's leaked file handles.
            if selection_option == "aria_logs":
                with open("{}/aria2.log".format(LOGS_DIR), "rb") as log_file:
                    context.bot.sendDocument(chat_id=update.callback_query.from_user.id,
                                             document=log_file)
            if selection_option == "celery_logs":
                with open("{}/celery.log".format(LOGS_DIR), "rb") as log_file:
                    context.bot.sendDocument(chat_id=update.callback_query.from_user.id,
                                             document=log_file)
            if selection_option == "ravager_logs":
                with open("{}/ravager.log".format(LOGS_DIR), "rb") as log_file:
                    context.bot.sendDocument(chat_id=update.callback_query.from_user.id,
                                             document=log_file)
            return LOGS_HANDLER
        except Exception as e:
            # On failure the handler returns None (conversation state unchanged).
            logger.error(e)

    @_restricted
    def serve_admin_panel(self, update, context):
        """Entry point for /admin_interface; private chats only, admins only."""
        if str(update.effective_chat.type) == "group" or str(update.effective_chat.type) == "supergroup":
            update.message.reply_text(text="This command can only be ran inside private chat")
            return -1
        self.user.user_id = update.effective_chat.id
        self.user = UserData(user=self.user).get_user()
        update.message.reply_text(text="Admin Panel", reply_markup=self.admin_panel())
        return HANDLE_ADMIN_PANEL

    def limits_options(self, update, context):
        """Handle the Limits submenu prompts.

        NOTE(review): the current limits keyboard only offers Back/Close, so
        these option branches look unreachable; if a callback arrived with an
        unknown option, `text` would be unbound (NameError) — confirm."""
        callback_data = update.callback_query.data
        callback_data = callback_data.split("|")
        selection_option = callback_data[1]
        max_tasks_per_chat = MAX_TASKS_PER_USER
        download_storage_size_treshold = STORAGE_SIZE
        download_storage_time_treshold = STORAGE_TIME
        if selection_option == "max_tasks_per_chat":
            text = "*Max tasks per chat*\nCurrent value is: *{}*\nSend new value:".format(max_tasks_per_chat)
        if selection_option == "storage_size_treshold":
            text = "*Download storage size*\nCurrent value is: *{}* GB\nSend new value:".format(
                download_storage_size_treshold)
        if selection_option == "storage_duration":
            text = "*Download storage duration*\nCurrent value is: *{}* Hrs\nSend new value:".format(
                download_storage_time_treshold)
        update.callback_query.edit_message_text(text=text, parse_mode=ParseMode.MARKDOWN,
                                                reply_markup=self.last_step_btns(prev_menu="admin|admin_limits"))
        return LIMITS_PANEL

    def admin_interface_handler(self):
        """Build the ConversationHandler wiring all states together.

        NOTE(review): ConversationHandler is not in the visible telegram.ext
        import list — presumably re-exported by the constants star import;
        confirm."""
        admin_interface_handler = ConversationHandler(
            entry_points=[CommandHandler("admin_interface", self.serve_admin_panel)],
            states={
                HANDLE_ADMIN_PANEL: [CallbackQueryHandler(self.handle_admin_panel, pattern="admin")],
                LIMITS_PANEL: [CallbackQueryHandler(self.limits_options, pattern="limits")],
                FILTERS_PANEL: [CallbackQueryHandler(self.filters_options, pattern="filters")],
                SYS_INFO_PANEL: [CallbackQueryHandler(self.system_options, pattern="sys_info")],
                LOGS_HANDLER: [CallbackQueryHandler(self.logs_handler, pattern="sys_info_logs")]
            },
            fallbacks=[CallbackQueryHandler(self.handle_admin_panel, pattern="admin"),
                       CallbackQueryHandler(self.handle_admin_panel, pattern="limits"),
                       CallbackQueryHandler(self.handle_admin_panel, pattern="filters"),
                       CallbackQueryHandler(self.handle_admin_panel, pattern="close"),
                       CommandHandler('cancel', self.end_selection),
                       MessageHandler(Filters.regex('^\/'), self.end_selection)],
            conversation_timeout=300
        )
        return admin_interface_handler
| 50.164948 | 140 | 0.644746 |
fef71fd2689cde39a6617bb13c2101fc8e715b36 | 10,004 | py | Python | logo_rc.py | idocx/WHULibSeatReservation | 198fc62910a7937cc654069eb2f3fbf44b6e6f1d | [
"MIT"
] | 14 | 2019-02-24T01:53:37.000Z | 2021-03-27T02:21:24.000Z | logo_rc.py | Linqiaosong/WHULibSeatReservation | da89e1d3db920d41d6d74b3f83f8cdebad305457 | [
"MIT"
] | 3 | 2019-06-11T03:31:49.000Z | 2021-04-12T02:58:50.000Z | logo_rc.py | Linqiaosong/WHULibSeatReservation | da89e1d3db920d41d6d74b3f83f8cdebad305457 | [
"MIT"
] | 7 | 2019-06-06T17:31:27.000Z | 2020-11-08T13:03:49.000Z | #############################################################
# 作者:我.doc
# Github地址:https://github.com/idocx/WHULibSeatReservation
#############################################################
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x03\xac\
\x00\
\x00\x67\xf6\x78\x9c\xed\x9c\x5b\x48\x53\x71\x1c\xc7\x7f\x53\xb3\
\xec\xa2\x54\x9a\x94\x3a\xcd\x6e\x56\xda\x7c\x90\xd2\x53\x82\x98\
\x46\x45\xbd\x04\xd1\xed\xa1\x1b\x58\x42\x85\x92\x97\x0a\xc1\xa0\
\x07\xbb\x88\x45\x35\xb5\x42\xc9\x9c\x36\x7c\x2b\x8d\x56\x91\x33\
\xba\x50\x10\x95\x1a\xb3\xcc\x62\xe5\x8b\xa2\xdb\xc4\x28\x0b\xb5\
\x7f\x67\x8a\x10\xd6\xff\x9c\xad\x79\x9c\xe2\xf7\x0b\x1f\x7e\xdb\
\x61\x6c\xdf\xcf\x39\x8f\xfb\x9d\x43\xa4\x22\x0f\x8a\x8e\x26\x31\
\x61\xa4\x5f\x44\xb4\x58\x7c\x95\x90\x30\xf8\x3e\x22\x91\xe8\x95\
\x78\x2c\x42\x7c\x67\xff\xc8\x1e\x11\x95\x78\x7c\x20\xeb\x09\x41\
\xdc\x96\x88\x07\x6d\x51\x2b\x8c\xb6\xf3\x4a\x42\x2f\xd9\x24\xa5\
\xfa\x6b\x1e\x76\xad\x8b\x36\xda\xcc\x4a\x12\x5c\xd5\xea\x83\xfe\
\xe8\x8f\xfe\xe8\x8f\xfe\xe8\x8f\xfe\xe8\x3f\x5e\xfa\x47\x54\xb6\
\x7e\x99\x11\x7f\xbb\xdb\x15\xc8\x53\x7b\x93\x48\xab\xe3\x73\xb9\
\x88\x28\xd7\x43\x89\xfe\x4b\xca\x3e\xb7\x7a\xab\x75\x3f\x5d\x81\
\x54\x85\x0d\x44\x85\xf5\x7c\xb4\x8f\xd0\x7f\xec\xf5\x67\x5b\xc9\
\xd3\x94\x19\xd7\x62\xca\x12\xac\x3c\x2a\xce\x1d\xe9\x4e\x32\xb4\
\xf4\xf1\x88\x2b\xad\xef\x9b\x16\xaa\xfb\xc5\x63\x4a\x88\xae\x57\
\xa9\xfe\xc6\x04\xf2\x32\x65\x0b\x1d\x4d\xd9\xab\x19\x0f\xfd\xb9\
\xc3\x2c\xd9\xd0\xcc\x45\x28\x7d\xc3\xa6\x87\xe9\xb8\xf8\xa8\xcb\
\xfb\xd0\x1f\xfd\xd1\x1f\xfd\xff\xa7\xff\xfd\x53\xdb\x59\x6e\xc9\
\x55\x2e\x87\xf2\x8a\xd9\x9c\x20\x3e\xbe\x81\x57\xbf\x79\xcc\x2e\
\x6d\x93\x42\xba\xbb\x6b\xfd\xe5\xb8\x97\xb2\x8e\x2d\x0e\xc8\xe3\
\x32\xd7\x37\xdf\x22\xdf\x0f\xfd\xd1\x1f\xfd\xd1\x1f\xfd\xd1\x1f\
\xfd\xd1\x1f\xfd\x47\xab\xbf\x31\x75\x2d\xdb\xb4\x34\x93\xcb\x2a\
\x75\xce\x47\xf1\xf7\x6f\xb8\x88\x56\xa9\xfe\xb2\x64\x09\x37\x86\
\xff\xee\x48\x65\xbc\xf7\xaf\xda\x4a\x9e\xe2\xf7\x3f\x35\x65\x09\
\x26\xe5\x88\x3d\xad\x54\x7f\x04\xf9\x9f\xb0\x81\xfc\x63\x76\x11\
\xf9\x8d\xd2\xa4\xc9\xfc\x69\xa6\x71\x37\xfd\xec\xe7\x6f\xf8\xe4\
\x9d\x67\xfb\x9e\x56\x82\xc8\x49\xfa\x63\x4f\xeb\xa4\x73\xd7\x10\
\x41\x10\x84\x97\x65\x0f\x3b\x57\x46\xd5\x5a\x0e\x68\x8c\xd6\x83\
\xee\x20\xaa\xae\x73\x2f\x55\x31\x6f\x77\xf9\xaf\x30\xda\x32\x34\
\x75\xb6\x4f\x4a\xff\xf7\xce\x43\x53\x6b\x69\x08\x7f\x60\xf5\x83\
\x3f\xfc\xe1\x0f\x7f\xf8\xc3\x1f\xfe\xf0\x87\x3f\xfc\xe1\x0f\x7f\
\xf8\xc3\x1f\xfe\xee\xf7\xd7\xd4\x5a\xcd\x91\x86\x0e\x73\x64\x4d\
\xfb\xe7\x91\x64\x79\x4d\x7b\xa3\xff\xbe\x27\xf3\x88\xb4\xd3\x1d\
\xa3\xc0\xbe\xc3\xaf\x1a\x6d\xff\xa8\x7b\x9d\x66\xff\x5d\x75\x96\
\xc9\x0b\x2b\x7b\x46\x12\xef\xf9\x15\x5f\x49\x55\xf8\x5c\x74\x7b\
\xe6\x20\x65\x44\x97\xe6\xb9\xc5\x7f\x5b\x9d\xd5\xd5\x1d\xf4\xe1\
\x4c\x0a\x2e\xff\x4e\xaa\xa2\x46\x27\xfe\xe7\xae\x22\xba\x18\x0c\
\x7f\xf8\xc3\xff\xef\x34\xa5\xc5\x86\x99\xb2\xe3\x1e\x37\x65\x0b\
\x5d\x8e\x90\x77\xe5\xc2\xf7\x24\xc3\x87\x5e\xa9\x7b\x37\xec\x24\
\x56\x37\xf7\x85\xee\x30\xf4\x4b\xdd\xc3\x31\x84\x4f\x88\xfc\x2e\
\xb5\x52\xfe\x0d\x69\x31\xe1\xa2\xff\x1b\x47\xf7\x26\xce\x16\x17\
\xb0\xe4\xbb\xef\x25\x77\xc7\xed\xac\xad\x7e\xc7\xe6\xef\x34\x48\
\xee\x90\x0f\x31\x55\x5d\xde\x0f\x7f\xf8\xc3\x1f\xfe\xf0\x87\x3f\
\xfc\xc7\xae\xff\xb5\x0b\x27\x58\xca\xcd\x1a\x96\xa2\xbf\x23\xc9\
\xfe\xf2\x1a\x16\xbd\xa5\x82\xcd\x0c\x29\x91\xc5\x37\xa8\xa4\xd7\
\x2b\xb0\xcc\xe6\x08\x9e\x01\xd7\x2d\xf2\xf7\x95\x29\xe7\xff\xfa\
\x78\x22\x7b\x91\xb3\x5e\x64\x83\x24\x8f\x8f\x6d\x64\xfb\xd6\xa4\
\xb1\x05\x01\x67\x64\x51\xcf\x3c\xfb\x83\x3c\x8a\xde\x3a\x84\x73\
\xd7\x7e\xc4\xfd\x1d\xa5\xfe\x68\x3c\x4b\x15\x52\x25\x77\xb8\x87\
\x08\x9b\x75\xba\xc7\x49\x27\xf8\xc3\x1f\xfe\xf0\x87\x3f\xfc\xe1\
\x0f\x7f\xf8\xc3\x1f\xfe\xf0\x87\x3f\xfc\xe1\x0f\x7f\xb7\xfb\x37\
\x66\xac\x61\xf9\x9b\x77\xb0\xdd\x31\x87\x65\xd9\xa6\x49\xb3\x10\
\x69\xf3\x15\x22\x9d\xa8\x60\xd6\x68\xfb\x3b\xc3\xbb\x2c\xa1\x99\
\xd7\x4f\xe9\xc0\x7f\x62\xfb\xbf\x4f\x8f\x0b\x32\x65\x0a\x7a\x91\
\x57\x6e\x23\x23\xf6\x96\xbb\xfc\x11\x04\x99\x58\x61\x0e\x67\xe8\
\xd3\x63\x61\x0e\x3e\xfb\x40\x6a\x76\xd1\x84\x98\xa1\x03\x8f\xb3\
\x10\xcf\x0a\x6f\xca\xe5\x37\xae\x47\xc0\x77\
\x00\x00\x03\xf7\
\x00\
\x00\x4e\x25\x78\x9c\xed\x9c\xef\x6f\x53\x55\x18\xc7\x4f\xb7\x75\
\xea\x2e\x1b\x6f\x08\x6b\x89\x64\x13\x0a\x84\x4c\xbb\xb6\x6a\x34\
\x22\xb9\x76\xd9\x1c\x77\x81\xd6\x0c\xcc\x24\x2e\xb6\xd8\x0a\x66\
\xbc\xd8\x4c\x11\x67\xd0\xde\xae\x64\x29\x66\x35\x75\x6d\x1c\x95\
\x00\x45\x19\x12\x34\x76\x59\x96\x69\x8a\x26\xb7\xd8\x50\x41\xcb\
\x0a\x66\x42\x25\xe2\x6d\x21\x6b\xf6\x23\xd8\xcd\x6d\xf4\xc7\xee\
\xbd\xc7\xb3\xfa\x4e\x5f\x99\x98\x88\xc9\x73\xf3\xdc\x9c\xef\xb9\
\x27\x9f\xcf\x79\xce\x1f\x70\xee\x7b\x2f\x18\x9a\x2b\x2b\xd6\x54\
\x20\x84\x2a\x99\x6d\x8d\xad\x08\xc9\x6d\xe4\xb5\x3e\x58\x4e\xbe\
\x0c\x60\x77\x2d\x19\x64\xb6\xd6\xe6\x06\x14\x1c\x7b\x78\x8a\x4c\
\xca\xf6\xe9\x77\xe8\x11\x1a\xf6\x50\xc2\x1e\x39\x99\x3f\xd4\xb5\
\x6d\x37\x21\xd6\xba\x96\x5f\x99\x67\x62\xee\x35\x84\x76\x9c\x67\
\x1a\xf5\xbb\xba\x5f\xb9\x9b\xe0\xa8\x30\x3e\x78\x23\x5b\xbf\x7b\
\xfb\x85\xed\xce\x0f\xae\x7f\xf2\xc6\xe6\x37\x3b\xea\xab\x9c\x4e\
\x67\xf2\xe6\xc7\xbe\xc3\xe3\x8f\xf4\x1d\x98\x5c\x17\xd5\x0f\xdf\
\x9a\x1e\xac\x3d\x62\xae\xa8\xda\x78\x60\x32\x71\xe6\x34\x5d\x7b\
\xb5\xa9\x4c\xdb\x44\x0d\x34\x50\x55\x6b\xd7\xa9\xa7\x92\xa3\x3d\
\x5b\x2e\x4f\x3c\xba\x18\x79\xf7\x64\xf6\xed\x67\x53\x73\xf4\xf1\
\xf9\x82\xde\xab\x62\xa0\xa0\xee\xff\x2a\x9c\xa7\xa5\x36\xe9\x62\
\x1c\x5f\x33\xe5\xaf\xd0\x32\x84\x84\x55\x6c\x2a\x20\xf5\x07\x8e\
\x8f\x91\xd5\x1f\xe3\x5f\xd2\x52\x1d\x97\x7f\x40\x4a\x98\x04\xde\
\xbe\x82\x2c\xef\xc2\xb7\x37\x9d\x1d\x21\xec\xc2\xc8\x15\x3f\x85\
\xd0\x7a\x4b\x90\x4c\xd6\x28\x6d\x32\x64\xb6\xc6\x3e\x54\x31\xad\
\xeb\x2d\x6a\x84\xdc\xee\x10\xe1\x3b\xac\x31\x39\x72\xf4\x6b\xdb\
\x48\x7e\xdf\x1d\x42\x48\xa3\x4b\xb7\x78\x55\xe7\xfa\xb5\xd5\x08\
\x3d\x1f\x6d\x27\x1d\x3c\xa9\x4b\x97\xa0\xf0\x25\x7f\x1d\xe3\x1d\
\x24\x5f\x10\x52\x28\x6d\x1b\x18\xef\xf8\x25\x90\x83\x1c\xe4\xff\
\x7b\xb9\xe3\x53\x3a\xcf\x0b\x51\x0f\xee\xa3\x97\x68\x19\xbd\x9f\
\x4d\xdf\xc0\x19\x6e\x31\xb2\x92\x55\xa0\x6e\x7e\x48\x9a\x8d\xd4\
\x0b\x56\x9d\xef\x18\x31\x9c\x9a\x32\xe2\xa3\xec\xfc\x77\x58\xcc\
\x88\x46\x47\x60\x98\x2b\x1c\x92\x92\x9d\x98\x8d\xdf\xa2\xcb\x91\
\x21\x17\xa1\x3f\x8a\x0d\xee\x25\x0d\xbc\xae\x4b\x37\x1c\x26\xfd\
\x19\xb4\xe9\xcf\x4e\x90\xf1\x98\x3b\x74\xd5\x4c\x76\xda\xe9\x0e\
\x75\xec\x23\xe3\x46\x8b\xfa\x09\x85\x4d\x96\x5c\x65\x09\xde\x5c\
\xd6\x5e\xf6\x53\x95\x04\x7b\xfa\xa2\xff\xf3\x83\x80\x03\x0e\x38\
\xe0\x80\x03\x0e\x38\xe0\x80\x03\x0e\x38\xe0\x80\x03\x0e\x38\xe0\
\x80\x03\x0e\x38\xe0\x80\x03\x0e\x38\xe0\xf7\x15\xde\xfb\xb2\x34\
\x6e\x2a\x34\xd3\xd2\x04\x57\xa0\x4b\x29\x21\x8f\xa7\xeb\x71\x6e\
\x48\xbc\x5d\x83\xec\x51\xa9\x90\x11\x3d\x98\xc7\x29\x3a\x17\x62\
\xab\xbe\xb9\xd3\x89\x85\xb8\x38\x12\xd0\x25\x91\x50\x55\xdc\x2c\
\x35\xc0\x2b\x8f\x48\x79\xfc\xfb\x59\x87\xdc\x7e\x87\x13\xe3\xac\
\xb0\x9f\x5b\xa0\xc2\xf1\x04\x2f\x9e\xe1\xc5\x4e\xbb\x98\xc5\x42\
\x67\x69\x26\x37\xca\x67\xbf\x96\x16\x8d\xca\x2e\x64\x77\x69\xdb\
\xee\xba\x43\x5d\x7d\xda\xea\x7b\x9a\x62\x67\xbf\x5a\x62\xf2\xf2\
\x68\xfb\xe8\xe3\xe9\x6f\x1f\x8b\xb6\x27\x22\xc5\xf6\xbb\x3d\xda\
\x6a\x85\xc2\xf6\xd3\x98\x9f\xfa\x4a\x61\xdb\xb3\xb2\x78\xc6\xd4\
\xd6\x68\xbb\xd9\x1c\x7b\x71\x8b\x45\xfd\xcb\xab\xb1\xde\xad\xc5\
\x93\xf4\x66\x15\xb6\x9e\x1e\x90\x82\x14\xa4\x20\x05\x29\x48\x41\
\x0a\x52\x90\x82\x14\xa4\x20\x05\x29\x48\x41\x0a\x52\x90\xfe\xbb\
\xd2\x0d\xcc\x09\xfb\x4c\x8d\x30\xe9\x11\xae\xf3\x4b\xd7\xb0\x6c\
\x41\xf4\xe1\xdf\x8e\x4a\x13\xc6\x43\xcf\xc9\x1c\x98\x75\x87\xc2\
\x61\x7f\x1d\x33\x6d\x2d\xee\x80\x66\x2b\x2d\x6a\x8d\x26\xdd\xe2\
\x7d\x46\x59\xec\x03\xbd\x43\x64\x2b\xdc\x21\xef\xa0\xe1\x4f\x31\
\xe2\x35\xe9\x92\x52\x4b\x50\x75\xce\x57\xdc\xbe\x24\xfc\x14\x28\
\x40\x01\x0a\x50\x80\x02\x14\xa0\xf8\x8b\x42\x8a\x73\xf3\x01\xc9\
\x84\x67\x58\xb1\x46\xa4\x51\x98\x33\x71\x73\x3f\x70\x39\x0e\x7f\
\x51\x63\xcf\xd9\x10\x0e\x9a\xc4\x94\x0b\xcf\x6e\xca\x8c\xa8\x18\
\x7a\x69\xf9\x12\xce\xec\x69\x36\xe7\x9b\x7f\xc9\xab\x62\xa6\xf7\
\xc6\xe4\x08\x15\x2f\xd9\xac\xb6\x42\x84\x08\x11\x22\x44\x88\x10\
\x21\x42\x84\x08\x11\x22\x44\x88\x10\x21\xfe\x3d\x2e\x0c\x49\x12\
\x7b\xcf\x65\x9f\xc9\xe0\x49\xb6\x0c\x39\x22\x52\xd4\x38\x66\xf9\
\xef\x7f\x3b\x04\x05\xf5\x8f\xaa\xf0\xd6\x52\xca\xc5\x52\xc3\x3f\
\xd3\xab\x5b\x2e\x9c\xfa\x1e\x91\x87\x69\x32\x34\x06\x1b\xcc\xce\
\x3f\x00\x9c\xbc\xe1\x52\
"
qt_resource_name = b"\
\x00\x04\
\x00\x05\x13\xbf\
\x00\x4c\
\x00\x4f\x00\x47\x00\x4f\
\x00\x08\
\x05\xe2\x41\xff\
\x00\x6c\
\x00\x6f\x00\x67\x00\x6f\x00\x2e\x00\x69\x00\x63\x00\x6f\
\x00\x08\
\x05\xe2\x59\x27\
\x00\x6c\
\x00\x6f\x00\x67\x00\x6f\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x02\
\x00\x00\x00\x0e\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x24\x00\x01\x00\x00\x00\x01\x00\x00\x03\xb0\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x0e\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x6c\x4d\xe3\x39\x93\
\x00\x00\x00\x24\x00\x01\x00\x00\x00\x01\x00\x00\x03\xb0\
\x00\x00\x01\x6c\x42\xbf\x46\x67\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 53.784946 | 104 | 0.703419 |
fef8828761203757d50e9784d410fa779ff9303d | 563 | py | Python | daoliagent/utils.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | 1 | 2019-09-11T11:56:19.000Z | 2019-09-11T11:56:19.000Z | daoliagent/utils.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | null | null | null | daoliagent/utils.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | null | null | null | import random
import six.moves.urllib.parse as urlparse
def replace_url(url, host=None, port=None, path=None):
o = urlparse.urlparse(url)
_host = o.hostname
_port = o.port
_path = o.path
if host is not None:
_host = host
if port is not None:
_port = port
netloc = _host
if _port is not None:
netloc = ':'.join([netloc, str(_port)])
if path is not None:
_path = path
return '%s://%s%s' % (o.scheme, netloc, _path)
def generate_seq():
return random.randint(1000000000, 4294967296)
| 20.107143 | 54 | 0.614565 |
fef8bcaaac0327ab05b3750bfd80e03d8695818d | 2,745 | py | Python | cookbook/chap9/main.py | duyquang6/py-side-project | e3cdfcf424bbb15afad8241a357de49a1717fba6 | [
"Apache-2.0"
] | null | null | null | cookbook/chap9/main.py | duyquang6/py-side-project | e3cdfcf424bbb15afad8241a357de49a1717fba6 | [
"Apache-2.0"
] | null | null | null | cookbook/chap9/main.py | duyquang6/py-side-project | e3cdfcf424bbb15afad8241a357de49a1717fba6 | [
"Apache-2.0"
] | null | null | null | # 9.1. Putting a Wrapper Around a Function
#region
# import time
# from functools import wraps
# def timethis(func):
# '''
# Decorator that reports the execution time.
# '''
# @wraps(func)
# def wrapper(*args, **kwargs):
# start = time.time()
# result = func(*args, **kwargs)
# end = time.time()
# print(func.__name__, end-start)
# return result
# return wrapper
# @timethis
# def countdown(n):
# '''
# Count down
# '''
# while n > 0:
# n -= 1
# countdown(10000000)
# class A:
# @classmethod
# def method(cls):
# pass
# class B:
# # Equivalent definition of a class method
# def method(cls):
# pass
# method = classmethod(method)
#endregion
# 9.2. Preserving Function Metadata When Writing Decorators
#region
# import time
# from functools import wraps
# def timethis(func):
# '''
# Decorator that reports the execution time.
# '''
# @wraps(func)
# def wrapper(*args, **kwargs):
# start = time.time()
# result = func(*args, **kwargs)
# end = time.time()
# print(func.__name__, end-start)
# return result
# return wrapper
# @timethis
# def countdown(n):
# '''
# Count down
# '''
# while n > 0:
# n -= 1
#endregion
### 9.3. Unwrapping a Decorator
#region
# from functools import wraps
# def decorator1(func):
# @wraps(func)
# def wrapper(*args, **kwargs):
# print('Decorator 1')
# return func(*args, **kwargs)
# return wrapper
# def decorator2(func):
# @wraps(func)
# def wrapper(*args, **kwargs):
# print('Decorator 2')
# return func(*args, **kwargs)
# return wrapper
# @decorator1
# @decorator2
# def add(x, y):
# return x + y
# add(2, 3)
# add.__wrapped__(2, 3)
#endregion
### 9.4. Defining a Decorator That Takes Arguments
#region
from functools import wraps
import logging
def logged(level, name=None, message=None):
'''
Add logging to a function. level is the logging
level, name is the logger name, and message is the
log message. If name and message aren't specified,
they default to the function's module and name.
'''
def decorate(func):
logname = name if name else func.__module__
log = logging.getLogger(logname)
logmsg = message if message else func.__name__
@wraps(func)
def wrapper(*args, **kwargs):
log.log(level, logmsg)
return func(*args, **kwargs)
return wrapper
return decorate
# Example use
@logged(logging.DEBUG)
def add(x, y):
return x + y
@logged(logging.CRITICAL, 'example')
def spam():
print('Spam!')
#endregion | 20.639098 | 59 | 0.587614 |
fefa551e8285feb448d258e854941881fb3ad2e9 | 759 | py | Python | doggo_ears_definitions.py | jryzkns/doggo-ears | 004dbb8b07a0a2170ce0d04b6e1458b268cdd543 | [
"MIT"
] | 1 | 2020-08-28T16:49:32.000Z | 2020-08-28T16:49:32.000Z | doggo_ears_definitions.py | jryzkns/doggo-ears | 004dbb8b07a0a2170ce0d04b6e1458b268cdd543 | [
"MIT"
] | null | null | null | doggo_ears_definitions.py | jryzkns/doggo-ears | 004dbb8b07a0a2170ce0d04b6e1458b268cdd543 | [
"MIT"
] | null | null | null | import numpy as np
import torch
torch.manual_seed(0)
# PRE-PROCESSING
RAVDESS_DSET_PATH = "C:\\Users\\***\\Downloads\\RAVDESS\\"
TESS_DSET_PATH = "C:\\Users\\***\\Downloads\\TESS\\"
N_WORKERS = 15
# DATASET
emote_id = {
"01" : "neutral", "03" : "happy",
"04" : "sad", "05" : "angry"}
emote_idn = {
0 : "neutral", 1 : "happy",
2 : "sad", 3 : "angry"}
N_CATEGORIES = len(emote_id)
label_id = { n : torch.tensor(i)
for i, n in enumerate(emote_id.values())}
# AUDIO
window_duration = 0.5
LISTENER_RATE = 44100
N_FEATURES = 2
NUM_INFERENCE_WINDOW = 10
samples_per_wind = int(LISTENER_RATE * window_duration)
# TRAINING
BATCH_SIZE = 16
loader_params = { "batch_size" : BATCH_SIZE,
"shuffle" : True} | 22.323529 | 58 | 0.623188 |
fefb10e3bc54bf078e079e6dd58a9eee22dea396 | 7,752 | py | Python | vdp/pipeline/v1alpha/pipeline_service_pb2.py | instill-ai/protogen-python | 6e118d34566b8d59e8bcd40e0ae28e0fc1a5d50f | [
"Apache-2.0"
] | 1 | 2022-03-22T09:09:46.000Z | 2022-03-22T09:09:46.000Z | vdp/pipeline/v1alpha/pipeline_service_pb2.py | instill-ai/protogen-python | 6e118d34566b8d59e8bcd40e0ae28e0fc1a5d50f | [
"Apache-2.0"
] | 4 | 2022-03-16T12:36:12.000Z | 2022-03-22T10:53:12.000Z | vdp/pipeline/v1alpha/pipeline_service_pb2.py | instill-ai/protogen-python | 6e118d34566b8d59e8bcd40e0ae28e0fc1a5d50f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: vdp/pipeline/v1alpha/pipeline_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from vdp.pipeline.v1alpha import healthcheck_pb2 as vdp_dot_pipeline_dot_v1alpha_dot_healthcheck__pb2
from vdp.pipeline.v1alpha import pipeline_pb2 as vdp_dot_pipeline_dot_v1alpha_dot_pipeline__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n+vdp/pipeline/v1alpha/pipeline_service.proto\x12\x14vdp.pipeline.v1alpha\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a&vdp/pipeline/v1alpha/healthcheck.proto\x1a#vdp/pipeline/v1alpha/pipeline.proto2\xcc\x10\n\x0fPipelineService\x12\x92\x01\n\x08Liveness\x12%.vdp.pipeline.v1alpha.LivenessRequest\x1a&.vdp.pipeline.v1alpha.LivenessResponse\"7\x82\xd3\xe4\x93\x02\x31Z\x1a\x12\x18/v1alpha/health/pipeline\x12\x13/v1alpha/__liveness\x12z\n\tReadiness\x12&.vdp.pipeline.v1alpha.ReadinessRequest\x1a\'.vdp.pipeline.v1alpha.ReadinessResponse\"\x1c\x82\xd3\xe4\x93\x02\x16\x12\x14/v1alpha/__readiness\x12\x9c\x01\n\x0e\x43reatePipeline\x12+.vdp.pipeline.v1alpha.CreatePipelineRequest\x1a,.vdp.pipeline.v1alpha.CreatePipelineResponse\"/\xda\x41\x08pipeline\x82\xd3\xe4\x93\x02\x1e:\x08pipeline\"\x12/v1alpha/pipelines\x12\x81\x01\n\x0cListPipeline\x12).vdp.pipeline.v1alpha.ListPipelineRequest\x1a*.vdp.pipeline.v1alpha.ListPipelineResponse\"\x1a\x82\xd3\xe4\x93\x02\x14\x12\x12/v1alpha/pipelines\x12\x8e\x01\n\x0bGetPipeline\x12(.vdp.pipeline.v1alpha.GetPipelineRequest\x1a).vdp.pipeline.v1alpha.GetPipelineResponse\"*\xda\x41\x04name\x82\xd3\xe4\x93\x02\x1d\x12\x1b/v1alpha/{name=pipelines/*}\x12\xba\x01\n\x0eUpdatePipeline\x12+.vdp.pipeline.v1alpha.UpdatePipelineRequest\x1a,.vdp.pipeline.v1alpha.UpdatePipelineResponse\"M\xda\x41\x14pipeline,update_mask\x82\xd3\xe4\x93\x02\x30:\x08pipeline2$/v1alpha/{pipeline.name=pipelines/*}\x12\x97\x01\n\x0e\x44\x65letePipeline\x12+.vdp.pipeline.v1alpha.DeletePipelineRequest\x1a,.vdp.pipeline.v1alpha.DeletePipelineResponse\"*\xda\x41\x04name\x82\xd3\xe4\x93\x02\x1d*\x1b/v1alpha/{name=pipelines/*}\x12\xa8\x01\n\x0eLookUpPipeline\x12+.vdp.pipeline.v1alpha.LookUpPipelineRequest\x1a,.vdp.pipeline.v1alpha.LookUpPipelineResponse\";\xda\x41\tpermalink\x82\xd3\xe4\x93\x02)\x12\'/v1alpha/{permalink=pipelines/*}:lookUp\x12\xa9\x01\n\x10\x41\x63tivatePipeline\x12-.vdp.pipeline.v1
alpha.ActivatePipelineRequest\x1a..vdp.pipeline.v1alpha.ActivatePipelineResponse\"6\xda\x41\x04name\x82\xd3\xe4\x93\x02):\x01*\"$/v1alpha/{name=pipelines/*}:activate\x12\xb1\x01\n\x12\x44\x65\x61\x63tivatePipeline\x12/.vdp.pipeline.v1alpha.DeactivatePipelineRequest\x1a\x30.vdp.pipeline.v1alpha.DeactivatePipelineResponse\"8\xda\x41\x04name\x82\xd3\xe4\x93\x02+:\x01*\"&/v1alpha/{name=pipelines/*}:deactivate\x12\xb1\x01\n\x0eRenamePipeline\x12+.vdp.pipeline.v1alpha.RenamePipelineRequest\x1a,.vdp.pipeline.v1alpha.RenamePipelineResponse\"D\xda\x41\x14name,new_pipeline_id\x82\xd3\xe4\x93\x02\':\x01*\"\"/v1alpha/{name=pipelines/*}:rename\x12\xac\x01\n\x0fTriggerPipeline\x12,.vdp.pipeline.v1alpha.TriggerPipelineRequest\x1a-.vdp.pipeline.v1alpha.TriggerPipelineResponse\"<\xda\x41\x0bname,inputs\x82\xd3\xe4\x93\x02(:\x01*\"#/v1alpha/{name=pipelines/*}:trigger\x12\xae\x01\n\x1fTriggerPipelineBinaryFileUpload\x12<.vdp.pipeline.v1alpha.TriggerPipelineBinaryFileUploadRequest\x1a=.vdp.pipeline.v1alpha.TriggerPipelineBinaryFileUploadResponse\"\x0c\xda\x41\tname,file(\x01\x42\xea\x01\n\x18\x63om.vdp.pipeline.v1alphaB\x14PipelineServiceProtoP\x01ZFgithub.com/instill-ai/protogen-go/vdp/pipeline/v1alpha;pipelinev1alpha\xa2\x02\x03VPX\xaa\x02\x14Vdp.Pipeline.V1alpha\xca\x02\x14Vdp\\Pipeline\\V1alpha\xe2\x02 Vdp\\Pipeline\\V1alpha\\GPBMetadata\xea\x02\x16Vdp::Pipeline::V1alphab\x06proto3')
_PIPELINESERVICE = DESCRIPTOR.services_by_name['PipelineService']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n\030com.vdp.pipeline.v1alphaB\024PipelineServiceProtoP\001ZFgithub.com/instill-ai/protogen-go/vdp/pipeline/v1alpha;pipelinev1alpha\242\002\003VPX\252\002\024Vdp.Pipeline.V1alpha\312\002\024Vdp\\Pipeline\\V1alpha\342\002 Vdp\\Pipeline\\V1alpha\\GPBMetadata\352\002\026Vdp::Pipeline::V1alpha'
_PIPELINESERVICE.methods_by_name['Liveness']._options = None
_PIPELINESERVICE.methods_by_name['Liveness']._serialized_options = b'\202\323\344\223\0021Z\032\022\030/v1alpha/health/pipeline\022\023/v1alpha/__liveness'
_PIPELINESERVICE.methods_by_name['Readiness']._options = None
_PIPELINESERVICE.methods_by_name['Readiness']._serialized_options = b'\202\323\344\223\002\026\022\024/v1alpha/__readiness'
_PIPELINESERVICE.methods_by_name['CreatePipeline']._options = None
_PIPELINESERVICE.methods_by_name['CreatePipeline']._serialized_options = b'\332A\010pipeline\202\323\344\223\002\036:\010pipeline\"\022/v1alpha/pipelines'
_PIPELINESERVICE.methods_by_name['ListPipeline']._options = None
_PIPELINESERVICE.methods_by_name['ListPipeline']._serialized_options = b'\202\323\344\223\002\024\022\022/v1alpha/pipelines'
_PIPELINESERVICE.methods_by_name['GetPipeline']._options = None
_PIPELINESERVICE.methods_by_name['GetPipeline']._serialized_options = b'\332A\004name\202\323\344\223\002\035\022\033/v1alpha/{name=pipelines/*}'
_PIPELINESERVICE.methods_by_name['UpdatePipeline']._options = None
_PIPELINESERVICE.methods_by_name['UpdatePipeline']._serialized_options = b'\332A\024pipeline,update_mask\202\323\344\223\0020:\010pipeline2$/v1alpha/{pipeline.name=pipelines/*}'
_PIPELINESERVICE.methods_by_name['DeletePipeline']._options = None
_PIPELINESERVICE.methods_by_name['DeletePipeline']._serialized_options = b'\332A\004name\202\323\344\223\002\035*\033/v1alpha/{name=pipelines/*}'
_PIPELINESERVICE.methods_by_name['LookUpPipeline']._options = None
_PIPELINESERVICE.methods_by_name['LookUpPipeline']._serialized_options = b'\332A\tpermalink\202\323\344\223\002)\022\'/v1alpha/{permalink=pipelines/*}:lookUp'
_PIPELINESERVICE.methods_by_name['ActivatePipeline']._options = None
_PIPELINESERVICE.methods_by_name['ActivatePipeline']._serialized_options = b'\332A\004name\202\323\344\223\002):\001*\"$/v1alpha/{name=pipelines/*}:activate'
_PIPELINESERVICE.methods_by_name['DeactivatePipeline']._options = None
_PIPELINESERVICE.methods_by_name['DeactivatePipeline']._serialized_options = b'\332A\004name\202\323\344\223\002+:\001*\"&/v1alpha/{name=pipelines/*}:deactivate'
_PIPELINESERVICE.methods_by_name['RenamePipeline']._options = None
_PIPELINESERVICE.methods_by_name['RenamePipeline']._serialized_options = b'\332A\024name,new_pipeline_id\202\323\344\223\002\':\001*\"\"/v1alpha/{name=pipelines/*}:rename'
_PIPELINESERVICE.methods_by_name['TriggerPipeline']._options = None
_PIPELINESERVICE.methods_by_name['TriggerPipeline']._serialized_options = b'\332A\013name,inputs\202\323\344\223\002(:\001*\"#/v1alpha/{name=pipelines/*}:trigger'
_PIPELINESERVICE.methods_by_name['TriggerPipelineBinaryFileUpload']._options = None
_PIPELINESERVICE.methods_by_name['TriggerPipelineBinaryFileUpload']._serialized_options = b'\332A\tname,file'
_PIPELINESERVICE._serialized_start=202
_PIPELINESERVICE._serialized_end=2326
# @@protoc_insertion_point(module_scope)
| 131.389831 | 3,390 | 0.821723 |
fefbae820a9ce01089538fc58c0ca13a3a6231eb | 119 | py | Python | slash/__init__.py | SilentJungle399/dpy-appcommands | d383ebd3414457aaaf1f65ff048604accb7bb1bc | [
"MIT"
] | 2 | 2021-09-02T13:06:46.000Z | 2021-09-03T07:19:54.000Z | slash/__init__.py | SilentJungle399/dpy-appcommands | d383ebd3414457aaaf1f65ff048604accb7bb1bc | [
"MIT"
] | null | null | null | slash/__init__.py | SilentJungle399/dpy-appcommands | d383ebd3414457aaaf1f65ff048604accb7bb1bc | [
"MIT"
] | 1 | 2021-08-14T03:38:42.000Z | 2021-08-14T03:38:42.000Z | __author__ = "SilentJungle399"
__version__ = "1.0.0"
from .client import *
from .models import *
from .enums import *
| 17 | 30 | 0.722689 |
fefc83e00d4e08e9e4f83915c661bd7690cde11d | 211 | py | Python | django-app/main/textanalyzers/textblobanalyzer.py | honchardev/crypto-sentiment-app | 176a6ed61246490c42d2a2b7af4d45f67e3c7499 | [
"MIT"
] | 9 | 2019-07-07T02:57:50.000Z | 2022-01-07T10:03:30.000Z | django-app/main/textanalyzers/textblobanalyzer.py | honchardev/crypto-sentiment-app | 176a6ed61246490c42d2a2b7af4d45f67e3c7499 | [
"MIT"
] | null | null | null | django-app/main/textanalyzers/textblobanalyzer.py | honchardev/crypto-sentiment-app | 176a6ed61246490c42d2a2b7af4d45f67e3c7499 | [
"MIT"
] | null | null | null | from .abstractanalyzer import AbstractAnalyzer
from textblob import TextBlob
class TextBlobAnalyzer(AbstractAnalyzer):
def __init__(self):
pass
def analyze(self, text_content):
pass
| 16.230769 | 46 | 0.729858 |
fefccd0f2f86b8b353d1a858bb9e54ee6a296e8f | 850 | py | Python | 3/one.py | TheFrederick-git/adventofcode2021 | a320f3bba2655afab1aad8bf2520ccb705b2fd1e | [
"MIT"
] | null | null | null | 3/one.py | TheFrederick-git/adventofcode2021 | a320f3bba2655afab1aad8bf2520ccb705b2fd1e | [
"MIT"
] | null | null | null | 3/one.py | TheFrederick-git/adventofcode2021 | a320f3bba2655afab1aad8bf2520ccb705b2fd1e | [
"MIT"
] | null | null | null | """3/1 adventofcode"""
with open("input.txt", "r", encoding="UTF-8") as i_file:
data = i_file.read().splitlines()
columns = [[row[i] for row in data] for i in range(len(data[0]))]
def binlst_to_int(values) -> int:
"""Returns int values of binary in list form"""
values = values[::-1]
total = 0
for i in range(len(values)):
total += values[i]*2**i
return total
def get_most(columns) -> list:
"""Returns list of most common values for each column"""
return [1 if column.count("1") > column.count("0") else 0 for column in columns]
def get_least(columns) -> list:
"""Returns list of least common values for each column"""
return [0 if column.count("0") < column.count("1") else 1 for column in columns]
print(binlst_to_int(get_most(columns))*binlst_to_int(get_least(columns)))
| 35.416667 | 85 | 0.64 |
fefd02d2de45b18b74656b9de90c0632735f1832 | 848 | py | Python | leetcode/palindrome_pairs/palindrome_pairs.py | sagasu/python-algorithms | d630777a3f17823165e4d72ab780ede7b10df752 | [
"MIT"
] | null | null | null | leetcode/palindrome_pairs/palindrome_pairs.py | sagasu/python-algorithms | d630777a3f17823165e4d72ab780ede7b10df752 | [
"MIT"
] | null | null | null | leetcode/palindrome_pairs/palindrome_pairs.py | sagasu/python-algorithms | d630777a3f17823165e4d72ab780ede7b10df752 | [
"MIT"
] | null | null | null | class Solution:
def palindromePairs(self, words: List[str]) -> List[List[int]]:
lookup = {}
for index, word in enumerate(words):
lookup[word] = index
ans = set()
for index, word in enumerate(words):
for k in range(len(word) + 1):
current = word[:k][::-1]
if current in lookup and lookup[current] != index :
newword = word + current
if newword == newword[::-1]:
ans.add((index, lookup[current]))
current = word[len(word) - k:][::-1]
if current in lookup and lookup[current]!=index:
newword = current + word
if newword == newword[::-1]:
ans.add((lookup[current], index))
return list(ans) | 38.545455 | 67 | 0.471698 |
fefdeea84966c3c376d5a46f9c21101aefc50772 | 193 | py | Python | landing/views.py | XeryusTC/projman | 3db118d51a9fc362153593f5a862187bdaf0a73c | [
"MIT"
] | null | null | null | landing/views.py | XeryusTC/projman | 3db118d51a9fc362153593f5a862187bdaf0a73c | [
"MIT"
] | 3 | 2015-12-08T17:14:31.000Z | 2016-01-29T18:46:59.000Z | landing/views.py | XeryusTC/projman | 3db118d51a9fc362153593f5a862187bdaf0a73c | [
"MIT"
] | null | null | null | from braces.views import AnonymousRequiredMixin
from django.views.generic import TemplateView
class LandingView(AnonymousRequiredMixin, TemplateView):
template_name = 'landing/index.html'
| 32.166667 | 56 | 0.839378 |
3a00eea590558911d75f7435e45a186ce7c2a0a1 | 30,437 | py | Python | startExperiment.py | aydindemircioglu/radFS | b50b2a78f7c7975751b699b6b74a2761f7fa3501 | [
"MIT",
"Unlicense"
] | 1 | 2022-02-24T02:16:55.000Z | 2022-02-24T02:16:55.000Z | startExperiment.py | aydindemircioglu/radFS | b50b2a78f7c7975751b699b6b74a2761f7fa3501 | [
"MIT",
"Unlicense"
] | null | null | null | startExperiment.py | aydindemircioglu/radFS | b50b2a78f7c7975751b699b6b74a2761f7fa3501 | [
"MIT",
"Unlicense"
] | null | null | null | #!/usr/bin/python3
from functools import partial
from datetime import datetime
import pandas as pd
from joblib import parallel_backend
import random
import numpy as np
from sklearn.calibration import CalibratedClassifierCV
import shutil
import pathlib
import os
import math
import random
from matplotlib import pyplot
import matplotlib.pyplot as plt
import time
import copy
import random
import pickle
from joblib import Parallel, delayed
import tempfile
from xgboost import XGBClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB, BernoulliNB, CategoricalNB, ComplementNB
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from joblib import Parallel, delayed
import itertools
import multiprocessing
import socket
from glob import glob
from collections import OrderedDict
import logging
import mlflow
from typing import Dict, Any
import hashlib
import json
from pymrmre import mrmr
from pprint import pprint
from sklearn.utils._testing import ignore_warnings
from sklearn.exceptions import ConvergenceWarning
from sklearn.feature_selection import RFE, RFECV
from sklearn.dummy import DummyClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, auc, roc_auc_score, precision_recall_curve
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import AdaBoostClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.covariance import EllipticEnvelope
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import IsolationForest, RandomForestClassifier
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest, SelectFromModel
from sklearn.feature_selection import mutual_info_classif
from mlflow import log_metric, log_param, log_artifact, log_dict, log_image
from loadData import *
from utils import *
from parameters import *
from extraFeatureSelections import *
### parameters
# Root directory of the shared mlflow tracking store used by every worker.
TrackingPath = "/data/results/radFS/mlrun.benchmark"
# fselParameters / clfParameters are presumably provided by the wildcard
# `from parameters import *` above -- TODO confirm.
print ("Have", len(fselParameters["FeatureSelection"]["Methods"]), "Feature Selection Methods.")
print ("Have", len(clfParameters["Classification"]["Methods"]), "Classifiers.")
# Like cross-validation: every parameter combination is visited exactly once.
def getExperiments (experimentList, expParameters, sKey, inject = None):
    """Expand every experiment by the full grid over *expParameters*.

    Each experiment in *experimentList* is a list of ``(stepName, settings)``
    tuples.  For every combination of the value lists in *expParameters* a new
    experiment is emitted that extends the original one with
    ``(sKey, combination)``.  *inject* optionally adds fixed key/value pairs
    to every generated settings dict.

    Returns the expanded list (original experiments first, combinations in
    ``itertools.product`` order).
    """
    expanded = []
    names = list(expParameters.keys())
    grid = list(itertools.product(*expParameters.values()))
    for exp in expanded and [] or experimentList:
        for combo in grid:
            settings = dict(zip(names, combo))
            if inject is not None:
                settings.update(inject)
            expanded.append(exp + [(sKey, settings)])
    return expanded
# this is pretty non-generic, maybe there is a better way, for now it works.
def generateAllExperiments (experimentParameters, verbose = False):
    """Flatten the nested parameter specification into the list of all
    experiment configurations.

    Walks the top-level keys of *experimentParameters* in order and expands
    the running experiment list with :func:`getExperiments`.  The keys
    ``BlockingStrategy``, ``FeatureSelection`` (one expansion per requested
    feature count ``N`` and per method) and ``Classification`` (one expansion
    per method) receive special handling; every other key is treated as a
    plain parameter grid.
    """
    allExperiments = [ [] ]
    for key in experimentParameters.keys():
        if verbose:
            print ("Adding", key)
        if key == "BlockingStrategy":
            expanded = []
            blk = experimentParameters[key].copy()
            expanded.extend(getExperiments (allExperiments, blk, key))
            allExperiments = expanded.copy()
        elif key == "FeatureSelection":
            # expand once per feature count N and per selection method
            print ("Adding feature selection")
            expanded = []
            for n in experimentParameters[key]["N"]:
                for m in experimentParameters[key]["Methods"]:
                    fmethod = experimentParameters[key]["Methods"][m].copy()
                    fmethod["nFeatures"] = [n]
                    expanded.extend(getExperiments (allExperiments, fmethod, m))
            allExperiments = expanded.copy()
        elif key == "Classification":
            expanded = []
            for m in experimentParameters[key]["Methods"]:
                expanded.extend(getExperiments (allExperiments, experimentParameters[key]["Methods"][m], m))
            allExperiments = expanded.copy()
        else:
            allExperiments = getExperiments (allExperiments, experimentParameters[key], key)
    return allExperiments
# NOTE(review): if scaling/imputation must not be fitted on all data, the
# fitted scaler and imputer would have to be saved and reused per fold --
# currently they are fitted on whatever X is passed in.
def preprocessData (X, y):
    """Mean-impute missing values and z-score every feature column.

    Returns a new DataFrame (same columns as *X*) and the untouched target *y*.
    SimpleImputer/StandardScaler are presumably provided by the wildcard
    imports at the top of the file -- TODO confirm.
    """
    imputer = SimpleImputer(strategy="mean")
    X = pd.DataFrame(imputer.fit_transform(X), columns = X.columns)
    scaler = StandardScaler()
    X = pd.DataFrame(scaler.fit_transform(X), columns = X.columns)
    return X, y
def applyFS (X, y, fExp):
    """Debug stub: announce the feature-selection experiment and return the
    data unchanged.  The real selection happens in createFSel/executeExperiment."""
    print ("Applying", fExp)
    return X, y
def applyCLF (X, y, cExp, fExp = None):
    """Debug stub: announce the classifier experiment and return a placeholder.
    The real training happens in createClf/executeExperiment."""
    print ("Training", cExp, "on FS:", fExp)
    return "model"
def testModel (y_pred, y_true, idx, fold = None):
    """Evaluate raw probability predictions against the true labels.

    NaNs in the predictions are replaced by 0 (naive bayes can produce NaN,
    e.g. on ramella2018) before thresholding at 0.5.  *fold* is accepted for
    interface compatibility but unused.

    Returns a ``(stats, df, acc)`` triple: a dict with raw/hard predictions,
    labels and sample indices; a DataFrame indexed by *idx*; and the accuracy.
    """
    truth = np.array(y_true)
    preds = np.nan_to_num(np.array(y_pred))
    hard = [int(v >= 0.5) for v in preds]
    acc = accuracy_score(truth, hard)
    df = pd.DataFrame ({"y_true": truth, "y_pred": preds}, index = idx)
    stats = {
        "y_pred": preds,
        "y_test": truth,
        "y_pred_int": hard,
        "idx": np.array(idx).tolist(),
    }
    return stats, df, acc
def getRunID (pDict):
    """Derive a deterministic run identifier from a parameter dictionary.

    The dict is serialized as canonical JSON (sorted keys) and hashed with
    MD5, so equal parameter sets always map to the same 32-char hex id.
    """
    serialized = json.dumps(pDict, sort_keys=True).encode()
    digest = hashlib.md5()
    digest.update(serialized)
    return digest.hexdigest()
def getAUCCurve (modelStats, dpi = 100):
    """Compute ROC-AUC plus the sensitivity/specificity at the optimal cutoff.

    *dpi* is accepted for interface compatibility but unused.  Raises when the
    AUC is NaN (degenerate label/prediction combination), printing the inputs
    first for debugging.  findOptimalCutoff comes from the utils module.
    """
    fpr, tpr, thresholds = roc_curve (modelStats["y_test"], modelStats["y_pred"])
    roc_auc = auc (fpr, tpr)
    if math.isnan(roc_auc):
        print ("ERROR: Unable to compute AUC of ROC curve. NaN detected!")
        print (modelStats["y_test"])
        print (modelStats["y_pred"])
        raise Exception ("Unable to compute AUC")
    sens, spec = findOptimalCutoff (fpr, tpr, thresholds)
    return roc_auc, sens, spec
def getPRCurve (modelStats, dpi = 100):
    """Compute the F1 score and the area under the precision-recall curve.

    *dpi* is accepted for interface compatibility but unused.  On an F1
    failure the inputs are printed before re-raising; a NaN PR-AUC raises
    after printing the inputs for debugging.
    """
    precision, recall, thresholds = precision_recall_curve(modelStats["y_test"], modelStats["y_pred"])
    try:
        f1 = f1_score (modelStats["y_test"], modelStats["y_pred_int"])
    except Exception as err:
        print (modelStats["y_test"])
        print (modelStats["y_pred_int"])
        raise (err)
    f1_auc = auc (recall, precision)
    if math.isnan(f1_auc):
        print ("ERROR: Unable to compute AUC of PR curve. NaN detected!")
        print (modelStats["y_test"])
        print (modelStats["y_pred"])
        raise Exception ("Unable to compute AUC")
    return f1, f1_auc
def logMetrics (foldStats):
    """Pool the per-fold predictions, compute summary metrics and log
    everything (metrics, params, feature lists, prediction CSV) to the
    currently active mlflow run.  Returns an empty dict.
    """
    y_preds = []
    y_test = []
    y_index = []
    aucList = {}
    # pool predictions over all folds and record each fold's ROC-AUC
    for k in foldStats:
        if "fold" in k:
            y_preds.extend(foldStats[k]["y_pred"])
            y_test.extend(foldStats[k]["y_test"])
            y_index.extend(foldStats[k]["idx"])
            fpr, tpr, thresholds = roc_curve (foldStats[k]["y_test"], foldStats[k]["y_pred"])
            area_under_curve = auc (fpr, tpr)
            aucList["AUC" + "_" + str(len(aucList))] = area_under_curve
    auc_mean = np.mean(list(aucList.values()))
    auc_std = np.std(list(aucList.values()))
    aucList["AUC_mean"] = auc_mean
    aucList["AUC_std"] = auc_std
    # metrics on the pooled (all-fold) predictions
    modelStats, df, acc = testModel (y_preds, y_test, idx = y_index, fold = "ALL")
    roc_auc, sens, spec = getAUCCurve (modelStats, dpi = 72)
    f1, f1_auc = getPRCurve (modelStats, dpi = 72)
    #pprint(aucList)
    log_dict(aucList, "aucStats.json")
    # NOTE(review): the pooled prediction stats are stored under the artifact
    # name "params.yml" -- looks like a copy/paste slip; confirm intended name.
    log_dict(modelStats, "params.yml")
    log_metric ("Accuracy", acc)
    log_metric ("Sens", sens)
    log_metric ("Spec", spec)
    log_metric ("AUC", roc_auc)
    log_metric ("F1", f1)
    log_metric ("F1_AUC", f1_auc)
    #print (foldStats["features"])
    log_dict(foldStats["features"], "features.json")
    for k in foldStats["params"]:
        log_param (k, foldStats["params"][k])
    # mlflow can only log files, so write the prediction table to a temp dir
    with tempfile.TemporaryDirectory() as temp_dir:
        predFile = os.path.join(temp_dir, "preds.csv")
        df.to_csv(predFile)
        mlflow.log_artifact(predFile)
    print(".", end = '', flush=True)
    return {}
def createFSel (fExp, cache = True):
    """Build the sklearn feature-selection transformer described by *fExp*.

    *fExp* is a single-entry experiment list ``[(method, params)]`` where
    ``params`` always contains ``nFeatures`` plus method-specific keys
    (e.g. ``C`` for LASSO).  Most branches wrap a scoring function in
    ``SelectKBest`` (higher score = better feature); third-party scorers are
    imported lazily so unused back-ends are not required at import time.

    ``cache`` is currently unused and only kept for interface compatibility.
    NOTE(review): an unknown ``method`` leaves ``pipe`` unbound and raises
    NameError at the final return; ``partial`` is presumably provided by one
    of the wildcard imports at the top of the file -- verify.
    """
    method = fExp[0][0]
    nFeatures = fExp[0][1]["nFeatures"]
    # --- embedded (model-based) selectors -----------------------------------
    if method == "LASSO":
        C = fExp[0][1]["C"]
        clf = LogisticRegression(penalty='l1', max_iter=500, solver='liblinear', C = C)
        pipe = SelectFromModel(clf, prefit=False, max_features=nFeatures)
    if method == "ET":
        clf = ExtraTreesClassifier()
        pipe = SelectFromModel(clf, prefit=False, max_features=nFeatures)
    # --- univariate filters --------------------------------------------------
    if method == "ReliefF":
        from ITMO_FS.filters.univariate import reliefF_measure
        pipe = SelectKBest(reliefF_measure, k = nFeatures)
    if method == "MIM":
        pipe = SelectKBest(mutual_info_classif, k = nFeatures)
    if method == "Chi2":
        from ITMO_FS.filters.univariate import chi2_measure
        pipe = SelectKBest(chi2_measure, k = nFeatures)
    if method == "Anova":
        from ITMO_FS.filters.univariate import anova
        pipe = SelectKBest(anova, k = nFeatures)
    if method == "InformationGain":
        from ITMO_FS.filters.univariate import information_gain
        pipe = SelectKBest(information_gain, k = nFeatures)
    if method == "GiniIndex":
        from ITMO_FS.filters.univariate import gini_index
        pipe = SelectKBest(gini_index, k = nFeatures)
    if method == "SUMeasure":
        from ITMO_FS.filters.univariate import su_measure
        pipe = SelectKBest(su_measure, k = nFeatures)
    # --- selectors that only return an index list: converted to 0/1 scores
    # so they fit the SelectKBest interface ----------------------------------
    if method == "FCBF":
        from ITMO_FS.filters.multivariate.FCBF import FCBFDiscreteFilter
        def fcbf_fct (X, y):
            fcbf = FCBFDiscreteFilter()
            fcbf.fit(X,y)
            idxList = fcbf.selected_features
            scores = [1 if idx in idxList else 0 for idx in range(X.shape[1])]
            return np.array(scores)
        pipe = SelectKBest(fcbf_fct, k = nFeatures)
    if method == "MCFS":
        from ITMO_FS.filters import MCFS
        def mcfs_fct (X, y):
            mcfs = MCFS(nFeatures, scheme='0-1') # dot is broken
            idxList = mcfs.feature_ranking(X)
            scores = [1 if idx in idxList else 0 for idx in range(X.shape[1])]
            return np.array(scores)
        pipe = SelectKBest(mcfs_fct, k = nFeatures)
    if method == "UDFS":
        from ITMO_FS.filters import UDFS
        def udfs_fct (X, y):
            udfs = UDFS(nFeatures)
            idxList = udfs.feature_ranking(X)
            scores = [1 if idx in idxList else 0 for idx in range(X.shape[1])]
            return np.array(scores)
        pipe = SelectKBest(udfs_fct, k = nFeatures)
    # --- correlation-based filters -------------------------------------------
    if method == "Pearson":
        from ITMO_FS.filters.univariate import pearson_corr
        pipe = SelectKBest(pearson_corr, k = nFeatures)
    if method == "Kendall":
        from scipy.stats import kendalltau
        def kendall_corr_fct (X, y):
            # score = 1 - p-value of the Kendall tau test per feature
            scores = [0]*X.shape[1]
            for k in range(X.shape[1]):
                scores[k] = 1-kendalltau(X[:,k], y)[1]
            return np.array(scores)
        pipe = SelectKBest(kendall_corr_fct, k = nFeatures)
    if method == "Fechner":
        from ITMO_FS.filters.univariate import fechner_corr
        pipe = SelectKBest(fechner_corr, k = nFeatures)
    if method == "Spearman":
        from ITMO_FS.filters.univariate import spearman_corr
        pipe = SelectKBest(spearman_corr, k = nFeatures)
    if method == "Laplacian":
        from ITMO_FS.filters.univariate import laplacian_score
        def laplacian_score_fct (X, y):
            scores = laplacian_score(X,y)
            # lower laplacian score = better feature, so negate for SelectKBest
            return -scores
        pipe = SelectKBest(laplacian_score_fct, k = nFeatures)
    if method == "FisherScore":
        from ITMO_FS.filters.univariate import f_ratio_measure
        pipe = SelectKBest(f_ratio_measure, k = nFeatures)
    if method == "Relief":
        from extraFeatureSelections import relief_measure
        pipe = SelectKBest(relief_measure, k = nFeatures)
    # --- information-theoretic rankers (skfeature): the ranked index list is
    # converted to monotonically decreasing scores ----------------------------
    if method == "JMI":
        from skfeature.function.information_theoretical_based import JMI
        def jmi_score (X, y, nFeatures):
            sol, _, _ = JMI.jmi (X,y, n_selected_features = nFeatures)
            scores = [0]*X.shape[1]
            for j,z in enumerate(sol):
                scores[z] = (len(sol) - j)/len(sol)
            scores = np.asarray(scores, dtype = np.float32)
            return scores
        jmi_score_fct = partial(jmi_score, nFeatures = nFeatures)
        pipe = SelectKBest(jmi_score_fct, k = nFeatures)
    if method == "ICAP":
        from skfeature.function.information_theoretical_based import ICAP
        def icap_score (X, y, nFeatures):
            sol, _, _ = ICAP.icap (X,y, n_selected_features = nFeatures)
            scores = [0]*X.shape[1]
            for j,z in enumerate(sol):
                scores[z] = (len(sol) - j)/len(sol)
            scores = np.asarray(scores, dtype = np.float32)
            return scores
        icap_score_fct = partial(icap_score, nFeatures = nFeatures)
        pipe = SelectKBest(icap_score_fct, k = nFeatures)
    # not exported
    if method == "DCSF":
        from ITMO_FS.filters.multivariate import DCSF
        def dcsf_score_fct (X, y):
            selected_features = []
            other_features = [i for i in range(0, X.shape[1]) if i not in selected_features]
            scores = DCSF(np.array(selected_features), np.array(other_features), X, y)
            return scores
        pipe = SelectKBest(dcsf_score_fct, k = nFeatures)
    if method == "CIFE":
        from skfeature.function.information_theoretical_based import CIFE
        def cife_score (X, y, nFeatures):
            sol, _, _ = CIFE.cife (X,y, n_selected_features = nFeatures)
            scores = [0]*X.shape[1]
            for j,z in enumerate(sol):
                scores[z] = (len(sol) - j)/len(sol)
            scores = np.asarray(scores, dtype = np.float32)
            return scores
        cife_score_fct = partial(cife_score, nFeatures = nFeatures)
        pipe = SelectKBest(cife_score_fct, k = nFeatures)
    # should be the same as MIM
    if method == "MIFS":
        from ITMO_FS.filters.multivariate import MIFS
        def mifs_score_fct (X, y):
            selected_features = []
            other_features = [i for i in range(0, X.shape[1]) if i not in selected_features]
            scores = MIFS(np.array(selected_features), np.array(other_features), X, y, beta = 0.5)
            return scores
        pipe = SelectKBest(mifs_score_fct, k = nFeatures)
    if method == "CMIM":
        from skfeature.function.information_theoretical_based import CMIM
        def cmim_score (X, y, nFeatures):
            sol, _, _ = CMIM.cmim (X,y, n_selected_features = nFeatures)
            scores = [0]*X.shape[1]
            for j,z in enumerate(sol):
                scores[z] = (len(sol) - j)/len(sol)
            scores = np.asarray(scores, dtype = np.float32)
            return scores
        cmim_score_fct = partial(cmim_score, nFeatures = nFeatures)
        pipe = SelectKBest(cmim_score_fct, k = nFeatures)
    if method == "MRI":
        from ITMO_FS.filters.multivariate import MRI
        def mri_score_fct (X, y):
            selected_features = []
            other_features = [i for i in range(0, X.shape[1]) if i not in selected_features]
            scores = MRI(np.array(selected_features), np.array(other_features), X, y)
            return scores
        pipe = SelectKBest(mri_score_fct, k = nFeatures)
    if method == "MRMR":
        def mrmr_score (X, y, nFeatures):
            Xp = pd.DataFrame(X, columns = range(X.shape[1]))
            yp = pd.DataFrame(y, columns=['Target'])
            # we need to pre-specify the max solution length...
            solutions = mrmr.mrmr_ensemble(features = Xp, targets = yp, solution_length=nFeatures, solution_count=1)
            scores = [0]*Xp.shape[1]
            for j,z in enumerate(solutions.iloc[0][0]):
                scores[z] = (len(solutions.iloc[0][0]) - j)/len(solutions.iloc[0][0])
            scores = np.asarray(scores, dtype = np.float32)
            return scores
        mrmr_score_fct = partial(mrmr_score, nFeatures = nFeatures)
        pipe = SelectKBest(mrmr_score_fct, k = nFeatures)
    if method == "MRMRe":
        def mrmre_score (X, y, nFeatures):
            Xp = pd.DataFrame(X, columns = range(X.shape[1]))
            yp = pd.DataFrame(y, columns=['Target'])
            # we need to pre-specify the max solution length...
            solutions = mrmr.mrmr_ensemble(features = Xp, targets = yp, solution_length=nFeatures, solution_count=5)
            # aggregate the 5 ensemble solutions into one normalized score vector
            scores = [0]*Xp.shape[1]
            for k in solutions.iloc[0]:
                for j, z in enumerate(k):
                    scores[z] = scores[z] + Xp.shape[1] - j
            scores = np.asarray(scores, dtype = np.float32)
            scores = scores/np.sum(scores)
            return scores
        mrmre_score_fct = partial(mrmre_score, nFeatures = nFeatures)
        pipe = SelectKBest(mrmre_score_fct, k = nFeatures)
    if method == "SVMRFE":
        def svmrfe_score_fct (X, y):
            svc = LinearSVC (C=1)
            rfe = RFECV(estimator=svc, step=0.10, scoring='roc_auc', n_jobs=1)
            rfe.fit(X, y)
            # BUGFIX: RFECV.ranking_ assigns 1 to the BEST features and larger
            # values to worse ones, but SelectKBest keeps the HIGHEST scores;
            # using ranking_ directly selected the k WORST features.  Invert
            # the ranking (same convention as the Boruta branch below).
            scores = np.max(rfe.ranking_) - rfe.ranking_
            return scores
        pipe = SelectKBest(svmrfe_score_fct, k = nFeatures)
    if method == "Boruta":
        import boruta
        def boruta_fct (X, y):
            # NOTE(review): n_estimators is set to the feature budget, which
            # looks accidental (it controls the RF size) -- confirm.
            rfc = RandomForestClassifier(n_jobs=-1, class_weight='balanced_subsample')
            b = boruta.BorutaPy (rfc, n_estimators = nFeatures)
            b.fit(X, y)
            scores = np.max(b.ranking_) - b.ranking_
            return scores
        pipe = SelectKBest(boruta_fct, k = nFeatures)
    if method == "RandomizedLR":
        from sklearn.utils import resample
        def randlr_fct (X, y):
            # NOTE(review): comment said "only 100 instead of 1000" but only
            # 25 bootstrap rounds are run -- confirm intended count.
            scores = None
            for k in range(25):
                boot = resample(range(0,X.shape[0]), replace=True, n_samples=X.shape[0], random_state=k)
                model = LogisticRegression(solver = 'lbfgs', random_state = k)
                model.fit(X[boot,:], y[boot])
                if scores is None:
                    scores = model.coef_[0]*0
                scores = scores + np.abs(model.coef_[0])
            return scores
        pipe = SelectKBest(randlr_fct, k = nFeatures)
    # --- statistical tests ----------------------------------------------------
    if method == "tScore":
        from skfeature.function.statistical_based import t_score
        pipe = SelectKBest(t_score.t_score, k = nFeatures)
    if method == "Wilcoxon":
        from extraFeatureSelections import wilcoxon_score
        pipe = SelectKBest(wilcoxon_score, k = nFeatures)
    if method == "Variance":
        def variance (X, y):
            scores = np.var(X, axis = 0)
            return scores
        pipe = SelectKBest(variance, k = nFeatures)
    if method == "TraceRatio":
        from skfeature.function.similarity_based import trace_ratio
        def trace_ratio_score (X, y, nFeatures):
            fidx, fscore, _ = trace_ratio.trace_ratio (X,y, n_selected_features = nFeatures)
            scores = [0]*X.shape[1]
            for j in range(len(fidx)):
                scores[fidx[j]] = fscore[j]
            scores = np.asarray(scores, dtype = np.float32)
            return scores
        trace_ratio_score_fct = partial(trace_ratio_score, nFeatures = nFeatures)
        pipe = SelectKBest(trace_ratio_score_fct, k = nFeatures)
    if method == "Bhattacharyya":
        def bhattacharyya_score_fct (X, y):
            import cv2
            # distance between the normalized feature and label histograms;
            # negated because a SMALLER distance means a better feature
            yn = y/np.sum(y)
            yn = np.asarray(yn, dtype = np.float32)
            scores = [0]*X.shape[1]
            for j in range(X.shape[1]):
                xn = (X[:,j] - np.min(X[:,j]))/(np.max(X[:,j] - np.min(X[:,j])))
                xn = xn/np.sum(xn)
                xn = np.asarray(xn, dtype = np.float32)
                scores[j] = cv2.compareHist(xn, yn, cv2.HISTCMP_BHATTACHARYYA)
            scores = np.asarray(scores, dtype = np.float32)
            return -scores
        pipe = SelectKBest(bhattacharyya_score_fct, k = nFeatures)
    if method == "None":
        # pass-through selector: every feature gets the same score, keep all
        def dummy_score (X, y):
            scores = np.ones(X.shape[1])
            return scores
        pipe = SelectKBest(dummy_score, k = 'all')
    return pipe
def createClf (cExp):
    """Instantiate the classifier described by *cExp*.

    *cExp* is a single-entry experiment list ``[(method, params)]``.  Each
    branch constructs the corresponding sklearn/xgboost estimator with fixed
    seeds where reproducibility matters.  An unknown method leaves ``model``
    unbound and raises NameError at the return.
    NOTE(review): CalibratedClassifierCV is not imported explicitly at the
    top of the file -- presumably provided by a wildcard import; verify.
    """
    #print (cExp)
    method, params = cExp[0]
    if method == "Constant":
        model = DummyClassifier()
    elif method == "SVM":
        # wrap LinearSVC so predict_proba becomes available
        model = CalibratedClassifierCV(LinearSVC(C = params["C"]))
    elif method == "RBFSVM":
        model = SVC(kernel = "rbf", C = params["C"], gamma = params["gamma"], probability = True)
    elif method == "LDA":
        model = LinearDiscriminantAnalysis(solver = 'lsqr', shrinkage = 'auto')
    elif method == "QDA":
        model = QuadraticDiscriminantAnalysis()
    elif method == "LogisticRegression":
        model = LogisticRegression(solver = 'lbfgs', C = params["C"], random_state = 42)
    elif method == "RandomForest":
        model = RandomForestClassifier(n_estimators = params["n_estimators"])
    elif method == "kNN":
        model = KNeighborsClassifier(params["N"])
    elif method == "XGBoost":
        model = XGBClassifier(learning_rate = params["learning_rate"], n_estimators = params["n_estimators"], n_jobs = 1, use_label_encoder=False, eval_metric = "logloss", random_state = 42)
    elif method == "XGBoost_GPU":
        model = XGBClassifier(learning_rate = params["learning_rate"], n_estimators = params["n_estimators"], use_label_encoder=False, eval_metric = "logloss", tree_method='gpu_hist', random_state = 42)
    elif method == "NaiveBayes":
        model = GaussianNB()
    elif method == "NeuralNetwork":
        model = MLPClassifier (hidden_layer_sizes=(params["layer_1"], params["layer_2"], params["layer_3"],), random_state=42, max_iter = 1000)
    return model
@ignore_warnings(category=ConvergenceWarning)
@ignore_warnings(category=UserWarning)
def executeExperiment (fselExperiments, clfExperiments, data, dataID):
    """Run the cross-validated grid of (feature selection x classifier)
    experiments on one dataset and log every run to mlflow.

    Each (fExp, cExp) pair becomes one mlflow run named by its parameter hash;
    runs whose ``<hash>.ID`` artifact already exists are skipped.  Ten
    stratified folds (fixed seed 42) make all experiments comparable.
    NOTE(review): ``os`` and ``datetime`` are presumably provided by the
    wildcard imports at the top of the file -- verify.
    """
    mlflow.set_tracking_uri(TrackingPath)
    y = data["Target"]
    X = data.drop(["Target"], axis = 1)
    X, y = preprocessData (X, y)
    # need a fixed set of folds to be comparable
    kfolds = RepeatedStratifiedKFold(n_splits = 10, n_repeats = 1, random_state = 42)
    # make sure experiment gets selected: experiment creation can race between
    # parallel workers, so retry until it succeeds
    raceOK = False
    while raceOK == False:
        try:
            mlflow.set_experiment(dataID)
            raceOK = True
        except:
            time.sleep(0.5)
            pass
    stats = {}
    for i, fExp in enumerate(fselExperiments):
        np.random.seed(i)
        random.seed(i)
        for j, cExp in enumerate(clfExperiments):
            timings = {}
            foldStats = {}
            foldStats["features"] = []
            foldStats["params"] = {}
            foldStats["params"].update(fExp)
            foldStats["params"].update(cExp)
            run_name = getRunID (foldStats["params"])
            current_experiment = dict(mlflow.get_experiment_by_name(dataID))
            experiment_id = current_experiment['experiment_id']
            # check if we have that already
            # recompute using mlflow did not work, so i do my own.
            if len(glob (os.path.join(TrackingPath, str(experiment_id), "*/artifacts/" + run_name + ".ID"))) > 0:
                print ("X", end = '', flush = True)
                continue
            # log what we do next
            with open(os.path.join(TrackingPath, "curExperiments.txt"), "a") as f:
                f.write("(RUN) " + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + str(fExp) + "+" + str(cExp) + "\n")
            expVersion = '_'.join([k for k in foldStats["params"] if "Experiment" not in k])
            pID = str(foldStats["params"])
            # register run in mlflow now
            run_id = getRunID (foldStats["params"])
            mlflow.start_run(run_name = run_id, tags = {"Version": expVersion, "pID": pID})
            # this is stupid, but well, log a file with name=runid so the
            # glob-based "already done" check above can find it later
            log_dict(foldStats["params"], run_id+".ID")
            for k, (train_index, test_index) in enumerate(kfolds.split(X, y)):
                X_train, X_test = X.iloc[train_index].copy(), X.iloc[test_index].copy()
                y_train, y_test = y[train_index].copy(), y[test_index].copy()
                # log fold index too
                log_dict({"Test": test_index.tolist(), "Train": train_index.tolist()}, "CVIndex_"+str(k)+".json")
                fselector = createFSel (fExp)
                with np.errstate(divide='ignore',invalid='ignore'):
                    timeFSStart = time.time()
                    fselector.fit (X_train.copy(), y_train.copy())
                    timeFSEnd = time.time()
                timings["Fsel_Time_Fold_" + str(k)] = timeFSEnd - timeFSStart
                feature_idx = fselector.get_support()
                selected_feature_names = X_train.columns[feature_idx].copy()
                all_feature_names = X_train.columns.copy()
                # log the 0/1 selection mask too
                fpat = np.zeros(X_train.shape[1])
                # BUGFIX: the loop variable here was `j`, shadowing the
                # classifier index `j` of the enclosing loop and corrupting
                # the keys of `stats` below; use a dedicated name instead.
                for fi, selected in enumerate(feature_idx):
                    fpat[fi] = int(selected)
                # just once
                if k == 0:
                    log_dict({name: fpat[idx] for idx, name in enumerate(all_feature_names)}, "FNames_"+str(k)+".json")
                    log_dict({idx: fpat[idx] for idx, name in enumerate(all_feature_names)}, "FPattern_"+str(k)+".json")
                foldStats["features"].append(list(selected_feature_names.values))
                # apply selector-- now the data is numpy, not pandas, lost its names
                X_fs_train = fselector.transform (X_train)
                y_fs_train = y_train
                X_fs_test = fselector.transform (X_test)
                y_fs_test = y_test
                # check if we have any features
                if X_fs_train.shape[1] > 0:
                    classifier = createClf (cExp)
                    timeClfStart = time.time()
                    classifier.fit (X_fs_train, y_fs_train)
                    timeClfEnd = time.time()
                    timings["Clf_Time_Fold_" + str(k)] = timeClfEnd - timeClfStart
                    y_pred = classifier.predict_proba (X_fs_test)
                    y_pred = y_pred[:,1]
                    foldStats["fold_"+str(k)], df, acc = testModel (y_pred, y_fs_test, idx = test_index, fold = k)
                else:
                    # this is some kind of bug. if lasso does not select any feature and we have the constant
                    # classifier, then we cannot just put a zero there. else we get a different model than
                    # the constant predictor. we fix this by testing
                    if cExp[0][0] == "Constant":
                        print ("F:", fExp, end = '')
                        classifier = createClf (cExp)
                        classifier.fit (X_train.iloc[:,0:2], y_train)
                        y_pred = classifier.predict_proba (X_fs_test)[:,1]
                    else:
                        # NOTE(review): the original comment said "take 0 as a
                        # prediction" but the code predicts 1 for every sample;
                        # behaviour kept as-is, confirm which was intended.
                        y_pred = y_test*0 + 1
                    foldStats["fold_"+str(k)], df, acc = testModel (y_pred, y_fs_test, idx = test_index, fold = k)
            stats[str(i)+"_"+str(j)] = logMetrics (foldStats)
            log_dict(timings, "timings.json")
            mlflow.end_run()
            with open(os.path.join(TrackingPath, "curExperiments.txt"), "a") as f:
                f.write("(DONE)" + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + str(fExp) + "+" + str(cExp) + "\n")
def executeExperiments (z):
    """Unpack one (fselExp, clfExp, data, datasetID) work item and run it as
    a single-element experiment grid; entry point for the joblib workers."""
    fsel, clf, data, datasetID = z
    executeExperiment ([fsel], [clf], data, datasetID)
if __name__ == "__main__":
    print ("Hi.")
    logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
    # load data first
    datasets = {}
    dList = ["Li2020", "Carvalho2018", "Hosny2018A", "Hosny2018B", "Hosny2018C", "Ramella2018", "Keek2020", "Park2020", "Song2020" , "Toivonen2019"]
    for d in dList:
        # NOTE(review): dataset classes are resolved by name via eval(); safe
        # only because dList is hard-coded -- never feed external input here.
        eval (d+"().info()")
        datasets[d] = eval (d+"().getData('./data/')")
        print ("\tLoaded data with shape", datasets[d].shape)
        # avoid race conditions later: pre-create the mlflow experiment per
        # dataset before the parallel workers touch it
        try:
            mlflow.set_tracking_uri(TrackingPath)
            mlflow.create_experiment(d)
            mlflow.set_experiment(d)
            time.sleep(3)
        except:
            pass
    for d in dList:
        print ("\nExecuting", d)
        data = datasets[d]
        # generate all experiments
        fselExperiments = generateAllExperiments (fselParameters)
        print ("Created", len(fselExperiments), "feature selection parameter settings")
        clfExperiments = generateAllExperiments (clfParameters)
        print ("Created", len(clfExperiments), "classifier parameter settings")
        print ("Total", len(clfExperiments)*len(fselExperiments), "experiments")
        # generate list of experiment combinations
        clList = []
        for fe in fselExperiments:
            for clf in clfExperiments:
                clList.append( (fe, clf, data, d))
        # execute
        ncpus = 16
        # NOTE(review): parallel_backend is not imported explicitly above --
        # presumably provided by one of the wildcard imports; verify.
        with parallel_backend("loky", inner_max_num_threads=1):
            fv = Parallel (n_jobs = ncpus)(delayed(executeExperiments)(c) for c in clList)
    #
| 36.451497 | 182 | 0.610934 |
3a01b5b20e16dc59b45be5e462160adb8ae019e0 | 692 | py | Python | dm/algorithms/HungarianAlg.py | forons/distance-measurement | 39741aefed0aa2f86e8959338c867398ce6494c7 | [
"MIT"
] | null | null | null | dm/algorithms/HungarianAlg.py | forons/distance-measurement | 39741aefed0aa2f86e8959338c867398ce6494c7 | [
"MIT"
] | null | null | null | dm/algorithms/HungarianAlg.py | forons/distance-measurement | 39741aefed0aa2f86e8959338c867398ce6494c7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from scipy import optimize, sparse
from .AbstractDistanceAlg import AbstractDistanceAlg
class HungarianAlg(AbstractDistanceAlg):
    """Optimal one-to-one matching via the Hungarian algorithm
    (``scipy.optimize.linear_sum_assignment``) on a size x size cost matrix."""

    def __init__(self, df, size):
        super().__init__(df, size)

    def compute_matching(self):
        """Solve the assignment problem over the collected pairwise distances.

        Returns the list of ``(row, col, cost)`` matches, also appended to
        ``self.matches``.
        """
        distances = self.df.collect()
        # BUGFIX: coo_matrix does not support item assignment (TypeError), and
        # linear_sum_assignment needs a dense matrix anyway -- build a dense
        # cost matrix instead.
        # NOTE(review): pairs absent from `distances` keep cost 0, which the
        # solver treats as cheapest; confirm that matches the intended
        # semantics for missing distances.
        cost_matrix = np.zeros((self.size, self.size), dtype=np.float32)
        for row, col, dist in distances:
            cost_matrix[row, col] = dist
        # BUGFIX: linear_sum_assignment returns (row_indices, col_indices);
        # pair them with zip instead of iterating the raw 2-tuple of arrays.
        row_ind, col_ind = optimize.linear_sum_assignment(cost_matrix)
        for row, col in zip(row_ind, col_ind):
            self.matches.append((row, col, cost_matrix[row, col]))
        return self.matches
| 31.454545 | 81 | 0.669075 |
3a025d2fa53d6a334efac01743db85a3f7705e2e | 757 | py | Python | illallangi/delugeapi/filtercollection.py | illallangi/DelugeAPI | 8a949c0cf505992d5e6363d1ff3a9ed5147fc1a1 | [
"MIT"
] | null | null | null | illallangi/delugeapi/filtercollection.py | illallangi/DelugeAPI | 8a949c0cf505992d5e6363d1ff3a9ed5147fc1a1 | [
"MIT"
] | null | null | null | illallangi/delugeapi/filtercollection.py | illallangi/DelugeAPI | 8a949c0cf505992d5e6363d1ff3a9ed5147fc1a1 | [
"MIT"
] | null | null | null | from collections.abc import Sequence
from .filter import Filter
class FilterCollection(Sequence):
    """Immutable sequence of Filter objects belonging to one Deluge host."""

    def __init__(self, host, filters, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.host = host
        # wrap every raw filter dict in a Filter bound to the same host
        self._filters = [Filter(host, item) for item in filters]

    def __repr__(self):
        where = f'{self.host.username}@{self.host.hostname}:{self.host.port}'
        return f'{self.__class__}({where})[{len(self._filters)}]'

    def __str__(self):
        count = len(self._filters)
        suffix = '' if count == 1 else 's'
        return f'{count} Filter{suffix}'

    def __iter__(self):
        return iter(self._filters)

    def __getitem__(self, key):
        return self._filters[key]

    def __len__(self):
        return len(self._filters)
| 29.115385 | 112 | 0.649934 |
3a04e44a83831c5da0bf2cc7640fd1129f243146 | 97 | py | Python | odds/__init__.py | nik849/Odds | a2403e5f5428fcf826322b59410471ec97a6aa26 | [
"MIT"
] | 1 | 2017-11-05T20:41:12.000Z | 2017-11-05T20:41:12.000Z | odds/__init__.py | nik849/Odds | a2403e5f5428fcf826322b59410471ec97a6aa26 | [
"MIT"
] | 2 | 2021-03-31T18:43:15.000Z | 2021-12-13T19:46:28.000Z | odds/__init__.py | nik849/Odds | a2403e5f5428fcf826322b59410471ec97a6aa26 | [
"MIT"
] | null | null | null | """
:copyright: Nick Hale
:license: MIT, see LICENSE for more details.
"""
__version__ = '0.0.1'
| 16.166667 | 44 | 0.670103 |
3a078ca91eafb1c88f7c5c3ad6afd4b81ea83805 | 1,386 | py | Python | src/io/protobuf_test.py | fritzo/pomagma | 224bb6adab3fc68e2d853e6365b4b86a8f7f468f | [
"Apache-2.0"
] | 10 | 2015-06-09T00:25:01.000Z | 2019-06-11T16:07:31.000Z | src/io/protobuf_test.py | fritzo/pomagma | 224bb6adab3fc68e2d853e6365b4b86a8f7f468f | [
"Apache-2.0"
] | 25 | 2015-03-23T23:16:01.000Z | 2017-08-29T03:35:59.000Z | src/io/protobuf_test.py | fritzo/pomagma | 224bb6adab3fc68e2d853e6365b4b86a8f7f468f | [
"Apache-2.0"
] | null | null | null | from google.protobuf import text_format
from pomagma.io import protobuf_test_pb2
from pomagma.io.protobuf import InFile, OutFile
from pomagma.util import in_temp_dir
from pomagma.util.testing import for_each
def parse(text, Message=protobuf_test_pb2.TestMessage):
    """Parse a text-format protobuf string into a fresh *Message* instance."""
    msg = Message()
    text_format.Merge(text, msg)
    return msg
EXAMPLES = [
parse(''),
parse('''
optional_string: 'test'
'''),
parse('''
repeated_string: 'test1'
repeated_string: 'test2'
'''),
parse('''
optional_string: 'test'
repeated_string: 'test1'
repeated_string: 'test2'
optional_message: {
repeated_message: {}
repeated_message: {
optional_string: 'sub sub 1'
repeated_string: 'sub'
}
repeated_message: {
optional_string: 'sub 1'
}
repeated_message: {
repeated_string: 'sub 2'
}
}
'''),
]
@for_each(EXAMPLES)
def test_write_read(expected):
    """Round-trip *expected* through OutFile/InFile and check equality.

    BUGFIX: the print used the Python-2 statement form (``print '...'``),
    which is a SyntaxError on Python 3; the parenthesised call below behaves
    identically on Python 2 and also works on Python 3.
    """
    print('Testing read(write({}))'.format(expected))
    actual = protobuf_test_pb2.TestMessage()
    with in_temp_dir():
        filename = 'test.pb'
        with OutFile(filename) as f:
            f.write(expected)
        with InFile(filename) as f:
            f.read(actual)
    assert actual == expected
| 24.75 | 55 | 0.585859 |
3a079d600f0144ca6ea7cb473635485bda6d1725 | 2,039 | py | Python | python/oneflow/test/modules/test_linspace.py | lizhimeng159/oneflow | b5f504d7a2185c6d6ac2c97bc5f9a2a3dd78883d | [
"Apache-2.0"
] | null | null | null | python/oneflow/test/modules/test_linspace.py | lizhimeng159/oneflow | b5f504d7a2185c6d6ac2c97bc5f9a2a3dd78883d | [
"Apache-2.0"
] | null | null | null | python/oneflow/test/modules/test_linspace.py | lizhimeng159/oneflow | b5f504d7a2185c6d6ac2c97bc5f9a2a3dd78883d | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
@flow.unittest.skip_unless_1n1d()
class TestLinspace(flow.unittest.TestCase):
    """Tests for oneflow.linspace; autotest compares against the PyTorch
    reference implementation with randomized arguments."""

    @autotest(n=30, auto_backward=False, rtol=1e-5, atol=1e-5, check_graph=True)
    def test_linspace_int_with_random_data(test_case):
        # random integer start/end/steps, steps bounded by the interval width
        start = random().to(int)
        end = start + random().to(int)
        steps = random(0, end - start).to(int)
        x = torch.linspace(start=start, end=end, steps=steps)
        device = random_device()
        # NOTE(review): the .to(device) result is discarded and x is returned
        # unchanged -- presumably x = x.to(device) was intended; confirm.
        x.to(device)
        return x

    @autotest(n=30, auto_backward=False, rtol=1e-5, atol=1e-5, check_graph=True)
    def test_linspace_float_with_random_data(test_case):
        # same as above with float start/end
        start = random()
        end = start + random()
        steps = random(0, end - start).to(int)
        x = torch.linspace(start=start, end=end, steps=steps)
        device = random_device()
        # NOTE(review): .to(device) result discarded here as well -- confirm.
        x.to(device)
        return x

    def test_consistent_naive(test_case):
        # consistent (global) tensor variant: check sbp and placement round-trip
        placement = flow.placement("cpu", {0: [0]})
        sbp = (flow.sbp.broadcast,)
        x = flow.linspace(start=0, end=10, steps=2, placement=placement, sbp=sbp)
        test_case.assertEqual(x.sbp, sbp)
        test_case.assertEqual(x.placement, placement)
if __name__ == "__main__":
    # allow running this test module directly as a script
    unittest.main()
| 33.983333 | 82 | 0.680726 |
3a081670c8619a8dbe9b2b1bb3b4d9935ec6801d | 1,577 | py | Python | alexia/apps/general/templatetags/menuitem.py | LaudateCorpus1/alexia-1 | 9c0d3c90c0ffe2237299a561b755b9c17905e354 | [
"BSD-3-Clause"
] | 8 | 2015-06-29T20:01:22.000Z | 2020-10-19T13:49:38.000Z | alexia/apps/general/templatetags/menuitem.py | LaudateCorpus1/alexia-1 | 9c0d3c90c0ffe2237299a561b755b9c17905e354 | [
"BSD-3-Clause"
] | 67 | 2015-10-05T16:57:14.000Z | 2022-03-28T19:57:36.000Z | alexia/apps/general/templatetags/menuitem.py | LaudateCorpus1/alexia-1 | 9c0d3c90c0ffe2237299a561b755b9c17905e354 | [
"BSD-3-Clause"
] | 6 | 2015-10-05T13:54:34.000Z | 2021-11-30T05:11:58.000Z | import re
from django.template import Library, Node, TemplateSyntaxError
from django.template.base import token_kwargs
from django.urls import Resolver404, resolve
from django.utils.html import format_html
register = Library()
class MenuItemNode(Node):
    """Template node for the ``menuitem`` tag: wraps its contents in an
    ``<li>`` that gains the ``active`` CSS class when the regex *pattern*
    matches the dotted path of the view resolved for the current request."""

    def __init__(self, nodelist, pattern, kwargs):
        self.nodelist = nodelist      # tag body, rendered inside the <li>
        self.pattern = pattern        # FilterExpression resolving to a regex
        self.kwargs = kwargs          # optional extras; only 'class' is used

    def render(self, context):
        pattern = self.pattern.resolve(context)
        classes = []
        if 'class' in self.kwargs:
            classes = self.kwargs['class'].resolve(context).split()
        try:
            func = resolve(context['request'].path).func
        except Resolver404:
            # current path resolves to no view: render nothing at all
            return ''
        # match the pattern against the view's dotted module.function path
        match = func.__module__ + '.' + func.__name__
        if re.search(pattern, match):
            classes.append('active')
        if classes:
            open_tag = format_html('<li class="{}">', ' '.join(classes))
        else:
            open_tag = format_html('<li>')
        content = self.nodelist.render(context)
        close_tag = format_html('</li>')
        # NOTE(review): concatenating a SafeString with the (possibly unsafe)
        # rendered content yields a plain str; confirm no re-escaping issue.
        return open_tag + content + close_tag
@register.tag
def menuitem(parser, token):
    """Compile the ``{% menuitem pattern [class="..."] %}...{% endmenuitem %}``
    block tag into a :class:`MenuItemNode`.

    The first argument is a filter expression yielding a regex matched against
    the resolved view's dotted path; remaining ``key=value`` pairs become the
    node's kwargs.
    """
    pieces = token.split_contents()
    tag_name, args = pieces[0], pieces[1:]
    if not args:
        raise TemplateSyntaxError("'%s' takes at least one argument, a pattern matching a view name." % tag_name)
    pattern = parser.compile_filter(args[0])
    kwargs = token_kwargs(args[1:], parser)
    nodelist = parser.parse(('endmenuitem',))
    parser.delete_first_token()
    return MenuItemNode(nodelist, pattern, kwargs)
| 29.203704 | 112 | 0.637286 |
3a0830f683c3bcea14ab59eb19f8a4474d9635b6 | 3,984 | py | Python | superai/log/logger.py | mysuperai/superai-sdk | 796c411c6ab69209600bf727e8fd08c20f4d67b1 | [
"Apache-2.0"
] | 1 | 2020-12-03T18:18:16.000Z | 2020-12-03T18:18:16.000Z | superai/log/logger.py | mysuperai/superai-sdk | 796c411c6ab69209600bf727e8fd08c20f4d67b1 | [
"Apache-2.0"
] | 13 | 2021-02-22T18:27:58.000Z | 2022-02-10T08:14:10.000Z | superai/log/logger.py | mysuperai/superai-sdk | 796c411c6ab69209600bf727e8fd08c20f4d67b1 | [
"Apache-2.0"
] | 1 | 2021-04-27T12:38:47.000Z | 2021-04-27T12:38:47.000Z | """ Log initializer """
from __future__ import absolute_import, division, print_function, unicode_literals
import itertools
import logging
import sys
import os
from logging.handlers import RotatingFileHandler
from rich.logging import RichHandler
from typing import List
# Re-exported level aliases so callers need not import ``logging`` directly.
DEBUG = logging.DEBUG
INFO = logging.INFO
ERROR = logging.ERROR
WARNING = logging.WARNING
DEFAULT_LOG_FILENAME = "superai.log"
# Verbose %-style format used for files and non-interactive consoles.
_log_format = (
    "%(asctime)s - %(levelname)s - %(filename)s - %(threadName)s - [%(name)s:%(funcName)s:%(lineno)s] - %(message)s"
)
# Short format for Rich, which renders timestamp/level itself.
_rich_log_format = "%(message)s - %(threadName)s"
_date_format = "%Y-%m-%d %H:%M:%S"
# NOTE(review): "{" style does not match the %-style format strings above;
# passing it to logging.Formatter together with them is inconsistent.
_style = "{"
# Loggers created via get_logger(); init() attaches handlers to each of these.
loggers: List[logging.Logger] = []
def create_file_handler(
    log_format=_log_format,
    log_filename=DEFAULT_LOG_FILENAME,
    max_bytes=5000000,
    backup_count=25,
):
    """Create a size-based rotating file handler using the verbose format.

    :param log_format: %-style format string for emitted records.
    :param log_filename: path of the log file.
    :param max_bytes: rollover threshold per file.
    :param backup_count: number of rotated files to keep.
    :return: a configured ``RotatingFileHandler``.

    Fix: the formatter was constructed with ``style=_style`` ("{") even though
    ``log_format`` is a %-style string. ``logging.Formatter`` validates the
    format against the declared style on Python 3.8+ and raises ``ValueError``
    for that combination (and on earlier versions the fields were simply never
    substituted). Use the default "%" style, which matches the module's
    format strings.
    """
    formatter = CustomFormatter(fmt=log_format, datefmt=_date_format)
    handler = RotatingFileHandler(log_filename, maxBytes=max_bytes, backupCount=backup_count)
    handler.setFormatter(formatter)
    return handler
def create_non_cli_handler(log_format=_log_format, stream=sys.stdout):
    """Create a plain stream handler for non-interactive consoles (e.g. ECS)."""
    handler = logging.StreamHandler(stream)
    handler.setFormatter(CustomFormatter(fmt=log_format, datefmt=_date_format))
    return handler
def create_cli_handler():
    """Create a Rich handler for structured, colourised CLI output."""
    return RichHandler(rich_tracebacks=True)
def get_logger(name=None, propagate=True):
    """Return the logger for *name* and register it so that a later call to
    :func:`init` can attach the configured handlers to it.
    """
    new_logger = logging.getLogger(name)
    new_logger.propagate = propagate
    loggers.append(new_logger)
    return new_logger
def exception(line):
    """Log *line* at ERROR level on the root logger, appending the traceback
    of the exception currently being handled."""
    return logging.exception(line)
def debug(line):
    """Log *line* at DEBUG level on the root logger."""
    return logging.debug(line)
def warn(line):
    """Log *line* at WARNING level on the root logger.

    Uses ``logging.warning``: ``logging.warn`` is a deprecated alias that
    emits a DeprecationWarning on modern Python versions.
    """
    return logging.warning(line)
def error(line):
    """Log *line* at ERROR level on the root logger."""
    return logging.error(line)
def info(line):
    """Log *line* at INFO level on the root logger."""
    return logging.info(line)
def init(filename=None, console=True, log_level=INFO, log_format=_log_format):
    """Initialize logging setup.

    Builds console and/or file handlers, attaches them to every logger
    previously registered via :func:`get_logger`, then configures the root
    logger via ``basicConfig``.

    :param filename: log file path; no file handler is added when None.
    :param console: when True, also log to the console (Rich when interactive,
        plain stream on ECS/Jenkins).
    :param log_level: minimum level applied to all registered loggers.
    :param log_format: format string; falls back to the verbose default.
    :return: a module-level logger, for convenience.
    """
    if not log_format:
        log_format = _log_format
    log_handlers: List[logging.Handler] = []
    if console:
        # ECS/JENKINS_URL env vars signal a non-interactive console.
        if os.getenv("ECS", False) or os.getenv("JENKINS_URL", False):
            log_handlers.append(create_non_cli_handler(log_format=log_format))
        else:
            # Use Rich for CLI
            log_handlers.append(create_cli_handler())
            # Set Format to short type for Rich
            log_format = _rich_log_format
    if filename is not None:
        # Always log to file with verbose format
        log_handlers.append(create_file_handler(log_format=_log_format, log_filename=filename))
    # Attach every handler to every previously registered logger.
    for pair in itertools.product(loggers, log_handlers):
        pair[0].addHandler(pair[1])
        pair[0].setLevel(log_level)
    # Set Logging config based on CLI/Non/CLI Format
    logging.basicConfig(format=log_format, level=log_level, handlers=log_handlers)
    log = get_logger(__name__)
    if log_level > logging.INFO:
        log.log(level=log_level, msg=f"super.Ai logger initialized with log_level={log_level}")
    return log
class CustomFormatter(logging.Formatter):
    """Formatter honouring per-record overrides.

    1. Overrides 'funcName' with the value of 'func_name_override', if it exists.
    2. Overrides 'filename' with the value of 'file_name_override', if it exists.
    3. Overrides 'lineno' with the value of 'lineno_override', if it exists.
    """

    # (override attribute on the record, target attribute to replace)
    _OVERRIDES = (
        ("func_name_override", "funcName"),
        ("file_name_override", "filename"),
        ("lineno_override", "lineno"),
    )

    def format(self, record):
        for source_attr, target_attr in self._OVERRIDES:
            if hasattr(record, source_attr):
                setattr(record, target_attr, getattr(record, source_attr))
        return super(CustomFormatter, self).format(record)
init()
| 29.511111 | 116 | 0.704317 |
3a090e5c232242360194af34105d0efa576a5d9f | 6,613 | py | Python | src/test.py | 0shimax/SE-Wavenet | f3cf8239175fec02565c81995e5b9f9e1bbd5eb1 | [
"MIT"
] | null | null | null | src/test.py | 0shimax/SE-Wavenet | f3cf8239175fec02565c81995e5b9f9e1bbd5eb1 | [
"MIT"
] | null | null | null | src/test.py | 0shimax/SE-Wavenet | f3cf8239175fec02565c81995e5b9f9e1bbd5eb1 | [
"MIT"
] | null | null | null | import argparse
from pathlib import Path
import torch
import torch.nn.functional as F
from sklearn.metrics import precision_recall_fscore_support, roc_curve, auc
import matplotlib.pyplot as plt
import numpy as np
from data.data_loader import ActivDataset, loader
from models.focal_loss import FocalLoss
from models.ete_waveform import EteWave
from models.post_process import as_seaquence
# Fixed seed for reproducible evaluation runs.
torch.manual_seed(555)
# Prefer GPU when available; model and batches are moved to this device below.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device:", device)
def main(args):
    """Build the model (resuming saved weights when present), construct the
    test dataset from the file-pointer list and run :func:`test`."""
    model = EteWave(args.n_class).to(device)
    if Path(args.resume_model).exists():
        print("load model:", args.resume_model)
        model.load_state_dict(torch.load(args.resume_model))
    # One data-file path per line in the pointer file.
    test_data_file_names =\
        [line.rstrip() for line in open(args.test_data_file_pointer_path)]
    test_dataset = ActivDataset(test_data_file_names, args.root_dir,
                                seq_len=args.test_seq_len, time_step=args.time_step,
                                is_train=False)
    # batch_size=1, no shuffling: per-sequence rows below rely on file order.
    test_loader = loader(test_dataset, 1, shuffle=False)
    test(args, model, test_loader)
def test(args, model, data_loader):
    """Evaluate *model*: per-timestep segmentation accuracy, per-work "lack"
    classification accuracy, F1 scores, a ROC curve/AUC for the finish-work
    probability, and per-scenario breakdowns at the end.

    NOTE(review): the per-scenario slices below (``[:5]``, ``[5:10]``, ...)
    assume a fixed test-set ordering of exactly 60 sequences with 5 work
    classes each -- confirm against the test data layout.
    """
    model.eval()
    test_loss = 0
    segmentation_correct = 0
    lack_classifier_correct = 0
    total_len = 0
    lack_total_len = 0
    true_seq_labels = []
    inf_seq_labels = []
    true_finish_labels = []
    inf_finish_labels = []
    inf_finish_proba = []
    # One row per sequence, one column per work class (5 classes).
    true_finish_labels_mat = np.empty([len(data_loader), 5])
    inf_finish_labels_mat = np.empty([len(data_loader), 5])
    with torch.no_grad():
        for i_batch, (l_data, l_target, l_lack_labels) in enumerate(data_loader):
            l_data = l_data.to(device)
            l_target = l_target.to(device)
            l_lack_labels = l_lack_labels.to(device)
            total_len += l_target.shape[-1]
            lack_total_len += l_lack_labels.shape[-1]
            # Segmentation head: per-timestep class logits.
            output = model(l_data)
            output = output.view([-1, output.shape[-1]])
            targets = l_target.view(-1)
            test_loss += F.cross_entropy(output, targets, ignore_index=-1).item()
            pred = output.argmax(1)
            # Post-process: smooth predictions into contiguous runs.
            pred = as_seaquence(pred.detach(), ahead=7)
            segmentation_correct += pred.eq(targets.view_as(pred)).sum().item()
            # Second stage: classify per-work "lack" using segmented data.
            model.tatc.select_data_per_labels(l_data, pred, device)
            tatc_output = model.tatc()
            test_loss += F.cross_entropy(tatc_output, l_lack_labels.reshape(-1)).item()
            tatc_pred = tatc_output.argmax(1)
            print("true:", l_lack_labels[0])
            print("inference:", tatc_pred)
            lack_classifier_correct += tatc_pred.eq(l_lack_labels.view_as(tatc_pred)).sum().item()
            true_seq_labels += targets.view_as(pred).cpu().tolist()
            inf_seq_labels += pred.cpu().tolist()
            lack_labels_cpu = l_lack_labels.view_as(tatc_pred).cpu().tolist()
            tatc_pred_cpu = tatc_pred.cpu().tolist()
            true_finish_labels += lack_labels_cpu
            inf_finish_labels += tatc_pred_cpu
            # Column 1 is presumably the "finished" class probability -- TODO confirm.
            inf_finish_proba += tatc_output[:, 1].view(-1).cpu().tolist()
            true_finish_labels_mat[i_batch] = lack_labels_cpu
            inf_finish_labels_mat[i_batch] = tatc_pred_cpu
    test_loss /= len(data_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Seg Accuracy: {}/{} ({:.0f}%), lack Accuracy: {}/{} ({:.0f}%)\n'
          .format(test_loss,
                  segmentation_correct, total_len, 100. * segmentation_correct / total_len,
                  lack_classifier_correct, lack_total_len, 100. * lack_classifier_correct / lack_total_len))
    print("seq f1:")
    print(precision_recall_fscore_support(true_seq_labels, inf_seq_labels))
    print("finish work:")
    print(precision_recall_fscore_support(true_finish_labels, inf_finish_labels))
    fpr, tpr, _ = roc_curve(true_finish_labels, inf_finish_proba)
    plt.plot(fpr, tpr)
    plt.savefig( Path(args.out_dir, 'finish_roc.png') )
    print("finish work AUC:")
    print(auc(fpr, tpr))
    # Per-class breakdown over the work columns.
    for i in range(args.n_class -1):
        print("class {}:".format(i))
        print(precision_recall_fscore_support(true_finish_labels_mat[:, i], inf_finish_labels_mat[:, i]))
    # Scenario breakdowns: speeds (low/mid/high), then interruption+resume,
    # interruption, and missing-work scenarios, 3 sequences per work each.
    print("低速:")
    print(precision_recall_fscore_support(true_finish_labels_mat[:5, :].ravel(), inf_finish_labels_mat[:5, :].ravel()))
    print("中速:")
    print(precision_recall_fscore_support(true_finish_labels_mat[5:10, :].ravel(), inf_finish_labels_mat[5:10, :].ravel()))
    print("高速:")
    print(precision_recall_fscore_support(true_finish_labels_mat[10:15, :].ravel(), inf_finish_labels_mat[10:15, :].ravel()))
    for i in range(5):
        start = 15+i*3
        end = 15+(i+1)*3
        print("作業{}中断再開:".format(i+1))
        print(precision_recall_fscore_support(true_finish_labels_mat[start:end, :].ravel(), inf_finish_labels_mat[start:end, :].ravel()))
    for i in range(5):
        start = 30+i*3
        end = 30+(i+1)*3
        print("作業{}中断:".format(i+1))
        print(precision_recall_fscore_support(true_finish_labels_mat[start:end, :].ravel(), inf_finish_labels_mat[start:end, :].ravel()))
    for i in range(5):
        start = 45+i*3
        end = 45+(i+1)*3
        print("作業{}欠損:".format(i+1))
        print(precision_recall_fscore_support(true_finish_labels_mat[start:end, :].ravel(), inf_finish_labels_mat[start:end, :].ravel()))
# Command-line entry point. NOTE(review): '--test_seq-len' mixes underscore
# and dash; argparse exposes it as args.test_seq_len, which matches usage.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--root_dir', default='/home/sh70k/mnt/tracker_data/test', help='path to dataset')
    parser.add_argument('--n-class', type=int, default=6, help='number of class')
    parser.add_argument('--test_seq-len', type=int, default=200, help='fixed seaquence length')
    parser.add_argument('--time-step', type=float, default=.25, help='fixed time interbal of input data')
    parser.add_argument('--test-data-file-pointer-path', default='./data/test_data_file_pointer', help='path to test data file pointer')
    parser.add_argument('--resume-model', default='/home/sh70k/mnt/tracker_data/results/model_ckpt_v1_average.pth', help='path to trained model')
    parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
    parser.add_argument('--batch-size', type=int, default=1, help='input batch size')
    parser.add_argument('--out-dir', default='/home/sh70k/mnt/tracker_data/results', help='folder to output data and model checkpoints')
    args = parser.parse_args()
    # NOTE(review): the trailing comma makes this a 1-tuple expression; harmless,
    # but probably unintended.
    Path(args.out_dir).mkdir(parents=True, exist_ok=True),
    main(args)
| 42.121019 | 145 | 0.665356 |
3a0d56385a100828a93d1a548339d663fa8c3ed6 | 4,031 | py | Python | code/ConvexHull.py | vijindal/cluspand | a3676594354ab59991fe75fccecdc3a400c7b153 | [
"MIT"
] | null | null | null | code/ConvexHull.py | vijindal/cluspand | a3676594354ab59991fe75fccecdc3a400c7b153 | [
"MIT"
] | null | null | null | code/ConvexHull.py | vijindal/cluspand | a3676594354ab59991fe75fccecdc3a400c7b153 | [
"MIT"
] | null | null | null | from structure_helper_class import structure_helper
from model_train_helper_class import model_train_helper
import matplotlib.pyplot as plt
import pandas as pd
from tabulate import tabulate
class convex_hull:
    """Lower-convex-hull utilities over (composition, energy) points.

    NOTE(review): neither method takes ``self`` and both are invoked through
    the class (``convex_hull.draw(...)``); they behave as static methods.
    """

    def get_convex_hull_points(structure_name_to_object_map, draw_hull = True, model = None, model_str = None):
        """Collect (composition, energy, name) points -- from *model*
        predictions when given, otherwise from stored total energies -- and
        return the lower convex hull (Andrew's monotone chain).
        Optionally scatter-plot all points."""
        #Getting a map from composition ratio to list of structure names
        composition_ratio_to_structure_names_list_map = structure_helper.get_composition_ratio_to_structure_names_list_map(structure_name_to_object_map.values())
        points = []
        points_x = []
        points_y = []
        if model is not None:
            # Use model-predicted energies, restricted to structures the model was trained on.
            prediction_dict = model_train_helper.get_prediction_dict(structure_name_to_object_map, model, model_str)
            for composition, name_to_energy_map in prediction_dict.items():
                for name, energy in name_to_energy_map.items():
                    if name not in model.used_structure_names_list:
                        continue
                    points.append((composition, energy, name))
                    points_x.append(composition)
                    points_y.append(energy)
        else:
            # Use stored total energies per structure.
            for composition, structure_names in composition_ratio_to_structure_names_list_map.items():
                for name in structure_names:
                    points.append((composition, structure_name_to_object_map[name].total_energy_, name))
                    points_x.append(composition)
                    points_y.append(structure_name_to_object_map[name].total_energy_)
        # The string literal below documents the hull algorithm; since it is not
        # the first statement it is a no-op expression, not a docstring.
        """Computes the convex hull of a set of 2D points.
        Input: an iterable sequence of (x, y) pairs representing the points.
        Output: a list of vertices of the convex hull in counter-clockwise order,
        starting from the vertex with the lexicographically smallest coordinates.
        Implements Andrew's monotone chain algorithm. O(n log n) complexity.
        """
        # Sort the points lexicographically (tuples are compared lexicographically).
        # Remove duplicates to detect the case we have just one unique point.
        points = sorted(set(points))
        # Boring case: no points or a single point, possibly repeated multiple times.
        if len(points) <= 1:
            return points
        lower = []
        # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.
        # Returns a positive value, if OAB makes a counter-clockwise turn,
        # negative for clockwise turn, and zero if the points are collinear.
        def cross(o, a, b):
            return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])
        # Build only the lower hull: pop while the last two kept points and p
        # do not make a clockwise turn.
        for p in points:
            while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:
                lower.pop()
            lower.append(p)
        if draw_hull:
            plt.scatter(points_x, points_y, marker='.')
        return lower

    def draw(structure_name_to_object_map, draw_hull = True, model = None, model_str = None):
        """Print the hull points as a table and plot the hull over the scatter."""
        # Build lower hull
        lower = convex_hull.get_convex_hull_points(structure_name_to_object_map,
            draw_hull, model, model_str)
        print('\nPoints used for Convex Hull :\n')
        pd.set_option('display.expand_frame_repr', False)
        df = pd.DataFrame({'Composition':[lower[i][0] for i in range(len(lower))],
                           'Structure name':[lower[i][2] for i in range(len(lower))],
                           'Structure energy':[lower[i][1] for i in range(len(lower))]})
        # NOTE(review): set_index returns a new frame; this result is discarded,
        # so the printed table keeps the default index.
        df.set_index('Composition')
        print(tabulate(df, headers='keys', tablefmt='psql'))
        lower_x = [lower[i][0] for i in range(len(lower))]
        lower_y = [lower[i][1] for i in range(len(lower))]
        if draw_hull:
            plt.plot(lower_x, lower_y , marker='.', color='black')
        plt.show()
3a0e24a4de9a8532f6e0fffca390853480dadb10 | 5,460 | py | Python | PoPs/warning.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 14 | 2019-08-29T23:46:24.000Z | 2022-03-21T10:16:25.000Z | PoPs/warning.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 1 | 2020-08-04T16:14:45.000Z | 2021-12-01T01:54:34.000Z | PoPs/warning.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 2 | 2022-03-03T22:41:41.000Z | 2022-03-03T22:54:43.000Z | # <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
"""
Store and report warnings and errors in a PoPs database.
PoPs.check() returns a nested list of warning objects:
>>> warnings = PoPs.check()
>>> print( warnings )
May include or exclude specific classes of warning using the filter command.
filter() returns a new context instance:
>>> warnings2 = warnings.filter( exclude=[warning.unnormalizedGammas] )
Or, for easier searching you may wish to flatten the list (to get warnings alone without context messages):
>>> flat = warnings.flatten()
"""
# FIXME context class and base warning class are both identical to stuff in fudge.warning. Move to external utility?
# Python 2 compatibility: forces new-style classes module-wide (no effect on Python 3).
__metaclass__ = type
class context:
    """
    Store warnings in context. Holds location information (reactionSuite,
    reaction, etc.) plus a nested list of warnings or other context instances.
    """

    def __init__(self, message='', warningList=None):
        self.message = message
        self.warningList = warningList if warningList else []

    def __len__(self):
        return len(self.warningList)

    def __getitem__(self, idx):
        return self.warningList[idx]

    def __str__(self):
        if not self.warningList:
            return self.message + ": no problems encountered"
        return '\n'.join(self.toStringList())

    def __eq__(self, other):
        return self.message == other.message and self.warningList == other.warningList

    def filter(self, include=None, exclude=None):
        """
        Filter warning list to only include (or exclude) specific classes of warning. For example:
            >>> newWarnings = warnings.filter( exclude=[warning.discreteLevelsOutOfOrder] )
        Note that if both 'include' and 'exclude' lists are provided, exclude is ignored.
        """
        if include is None and exclude is None:
            return self
        kept = []
        for entry in self.warningList:
            if isinstance(entry, context):
                # Recurse; drop sub-contexts that end up empty.
                sub_context = entry.filter(include, exclude)
                if sub_context:
                    kept.append(sub_context)
            elif include is not None:
                if entry.__class__ in include:
                    kept.append(entry)
            elif entry.__class__ not in exclude:
                kept.append(entry)
        return context(self.message, kept)

    def flatten(self):
        """
        From a nested hierarchy of warnings, get back a flat list for easier searching:
            >>> w = PoPs.check()
            >>> warningList = w.flatten()
        :return: list containing all of the warnings
        """
        flat = []
        for entry in self.warningList:
            if isinstance(entry, warning):
                flat.append(entry)
            else:
                flat.extend(entry.flatten())
        return flat

    def toStringList(self, indent='', dIndent='    '):
        """Format warnings for printing. Returns a list of warning strings with indentation."""
        lines = ['%s%s' % (indent, self.message)]
        for entry in self.warningList:
            lines += entry.toStringList(indent + dIndent)
        return lines
class warning:  # FIXME make abstract base class?
    """
    General warning class. Contains a link to the problem object, its xpath
    (in case the object leaves memory), and information about the warning
    or error.
    """

    def __init__(self, obj=None):
        self.obj = obj
        # Capture the xpath eagerly so it survives the object being released.
        self.xpath = obj.toXLink() if hasattr(obj, 'toXLink') else ''

    def __str__(self):
        return "Generic warning for %s" % self.xpath

    def __eq__(self, other):
        return self.xpath == other.xpath

    def toStringList(self, indent=''):
        return ['%sWARNING: %s' % (indent, self)]
#
# specific warning classes:
#
class NotImplemented(warning):
    """Flags a data form for which checking has not been written yet."""

    def __init__(self, form, obj=None):
        warning.__init__(self, obj)
        self.form = form

    def __str__(self):
        return "Checking not yet implemented for {} type data".format(self.form)

    def __eq__(self, other):
        return self.form == other.form and self.xpath == other.xpath
class discreteLevelsOutOfOrder(warning):
    """Flags a discrete level whose index breaks the expected ordering."""

    def __init__(self, lidx, obj=None):
        warning.__init__(self, obj)
        self.lidx = lidx

    def __str__(self):
        return "Discrete level {} is out of order".format(self.lidx)

    def __eq__(self, other):
        return self.lidx == other.lidx
class unnormalizedDecayProbabilities(warning):
    """Flags decay probabilities whose sum differs from 1.0."""

    def __init__(self, branchingSum, obj=None):
        warning.__init__(self, obj)
        self.branchingSum = branchingSum

    def __str__(self):
        return "Sum of decay probabilities = %s, should be 1.0!" % self.branchingSum

    def __eq__(self, other):
        return self.xpath == other.xpath and self.branchingSum == other.branchingSum
class AliasToNonExistentParticle(warning):
    """Flags an alias whose target particle id is absent from the database.

    :param id: the alias id.
    :param pid: the (missing) particle id the alias points to.
    """

    def __init__( self, id, pid, obj=None ):
        warning.__init__(self, obj)
        self.id = id
        self.pid = pid

    def __str__(self):
        # Fixed misspelling: "non-existant" -> "non-existent". Only __eq__
        # (which compares id/pid, not text) depends on this class programmatically.
        return "Alias '%s' points to non-existent particle '%s'" % (self.id, self.pid)

    def __eq__(self, other):
        return (self.id == other.id and self.pid == other.pid)
3a0f2160b69e0995f3cc76e9cebbc03eb599b9f1 | 2,077 | py | Python | libra/transaction/script.py | MaslDi/libra-client | 0983adfcb6787f7a16de4bf364cdf5596c183d88 | [
"MIT"
] | null | null | null | libra/transaction/script.py | MaslDi/libra-client | 0983adfcb6787f7a16de4bf364cdf5596c183d88 | [
"MIT"
] | null | null | null | libra/transaction/script.py | MaslDi/libra-client | 0983adfcb6787f7a16de4bf364cdf5596c183d88 | [
"MIT"
] | null | null | null | from canoser import Struct, Uint8, bytes_to_int_list, hex_to_int_list
from libra.transaction.transaction_argument import TransactionArgument, normalize_public_key
from libra.bytecode import bytecodes
from libra.account_address import Address
class Script(Struct):
    """A Libra transaction script: compiled Move bytecode plus its runtime
    arguments (stock scripts live in ``bytecodes``)."""

    _fields = [
        ('code', [Uint8]),
        ('args', [TransactionArgument])
    ]

    @classmethod
    def gen_transfer_script(cls, receiver_address, micro_libra):
        """Build a peer-to-peer transfer of *micro_libra* to *receiver_address*.

        *receiver_address* may be bytes, a hex string, or an int list.
        """
        # Consistency fix: use the shared Address normalizer (as gen_mint_script
        # and gen_create_account_script already do) instead of duplicating the
        # bytes/hex-string conversions here.
        receiver_address = Address.normalize_to_int_list(receiver_address)
        code = bytecodes["peer_to_peer_transfer"]
        args = [
            TransactionArgument('Address', receiver_address),
            TransactionArgument('U64', micro_libra)
        ]
        return Script(code, args)

    @classmethod
    def gen_mint_script(cls, receiver_address, micro_libra):
        """Build a mint script crediting *micro_libra* to *receiver_address*."""
        receiver_address = Address.normalize_to_int_list(receiver_address)
        code = bytecodes["mint"]
        args = [
            TransactionArgument('Address', receiver_address),
            TransactionArgument('U64', micro_libra)
        ]
        return Script(code, args)

    @classmethod
    def gen_create_account_script(cls, fresh_address):
        """Build a create-account script for *fresh_address* with a 0 balance."""
        fresh_address = Address.normalize_to_int_list(fresh_address)
        code = bytecodes["create_account"]
        args = [
            TransactionArgument('Address', fresh_address),
            TransactionArgument('U64', 0)
        ]
        return Script(code, args)

    @classmethod
    def gen_rotate_auth_key_script(cls, public_key):
        """Build a script rotating the account's authentication key to *public_key*."""
        key = normalize_public_key(public_key)
        code = bytecodes["rotate_authentication_key"]
        args = [
            TransactionArgument('ByteArray', key)
        ]
        return Script(code, args)

    @staticmethod
    def get_script_bytecode(script_name):
        """Return the compiled bytecode registered under *script_name*."""
        return bytecodes[script_name]
3a0f8c5dad18187b53b099da32a80926deec7934 | 172 | py | Python | Statistics/SampleMean.py | Shannon-NJIT/MiniProject2_Statistics | 961d579d40682c030b3aa88b4cd38fa828e8e01e | [
"MIT"
] | null | null | null | Statistics/SampleMean.py | Shannon-NJIT/MiniProject2_Statistics | 961d579d40682c030b3aa88b4cd38fa828e8e01e | [
"MIT"
] | 6 | 2019-11-04T22:48:39.000Z | 2019-11-14T01:18:49.000Z | Statistics/SampleMean.py | Shannon-NJIT/MiniProject2_Statistics | 961d579d40682c030b3aa88b4cd38fa828e8e01e | [
"MIT"
] | 4 | 2019-10-29T23:24:57.000Z | 2019-11-15T01:25:46.000Z | from Calculators.Division import division
def sampleMean(data):
    """Return the mean of the first 999 values of *data*, rounded to 1 decimal.

    NOTE(review): ``data[0:999]`` yields at most 999 elements; if a sample of
    1000 was intended this is off by one -- confirm against the spec.
    ``division(n, sum(sample_data))`` presumably treats its first argument as
    the divisor (sum/n) given the project's Division helper -- TODO confirm.
    """
    sample_data = data[0:999]
    n = len(sample_data)
    return round(division(n, sum(sample_data)), 1)
| 21.5 | 50 | 0.715116 |
3a107df57da88f96818aa6ed0682c1887ef863ef | 1,901 | py | Python | puzzle/booking/candy.py | aliciawyy/dmining | 513f6f036f8f258281e1282fef052a74bf9cc3d3 | [
"Apache-2.0"
] | null | null | null | puzzle/booking/candy.py | aliciawyy/dmining | 513f6f036f8f258281e1282fef052a74bf9cc3d3 | [
"Apache-2.0"
] | 9 | 2017-10-25T10:03:36.000Z | 2018-06-12T22:49:22.000Z | puzzle/booking/candy.py | aliciawyy/dmining | 513f6f036f8f258281e1282fef052a74bf9cc3d3 | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
def read_line_to_list(as_type=int):
    """Read one whitespace-separated line from stdin and convert each field
    with *as_type* (Python 2: ``raw_input`` reads stdin, ``map`` returns a list)."""
    return map(as_type, raw_input().strip().split(' '))
# First line: grid rows N, columns M, time limit T; then N rows of candy values.
N, M, T = read_line_to_list()
candies_ = [read_line_to_list() for _ in range(N)]
class CollectCandies(object):
    """Dynamic program for the best candy total reachable at (n-1, m-1)
    within ``t`` steps, moving one cell per step (or staying put).

    NOTE: this is Python 2 code -- ``map``/``filter`` are called eagerly for
    their side effects; on Python 3 they would be lazy and these calls no-ops.
    """

    def __init__(self, n, m, t, candies):
        self.dim = n, m
        self.time_limit = t
        self.candies = candies
        # Best collectable value known per reached cell; start cell is seeded.
        self.v_grid_ = {(0, 0): self.candies[0][0]}
        # For each cell: the set of cells it can be entered from.
        self.new_pos_from_previous_ = defaultdict(set)
        # Cells whose predecessor links have not been expanded yet.
        self.pos_unchecked_ = {(i, j) for i in range(self.dim[0])
                               for j in range(self.dim[1])}
    def _in_range_x(self, point):
        return 0 <= point[0] < self.dim[0]
    def _in_range_y(self, point):
        return 0 <= point[1] < self.dim[1]
    def append_previous_pos_(self, point):
        # Register *point* as a predecessor of its in-range neighbours (and itself,
        # which models waiting in place).
        def _add(p1):
            self.new_pos_from_previous_[p1].add(point)
        self.pos_unchecked_.remove(point)
        x, y = point[0], point[1]
        map(_add, filter(self._in_range_x, [(x - 1, y), (x + 1, y)]))
        map(_add, filter(self._in_range_y, [(x, y - 1), (x, y + 1)]))
        _add(point)
    def get_max_sum(self):
        """Return the best total at the bottom-right cell, or "Too late" when
        the time limit cannot even cover the Manhattan distance."""
        if self.time_limit < self.dim[0] + self.dim[1] - 2:
            return "Too late"
        for i in range(self.time_limit):
            # Expand predecessor links for newly reachable cells.
            if self.pos_unchecked_:
                map(self.append_previous_pos_,
                    self.pos_unchecked_.intersection(self.v_grid_))
            # Relax: each cell takes its best predecessor value plus its own candy.
            new_values = {
                new_pos: max(
                    map(self.v_grid_.__getitem__, previous_pos_list)
                ) + self.candies[new_pos[0]][new_pos[1]]
                for new_pos, previous_pos_list in
                self.new_pos_from_previous_.items()}
            self.v_grid_.update(new_values)
        return self.v_grid_[(self.dim[0] - 1, self.dim[1] - 1)]
# Python 2 print statement: emit the best collectable candy total.
collector = CollectCandies(N, M, T, candies_)
print collector.get_max_sum()
| 32.220339 | 69 | 0.579695 |
3a110cf9f81c51a45a9e039e2675a3d01dca6237 | 13,818 | py | Python | SourceRepositoryTools/__init__.py | davidbrownell/Common_Environment | 4015872aeac8d5da30a6aa7940e1035a6aa6a75d | [
"BSL-1.0"
] | 1 | 2017-04-25T13:15:10.000Z | 2017-04-25T13:15:10.000Z | SourceRepositoryTools/__init__.py | davidbrownell/Common_Environment | 4015872aeac8d5da30a6aa7940e1035a6aa6a75d | [
"BSL-1.0"
] | null | null | null | SourceRepositoryTools/__init__.py | davidbrownell/Common_Environment | 4015872aeac8d5da30a6aa7940e1035a6aa6a75d | [
"BSL-1.0"
] | null | null | null | # ----------------------------------------------------------------------
# |
# | __init__.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2018-02-18 14:37:39
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2018.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
import os
import sys
import textwrap
from collections import OrderedDict
# ----------------------------------------------------------------------
# Resolve this file's location; when frozen into an executable, __file__ is
# unreliable, so fall back to the executable's own path.
_script_fullpath = os.path.abspath(__file__) if "python" in sys.executable.lower() else sys.executable
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def GetFundamentalRepository():
    """Return the fundamental repository's root directory (without a trailing
    path separator), or None when it cannot be determined.

    Normally read from the DEVELOPMENT_ENVIRONMENT_FUNDAMENTAL environment
    variable; when absent (e.g. running as a frozen binary), fall back to the
    directory layout relative to this file.

    Don't import Constants here, as Constants relies on this for initialization.
    """
    root = os.getenv("DEVELOPMENT_ENVIRONMENT_FUNDAMENTAL")
    if root is None:
        # Not in a standard environment - most likely a frozen executable.
        # Check whether the surrounding file system resembles Common_Environment.
        assert "python" not in sys.executable.lower(), sys.executable
        candidate = os.path.normpath(os.path.join(os.path.dirname(__file__), ".."))
        if os.path.isdir(candidate):
            root = candidate
    if root is not None and root.endswith(os.path.sep):
        root = root[:-len(os.path.sep)]
    return root
# ----------------------------------------------------------------------
# This file may be invoked by our included version of python - all imports will
# work as expected. But sometimes, this file may be invoked by embedded versions
# of python (for example, when used as part of a Mercurial plugin). At that point,
# we need to go through a bit more work to ensure that module-level imports work
# as expected.
try:
    import inflect
    import six
    import wrapt
    # If here, everything was found and all is good
except ImportError:
    # If here, we are in a foreign python environment. Hard-code an import path
    # to a known location of these base-level libraries. Because the libraries are
    # so basic, it doesn't matter which one we use; therefore pick the lowest common
    # denominator.
    fundamental_repo = GetFundamentalRepository()
    python_root = os.path.join(fundamental_repo, "Tools", "Python", "v2.7.10")
    assert os.path.isdir(python_root), python_root
    for suffix in [ os.path.join("Windows", "Lib", "site-packages"),
                    os.path.join("Ubuntu", "lib", "python2.7", "site-packages"),
                  ]:
        potential_dir = os.path.join(python_root, suffix)
        if os.path.isdir(potential_dir):
            sys.path.insert(0, potential_dir)
            break
    # Try it again
    import inflect
    import six
    import wrapt
    # NOTE(review): this assumes the loop above inserted a directory; if no
    # suffix matched, this removes an unrelated sys.path entry -- confirm.
    del sys.path[0]
# ----------------------------------------------------------------------
# Backwards compatibility
from SourceRepositoryTools.Impl.Configuration import *
from SourceRepositoryTools.Impl import Constants
from SourceRepositoryTools.Impl.Utilities import DelayExecute, \
GetLatestVersion, \
GetRepositoryUniqueId, \
GetVersionedDirectory
# ----------------------------------------------------------------------
@wrapt.decorator
def ToolRepository(wrapped, instance, args, kwargs):
    """\
    Signals that a repository is a tool repository (a repository that contains
    items that help in the development process but doesn't contain primitives
    used by other dependent repositories during the build process).
    """
    # Pure marker decorator: invokes the wrapped callable unchanged.
    return wrapped(*args, **kwargs)
# ----------------------------------------------------------------------
def CreateDependencyMap(root_dir):
    """Scan every SCM repository under *root_dir* and build a map from
    repository unique id (and root path) to a RepoInfo describing, per
    configuration, which repositories it relies on and which rely on it.
    """
    # Note that this functionality is very similar to that found in ActivationData.
    # The difference between the two is this function will compile a map of all repositories
    # under the code dir, while the code in ActivationData will only traverse environment
    # data created during setup. Theoretically, it is possible for ActivationData
    # to be implemented in terms of this function, but that would be too inefficient for
    # general use.
    from CommonEnvironment.NamedTuple import NamedTuple
    from CommonEnvironment import Shell
    from CommonEnvironment import SourceControlManagement
    from SourceRepositoryTools.Impl.EnvironmentBootstrap import EnvironmentBootstrap
    # ----------------------------------------------------------------------
    RepoInfo = NamedTuple( "RepoInfo",
                           "UniqueId",
                           "Name",
                           "Root",
                           "Configurations",
                         )
    ConfigInfo = NamedTuple( "ConfigInfo",
                             "ReliesOn",
                             "ReliedUponBy",
                           )
    DependencyInfo = NamedTuple( "DependencyInfo",
                                 "Configuration",
                                 "Dependency",
                               )
    # ----------------------------------------------------------------------
    assert os.path.isdir(root_dir), root_dir
    environent = Shell.GetEnvironment()
    repositories = OrderedDict()
    # Discover repositories under root_dir and load their bootstrap data.
    for scm, directory in SourceControlManagement.EnumSCMDirectories(root_dir):
        result = GetRepositoryUniqueId( directory,
                                        scm=scm,
                                        throw_on_error=False,
                                      )
        if result is None:
            continue
        repo_name, repo_id = result
        assert repo_id not in repositories, (repo_id, directory, repositories[repo_id].Root)
        repo_bootstrap_data = EnvironmentBootstrap.Load(directory, environment=environent)
        repo_bootstrap_data.Name = repo_name
        repo_bootstrap_data.Id = repo_id
        repo_bootstrap_data.Root = directory
        repo_bootstrap_data.PriorityModifier = 0
        repositories[repo_id] = repo_bootstrap_data
    # Order by priority
    # ----------------------------------------------------------------------
    def Walk(repo_id, priority_modifier):
        # Recursively bump each repository's priority by its dependency depth.
        assert repo_id in repositories, repo_id
        repo_info = repositories[repo_id]
        repo_info.PriorityModifier += priority_modifier
        for configuration in six.itervalues(repo_info.Configurations):
            for dependency in configuration.Dependencies:
                Walk(dependency.Id, priority_modifier + 1)
    # ----------------------------------------------------------------------
    for repo_id in six.iterkeys(repositories):
        Walk(repo_id, 1)
    priority_values = list(six.iteritems(repositories))
    priority_values.sort(key=lambda x: x[1].PriorityModifier, reverse=True)
    # Convert the repositories into a structure that is easier to process
    results = OrderedDict()
    for unique_id, repo_info in priority_values:
        results[unique_id] = RepoInfo( unique_id,
                                       repo_info.Name,
                                       repo_info.Root,
                                       OrderedDict(),
                                     )
        for config_name in six.iterkeys(repo_info.Configurations):
            results[unique_id].Configurations[config_name] = ConfigInfo([], [])
    # Populate the dependencies
    for unique_id, repo_info in priority_values:
        for config_name, config_info in six.iteritems(repo_info.Configurations):
            # It is possible that a dependency is included more than once (as will be the case if someone
            # includes Common_Enviroment as a dependency even though a dependency on Common_Enviroment is
            # implied). Ensure that we are only looking at unique dependencies.
            these_dependencies = []
            dependency_lookup = set()
            for dependency in config_info.Dependencies:
                if dependency.Id in dependency_lookup:
                    continue
                these_dependencies.append(( dependency, repositories[dependency.Id].PriorityModifier ))
                dependency_lookup.add(dependency.Id)
            # Ensure that the dependencies are ordered in priority order
            # NOTE(review): this sorts by x[0].Id, not by the priority modifier
            # stored in x[1]; that looks inconsistent with the comment above --
            # confirm the intended ordering key.
            these_dependencies.sort(key=lambda x: x[0].Id, reverse=True)
            for dependency, priority_modifier in these_dependencies:
                results[unique_id].Configurations[config_name].ReliesOn.append(DependencyInfo(dependency.Configuration, results[dependency.Id]))
                results[dependency.Id].Configurations[dependency.Configuration].ReliedUponBy.append(DependencyInfo(config_name, results[unique_id]))
    # Ensure that we can index by repo path as well as id
    for unique_id in list(six.iterkeys(results)):
        results[results[unique_id].Root] = results[unique_id]
    return results
# ----------------------------------------------------------------------
def DisplayDependencyMap( dependency_map,
                          output_stream=sys.stdout,
                        ):
    """Pretty-print a dependency map produced by :func:`CreateDependencyMap`.

    Only path-keyed entries are printed (each repo appears under both its
    unique id and its root path; the id-keyed duplicates are skipped).
    """
    from CommonEnvironment.StreamDecorator import StreamDecorator
    # ----------------------------------------------------------------------
    for k, v in six.iteritems(dependency_map):
        if not os.path.isdir(k):
            continue
        output_stream.write(textwrap.dedent(
            """\
            Name: {name} ({unique_id})
            Directory: {dir}
            Configurations:
            {configurations}
            """).format( name=v.Name,
                         unique_id=v.UniqueId,
                         dir=k,
                         configurations=StreamDecorator.LeftJustify( '\n'.join([ textwrap.dedent(
                            """\
                            {name}
                            ReliesOn:
                            {relies_on}
                            ReliedUponBy:
                            {relied_upon_by}
                            """).format( name=ck,
                                         relies_on='\n'.join([ " - {} <{}> [{}]".format(item.Dependency.Name, item.Configuration, item.Dependency.Root) for item in cv.ReliesOn ]) if cv.ReliesOn else " <None>",
                                         relied_upon_by='\n'.join([ " - {} <{}> [{}]".format(item.Dependency.Name, item.Configuration, item.Dependency.Root) for item in cv.ReliedUponBy ]) if cv.ReliedUponBy else " <None>",
                                       )
                            for ck, cv in six.iteritems(v.Configurations)
                          ]),
                          2,
                          skip_first_line=False,
                        ),
            ))
# ----------------------------------------------------------------------
def EnumRepositories():
    """Yield every repository of the active environment, in priority order.

    The import is deferred into the function body (as in the original) so the
    module can be loaded without the activation machinery being available.
    """
    from SourceRepositoryTools.Impl.ActivationData import ActivationData

    for repository in ActivationData.Load(None, None).PrioritizedRepos:
        yield repository
# ----------------------------------------------------------------------
def GetRepositoryRootForFile(filename):
    """Walk upward from *filename*'s directory until a repository id file is found.

    Returns the directory that contains ``Constants.REPOSITORY_ID_FILENAME``;
    raises ``Exception`` when the filesystem root is reached without a match.
    """
    current = os.path.dirname(filename)
    while True:
        if os.path.isfile(os.path.join(current, Constants.REPOSITORY_ID_FILENAME)):
            return current
        parent = os.path.dirname(current)
        if parent == current:
            # os.path.dirname is a fixed point at the filesystem root.
            raise Exception("Unable to find the repository root for '{}'".format(filename))
        current = parent
| 45.453947 | 285 | 0.481473 |
3a11220a149a467396eed9e2f60bcf713ed632ac | 3,213 | py | Python | db/xtraResources/edXBigDataSeries2015/CS100-1x/Module 3: Lectures.py | chrislangst/scalable-data-science | c7beee15c7dd14d27353c4864d927c1b76cd2fa9 | [
"Unlicense"
] | 138 | 2017-07-25T06:48:28.000Z | 2022-03-31T12:23:36.000Z | db/xtraResources/edXBigDataSeries2015/CS100-1x/Module 3: Lectures.py | chrislangst/scalable-data-science | c7beee15c7dd14d27353c4864d927c1b76cd2fa9 | [
"Unlicense"
] | 11 | 2017-08-17T13:45:54.000Z | 2021-06-04T09:06:53.000Z | db/xtraResources/edXBigDataSeries2015/CS100-1x/Module 3: Lectures.py | chrislangst/scalable-data-science | c7beee15c7dd14d27353c4864d927c1b76cd2fa9 | [
"Unlicense"
] | 74 | 2017-08-18T17:04:46.000Z | 2022-03-21T14:30:51.000Z | # Databricks notebook source exported at Mon, 14 Mar 2016 03:21:05 UTC
# NOTE(review): Databricks notebook export. The `# COMMAND ----------` and
# `# MAGIC` comment markers delimit notebook cells and magic commands — do not
# reformat or remove them. Each `displayHTML` cell embeds one lecture video.
# MAGIC %md
# MAGIC **SOURCE:** This is from the Community Edition of databricks and has been added to this databricks shard at [/#workspace/scalable-data-science/xtraResources/edXBigDataSeries2015/CS100-1x](/#workspace/scalable-data-science/xtraResources/edXBigDataSeries2015/CS100-1x) as extra resources for the project-focussed course [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/) that is prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand), and *supported by* [![](https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/images/databricks_logoTM_200px.png)](https://databricks.com/)
# MAGIC and
# MAGIC [![](https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/images/AWS_logoTM_200px.png)](https://www.awseducate.com/microsite/CommunitiesEngageHome).
# COMMAND ----------
# MAGIC %md
# MAGIC <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-nd/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/">Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License</a>.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Module Three Lectures
# COMMAND ----------
# MAGIC %md
# MAGIC ### Lecture 5: Semi-Structured Data
# COMMAND ----------
displayHTML('https://youtube.com/embed/qzMs9Sq_DHw')
# COMMAND ----------
displayHTML('https://youtube.com/embed/pMSGGZVSwqo')
# COMMAND ----------
displayHTML('https://youtube.com/embed/NJyBQ-cQ3Ac')
# COMMAND ----------
displayHTML('https://youtube.com/embed/G_67yUxdDbU')
# COMMAND ----------
displayHTML('https://youtube.com/embed/Llof8ZgCHFE')
# COMMAND ----------
displayHTML('https://youtube.com/embed/KjzoBzCxHMs')
# COMMAND ----------
displayHTML('https://youtube.com/embed/25YMAapjJgw')
# COMMAND ----------
displayHTML('https://youtube.com/embed/otrnf8MQ8S8')
# COMMAND ----------
displayHTML('https://youtube.com/embed/8vpmMbmUAiA')
# COMMAND ----------
displayHTML('https://youtube.com/embed/Wc7zJG-N2B8')
# COMMAND ----------
displayHTML('https://youtube.com/embed/c2MFJI_NWVw')
# COMMAND ----------
# MAGIC %md
# MAGIC ### Lecture 6: Structured Data
# COMMAND ----------
displayHTML('https://youtube.com/embed/lODYQTgyqLk')
# COMMAND ----------
displayHTML('https://youtube.com/embed/BZuv__KF4qU')
# COMMAND ----------
displayHTML('https://youtube.com/embed/khFzRxjk2Tg')
# COMMAND ----------
displayHTML('https://youtube.com/embed/tAepBMlGvak')
# COMMAND ----------
displayHTML('https://youtube.com/embed/XAyWtVtBTlI')
# COMMAND ----------
displayHTML('https://youtube.com/embed/Zp0EF2Dghik')
# COMMAND ----------
displayHTML('https://youtube.com/embed/iAqgcaKERHM')
# COMMAND ----------
displayHTML('https://youtube.com/embed/kaX4I2jENJc')
# COMMAND ----------
displayHTML('https://youtube.com/embed/tBsNkJyFr2w') | 30.6 | 749 | 0.698101 |
3a11c774870f73e9df814c0fb0e907ad67a018a8 | 2,075 | py | Python | src/einsteinpy/tests/test_plotting/test_staticgeodesicplotter.py | Ankk98/einsteinpy | e6c3e3939063a7698410163b6de52e499bb3c8ea | [
"MIT"
] | null | null | null | src/einsteinpy/tests/test_plotting/test_staticgeodesicplotter.py | Ankk98/einsteinpy | e6c3e3939063a7698410163b6de52e499bb3c8ea | [
"MIT"
] | null | null | null | src/einsteinpy/tests/test_plotting/test_staticgeodesicplotter.py | Ankk98/einsteinpy | e6c3e3939063a7698410163b6de52e499bb3c8ea | [
"MIT"
] | null | null | null | from unittest import mock
import astropy.units as u
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pytest
from einsteinpy.coordinates import SphericalDifferential
from einsteinpy.plotting import StaticGeodesicPlotter
@pytest.fixture()
def dummy_data():
    """Common test inputs: (coords, t, mass, start_lambda, end_lambda, step_size)."""
    coords = SphericalDifferential(
        306 * u.m,
        np.pi / 2 * u.rad,
        np.pi / 2 * u.rad,
        0 * u.m / u.s,
        0 * u.rad / u.s,
        951.0 * u.rad / u.s,
    )
    return (
        coords,
        0 * u.s,        # coordinate time
        4e24 * u.kg,    # attractor mass
        0.0,            # start_lambda
        0.002,          # end_lambda
        0.5e-6,         # step_size
    )
def test_staticgeodesicplotter_has_axes(dummy_data):
    """A fresh plotter exposes a matplotlib axes and starts without an attractor."""
    _, _, mass, _, _, _ = dummy_data
    plotter = StaticGeodesicPlotter(mass)
    assert isinstance(plotter.ax, mpl.axes.SubplotBase)
    assert plotter.time.value == 0.0
    assert plotter._attractor_present is False
@mock.patch("einsteinpy.plotting.geodesics_static.plt.show")
def test_plot_calls_plt_show(mock_show, dummy_data):
    """show() must delegate to plt.show, and plot() must draw the attractor."""
    coords, _, mass, _, end_lambda, step_size = dummy_data
    plotter = StaticGeodesicPlotter(mass)
    plotter.plot(coords, end_lambda, step_size)
    plotter.show()
    mock_show.assert_called_with()
    assert plotter._attractor_present
@mock.patch("einsteinpy.plotting.geodesics_static.plt.savefig")
def test_plot_save_saves_plot(mock_save, dummy_data):
    """save() must forward the target filename to plt.savefig."""
    coords, _, mass, _, end_lambda, step_size = dummy_data
    plotter = StaticGeodesicPlotter(mass)
    plotter.plot(coords, end_lambda, step_size)
    filename = "test_plot.png"
    plotter.save(filename)
    mock_save.assert_called_with(filename)
def test_plot_calls_draw_attractor_Manualscale(dummy_data):
    """An explicitly supplied attractor_radius_scale must be kept and used."""
    coords, _, mass, _, end_lambda, step_size = dummy_data
    plotter = StaticGeodesicPlotter(mass, attractor_radius_scale=1500)
    plotter.plot(coords, end_lambda, step_size)
    assert plotter._attractor_present
    assert plotter.attractor_radius_scale == 1500
    assert plotter.get_curr_plot_radius != -1
def test_plot_calls_draw_attractor_AutoScale(dummy_data):
    """With no explicit scale, plot() must still auto-size and draw the attractor."""
    coords, _, mass, _, end_lambda, step_size = dummy_data
    plotter = StaticGeodesicPlotter(mass)
    plotter.plot(coords, end_lambda, step_size)
    assert plotter._attractor_present
    assert plotter.get_curr_plot_radius != -1
| 28.040541 | 63 | 0.700241 |
3a14941cbf1878d6614fada903d6f5559aa474e0 | 367 | py | Python | pageOne.py | Priyanka1527/PageOne | ff129f305b13c8cac839e6a5f55f3853e1f16973 | [
"MIT"
] | null | null | null | pageOne.py | Priyanka1527/PageOne | ff129f305b13c8cac839e6a5f55f3853e1f16973 | [
"MIT"
] | null | null | null | pageOne.py | Priyanka1527/PageOne | ff129f305b13c8cac839e6a5f55f3853e1f16973 | [
"MIT"
] | null | null | null | #from inv_ind.py import inverted_index
import search
class main:
    # NOTE(review): statements in a class body execute at import time, so these
    # two input() calls block for user interaction as soon as this module is
    # imported. Consider moving them under `if __name__ == "__main__":`.
    #vector_space = inverted_index()
    # *************************** INPUTTING THE QUERY ***************************
    k = input('Enter value of k:') #Accepting query from the user
    query = input('Enter your search query:') #Accepting query from the user
    # Runs the search immediately with the values typed above.
    search.search_query(query, k)
| 28.230769 | 81 | 0.599455 |
3a1626ac2fa1019fb590d26ad03b0ec329ab6d9d | 2,017 | py | Python | deciphon_cli/console/scan.py | EBI-Metagenomics/deciphon-cli | aa090c886db1f4dacc6bc88b46b6ebcecb79eaab | [
"MIT"
] | null | null | null | deciphon_cli/console/scan.py | EBI-Metagenomics/deciphon-cli | aa090c886db1f4dacc6bc88b46b6ebcecb79eaab | [
"MIT"
] | null | null | null | deciphon_cli/console/scan.py | EBI-Metagenomics/deciphon-cli | aa090c886db1f4dacc6bc88b46b6ebcecb79eaab | [
"MIT"
] | null | null | null | from enum import Enum
import typer
from fasta_reader import read_fasta
from deciphon_cli.core import ScanPost, SeqPost
from deciphon_cli.requests import get_json, get_plain, post_json
__all__ = ["app"]
app = typer.Typer()
class ScanIDType(str, Enum):
    """How a scan identifier given on the command line is interpreted by the API."""
    SCAN_ID = "scan_id"
    JOB_ID = "job_id"
@app.command()
def add(
    db_id: int = typer.Argument(...),
    fasta_filepath: str = typer.Argument(...),
    multi_hits: bool = typer.Argument(True),
    hmmer3_compat: bool = typer.Argument(False),
):
    """Submit a new scan: every sequence in the FASTA file is attached to it."""
    scan = ScanPost(db_id=db_id, multi_hits=multi_hits, hmmer3_compat=hmmer3_compat)
    with read_fasta(fasta_filepath) as fasta:
        scan.seqs.extend(
            SeqPost(name=record.id, data=record.sequence) for record in fasta
        )
    typer.echo(post_json(f"/scans/", scan.dict()))
@app.command()
def get(
    scan_id: int = typer.Argument(...),
    id_type: ScanIDType = typer.Option(ScanIDType.SCAN_ID.value),
):
    """Fetch one scan, addressing it either by its scan id or by its job id."""
    params = {"id_type": id_type.value}
    typer.echo(get_json(f"/scans/{scan_id}", params))
@app.command()
def seq_list(scan_id: int = typer.Argument(...)):
    """Print the sequences that were submitted with the given scan."""
    typer.echo((get_json(f"/scans/{scan_id}/seqs")))
@app.command()
def list():
    """Print every scan known to the server."""
    # NOTE: shadows the builtin ``list``, but the function name doubles as the
    # CLI command name, so renaming it would change the user-facing command.
    typer.echo((get_json(f"/scans")))
@app.command()
def prod_list(scan_id: int = typer.Argument(...)):
    """Print the products produced by the given scan."""
    typer.echo((get_json(f"/scans/{scan_id}/prods")))
@app.command()
def prod_gff(scan_id: int = typer.Argument(...)):
    """Print the scan's products in GFF format (raw body, no trailing newline)."""
    typer.echo(get_plain(f"/scans/{scan_id}/prods/gff"), nl=False)
@app.command()
def prod_path(scan_id: int = typer.Argument(...)):
    """Print the scan's product paths (raw body, no trailing newline)."""
    typer.echo(get_plain(f"/scans/{scan_id}/prods/path"), nl=False)
@app.command()
def prod_fragment(scan_id: int = typer.Argument(...)):
    """Print the scan's product fragments (raw body, no trailing newline)."""
    typer.echo(get_plain(f"/scans/{scan_id}/prods/fragment"), nl=False)
@app.command()
def prod_amino(scan_id: int = typer.Argument(...)):
    """Print the scan's amino-acid products (raw body, no trailing newline)."""
    typer.echo(get_plain(f"/scans/{scan_id}/prods/amino"), nl=False)
@app.command()
def prod_codon(scan_id: int = typer.Argument(...)):
    """Print the scan's codon products (raw body, no trailing newline)."""
    typer.echo(get_plain(f"/scans/{scan_id}/prods/codon"), nl=False)
| 24.901235 | 84 | 0.67526 |
3a163271adf00fd1d184016bb403b5d130a4068f | 1,655 | py | Python | neuralmaterial/lib/models/vgg.py | NejcHirci/material-addon | c08e2081413c3319b712c2f7193ac8013f601382 | [
"MIT"
] | 4 | 2022-01-31T14:26:39.000Z | 2022-02-06T06:34:27.000Z | neuralmaterial/lib/models/vgg.py | NejcHirci/material_addon | c08e2081413c3319b712c2f7193ac8013f601382 | [
"MIT"
] | 2 | 2022-01-30T10:35:04.000Z | 2022-01-30T10:35:04.000Z | neuralmaterial/lib/models/vgg.py | NejcHirci/material-addon | c08e2081413c3319b712c2f7193ac8013f601382 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
class VGG(nn.Module):
    """Feature-extractor wrapper around a VGG convolutional trunk.

    When ``pretrained`` is falsy the standard VGG initialization scheme is
    applied; otherwise the weights are left untouched for a checkpoint to
    overwrite them.
    """

    def __init__(self, features, pretrained):
        super(VGG, self).__init__()
        self.features = features
        # Pretrained checkpoints already carry trained weights, so only
        # (re)initialize when training from scratch.
        if not pretrained:
            self._initialize_weights()

    def _initialize_weights(self) -> None:
        """Apply per-layer-type initialization to every submodule."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                # He initialization suits the ReLU nonlinearities used here.
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)
def make_layers(in_channels, cfg=None):
    """Build a VGG-style convolutional trunk as an ``nn.Sequential``.

    Parameters
    ----------
    in_channels : int
        Number of channels of the input image (e.g. 3 for RGB).
    cfg : list | None
        Layer configuration: an int adds a 3x3 conv (padding 1) followed by an
        in-place ReLU with that many output channels; the string 'M' adds a
        2x2 max-pool with stride 2. Defaults to the VGG-19 configuration, so
        existing callers are unaffected.

    Returns
    -------
    nn.Sequential
        The assembled feature extractor.
    """
    if cfg is None:
        # VGG-19 layout: 16 conv layers grouped into 5 max-pool stages.
        cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M',
               512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']
    layers = []
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            layers += [conv2d, nn.ReLU(inplace=True)]
            # The next conv consumes this layer's output channels.
            in_channels = v
    return nn.Sequential(*layers)
def vgg19(pretrained, in_channels):
    """Construct a VGG-19 feature extractor, optionally with ImageNet weights."""
    model = VGG(make_layers(in_channels), pretrained)
    if not pretrained:
        return model
    state_dict = load_state_dict_from_url('https://download.pytorch.org/models/vgg19-dcbb9e9d.pth')
    # strict=False: the checkpoint also holds classifier weights that this
    # features-only model does not define.
    model.load_state_dict(state_dict, strict=False)
    return model
| 33.77551 | 113 | 0.578852 |
3a16438d4a6793d41974ba3f9e345b3deca9076f | 296 | py | Python | portfolio/admin.py | jokimies/django-pj-portfolio | ce32882fa3f5cc3206b2a61eb5cd88c0cdf243ec | [
"BSD-3-Clause"
] | 3 | 2017-02-02T19:58:57.000Z | 2021-08-10T14:43:37.000Z | portfolio/admin.py | jokimies/django-pj-portfolio | ce32882fa3f5cc3206b2a61eb5cd88c0cdf243ec | [
"BSD-3-Clause"
] | 4 | 2016-01-15T14:18:37.000Z | 2016-03-06T15:06:31.000Z | portfolio/admin.py | jokimies/django-pj-portfolio | ce32882fa3f5cc3206b2a61eb5cd88c0cdf243ec | [
"BSD-3-Clause"
] | 2 | 2019-10-12T02:05:49.000Z | 2022-03-08T16:25:17.000Z | from portfolio.models import Transaction, Security, Price, Account
from portfolio.models import PriceTracker
from django.contrib import admin
# Expose the portfolio models in the Django admin with default ModelAdmin
# options; registration order matches the original one-per-line calls.
for _model in (Transaction, Security, Price, PriceTracker, Account):
    admin.site.register(_model)
| 29.6 | 66 | 0.841216 |
3a16bef75430d1f8616b4661d929e57eb96f5d11 | 1,295 | py | Python | quasimodo/cache/file_cache.py | Aunsiels/CSK | c88609bc76d865b4987aaf30ddf1247a2031b1a6 | [
"MIT"
] | 16 | 2019-11-28T13:26:37.000Z | 2022-02-09T09:53:10.000Z | quasimodo/cache/file_cache.py | Aunsiels/CSK | c88609bc76d865b4987aaf30ddf1247a2031b1a6 | [
"MIT"
] | 1 | 2021-03-26T20:31:48.000Z | 2021-07-15T08:52:47.000Z | quasimodo/cache/file_cache.py | Aunsiels/CSK | c88609bc76d865b4987aaf30ddf1247a2031b1a6 | [
"MIT"
] | 3 | 2020-08-14T23:23:25.000Z | 2021-12-24T14:02:35.000Z | import os
import shutil
class FileCache(object):
    """Tiny on-disk cache mapping a query string to scored suggestions.

    Each query is stored as its own file whose lines are ``value<TAB>score``.
    The query is sanitized (spaces/quotes/slashes replaced) to form the
    filename.
    """

    def __init__(self, cache_dir):
        # Normalize to a trailing slash so path concatenation below works.
        self.cache_dir = cache_dir + "/"
        if not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir)

    def _cache_filename(self, query):
        """Return the on-disk path for *query*.

        Previously this sanitizing logic was duplicated in write_cache and
        read_cache; keep it in one place so the two can never diverge.
        """
        return self.cache_dir + query.replace(" ", "-").replace("'", "_").replace("/", "-")

    def write_cache(self, query, suggestions):
        """Persist *suggestions*, an iterable of (value, score) pairs, for *query*."""
        with open(self._cache_filename(query), "w") as f:
            for suggestion in suggestions:
                f.write(str(suggestion[0]) + "\t" + str(suggestion[1]) + "\n")

    def read_cache(self, query):
        """Return the cached ``[(value, score), ...]`` for *query*, or None if absent."""
        filename = self._cache_filename(query)
        if not os.path.isfile(filename):
            return None
        suggestions = []
        with open(filename) as f:
            for line in f:
                suggestion = line.strip().split("\t")
                suggestions.append((suggestion[0], float(suggestion[1])))
        return suggestions

    def delete_cache(self):
        """Remove the whole cache directory — but only for test directories."""
        # Only delete if we are sure it is a test
        if "test" in self.cache_dir:
            shutil.rmtree(self.cache_dir, ignore_errors=True)

    def read_regex(self, regex):
        raise NotImplementedError

    def read_all(self):
        raise NotImplementedError
| 32.375 | 95 | 0.565251 |
3a16fcd29e32261f583e0fe17a97b6df4dbfd030 | 391 | py | Python | OpticsLab/components.py | AzizAlqasem/OpticsLab | a68c12edc9998f0709bae3da2fa0f85778e19bf0 | [
"MIT"
] | null | null | null | OpticsLab/components.py | AzizAlqasem/OpticsLab | a68c12edc9998f0709bae3da2fa0f85778e19bf0 | [
"MIT"
] | null | null | null | OpticsLab/components.py | AzizAlqasem/OpticsLab | a68c12edc9998f0709bae3da2fa0f85778e19bf0 | [
"MIT"
] | null | null | null | """ The components module has all optical components that are used in optics
"""
class Mirror:
    """Placeholder for a reflective optical element; no behavior implemented yet."""
    def __init__(self,):
        pass
class Lense:
    """Placeholder for a lens element; no behavior implemented yet.

    NOTE(review): "Lense" looks like a misspelling of "Lens", but renaming
    the class would break any importers, so it is only flagged here.
    """
    def __init__(self,):
        pass
class Mediam:
    """Placeholder for a propagation medium; no behavior implemented yet.

    NOTE(review): "Mediam" looks like a misspelling of "Medium"; renaming
    would break importers, so it is only flagged here.
    """
    def __init__(self,):
        pass
class BeamSpliter:
    """Placeholder for a beam splitter; no behavior implemented yet.

    NOTE(review): "BeamSpliter" looks like a misspelling of "BeamSplitter";
    renaming would break importers, so it is only flagged here.
    """
    def __init__(self,):
        pass
class Waveplate:
    """Placeholder for a waveplate (retarder); no behavior implemented yet."""
    def __init__(self,):
        pass
3a193908dfb0eb3ea9c064b546eae9b145317435 | 10,915 | py | Python | txraft/test_txraft.py | tehasdf/txraft | 860345e4a10d438d3fc69d752f09a06546c92d08 | [
"MIT"
] | null | null | null | txraft/test_txraft.py | tehasdf/txraft | 860345e4a10d438d3fc69d752f09a06546c92d08 | [
"MIT"
] | null | null | null | txraft/test_txraft.py | tehasdf/txraft | 860345e4a10d438d3fc69d752f09a06546c92d08 | [
"MIT"
] | null | null | null | from twisted.internet.defer import succeed
from twisted.internet.task import Clock
from twisted.trial.unittest import TestCase
from txraft import Entry, RaftNode, MockRPC, STATE
from txraft.commands import AppendEntriesCommand, RequestVotesCommand
class MockStoreDontUse(object):
    """In-memory stand-in for a Raft persistent store, used only by these tests.

    Read/write accessors return already-fired Deferreds (via ``succeed``) to
    mimic an asynchronous store interface; ``contains``/``deleteAfter``/
    ``insert`` are synchronous helpers. ``log`` maps 1-based index -> Entry.

    NOTE(review): ``iterkeys``/``iteritems`` make this Python-2-only code.
    """
    def __init__(self, entries=None):
        self.currentTerm = 0   # latest term this store has recorded
        self.votedFor = None   # candidate id voted for, if any
        if entries is None:
            entries = {}
        self.log = entries
    def getLastIndex(self):
        # Index 0 denotes "empty log"; matches the (term=0, index=0)
        # sentinel accepted by contains() below.
        if not self.log:
            return succeed(0)
        return succeed(max(self.log.iterkeys()))
    def getLastTerm(self):
        if not self.log:
            return succeed(0)
        # Term of the entry at the highest index.
        return (self.getLastIndex()
            .addCallback(lambda index: self.log[index].term)
        )
    def getByIndex(self, ix):
        return succeed(self.log[ix])
    def setVotedFor(self, votedFor):
        self.votedFor = votedFor
        return succeed(True)
    def setCurrentTerm(self, currentTerm):
        self.currentTerm = currentTerm
        return succeed(True)
    def getVotedFor(self):
        return succeed(self.votedFor)
    def getCurrentTerm(self):
        return succeed(self.currentTerm)
    def contains(self, term, index):
        # (0, 0) is the "before the first entry" sentinel and always matches.
        # NOTE(review): unlike the accessors above this returns a plain bool,
        # not a Deferred — presumably intentional; confirm against callers.
        if term == index == 0:
            return True
        return index in self.log and self.log[index].term == term
    def deleteAfter(self, ix, inclusive=True):
        if not inclusive:
            ix += 1
        # Delete the contiguous run of entries starting at ix.
        while True:
            if not ix in self.log:
                break
            del self.log[ix]
            ix += 1
    def insert(self, entries):
        # First pass: any incoming entry that conflicts (same index, different
        # term) truncates the log from that index on — the AppendEntries
        # conflict rule from the Raft paper.
        for index, entry in entries.iteritems():
            if index in self.log and self.log[index].term != entry.term:
                self.deleteAfter(index)
        # Second pass: store all new entries.
        for index, entry in entries.iteritems():
            self.log[index] = entry
class TestMockStoreInsert(TestCase):
    """Unit tests for MockStoreDontUse.insert's conflict-truncation behavior."""
    def test_empty(self):
        # Inserting into an empty log simply stores the entry.
        store = MockStoreDontUse()
        newentry = Entry(term=1, payload=True)
        store.insert({1: newentry})
        self.assertEqual(store.log, {1: newentry})
    def test_noconflict(self):
        # A new index with a matching term appends without touching old entries.
        oldentry = Entry(term=1, payload=True)
        store = MockStoreDontUse({1: oldentry})
        newentry = Entry(term=1, payload=True)
        store.insert({2: newentry})
        self.assertEqual(store.log, {1: oldentry, 2: newentry})
    def test_conflict_last(self):
        # Same index, different term: the old entry is replaced.
        oldentry = Entry(term=1, payload=False)
        store = MockStoreDontUse({1: oldentry})
        newentry = Entry(term=2, payload=True)
        store.insert({1: newentry})
        self.assertEqual(store.log, {1: newentry})
    def test_conflict_many(self):
        # A conflict at index 2 truncates indices 2-3, then the new run is stored.
        oldentry1 = Entry(term=1, payload=1)
        oldentry2 = Entry(term=1, payload=2)
        oldentry3 = Entry(term=1, payload=3)
        store = MockStoreDontUse({1: oldentry1, 2: oldentry2, 3: oldentry3})
        newentry1 = Entry(term=2, payload=4)
        newentry2 = Entry(term=2, payload=5)
        newentry3 = Entry(term=2, payload=6)
        store.insert({2: newentry1, 3: newentry2, 4: newentry3})
        self.assertEqual(store.log, {1: oldentry1, 2: newentry1, 3: newentry2, 4: newentry3})
class TestElection(TestCase):
    """Leader election and RequestVote handling (Raft §5.2, §5.4.1)."""
    def test_three_up(self):
        # Three fully-connected nodes; advancing only node1's clock past its
        # election timeout should make node1 the leader.
        store1 = MockStoreDontUse()
        store2 = MockStoreDontUse()
        store3 = MockStoreDontUse()
        rpc1 = MockRPC()
        rpc2 = MockRPC()
        rpc3 = MockRPC()
        clock1 = Clock()
        clock2 = Clock()
        clock3 = Clock()
        node1 = RaftNode(1, store1, rpc1, clock=clock1)
        node2 = RaftNode(2, store2, rpc2, clock=clock2)
        node3 = RaftNode(3, store3, rpc3, clock=clock3)
        for rpc in [rpc1, rpc2, rpc3]:
            for node in [node1, node2, node3]:
                rpc.simpleAddNode(node)
        clock1.advance(0.4)
        self.assertIs(node1._state, STATE.LEADER)
    def test_respond_requestVote(self):
        # A valid, up-to-date candidate is granted the vote and recorded.
        store = MockStoreDontUse()
        rpc = MockRPC()
        clock = Clock()
        node = RaftNode(1, store, rpc, clock=clock)
        resp = node.respond_requestVote(RequestVotesCommand(term=4,
                                                            candidateId=2,
                                                            lastLogIndex=4,
                                                            lastLogTerm=4))
        term, result = self.successResultOf(resp)
        self.assertTrue(result)
        votedFor = self.successResultOf(store.getVotedFor())
        self.assertEqual(votedFor, 2)
    def test_respond_requestVote_alreadyVoted(self):
        # Having voted for candidate 3, the node refuses candidate 2 but will
        # re-grant the vote to the same candidate 3.
        store = MockStoreDontUse()
        store.setVotedFor(3)
        rpc = MockRPC()
        clock = Clock()
        node = RaftNode(1, store, rpc, clock=clock)
        resp = node.respond_requestVote(RequestVotesCommand(term=4,
                                                            candidateId=2,
                                                            lastLogIndex=4,
                                                            lastLogTerm=4))
        term, result = self.successResultOf(resp)
        self.assertFalse(result)
        resp = node.respond_requestVote(RequestVotesCommand(term=4,
                                                            candidateId=3,
                                                            lastLogIndex=4,
                                                            lastLogTerm=4))
        term, result = self.successResultOf(resp)
        self.assertTrue(result)
    def test_respond_requestVote_lowerTerm(self):
        # A candidate with a stale term (2 < 3) must be rejected.
        store = MockStoreDontUse()
        store.setCurrentTerm(3)
        rpc = MockRPC()
        clock = Clock()
        node = RaftNode(1, store, rpc, clock=clock)
        resp = node.respond_requestVote(RequestVotesCommand(term=2,
                                                            candidateId='id',
                                                            lastLogIndex=4,
                                                            lastLogTerm=4))
        term, result = self.successResultOf(resp)
        self.assertFalse(result)
    def test_respond_requestVote_oldLog(self):
        # Local log ends at (index=3, term=3); candidates whose logs are less
        # up-to-date must be rejected (log-comparison rule).
        store = MockStoreDontUse(entries={
            2: Entry(term=2, payload=1),
            3: Entry(term=3, payload=2)
        })
        store.setCurrentTerm(3)
        rpc = MockRPC()
        clock = Clock()
        node = RaftNode(1, store, rpc, clock=clock)
        resp = node.respond_requestVote(RequestVotesCommand(term=4,
                                                            candidateId='id',
                                                            lastLogIndex=2,
                                                            lastLogTerm=2))
        term, result = self.successResultOf(resp)
        self.assertFalse(result)
        resp = node.respond_requestVote(RequestVotesCommand(term=4,
                                                            candidateId='id',
                                                            lastLogIndex=4,
                                                            lastLogTerm=2))
        term, result = self.successResultOf(resp)
        self.assertFalse(result)
        resp = node.respond_requestVote(RequestVotesCommand(term=4,
                                                            candidateId='id',
                                                            lastLogIndex=2,
                                                            lastLogTerm=3))
        term, result = self.successResultOf(resp)
        self.assertFalse(result)
class TestAppendEntries(TestCase):
    """Follower-side handling of AppendEntries (Raft §5.3)."""
    def test_respond_appendEntries_simple(self):
        # A single new entry at index 1 is accepted and stored.
        store = MockStoreDontUse()
        rpc = MockRPC()
        clock = Clock()
        node = RaftNode(1, store, rpc, clock=clock)
        newentry = Entry(term=0, payload=1)
        resp = node.respond_appendEntries(AppendEntriesCommand(term=0,
                                                               leaderId=2,
                                                               prevLogIndex=0,
                                                               prevLogTerm=0,
                                                               entries={1: newentry},
                                                               leaderCommit=1))
        term, result = self.successResultOf(resp)
        self.assertEqual(term, 0)
        self.assertTrue(result)
        self.assertEqual(store.log, {1: newentry})
    def test_respond_appendEntries_empty(self):
        # An empty entries dict is a heartbeat; it must still succeed.
        store = MockStoreDontUse()
        rpc = MockRPC()
        clock = Clock()
        node = RaftNode(1, store, rpc, clock=clock)
        newentry = Entry(term=0, payload=1)
        resp = node.respond_appendEntries(AppendEntriesCommand(term=0,
                                                               leaderId=2,
                                                               prevLogIndex=0,
                                                               prevLogTerm=0,
                                                               entries={},
                                                               leaderCommit=1))
        term, result = self.successResultOf(resp)
        self.assertEqual(term, 0)
        self.assertTrue(result)
class TestCallingAppendEntries(TestCase):
    """Leader-side _callAppendEntries: the leader must replicate its log so
    that the follower's log ends up identical."""
    def test_backwards(self):
        # Follower starts empty; the leader must back up and resend the whole log.
        clock = Clock()
        leader_store = MockStoreDontUse(entries={
            1: Entry(term=1, payload=1),
            2: Entry(term=2, payload=2),
        })
        leader_store.setCurrentTerm(2)
        leader_rpc = MockRPC()
        leader = RaftNode(1, leader_store, leader_rpc, clock=clock)
        follower_store = MockStoreDontUse()
        follower_rpc = MockRPC()
        follower = RaftNode(2, follower_store, follower_rpc, clock=clock)
        leader_rpc.simpleAddNode(follower)
        follower_rpc.simpleAddNode(leader)
        d = leader._callAppendEntries(follower.id, {})
        res = self.successResultOf(d)
        self.assertEqual(leader_store.log, follower_store.log)
    def test_add(self):
        # Follower has a correct prefix; only the missing suffix is appended.
        clock = Clock()
        leader_store = MockStoreDontUse(entries={
            1: Entry(term=1, payload=1),
            2: Entry(term=2, payload=2),
            3: Entry(term=2, payload=3),
        })
        leader_store.setCurrentTerm(2)
        leader_rpc = MockRPC()
        leader = RaftNode(1, leader_store, leader_rpc, clock=clock)
        follower_store = MockStoreDontUse({
            1: Entry(term=1, payload=1)
        })
        follower_rpc = MockRPC()
        follower = RaftNode(2, follower_store, follower_rpc, clock=clock)
        leader_rpc.simpleAddNode(follower)
        follower_rpc.simpleAddNode(leader)
        d = leader._callAppendEntries(follower.id, {})
        res = self.successResultOf(d)
        self.assertEqual(leader_store.log, follower_store.log)
    def test_remove_incorrect(self):
        # Follower's entry at index 2 is from a conflicting term (5) and must
        # be overwritten by the leader's entries.
        clock = Clock()
        leader_store = MockStoreDontUse(entries={
            1: Entry(term=1, payload=1),
            2: Entry(term=2, payload=2),
            3: Entry(term=2, payload=3),
        })
        leader_store.setCurrentTerm(2)
        leader_rpc = MockRPC()
        leader = RaftNode(1, leader_store, leader_rpc, clock=clock)
        follower_store = MockStoreDontUse({
            1: Entry(term=1, payload=1),
            2: Entry(term=5, payload=1)
        })
        follower_rpc = MockRPC()
        follower = RaftNode(2, follower_store, follower_rpc, clock=clock)
        leader_rpc.simpleAddNode(follower)
        follower_rpc.simpleAddNode(leader)
        d = leader._callAppendEntries(follower.id, {})
        res = self.successResultOf(d)
        self.assertEqual(leader_store.log, follower_store.log)
class TestCluster(TestCase):
    """Smoke test: a 5-node fully-connected cluster survives a timeout tick."""
    def test_cluster(self):
        nodes = []
        for num in range(5):
            clock = Clock()
            rpc = MockRPC()
            store = MockStoreDontUse()
            node = RaftNode(num, store, rpc, clock=clock, electionTimeout=1)
            nodes.append((node, rpc, store, clock))
        # Wire every node's RPC to every other node (full mesh).
        for node1, rpc, _, _ in nodes:
            for node2, _, _, _ in nodes:
                if node1 is node2:
                    continue
                rpc.simpleAddNode(node2)
        # Fire each node's election timeout; no assertions — this only checks
        # nothing blows up.
        for node, rpc, store, clock in nodes:
            clock.advance(1.0)
        # for node, rpc, store, clock in nodes:
        #     print 'asd', node._state
| 30.319444 | 93 | 0.599542 |
3a19793608f407d01e4af46fb22f949e028fb9e8 | 6,867 | py | Python | prototype/c2dn/script/analysis/extractData.py | Thesys-lab/C2DN | 55aa7fc1cd13ab0c80a9c25aa0288b454616d83c | [
"Apache-2.0"
] | null | null | null | prototype/c2dn/script/analysis/extractData.py | Thesys-lab/C2DN | 55aa7fc1cd13ab0c80a9c25aa0288b454616d83c | [
"Apache-2.0"
] | null | null | null | prototype/c2dn/script/analysis/extractData.py | Thesys-lab/C2DN | 55aa7fc1cd13ab0c80a9c25aa0288b454616d83c | [
"Apache-2.0"
] | null | null | null |
import os, sys
sys.path.append(os.path.expanduser("~/workspace/"))
from pyutils.common import *
def load_fe_metrics(ifilepath):
    """Parse a frontend Prometheus-style metrics dump into a dict of counters.

    Only lines starting with ``frontend`` are considered. Each recognized
    pattern sets one counter from the line's second whitespace-separated
    token; the partial-hit byte metrics are accumulated with their 2/3 and
    1/3 weights. Fix over the previous version: every counter now defaults
    to 0, so a dump missing some metric no longer raises ``NameError``.

    Parameters
    ----------
    ifilepath : str
        Path to the metrics file (one ``name{labels} value`` pair per line).

    Returns
    -------
    dict
        Counter name -> float value (0 for metrics absent from the file).
    """
    # (pattern substring, counter key), checked in order — the order mirrors
    # the original elif chain, so overlapping patterns resolve identically.
    simple_patterns = (
        ('byte{reqType="allToClient"}', "n_byte_to_client"),
        ('nReq{reqType="allToClient"}', "n_req_to_client"),
        ('trafficType="origin"', "n_byte_from_origin"),
        ('reqType="fullObjMiss"', "n_req_from_origin"),
        ('traffic{trafficType="intra"}', "n_byte_intra"),
        ('traffic{trafficType="ICPFull"}', "n_byte_ICP_full"),
        ('traffic{trafficType="ICPChunk"}', "n_byte_ICP_chunk"),
        ('trafficType="pushFullObj"', "n_byte_push_full"),
        ('trafficType="pushChunk"', "n_byte_push_chunk"),
        ('nReq{reqType="ICPFull"}', "n_req_ICP_full"),
        ('nReq{reqType="ICPChunk"}', "n_req_ICP_chunk"),
        ('nReq{reqType="skipFetch"}', "n_req_skip_chunk"),
        ('frontend_nReq{reqType="chunkRespSkipped"}', "n_req_chunk_resp_skipped"),
        ('byte{reqType="chunkHit"}', "n_byte_chunk_hit"),
        ('nReq{reqType="chunkHit"}', "n_req_chunk_hit"),
    )
    ret_dict = {key: 0 for _, key in simple_patterns}
    ret_dict["n_byte_partial_miss"] = 0
    ret_dict["n_req_partial_miss"] = 0

    with open(ifilepath) as ifile:
        for line in ifile:
            if not line.startswith("frontend"):
                continue
            for pattern, key in simple_patterns:
                if pattern in line:
                    ret_dict[key] = float(line.split()[1])
                    break
            else:
                # Partial-hit metrics accumulate instead of overwrite; the
                # byte counts are weighted 2/3 (variant 1) and 1/3 (variant 2).
                if 'byte{reqType="partialHit_1"}' in line:
                    ret_dict["n_byte_partial_miss"] += float(line.split()[1]) / 3 * 2
                elif 'byte{reqType="partialHit_2"}' in line:
                    ret_dict["n_byte_partial_miss"] += float(line.split()[1]) / 3
                elif 'nReq{reqType="partialHit_1"}' in line:
                    ret_dict["n_req_partial_miss"] += float(line.split()[1])
                elif 'nReq{reqType="partialHit_2"}' in line:
                    ret_dict["n_req_partial_miss"] += float(line.split()[1])
    return ret_dict
def load_all_fe_metrics(ifile_dir, system):
    """Aggregate per-node frontend metrics and print miss/intra-traffic ratios.

    Expects metric files at ``{ifile_dir}/cdn{0..9}/c2dn/metricFE``. Nodes
    whose file is missing/unreadable are skipped (best-effort: the exception
    is printed and aggregation continues). ``system`` is "CDN" or "C2DN";
    C2DN additionally folds in the chunk-level counters.
    """
    all_data = []
    for i in range(10):
        try:
            d = load_fe_metrics("{}/cdn{}/c2dn/metricFE".format(ifile_dir, i))
            all_data.append(d)
        except Exception as e:
            # Deliberate best-effort: report and keep aggregating other nodes.
            print(e)
    client_bytes = sum([d["n_byte_to_client"] for d in all_data])
    origin_bytes = sum([d["n_byte_from_origin"] for d in all_data])
    client_nreq = sum([d["n_req_to_client"] for d in all_data])
    origin_nreq = sum([d["n_req_from_origin"] for d in all_data])
    intra_bytes = sum([d["n_byte_intra"] for d in all_data])
    # this is not accurate as it includes skipped chunk fetch
    intra_get_bytes = sum([d["n_byte_ICP_full"] for d in all_data])
    intra_push_bytes = sum([d["n_byte_push_full"] for d in all_data])
    intra_get_nreq = sum([d["n_req_ICP_full"] for d in all_data])
    if system == "C2DN":
        # intra_get_nreq += (sum([d["n_req_ICP_chunk"] for d in all_data]) - sum([d["n_req_skip_chunk"] for d in all_data]))//3
        # intra_get_nreq += sum([d["n_req_ICP_chunk"] for d in all_data]) // 3
        intra_get_bytes += sum([d["n_byte_ICP_chunk"] for d in all_data])
        intra_push_bytes += sum([d["n_byte_push_chunk"] for d in all_data])
    # bmr = byte miss ratio, omr = object (request) miss ratio.
    # NOTE(review): divides by client totals — raises ZeroDivisionError when no
    # metric file was readable.
    print("bmr {:.4f} omr {:.4f} | bytes intra {:.4f} intra_get {:.4f} intra_push {:.4f} | nReq intra get (full) {:.4f}".format(
        origin_bytes/client_bytes, origin_nreq/client_nreq,
        intra_bytes/client_bytes, intra_get_bytes/client_bytes, intra_push_bytes/client_bytes,
        intra_get_nreq/client_nreq,
    ))
    if system == "C2DN":
        # Fraction of requests/bytes served by reassembling coded chunks
        # (full chunk hits plus partial hits).
        chunk_serve_nreq = sum([d["n_req_chunk_hit"] for d in all_data])
        chunk_serve_nreq += sum([d["n_req_partial_miss"] for d in all_data])
        chunk_serve_bytes = sum([d["n_byte_chunk_hit"] for d in all_data])
        chunk_serve_bytes += sum([d["n_byte_partial_miss"] for d in all_data])
        print("serving with chunks: {:.4f} req {:.4f} bytes".format(
            chunk_serve_nreq/client_nreq, chunk_serve_bytes/client_bytes,
        ))
if __name__ == "__main__":
    # Experiment log directory; the commented calls below are earlier runs
    # kept for reference.
    BASE_DIR = "/nvme/log/p/2021-02-01/"
    # load_all_fe_metrics(f"{BASE_DIR}/0124/aws_CDN_akamai2_expLatency_unavail0_1000G/", system="CDN")
    # load_all_fe_metrics(f"{BASE_DIR}/0124/aws_C2DN_akamai2_expLatency_unavail0_43_1000G/", system="C2DN")
    # load_all_fe_metrics(f"{BASE_DIR}/0125/aws_CDN_akamai2_expLatency_unavail1_1000G/", system="CDN")
    # load_all_fe_metrics(f"{BASE_DIR}/0125/aws_C2DN_akamai2_expLatency_unavail1_43_1000G/", system="C2DN")
    # load_all_fe_metrics(f"{BASE_DIR}/0127/aws_CDN_akamai1_expLatency_unavail0_100G/", system="CDN")
    # load_all_fe_metrics(f"{BASE_DIR}/0127/aws_C2DN_akamai1_expLatency_unavail0_43_100G/", system="C2DN")
    # load_all_fe_metrics(f"{BASE_DIR}/0130/aws_CDN_akamai1_expLatency_unavail0_100G/", system="CDN")
    # load_all_fe_metrics(f"{BASE_DIR}/0130/aws_C2DN_akamai1_expLatency_unavail0_43_100G/", system="C2DN")
    load_all_fe_metrics(f"{BASE_DIR}/aws_CDN_akamai2_expLatency_unavail0_1000G/", system="CDN")
    load_all_fe_metrics(f"{BASE_DIR}/aws_C2DN_akamai2_expLatency_unavail0_43_1000G/", system="C2DN")
3a1a4878173988f64e8012e0966e1a78c639eef8 | 4,116 | py | Python | ToDo/settings/common.py | adarsh9780/2Do | b0f3067b34c49987a4bbb7b56813d73805d83918 | [
"MIT"
] | null | null | null | ToDo/settings/common.py | adarsh9780/2Do | b0f3067b34c49987a4bbb7b56813d73805d83918 | [
"MIT"
] | 10 | 2020-01-03T16:56:27.000Z | 2022-01-13T00:41:57.000Z | ToDo/settings/common.py | adarsh9780/2Do | b0f3067b34c49987a4bbb7b56813d73805d83918 | [
"MIT"
] | null | null | null | """
Django settings for ToDo project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key below is committed to source control — it should be
# rotated and loaded from the environment before any production deployment.
SECRET_KEY = '4@_rz2!t@z1jvzsw84+42xxr1v2yz7qhop$khg($i@8s5@73yd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
ALLOWED_HOSTS = ['10.10.131.76', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    #Custom Apps
    'CreateCard',
    #Crispy forms
    'crispy_forms',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ToDo.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, "templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'ToDo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Emails are printed to the console — development only.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# This is where static files will be collected.
STATIC_ROOT = os.path.join(BASE_DIR, 'Static_Root')
# apart from looking in 'my_app/static', this setting will also
# look for static files mentioned in below directories.
# Remove the contents if you have one.
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, "static"),
# ]
# Media settings
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "Media_Root")
# TWILIO_ACCOUNT_SID = os.environ['TWILIO_ACCOUNT_SID']
# TWILIO_AUTH_TOKEN = os.environ['TWILIO_AUTH_TOKEN']
# TWILIO_CALLER_ID = os.environ['TWILIO_CALLER_ID']
#for Twilio
# SECURITY: live Twilio credentials are committed here in plain text; they
# should be loaded from the environment as in the commented lines above.
TWILIO_ACCOUNT_SID='ACf444eea774e6a2e4e0b81cd4b8cb3a8d'
TWILIO_AUTH_TOKEN='4dd33e0cf293066f9df8d7d385d454f7'
TWILIO_CALLER_ID='+18042943446'
#Crispy form
CRISPY_TEMPLATE_PACK = 'bootstrap4'
| 27.078947 | 91 | 0.710641 |
3a1ad1cbd5fa6fd57f60b6cfe90e8e847de62504 | 89 | py | Python | openamundsen/modules/__init__.py | openamundsen/openamundsen | 2ac09eb34b0c72c84c421a0dac08d114a05b7b1c | [
"MIT"
] | 3 | 2021-05-28T06:46:36.000Z | 2021-06-14T13:39:25.000Z | openamundsen/modules/__init__.py | openamundsen/openamundsen | 2ac09eb34b0c72c84c421a0dac08d114a05b7b1c | [
"MIT"
] | 22 | 2021-04-28T12:31:58.000Z | 2022-03-09T18:29:12.000Z | openamundsen/modules/__init__.py | openamundsen/openamundsen | 2ac09eb34b0c72c84c421a0dac08d114a05b7b1c | [
"MIT"
] | 1 | 2021-06-01T12:48:54.000Z | 2021-06-01T12:48:54.000Z | from . import (
canopy,
evapotranspiration,
radiation,
snow,
soil,
)
| 11.125 | 23 | 0.573034 |
3a1b3de82b0cb02451c59c3a93b30506f022268a | 188 | py | Python | config/urls.py | laactech/django-security-headers-example | 86ea0b7209f8871c32100ada31fe00aa4a8e9f63 | [
"BSD-3-Clause"
] | 1 | 2019-10-09T22:08:27.000Z | 2019-10-09T22:08:27.000Z | config/urls.py | laactech/django-security-headers-example | 86ea0b7209f8871c32100ada31fe00aa4a8e9f63 | [
"BSD-3-Clause"
] | 7 | 2020-06-05T23:45:57.000Z | 2022-02-10T10:40:54.000Z | config/urls.py | laactech/django-security-headers-example | 86ea0b7209f8871c32100ada31fe00aa4a8e9f63 | [
"BSD-3-Clause"
] | null | null | null | from django.urls import path
from django_security_headers_example.core.views import LandingPageView
# Site root ("/") is served by the landing page view.
urlpatterns = [
    path("", view=LandingPageView.as_view(), name="landing_page"),
]
| 20.888889 | 70 | 0.776596 |
3a1bb607068330f96d4bdb50c12759ee1c1a9528 | 14,071 | py | Python | tests/unit/test_experiments_analytics.py | LastRemote/sagemaker-python-sdk | fddf29d9e4383cd3f939253eef47ee79a464dd37 | [
"Apache-2.0"
] | 1,690 | 2017-11-29T20:13:37.000Z | 2022-03-31T12:58:11.000Z | tests/unit/test_experiments_analytics.py | LastRemote/sagemaker-python-sdk | fddf29d9e4383cd3f939253eef47ee79a464dd37 | [
"Apache-2.0"
] | 2,762 | 2017-12-04T05:18:03.000Z | 2022-03-31T23:40:11.000Z | tests/unit/test_experiments_analytics.py | LastRemote/sagemaker-python-sdk | fddf29d9e4383cd3f939253eef47ee79a464dd37 | [
"Apache-2.0"
] | 961 | 2017-11-30T16:44:03.000Z | 2022-03-30T23:12:09.000Z | from __future__ import absolute_import
import mock
import pytest
import pandas as pd
from collections import OrderedDict
from sagemaker.analytics import ExperimentAnalytics
@pytest.fixture
def mock_session():
    """Return a fresh mock SageMaker session for each test."""
    return mock.Mock()
def trial_component(trial_component_name):
    """Build a canned trial-component search result for the given name.

    Everything except ``TrialComponentName`` is fixed test data: two
    hyperparameters, two metrics, two input/output artifacts each, and a
    single trial/experiment parent.
    """
    metric_one = {
        "MetricName": "metric1",
        "Max": 5.0,
        "Min": 3.0,
        "Avg": 4.0,
        "StdDev": 1.0,
        "Last": 2.0,
        "Count": 2.0,
    }
    metric_two = {
        "MetricName": "metric2",
        "Max": 10.0,
        "Min": 8.0,
        "Avg": 9.0,
        "StdDev": 0.05,
        "Last": 7.0,
        "Count": 2.0,
    }
    component = {
        "TrialComponentName": trial_component_name,
        "DisplayName": "Training",
        "Source": {"SourceArn": "some-source-arn"},
        "Parameters": {"hp1": {"NumberValue": 1.0}, "hp2": {"StringValue": "abc"}},
        "Metrics": [metric_one, metric_two],
        "InputArtifacts": {
            "inputArtifacts1": {"MediaType": "text/plain", "Value": "s3:/foo/bar1"},
            "inputArtifacts2": {"MediaType": "text/plain", "Value": "s3:/foo/bar2"},
        },
        "OutputArtifacts": {
            "outputArtifacts1": {"MediaType": "text/csv", "Value": "s3:/sky/far1"},
            "outputArtifacts2": {"MediaType": "text/csv", "Value": "s3:/sky/far2"},
        },
        "Parents": [{"TrialName": "trial1", "ExperimentName": "experiment1"}],
    }
    return component
def test_trial_analytics_dataframe_all(mock_session):
    """All hyperparameters, metrics and artifacts appear as dataframe columns."""
    mock_session.sagemaker_client.search.return_value = {
        "Results": [
            {"TrialComponent": trial_component("trial-1")},
            {"TrialComponent": trial_component("trial-2")},
        ]
    }
    analytics = ExperimentAnalytics(experiment_name="experiment1", sagemaker_session=mock_session)
    # One column per hyperparameter, per metric statistic, and per artifact field.
    expected_dataframe = pd.DataFrame.from_dict(
        OrderedDict(
            [
                ("TrialComponentName", ["trial-1", "trial-2"]),
                ("DisplayName", ["Training", "Training"]),
                ("SourceArn", ["some-source-arn", "some-source-arn"]),
                ("hp1", [1.0, 1.0]),
                ("hp2", ["abc", "abc"]),
                ("metric1 - Min", [3.0, 3.0]),
                ("metric1 - Max", [5.0, 5.0]),
                ("metric1 - Avg", [4.0, 4.0]),
                ("metric1 - StdDev", [1.0, 1.0]),
                ("metric1 - Last", [2.0, 2.0]),
                ("metric1 - Count", [2.0, 2.0]),
                ("metric2 - Min", [8.0, 8.0]),
                ("metric2 - Max", [10.0, 10.0]),
                ("metric2 - Avg", [9.0, 9.0]),
                ("metric2 - StdDev", [0.05, 0.05]),
                ("metric2 - Last", [7.0, 7.0]),
                ("metric2 - Count", [2.0, 2.0]),
                ("inputArtifacts1 - MediaType", ["text/plain", "text/plain"]),
                ("inputArtifacts1 - Value", ["s3:/foo/bar1", "s3:/foo/bar1"]),
                ("inputArtifacts2 - MediaType", ["text/plain", "text/plain"]),
                ("inputArtifacts2 - Value", ["s3:/foo/bar2", "s3:/foo/bar2"]),
                ("outputArtifacts1 - MediaType", ["text/csv", "text/csv"]),
                ("outputArtifacts1 - Value", ["s3:/sky/far1", "s3:/sky/far1"]),
                ("outputArtifacts2 - MediaType", ["text/csv", "text/csv"]),
                ("outputArtifacts2 - Value", ["s3:/sky/far2", "s3:/sky/far2"]),
                ("Trials", [["trial1"], ["trial1"]]),
                ("Experiments", [["experiment1"], ["experiment1"]]),
            ]
        )
    )
    pd.testing.assert_frame_equal(expected_dataframe, analytics.dataframe())
    expected_search_exp = {
        "Filters": [
            {"Name": "Parents.ExperimentName", "Operator": "Equals", "Value": "experiment1"}
        ]
    }
    mock_session.sagemaker_client.search.assert_called_with(
        Resource="ExperimentTrialComponent", SearchExpression=expected_search_exp
    )
def test_trial_analytics_dataframe_selected_hyperparams(mock_session):
    """parameter_names restricts hyperparameter columns (hp1 is dropped)."""
    mock_session.sagemaker_client.search.return_value = {
        "Results": [
            {"TrialComponent": trial_component("trial-1")},
            {"TrialComponent": trial_component("trial-2")},
        ]
    }
    analytics = ExperimentAnalytics(
        experiment_name="experiment1", parameter_names=["hp2"], sagemaker_session=mock_session
    )
    # Same as the full dataframe but without the "hp1" column.
    expected_dataframe = pd.DataFrame.from_dict(
        OrderedDict(
            [
                ("TrialComponentName", ["trial-1", "trial-2"]),
                ("DisplayName", ["Training", "Training"]),
                ("SourceArn", ["some-source-arn", "some-source-arn"]),
                ("hp2", ["abc", "abc"]),
                ("metric1 - Min", [3.0, 3.0]),
                ("metric1 - Max", [5.0, 5.0]),
                ("metric1 - Avg", [4.0, 4.0]),
                ("metric1 - StdDev", [1.0, 1.0]),
                ("metric1 - Last", [2.0, 2.0]),
                ("metric1 - Count", [2.0, 2.0]),
                ("metric2 - Min", [8.0, 8.0]),
                ("metric2 - Max", [10.0, 10.0]),
                ("metric2 - Avg", [9.0, 9.0]),
                ("metric2 - StdDev", [0.05, 0.05]),
                ("metric2 - Last", [7.0, 7.0]),
                ("metric2 - Count", [2.0, 2.0]),
                ("inputArtifacts1 - MediaType", ["text/plain", "text/plain"]),
                ("inputArtifacts1 - Value", ["s3:/foo/bar1", "s3:/foo/bar1"]),
                ("inputArtifacts2 - MediaType", ["text/plain", "text/plain"]),
                ("inputArtifacts2 - Value", ["s3:/foo/bar2", "s3:/foo/bar2"]),
                ("outputArtifacts1 - MediaType", ["text/csv", "text/csv"]),
                ("outputArtifacts1 - Value", ["s3:/sky/far1", "s3:/sky/far1"]),
                ("outputArtifacts2 - MediaType", ["text/csv", "text/csv"]),
                ("outputArtifacts2 - Value", ["s3:/sky/far2", "s3:/sky/far2"]),
                ("Trials", [["trial1"], ["trial1"]]),
                ("Experiments", [["experiment1"], ["experiment1"]]),
            ]
        )
    )
    pd.testing.assert_frame_equal(expected_dataframe, analytics.dataframe())
    expected_search_exp = {
        "Filters": [
            {"Name": "Parents.ExperimentName", "Operator": "Equals", "Value": "experiment1"}
        ]
    }
    mock_session.sagemaker_client.search.assert_called_with(
        Resource="ExperimentTrialComponent", SearchExpression=expected_search_exp
    )
def test_trial_analytics_dataframe_selected_metrics(mock_session):
    """metric_names restricts metric columns (metric2 columns are dropped)."""
    mock_session.sagemaker_client.search.return_value = {
        "Results": [
            {"TrialComponent": trial_component("trial-1")},
            {"TrialComponent": trial_component("trial-2")},
        ]
    }
    analytics = ExperimentAnalytics(
        experiment_name="experiment1", metric_names=["metric1"], sagemaker_session=mock_session
    )
    # Same as the full dataframe but without any "metric2 - *" columns.
    expected_dataframe = pd.DataFrame.from_dict(
        OrderedDict(
            [
                ("TrialComponentName", ["trial-1", "trial-2"]),
                ("DisplayName", ["Training", "Training"]),
                ("SourceArn", ["some-source-arn", "some-source-arn"]),
                ("hp1", [1.0, 1.0]),
                ("hp2", ["abc", "abc"]),
                ("metric1 - Min", [3.0, 3.0]),
                ("metric1 - Max", [5.0, 5.0]),
                ("metric1 - Avg", [4.0, 4.0]),
                ("metric1 - StdDev", [1.0, 1.0]),
                ("metric1 - Last", [2.0, 2.0]),
                ("metric1 - Count", [2.0, 2.0]),
                ("inputArtifacts1 - MediaType", ["text/plain", "text/plain"]),
                ("inputArtifacts1 - Value", ["s3:/foo/bar1", "s3:/foo/bar1"]),
                ("inputArtifacts2 - MediaType", ["text/plain", "text/plain"]),
                ("inputArtifacts2 - Value", ["s3:/foo/bar2", "s3:/foo/bar2"]),
                ("outputArtifacts1 - MediaType", ["text/csv", "text/csv"]),
                ("outputArtifacts1 - Value", ["s3:/sky/far1", "s3:/sky/far1"]),
                ("outputArtifacts2 - MediaType", ["text/csv", "text/csv"]),
                ("outputArtifacts2 - Value", ["s3:/sky/far2", "s3:/sky/far2"]),
                ("Trials", [["trial1"], ["trial1"]]),
                ("Experiments", [["experiment1"], ["experiment1"]]),
            ]
        )
    )
    pd.testing.assert_frame_equal(expected_dataframe, analytics.dataframe())
    expected_search_exp = {
        "Filters": [
            {"Name": "Parents.ExperimentName", "Operator": "Equals", "Value": "experiment1"}
        ]
    }
    mock_session.sagemaker_client.search.assert_called_with(
        Resource="ExperimentTrialComponent", SearchExpression=expected_search_exp
    )
def test_trial_analytics_dataframe_search_pagination(mock_session):
    """Results split across two Search pages are merged into one dataframe."""
    # First page returns a NextToken, second page terminates the pagination.
    result_page_1 = {
        "Results": [{"TrialComponent": trial_component("trial-1")}],
        "NextToken": "nextToken",
    }
    result_page_2 = {"Results": [{"TrialComponent": trial_component("trial-2")}]}
    mock_session.sagemaker_client.search.side_effect = [result_page_1, result_page_2]
    analytics = ExperimentAnalytics(experiment_name="experiment1", sagemaker_session=mock_session)
    expected_dataframe = pd.DataFrame.from_dict(
        OrderedDict(
            [
                ("TrialComponentName", ["trial-1", "trial-2"]),
                ("DisplayName", ["Training", "Training"]),
                ("SourceArn", ["some-source-arn", "some-source-arn"]),
                ("hp1", [1.0, 1.0]),
                ("hp2", ["abc", "abc"]),
                ("metric1 - Min", [3.0, 3.0]),
                ("metric1 - Max", [5.0, 5.0]),
                ("metric1 - Avg", [4.0, 4.0]),
                ("metric1 - StdDev", [1.0, 1.0]),
                ("metric1 - Last", [2.0, 2.0]),
                ("metric1 - Count", [2.0, 2.0]),
                ("metric2 - Min", [8.0, 8.0]),
                ("metric2 - Max", [10.0, 10.0]),
                ("metric2 - Avg", [9.0, 9.0]),
                ("metric2 - StdDev", [0.05, 0.05]),
                ("metric2 - Last", [7.0, 7.0]),
                ("metric2 - Count", [2.0, 2.0]),
                ("inputArtifacts1 - MediaType", ["text/plain", "text/plain"]),
                ("inputArtifacts1 - Value", ["s3:/foo/bar1", "s3:/foo/bar1"]),
                ("inputArtifacts2 - MediaType", ["text/plain", "text/plain"]),
                ("inputArtifacts2 - Value", ["s3:/foo/bar2", "s3:/foo/bar2"]),
                ("outputArtifacts1 - MediaType", ["text/csv", "text/csv"]),
                ("outputArtifacts1 - Value", ["s3:/sky/far1", "s3:/sky/far1"]),
                ("outputArtifacts2 - MediaType", ["text/csv", "text/csv"]),
                ("outputArtifacts2 - Value", ["s3:/sky/far2", "s3:/sky/far2"]),
                ("Trials", [["trial1"], ["trial1"]]),
                ("Experiments", [["experiment1"], ["experiment1"]]),
            ]
        )
    )
    pd.testing.assert_frame_equal(expected_dataframe, analytics.dataframe())
    expected_search_exp = {
        "Filters": [
            {"Name": "Parents.ExperimentName", "Operator": "Equals", "Value": "experiment1"}
        ]
    }
    # The second call must carry the NextToken from the first response.
    mock_session.sagemaker_client.search.assert_has_calls(
        [
            mock.call(Resource="ExperimentTrialComponent", SearchExpression=expected_search_exp),
            mock.call(
                Resource="ExperimentTrialComponent",
                SearchExpression=expected_search_exp,
                NextToken="nextToken",
            ),
        ]
    )
def test_trial_analytics_dataframe_filter_trials_search_exp_only(mock_session):
    """A user-supplied search expression is passed through to Search unchanged."""
    mock_session.sagemaker_client.search.return_value = {"Results": []}
    tag_filter_exp = {
        "Filters": [{"Name": "Tags.someTag", "Operator": "Equals", "Value": "someValue"}]
    }
    analytics = ExperimentAnalytics(
        search_expression=tag_filter_exp, sagemaker_session=mock_session
    )
    analytics.dataframe()
    mock_session.sagemaker_client.search.assert_called_with(
        Resource="ExperimentTrialComponent", SearchExpression=tag_filter_exp
    )
def test_trial_analytics_dataframe_filter_trials_search_exp_with_experiment(mock_session):
    """The experiment-name filter is appended after the user's own filters."""
    mock_session.sagemaker_client.search.return_value = {"Results": []}
    tag_filter = {"Name": "Tags.someTag", "Operator": "Equals", "Value": "someValue"}
    analytics = ExperimentAnalytics(
        experiment_name="someExperiment",
        search_expression={"Filters": [tag_filter]},
        sagemaker_session=mock_session,
    )
    analytics.dataframe()
    experiment_filter = {
        "Name": "Parents.ExperimentName",
        "Operator": "Equals",
        "Value": "someExperiment",
    }
    mock_session.sagemaker_client.search.assert_called_with(
        Resource="ExperimentTrialComponent",
        SearchExpression={"Filters": [tag_filter, experiment_filter]},
    )
def test_trial_analytics_dataframe_throws_error_if_no_filter_specified(mock_session):
    """Constructing analytics with neither experiment name nor expression fails."""
    with pytest.raises(ValueError):
        ExperimentAnalytics(sagemaker_session=mock_session)
def test_trial_analytics_dataframe_filter_trials_search_exp_with_sort(mock_session):
    """Sort options are forwarded to Search along with the combined filters."""
    mock_session.sagemaker_client.search.return_value = {"Results": []}
    tag_filter = {"Name": "Tags.someTag", "Operator": "Equals", "Value": "someValue"}
    analytics = ExperimentAnalytics(
        experiment_name="someExperiment",
        search_expression={"Filters": [tag_filter]},
        sort_by="Tags.someTag",
        sort_order="Ascending",
        sagemaker_session=mock_session,
    )
    analytics.dataframe()
    experiment_filter = {
        "Name": "Parents.ExperimentName",
        "Operator": "Equals",
        "Value": "someExperiment",
    }
    mock_session.sagemaker_client.search.assert_called_with(
        Resource="ExperimentTrialComponent",
        SearchExpression={"Filters": [tag_filter, experiment_filter]},
        SortBy="Tags.someTag",
        SortOrder="Ascending",
    )
| 40.66763 | 100 | 0.538341 |
3a1c1e3d3d934a3c220e33611b61500c0a74317b | 14,244 | py | Python | uni_ticket/migrations/0001_initial.py | biotech2021/uniTicket | 8c441eac18e67a983e158326b1c4b82f00f1f1ef | [
"Apache-2.0"
] | 15 | 2019-09-06T06:47:08.000Z | 2022-01-17T06:39:54.000Z | uni_ticket/migrations/0001_initial.py | biotech2021/uniTicket | 8c441eac18e67a983e158326b1c4b82f00f1f1ef | [
"Apache-2.0"
] | 69 | 2019-09-06T12:03:19.000Z | 2022-03-26T14:30:53.000Z | uni_ticket/migrations/0001_initial.py | biotech2021/uniTicket | 8c441eac18e67a983e158326b1c4b82f00f1f1ef | [
"Apache-2.0"
] | 13 | 2019-09-11T10:54:20.000Z | 2021-11-23T09:09:19.000Z | # Generated by Django 2.1.7 on 2019-04-04 12:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('organizational_area', '0017_organizationalstructure_slug'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=255)),
('description', models.TextField()),
('created', models.DateTimeField(auto_now=True)),
('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('structure', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='organizational_area.OrganizationalStructure')),
],
options={
'verbose_name': 'Task',
'verbose_name_plural': 'Task',
'ordering': ['subject'],
},
),
migrations.CreateModel(
name='Task2Ticket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('task', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='uni_ticket.Task')),
],
options={
'verbose_name': 'Dipendenza Ticket da Task',
'verbose_name_plural': 'Dipendenze Ticket da Task',
'ordering': ['task', 'ticket'],
},
),
migrations.CreateModel(
name='TaskAttachment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('document', models.CharField(max_length=255)),
('description', models.TextField()),
('task', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uni_ticket.Task')),
],
options={
'verbose_name': 'Allegato',
'verbose_name_plural': 'Allegati',
'ordering': ['task', 'name'],
},
),
migrations.CreateModel(
name='TaskHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(max_length=255)),
('note', models.TextField(blank=True, null=True)),
('modified', models.DateTimeField()),
('employee', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='organizational_area.OrganizationalStructureOfficeEmployee')),
('task', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uni_ticket.Task')),
],
options={
'verbose_name': 'Cronologia Stati Task',
'verbose_name_plural': 'Cronologie Stati Task',
'ordering': ['task', '-modified'],
},
),
migrations.CreateModel(
name='Ticket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=255, unique=True)),
('subject', models.CharField(max_length=255)),
('description', models.TextField()),
('modulo_compilato', models.TextField()),
('created', models.DateTimeField(auto_now=True)),
('is_preso_in_carico', models.BooleanField(default=False)),
('is_chiuso', models.BooleanField(default=False)),
('data_chiusura', models.DateTimeField(blank=True, null=True)),
('motivazione_chiusura', models.TextField(blank=True, null=True)),
('priority', models.IntegerField(help_text='.....')),
],
options={
'verbose_name': 'Ticket',
'verbose_name_plural': 'Ticket',
'ordering': ['code'],
},
),
migrations.CreateModel(
name='Ticket2Ticket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('master_ticket', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='master', to='uni_ticket.Ticket')),
('slave_ticket', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='slave', to='uni_ticket.Ticket')),
],
options={
'verbose_name': 'Dipendenza Ticket',
'verbose_name_plural': 'Dipendenze Ticket',
'ordering': ['master_ticket', 'slave_ticket'],
},
),
migrations.CreateModel(
name='TicketAssignment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now=True)),
('note', models.TextField(blank=True, null=True)),
('assigned_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('office', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='organizational_area.OrganizationalStructureOffice')),
('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uni_ticket.Ticket')),
],
options={
'verbose_name': 'Competenza Ticket',
'verbose_name_plural': 'Competenza Ticket',
'ordering': ['ticket', 'office'],
},
),
migrations.CreateModel(
name='TicketAttachment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('document', models.CharField(max_length=255)),
('description', models.TextField()),
('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uni_ticket.Ticket')),
],
options={
'verbose_name': 'Allegato',
'verbose_name_plural': 'Allegati',
'ordering': ['ticket', 'name'],
},
),
migrations.CreateModel(
name='TicketCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('slug', models.SlugField(max_length=40, unique=True)),
('description', models.TextField(blank=True, max_length=500, null=True)),
('created', models.DateTimeField(auto_now=True)),
('modified', models.DateTimeField(blank=True, null=True)),
('is_active', models.BooleanField(default=True, help_text='Se disabilitato, non sarà visibile in Aggiungi Ticket')),
('organizational_office', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='organizational_area.OrganizationalStructureOffice')),
('organizational_structure', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='organizational_area.OrganizationalStructure')),
],
options={
'verbose_name': 'Categoria dei Ticket',
'verbose_name_plural': 'Categorie dei Ticket',
'ordering': ['name'],
},
),
migrations.CreateModel(
name='TicketCategoryInputList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=150)),
('input_type', models.CharField(choices=[('CustomFileField', 'Allegato PDF'), ('CustomHiddenField', 'Campo nascosto'), ('CheckBoxField', 'Checkbox'), ('BaseDateField', 'Data'), ('BaseDateTimeField', 'Data e Ora'), ('DateStartEndComplexField', 'Data inizio e Data fine'), ('DurataComeInteroField', 'Durata come numero intero (anni,mesi,ore)'), ('CustomRadioBoxField', 'Lista di opzioni (checkbox)'), ('CustomSelectBoxField', 'Lista di opzioni (tendina)'), ('PositiveFloatField', 'Numero con virgola positivo'), ('PositiveIntegerField', 'Numero intero positivo'), ('ProtocolloField', 'Protocollo (tipo/numero/data)'), ('CustomCharField', 'Testo'), ('TextAreaField', 'Testo lungo')], max_length=33)),
('valore', models.CharField(blank=True, default='', help_text="Viene considerato solo se si sceglie 'Menu a tendina' oppure 'Serie di Opzioni'. (Es: valore1;valore2;valore3...)", max_length=255, verbose_name='Lista di Valori')),
('is_required', models.BooleanField(default=True)),
('aiuto', models.CharField(blank=True, default='', max_length=254)),
('ordinamento', models.PositiveIntegerField(blank=True, default=0, help_text="posizione nell'ordinamento")),
],
options={
'verbose_name': 'Modulo di inserimento',
'verbose_name_plural': 'Moduli di inserimento',
'ordering': ('ordinamento',),
},
),
migrations.CreateModel(
name='TicketCategoryModule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('created', models.DateTimeField(auto_now=True)),
('is_active', models.BooleanField(default=True)),
('ticket_category', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='uni_ticket.TicketCategory')),
],
options={
'verbose_name': 'Modulo di Inserimento Ticket',
'verbose_name_plural': 'Moduli di Inserimento Ticket',
'ordering': ['-created'],
},
),
migrations.CreateModel(
name='TicketHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('modified', models.DateTimeField(auto_now=True)),
('status', models.CharField(max_length=255)),
('note', models.TextField(blank=True, null=True)),
('modified_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uni_ticket.Ticket')),
],
options={
'verbose_name': 'Cronologia Stati Ticket',
'verbose_name_plural': 'Cronologia Stati Ticket',
'ordering': ['ticket', 'modified'],
},
),
migrations.CreateModel(
name='TicketReply',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=255)),
('text', models.TextField()),
('attachment', models.FileField(blank=True, null=True, upload_to='')),
('created', models.DateTimeField(auto_now=True)),
('structure', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='organizational_area.OrganizationalStructure')),
('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uni_ticket.Ticket')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Domande/Risposte Ticket',
'verbose_name_plural': 'Domande/Risposte Ticket',
'ordering': ['ticket', 'created'],
},
),
migrations.AddField(
model_name='ticketcategoryinputlist',
name='category_module',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='uni_ticket.TicketCategoryModule'),
),
migrations.AddField(
model_name='ticket',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='uni_ticket.TicketCategory'),
),
migrations.AddField(
model_name='ticket',
name='created_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='task2ticket',
name='ticket',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='uni_ticket.Ticket'),
),
migrations.AlterUniqueTogether(
name='ticketcategory',
unique_together={('name', 'organizational_structure')},
),
migrations.AlterUniqueTogether(
name='ticketassignment',
unique_together={('ticket', 'office')},
),
migrations.AlterUniqueTogether(
name='ticket2ticket',
unique_together={('master_ticket', 'slave_ticket')},
),
migrations.AlterUniqueTogether(
name='task2ticket',
unique_together={('ticket', 'task')},
),
]
| 53.750943 | 713 | 0.582982 |
3a20f5e777be4409e899dec4e5460fecff5677e0 | 10,325 | py | Python | baselines/baseline_summarunner/main.py | PKULiuHui/LiveBlogSum | b6a22521ee454e649981d70ddca6c89a1bac5a4c | [
"MIT"
] | null | null | null | baselines/baseline_summarunner/main.py | PKULiuHui/LiveBlogSum | b6a22521ee454e649981d70ddca6c89a1bac5a4c | [
"MIT"
] | null | null | null | baselines/baseline_summarunner/main.py | PKULiuHui/LiveBlogSum | b6a22521ee454e649981d70ddca6c89a1bac5a4c | [
"MIT"
] | null | null | null | # coding:utf-8
import math
import re
import sys
import os, json, argparse, random
from collections import Counter

import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from tqdm import tqdm

from Vocab import Vocab
from Dataset import Dataset
from RNN_RNN import RNN_RNN
# myrouge lives at the repository root, two levels up from this script.
sys.path.append('../../')
from myrouge.rouge import get_rouge_score
parser = argparse.ArgumentParser(description='SummaRuNNer')
# model
parser.add_argument('-save_dir', type=str, default='checkpoints1/')
parser.add_argument('-embed_dim', type=int, default=100)
parser.add_argument('-embed_num', type=int, default=100)
parser.add_argument('-hidden_size', type=int, default=200)
parser.add_argument('-pos_dim', type=int, default=50)
parser.add_argument('-pos_num', type=int, default=800)
parser.add_argument('-seg_num', type=int, default=10)
# train
parser.add_argument('-lr', type=float, default=1e-3)
parser.add_argument('-max_norm', type=float, default=5.0)
parser.add_argument('-batch_size', type=int, default=5)
parser.add_argument('-epochs', type=int, default=8)
parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-embedding', type=str, default='../../word2vec/embedding.npz')
parser.add_argument('-word2id', type=str, default='../../word2vec/word2id.json')
parser.add_argument('-train_dir', type=str, default='../../data/bbc_opt/train/')
parser.add_argument('-valid_dir', type=str, default='../../data/bbc_opt/test/')
parser.add_argument('-sent_trunc', type=int, default=20)
parser.add_argument('-doc_trunc', type=int, default=10)
parser.add_argument('-blog_trunc', type=int, default=80)
parser.add_argument('-valid_every', type=int, default=100)
# test
parser.add_argument('-load_model', type=str, default='')
parser.add_argument('-test_dir', type=str, default='../../data/bbc_opt/test/')
parser.add_argument('-ref', type=str, default='outputs/ref/')
parser.add_argument('-hyp', type=str, default='outputs/hyp/')
parser.add_argument('-sum_len', type=int, default=1)  # summary length as a multiple of the reference summary length
parser.add_argument('-mmr', type=float, default=0.75)
# other
parser.add_argument('-test', action='store_true')
# NOTE(review): argparse's type=bool does not parse "False" (any non-empty
# string is truthy); harmless here because the flag is overwritten below.
parser.add_argument('-use_cuda', type=bool, default=False)
use_cuda = torch.cuda.is_available()
args = parser.parse_args()
# Seed every RNG source for reproducibility.
if use_cuda:
    torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
args.use_cuda = use_cuda
def my_collate(batch):
    """Collate a list of sample dicts into a dict of lists.

    Keys are taken from the first sample; every sample in the batch is
    expected to share the same keys. Returns an empty dict for an empty
    batch instead of raising IndexError on ``batch[0]``.
    """
    if not batch:
        return {}
    return {key: [sample[key] for sample in batch] for key in batch[0]}
# rouge_1_f is used as the similarity score between two sentences.
def rouge_1_f(hyp, ref):
    """ROUGE-1 F1 between two sentences.

    Both strings are lowercased and reduced to [a-z] word tokens. Unigram
    hits are clipped counts — min of each word's count in hyp and ref —
    which is exactly what the original greedy flag-matching loop computed,
    but in O(n + m) instead of O(n * m).
    Returns 0.0 when either side has no tokens or precision + recall is 0.
    """
    hyp_tokens = re.sub(r'[^a-z]', ' ', hyp.lower()).split()
    ref_tokens = re.sub(r'[^a-z]', ' ', ref.lower()).split()
    if not hyp_tokens or not ref_tokens:
        return .0
    # Counter intersection yields the clipped per-word overlap counts.
    overlap = Counter(hyp_tokens) & Counter(ref_tokens)
    hit = float(sum(overlap.values()))
    p = hit / len(hyp_tokens)
    r = hit / len(ref_tokens)
    if math.fabs(p + r) < 1e-10:
        return .0
    return 2 * p * r / (p + r)
# 得到预测分数后,使用MMR策略进行重新排序,以消除冗余
def re_rank(sents, scores, ref_len):
    """Greedily build a summary of at most ref_len words using MMR re-ranking.

    Repeatedly picks the highest-scoring remaining sentence, then penalizes the
    other candidates by their ROUGE-1 similarity to the pick (weighted by
    args.mmr) so that redundant sentences sink in the ranking.

    Args:
        sents: list of candidate sentence strings.
        scores: per-sentence relevance scores (same length as sents).
        ref_len: word budget -- the reference summary length.

    Returns:
        The selected sentences joined into one string, truncated to ref_len words.
    """
    summary = ''
    chosen = []
    cur_scores = [s for s in scores]  # working copy; mutated by the MMR updates
    cur_len = 0
    # BUG FIX: the original condition was `<=`, which ran one extra iteration
    # after every sentence had been chosen (duplicating a sentence when the
    # word budget was not yet exhausted) and crashed on empty input by taking
    # argsort of an empty array. `<` selects each sentence at most once.
    while len(chosen) < len(scores):
        sorted_idx = np.array(cur_scores).argsort()
        cur_idx = sorted_idx[-1]  # highest current score
        # MMR update: new_score = mmr * relevance - (1 - mmr) * redundancy;
        # scores only ever decrease, hence the min().
        for i in range(len(cur_scores)):
            new_score = args.mmr * scores[i] - (1 - args.mmr) * rouge_1_f(sents[i], sents[cur_idx])
            cur_scores[i] = min(cur_scores[i], new_score)
        cur_scores[cur_idx] = -1e20  # sink the pick so it is never chosen again
        chosen.append(cur_idx)
        tmp = sents[cur_idx].split()
        tmp_len = len(tmp)
        if cur_len + tmp_len > ref_len:
            # budget exceeded: take only the words that still fit, then stop
            summary += ' '.join(tmp[:ref_len - cur_len])
            break
        else:
            summary += ' '.join(tmp) + ' '
            cur_len += tmp_len
    return summary.strip()
# Measure loss and ROUGE scores on the validation or test set
def evaluate(net, vocab, data_iter, train_next):
    """Compute average loss and ROUGE recall scores over a data iterator.

    Args:
        net: scoring model, called as net(features, doc_nums, doc_lens).
        vocab: provides make_features(batch, args) to tensorize a batch.
        data_iter: DataLoader yielding collated batches.
        train_next: if True, restore the net to training mode before returning
            (i.e. training continues after this evaluation).

    Returns:
        (loss, rouge_1, rouge_2, rouge_l, rouge_su4): loss averaged per batch,
        ROUGE recall values averaged per blog.
    """
    net.eval()
    # NOTE(review): train() optimizes BCELoss while this reports MSELoss --
    # confirm the mismatch is intentional.
    criterion = nn.MSELoss()
    # ROUGE-1/2/L/SU4 recall accumulators (summary truncated to reference length)
    loss, r1, r2, rl, rsu = .0, .0, .0, .0, .0
    batch_num = .0
    blog_num = .0
    for batch in tqdm(data_iter):
        features, targets, sents_content, summaries, doc_nums, doc_lens = vocab.make_features(batch, args)
        features, targets = Variable(features), Variable(targets.float())
        if use_cuda:
            features = features.cuda()
            targets = targets.cuda()
        probs = net(features, doc_nums, doc_lens)
        batch_num += 1
        loss += criterion(probs, targets).data.item()
        # Walk the flat probs / doc_lens / sents_content arrays blog by blog.
        probs_start = 0     # start index of the current blog within probs
        doc_lens_start = 0  # start index of the current blog within doc_lens
        sents_start = 0     # start index of the current blog within sents_content
        # BUG FIX: the original looped `for i in range(0, args.batch_size)`, which
        # (a) shadowed the outer loop variable and (b) raised IndexError on the
        # last DataLoader batch when it contains fewer than batch_size blogs.
        # Iterate over the actual number of blogs in this batch instead.
        for b in range(len(doc_nums)):
            # total number of sentences belonging to blog b
            sents_num = 0
            for j in range(doc_lens_start, doc_lens_start + doc_nums[b]):
                sents_num += doc_lens[j]
            cur_probs = probs[probs_start:probs_start + sents_num]
            cur_sents = sents_content[sents_start: sents_start + sents_num]
            probs_start = probs_start + sents_num
            doc_lens_start = doc_lens_start + doc_nums[b]
            sents_start = sents_start + sents_num
            if use_cuda:
                cur_probs = cur_probs.cpu()
            cur_probs = list(cur_probs.detach().numpy())
            ref = summaries[b].strip()
            ref_len = len(ref.split())  # word budget for the generated summary
            hyp = re_rank(cur_sents, cur_probs, ref_len)
            score = get_rouge_score(hyp, ref)
            r1 += score['ROUGE-1']['r']
            r2 += score['ROUGE-2']['r']
            rl += score['ROUGE-L']['r']
            rsu += score['ROUGE-SU4']['r']
            blog_num += 1
    loss = loss / batch_num
    r1 = r1 / blog_num
    r2 = r2 / blog_num
    rl = rl / blog_num
    rsu = rsu / blog_num
    if train_next:  # training continues next, so switch back to train mode
        net.train()
    return loss, r1, r2, rl, rsu
def train():
    """Train the RNN_RNN model, validating every args.valid_every steps and
    checkpointing whenever the validation loss improves.

    Reads paths and hyper-parameters from the module-level args namespace.
    """
    print('Loading vocab, train and val dataset...')
    embed = torch.Tensor(np.load(args.embedding)['embedding'])
    args.embed_num = embed.size(0)
    args.embed_dim = embed.size(1)
    with open(args.word2id) as f:
        word2id = json.load(f)
    vocab = Vocab(embed, word2id)

    def _load_json_dir(directory):
        # One JSON document per file; `with` replaces the original manual
        # open/close so handles are released even if json.load raises.
        data = []
        for fn in os.listdir(directory):
            with open(directory + fn, 'r') as f:
                data.append(json.load(f))
        return data

    train_dataset = Dataset(_load_json_dir(args.train_dir))
    val_dataset = Dataset(_load_json_dir(args.valid_dir))
    net = RNN_RNN(args, embed)
    # NOTE(review): evaluate() reports MSELoss while training uses BCELoss --
    # confirm this asymmetry is intended.
    criterion = nn.BCELoss()
    if use_cuda:
        net.cuda()
    # NOTE(review): shuffle=False on the training loader -- confirm intended.
    train_iter = DataLoader(dataset=train_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            collate_fn=my_collate)
    val_iter = DataLoader(dataset=val_dataset,
                          batch_size=args.batch_size,
                          shuffle=False,
                          collate_fn=my_collate)
    optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
    net.train()
    min_loss = float('inf')
    for epoch in range(1, args.epochs + 1):
        for i, batch in enumerate(train_iter):
            features, targets, _1, _2, doc_nums, doc_lens = vocab.make_features(batch, args)
            features, targets = Variable(features), Variable(targets.float())
            if use_cuda:
                features = features.cuda()
                targets = targets.cuda()
            probs = net(features, doc_nums, doc_lens)
            loss = criterion(probs, targets)
            optimizer.zero_grad()
            loss.backward()
            clip_grad_norm_(net.parameters(), args.max_norm)  # gradient clipping
            optimizer.step()
            print('EPOCH [%d/%d]: BATCH_ID=[%d/%d] loss=%f' % (
                epoch, args.epochs, i, len(train_iter), loss))
            cnt = (epoch - 1) * len(train_iter) + i  # global step counter
            if cnt % args.valid_every == 0:
                print('Begin valid... Epoch %d, Batch %d' % (epoch, i))
                cur_loss, r1, r2, rl, rsu = evaluate(net, vocab, val_iter, True)
                if cur_loss < min_loss:
                    min_loss = cur_loss
                    # integer division (original used true division, which
                    # produced a float that %d then truncated anyway)
                    save_path = args.save_dir + 'RNN_RNN' + '_%d_%.4f_%.4f_%.4f_%.4f_%.4f' % (
                        cnt // args.valid_every, cur_loss, r1, r2, rl, rsu)
                    net.save(save_path)
                print('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f Rouge-1: %f Rouge-2: %f Rouge-l: %f Rouge-SU4: %f' %
                      (epoch, min_loss, cur_loss, r1, r2, rl, rsu))
def test():
    """Load a saved checkpoint (args.save_dir + args.load_model), evaluate it
    on the test split and print the loss and ROUGE scores."""
    print('Loading vocab and test dataset...')
    embed = torch.Tensor(np.load(args.embedding)['embedding'])
    args.embed_num = embed.size(0)
    args.embed_dim = embed.size(1)
    with open(args.word2id) as f:
        word2id = json.load(f)
    vocab = Vocab(embed, word2id)
    test_data = []
    for fn in os.listdir(args.test_dir):
        # `with` replaces the original manual open/close (handle leaked on error)
        with open(args.test_dir + fn, 'r') as f:
            test_data.append(json.load(f))
    test_dataset = Dataset(test_data)
    test_iter = DataLoader(dataset=test_dataset,
                           batch_size=args.batch_size,
                           shuffle=False,
                           collate_fn=my_collate)
    print('Loading model...')
    if use_cuda:
        checkpoint = torch.load(args.save_dir + args.load_model)
    else:
        # remap CUDA-saved tensors onto the CPU when no GPU is available
        checkpoint = torch.load(args.save_dir + args.load_model, map_location=lambda storage, loc: storage)
    # NOTE(review): train() constructs RNN_RNN(args, embed) but here only
    # checkpoint['args'] is passed -- confirm RNN_RNN accepts a single-argument
    # form (e.g. embeddings rebuilt from args) or this will fail at load time.
    net = RNN_RNN(checkpoint['args'])
    net.load_state_dict(checkpoint['model'])
    if use_cuda:
        net.cuda()
    net.eval()
    print('Begin test...')
    test_loss, r1, r2, rl, rsu = evaluate(net, vocab, test_iter, False)
    print('Test_Loss: %f Rouge-1: %f Rouge-2: %f Rouge-l: %f Rouge-SU4: %f' % (test_loss, r1, r2, rl, rsu))
if __name__ == '__main__':
    # Dispatch on the -test flag: evaluate a saved model, or run training.
    entry = test if args.test else train
    entry()
| 36.743772 | 120 | 0.606683 |
3a236c93064f118a008812da513e38be43b9a0c5 | 3,512 | py | Python | data_utils/split_data.py | amitfishy/deep-objdetect | d8fc03bdb532443588b910fb9cb488766c8f6a97 | [
"MIT"
] | null | null | null | data_utils/split_data.py | amitfishy/deep-objdetect | d8fc03bdb532443588b910fb9cb488766c8f6a97 | [
"MIT"
] | null | null | null | data_utils/split_data.py | amitfishy/deep-objdetect | d8fc03bdb532443588b910fb9cb488766c8f6a97 | [
"MIT"
] | null | null | null | import os
from random import shuffle
import pascalvoc_to_yolo
def generate_yolo_format_data(dataset_type, annotation_directory, image_id_directory, train_data_fraction, use_validation_experiments, image_data_directory, class_names_file, train_mode):
get_split_data(image_id_directory, train_data_fraction, use_validation_experiments)
if use_validation_experiments:
gen_sub_image_ids_directory = os.path.join(image_id_directory, 'temp')
else:
Split = str(int(train_data_fraction*100)) + '-' + str(int((100-train_data_fraction*100)))
gen_sub_image_ids_directory = os.path.join(image_id_directory, Split)
yolo_annotation_directory, yolo_image_id_directory = pascalvoc_to_yolo.generate_yolo_format(dataset_type, annotation_directory, image_id_directory, gen_sub_image_ids_directory, image_data_directory, class_names_file, train_mode)
return yolo_annotation_directory, yolo_image_id_directory
def generate_split_data(train_image_id_path, train_data_fraction, out_train_image_id_path, out_val_image_id_path):
ImageIDs = []
with open(train_image_id_path, 'r') as train_image_id_file:
for line in train_image_id_file.readlines():
ImageIDs.append(line.strip())
shuffle(ImageIDs)
with open(out_train_image_id_path, 'w') as out_train_image_id_file:
for ImageID in sorted(ImageIDs[0:int(train_data_fraction*len(ImageIDs))]):
out_train_image_id_file.write(ImageID)
out_train_image_id_file.write('\n')
with open(out_val_image_id_path, 'w') as out_val_image_id_file:
for ImageID in sorted(ImageIDs[int(train_data_fraction*len(ImageIDs)):]):
out_val_image_id_file.write(ImageID)
out_val_image_id_file.write('\n')
return
def generate_temp_split_data(image_id_directory, train_data_fraction):
temp_split_directory = os.path.join(image_id_directory, 'temp')
if not os.path.exists(temp_split_directory):
os.makedirs(temp_split_directory)
train_image_id_path = os.path.join(image_id_directory, 'train.txt')
out_train_image_id_path = os.path.join(temp_split_directory, 'train.txt')
out_val_image_id_path = os.path.join(temp_split_directory, 'val.txt')
#divide train.txt > train and val
generate_split_data(train_image_id_path, train_data_fraction, out_train_image_id_path, out_val_image_id_path)
return
def generate_fixed_split_data(image_id_directory, train_data_fraction):
Split = str(int(train_data_fraction*100)) + '-' + str(int((100-train_data_fraction*100)))
fixed_split_directory = os.path.join(image_id_directory, Split)
if not os.path.exists(fixed_split_directory):
os.makedirs(fixed_split_directory)
train_image_id_path = os.path.join(image_id_directory, 'train.txt')
test_image_id_path = os.path.join(image_id_directory, 'test.txt')
out_train_image_id_path = os.path.join(fixed_split_directory, 'train.txt')
out_val_image_id_path = os.path.join(fixed_split_directory, 'val.txt')
out_test_image_id_path = os.path.join(fixed_split_directory, 'test.txt')
if not (os.path.exists(out_train_image_id_path) and os.path.exists(out_val_image_id_path)):
#divide train.txt > train and val
generate_split_data(train_image_id_path, train_data_fraction, out_train_image_id_path, out_val_image_id_path)
os.system('cp ' + test_image_id_path + ' ' + out_test_image_id_path)
return
def get_split_data(image_id_directory, train_data_fraction, use_validation_experiments):
if use_validation_experiments:
generate_temp_split_data(image_id_directory, train_data_fraction)
else:
generate_fixed_split_data(image_id_directory, train_data_fraction)
return | 46.210526 | 229 | 0.825456 |
3a26a3c6be42741ef5f1bdf670939b37671499bb | 547 | py | Python | books/migrations/0002_auto_20200518_1636.py | JorgeluissilvaC/intellinext_books | 0495744920dac6ee98c7ad024f8d8f85d0838238 | [
"MIT"
] | null | null | null | books/migrations/0002_auto_20200518_1636.py | JorgeluissilvaC/intellinext_books | 0495744920dac6ee98c7ad024f8d8f85d0838238 | [
"MIT"
] | null | null | null | books/migrations/0002_auto_20200518_1636.py | JorgeluissilvaC/intellinext_books | 0495744920dac6ee98c7ad024f8d8f85d0838238 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-18 21:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('books', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comments',
name='id_book',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='books.books'),
),
]
| 24.863636 | 122 | 0.645338 |
3a278df76c850ba375f90a83b4923f079000c2f6 | 1,385 | py | Python | debian-11-PrisonPC/xfce/log-terminal-attempt.py | mijofa/bootstrap2020 | 38f557f4f0e72eaefe366f12f6adac3e2f9c9abd | [
"MIT"
] | null | null | null | debian-11-PrisonPC/xfce/log-terminal-attempt.py | mijofa/bootstrap2020 | 38f557f4f0e72eaefe366f12f6adac3e2f9c9abd | [
"MIT"
] | null | null | null | debian-11-PrisonPC/xfce/log-terminal-attempt.py | mijofa/bootstrap2020 | 38f557f4f0e72eaefe366f12f6adac3e2f9c9abd | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import os
import sys
import syslog # FIXME: use systemd.journal.send()?
import gi
gi.require_version('Notify', '0.7')
import gi.repository.Notify # noqa: E402
__doc__ = """ an ersatz xterm that says "No!" and quits """

# Tell the central server.
# FIXME: ends up in user journal, not system journal.
# Does rsyslog forward user journal??
# XUSER identifies the logged-in user; fall back to the numeric euid when unset.
who = os.environ.get('XUSER', os.geteuid())
syslog.openlog('noterm4u', facility=syslog.LOG_AUTH)
# sys.argv[1:] records whatever command line the caller tried to run.
syslog.syslog(f'{who} tried to open a terminal ({sys.argv[1:]}).')

# Tell the end user via a desktop notification.
gi.repository.Notify.init("Terminal")
gi.repository.Notify.Notification.new(
    summary='Not allowed',
    body='Your attempt to perform a blocked action has been reported.',
    icon='dialog-warning-symbolic').show()

# https://www.gnu.org/software/bash/manual/html_node/Exit-Status.html#Exit-Status says
#   If a command is not found, the child process created to execute it returns a status of 127.
#   If a command is found but is not executable, the return status is 126.
# Pretend to whoever called us, that we are not instaled.
# Probably has no effect whatsoever.
# UPDATE: if we do this, we get a big popup:
#
#     Failed to execute default Terminal Emulator.
#     Input/output error.
#     [ ] Do not show this message again
#     [ Close ]
#
# That's a bit shit, so DON'T exit with an error.
# exit(127)
| 34.625 | 95 | 0.704693 |