hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringdate 2015-01-01 00:00:47 2022-03-31 23:42:18 ⌀ | max_issues_repo_issues_event_max_datetime stringdate 2015-01-01 17:43:30 2022-03-31 23:59:58 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
09d449514e9039bc5c41cf98d5932b80f0321669 | 898 | py | Python | TestSourceCode_Store/PIM_example_resize.py | leejaymin/QDroid | 13ff9d26932378513a7c9f0038eb59b922ed06eb | [
"Apache-2.0"
] | null | null | null | TestSourceCode_Store/PIM_example_resize.py | leejaymin/QDroid | 13ff9d26932378513a7c9f0038eb59b922ed06eb | [
"Apache-2.0"
] | null | null | null | TestSourceCode_Store/PIM_example_resize.py | leejaymin/QDroid | 13ff9d26932378513a7c9f0038eb59b922ed06eb | [
"Apache-2.0"
] | null | null | null | import sys
from PIL import Image
#import ImageChops
#import numpy as np
#import ImageChops
if __name__ == '__main__':
    # Screenshot to normalize (captured on a Galaxy Nexus); alternative source
    # paths from earlier experiments were removed along with the unused
    # accumulator variables (srcList/targetList/colorRate/...) they fed.
    first_image = "../ImageStore/GameActivityGalaxyNexus.png"
    imSrc = Image.open(first_image)
    a = imSrc.size
    # Bug fix: `print a[0]` is Python-2-only statement syntax and a
    # SyntaxError on Python 3; the call form below works on both.
    print(a[0])
    # Scale to a fixed 1280x720 resolution so screenshots from different
    # devices can be compared pixel-for-pixel.
    out = imSrc.resize((1280, 720))
    out.save('/root/Android_Application_source/recomended/0_r.png', 'PNG')
| 26.411765 | 85 | 0.680401 |
09d4d5139b90907a08147b1f476920cdd503f04c | 15,484 | py | Python | src/testing/functionaltests/webtest.py | pgecsenyi/piepy | 37bf6cb5bc8c4f9da3f695216beda7353d79fb29 | [
"MIT"
] | 1 | 2018-03-26T22:39:36.000Z | 2018-03-26T22:39:36.000Z | src/testing/functionaltests/webtest.py | pgecsenyi/piepy | 37bf6cb5bc8c4f9da3f695216beda7353d79fb29 | [
"MIT"
] | null | null | null | src/testing/functionaltests/webtest.py | pgecsenyi/piepy | 37bf6cb5bc8c4f9da3f695216beda7353d79fb29 | [
"MIT"
] | null | null | null | """
Web unit tests
"""
# pylint: disable=too-many-public-methods
import time
import unittest
import requests
from testing.communicationhelper import get_json, put_json
from testing.functions import are_expected_items_in_list, are_expected_kv_pairs_in_list, \
get_item_from_embedded_dictionary
from testing.servermanager import ServerManager
from testing.testhelper import TestHelper
from testing.videotestenvironment import VideoTestEnvironment
class WebTest(unittest.TestCase):
    """End-to-end tests that exercise the media server's REST API over HTTP.

    The test methods are numbered because unittest runs them in
    lexicographic order and later tests depend on ids captured from earlier
    responses in class attributes (e.g. _language_id, _file_id).
    """
    ####################################################################################################################
    # Initialization and cleanup.
    ####################################################################################################################
    @classmethod
    def setUpClass(cls):
        """Create the test data/configuration and start the server under test."""
        # Set private static attributes.
        cls._episode_title_id = 0
        cls._file_id = 0
        cls._language_id = 0
        cls._main_executable = 'main.py'
        cls._parent_id = 0
        cls._playlist_id = 0
        cls._quality_id = 0
        # Create TestHelper.
        cls._helper = TestHelper()
        cls._helper.add_environment(VideoTestEnvironment())
        # Create test configuration and files.
        cls._helper.create_configuration()
        cls._helper.create_files()
        # Create Server Manager and start the server.
        cls._server_manager = ServerManager(cls._main_executable, cls._helper.config_path)
        cls._server_manager.start()
        if not cls._server_manager.wait_for_initialization(cls._helper.test_service_base_url):
            print('The service is unavailable.')
            # Server never came up: tear everything down again immediately.
            cls.tearDownClass()
    @classmethod
    def tearDownClass(cls):
        """Stop the server and remove the generated test files."""
        cls._server_manager.stop()
        cls._helper.clean()
    ####################################################################################################################
    # Real test methods.
    ####################################################################################################################
    def test_1_rebuild(self):
        """Trigger a database rebuild and poll until synchronization finishes."""
        # Arrange.
        rebuild_url = WebTest._helper.build_url('rebuild')
        status_url = WebTest._helper.build_url('status')
        # Act.
        requests.get(rebuild_url)
        # Wait until database is building. Poll status in every 2 seconds.
        number_of_retries = 0
        result = ''
        while number_of_retries < 10:
            data = get_json(status_url)
            result = data['status']['synchronization']
            if result == 'not running':
                break
            number_of_retries += 1
            time.sleep(2)
        # Assert.
        self.assertEqual(result, 'not running', 'Rebuild failed.')
    def test_2_categories(self):
        """The category list contains the audio, image and video categories."""
        # Arrange.
        url = WebTest._helper.build_url('categories')
        # Act.
        data = get_json(url)
        # Assert.
        are_expected_items_in_list(self, data, 'categories')
        are_expected_items_in_list(self, data['categories'], 'audio', 'image', 'video')
    def test_3_video_languages(self):
        """All expected video languages are listed; remembers the Greek id."""
        # Arrange.
        url = WebTest._helper.build_url('video/languages')
        # Act.
        data = get_json(url)
        # Assert.
        expected_languages = ['(Uncategorized)', 'English', 'Finnish', 'German', 'Greek', 'Hindi', 'Hungarian']
        are_expected_items_in_list(self, data, 'languages')
        are_expected_kv_pairs_in_list(self, data['languages'], 'language', expected_languages)
        # Capture the Greek language id for the filter tests below.
        WebTest._language_id = get_item_from_embedded_dictionary(
            data['languages'],
            'language',
            'Greek',
            'id')
    def test_4_video_qualities(self):
        """All expected qualities are listed; remembers the HD (720p) id."""
        # Arrange.
        url = WebTest._helper.build_url('video/qualities')
        # Act.
        data = get_json(url)
        # Assert.
        expected_qualities = ['(Uncategorized)', 'LQ', 'HQ', 'HD (720p)', 'HD (1080p)']
        are_expected_items_in_list(self, data, 'qualities')
        are_expected_kv_pairs_in_list(self, data['qualities'], 'quality', expected_qualities)
        # Capture the HD (720p) quality id for the filter tests below.
        WebTest._quality_id = get_item_from_embedded_dictionary(
            data['qualities'],
            'quality',
            'HD (720p)',
            'id')
    def test_5_01_video_titles(self):
        """All top-level titles are listed; remembers the series parent id."""
        # Arrange.
        url = WebTest._helper.build_url('video/titles')
        # Act.
        data = get_json(url)
        # Assert.
        expected_titles = ['(Uncategorized)', 'Triple Payback', 'Battle of Impact', 'Double Prejudice',
                           'Screw driver 4 (1975)', 'Compressor Head (2014)', 'Family']
        are_expected_items_in_list(self, data, 'titles')
        are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
        # Capture the series id so episode queries can filter on parent.
        WebTest._parent_id = get_item_from_embedded_dictionary(
            data['titles'],
            'title',
            'Compressor Head (2014)',
            'id')
    def test_5_02_video_titles_by_l(self):
        """
        Query video titles by language.
        """
        # Arrange.
        url = WebTest._helper.build_url('video/titles?language={}'.format(WebTest._language_id))
        # Act.
        data = get_json(url)
        # Assert.
        expected_titles = ['Battle of Impact', 'Compressor Head (2014)']
        are_expected_items_in_list(self, data, 'titles')
        are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
    def test_5_03_video_titles_by_p(self):
        """
        Query video titles by parent.
        """
        # Arrange.
        url = WebTest._helper.build_url('video/titles?parent={}'.format(WebTest._parent_id))
        # Act.
        data = get_json(url)
        # Assert.
        # NOTE(review): the '[1x01]' title appears twice -- presumably two files
        # carry the same episode title in the fixtures; confirm against the
        # generated test data.
        expected_titles = [
            'Compressor Head [1x01] Variable Length Codes',
            'Compressor Head [1x03] Markov Chain Compression',
            'Compressor Head [1x01] Variable Length Codes']
        are_expected_items_in_list(self, data, 'titles')
        are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
    def test_5_04_video_titles_by_q(self):
        """
        Query video titles by quality.
        """
        # Arrange.
        url = WebTest._helper.build_url('video/titles?quality={}'.format(WebTest._quality_id))
        # Act.
        data = get_json(url)
        # Assert.
        expected_titles = ['Triple Payback', 'Compressor Head (2014)']
        are_expected_items_in_list(self, data, 'titles')
        are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
    def test_5_05_video_titles_by_l_p(self):
        """
        Query video titles by language and parent.
        """
        # Arrange.
        url = WebTest._helper.build_url('video/titles?language={}&parent={}'.format(
            WebTest._language_id,
            WebTest._parent_id))
        # Act.
        data = get_json(url)
        # Assert.
        expected_titles = ['Compressor Head [1x01] Variable Length Codes']
        are_expected_items_in_list(self, data, 'titles')
        are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
    def test_5_06_video_titles_by_l_q(self):
        """
        Query video titles by language and quality.
        """
        # Arrange.
        url = WebTest._helper.build_url('video/titles?language={}&quality={}'.format(
            WebTest._language_id,
            WebTest._quality_id))
        # Act.
        data = get_json(url)
        # Assert.
        expected_titles = ['Compressor Head (2014)']
        are_expected_items_in_list(self, data, 'titles')
        are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
    def test_5_07_video_titles_by_p_q(self):
        """
        Query video titles by parent and quality.
        """
        # Arrange.
        url = WebTest._helper.build_url(
            'video/titles?parent={}&quality={}'.format(WebTest._parent_id, WebTest._quality_id))
        # Act.
        data = get_json(url)
        # Assert.
        expected_titles = ['Compressor Head [1x01] Variable Length Codes']
        are_expected_items_in_list(self, data, 'titles')
        are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
        # Capture the episode id for the details test below.
        WebTest._episode_title_id = get_item_from_embedded_dictionary(
            data['titles'],
            'title',
            'Compressor Head [1x01] Variable Length Codes',
            'id')
    def test_5_08_video_titles_by_sl(self):
        """
        Query video titles by subtitle language.
        """
        # Arrange.
        url = WebTest._helper.build_url('video/titles?subtitle={}'.format(WebTest._language_id))
        # Act.
        data = get_json(url)
        # Assert.
        expected_titles = ['Compressor Head (2014)']
        are_expected_items_in_list(self, data, 'titles')
        are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
    def test_5_09_video_titles_by_l_sl(self):
        """
        Query video titles by language and subtitle language.
        """
        # Arrange.
        url = WebTest._helper.build_url('video/titles?language={}&subtitle={}'.format(
            WebTest._language_id,
            WebTest._language_id))
        # Act.
        data = get_json(url)
        # Assert.
        expected_titles = ['Compressor Head (2014)']
        are_expected_items_in_list(self, data, 'titles')
        are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
    def test_5_10_video_titles_by_p_sl(self):
        """
        Query video titles by parent and subtitle language.
        """
        # Arrange.
        url = WebTest._helper.build_url('video/titles?parent={}&subtitle={}'.format(
            WebTest._parent_id,
            WebTest._language_id))
        # Act.
        data = get_json(url)
        # Assert.
        expected_titles = ['Compressor Head [1x01] Variable Length Codes']
        are_expected_items_in_list(self, data, 'titles')
        are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
    def test_5_11_video_titles_by_q_sl(self):
        """
        Query video titles by quality and subtitle language.
        """
        # Arrange.
        url = WebTest._helper.build_url('video/titles?quality={}&subtitle={}'.format(
            WebTest._quality_id,
            WebTest._language_id))
        # Act.
        data = get_json(url)
        # Assert.
        expected_titles = ['Compressor Head (2014)']
        are_expected_items_in_list(self, data, 'titles')
        are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
    def test_5_12_video_titles_by_l_p_sl(self):
        """
        Query video titles by language, parent and subtitle language.
        """
        # Arrange.
        url = WebTest._helper.build_url('video/titles?language={}&parent={}&subtitle={}'.format(
            WebTest._language_id,
            WebTest._parent_id,
            WebTest._language_id))
        # Act.
        data = get_json(url)
        # Assert.
        expected_titles = ['Compressor Head [1x01] Variable Length Codes']
        are_expected_items_in_list(self, data, 'titles')
        are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
    def test_5_13_video_titles_by_l_q_sl(self):
        """
        Query video titles by language, quality and subtitle language.
        """
        # Arrange.
        url = WebTest._helper.build_url('video/titles?language={}&quality={}&subtitle={}'.format(
            WebTest._language_id,
            WebTest._quality_id,
            WebTest._language_id))
        # Act.
        data = get_json(url)
        # Assert.
        expected_titles = ['Compressor Head (2014)']
        are_expected_items_in_list(self, data, 'titles')
        are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
    def test_5_14_video_titles_by_l_p_q_sl(self):
        """
        Query video titles by language, parent, quality and subtitle language.
        """
        # Arrange.
        url = WebTest._helper.build_url('video/titles?language={}&parent={}&quality={}&subtitle={}'.format(
            WebTest._language_id,
            WebTest._parent_id,
            WebTest._quality_id,
            WebTest._language_id))
        # Act.
        data = get_json(url)
        # Assert.
        expected_titles = ['Compressor Head [1x01] Variable Length Codes']
        are_expected_items_in_list(self, data, 'titles')
        are_expected_kv_pairs_in_list(self, data['titles'], 'title', expected_titles)
    def test_6_search(self):
        """Full-text search returns both the series and its episode."""
        # Arrange.
        url = WebTest._helper.build_url('search/family')
        # Act.
        data = get_json(url)
        # Assert.
        expected_titles = ['Family', 'Family [01] Intro']
        are_expected_items_in_list(self, data, 'videos')
        are_expected_kv_pairs_in_list(self, data['videos'], 'title', expected_titles)
    def test_7_details(self):
        """Episode details expose files and subtitles; remembers a file id."""
        # Arrange.
        url = WebTest._helper.build_url('video/details/{}'.format(WebTest._episode_title_id))
        # Act.
        data = get_json(url)
        # Assert.
        are_expected_items_in_list(self, data['details'], 'id', 'files', 'subtitles', 'title')
        self.assertEqual('Compressor Head [1x01] Variable Length Codes', data['details']['title'], 'Wrong title.')
        are_expected_kv_pairs_in_list(
            self,
            data['details']['files'],
            'language',
            ['Finnish', 'Greek', 'Greek'])
        are_expected_kv_pairs_in_list(
            self,
            data['details']['files'],
            'quality',
            ['HD (720p)', 'HD (720p)', 'LQ'])
        are_expected_kv_pairs_in_list(
            self,
            data['details']['subtitles'],
            'language',
            ['English', 'Greek', 'Greek', 'Hungarian'])
        # Capture a concrete file id for the playlist tests below.
        WebTest._file_id = data['details']['files'][0]['id']
    def test_8_01_playlist_add(self):
        """Create a playlist with one video track; remembers the playlist id."""
        # Arrange.
        url = WebTest._helper.build_url('playlist/add')
        payload = {
            'title' : 'Test playlist',
            'tracks': [
                {'category' : 'video', 'file' : WebTest._file_id}]}
        # Act.
        data = put_json(url, payload)
        # Assert.
        self.assertEqual('Test playlist', data['playlist']['title'], 'Wrong title for the playlist.')
        WebTest._playlist_id = data['playlist']['id']
    def test_8_02_playlist_add_track(self):
        """Append a second track to the playlist created by the previous test."""
        # Arrange.
        url = WebTest._helper.build_url('playlist/add-track')
        payload = {'playlist' : WebTest._playlist_id, 'category' : 'video', 'file' : WebTest._file_id}
        # Act.
        data = put_json(url, payload)
        # Assert.
        self.assertEqual('video', data['track']['category'], 'Wrong category.')
        self.assertEqual('Compressor Head [1x01] Variable Length Codes', data['track']['title'], 'Wrong title.')
    def test_8_03_playlists(self):
        """Exactly one playlist exists and it carries the expected title."""
        # Arrange.
        url = WebTest._helper.build_url('playlist/all')
        # Act.
        data = get_json(url)
        # Assert.
        self.assertNotEqual(None, data['playlists'], 'There are no playlists in the response.')
        self.assertEqual(1, len(data['playlists']), 'Incorrect number of playlists.')
        self.assertEqual('Test playlist', data['playlists'][0]['title'], 'Incorrect playlist title.')
| 32.529412 | 120 | 0.58822 |
09d79f0d227847749db1ddb7eb6acbb60326e8b8 | 862 | py | Python | 10_Name_Card_Detection/pytorch-faster-rcnn/lib/datasets/factory.py | ZeroWeight/Pattern-Recognize | ce18ab7d218840978f546a94d02d4183c9dc1aac | [
"MIT"
] | 4 | 2018-07-30T01:46:22.000Z | 2019-04-09T12:23:52.000Z | 10_Name_Card_Detection/pytorch-faster-rcnn/lib/datasets/factory.py | ZeroWeight/Pattern-Recognize | ce18ab7d218840978f546a94d02d4183c9dc1aac | [
"MIT"
] | null | null | null | 10_Name_Card_Detection/pytorch-faster-rcnn/lib/datasets/factory.py | ZeroWeight/Pattern-Recognize | ce18ab7d218840978f546a94d02d4183c9dc1aac | [
"MIT"
] | 1 | 2020-02-25T05:09:06.000Z | 2020-02-25T05:09:06.000Z | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Factory method for easily getting imdbs by name."""
# Registry mapping a dataset name to a zero-argument factory producing an imdb.
# (The double underscore is only a naming convention here -- name mangling does
# not apply at module scope.)
__sets = {}
from datasets.name_card import name_card
import numpy as np
# Register the real name-card splits. `split=split` binds the loop variable at
# definition time, avoiding the late-binding-closure pitfall.
for split in ['trainval', 'test']:
    name = 'name_card_real_{}'.format(split)
    __sets[name] = (lambda split=split: name_card(split,'NameCardReal'))
# The fake data set only provides a training split.
__sets['name_card_fake_train'] = (lambda: name_card('trainval','NameCardFake'))
def get_imdb(name):
    """Instantiate and return the imdb (image database) registered under *name*."""
    if name in __sets:
        return __sets[name]()
    raise KeyError('Unknown dataset: {}'.format(name))
def list_imdbs():
    """Return the names of every registered imdb as a list."""
    return [registered_name for registered_name in __sets]
| 29.724138 | 79 | 0.612529 |
09d9a6c70c2c9fb2475c65d8587c45d1e8aa1eb1 | 1,251 | py | Python | portafolio/core/views.py | breinerGiraldo/python | 89603d7ce0be8e2bb20817ee3c845fdb26a1b54a | [
"bzip2-1.0.6"
] | null | null | null | portafolio/core/views.py | breinerGiraldo/python | 89603d7ce0be8e2bb20817ee3c845fdb26a1b54a | [
"bzip2-1.0.6"
] | null | null | null | portafolio/core/views.py | breinerGiraldo/python | 89603d7ce0be8e2bb20817ee3c845fdb26a1b54a | [
"bzip2-1.0.6"
] | null | null | null | from django.shortcuts import render,HTTpResponse
# Create your views here.
def indexcore(request):
    # NOTE(review): dead code -- this definition is shadowed by the second
    # `indexcore` defined later in this module; Python keeps only the last
    # binding of the name.
    return render(request,'index.html')
def nosotros(request):
    """Render the static 'about us' (nosotros) page."""
    return render(request,'nosotros.html')
def indexcore(request):
    """Render the index page and process the contact form.

    GET: show an empty contact form.
    POST: re-bind the form to the submitted data; when valid, save it and
    redirect to the 'inicio' URL with an "?Ok" success marker, otherwise
    fall through and re-render the page with the bound (invalid) form.

    NOTE(review): this definition shadows the earlier `indexcore` above, and
    it relies on `ContactForm`, `redirect` and `reverse` being imported in
    this module.
    """
    # Start with an unbound form for the plain GET case.
    contact_form = ContactForm()
    if request.method == "POST":
        # Re-bind the form with the submitted POST data.
        contact_form = ContactForm(data=request.POST)
        if contact_form.is_valid():
            # Individual fields pulled from the request; currently unused
            # (the ModelForm save persists everything) -- presumably kept for
            # a future notification e-mail. The key 'tipom' had a stray
            # trailing comma ('tipom,') in the original, which always
            # returned the empty default; fixed.
            email = request.POST.get('email', '')
            tipom = request.POST.get('tipom', '')
            nombre = request.POST.get('nombre', '')
            msj = request.POST.get('msj', '')
            contact_form.save()
            # Redirect with a query-string marker so the template can show a
            # "message sent" notice.
            return redirect(reverse('inicio') + "?Ok")
    return render(request, 'index.html', {'formulario': contact_form})
| 39.09375 | 106 | 0.655476 |
09da5fe1886cdd50f1983ad7996a351fe7c5c4f5 | 257 | py | Python | setup.py | luphord/australian-housing-exercise | 441b8700cfe6d50742945b33d4f123dc6c497e5a | [
"MIT"
] | null | null | null | setup.py | luphord/australian-housing-exercise | 441b8700cfe6d50742945b33d4f123dc6c497e5a | [
"MIT"
] | null | null | null | setup.py | luphord/australian-housing-exercise | 441b8700cfe6d50742945b33d4f123dc6c497e5a | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
# Package metadata for the australian_housing exercise.
# Bug fix: the original wrapped `description` and `author` in doubled quotes
# ('"..."'), so the literal quote characters became part of the metadata
# strings; plain strings are intended.
setup(
    name='australian_housing',
    packages=find_packages(),
    version='0.2.1',
    description='A data science exercise on Australian house approvements',
    author='luphord',
    license='MIT',
)
| 23.363636 | 77 | 0.696498 |
09dbc482da6f2620a0ec95d44dab6ffbe0c052f9 | 4,439 | py | Python | monopoly.py | michaelhutton/monopoly | d3adcf524dfb015dbdaaadf905ca8cc4396fde3e | [
"MIT"
] | null | null | null | monopoly.py | michaelhutton/monopoly | d3adcf524dfb015dbdaaadf905ca8cc4396fde3e | [
"MIT"
] | null | null | null | monopoly.py | michaelhutton/monopoly | d3adcf524dfb015dbdaaadf905ca8cc4396fde3e | [
"MIT"
] | null | null | null | import random
# The 40 squares of a standard (US) Monopoly board, indexed 0 ("Go") through
# 39 ("Boardwalk"); board positions elsewhere are indices into this list.
squares = [
    "Go",
    "Mediterranean Ave.",
    "Community Chest",
    "Baltic Ave.",
    "Income Tax",
    "Reading Railroad",
    "Oriental Ave.",
    "Chance",
    "Vermont Ave.",
    "Connecticut Ave.",
    "Jail",
    "St. Charles Place",
    "Electric Company",
    "States Ave.",
    "Virginia Ave.",
    "Pennsylvania Railroad",
    "St. James Place",
    "Community Chest",
    "Tennessee Ave.",
    "New York Ave.",
    "Free Parking",
    "Kentucky Ave.",
    "Chance",
    "Indiana Ave.",
    "Illinois Ave.",
    "B. & O. Railroad",
    "Atlantic Ave.",
    "Ventnor Ave.",
    "Water Works",
    "Marvin Gardens",
    "Go To Jail",
    "Pacific Ave.",
    "North Carolina Ave.",
    "Community Chest",
    "Pennsylvania Ave.",
    "Short Line Railroad",
    "Chance",
    "Park Place",
    "Luxury Tax",
    "Boardwalk"
]
# Cached board size, used for wrapping positions past "Go".
SQUARES_LENGTH = len(squares)
# Chance deck. Only the movement cards are interpreted by pick_card();
# money/repair cards are drawn but currently have no effect.
chance_cards = [
    "Advance to Go",
    "Advance to Illinois Ave.",
    "Advance to St. Charles Place",
    "Advance token to nearest Utility",
    "Advance token to the nearest Railroad",
    "Bank pays you dividend of $50",
    "Get out of Jail Free Card",
    "Go Back 3 Spaces",
    "Go to Jail",
    "Make general repairs on all your property",
    "Pay poor tax of $15",
    "Take a trip to Reading Railroad",
    "Take a walk on the Boardwalk",
    "You have been elected Chairman of the Board",
    "Your building loan matures - Collect $150"
]
# Community Chest deck. Movement cards: "Advance to Go" and "Go to Jail".
community_chest_cards = [
    "Advance to Go",
    "Bank error in your favor - Collect $200",
    "Doctor's fees - Pay $50",
    "From sale of stock you get $50",
    "Get Out of Jail Free Card",
    "Go to Jail",
    "Grand Opera Night - Collect $50 from every player for opening night seats",
    "Holiday Fund matures - Receive $100",
    "Income tax refund - Collect $20",
    "Life insurance matures - Collect $100",
    "Pay hospital fees of $100",
    "Pay school fees of $150",
    "Receive $25 consultancy fee",
    "You are assessed for street repairs - $40 per house - $115 per hotel",
    "You have won second prize in a beauty contest - Collect $10",
    "You inherit $100"
]
def roll_dice():
    """Roll two six-sided dice and return the results as a two-item list."""
    return [random.randint(1, 6) for _ in range(2)]
def pick_card(player, deck):
    """Draw a random card from *deck* (chance or community chest) and apply
    its movement effect to *player*.

    Only movement cards change state; money/repair cards are drawn but have
    no effect yet (no money is modelled). Returns the mutated *player* dict.

    Bug fixes vs. the original:
    - "Advance token to the nearest Railroad" moved positions 7 and 36
      *backwards* (to 5 and 35); per the official Chance cards "nearest"
      means the next one moving forward, wrapping past Go (7->15, 36->5).
    - "Advance token to nearest Utility" sent position 36 backwards to
      Water Works (28); moving forward it should wrap to Electric
      Company (12).
    """
    card = deck[random.randint(0, len(deck) - 1)]
    print("Started at: " + str(player["pos"]))
    if card == "Advance to Go":
        player["pos"] = 0
    elif card == "Advance to Illinois Ave.":
        player["pos"] = 24
    elif card == "Advance to St. Charles Place":
        player["pos"] = 11
    elif card == "Advance token to nearest Utility":
        # Utilities sit at 12 (Electric Company) and 28 (Water Works);
        # take the next one forward, wrapping past Go.
        player["pos"] = next((u for u in (12, 28) if u > player["pos"]), 12)
    elif card == "Advance token to the nearest Railroad":
        # Railroads sit at 5, 15, 25 and 35; take the next one forward,
        # wrapping past Go.
        player["pos"] = next((r for r in (5, 15, 25, 35) if r > player["pos"]), 5)
    elif card == "Go Back 3 Spaces":
        # Modulo guards against a (theoretical) negative board position.
        player["pos"] = (player["pos"] - 3) % 40
    elif card == "Go to Jail":
        player["pos"] = 10
        player["in_jail"] = True
    elif card == "Take a trip to Reading Railroad":
        player["pos"] = 5
    elif card == "Take a walk on the Boardwalk":
        player["pos"] = 39
    print("Received card: " + card)
    print("Ended at: " + str(player["pos"]))
    return player
# Single simulated player: board position, consecutive doubles, jail flag.
player1 = {
    "pos": 0,
    "doubles_in_a_row": 0,
    "in_jail": False
}
# Simulate 99 turns, printing the square landed on each turn.
for turn in range(1,100):
    dice = roll_dice()
    print(dice)
    # Track consecutive doubles (resets on any non-double roll).
    if(dice[0] == dice[1]):
        player1["doubles_in_a_row"] = player1["doubles_in_a_row"] + 1
    else:
        player1["doubles_in_a_row"] = 0
    # TODO: if the player has rolled 3 doubles, go to jail!
    # Move forward, wrapping past "Go" via the board size.
    player1["pos"] = (player1["pos"] + dice[0] + dice[1]) % SQUARES_LENGTH
    # TODO: Check if its a go to jail space
    if(squares[player1["pos"]] == "Chance"):
        print("chance!")
        print(player1)
        pick_card(player1, chance_cards)
        print(player1)
    if(squares[player1["pos"]] == "Community Chest"):
        print("CC!")
        pick_card(player1, community_chest_cards)
    print("Turn " + str(turn) + ": " + squares[player1["pos"]])
09ddae526c3cd9bcfe820b2b4ae3706b5e1e7c32 | 7,769 | py | Python | coinzdense/app.py | pibara/coinzdense-python | f051770b71fa0afe935eb0d2079dab21eea9432d | [
"BSD-3-Clause"
] | null | null | null | coinzdense/app.py | pibara/coinzdense-python | f051770b71fa0afe935eb0d2079dab21eea9432d | [
"BSD-3-Clause"
] | null | null | null | coinzdense/app.py | pibara/coinzdense-python | f051770b71fa0afe935eb0d2079dab21eea9432d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python3
from coinzdense.signing import SigningKey as _SigningKey
from coinzdense.validation import ValidationEnv as _ValidationEnv
from coinzdense.wallet import create_wallet as _create_wallet
from coinzdense.wallet import open_wallet as _open_wallet
def _keys_per_signature(hashlen, otsbits):
return 2*(((hashlen*8-1) // otsbits)+1)
def _sub_sub_keyspace_usage(hashlen, otsbits, height):
return 1 + _keys_per_signature(hashlen, otsbits) * (1 << height)
def _sub_keyspace_usage(hashlen, otsbits, heights):
    """Recursively compute the key-space slots for a chain of tree levels.

    *heights* lists the tree height per level; every leaf of one level
    anchors a full sub-chain of the remaining levels.
    """
    head, tail = heights[0], heights[1:]
    total = _sub_sub_keyspace_usage(hashlen, otsbits, head)
    if tail:
        total += (1 << head) * _sub_keyspace_usage(hashlen, otsbits, tail)
    return total
def _keyspace_usage(hashlen, otsbits, keyspace):
    """Total key-space slots needed by a full *keyspace* level description.

    Each level contributes one slot per leaf of its combined tree heights
    plus its own tree-chain usage; non-leaf levels additionally reserve
    2**reserve complete sub-key-spaces for the levels below.
    """
    head, deeper_levels = keyspace[0], keyspace[1:]
    total = (1 << sum(head["heights"])) + _sub_keyspace_usage(hashlen, otsbits, head["heights"])
    if deeper_levels:
        total += (1 << head["reserve"]) * _keyspace_usage(hashlen, otsbits, deeper_levels)
    return total
class KeySpace:
    """Allocator for 64-bit key-space indices of hash-based signing keys.

    A heap (growing upward from the optional reserved region) holds this
    level's own key material; a stack (moving downward from *size*) hands
    out sub-key-spaces for deeper levels.
    """

    def __init__(self, hashlen, otsbits, keyspace, offset=0, size=1<<64, state=None):
        """Initialize the allocator.

        :param hashlen: hash length in bytes
        :param otsbits: one-time-signature chunk size in bits
        :param keyspace: list of level descriptors ({"heights": [...], "reserve": n})
        :param offset: first index of the range managed by this object
        :param size: number of indices in the managed range
        :param state: previously captured state (see get_state()); when given
            it is adopted as-is and offset/size are ignored.
        """
        self.hashlen = hashlen
        self.otsbits = otsbits
        self.keyspace = keyspace
        if state is None:
            self.state = {}
            self.state["offset"] = offset
            # Stack pointer for sub-space allocation; allocations move it down.
            # NOTE(review): this starts at `size` rather than `offset + size`;
            # for a non-zero offset the stack would allocate below the managed
            # range -- confirm the intended semantics.
            self.state["stack"] = size
            reserve_bits = keyspace[0].get("reserve", None)
            if reserve_bits is None:
                # Leaf level: no region reserved for sub-key-spaces.
                self.state["heap_start"] = offset
                self.state["heap"] = offset
                self.state["has_reserved"] = False
                self.state["reserved_heap_start"] = offset
                self.state["reserved_heap"] = offset
            else:
                # Reserve room for 2**reserve_bits complete sub-key-spaces
                # below the heap.
                reserved = (1 << reserve_bits) * _keyspace_usage(hashlen, otsbits, keyspace[1:])
                self.state["heap_start"] = offset + reserved
                self.state["heap"] = offset + reserved
                self.state["has_reserved"] = True
                self.state["reserved_heap_start"] = offset
                self.state["reserved_heap"] = offset
            # Claim this level's own key material on the heap.
            self.state["own_offset"] = self.state["heap"]
            self.state["heap"] += (1 << sum(keyspace[0]["heights"])) + _sub_keyspace_usage(hashlen, otsbits, keyspace[0]["heights"])
        else:
            self.state = state

    def own_offset(self):
        """Return the first key-space index of this level's own key material."""
        return self.state["own_offset"]

    def allocate_subspace(self):
        """Carve a sub-key-space off the top of this space and return it.

        Bug fix: the original referenced the bare names ``hashlen``,
        ``otsbits`` and ``keyspace``, which are not in scope inside this
        method and raised NameError; they must be read from ``self``.
        """
        keyspace_size = _keyspace_usage(self.hashlen, self.otsbits, self.keyspace[1:])
        self.state["stack"] -= keyspace_size
        return KeySpace(self.hashlen, self.otsbits, self.keyspace[1:], self.state["stack"], keyspace_size)

    def get_state(self):
        """Return the mutable allocator state dict (suitable for persisting)."""
        return self.state
class BlockChainEnv:
    """Validated view on a blockchain's coinzdense key-space configuration.

    Wraps the parsed blockchain RC dict, asserts its structural invariants
    and hands out signing keys, validators and wallets scoped to a sub-path
    of the key hierarchy.

    NOTE(review): validation relies on `assert`, which is stripped when
    Python runs with -O; consider explicit exceptions for hard requirements.
    """
    def __init__(self, conf):
        # Mandatory top-level keys of the (linted) blockchain RC file.
        assert "appname" in conf, "Please run coinzdense-lint on your blockchain RC"
        assert "hashlen" in conf, "Please run coinzdense-lint on your blockchain RC"
        assert "otsbits" in conf, "Please run coinzdense-lint on your blockchain RC"
        assert "keyspace" in conf, "Please run coinzdense-lint on your blockchain RC"
        self.appname = conf["appname"]
        self.hashlen = conf["hashlen"]
        self.otsbits = conf["otsbits"]
        self.keyspace = conf["keyspace"]
        # Optional sub-key hierarchy and the path of this env inside it
        # (both filled in when descending via __getitem__).
        if "hierarchy" in conf:
            self.hierarchy = conf["hierarchy"]
        else:
            self.hierarchy = {}
        if "sub_path" in conf:
            self.subpath = conf["sub_path"]
        else:
            self.subpath = []
        # Basic type checks on everything read above.
        assert isinstance(self.appname, str), "Please run coinzdense-lint on your blockchain RC"
        assert isinstance(self.hashlen, int), "Please run coinzdense-lint on your blockchain RC"
        assert isinstance(self.otsbits, int), "Please run coinzdense-lint on your blockchain RC"
        assert isinstance(self.keyspace, list), "Please run coinzdense-lint on your blockchain RC"
        assert isinstance(self.hierarchy, dict), "Please run coinzdense-lint on your blockchain RC"
        assert isinstance(self.subpath, list), "Please run coinzdense-lint on your blockchain RC"
        # Allowed ranges: hashlen 16..64, otsbits 4..16 (bounds exclusive below).
        assert self.hashlen > 15
        assert self.hashlen < 65
        assert self.otsbits > 3
        assert self.otsbits < 17
        self.depth = 0
        self._check_hierarchy()
        # Validate every key-space level descriptor.
        for idx, val in enumerate(self.keyspace):
            assert isinstance(val, dict), "Please run coinzdense-lint on your blockchain RC"
            total_height = 0
            assert "heights" in val, "Please run coinzdense-lint on your blockchain RC"
            assert isinstance(val["heights"], list), "Please run coinzdense-lint on your blockchain RC"
            assert len(val["heights"]) > 1, "Please run coinzdense-lint on your blockchain RC"
            assert len(val["heights"]) < 33, "Please run coinzdense-lint on your blockchain RC"
            for idx2,height in enumerate(val["heights"]):
                assert isinstance(height, int), "Please run coinzdense-lint on your blockchain RC"
                assert height > 2, "Please run coinzdense-lint on your blockchain RC"
                assert height < 17, "Please run coinzdense-lint on your blockchain RC"
                total_height += height
            # Every level except the last must reserve bits for sub-key
            # allocation; the last level must not.
            if idx < len(self.keyspace) -1:
                assert "reserve" in val, "Please run coinzdense-lint on your blockchain RC"
                assert isinstance(val["reserve"], int), "Please run coinzdense-lint on your blockchain RC"
                assert val["reserve"] > 1, "Please run coinzdense-lint on your blockchain RC"
                assert val["reserve"] < total_height - 1, "Please run coinzdense-lint on your blockchain RC"
            else:
                assert "reserve" not in val, "Please run coinzdense-lint on your blockchain RC"
        for subpath_part in self.subpath:
            assert isinstance(subpath_part, str), "Please run coinzdense-lint on your blockchain RC"
        # The whole key-space must be addressable with a 64-bit index.
        total = _keyspace_usage(self.hashlen, self.otsbits, self.keyspace)
        assert total.bit_length() < 65, "Please run coinzdense-lint on your blockchain RC"
    def _check_hierarchy(self, sub_hierarchy=None, depth=0):
        # Recursively validate the (nested-dict) hierarchy and record the
        # maximum depth seen in self.depth.
        if sub_hierarchy is not None:
            my_hierarchy = sub_hierarchy
        else:
            my_hierarchy = self.hierarchy
        my_depth = depth + 1
        if my_depth > self.depth:
            self.depth = my_depth
        for key, val in my_hierarchy.items():
            assert isinstance(val, dict), "Please run coinzdense-lint on your blockchain RC"
            self._check_hierarchy(val, my_depth)
    def __getitem__(self, key):
        # Descend one level into the named sub-key hierarchy, dropping the
        # top key-space level and extending the sub-path.
        if key in self.hierarchy:
            subconf = {}
            subconf["appname"] = self.appname
            subconf["hashlen"] = self.hashlen
            subconf["otsbits"] = self.otsbits
            subconf["keyspace"] = self.keyspace[1:]
            subconf["hierarchy"] = self.hierarchy[key]
            subconf["sub_path"] = self.subpath[:] + [key]
            return BlockChainEnv(subconf)
        else:
            raise KeyError("No sub-key hierarchy named " + key)
    def get_signing_key(self, wallet, idx=0, idx2=0, backup=None):
        # Signing key scoped to this env's [appname, *subpath] path.
        path = [self.appname] + self.subpath
        return _SigningKey(self.hashlen, self.otsbits, self.keyspace, path, self.hierarchy, wallet, idx, idx2, backup)
    def get_validator(self):
        # Signature-validation environment for this env's path.
        path = [self.appname] + self.subpath
        return _ValidationEnv(self.hashlen, self.otsbits, self.keyspace, path, self.hierarchy)
    def create_wallet(self, salt, key, password):
        # New encrypted wallet bound to this env's path.
        path = [self.appname] + self.subpath
        return _create_wallet(salt, key, password, path)
    def open_wallet(self, wdata, password):
        # Decrypt and open an existing wallet for this env's path.
        path = [self.appname] + self.subpath
        return _open_wallet(wdata, password, path)
| 50.122581 | 132 | 0.635603 |
09de00e54d3860203b7729e1854754335ac141d7 | 1,296 | py | Python | src/asyncdataflow/inspector.py | tomaszkingukrol/async-data-flow | 1572ef101cb0e6a0f27a77401538a4620ee9939f | [
"Apache-2.0"
] | null | null | null | src/asyncdataflow/inspector.py | tomaszkingukrol/async-data-flow | 1572ef101cb0e6a0f27a77401538a4620ee9939f | [
"Apache-2.0"
] | null | null | null | src/asyncdataflow/inspector.py | tomaszkingukrol/async-data-flow | 1572ef101cb0e6a0f27a77401538a4620ee9939f | [
"Apache-2.0"
] | null | null | null | from collections.abc import Iterable
from typing import Callable, Tuple
import inspect
from .definition import DataFlowInspector
from .exceptions import DataFlowFunctionArgsError, DataFlowNotCallableError, DataFlowEmptyError, DataFlowNotTupleError
class DataFlowInspect(DataFlowInspector):
    """Inspects the callables that make up a DataFlow definition."""

    def check_dataflow_args(self, dataflow: tuple):
        """Recursively validate a dataflow definition.

        Raises DataFlowNotTupleError for non-tuples, DataFlowEmptyError for
        an empty tuple, and DataFlowNotCallableError for members that are
        neither nested iterables nor callables; plain callables are checked
        for POSITIONAL_OR_KEYWORD-only signatures.
        """
        if not isinstance(dataflow, tuple):
            raise DataFlowNotTupleError(dataflow)
        if not dataflow:
            raise DataFlowEmptyError()
        for member in dataflow:
            # Iterables are treated as nested dataflows (note: the recursive
            # call re-applies the tuple check, so only tuples may nest).
            if isinstance(member, Iterable):
                self.check_dataflow_args(member)
                continue
            if not isinstance(member, Callable):
                raise DataFlowNotCallableError(member)
            _check_positional_or_keyword_args(member)
def _check_positional_or_keyword_args(func: Callable) -> bool:
''' Check that function has only POSITIONAL_OR_KEYWORD arguments.
'''
inspect_args = inspect.signature(func).parameters.values()
for arg in inspect_args:
if str(arg.kind) != 'POSITIONAL_OR_KEYWORD':
raise DataFlowFunctionArgsError(func.__name__, arg)
| 35.027027 | 118 | 0.655864 |
09e5ab892fd8685aedec11f8378615ed2931fa1c | 891 | py | Python | processing_pipeline/extractionless_registration.py | SijRa/Brain-Image-Analysis-using-Deep-Learning | a35411bda6e39eff57f715a695b7fb6a30997706 | [
"MIT"
] | 2 | 2022-01-04T16:54:20.000Z | 2022-01-24T03:01:14.000Z | processing_pipeline/extractionless_registration.py | SijRa/Brain-Image-Analysis-using-Deep-Learning | a35411bda6e39eff57f715a695b7fb6a30997706 | [
"MIT"
] | null | null | null | processing_pipeline/extractionless_registration.py | SijRa/Brain-Image-Analysis-using-Deep-Learning | a35411bda6e39eff57f715a695b7fb6a30997706 | [
"MIT"
] | 1 | 2020-07-05T09:30:11.000Z | 2020-07-05T09:30:11.000Z | from ants import registration, image_read, image_write, resample_image, crop_image
from os import listdir
# Input directory of raw ADNI baseline scans, and the MNI152 2009a
# symmetric T1 template used as the fixed registration target.
mri_directory = "ADNI_baseline_raw/"
template_loc = "MNI152_2009/mni_icbm152_t1_tal_nlin_sym_09a.nii"
template = image_read(template_loc)
# Resample the template onto a fixed 192x192x160 voxel grid
# (interp_type=4 -- presumably B-spline per ANTs conventions, TODO confirm).
template = resample_image(template, (192, 192, 160), True, 4)
#template = crop_image(template)
# Register every scan to the template and write out the warped volume.
# No brain extraction is applied first (hence "extractionless" -- per the
# script's filename; confirm this is intentional for the pipeline).
for scan in listdir(mri_directory):
    id = scan.split('.')[0]  # scan id = filename without extension
    filename = "ADNI_original_registered/" + id + ".nii"
    img_path = mri_directory + scan
    image = image_read(img_path, reorient=True)
    # Bring volumes with a different grid onto the template's 192-voxel grid.
    if image.shape[1] != 192:
        print("- Resampling -")
        image = resample_image(image, (192, 192, 160), True, 4)
    # type_of_transform="SyNRA": ANTs transform name -- confirm exact
    # stages (rigid/affine + SyN) against the ants.registration docs.
    registered_dict = registration(fixed=template, moving=image, type_of_transform="SyNRA")
    #img = crop_image(registered_dict['warpedmovout'])
    image_write(registered_dict['warpedmovout'], filename=filename)
    print("Registered:",scan)
09e5f7e19923e048c28f58a87dbbfe0de36d4a04 | 362 | py | Python | 3rdparty/pytorch/torch/autograd/variable.py | WoodoLee/TorchCraft | 999f68aab9e7d50ed3ae138297226dc95fefc458 | [
"MIT"
] | 51 | 2020-01-26T23:32:57.000Z | 2022-03-20T14:49:57.000Z | 3rdparty/pytorch/torch/autograd/variable.py | WoodoLee/TorchCraft | 999f68aab9e7d50ed3ae138297226dc95fefc458 | [
"MIT"
] | 2 | 2020-12-19T20:00:28.000Z | 2021-03-03T20:22:45.000Z | 3rdparty/pytorch/torch/autograd/variable.py | WoodoLee/TorchCraft | 999f68aab9e7d50ed3ae138297226dc95fefc458 | [
"MIT"
] | 33 | 2020-02-18T16:15:48.000Z | 2022-03-24T15:12:05.000Z | import torch
from torch._six import with_metaclass
class VariableMeta(type):
    """Metaclass whose instance check delegates to torch.Tensor.

    Any torch.Tensor therefore satisfies isinstance(x, <class using this
    metaclass>), regardless of the class's own MRO.
    """

    def __instancecheck__(cls, instance):
        return isinstance(instance, torch.Tensor)
class Variable(with_metaclass(VariableMeta, torch._C._LegacyVariableBase)):
    """Legacy Variable alias.

    Via VariableMeta, ``isinstance(x, Variable)`` is true for any
    torch.Tensor; the class body itself adds nothing.
    """
    pass


# Attach the imperative autograd execution engine as a class attribute.
from torch._C import _ImperativeEngine as ImperativeEngine
Variable._execution_engine = ImperativeEngine()
| 22.625 | 75 | 0.79558 |
09e7e9329ecb594a1ce5f26cf6f1dcdac3d78aef | 15,237 | py | Python | sp_api/api/finances/models/shipment_item.py | lionsdigitalsolutions/python-amazon-sp-api | 7374523ebc65e2e01e37d03fc4009a44fabf2c3b | [
"MIT"
] | null | null | null | sp_api/api/finances/models/shipment_item.py | lionsdigitalsolutions/python-amazon-sp-api | 7374523ebc65e2e01e37d03fc4009a44fabf2c3b | [
"MIT"
] | null | null | null | sp_api/api/finances/models/shipment_item.py | lionsdigitalsolutions/python-amazon-sp-api | 7374523ebc65e2e01e37d03fc4009a44fabf2c3b | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Selling Partner API for Finances
The Selling Partner API for Finances helps you obtain financial information relevant to a seller's business. You can obtain financial events for a given order, financial event group, or date range without having to wait until a statement period closes. You can also obtain financial event groups for a given date range. # noqa: E501
OpenAPI spec version: v0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ShipmentItem(object):
    """An item of an Amazon shipment financial event.

    NOTE: originally generated by the swagger code generator.  Rewritten to
    remove the per-attribute getter/setter boilerplate (the 13 properties
    are attached programmatically below) and to drop the unnecessary
    ``six`` dependency.  The public interface is unchanged: constructor
    keywords, attribute access via properties, ``swagger_types``,
    ``attribute_map``, ``to_dict``/``to_str``/``__repr__``/``__eq__``/``__ne__``.
    """

    # Attribute name -> declared swagger type (non-str types are other
    # generated model classes from this package).
    swagger_types = {
        'seller_sku': 'str',
        'order_item_id': 'str',
        'order_adjustment_item_id': 'str',
        'quantity_shipped': 'int',
        'item_charge_list': 'ChargeComponentList',
        'item_charge_adjustment_list': 'ChargeComponentList',
        'item_fee_list': 'FeeComponentList',
        'item_fee_adjustment_list': 'FeeComponentList',
        'item_tax_withheld_list': 'TaxWithheldComponentList',
        'promotion_list': 'PromotionList',
        'promotion_adjustment_list': 'PromotionList',
        'cost_of_points_granted': 'Currency',
        'cost_of_points_returned': 'Currency'
    }

    # Attribute name -> JSON key used on the wire by the Selling Partner API.
    attribute_map = {
        'seller_sku': 'SellerSKU',
        'order_item_id': 'OrderItemId',
        'order_adjustment_item_id': 'OrderAdjustmentItemId',
        'quantity_shipped': 'QuantityShipped',
        'item_charge_list': 'ItemChargeList',
        'item_charge_adjustment_list': 'ItemChargeAdjustmentList',
        'item_fee_list': 'ItemFeeList',
        'item_fee_adjustment_list': 'ItemFeeAdjustmentList',
        'item_tax_withheld_list': 'ItemTaxWithheldList',
        'promotion_list': 'PromotionList',
        'promotion_adjustment_list': 'PromotionAdjustmentList',
        'cost_of_points_granted': 'CostOfPointsGranted',
        'cost_of_points_returned': 'CostOfPointsReturned'
    }

    def __init__(self, seller_sku=None, order_item_id=None,
                 order_adjustment_item_id=None, quantity_shipped=None,
                 item_charge_list=None, item_charge_adjustment_list=None,
                 item_fee_list=None, item_fee_adjustment_list=None,
                 item_tax_withheld_list=None, promotion_list=None,
                 promotion_adjustment_list=None,
                 cost_of_points_granted=None,
                 cost_of_points_returned=None):  # noqa: E501
        """ShipmentItem - a model defined in Swagger"""
        # Initialize every private backing slot to None, like the
        # generated code did.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None
        provided = {
            'seller_sku': seller_sku,
            'order_item_id': order_item_id,
            'order_adjustment_item_id': order_adjustment_item_id,
            'quantity_shipped': quantity_shipped,
            'item_charge_list': item_charge_list,
            'item_charge_adjustment_list': item_charge_adjustment_list,
            'item_fee_list': item_fee_list,
            'item_fee_adjustment_list': item_fee_adjustment_list,
            'item_tax_withheld_list': item_tax_withheld_list,
            'promotion_list': promotion_list,
            'promotion_adjustment_list': promotion_adjustment_list,
            'cost_of_points_granted': cost_of_points_granted,
            'cost_of_points_returned': cost_of_points_returned,
        }
        # Mirror the generated behavior: only non-None values are applied
        # (through the property setters attached below).
        for attr, value in provided.items():
            if value is not None:
                setattr(self, attr, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        # Preserved from the generated code; only relevant if ShipmentItem
        # were ever made a dict subclass.
        if issubclass(ShipmentItem, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ShipmentItem):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other


def _shipment_item_property(name):
    """Build the standard get/set property pair backed by '_<name>'."""
    private = '_' + name

    def _getter(self):
        return getattr(self, private)

    def _setter(self, value):
        setattr(self, private, value)

    doc = "Gets or sets the {} of this ShipmentItem.".format(name)
    return property(_getter, _setter, doc=doc)


# Attach one property per swagger attribute; this replaces the ~25
# hand-written getter/setter pairs of the generated code.
for _attr in ShipmentItem.swagger_types:
    setattr(ShipmentItem, _attr, _shipment_item_property(_attr))
del _attr
| 35.270833 | 377 | 0.671458 |
09e83bd920be035d9c74e3803047761cd01ba2d9 | 583 | py | Python | glycan_profiling/database/prebuilt/__init__.py | mstim/glycresoft | 1d305c42c7e6cba60326d8246e4a485596a53513 | [
"Apache-2.0"
] | 4 | 2019-04-26T15:47:57.000Z | 2021-04-20T22:53:58.000Z | glycan_profiling/database/prebuilt/__init__.py | mstim/glycresoft | 1d305c42c7e6cba60326d8246e4a485596a53513 | [
"Apache-2.0"
] | 8 | 2017-11-22T19:20:20.000Z | 2022-02-14T01:49:58.000Z | glycan_profiling/database/prebuilt/__init__.py | mstim/glycresoft | 1d305c42c7e6cba60326d8246e4a485596a53513 | [
"Apache-2.0"
] | 3 | 2017-11-21T18:05:28.000Z | 2021-09-23T18:38:33.000Z | from .utils import hypothesis_register
from . import heparin
from . import combinatorial_mammalian_n_linked
from . import glycosaminoglycan_linkers
from . import combinatorial_human_n_linked
from . import biosynthesis_human_n_linked
from . import biosynthesis_mammalian_n_linked
from . import human_mucin_o_linked
__all__ = [
"hypothesis_register",
"heparin",
"combinatorial_mammalian_n_linked",
"combinatorial_human_n_linked",
"glycosaminoglycan_linkers",
"biosynthesis_human_n_linked",
"biosynthesis_mammalian_n_linked",
"human_mucin_o_linked",
]
| 27.761905 | 46 | 0.806175 |
09e84e1dd2d539b68ad7f9d464717d52336a5ae9 | 216 | py | Python | zhongyicheng/scripts/test.py | Chr0802/No-easy-summer1 | d8d88b8d025f039deca7c89518b63446e4f80567 | [
"MIT"
] | 2 | 2021-08-05T11:44:12.000Z | 2021-08-31T10:50:13.000Z | zhongyicheng/scripts/test.py | Chr0802/No-easy-summer1 | d8d88b8d025f039deca7c89518b63446e4f80567 | [
"MIT"
] | 1 | 2021-08-07T03:21:12.000Z | 2021-08-07T03:21:12.000Z | zhongyicheng/scripts/test.py | Chr0802/No-easy-summer1 | d8d88b8d025f039deca7c89518b63446e4f80567 | [
"MIT"
] | 8 | 2021-07-26T05:11:37.000Z | 2021-10-05T05:34:34.000Z | from elasticsearch import Elasticsearch
# Client pointed at a local Elasticsearch node on non-default port 8200.
es = Elasticsearch([
    {'host':'localhost','port':8200},
])
# Interactive sanity check: read a name from stdin, run a match query
# against the 'ename_test_multiprocess' index, and dump the raw hit list.
print(es.search(index='ename_test_multiprocess',body={"query":{"match":{"name":str(input())}}})['hits']['hits'])
| 30.857143 | 112 | 0.680556 |
09e89717699974cfa907e599273f2f898e6cc20f | 30 | py | Python | pastepdb/__init__.py | pooriaahmadi/pastepdb | 166b2e8614ee2ea6c8f2f804af23458defb4674a | [
"MIT"
] | 8 | 2021-03-17T10:48:49.000Z | 2021-04-06T08:16:04.000Z | pastepdb/__init__.py | pooriaahmadi/pastepdb | 166b2e8614ee2ea6c8f2f804af23458defb4674a | [
"MIT"
] | null | null | null | pastepdb/__init__.py | pooriaahmadi/pastepdb | 166b2e8614ee2ea6c8f2f804af23458defb4674a | [
"MIT"
] | null | null | null | from .pastepdb import pastepdb | 30 | 30 | 0.866667 |
09e89b2450d77d8cea8acdf70dfa8deb4095af90 | 3,370 | py | Python | my_plugins/youcompleteme/python/ycm/tests/diagnostic_interface_test.py | VirtualLG/vimrc | 33f961b0e465b852753479bc4aa0a32a6ff017cf | [
"MIT"
] | null | null | null | my_plugins/youcompleteme/python/ycm/tests/diagnostic_interface_test.py | VirtualLG/vimrc | 33f961b0e465b852753479bc4aa0a32a6ff017cf | [
"MIT"
] | null | null | null | my_plugins/youcompleteme/python/ycm/tests/diagnostic_interface_test.py | VirtualLG/vimrc | 33f961b0e465b852753479bc4aa0a32a6ff017cf | [
"MIT"
] | null | null | null | # Copyright (C) 2015-2018 YouCompleteMe contributors
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from ycm import diagnostic_interface
from ycm.tests.test_utils import VimBuffer, MockVimModule, MockVimBuffers
from hamcrest import assert_that, contains_exactly, has_entries, has_item
from unittest import TestCase
MockVimModule()
def SimpleDiagnosticToJson( start_line, start_col, end_line, end_col ):
  """Build a minimal ERROR diagnostic dict spanning the given extent.

  'location' points at the start position; 'location_extent' and the single
  entry of 'ranges' both cover [start, end].  Each position dict is a fresh
  object, matching the original literal construction.
  """
  def _pos( line, col ):
    return { 'line_num': line, 'column_num': col }

  return {
    'kind': 'ERROR',
    'location': _pos( start_line, start_col ),
    'location_extent': {
      'start': _pos( start_line, start_col ),
      'end': _pos( end_line, end_col )
    },
    'ranges': [
      {
        'start': _pos( start_line, start_col ),
        'end': _pos( end_line, end_col )
      }
    ]
  }
def YcmTextPropertyTupleMatcher( start_line, start_col, end_line, end_col ):
  """Hamcrest matcher: the result contains a YcmErrorProperty tuple
  starting at (start_line, start_col) whose options dict ends at
  (end_line, end_col)."""
  extent = has_entries( { 'end_col': end_col, 'end_lnum': end_line } )
  expected_tuple = contains_exactly( start_line,
                                     start_col,
                                     'YcmErrorProperty',
                                     extent )
  return has_item( expected_tuple )
class DiagnosticInterfaceTest( TestCase ):
  """Tests for converting ycmd diagnostics into Vim text properties."""

  def test_ConvertDiagnosticToTextProperties( self ):
    # Each case: [ diagnostic JSON, target buffer contents, matcher for the
    # expected text-property tuples ].  The conversion is run against a
    # mocked Vim with 'foo' as the current buffer and 'bar' as the target.
    for diag, contents, result in [
      # Error in middle of the line
      [
        SimpleDiagnosticToJson( 1, 16, 1, 23 ),
        [ 'Highlight this error please' ],
        YcmTextPropertyTupleMatcher( 1, 16, 1, 23 )
      ],
      # Error at the end of the line
      [
        SimpleDiagnosticToJson( 1, 16, 1, 21 ),
        [ 'Highlight this warning' ],
        YcmTextPropertyTupleMatcher( 1, 16, 1, 21 )
      ],
      # Non-ASCII contents: columns appear to be byte offsets, so the
      # 2-byte 'ø' is covered by end col 19 -- TODO confirm against
      # diagnostic_interface's column handling.
      [
        SimpleDiagnosticToJson( 1, 16, 1, 19 ),
        [ 'Highlight unicøde' ],
        YcmTextPropertyTupleMatcher( 1, 16, 1, 19 )
      ],
      # Non-positive position: positions are clamped to line 1, column 1.
      [
        SimpleDiagnosticToJson( 0, 0, 0, 0 ),
        [ 'Some contents' ],
        YcmTextPropertyTupleMatcher( 1, 1, 1, 1 )
      ],
      [
        SimpleDiagnosticToJson( -1, -2, -3, -4 ),
        [ 'Some contents' ],
        YcmTextPropertyTupleMatcher( 1, 1, 1, 1 )
      ],
    ]:
      with self.subTest( diag = diag, contents = contents, result = result ):
        current_buffer = VimBuffer( 'foo', number = 1, contents = [ '' ] )
        target_buffer = VimBuffer( 'bar', number = 2, contents = contents )
        with MockVimBuffers( [ current_buffer, target_buffer ],
                             [ current_buffer, target_buffer ] ):
          # Convert for the *target* buffer, which is not the current one.
          actual = diagnostic_interface._ConvertDiagnosticToTextProperties(
            target_buffer.number,
            diag )
          print( actual )
          assert_that( actual, result )
| 32.403846 | 77 | 0.625223 |
09e91e344b133bb70e9396f03df09da25b24a2b5 | 1,372 | py | Python | oa_zalo/zalo_base/migrations/0004_auto_20210714_0949.py | quandxbp/vnpt-ccos | 23d2bc3d3db2e0bce479e0ccfa62e13451306635 | [
"bzip2-1.0.6"
] | null | null | null | oa_zalo/zalo_base/migrations/0004_auto_20210714_0949.py | quandxbp/vnpt-ccos | 23d2bc3d3db2e0bce479e0ccfa62e13451306635 | [
"bzip2-1.0.6"
] | null | null | null | oa_zalo/zalo_base/migrations/0004_auto_20210714_0949.py | quandxbp/vnpt-ccos | 23d2bc3d3db2e0bce479e0ccfa62e13451306635 | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 3.2.4 on 2021-07-14 02:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """zalo_base 0004: drop ZaloMessage; add address/city/district/ward to
    ZaloUser and make name/phone optional (all CharField(200), nullable)."""

    dependencies = [
        ('zalo_base', '0003_auto_20210713_1103'),
    ]

    operations = [
        # The ZaloMessage model is removed entirely.
        migrations.DeleteModel(
            name='ZaloMessage',
        ),
        # New optional address-related fields on ZaloUser.
        migrations.AddField(
            model_name='zalouser',
            name='address',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='zalouser',
            name='city',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='zalouser',
            name='district',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='zalouser',
            name='ward',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        # Existing identity fields become optional.
        migrations.AlterField(
            model_name='zalouser',
            name='name',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='zalouser',
            name='phone',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
    ]
| 29.191489 | 74 | 0.559038 |
09e9f3d9309a9df34ab48cce11c67316c012a978 | 272 | py | Python | project/web/utils/docx_files.py | sirodeneko/pyProject | 02cd67df26decf7b0ec20d86a2bbb5bce9f4e369 | [
"MIT"
] | null | null | null | project/web/utils/docx_files.py | sirodeneko/pyProject | 02cd67df26decf7b0ec20d86a2bbb5bce9f4e369 | [
"MIT"
] | 1 | 2020-11-30T09:35:15.000Z | 2020-11-30T09:35:15.000Z | project/web/utils/docx_files.py | sirodeneko/pyProject | 02cd67df26decf7b0ec20d86a2bbb5bce9f4e369 | [
"MIT"
] | 1 | 2020-11-30T09:33:24.000Z | 2020-11-30T09:33:24.000Z | import json
import os
import re
import urllib.request
def docx_files_names(file_path='./static/reports/'):
    """Return the names of the .docx files directly inside *file_path*.

    The default keeps the original hard-coded reports directory; the path
    is now a parameter so the helper is reusable and testable (backward
    compatible: zero-argument calls behave exactly as before).  Order is
    whatever os.listdir yields.
    """
    return [name for name in os.listdir(file_path) if name.endswith('.docx')]
| 17 | 35 | 0.632353 |
09eadaf88e96e921514284415b829745e173d0ca | 708 | py | Python | pkgs/javusdev/javusdev/settings.py | quapka/javus | 577e0c2dabfaea39d7ffacd42100d8a5f4cd738c | [
"MIT"
] | 1 | 2020-09-22T01:38:21.000Z | 2020-09-22T01:38:21.000Z | pkgs/javusdev/javusdev/settings.py | petrs/javus | 6927c824d5e6b574a6a323c87bd5aa117eca5b00 | [
"MIT"
] | null | null | null | pkgs/javusdev/javusdev/settings.py | petrs/javus | 6927c824d5e6b574a6a323c87bd5aa117eca5b00 | [
"MIT"
] | 1 | 2020-07-26T07:20:47.000Z | 2020-07-26T07:20:47.000Z | import os
from pathlib import Path
def get_project_root() -> Path:
    """Project root: the directory two levels above this module."""
    # kudos to https://stackoverflow.com/a/53465812/2377489
    return Path(__file__).parent.parent
def get_project_src() -> Path:
    """Location of the javusdev package sources under the project root."""
    return get_project_root().joinpath("javusdev")
def get_project_data() -> Path:
    """Data directory bundled inside the javusdev sources."""
    return get_project_src().joinpath("data")
def get_javus_attacks_dir() -> Path:
    """Attacks directory of the sibling `javus` package.

    FIXME: this can break easily, but there doesn't seem to be a reasonable
    workaround at the moment -- it hard-codes the repository layout
    (two levels above the project root, then javus/data/attacks).
    """
    repo_root = get_project_root().parent.parent
    return repo_root / "javus" / "data" / "attacks"
# Computed once at import time; other modules import these as constants.
PROJECT_ROOT = get_project_root()
PROJECT_SRC = get_project_src()
DATA = get_project_data()
JAVUS_ATTACKS_DIR = get_javus_attacks_dir()
| 23.6 | 83 | 0.728814 |
09edfb321e8839956c0dd18d657713402150647f | 2,043 | py | Python | examples/design_studies/ihm_fingergait/check_progress.py | cbteeple/somo | 53a1a94f7d9d624bc4c43e582c80f24a0e98df24 | [
"MIT"
] | null | null | null | examples/design_studies/ihm_fingergait/check_progress.py | cbteeple/somo | 53a1a94f7d9d624bc4c43e582c80f24a0e98df24 | [
"MIT"
] | null | null | null | examples/design_studies/ihm_fingergait/check_progress.py | cbteeple/somo | 53a1a94f7d9d624bc4c43e582c80f24a0e98df24 | [
"MIT"
] | null | null | null | # Be sure to run this file from the "palm_sweeps" folder
# cd examples/palm_sweeps
import os
import sys
from datetime import datetime
path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, path)
from somo.sweep import iter_utils
# Sweep definition and the list of runs it expands to.
config_file = "sweeps/grid_diam_height.yaml"
todo_file = "_runs_todo.yaml"
# Each run's output folder is assumed to start with
# num_files_per_folder_start files and end with num_files_per_folder_end
# when finished -- TODO confirm these counts match the sweep's outputs.
num_files_per_folder_end = 5
num_files_per_folder_start = 1
time_per_run = 25  # seconds
avg_size = 40  # MB
parallel_cores = 4
# Get data from config files
config = iter_utils.load_yaml(config_file)
todo = iter_utils.load_yaml(todo_file)
total_runs = len(todo)
# Calculate the time (total wall-clock estimate across all cores)
total_time_min = (time_per_run / 60.0) * total_runs / parallel_cores
total_time_hr = total_time_min / 60.0
total_time_day = total_time_hr / 24.0
# Calculate total data size
total_size_GB = float(avg_size) * total_runs / 1000.0
# Calculate the percent complete by counting files already written under
# the sweep's group folder (recursively).
folder_to_count = iter_utils.get_group_folder(config)
cpt = sum([len(files) for r, d, files in os.walk(folder_to_count)])
total_files_expected_end = total_runs * num_files_per_folder_end
total_files_expected_start = total_runs * num_files_per_folder_start
progress = (cpt - total_files_expected_start) / (
    total_files_expected_end - total_files_expected_start
)
# Remaining time, assuming constant per-run cost.
eta_min = total_time_min * (1.0 - progress)
eta_hr = eta_min / 60.0
eta_day = eta_hr / 24.0
# Print info
print("")
print("Current time: " + datetime.now().strftime("%H:%M:%S"))
print("=================================")
print("Number of runs to complete: %d" % (total_runs))
print(
    "Estimated total data saved @ %0.1f MB per run: %0.2f GB"
    % (avg_size, total_size_GB)
)
print(
    "Estimated total time @ %0.1f sec per run with %d cores: %0.1f min, %0.2f hrs, %0.3f days"
    % (time_per_run, parallel_cores, total_time_min, total_time_hr, total_time_day)
)
print("---------------------------------")
print("Percent Complete: %0.3f %%" % (progress * 100))
print(
    "Estimated time left: %0.1f min, %0.2f hrs, %0.3f days" % (eta_min, eta_hr, eta_day)
)
print("")
| 29.185714 | 94 | 0.708762 |
09f226a5810e82fde46ce6d76eb7db7321ca355b | 3,998 | py | Python | Projects/Project 1/Handin/program.py | ymirthor/T-215-STY1 | b888da1e88c5aa16eac03353f525e9e0b9d901df | [
"MIT"
] | null | null | null | Projects/Project 1/Handin/program.py | ymirthor/T-215-STY1 | b888da1e88c5aa16eac03353f525e9e0b9d901df | [
"MIT"
] | null | null | null | Projects/Project 1/Handin/program.py | ymirthor/T-215-STY1 | b888da1e88c5aa16eac03353f525e9e0b9d901df | [
"MIT"
] | null | null | null | from collections import deque as LL
class Process:
    """A PCB entry: scheduling state, process-tree links, held resources."""

    def __init__(self, parent, priority):
        # Scheduling state: 1 = ready, 0 = blocked.
        self.state = 1
        self.priority = priority
        # Resource index this process is blocked on, or None.
        self.blocked_on = None
        # Process-tree bookkeeping (indices into the PCB table).
        self.parent = parent
        self.children = LL()
        # Indices of resources currently allocated to this process.
        self.resources = LL()
class Resource:
    """An RCB entry: availability flag plus a FIFO waitlist."""

    def __init__(self):
        # Availability: 1 = free, 0 = allocated.
        self.state = 1
        # PCB indices of processes waiting for this resource, FIFO order.
        self.waitlist = LL()
class PCB:
    """Process and resource manager: a fixed-size process table (PCB), a
    priority ready list (RL), and a resource table (RCB).  Process 0 is
    the init process at priority 0 and starts running."""

    def __init__(self, size=16):
        self.size = size  # Nr of processses in PCB
        self.priorities = 3  # Nr of priorties for RL
        self.resources = 4  # Nr of resources for RCB
        self.RL = [LL() for _ in range(3)]  # RL with n priorities
        self.RCB = [Resource() for _ in range(4)]  # RCB with n resources
        self.PCB = [None] * self.size  # Empty PCB
        self.running = 0  # Running process, starts on 0
        self.PCB[0] = Process(None, 0)
        self.RL[0].append(0)

    def create(self, priority):
        """Create a child of the running process in the first free PCB slot.

        Returns a status string, or None if the table is full (no free
        slot -- the loop falls through without returning).
        """
        for idx, process in enumerate(self.PCB):
            if process == None:
                self.PCB[idx] = Process(parent=self.running, priority=priority)
                self.PCB[self.running].children.append(idx)
                self.RL[priority].append(idx)
                self.scheduler()
                return f'process {idx} created'

    def scheduler(self):
        """Set self.running to the head of the highest non-empty priority."""
        for priority in reversed(self.RL):
            if priority:
                self.running = priority[0]
                break

    def _destroy_recur(self, index):
        """Destroy process *index* and its whole subtree; return the count."""
        count = 1
        # Recur destroy children
        for child in list(self.PCB[index].children):
            count += self._destroy_recur(child)
        # Release all resources
        for resource in list(self.PCB[index].resources):
            self.release(resource, index)
        # Remove from ready list or from waitlist: if the process is not in
        # its priority's ready list (ValueError), it must be blocked, so it
        # is removed from the resource's waitlist instead.
        try:
            pri = self.PCB[index].priority
            self.RL[pri].remove(index)
        except ValueError:
            resource = self.PCB[index].blocked_on
            self.RCB[resource].waitlist.remove(index)
        # Remove parent
        parent = self.PCB[self.PCB[index].parent]
        parent.children.remove(index)
        self.PCB[index] = None
        return count

    def destroy(self, index):
        """Destroy the subtree rooted at *index*, then reschedule."""
        count = self._destroy_recur(index)
        self.scheduler()
        return f'{count} processes destroyed'

    def timeout(self):
        """Rotate the running process to the back of its ready queue."""
        i = self.running
        ready_list = self.RL[self.PCB[i].priority]
        ready_list.remove(i)
        ready_list.append(i)
        self.scheduler()
        # Reports the NEW running process after rescheduling.
        return f'process {self.running} running'

    def request(self, index_resource):
        """Running process requests a resource; allocate or block on it."""
        resource = self.RCB[index_resource]
        running_process = self.PCB[self.running]
        if index_resource in running_process.resources:
            return f'process {self.running} already has resource'
        ready_list = self.RL[running_process.priority]
        if resource.state == 1:
            resource.state = 0
            running_process.resources.append(index_resource)
            return f'resource {index_resource} allocated'
        else:
            running_process.state = 0
            running_process.blocked_on = index_resource
            ready_list.remove(self.running)
            resource.waitlist.append(self.running)
            self.scheduler()
            # NOTE(review): scheduler() has already changed self.running, so
            # this names the NEW running process as "blocked", not the
            # process that was just blocked -- confirm this message is the
            # intended output format.
            return f'process {self.running} blocked'

    def release(self, index_resource, index=None):
        """Release a resource held by process *index* (default: running);
        hand it to the first waiter, if any, and unblock that waiter.

        NOTE(review): `index or self.running` falls back to the running
        process when index == 0; process 0 is init, so this presumably
        never matters -- verify.
        """
        curr_process = self.PCB[index or self.running]
        resource = self.RCB[index_resource]
        curr_process.resources.remove(index_resource)
        if len(resource.waitlist) == 0:
            resource.state = 1
        else:
            index_process = resource.waitlist.popleft()
            process = self.PCB[index_process]
            self.RL[process.priority].append(index_process)
            process.state = 1
            process.resources.append(index_resource)
        return f'resource {index_resource} released'
09f23dc12e2bcbc1428ea1ce895f4e644cb3aca4 | 2,449 | py | Python | utils/ion.py | karlstratos/EntityQuestions | c4969aa6ca464773c2c35ab0569ba5924320d8d9 | [
"MIT"
] | 103 | 2021-09-16T18:19:49.000Z | 2022-03-29T03:18:50.000Z | utils/ion.py | karlstratos/EntityQuestions | c4969aa6ca464773c2c35ab0569ba5924320d8d9 | [
"MIT"
] | 8 | 2021-09-25T00:00:37.000Z | 2022-03-24T01:01:35.000Z | utils/ion.py | karlstratos/EntityQuestions | c4969aa6ca464773c2c35ab0569ba5924320d8d9 | [
"MIT"
] | 10 | 2021-09-19T08:12:53.000Z | 2022-03-23T09:09:23.000Z | """
General framework for reading/writing data from/to files on
the local system.
"""
import json
import random
from pathlib import Path
###############################################################################
# FILE READING #################################################
###############################################################################
def read_file(infile, handle_file, log=False, skip_first_line=False):
    """Open *infile*, optionally drop its first line, and return
    handle_file(f) applied to the open file object."""
    if log:
        print(f'Opening "{infile}"...')
    with open(infile) as f:
        if skip_first_line:
            f.readline()
        result = handle_file(f)
    if log:
        print(' Done.')
    return result
def read_json(infile, log=False):
    """Parse *infile* as one JSON document and return the resulting object."""
    return read_file(infile, json.load, log=log)
def read_jsonl(infile, log=False):
    """Parse *infile* as JSON Lines: one JSON document per line."""
    return read_file(infile, lambda f: [json.loads(line) for line in f], log=log)
def read_tsv(infile, row_fn=lambda x: x, log=False, skip_first_line=False):
    """Read *infile* as tab-separated rows, applying *row_fn* to each row.

    BUG FIX: the trailing newline is now stripped before splitting, so the
    last field of each row no longer carries a '\\n' suffix.
    """
    handler = lambda f: [row_fn(line.rstrip('\n').split('\t')) for line in f.readlines()]
    return read_file(infile, handler, log=log, skip_first_line=skip_first_line)
###############################################################################
# FILE WRITING #################################################
###############################################################################
def write_file(outfile, handle_file, log=False):
    """Open *outfile* for writing (created/truncated) and pass the open
    file object to *handle_file*."""
    if log:
        print(f'Writing to "{outfile}"...')
    with open(outfile, 'w+') as f:
        handle_file(f)
    if log:
        print(' Done.')
def write_json(outfile, data, log=False, pretty=False):
    """Serialize *data* as JSON to *outfile* (4-space indent when pretty)."""
    indent = 4 if pretty else None
    write_file(outfile, lambda f: f.write(json.dumps(data, indent=indent)), log=log)
def write_jsonl(outfile, data, log=False):
    """Write *data* (an iterable of JSON-serializable objects) to *outfile*
    in JSON Lines format: one document per line."""
    def dump_lines(f):
        for record in data:
            f.write(json.dumps(record) + '\n')
    write_file(outfile, dump_lines, log=log)
def mkdir_optional(outdir):
    """Create *outdir* and any missing parents; no-op if it already exists."""
    target = Path(outdir)
    target.mkdir(parents=True, exist_ok=True)
###############################################################################
# OTHER OUTPUT #################################################
###############################################################################
def sample_print(item, pct):
    """Print *item* with probability pct/100 (pct is a percentage)."""
    if random.random() >= (pct / 100):
        return
    print(item)
| 31 | 79 | 0.475704 |
09f240acbe9b8fa80d51945cdcc670845719d41c | 2,394 | py | Python | pg_methods/interfaces/state_processors.py | zafarali/policy-gradient-methods | f0d83a80ddc772dcad0c851aac9bfd41d436c274 | [
"MIT"
] | 28 | 2018-06-12T21:37:20.000Z | 2021-12-27T15:13:14.000Z | pg_methods/interfaces/state_processors.py | zafarali/policy-gradient-methods | f0d83a80ddc772dcad0c851aac9bfd41d436c274 | [
"MIT"
] | 3 | 2018-05-10T16:33:05.000Z | 2018-06-19T18:17:37.000Z | pg_methods/interfaces/state_processors.py | zafarali/policy-gradient-methods | f0d83a80ddc772dcad0c851aac9bfd41d436c274 | [
"MIT"
] | 7 | 2018-05-08T04:13:21.000Z | 2021-04-02T12:31:55.000Z | import gym
import torch
import numpy as np
from pg_methods.interfaces import common_interfaces as common
class SimpleStateProcessor(common.Interface):
    """
    Allows one to interface states between a single instance of gym.

    Box (continuous) observation spaces pass through as float tensors;
    discrete spaces are emitted either one-hot encoded or as (optionally
    normalized) scalar indices.
    """
    def __init__(self, environment_observation_space, one_hot=False, use_cuda=False, normalize=False):
        self.observation_space = environment_observation_space
        if isinstance(environment_observation_space, gym.spaces.Box):
            # continous environment
            self.continous = True
            self.state_size = environment_observation_space.shape
            if len(self.state_size) == 1:
                self.state_size = self.state_size[0]
            # one-hot / normalization only make sense for discrete spaces
            self.one_hot = False
            self.normalize = False
        else:
            self.continous = False
            self.one_hot = one_hot
            if self.one_hot:
                self.state_size = environment_observation_space.n
                self.normalize = False
                self.max_obs = environment_observation_space.n
            else:
                self.normalize = normalize
                self.state_size = 1
                self.max_obs = environment_observation_space.n
        self.use_cuda = use_cuda

    def state2pytorch(self, state_idx):
        """Convert an environment state into a (1, state_size) float tensor."""
        if self.one_hot and not self.continous:
            state = np.zeros(self.state_size)
            # BUG FIX: was `state[self.state_idx] = 1`, which raised
            # AttributeError -- the index is the method argument, not an
            # instance attribute.
            state[state_idx] = 1
            state = torch.from_numpy(state.reshape(1, -1))
            if self.use_cuda:
                return state.float().cuda()
            else:
                return state.float()
        else:
            state = None
            if not self.continous:
                state = torch.from_numpy(np.array([state_idx]).reshape(1, -1))
            else:
                state = torch.from_numpy(np.array(state_idx).reshape(1, -1))
            if self.normalize:
                # scale discrete indices by the number of states
                state = state / self.max_obs
            if self.use_cuda:
                return state.float().cuda()
            else:
                return state.float()

    def pytorch2state(self, tensor):
        """Convert a tensor back to the environment's native state format."""
        if self.continous:
            return common.pytorch2list(tensor)
        else:
            list_state = list(map(int, common.pytorch2list(tensor)))
            if self.state_size == 1:
                return list_state[0]
            else:
                return list_state
09f69dea9d9541fb1a471fe9f8d7ffca1d756933 | 3,935 | py | Python | tests/test_emlib.py | mjpekala/faster-membranes | f203fc8608603bc7b16a1abeac324d52e9dfe96a | [
"Apache-2.0"
] | null | null | null | tests/test_emlib.py | mjpekala/faster-membranes | f203fc8608603bc7b16a1abeac324d52e9dfe96a | [
"Apache-2.0"
] | null | null | null | tests/test_emlib.py | mjpekala/faster-membranes | f203fc8608603bc7b16a1abeac324d52e9dfe96a | [
"Apache-2.0"
] | null | null | null | """Unit test for emlib.py
To run:
PYTHONPATH=../src python test_emlib.py
"""
__author__ = "Mike Pekala"
__copyright__ = "Copyright 2015, JHU/APL"
__license__ = "Apache 2.0"
import unittest
import numpy as np
from sklearn.metrics import precision_recall_fscore_support as smetrics
import emlib
class TestEmlib(unittest.TestCase):
    """Unit tests for the emlib helper functions."""

    def test_metrics(self):
        # emlib.metrics should agree with sklearn's per-class precision,
        # recall and f1 ([1] selects the positive class).
        Y = np.random.randint(0,2,size=(2,5,5))
        Yhat = np.random.randint(0,2,size=(2,5,5))
        C,acc,prec,recall,f1 = emlib.metrics(Y, Yhat, display=False)
        prec2, recall2, f12, supp = smetrics(np.reshape(Y, (Y.size,)),
                np.reshape(Yhat, (Yhat.size,)))
        self.assertAlmostEqual(prec, prec2[1])
        self.assertAlmostEqual(recall, recall2[1])
        self.assertAlmostEqual(f1, f12[1])

    def test_mirror_edges(self):
        X = np.random.rand(10,3,3);
        b = 2 # b := border size
        Xm = emlib.mirror_edges(X,b)
        # make sure the result has the proper size
        assert(Xm.shape[0] == X.shape[0]);
        assert(Xm.shape[1] == X.shape[1]+2*b);
        assert(Xm.shape[2] == X.shape[2]+2*b);
        # make sure the data looks reasonable
        self.assertTrue(np.all(Xm[:,:,b-1] == Xm[:,:,b]))
        self.assertTrue(np.all(Xm[:, b:-b, b:-b] == X))

    def test_interior_pixel_generator(self):
        # The generator should visit every interior pixel exactly once.
        b = 10 # b := border size
        Z = np.zeros((2,100,100), dtype=np.int32)
        for idx, pct in emlib.interior_pixel_generator(Z,b,30):
            Z[idx[:,0],idx[:,1],idx[:,2]] += 1
        self.assertTrue(np.all(Z[:,b:-b,b:-b]==1))
        Z[:,b:-b,b:-b] = 0
        self.assertTrue(np.all(Z==0))

    def test_stratified_interior_pixel_generator(self):
        b = 10 # b := border size
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # For a 50/50 split of pixels in the interior, the generator
        # should reproduce the entire interior.
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        Y = np.zeros((2,100,100))
        Y[:,0:50,:] = 1
        Z = np.zeros(Y.shape)
        for idx,pct in emlib.stratified_interior_pixel_generator(Y,b,30):
            Z[idx[:,0],idx[:,1],idx[:,2]] += 1
        self.assertTrue(np.all(Z[:,b:-b,b:-b]==1))
        Z[:,b:-b,b:-b] = 0
        self.assertTrue(np.all(Z==0))
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # For a random input, should see a 50/50 split of class
        # labels, but not necessarily hit the entire interior.
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        Y = np.random.rand(2,100,100) > 0.5
        nOne=0; nZero=0;
        for idx,pct in emlib.stratified_interior_pixel_generator(Y,b,30):
            slices = idx[:,0]; rows = idx[:,1]; cols = idx[:,2]
            nOne += np.sum(Y[slices,rows,cols] == 1)
            nZero += np.sum(Y[slices,rows,cols] == 0)
        self.assertTrue(nOne == nZero)
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # For an input tensor with "no-ops", the sampler should only
        # return pixels with a positive or negative label.
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        Y = np.zeros((2,100,100))
        Y[:,0:20,0:20] = 1
        Y[:,50:70,50:70] = -1
        Z = np.zeros(Y.shape)
        nPos=0; nNeg=0; nTotal=0;
        for idx,pct in emlib.stratified_interior_pixel_generator(Y,0,10,omitLabels=[0]):
            slices = idx[:,0]; rows = idx[:,1]; cols = idx[:,2]
            Z[slices,rows,cols] = Y[slices,rows,cols]
            nPos += np.sum(Y[slices,rows,cols] == 1)
            nNeg += np.sum(Y[slices,rows,cols] == -1)
            nTotal += len(slices)
        # Each labelled square is 20x20 over 2 slices.
        self.assertTrue(nPos == 20*20*2);
        self.assertTrue(nNeg == 20*20*2);
        self.assertTrue(nTotal == 20*20*2*2);
        self.assertTrue(np.all(Y == Z))
# Run the suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| 33.067227 | 88 | 0.519695 |
09f80f8b20cbf9fab41433b7fb79cb311415a791 | 193 | py | Python | src/biopsykit/signals/imu/feature_extraction/__init__.py | Zwitscherle/BioPsyKit | 7200c5f1be75c20f53e1eb4c991aca1c89e3dd88 | [
"MIT"
] | 10 | 2020-11-05T13:34:55.000Z | 2022-03-11T16:20:10.000Z | src/biopsykit/signals/imu/feature_extraction/__init__.py | Zwitscherle/BioPsyKit | 7200c5f1be75c20f53e1eb4c991aca1c89e3dd88 | [
"MIT"
] | 14 | 2021-03-11T14:43:52.000Z | 2022-03-10T19:44:57.000Z | src/biopsykit/signals/imu/feature_extraction/__init__.py | Zwitscherle/BioPsyKit | 7200c5f1be75c20f53e1eb4c991aca1c89e3dd88 | [
"MIT"
] | 3 | 2021-09-13T13:14:38.000Z | 2022-02-19T09:13:25.000Z | """Module containing scripts for different feature extraction techniques from raw IMU data."""
from biopsykit.signals.imu.feature_extraction import static_moments
__all__ = ["static_moments"]
| 38.6 | 94 | 0.818653 |
09f91afeaca4a61947c025a6985fde971a2433a0 | 727 | py | Python | app/core/bluetooth/models.py | FHellmann/MLWTF | 582c3505d638907a848d5a6c739ee99981300f17 | [
"Apache-2.0"
] | null | null | null | app/core/bluetooth/models.py | FHellmann/MLWTF | 582c3505d638907a848d5a6c739ee99981300f17 | [
"Apache-2.0"
] | null | null | null | app/core/bluetooth/models.py | FHellmann/MLWTF | 582c3505d638907a848d5a6c739ee99981300f17 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
"""
Author: Fabio Hellmann <info@fabio-hellmann.de>
"""
from attr import s, ib
from attr.validators import instance_of
# Immutable (frozen) attrs value object describing a discovered BLE device.
@s(frozen=True)
class BLEDevice(object):
    """
    Device MAC address (as a hex string separated by colons).
    """
    # MAC address string, e.g. "aa:bb:cc:dd:ee:ff"
    addr = ib(validator=instance_of(str), type=str)
    """
    The name which is set
    """
    # Advertised device name
    name = ib(validator=instance_of(str), type=str)
    """
    Received Signal Strength Indication for the last received broadcast from the device. This is an integer value
    measured in dB, where 0 dB is the maximum (theoretical) signal strength, and more negative numbers indicate a
    weaker signal.
    """
    rssi = ib(validator=instance_of(int), type=int)
| 27.961538 | 114 | 0.68088 |
09f949d20672656308f4b25b2fb52c7d29555163 | 1,511 | py | Python | Algorithms_medium/1102. Path With Maximum Minimum Value.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | 4 | 2020-08-11T20:45:15.000Z | 2021-03-12T00:33:34.000Z | Algorithms_medium/1102. Path With Maximum Minimum Value.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | null | null | null | Algorithms_medium/1102. Path With Maximum Minimum Value.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | null | null | null | """
1102. Path With Maximum Minimum Value
Medium
Given a matrix of integers A with R rows and C columns, find the maximum score of a path starting at [0,0] and ending at [R-1,C-1].
The score of a path is the minimum value in that path. For example, the value of the path 8 → 4 → 5 → 9 is 4.
A path moves some number of times from one visited cell to any neighbouring unvisited cell in one of the 4 cardinal directions (north, east, west, south).
Example 1:
Input: [[5,4,5],[1,2,6],[7,4,6]]
Output: 4
Explanation:
The path with the maximum score is highlighted in yellow.
Example 2:
Input: [[2,2,1,2,2,2],[1,2,2,2,1,2]]
Output: 2
Example 3:
Input: [[3,4,6,3,4],[0,2,1,1,7],[8,8,3,2,7],[3,2,4,9,8],[4,1,2,0,0],[4,6,5,4,3]]
Output: 3
Note:
1 <= R, C <= 100
0 <= A[i][j] <= 10^9
"""
class Solution:
    def maximumMinimumPath(self, A: List[List[int]]) -> int:
        """Return the maximum possible score of a path from (0,0) to
        (R-1,C-1), where a path's score is its minimum cell value.

        Best-first search: always expand the frontier cell whose reachable
        minimum is largest (max-heap via negated values).
        """
        rows, cols = len(A), len(A[0])
        moves = ((0, 1), (1, 0), (0, -1), (-1, 0))
        visited = [[False] * cols for _ in range(rows)]
        heap = [(-A[0][0], 0, 0)]
        while heap:
            neg_score, r, c = heapq.heappop(heap)
            if r == rows - 1 and c == cols - 1:
                return -neg_score
            for dr, dc in moves:
                nr, nc = r + dr, c + dc
                if 0 <= nr < rows and 0 <= nc < cols and not visited[nr][nc]:
                    # Mark on push (not on pop) to avoid duplicate entries.
                    visited[nr][nc] = True
                    heapq.heappush(heap, (max(neg_score, -A[nr][nc]), nr, nc))
        return -1
09f9da8e8fb3a2cb6c40b0627a6fdbf5844460e0 | 1,436 | py | Python | tests/extractor/test_factory.py | albertteoh/data_pipeline | a99f1c7412375b3e9f4115108fcdde517b2e71a6 | [
"Apache-2.0"
] | null | null | null | tests/extractor/test_factory.py | albertteoh/data_pipeline | a99f1c7412375b3e9f4115108fcdde517b2e71a6 | [
"Apache-2.0"
] | null | null | null | tests/extractor/test_factory.py | albertteoh/data_pipeline | a99f1c7412375b3e9f4115108fcdde517b2e71a6 | [
"Apache-2.0"
] | null | null | null | import pytest
import data_pipeline.db.factory as db_factory
import data_pipeline.extractor.factory as extractor_factory
import tests.unittest_utils as utils
import data_pipeline.constants.const as const
from pytest_mock import mocker
from data_pipeline.db.exceptions import UnsupportedDbTypeError
@pytest.fixture()
def setup(tmpdir, mocker):
    """Yield mocked (argv, audit_factory) objects shared by the tests below."""
    mockargv_config = utils.get_default_argv_config(tmpdir)
    mockargv = mocker.Mock(**mockargv_config)
    # Process-control mock that the audit factory hands back.
    pc_config = {'insert.return_value': None, 'update.return_value': None}
    mock_pc = mocker.Mock(**pc_config)
    af_config = {'build_process_control.return_value': mock_pc}
    mock_audit_factory = mocker.Mock(**af_config)
    # Avoid touching a real Kafka broker during tests.
    utils.mock_build_kafka_producer(mocker)
    yield (mockargv, mock_audit_factory)
# Each supported database type should yield its matching CDC extractor class.
@pytest.mark.parametrize("dbtype, expect_class", [
    (const.ORACLE, "OracleCdcExtractor"),
    (const.MSSQL, "MssqlCdcExtractor"),
])
def test_build(dbtype, expect_class, setup):
    (mockargv, mock_audit_factory) = setup
    mode = const.CDCEXTRACT
    db = db_factory.build(dbtype)
    extractor = extractor_factory.build(mode, db, mockargv, mock_audit_factory)
    assert type(extractor).__name__ == expect_class
def test_build_unsupported(setup):
    """An unknown database type should fail with ImportError at build time."""
    (mockargv, mock_audit_factory) = setup
    with pytest.raises(ImportError):
        db = db_factory.build("AnUnsupportedDatabase")
        extractor = extractor_factory.build(db, mockargv, mock_audit_factory)
| 34.190476 | 79 | 0.766017 |
09fa1379267ff36d7eaf0c8f04ba9a7c23bd124b | 3,424 | py | Python | suremco/tracker.py | modsim/SurEmCo | 71fc0cfc62f8733de93ee2736421574a154e3db3 | [
"BSD-2-Clause"
] | null | null | null | suremco/tracker.py | modsim/SurEmCo | 71fc0cfc62f8733de93ee2736421574a154e3db3 | [
"BSD-2-Clause"
] | null | null | null | suremco/tracker.py | modsim/SurEmCo | 71fc0cfc62f8733de93ee2736421574a154e3db3 | [
"BSD-2-Clause"
] | null | null | null | # SurEmCo - C++ tracker wrapper
import ctypes
from enum import IntEnum
import sys
import os
import numpy
import numpy.ctypeslib
class Tracker(object):
    """ctypes wrapper around the native tracking library shipped next to
    this module (_tracker.so on Linux, _tracker.dll otherwise)."""

    class Mode(IntEnum):
        # Emitter motion model passed to the native tracker.
        MOVING = 0
        STATIC = 1

    class Strategy(IntEnum):
        # Neighbour-search strategy used by the native tracker.
        BRUTE_FORCE = 0
        KD_TREE = 1

    # Structured dtype exchanged with the native code; field order and
    # widths presumably mirror the C-side record layout -- TODO confirm.
    track_input_type = {'dtype': [
        ('x', 'float64'),
        ('y', 'float64'),
        ('precision', 'float64'),
        ('frame', 'int64'),
        ('index', 'intp'),
        ('label', 'int64'),
        ('square_displacement', 'float64')
    ]}
    # When True, extra diagnostics are printed and datasets are dumped.
    debug = False

    def __init__(self, debug=False):
        """Load the shared library and declare its C function signatures."""
        self.debug = debug
        file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            '_tracker.' + ('so' if sys.platform == 'linux' else 'dll'))
        # Temporarily chdir next to the library so the loader can resolve
        # dependencies living alongside it.
        old_cwd = os.getcwd()
        os.chdir(os.path.dirname(file))
        _track_so = ctypes.CDLL(file)
        os.chdir(old_cwd)
        # void track(records*, size_t n, float max_displacement,
        #            int32 memory, int32 mode, int32 strategy)
        _track_so.track.argtypes = (
            numpy.ctypeslib.ndpointer(**self.track_input_type), # , flags='C_CONTIGUOUS'),
            ctypes.c_size_t,
            ctypes.c_float,
            ctypes.c_int32,
            ctypes.c_int32,
            ctypes.c_int32
        )
        _track_so.track.restype = None
        # float msd(records*, size_t n, float micron_per_pixel, float fps)
        _track_so.msd.argtypes = (
            numpy.ctypeslib.ndpointer(**self.track_input_type), # , flags='C_CONTIGUOUS'),
            ctypes.c_size_t,
            ctypes.c_float,
            ctypes.c_float
        )
        _track_so.msd.restype = ctypes.c_float
        self._track_so = _track_so
        self._track = _track_so.track
        self._msd = _track_so.msd
        if self.debug:
            _track_so.getBuildDate.restype = ctypes.c_char_p
            # noinspection PyProtectedMember
            print("Loaded %s compiled at %s" % (_track_so._name, _track_so.getBuildDate().decode(),))

    def track(self, transfer, maximum_displacement=1.0, memory=0, mode=None, strategy=None):
        """Run the native tracker on *transfer*, an array of
        `track_input_type` records. Raises RuntimeError on empty input."""
        if mode is None:
            mode = self.Mode.MOVING
        if strategy is None:
            strategy = self.Strategy.BRUTE_FORCE
        if len(transfer) == 0:
            raise RuntimeError('Empty data!')
        if self.debug:
            # Dump the dataset so the native call can be replayed by hand.
            from tempfile import NamedTemporaryFile
            with NamedTemporaryFile(prefix='track_dataset', delete=False) as tf:
                transfer.tofile(tf)
                print("track(\"%s\", %d, %f, %d, %d, %d)" % (
                    tf.name, len(transfer), maximum_displacement, memory, mode, strategy
                ))
        return self._track(transfer, len(transfer), maximum_displacement, memory, mode, strategy)

    def msd(self, transfer, micron_per_pixel=1.0, frames_per_second=1.0):
        """Mean squared displacement of the tracked data via the native call."""
        # the MSD calculation was not thoroughly verified
        if len(transfer) == 0:
            raise RuntimeError('Empty data!')
        return self._msd(transfer, len(transfer), micron_per_pixel, frames_per_second)

    def __del__(self):
        """In debug mode, explicitly dlclose the native library on Linux."""
        if not self.debug:
            return
        # noinspection PyProtectedMember
        _handle = self._track_so._handle
        del self._track_so
        if sys.platform == 'linux':
            dl = ctypes.CDLL('libdl.so')
            dl.dlclose.argtypes = [ctypes.c_void_p]
            dl.dlclose(_handle)
        # elif # handle windows?

    @classmethod
    def empty_track_input_type(cls, count):
        """Allocate a zeroed record array of *count* tracking entries."""
        return numpy.zeros(count, **cls.track_input_type)
| 29.517241 | 101 | 0.580023 |
09fa8917e01388a6694109a784fa133ce2d71a48 | 774 | py | Python | lianxi/abc.py | xuguoqiang1/qiang | a95f105897462a630d8312eabea6a0f905c3e3e1 | [
"MIT"
] | null | null | null | lianxi/abc.py | xuguoqiang1/qiang | a95f105897462a630d8312eabea6a0f905c3e3e1 | [
"MIT"
] | null | null | null | lianxi/abc.py | xuguoqiang1/qiang | a95f105897462a630d8312eabea6a0f905c3e3e1 | [
"MIT"
] | null | null | null | # coding=utf-8
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import Config
# Module-level application state: one Flask app configured from Config,
# with a shared SQLAlchemy handle bound to it.
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
class Role(db.Model):
    """A user role; one role can be associated with many users."""
    # define the table name
    __tablename__ = 'roles'
    # define the column objects
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # one-to-many: `role.us` lists users, each user gets a `role` backref
    us = db.relationship('User', backref='role')
class User(db.Model):
    """An application user, linked to a Role via role_id."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True, index=True)
    email = db.Column(db.String(64),unique=True)
    pswd = db.Column(db.String(64))
    # foreign key to roles.id (see Role.us relationship)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
if __name__ == '__main__':
    # Create all declared tables, then start the development server.
    db.create_all()
    app.run()
09fb1d9a7c357f2bc49fb2f43274b073bfff333e | 4,026 | py | Python | foreign_languages/anderson.py | ds-modules/NESTUD-190A | 54ca1cd9a8f369f48946147f72377f874738f7d5 | [
"MIT"
] | 6 | 2017-11-06T03:18:12.000Z | 2019-10-02T19:41:06.000Z | foreign_languages/anderson.py | admndrsn/NESTUD-190A | 54ca1cd9a8f369f48946147f72377f874738f7d5 | [
"MIT"
] | null | null | null | foreign_languages/anderson.py | admndrsn/NESTUD-190A | 54ca1cd9a8f369f48946147f72377f874738f7d5 | [
"MIT"
] | 2 | 2018-02-09T01:04:58.000Z | 2019-06-19T17:45:34.000Z | from IPython.core.display import display, HTML
import translation
class translate(object):
    """Render side-by-side HTML columns (original text, translation, parts
    of speech, detected language) for a body of text inside IPython."""

    # Class-level counter so each rendered table gets unique div ids.
    id_start = 0

    def __init__(self, column_types, language_to='en'):
        """column_types: list of column names to render (a leading
        'original text' column is always prepended).
        language_to: target language code for translation."""
        self.num_of_columns = len(column_types) + 1
        column_types.insert(0, 'original text')
        self.column_types = column_types
        self.language_to = language_to
        # NOTE(review): 'parts of speech' dispatches to polyglot_pos, not
        # parts_of_speech_pls -- confirm which backend is intended.
        self.funcs = {'original text':self.original_text_pls, 'translate':self.tranlate_pls,
                      'parts of speech':self.polyglot_pos, 'language':self.polyglot_languages}
        self.header = {'original text':'Original Text:', 'translate':'Translation:',
                       'parts of speech':'Parts of Speech:', 'language':'Language(s) Detected:'}
        self.fonttype = 'Courier New'
        self.additionalcss = ''

    # these are the functions that will go within the body calls
    # need to fill in these functions so that we get the right things
    def tranlate_pls(self, txt):
        """Translate txt via the Bing backend into the target language."""
        return translation.bing(txt, dst = self.language_to)

    def original_text_pls(self, txt):
        """Identity: show the text unchanged."""
        return txt

    def parts_of_speech_pls(self, txt):
        """Part-of-speech tagging via NLTK (tokenize, then pos_tag)."""
        import nltk
        tokenized = nltk.word_tokenize(txt)
        return nltk.pos_tag(tokenized)

    def polyglot_languages(self, txt):
        """Detect candidate languages with polyglot; returns HTML lines."""
        from polyglot.detect import Detector
        langs = Detector(txt, quiet=True).languages
        selected_items = [(x.name, x.confidence) for x in langs]
        # converting to readable from Detector objects
        stringy_list = ['Name: ' + str(x) + ' Confidence: ' +str(y) for x,y in selected_items]
        return '<br><br>'.join(stringy_list)

    def polyglot_pos(self, txt):
        """Part-of-speech tagging via polyglot."""
        from polyglot.text import Text
        return Text(txt).pos_tags

    # make a function for part of speech counts
    # and names in the text
    # maybe name counts
    # make it so that we can try different translating services
    # a google integration may be necessary :(

    # incrementing the ids so that the css of a new one doesn't change an old one
    def increment_ids(self):
        strt_id = translate.id_start
        translate.id_start += self.num_of_columns
        return range(strt_id, strt_id + self.num_of_columns)

    # creating the divs and the content that will go in them
    def create_body(self, id_numbers, txt):
        """Build the inner HTML: one div per configured column."""
        # setting up all of the divs that will be there
        base_column = '<div id="{}">{}<br>{}</div>'
        blank_divs = base_column * self.num_of_columns
        # calling the functions specified in our constructor on our body of text
        content = [self.funcs[col](txt) for col in self.column_types]
        headers = [self.header[col] for col in self.column_types]
        # zipping them together so we can make a string in the correct order, then flattening
        nested_order = list(zip(id_numbers, headers, content))
        unnested = [item for sublist in nested_order for item in sublist]
        return '<div id="wrapper">' + blank_divs.format(*(unnested)) + '</div>'

    def create_css(self, id_numbers):
        """Build the per-column CSS: alternating colors, equal widths."""
        # picking alternating colors for columns
        clrs = ['#e6f3f7', 'lightgray']
        def alternate():
            while True:
                yield 0
                yield 1
        gen = alternate()
        clr_list = [clrs[next(gen)] for i in range(self.num_of_columns)]
        # width evenly divided by number of columns
        width = "width:{}%;".format(str(100 / self.num_of_columns))
        # setting up for all different css that will be there
        base_css = "#{} {{background-color: {};" + width + "float:left;padding: .5vw;border-right: solid black 1.5px;}}"
        blank_csss = base_css * self.num_of_columns
        # zipping them together so we can make a string in the correct order, then flattening
        nested_order = list(zip(id_numbers, clr_list))
        unnested = [item for sublist in nested_order for item in sublist]
        final_css = blank_csss.format(*(unnested))
        wrapper = "{} #wrapper {{width:100%;clear:both;display: flex;font-family:{};}}".format(self.additionalcss, self.fonttype)
        return wrapper + final_css

    def create(self, initial_text):
        """Render the full table for initial_text via IPython's display()."""
        id_list = self.increment_ids()
        string_ids = ['d'+str(x) for x in id_list]
        display(HTML('<style>{}</style> <body>{}</body>'.format(self.create_css(string_ids), self.create_body(string_ids, initial_text))))
        # Add a return statement so that the values are accessible
61cb92b7eff849f550f556cfcf71f302f039dac7 | 1,315 | py | Python | landdox/core.py | natefduncan/landdox | 58908554034577cc20c6f89ee6056da90cbfbd4e | [
"MIT"
] | 1 | 2019-12-13T16:19:56.000Z | 2019-12-13T16:19:56.000Z | landdox/core.py | natefduncan/landdox | 58908554034577cc20c6f89ee6056da90cbfbd4e | [
"MIT"
] | null | null | null | landdox/core.py | natefduncan/landdox | 58908554034577cc20c6f89ee6056da90cbfbd4e | [
"MIT"
] | null | null | null | import requests
import json
import pandas as pd
import os
from .endpoints import *
class Client:
    """Minimal Landdox API client.

    Endpoint modules are exposed as attributes (e.g. ``client.contacts``);
    each one is handed the OAuth access token obtained at construction time.
    """
    endpoints = {
        "contacts" : contacts,
        "leases" : leases,
        "units" : units,
        "wells" : wells,
        "custom" : custom,
        "tracts" : tracts,
        "payments" : payments
    }

    def __init__(self, client_id, client_secret):
        """Store the OAuth client credentials and fetch an access token."""
        self.client_id = client_id
        self.client_secret = client_secret
        self.authorize()

    def __getattr__(self, name):
        """Resolve unknown attributes to endpoint modules, injecting the
        current access token before returning them."""
        endpoint = self.endpoints.get(name)
        endpoint.access_token = self.access_token
        return endpoint

    def authorize(self):
        """Perform the OAuth2 client-credentials flow against Auth0.

        Raises ValueError when the token endpoint does not return HTTP 200.
        """
        payload = {
            "client_id" : self.client_id,
            "client_secret" : self.client_secret,
            "audience" : "api.landdox.com",
            "grant_type" : "client_credentials"
        }
        url = "https://landdox.auth0.com/oauth/token"
        response = requests.post(url, data=payload)
        if response.status_code != 200:
            raise ValueError("{error}".format(error=response))
        else:
            response = response.json()
            self.access_token = response.get("access_token")
            self.expires_in = response.get("expires_in")
            # BUG FIX: this previously overwrote `expires_in` with the token
            # type; store the token type under its own attribute instead.
            self.token_type = response.get("token_type")
| 26.3 | 62 | 0.580989 |
61cd1b7623e09e8563a60f3d87a7caf270f2faa2 | 589 | py | Python | src/signalplotter/qt/makePyUI.py | jowanpittevils/Databasemanager_Signalplotter | 993152ad15793054df2acf386eb1c9a76610b789 | [
"BSD-3-Clause"
] | null | null | null | src/signalplotter/qt/makePyUI.py | jowanpittevils/Databasemanager_Signalplotter | 993152ad15793054df2acf386eb1c9a76610b789 | [
"BSD-3-Clause"
] | null | null | null | src/signalplotter/qt/makePyUI.py | jowanpittevils/Databasemanager_Signalplotter | 993152ad15793054df2acf386eb1c9a76610b789 | [
"BSD-3-Clause"
] | null | null | null | #%%
def makeUI(uiNames):
    """Run pyuic5 on each Qt Designer file, generating <name>.py next to it.

    uiNames: iterable of UI file base names (without the .ui extension).
    Prints the working directory first so the caller can verify it is the
    .../SignalPlotter/qt folder containing the .ui files.
    """
    import os
    import subprocess
    import sys

    print('Check the pwd first, It must be at .../SignalPlotter/qt.')
    print(os.getcwd())
    # pyuic5 lives in the Scripts directory of the active Python install
    # (Windows layout). os.path.join avoids the invalid '\S'/'\p' string
    # escapes the old hand-built path relied on.
    pyuic = os.path.join(os.path.dirname(sys.executable), 'Scripts', 'pyuic5.exe')
    for uiName in uiNames:
        print('===== for: ', uiName, ' ======')
        # Argument list instead of a quoted shell string: robust against
        # spaces in the interpreter path.
        cmd = [pyuic, '-x', uiName + '.ui', '-o', uiName + '.py']
        print(cmd)
        res = subprocess.call(cmd) != 0
        print('Done.')
        print('Is there any error: ', res)
# Designer files to convert when this script is run.
uiNames = ['plotter_uiDesign']
makeUI(uiNames)
# %%
| 21.035714 | 69 | 0.50764 |
61cd45cf1541403e9fd5c523d38b1e30cb5cbcc0 | 1,640 | py | Python | scraper.py | mikeku1116/python-accupass-scraper | ad3301fde373ce68e55459ba5af0273599d25e37 | [
"MIT"
] | null | null | null | scraper.py | mikeku1116/python-accupass-scraper | ad3301fde373ce68e55459ba5af0273599d25e37 | [
"MIT"
] | null | null | null | scraper.py | mikeku1116/python-accupass-scraper | ad3301fde373ce68e55459ba5af0273599d25e37 | [
"MIT"
] | null | null | null | from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup
import pandas as pd
# Install the Chrome driver and create the Chrome browser object.
browser = webdriver.Chrome(ChromeDriverManager().install())
# Load the Accupass search results for "python" events.
browser.get(
    "https://old.accupass.com/search/r/0/0/0/0/4/0/00010101/99991231?q=python")
soup = BeautifulSoup(browser.page_source, "lxml")
# Each activity card is one search result.
activities = soup.find_all(
    "div", {"class": "apcss-activity-card ng-isolate-scope"})
result = []
for activity in activities:
    # Event title
    title = activity.find(
        "h3", {"class": "apcss-activity-card-title ng-binding"}).getText().strip()
    # View count
    view = activity.find(
        "span", {"class": "apcss-activity-pageview ng-binding"}).getText().strip()
    # Like count (strip the trailing Chinese "people like this" suffix)
    like = activity.find("span", {
        "class": "apcss-activity-card-like ng-binding"}).getText().strip().replace(" 人喜歡", "")
    # Ticket sale status
    status = activity.find(
        "a", {"class": "apcss-btn apcss-btn-block ng-binding activity-card-status-ready"})
    # If the event is sold out, the status uses a different style class.
    if status == None:
        status = activity.find(
            "a", {"class": "apcss-btn apcss-btn-block ng-binding activity-card-status-end"})
    result.append((title, int(view), int(like), status.getText()))
# Column headers (Chinese): event name, views, likes, ticket status.
df = pd.DataFrame(result, columns=["活動名稱", "觀看人數", "喜歡人數", "售票狀態"])
new_df = df[df["售票狀態"] == "熱銷中"]  # keep only events still on sale
sort_df = new_df.sort_values(["觀看人數"], ascending=False)  # sort by views, descending
sort_df.to_excel("accupass.xlsx",
                 sheet_name="activities",
                 index=False)  # export to Excel without the index column
browser.quit()  # close the Chrome browser
| 30.943396 | 111 | 0.65 |
61cea84c27bf7df9b0289ed47ffee2781ddbdc17 | 3,148 | py | Python | mpcontribs-users/mpcontribs/users/swf/pre_submission.py | josuav1/MPContribs | 3cbf0e83ba6cd749dd4fc988c9f6ad076b05f935 | [
"MIT"
] | 1 | 2019-07-03T04:38:58.000Z | 2019-07-03T04:38:58.000Z | mpcontribs-users/mpcontribs/users/swf/pre_submission.py | josuav1/MPContribs | 3cbf0e83ba6cd749dd4fc988c9f6ad076b05f935 | [
"MIT"
] | null | null | null | mpcontribs-users/mpcontribs/users/swf/pre_submission.py | josuav1/MPContribs | 3cbf0e83ba6cd749dd4fc988c9f6ad076b05f935 | [
"MIT"
] | 1 | 2019-07-03T04:39:04.000Z | 2019-07-03T04:39:04.000Z | from mpcontribs.config import mp_level01_titles
from mpcontribs.io.core.recdict import RecursiveDict
from mpcontribs.io.core.utils import clean_value, get_composition_from_string
from mpcontribs.users.utils import duplicate_check
def round_to_100_percent(number_set, digit_after_decimal=1):
    """Convert *number_set* to percentages that sum to exactly 100.

    Largest-remainder method: every value is scaled and truncated, then the
    leftover units are handed out to the entries with the largest fractional
    parts (ties broken by original position).
    """
    scale = 10 ** digit_after_decimal
    total = float(sum(number_set))
    scaled = [value / total * 100 * scale for value in number_set]
    # Indices ordered by descending fractional part; sorted() is stable,
    # so equal fractions keep their original order.
    by_fraction = sorted(
        ((i, scaled[i] % 1) for i in range(len(scaled))),
        key=lambda pair: pair[1],
        reverse=True,
    )
    leftover = 100 * scale - sum(map(int, scaled))
    pos = 0
    while leftover > 0:
        scaled[by_fraction[pos][0]] += 1
        leftover -= 1
        pos = (pos + 1) % len(number_set)
    return [int(value) / float(scale) for value in scaled]
@duplicate_check
def run(mpfile, **kwargs):
    """Ingest the SWF google-sheet dataset into the MPFile contribution.

    Reads all sheets from the configured workbook, rounds every composition
    to sum to exactly 100%, and records the measured quantities per formula.
    """
    import pymatgen
    import pandas as pd
    from mpcontribs.users.swf.rest.rester import SwfRester
    # load data from google sheet
    google_sheet = mpfile.document[mp_level01_titles[0]].pop('google_sheet')
    google_sheet += '/export?format=xlsx'
    df_dct = pd.read_excel(google_sheet, sheet_name=None)
    # rename sheet columns
    elements = ['Fe', 'V', 'Co']
    df_dct['IP Energy Product'].columns = ['IP_Energy_product'] + elements
    df_dct['total'].columns = elements
    df_dct['MOKE'].columns = elements + ['thickness', 'MOKE_IP_Hc']
    df_dct['VSM'].columns = elements + ['thickness', 'VSM_IP_Hc']
    df_dct['formula'].columns = elements
    df_dct['Kondorsky'].columns = ['angle', 'Kondorsky_Model', 'Experiment']
    # round all compositions to 100% (Kondorsky holds angles, not compositions)
    for sheet, df in df_dct.items():
        if sheet != 'Kondorsky':
            for idx, row in df.iterrows():
                df.loc[idx:idx, elements] = round_to_100_percent(row[elements])
    # First formula row gets the Kondorsky angular-dependence table attached.
    row5 = df_dct['formula'].iloc[0]
    formula5 = get_composition_from_string(
        pymatgen.Composition(10*row5).formula.replace(' ', '')
    )
    dct = dict((k, clean_value(v, '%')) for k,v in row5.to_dict().items())
    mpfile.add_hierarchical_data({'data': dct}, identifier=formula5)
    mpfile.add_data_table(
        formula5, df_dct['Kondorsky'], name='Angular Dependence of Switching Field'
    )
    # Remaining sheets: per-row composition plus any measured columns.
    for sheet, df in df_dct.items():
        if sheet == 'formula' or sheet == 'Kondorsky' or sheet == 'total':
            continue
        for idx, row in df.iterrows():
            composition = pymatgen.Composition(row[elements]*10)
            formula = get_composition_from_string(composition.formula.replace(' ', ''))
            dct = dict((k, clean_value(v, '%')) for k,v in row[elements].to_dict().items())
            mpfile.add_hierarchical_data({'data': dct}, identifier=formula)
            columns = [x for x in row.index if x not in elements]
            if columns:
                data = row[columns].round(decimals=1)
                dct = dict((k, clean_value(v)) for k,v in data.to_dict().items())
                mpfile.add_hierarchical_data({'data': dct}, identifier=formula)
| 43.123288 | 91 | 0.656607 |
61cec369b3732fca5135012a654d9aa0eab32326 | 7,944 | py | Python | MTVulnerability/utils/losses_pytorch.py | yamizi/taskaugment | 8393e15d7ae16f267592edf1d67e20368aeeb1b5 | [
"MIT"
] | null | null | null | MTVulnerability/utils/losses_pytorch.py | yamizi/taskaugment | 8393e15d7ae16f267592edf1d67e20368aeeb1b5 | [
"MIT"
] | null | null | null | MTVulnerability/utils/losses_pytorch.py | yamizi/taskaugment | 8393e15d7ae16f267592edf1d67e20368aeeb1b5 | [
"MIT"
] | null | null | null | import sys, os
sys.path.append("./midlevel-reps")
from visualpriors.taskonomy_network import TaskonomyDecoder
import torch
import torch.nn.functional as F
import torch.nn as nn
SMOOTH = 1e-6
# Number of output channels each task's representation uses.
CHANNELS_TO_TASKS = {
    1: ['colorization', 'edge_texture', 'edge_occlusion', 'keypoints3d', 'keypoints2d', 'reshading', 'depth_zbuffer', 'depth_euclidean', ],
    2: ['curvature', 'principal_curvature'],
    3: ['autoencoding', 'denoising', 'normal', 'inpainting', 'rgb', 'normals'],
    128: ['segment_unsup2d', 'segment_unsup25d'],
    1000: ['class_object'],
    None: ['segment_semantic']
}
# Inverse mapping: task name -> number of output channels.
TASKS_TO_CHANNELS = {}
for n, tasks in CHANNELS_TO_TASKS.items():
    for task in tasks:
        TASKS_TO_CHANNELS[task] = n
# Pixel-to-pixel tasks emit image-shaped outputs; feed-forward tasks emit vectors.
PIX_TO_PIX_TASKS = ['colorization', 'edge_texture', 'edge_occlusion', 'keypoints3d', 'keypoints2d', 'reshading', 'depth_zbuffer', 'depth_euclidean', 'curvature', 'autoencoding', 'denoising', 'normal', 'inpainting', 'segment_unsup2d', 'segment_unsup25d', 'segment_semantic', ]
FEED_FORWARD_TASKS = ['class_object', 'class_scene', 'room_layout', 'vanishing_point']
SINGLE_IMAGE_TASKS = PIX_TO_PIX_TASKS + FEED_FORWARD_TASKS
def heteroscedastic_normal(mean_and_scales, target, weight=None, eps=1e-2):
    """Gaussian heteroscedastic negative log-likelihood.

    ``mean_and_scales`` is the (mu, scales) pair predicted by the network;
    ``eps`` keeps the predicted variance away from zero. When ``weight`` is
    given, a weighted mean normalised by the mean weight is returned.
    """
    mu, scales = mean_and_scales
    var = scales**2 + eps
    loss = (mu - target)**2 / var + torch.log(var)
    if weight is None:
        return loss.mean()
    return torch.mean(weight * loss) / weight.mean()
def heteroscedastic_double_exponential(mean_and_scales, target, weight=None, eps=5e-2):
    """Laplace (double-exponential) heteroscedastic negative log-likelihood.

    ``mean_and_scales`` is the (mu, scales) pair predicted by the network;
    ``eps`` keeps the scale away from zero. When ``weight`` is given, a
    weighted mean normalised by the mean weight is returned.
    """
    mu, scales = mean_and_scales
    b = scales + eps
    loss = torch.abs(mu - target) / b + torch.log(2.0 * b)
    if weight is None:
        return loss.mean()
    return torch.mean(weight * loss) / weight.mean()
def iou_loss(outputs: torch.Tensor, labels: torch.Tensor, threshold: float = None):
    """Per-sample IoU between argmax class maps of ``outputs`` and ``labels``.

    Returns 1 for identical maps and 0 for fully disjoint ones (so, despite
    the name, higher is better). With ``threshold`` set, the IoU is
    discretised into tenths, which is equivalent to comparing against a set
    of thresholds.

    NOTE(review): intersection/union use bitwise &/| on the integer class
    maps, which is only a true IoU for binary (0/1) masks — confirm inputs
    are two-class.
    """
    preds = torch.argmax(outputs, axis=1)
    truths = torch.argmax(labels, axis=1)
    if len(preds.shape) > 3:
        preds = preds.squeeze(1)  # BATCH x 1 x H x W => BATCH x H x W
    intersection = (preds & truths).float().sum((1, 2))  # zero if either is 0
    union = (preds | truths).float().sum((1, 2))         # zero if both are 0
    iou = (intersection + SMOOTH) / (union + SMOOTH)     # smoothed to avoid 0/0
    if threshold is None:
        return iou
    return torch.clamp(20 * (iou - 0.5), 0, 10).ceil() / 10
def weighted_mse_loss(inputs, target, weight=None):
    """Mean squared error, optionally weighted per element.

    With ``weight`` given, returns mean(weight * err^2) / mean(weight);
    otherwise falls back to ``F.mse_loss``.
    """
    if weight is None:
        return F.mse_loss(inputs, target)
    squared_error = (inputs - target) ** 2
    return torch.mean(weight * squared_error) / torch.mean(weight)
def weighted_l1_loss(inputs, target, weight=None):
    """Mean absolute error, optionally weighted per element.

    With ``weight`` given, returns mean(weight * |err|) / mean(weight);
    otherwise falls back to ``F.l1_loss``.
    """
    if weight is None:
        return F.l1_loss(inputs, target)
    abs_error = torch.abs(inputs - target)
    return torch.mean(weight * abs_error) / torch.mean(weight)
def perceptual_l1_loss(decoder_path, bake_decodings):
    """Build a perceptual L1 loss using a frozen Taskonomy decoder.

    The task is inferred from the checkpoint filename, the matching
    TaskonomyDecoder is loaded onto the GPU, set to eval mode and used to
    decode predictions (and targets, unless decodings are pre-baked) before
    comparing them with (weighted) L1.

    Args:
        decoder_path: path to a decoder checkpoint whose name contains one
            of the SINGLE_IMAGE_TASKS task names.
        bake_decodings: if True, ``target`` passed to the returned function
            is assumed to already be decoded.

    Returns:
        A callable ``runner(inputs, target, weight=None, cache={})``.
    """
    # Infer the task from the checkpoint path (first matching name wins).
    task = [t for t in SINGLE_IMAGE_TASKS if t in decoder_path][0]
    decoder = TaskonomyDecoder(TASKS_TO_CHANNELS[task], feed_forward=task in FEED_FORWARD_TASKS)
    checkpoint = torch.load(decoder_path)
    decoder.load_state_dict(checkpoint['state_dict'])
    decoder.cuda()
    decoder.eval()
    print(f'Loaded decoder from {decoder_path} for perceptual loss')

    def runner(inputs, target, weight=None, cache={}):
        # the last arguments are so we can 'cache' and pass the decodings outside
        # NOTE(review): the mutable default ``cache={}`` is shared between
        # calls; it is used here as an output channel — callers that need
        # isolation must pass their own dict.
        inputs_decoded = decoder(inputs)
        targets_decoded = target if bake_decodings else decoder(target)
        cache['inputs_decoded'] = inputs_decoded
        cache['targets_decoded'] = targets_decoded
        # Weighted mean L1, normalised by the mean weight.
        if weight is not None:
            return torch.mean(weight * torch.abs(inputs_decoded - targets_decoded))/torch.mean(weight)
        return F.l1_loss(inputs_decoded, targets_decoded)
    return runner
def perceptual_cross_entropy_loss(decoder_path, bake_decodings):
    """Build a perceptual soft cross-entropy loss using a frozen Taskonomy decoder.

    Like :func:`perceptual_l1_loss`, but the decoded targets are softened
    with a softmax and compared against the log-softmax of the decoded
    predictions (summed, then averaged over the batch).

    Args:
        decoder_path: path to a decoder checkpoint whose name contains one
            of the SINGLE_IMAGE_TASKS task names.
        bake_decodings: if True, ``target`` passed to the returned function
            is assumed to already be decoded.

    Returns:
        A callable ``runner(inputs, target, weight=None, cache={})``; note
        ``weight`` is accepted but unused in this variant.
    """
    # Infer the task from the checkpoint path (first matching name wins).
    task = [t for t in SINGLE_IMAGE_TASKS if t in decoder_path][0]
    decoder = TaskonomyDecoder(TASKS_TO_CHANNELS[task], feed_forward=task in FEED_FORWARD_TASKS)
    checkpoint = torch.load(decoder_path)
    decoder.load_state_dict(checkpoint['state_dict'])
    decoder.cuda()
    decoder.eval()
    print(f'Loaded decoder from {decoder_path} for perceptual loss')

    def runner(inputs, target, weight=None, cache={}):
        # the last arguments are so we can 'cache' and pass the decodings outside
        # NOTE(review): the mutable default ``cache={}`` is shared between
        # calls; it is used here as an output channel.
        inputs_decoded = decoder(inputs)
        targets_decoded = target if bake_decodings else decoder(target)
        cache['inputs_decoded'] = inputs_decoded
        cache['targets_decoded'] = targets_decoded
        batch_size, _ = targets_decoded.shape
        # Soft cross-entropy: -sum(softmax(target) * log_softmax(pred)) / batch.
        return -1. * torch.sum(torch.softmax(targets_decoded, dim=1) * F.log_softmax(inputs_decoded, dim=1)) / batch_size
    return runner
class CrossEntropy2d(nn.Module):
    """2-D cross-entropy over (n, c, h, w) logits and (n, h, w) class targets.

    NOTE(review): this class is redefined later in this module under the
    same name (the later copy additionally accepts one-hot targets); the
    later definition wins at import time, so this one is dead code.
    """

    def __init__(self, size_average=True, ignore_label=255):
        super(CrossEntropy2d, self).__init__()
        self.size_average = size_average  # forwarded to F.cross_entropy (legacy reduction arg)
        self.ignore_label = ignore_label  # target value excluded from the loss

    def forward(self, predict, target, weight=None):
        """
        Args:
            predict:(n, c, h, w)
            target:(n, h, w)
            weight (Tensor, optional): a manual rescaling weight given to each class.
                If given, has to be a Tensor of size "nclasses"
        """
        assert not target.requires_grad
        assert predict.dim() == 4
        assert target.dim() == 3
        assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0))
        assert predict.size(2) == target.size(1), "{0} vs {1} ".format(predict.size(2), target.size(1))
        # NOTE(review): the failure message below calls target.size(3) on a
        # 3-D tensor; if this assert ever failed, formatting the message
        # would itself raise IndexError — target.size(2) was presumably
        # intended (fixed in the later redefinition of this class).
        assert predict.size(3) == target.size(2), "{0} vs {1} ".format(predict.size(3), target.size(3))
        n, c, h, w = predict.size()
        # Keep only valid positions: non-negative and not the ignore label.
        target_mask = (target >= 0) * (target != self.ignore_label)
        target = target[target_mask]
        # Move channels last, then select the masked positions for every class.
        predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
        predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
        loss = F.cross_entropy(predict, target, weight=weight, size_average=self.size_average)
        return loss
class CrossEntropy2d(nn.Module):
    """2-D cross-entropy over (n, c, h, w) logits and (n, h, w) class targets.

    This definition shadows the identically-named class above; it
    additionally accepts one-hot (n, c, h, w) targets, which are collapsed
    to class indices via argmax.
    """

    def __init__(self, size_average=True, ignore_label=255):
        super(CrossEntropy2d, self).__init__()
        self.size_average = size_average  # forwarded to F.cross_entropy (legacy reduction arg)
        self.ignore_label = ignore_label  # target value excluded from the loss

    def forward(self, predict, target, weight=None):
        """
        Args:
            predict:(n, c, h, w)
            target:(n, h, w) class indices, or (n, c, h, w) one-hot
            weight (Tensor, optional): a manual rescaling weight given to each class.
                If given, has to be a Tensor of size "nclasses"
        """
        # Accept one-hot targets by collapsing to class indices.
        if target.dim() == 4:
            target = torch.argmax(target, axis=1)
        assert not target.requires_grad
        assert predict.dim() == 4
        assert target.dim() == 3
        assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0))
        assert predict.size(2) == target.size(1), "{0} vs {1} ".format(predict.size(2), target.size(1))
        # Bug fix: the message previously used target.size(3), which itself
        # raises IndexError on a 3-D target; report target.size(2) instead.
        assert predict.size(3) == target.size(2), "{0} vs {1} ".format(predict.size(3), target.size(2))
        n, c, h, w = predict.size()
        # Keep only valid positions: non-negative and not the ignore label.
        target_mask = (target >= 0) * (target != self.ignore_label)
        target = target[target_mask]
        # Move channels last, then select the masked positions for every class.
        predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
        predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
        loss = F.cross_entropy(predict, target, weight=weight, size_average=self.size_average)
        return loss
| 45.136364 | 276 | 0.656596 |
61cf7342efb940a3f5d7c9b44e90c3d3f4d12610 | 21,205 | py | Python | src/trails/flow_model.py | BenDickens/trails | a89a1a901c7be38cdcb7a59339587e518ab8f14d | [
"MIT"
] | 4 | 2020-09-14T07:20:19.000Z | 2021-04-22T14:23:04.000Z | src/trails/flow_model.py | BenDickens/trails | a89a1a901c7be38cdcb7a59339587e518ab8f14d | [
"MIT"
] | 5 | 2021-03-17T17:02:27.000Z | 2021-08-31T10:09:38.000Z | src/trails/flow_model.py | BenDickens/trails | a89a1a901c7be38cdcb7a59339587e518ab8f14d | [
"MIT"
] | 3 | 2020-09-07T07:35:28.000Z | 2021-04-22T14:23:39.000Z | import os,sys
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import pygeos
from osgeo import gdal
from tqdm import tqdm
import igraph as ig
import contextily as ctx
from rasterstats import zonal_stats
import time
import pylab as pl
from IPython import display
import seaborn as sns
import subprocess
import shutil
from multiprocessing import Pool,cpu_count
import pathlib
code_path = (pathlib.Path(__file__).parent.absolute())
gdal.SetConfigOption("OSM_CONFIG_FILE", os.path.join(code_path,'..','..',"osmconf.ini"))
from shapely.wkb import loads
data_path = os.path.join('..','data')
from simplify import *
from extract import railway,ferries,mainRoads,roads
from population_OD import create_bbox,create_grid
pd.options.mode.chained_assignment = None
def closest_node(node, nodes):
    """Return the index of the row in ``nodes`` nearest to ``node``.

    Args:
        node: 1-D coordinate array.
        nodes: 2-D array, one coordinate per row.

    Returns:
        int index of the closest row (squared Euclidean distance).
    """
    differences = nodes - node
    squared_distances = (differences ** 2).sum(axis=1)
    return np.argmin(squared_distances)
def load_network(osm_path,mainroad=True):
    """Extract and simplify a routable road network from an OSM extract.

    Args:
        osm_path: path to a ``.osm.pbf`` file.
        mainroad: if True, extract only main roads (``mainRoads``);
            otherwise extract all roads (``roads``).

    Returns:
        A simplified ``Network`` (helpers come from the ``simplify`` module)
        with ids, topology, distances, travel times and filled attributes.
    """
    if mainroad:
        df = mainRoads(osm_path)
    else:
        df = roads(osm_path)

    # Simplification pipeline; the order of these steps matters.
    net = Network(edges=df)
    net = clean_roundabouts(net)
    net = split_edges_at_nodes(net)
    net = add_endpoints(net)
    net = add_ids(net)
    net = add_topology(net)
    net = drop_hanging_nodes(net)
    net = merge_edges(net)
    net = reset_ids(net)
    net = add_distances(net)
    net = merge_multilinestrings(net)
    net = fill_attributes(net)
    net = add_travel_time(net)
    return net
def make_directed(edges):
    """Expand an edge table so that every edge is explicitly one-way.

    One-way edges are kept as-is. Two-way edges are replaced by two opposing
    one-way edges, each carrying half the lanes (rounded).

    Args:
        edges: DataFrame with ``from_id``, ``to_id``, ``oneway``, ``lanes``
            and ``id`` columns.

    Returns:
        New DataFrame of directed edges with ``id`` reset to the row index.
    """
    directed_rows = []
    for _, edge in edges.iterrows():
        if edge.oneway == 'yes':
            directed_rows.append(edge)
            continue
        # Two-way edge: mark it one-way and split its lanes in half.
        edge.oneway = 'yes'
        edge.lanes = np.round(edge.lanes / 2, 0)
        directed_rows.append(edge)
        # Add the opposing edge with endpoints swapped.
        reverse = edge.copy()
        reverse.from_id, reverse.to_id = edge.to_id, edge.from_id
        directed_rows.append(reverse)

    new_edges = pd.DataFrame(directed_rows).reset_index(drop=True)
    new_edges.id = new_edges.index
    return new_edges
def get_gdp_values(gdf,data_path):
    """Sum gridded GDP (2015 raster) values within each geometry.

    Args:
        gdf: DataFrame with a pygeos ``geometry`` column; the column is
            temporarily converted to shapely and restored before returning.
        data_path: root data directory containing ``global_gdp/GDP_2015.tif``.

    Returns:
        list of per-geometry GDP sums (``None`` results replaced by 0).
    """
    world_pop = os.path.join(data_path,'global_gdp','GDP_2015.tif')
    # zonal_stats needs shapely geometries; convert from pygeos via WKB.
    gdf['geometry'] = gdf.geometry.apply(lambda x: loads(pygeos.to_wkb(x)))
    gdp = list(item['sum'] for item in zonal_stats(gdf.geometry,world_pop,
                stats="sum"))
    # Geometries outside raster coverage yield None; treat them as zero GDP.
    gdp = [x if x is not None else 0 for x in gdp]
    # Convert the geometry column back to pygeos.
    gdf['geometry'] = pygeos.from_shapely(gdf.geometry)
    return gdp
def country_grid_gdp_filled(trans_network,country,data_path,rough_grid_split=100,from_main_graph=False):
    """Create a gridded GDP layer covering a country's transport network.

    A regular grid of roughly ``rough_grid_split`` cells is laid over the
    network envelope, clipped to the GADM country outline, and each cell is
    given its area, total GDP and GDP per km².

    Args:
        trans_network: a Network object, or — if ``from_main_graph`` —
            a DataFrame of edge geometries from the main graph component.
        country: GADM ``GID_0`` (ISO3) code used to select the outline.
        data_path: root data directory (GADM geopackage, GDP raster).
        rough_grid_split: approximate number of grid cells over the envelope.
        from_main_graph: interpret ``trans_network`` as a geometry DataFrame.

    Returns:
        DataFrame of grid cells with geometry, centroid, km2, gdp and
        gdp_area columns; cells with zero GDP are dropped.
    """
    if from_main_graph==True:
        node_df = trans_network.copy()
        envelop = pygeos.envelope(pygeos.multilinestrings(node_df.geometry.values))
        height = np.sqrt(pygeos.area(envelop)/rough_grid_split)
    else:
        node_df = trans_network.nodes.copy()
        node_df.geometry,approximate_crs = convert_crs(node_df)
        envelop = pygeos.envelope(pygeos.multilinestrings(node_df.geometry.values))
        height = np.sqrt(pygeos.area(envelop)/rough_grid_split)

    gdf_admin = pd.DataFrame(create_grid(create_bbox(node_df),height),columns=['geometry'])

    #load data and convert to pygeos
    country_shape = gpd.read_file(os.path.join(data_path,'GADM','gadm36_levels.gpkg'),layer=0)
    country_shape = pd.DataFrame(country_shape.loc[country_shape.GID_0==country])
    country_shape.geometry = pygeos.from_shapely(country_shape.geometry)

    # Clip grid cells to the country outline and drop empty intersections.
    # NOTE(review): pygeos.intersection is applied to the DataFrame directly,
    # presumably broadcasting against the single country geometry — confirm.
    gdf_admin = pygeos.intersection(gdf_admin,country_shape.geometry)
    gdf_admin = gdf_admin.loc[~pygeos.is_empty(gdf_admin.geometry)]

    gdf_admin['centroid'] = pygeos.centroid(gdf_admin.geometry)
    gdf_admin['km2'] = area(gdf_admin)
    gdf_admin['gdp'] = get_gdp_values(gdf_admin,data_path)
    gdf_admin = gdf_admin.loc[gdf_admin.gdp > 0].reset_index()
    gdf_admin['gdp_area'] = gdf_admin.gdp/gdf_admin['km2']

    return gdf_admin
def convert_crs(gdf,current_crs="epsg:4326"):
    """Reproject a pygeos geometry column to an approximate metric UTM CRS.

    When ``current_crs`` is WGS84, the target UTM zone EPSG is derived from
    the centroid of the *first* geometry; otherwise geometries are converted
    back to WGS84.

    Args:
        gdf: DataFrame with a pygeos ``geometry`` column.
        current_crs: CRS of the input geometries.

    Returns:
        tuple of (reprojected geometry array, target CRS string).
    """
    if current_crs == "epsg:4326":
        lat = pygeos.geometry.get_y(pygeos.centroid(gdf['geometry'].iloc[0]))
        lon = pygeos.geometry.get_x(pygeos.centroid(gdf['geometry'].iloc[0]))
        # formula below based on :https://gis.stackexchange.com/a/190209/80697
        approximate_crs = "epsg:" + str(int(32700-np.round((45+lat)/90,0)*100+np.round((183+lon)/6,0)))
    else:
        approximate_crs = "epsg:4326"

    #from pygeos/issues/95
    geometries = gdf['geometry']
    coords = pygeos.get_coordinates(geometries)
    # NOTE(review): pyproj is not imported at the top of this module;
    # presumably it comes into scope via ``from simplify import *`` — confirm.
    transformer=pyproj.Transformer.from_crs(current_crs, approximate_crs,always_xy=True)
    new_coords = transformer.transform(coords[:, 0], coords[:, 1])
    result = pygeos.set_coordinates(geometries.copy(), np.array(new_coords).T)
    return result,approximate_crs
def area(gdf, km=True):
    """Compute geometry areas in an approximate metric CRS.

    Args:
        gdf: DataFrame with a pygeos ``geometry`` column (assumed WGS84).
        km: return square kilometres when True, square metres otherwise.

    Returns:
        Array of areas, one per geometry.
    """
    projected = convert_crs(gdf)[0]
    surface = pygeos.area(projected)
    return surface / 1e6 if km else surface
def get_basetable(country,data_path):
    """Load the 26x26 national input-output table for a country (2015, basic prices).

    Args:
        country: ISO3 code; expects ``IO_<country>_2015_BasicPrice.txt``
            under ``data_path/country_IO_tables``.
        data_path: root data directory.

    Returns:
        The upper-left 26x26 block of the IO table, cast to int.
    """
    io_data_path = os.path.join(data_path,'country_IO_tables')
    # Tab-separated with 3 header rows and 4 index columns; the large
    # skipfooter drops the trailing rows of the source file.
    df = pd.read_csv(os.path.join(io_data_path,'IO_{}_2015_BasicPrice.txt'.format(country)),
                     sep='\t', skiprows=1,header=[0,1,2],index_col = [0,1,2,3],
                     skipfooter=2617,engine='python')
    basetable = df.iloc[:26,:26]
    return basetable.astype(int)
def create_OD(gdf_admin,country_name,data_path):
    """Downscale a national IO table to a region-to-region OD matrix.

    Runs the ``mrio_disaggregate`` tool (copied into a per-country working
    directory) with regional GDP density as the spatial proxy, then
    aggregates sectors into a region-by-region matrix of daily trade values.

    Args:
        gdf_admin: regions with ``gdp_area`` (and optionally ``NAME_1``)
            columns; gains ``name``, ``import`` and ``export`` columns.
        country_name: ISO3 country code (IO table lookup and path suffix).
        data_path: root data directory containing ``country_IO_tables``.

    Returns:
        tuple of (OD DataFrame, OD dict keyed by (origin, destination)
        region names, list of sector codes, updated gdf_admin).
    """
    # create list of sectors
    sectors = [chr(i).upper() for i in range(ord('a'),ord('o')+1)]

    # add a region column if not existing yet.
    if 'NAME_1' not in gdf_admin.columns:
        gdf_admin['NAME_1'] = ['reg'+str(x) for x in list(gdf_admin.index)]

    # prepare paths to downscale a country. We give a country its own directory
    # to allow for multiple unique countries running at the same time
    downscale_basepath = os.path.join(code_path,'..','..','downscale_od')
    downscale_countrypath = os.path.join(code_path,'..','..','run_downscale_od_{}'.format(country_name))

    # copy downscaling method into the country directory
    shutil.copytree(downscale_basepath,downscale_countrypath)

    # save national IO table as basetable for downscaling
    get_basetable(country_name,data_path).to_csv(os.path.join(downscale_countrypath,'basetable.csv'),
                                                 sep=',',header=False,index=False)

    # create proxy table with GDP values per region/area
    proxy_reg = pd.DataFrame(gdf_admin[['NAME_1','gdp_area']])
    proxy_reg['year'] = 2016
    proxy_reg = proxy_reg[['year','NAME_1','gdp_area']]
    proxy_reg.columns = ['year','id','gdp_area']
    proxy_reg.to_csv(os.path.join(downscale_countrypath,'proxy_reg.csv'),index=False)

    indices = pd.DataFrame(sectors,columns=['sector'])
    indices['name'] = country_name
    indices = indices.reindex(['name','sector'],axis=1)
    indices.to_csv(os.path.join(downscale_countrypath,'indices.csv'),index=False,header=False)

    # prepare yaml file: lines 7 and 9 of the template hold the country id
    # and the list of subregions it is split into.
    yaml_file = open(os.path.join(downscale_countrypath,"settings_basic.yml"), "r")
    list_of_lines = yaml_file.readlines()
    list_of_lines[6] = ' - id: {}\n'.format(country_name)
    list_of_lines[8] = ' into: [{}] \n'.format(','.join(['reg'+str(x) for x in list(gdf_admin.index)]))

    yaml_file = open(os.path.join(downscale_countrypath,"settings_basic.yml"), "w")
    yaml_file.writelines(list_of_lines)
    yaml_file.close()

    # run libmrio
    p = subprocess.Popen([os.path.join(downscale_countrypath,'mrio_disaggregate'), 'settings_basic.yml'],
                         cwd=os.path.join(downscale_countrypath))
    p.wait()

    # create OD matrix from libmrio results
    OD = pd.read_csv(os.path.join(downscale_countrypath,'output.csv'),header=None)
    OD.columns = pd.MultiIndex.from_product([gdf_admin.NAME_1,sectors])
    OD.index = pd.MultiIndex.from_product([gdf_admin.NAME_1,sectors])
    # Sum over sectors to obtain a region-by-region matrix.
    OD = OD.groupby(level=0,axis=0).sum().groupby(level=0,axis=1).sum()
    # NOTE(review): presumably a conversion to a daily value (factor 5/365)
    # — confirm the intended unit.
    OD = (OD*5)/365

    OD_dict = OD.stack().to_dict()

    # Row sums = total inflow per region, column sums = total outflow.
    gdf_admin['import'] = list(OD.sum(axis=1))
    gdf_admin['export'] = list(OD.sum(axis=0))

    gdf_admin = gdf_admin.rename({'NAME_1': 'name'}, axis='columns')

    # and remove country folder again to avoid clutter in the directory
    shutil.rmtree(downscale_countrypath)

    return OD,OD_dict,sectors,gdf_admin
def prepare_network_routing(transport_network):
    """Prepare a directed edge table ready for flow routing.

    Makes the network directed, renames ``highway`` to ``infra_type`` and
    initialises the generalised-cost (GC), capacity (max_flow), flow and
    wait_time columns.

    Args:
        transport_network: Network object exposing an ``edges`` DataFrame.

    Returns:
        Directed edges DataFrame with routing columns filled in.
    """
    gdf_roads = make_directed(transport_network.edges)
    gdf_roads = gdf_roads.rename(columns={"highway": "infra_type"})
    # Initial generalised cost and capacity per edge (row-wise helpers).
    gdf_roads['GC'] = gdf_roads.apply(gc_function,axis=1)
    gdf_roads['max_flow'] = gdf_roads.apply(set_max_flow,axis=1)
    gdf_roads['flow'] = 0
    gdf_roads['wait_time'] = 0
    return gdf_roads
def create_graph(gdf_roads):
    """Build a directed igraph graph from the edge table.

    Args:
        gdf_roads: directed edges with ``from_id``/``to_id`` columns.

    Returns:
        tuple of (giant connected component as igraph.Graph, edge
        DataFrame re-indexed by ``id``).
    """
    # igraph's TupleList expects the endpoint columns first.
    gdf_in = gdf_roads.reindex(['from_id','to_id'] + [x for x in list(gdf_roads.columns) if x not in ['from_id','to_id']],axis=1)
    g = ig.Graph.TupleList(gdf_in.itertuples(index=False), edge_attrs=list(gdf_in.columns)[2:],directed=True)
    # Keep only the giant (largest) connected component.
    sg = g.clusters().giant()
    gdf_in.set_index('id',inplace=True)
    return sg,gdf_in
def nearest_network_node_list(gdf_admin,gdf_nodes,sg):
    """Map each admin region to the id of its nearest network node.

    Only nodes present in the (giant-component) graph ``sg`` are considered.

    Args:
        gdf_admin: regions with ``name`` and ``centroid`` columns.
        gdf_nodes: network nodes with ``id`` and pygeos ``geometry`` columns.
        sg: igraph graph whose vertex names are node ids.

    Returns:
        dict mapping region name -> nearest node id.
    """
    gdf_nodes = gdf_nodes.loc[gdf_nodes.id.isin(sg.vs['name'])]
    gdf_nodes.reset_index(drop=True,inplace=True)

    nodes = {}
    for admin_ in gdf_admin.itertuples():
        # idxmin over the distances from this region's centroid to all nodes.
        nodes[admin_.name] = gdf_nodes.iloc[pygeos.distance((admin_.centroid),gdf_nodes.geometry).idxmin()].id
    return nodes
def set_max_flow(segment):
    """Maximum hourly freight value (USD) a road segment can carry.

    The base capacity assumes 1000 passenger-car units per lane per hour,
    3.5 PCU per truck, a median truck value of 16,000 USD (8 tonnes at
    2,000 USD/tonne) and that 30% of trips run empty. Higher road classes
    scale the base capacity up, lower ones scale it down.

    Args:
        segment: edge-like object exposing an ``infra_type`` attribute.

    Returns:
        float capacity in USD value per hour.
    """
    empty_trip_correction = 0.7  # 30% of trips are empty
    # standard lane capacity = 1000 passenger vehicles per lane per hour;
    # passenger vehicle equivalent for trucks: 3.5; median truck value:
    # 8 tonnes * 2,000 USD/tonne = 16,000 USD.
    standard_max_flow = 1000/3.5*16000*empty_trip_correction

    multipliers = {
        'trunk': 4, 'trunk_link': 4,
        'motorway': 4, 'motorway_link': 4,
        'primary': 2, 'primary_link': 2,
        'secondary': 1, 'secondary_link': 1,
        'tertiary': 0.5, 'tertiary_link': 0.5,
    }
    # All other road classes get the tertiary factor.
    return standard_max_flow * multipliers.get(segment.infra_type, 0.5)
def gc_function(segment):
    """Initial generalised cost (GC) of a road segment.

    GC = α ∗ WaitT + β ∗ TrvlT + μ ∗ Trate + γ ∗ stddev, with a zero
    waiting time at initialisation and a transport rate (Trate) depending
    on the road class.

    Args:
        segment: edge-like object exposing ``infra_type`` (attribute) and
            ``time`` (item access), e.g. a pandas Series.

    Returns:
        float generalised cost.
    """
    wait_time = 0  # no congestion before the first assignment iteration
    rates = {
        'primary': 0.5, 'primary_link': 0.5,
        'secondary': 1, 'secondary_link': 1,
        'tertiary': 1.5, 'tertiary_link': 1.5,
    }
    transport_rate = rates.get(segment.infra_type, 2)
    return 0.57 * wait_time + 0.49 * segment['time'] + transport_rate + 0.44
def update_gc_function(segment):
    """Update an edge's congestion state and return its new generalised cost.

    GC = α ∗ WaitT + β ∗ TrvlT + μ ∗ Trate + γ ∗ stddev

    The edge's ``wait_time`` is mutated in place: it grows by one while the
    assigned flow exceeds capacity and decays by one otherwise (never below
    zero). The transport rate (Trate) depends on the road class.

    Args:
        segment: mapping-like edge (e.g. an igraph Edge) with ``flow``,
            ``max_flow``, ``wait_time``, ``time`` and ``infra_type`` keys.

    Returns:
        float: updated generalised cost of the edge.
    """
    if segment['flow'] > segment['max_flow']:
        # Over capacity: congestion (waiting time) builds up.
        segment['wait_time'] += 1
    elif segment['wait_time'] > 0:
        # Bug fix: the original ``segment['wait_time'] - 1`` discarded its
        # result, so congestion never decayed; decrement in place instead.
        segment['wait_time'] -= 1
    else:
        segment['wait_time'] = 0

    if segment['infra_type'] in ['primary','primary_link']:
        Trate = 0.5
    elif segment['infra_type'] in ['secondary','secondary_link']:
        Trate = 1
    elif segment['infra_type'] in ['tertiary','tertiary_link']:
        Trate = 1.5
    else:
        Trate = 2
    return 0.57*segment['wait_time']+0.49*segment['time']+1*Trate+0.44*1
def run_flow_analysis(country,transport_network,gdf_admin,OD_dict,notebook=False):
    """Iteratively assign OD flows to the network until congestion settles.

    Each iteration: update every edge's generalised cost from its current
    congestion state, reset flows, route all region-to-region OD values
    along cheapest paths and accumulate flows. Stops when at least 99% of
    edges are below their max flow, or after 100 iterations. Convergence is
    plotted every iteration.

    Args:
        country: ISO3 code used in the output figure filename.
        transport_network: Network used to build the routing graph.
        gdf_admin: regions with ``name`` and ``centroid`` columns.
        OD_dict: dict mapping (origin_name, destination_name) -> flow value.
        notebook: if True, refresh a single inline figure instead of using
            an interactive matplotlib window.

    Returns:
        Edge DataFrame with final ``flow``, ``max_flow``, ``wait_time`` and
        ``overflow`` (flow / max_flow) columns.
    """
    plt.rcParams['figure.figsize'] = [5, 5]
    gdf_roads = prepare_network_routing(transport_network)
    sg,gdf_in = create_graph(gdf_roads)
    nearest_node = nearest_network_node_list(gdf_admin,transport_network.nodes,sg)
    # Graph-internal vertex indices of each region's access node.
    dest_nodes = [sg.vs['name'].index(nearest_node[x]) for x in list(nearest_node.keys())]

    # this is where the iterations goes
    iterator = 0
    optimal = False
    max_iter = 100
    save_fits = []
    if not notebook:
        plt.ion() ## Note this correction

    # run flow optimization model
    while optimal == False:

        #update cost function per segment, dependent on flows from previous iteration.
        sg.es['GC'] = [(lambda segment: update_gc_function(segment))(segment) for segment in list(sg.es)]
        sg.es['flow'] = 0

        #(re-)assess shortest paths between all regions
        for admin_orig in (list(gdf_admin.name)):
            paths = sg.get_shortest_paths(sg.vs[sg.vs['name'].index(nearest_node[admin_orig])],dest_nodes,weights='GC',output="epath")
            for path,admin_dest in zip(paths,list(gdf_admin.name)):
                flow_value = OD_dict[(admin_orig,admin_dest)]
                # Accumulate this OD pair's value on every edge of its path.
                sg.es[path]['flow'] = [x + flow_value for x in sg.es[path]['flow']]

        # Share of edges whose assigned flow stays under capacity.
        fitting_edges = (sum([x<y for x,y in zip(sg.es['flow'],sg.es['max_flow'])])/len(sg.es))
        save_fits.append(fitting_edges)

        # if at least 99% of roads are below max flow, we say its good enough
        if (sum([x<y for x,y in zip(sg.es['flow'],sg.es['max_flow'])])/len(sg.es)) > 0.99:
            optimal = True
        iterator += 1

        # when running the code in a notebook, the figure updates instead of a new figure each iteration
        if notebook:
            pl.plot(save_fits)
            display.display(pl.gcf())
            display.clear_output(wait=True)
        else:
            plt.plot(save_fits)
            plt.xlabel('# iteration')
            plt.ylabel('Share of edges below maximum flow')
            plt.show()
            plt.pause(0.0001) #Note this correction

        if iterator == max_iter:
            break

    # save output
    plt.savefig(os.path.join(code_path,'..','..','figures','{}_flow_modelling.png'.format(country)))

    # Copy the final edge state from the graph back onto the DataFrame.
    gdf_in['flow'] = pd.DataFrame(sg.es['flow'],columns=['flow'],index=sg.es['id'])
    gdf_in['max_flow'] = pd.DataFrame(sg.es['max_flow'],columns=['max_flow'],index=sg.es['id'])
    gdf_in['wait_time'] = pd.DataFrame(sg.es['wait_time'],columns=['wait_time'],index=sg.es['id'])
    gdf_in['overflow'] = gdf_in['flow'].div(gdf_in['max_flow'])

    return gdf_in
def plot_OD_matrix(OD):
    """Draw the region-to-region OD matrix as a heatmap.

    Args:
        OD: square DataFrame of trade values; colours saturate at 1e5.
    """
    plt.rcParams['figure.figsize'] = [20, 15]
    sns.heatmap(OD,vmin=0,vmax=1e5,cmap='Reds')
def plot_results(gdf_in):
    """Plot modelled flows on a basemap plus a per-road-type bar chart.

    Left panel: network edges with flow > 1, coloured by flow, over a
    contextily basemap (web-mercator). Right panel: per road type, the
    share of total distance contributed by edges passing the
    ``max_flow > 1`` filter.

    Args:
        gdf_in: edge DataFrame with pygeos geometries and ``flow``,
            ``max_flow``, ``distance`` and ``infra_type`` columns; its
            geometry column is converted to shapely in place.
    """
    gdf_in['geometry'] = gdf_in.geometry.apply(lambda x : loads(pygeos.to_wkb(x)))
    gdf_plot = gpd.GeoDataFrame(gdf_in)
    gdf_plot.crs = 4326
    gdf_plot = gdf_plot.to_crs(3857)

    plt.rcParams['figure.figsize'] = [20, 10]
    fig, axes = plt.subplots(1, 2)

    for iter_,ax in enumerate(axes.flatten()):
        if iter_ == 0:
            gdf_plot.loc[gdf_plot.flow>1].plot(ax=ax,column='flow',legend=False,cmap='Reds',linewidth=3)
            ctx.add_basemap(ax, source=ctx.providers.Stamen.TonerLite,zoom=15)
            ax.set_axis_off()
            ax.set_title('Flows along the network')
        else:
            # Bug fix: pandas' plot accessor takes ``kind=``, not ``type=``;
            # the original ``.plot(type='bar', ...)`` failed at runtime.
            # NOTE(review): the ``max_flow>1`` filter selects nearly all
            # edges; given the axis label, ``gdf_in.flow > gdf_in.max_flow``
            # may have been intended — confirm.
            pd.DataFrame(gdf_in.loc[gdf_in.max_flow>1].groupby(
                'infra_type').sum()['distance']/gdf_in.groupby('infra_type').sum()['distance']).dropna().sort_values(
                by='distance',ascending=False).plot(kind='bar',color='red',ax=ax)
            ax.set_ylabel('Percentage of edges > max flow')
            ax.set_xlabel('Road type')
def country_run(country,data_path=os.path.join('C:\\','Data'),plot=False,save=True):
    """Run the per-country pipeline: load network, build graph, extract GDP grid.

    The OD-creation, flow-analysis and saving stages are commented out
    below, so currently only network/GDP preparation actually runs.

    Args:
        country: ISO3 country code; expects ``<country>.osm.pbf`` under
            ``data_path/country_osm``.
        data_path: root data directory (defaults to ``C:\\Data``).
        plot: if True, plot results (only used by the commented-out stage).
        save: if True, save outputs (only used by the commented-out stage).
    """
    osm_path = os.path.join(data_path,'country_osm','{}.osm.pbf'.format(country))

    transport_network = load_network(osm_path)
    print('NOTE: Network created')

    gdf_roads = prepare_network_routing(transport_network)
    sg = create_graph(gdf_roads)[0]
    # Geometries of the giant component's edges, used to bound the GDP grid.
    main_graph = pd.DataFrame(list(sg.es['geometry']),columns=['geometry'])

    gdf_admin = country_grid_gdp_filled(main_graph,country,data_path,rough_grid_split=100,from_main_graph=True)
    print('NOTE: GDP values extracted')

    # OD,OD_dict,sectors,gdf_admin = create_OD(gdf_admin,country,data_path)
    # print('NOTE: OD created')

    # gdf_out = run_flow_analysis(country,transport_network,gdf_admin,OD_dict)
    # print('NOTE: Flow analysis finished')

    # if save:
    #     gdf_admin['geometry'] = gdf_admin.geometry.apply(lambda x: loads(pygeos.to_wkb(x)))
    #     gdf_out = gdf_out.loc[~gdf_out.max_flow.isna()].reset_index(drop=True)
    #     gdf_out_save = gdf_out.copy()
    #     gdf_out_save['geometry'] = gdf_out_save.geometry.apply(lambda x: loads(pygeos.to_wkb(x)))

    #     gpd.GeoDataFrame(gdf_admin.drop('centroid',axis=1)).to_file(
    #                             os.path.join(code_path,'..','..','data',
    #                           '{}.gpkg'.format(country)),layer='grid',driver='GPKG')
    #     gpd.GeoDataFrame(gdf_out_save).to_file(os.path.join('..','..','data',
    #                           '{}.gpkg'.format(country)),layer='network',driver='GPKG')

    # if plot:
    #     plot_results(gdf_out)
if __name__ == '__main__':
    #country_run(sys.argv[1],os.path.join('C:\\','Data'),plot=False)
    #country_run(sys.argv[1],os.path.join(code_path,'..','..','Data'),plot=False)
    #data_path = os.path.join('C:\\','Data')

    # A 3-letter argument is treated as an ISO3 country code; anything longer
    # is treated as a continent name and expanded to all its countries.
    if (len(sys.argv) > 1) & (len(sys.argv[1]) == 3):
        country_run(sys.argv[1])
    elif (len(sys.argv) > 1) & (len(sys.argv[1]) > 3):
        # NOTE(review): hard-coded cluster path — adjust per environment.
        glob_info = pd.read_excel(os.path.join('/scistor','ivm','eks510','projects','trails','global_information.xlsx'))
        glob_info = glob_info.loc[glob_info.continent==sys.argv[1]]
        countries = list(glob_info.ISO_3digit)
        if len(countries) == 0:
            print('FAILED: Please write the continents as follows: Africa, Asia, Central-America, Europe, North-America,Oceania, South-America')
        # Run every country of the continent in parallel, one task per core.
        with Pool(cpu_count()) as pool:
            pool.map(country_run,countries,chunksize=1)
    else:
        print('FAILED: Either provide an ISO3 country name or a continent name')
61cffba0eebf31780c12f21faf032f94e065f6a5 | 1,238 | py | Python | offsite/core/utils.py | wh1te909/backup-offsite | 694f773583eb825b44ff20c51598ac9e1106cd32 | [
"MIT"
] | 4 | 2021-01-20T15:45:35.000Z | 2021-07-09T02:15:31.000Z | offsite/core/utils.py | wh1te909/backup-offsite | 694f773583eb825b44ff20c51598ac9e1106cd32 | [
"MIT"
] | 6 | 2020-08-02T23:31:07.000Z | 2021-09-22T19:19:50.000Z | offsite/core/utils.py | wh1te909/backup-offsite | 694f773583eb825b44ff20c51598ac9e1106cd32 | [
"MIT"
] | null | null | null | from channels.auth import AuthMiddlewareStack
from knox.auth import TokenAuthentication
from django.contrib.auth.models import AnonymousUser
from channels.db import database_sync_to_async
@database_sync_to_async
def get_user(access_token):
    """Resolve a Django user from a knox token in a websocket query string.

    Args:
        access_token: the raw (bytes) query string, expected to contain
            ``access_token=<token>``.

    Returns:
        The authenticated user, or ``AnonymousUser`` on any failure
        (missing or invalid token) rather than raising.
    """
    try:
        auth = TokenAuthentication()
        # Query string arrives as bytes, e.g. b"access_token=abc123".
        token = access_token.decode().split("access_token=")[1]
        user = auth.authenticate_credentials(token.encode())
    except Exception:
        return AnonymousUser()
    else:
        # authenticate_credentials returns a (user, auth_token) pair.
        return user[0]
class KnoxAuthMiddlewareInstance:
    """
    Per-connection ASGI middleware instance that authenticates via knox.

    https://github.com/django/channels/issues/1399
    """
    def __init__(self, scope, middleware):
        self.middleware = middleware
        # Copy the scope so per-connection mutations don't leak upstream.
        self.scope = dict(scope)
        self.inner = self.middleware.inner

    async def __call__(self, receive, send):
        # Authenticate from the raw query string (carries access_token=...).
        q = self.scope["query_string"]
        self.scope["user"] = await get_user(q)
        inner = self.inner(self.scope)
        return await inner(receive, send)
class KnoxAuthMiddleware:
    """ASGI middleware factory that wraps each connection in a
    KnoxAuthMiddlewareInstance."""
    def __init__(self, inner):
        self.inner = inner

    def __call__(self, scope):
        return KnoxAuthMiddlewareInstance(scope, self)

# Convenience stack: Django's session auth middleware plus knox token auth.
KnoxAuthMiddlewareStack = lambda inner: KnoxAuthMiddleware(AuthMiddlewareStack(inner))
| 25.265306 | 86 | 0.697092 |
61d14e7bc92cdd86e7f3f92f3039ee396ac2a457 | 6,841 | py | Python | unik/indexing.py | balbasty/unik | 7b8b2a0989495eec7bc0db6c672ce904cbcb1063 | [
"MIT"
] | null | null | null | unik/indexing.py | balbasty/unik | 7b8b2a0989495eec7bc0db6c672ce904cbcb1063 | [
"MIT"
] | null | null | null | unik/indexing.py | balbasty/unik | 7b8b2a0989495eec7bc0db6c672ce904cbcb1063 | [
"MIT"
] | null | null | null | """Access / change tensor shape."""
import tensorflow as tf
import numpy as np
from .magik import tensor_compat
from .alloc import zeros_like
from .types import has_tensor, as_tensor, cast, dtype
from .shapes import shape, reshape, flatten, transpose, unstack
from ._math_for_indexing import cumprod, minimum, maximum
from ._utils import pop
@tensor_compat
def gather(input, indices, validate_indices=None,
axis=None, batch_dims=0, name=None):
"""Gather / Take values from a tensor / array along an axis."""
if tf.is_tensor(input) or tf.is_tensor(indices) \
or tf.is_tensor(axis) or tf.is_tensor(batch_dims):
return tf.gather(input, indices, validate_indices,
axis, batch_dims, name)
else:
if batch_dims > 0:
raise NotImplementedError()
return np.take(input, indices, axis=axis, mode='raise')
@tensor_compat
def scatter(indices, updates, *args, **kwargs):
"""Scatter `updates` at `indices` into a tensor.
Signatures
----------
scatter(indices, updates, shape, mode='new', axis=0, name=None)
scatter(indices, updates, input, mode, axis=0, name=None)
Parameters
----------
indices - (*ind_shape, L) tensor_like[int]
ND-indices in which to place the `updates`. The last dimension
maps to dimensions of the output tensor.
updates - (*up_shape, *slice_shape) tensor_like or scalar
Values to place in the tensor.
shape - vector_like[int], if mode == 'new'
Shape of the output tensor.
input - (*shape) tensor_like, if mode != 'new'
Tensor in which to place `updates`.
mode - {'new', 'update', 'add', 'sub', 'min', 'max'}, default='new'
Scatter mode.
name - str, optional
A name for the operation.
Returns
-------
output - (*shape) tensor or array
Tensor with updated values.
"""
# Parse arguments
args = list(args)
kwargs = dict(kwargs)
mode = pop(args, 1) if len(args) > 1 else kwargs.pop('mode', 'new')
if mode == 'new':
input = []
_shape = pop(args, 0) if len(args) > 0 else kwargs.pop('shape', None)
else:
input = pop(args, 0) if len(args) > 0 else kwargs.pop('input', None)
_shape = shape(input)
name = pop(args, 0) if len(args) > 0 else kwargs.pop('name', None)
# Ensure tensors
if has_tensor([indices, updates, _shape, input], 'tf'):
updates = as_tensor(updates, 'tf')
indices = as_tensor(indices, 'tf')
elif has_tensor([indices, updates, _shape, input], 'np'):
updates = as_tensor(updates, 'np')
indices = as_tensor(indices, 'np')
else:
updates = as_tensor(updates)
indices = as_tensor(indices)
if mode == 'new':
# Mode new: allocate tensor and populate
if has_tensor([indices, updates, _shape], 'tf'):
print(indices.dtype)
return tf.scatter_nd(indices, updates, _shape, name=name)
else:
# np.put works with linear indices only.
# NOTE: with this implementation, ind_shape and up_shape
# must be exactly equal, not just broadcastable.
output = zeros_like(updates, shape=_shape)
indices = reshape(indices, [-1, shape(indices)[-1]])
indices = sub2ind(transpose(indices), _shape)
updates = flatten(updates)
np.put(output, indices, updates)
return output
else:
if has_tensor([indices, updates, input], 'tf'):
if mode == 'update':
scatter_fn = tf.tensor_scatter_nd_update
elif mode == 'add':
scatter_fn = tf.tensor_scatter_nd_add
elif mode == 'sub':
scatter_fn = tf.tensor_scatter_nd_sub
elif mode == 'min':
scatter_fn = tf.tensor_scatter_nd_min
elif mode == 'max':
scatter_fn = tf.tensor_scatter_nd_max
else:
raise ValueError('Unknown operation {}'.format(mode))
updates = cast(updates, dtype(input))
return scatter_fn(input, indices, updates, name=name)
else:
# If mode != 'update', equivalent to:
# 0) the left-hand side is the input tensor
# 1) generate right-hand side using mode scatter with mode 'new'
# 2) apply op(LHS, RHS),
if mode == 'update':
output = input.copy()
indices = reshape(indices, [-1, shape(indices)[-1]])
indices = sub2ind(transpose(indices), _shape)
updates = flatten(updates)
np.put(output, indices, updates)
return output
elif mode == 'add':
op = lambda x, y: x + y
elif mode == 'sub':
op = lambda x, y: x - y
elif mode == 'min':
op = lambda x, y: minimum(x, y)
elif mode == 'max':
op = lambda x, y: maximum(x, y)
else:
raise ValueError('Unknown operation {}'.format(mode))
updates = scatter(indices, updates, shape=_shape, mode='new')
return op(input, updates)
@tensor_compat
def sub2ind(subs, shape):
"""Convert sub indices (i, j, k) into linear indices.
The rightmost dimension is the most rapidly changing one
-> if shape == [D, H, W], the strides are therefore [H*W, W, 1]
Parameters
----------
subs : (D, *shape) tensor_like
List of sub-indices. The first dimension is the number of dimension.
Each element should have the same number of elements and shape.
shape : (D,) vector_like
Size of each dimension. Its length should be the same as the
first dimension of ``subs``.
Returns
-------
ind : (*shape) tensor or array
Linear indices
"""
*subs, ind = unstack(subs)
stride = cumprod(shape[1:], reverse=True)
for i, s in zip(subs, stride):
ind = ind + as_tensor(i) * s
return ind
@tensor_compat
def where(cond, x=None, y=None, name=None):
"""Select values from two tensors based on a condition."""
if has_tensor([cond, x, y], 'tf'):
return tf.where(cond, x, y, name)
else:
if x is None and y is None:
return np.where(cond)
else:
return np.where(cond, x, y)
@tensor_compat
def boolean_mask(input, mask, axis=0, name='boolean_mask'):
    """Gather elements from a tensor / array using a mask.

    Parameters
    ----------
    input : tensor_like
        Tensor or array to filter.
    mask : boolean tensor_like
        Mask applied along ``axis``.
    axis : int or None, optional
        Axis along which to mask; ``None`` is treated as 0
        (mirrors ``tf.boolean_mask``'s default behaviour).
    name : str, optional
        Operation name (TensorFlow path only).
    """
    input = as_tensor(input)
    if has_tensor([input, mask], 'tf'):
        return tf.boolean_mask(input, mask, axis=axis, name=name)
    else:
        axis = axis or 0
        # Build an index tuple that applies `mask` on the requested axis;
        # plain subscription replaces the unidiomatic __getitem__ call.
        slices = (slice(None),) * axis + (mask, Ellipsis)
        return input[slices]
| 35.262887 | 77 | 0.582517 |
61d192d69ecdae0462072ff12464ac90f01f69d0 | 1,478 | py | Python | aleph/views/alerts_api.py | adikadashrieq/aleph | acc03197c10e511a279ae3a05120187223f173d2 | [
"MIT"
] | 1 | 2019-06-18T21:35:59.000Z | 2019-06-18T21:35:59.000Z | aleph/views/alerts_api.py | heartofstone/aleph | d66b6615d2bfa10c291c63754f53b468de8bebde | [
"MIT"
] | null | null | null | aleph/views/alerts_api.py | heartofstone/aleph | d66b6615d2bfa10c291c63754f53b468de8bebde | [
"MIT"
] | null | null | null | from flask import Blueprint, request
from aleph.core import db
from aleph.model import Alert
from aleph.search import DatabaseQueryResult
from aleph.views.forms import AlertSchema
from aleph.views.serializers import AlertSerializer
from aleph.views.util import require, obj_or_404
from aleph.views.util import parse_request
from aleph.views.context import tag_request
# Flask blueprint grouping the /api/2/alerts endpoints defined below.
blueprint = Blueprint('alerts_api', __name__)
@blueprint.route('/api/2/alerts', methods=['GET'])
def index():
    """List the alerts belonging to the current role, paginated."""
    require(request.authz.logged_in)
    alerts = Alert.by_role_id(request.authz.id)
    page = DatabaseQueryResult(request, alerts)
    return AlertSerializer.jsonify_result(page)
@blueprint.route('/api/2/alerts', methods=['POST', 'PUT'])
def create():
    """Create a new alert for the current role from the request body."""
    require(request.authz.session_write)
    payload = parse_request(AlertSchema)
    alert = Alert.create(payload, request.authz.id)
    db.session.commit()
    tag_request(alert_id=alert.id)
    return AlertSerializer.jsonify(alert)
@blueprint.route('/api/2/alerts/<int:alert_id>', methods=['GET'])
def view(alert_id):
    """Fetch a single alert owned by the current role (404 otherwise)."""
    require(request.authz.logged_in)
    found = Alert.by_id(alert_id, role_id=request.authz.id)
    return AlertSerializer.jsonify(obj_or_404(found))
@blueprint.route('/api/2/alerts/<int:alert_id>', methods=['DELETE'])
def delete(alert_id):
    """Delete a single alert owned by the current role (404 otherwise)."""
    require(request.authz.session_write)
    found = Alert.by_id(alert_id, role_id=request.authz.id)
    alert = obj_or_404(found)
    alert.delete()
    db.session.commit()
    return '', 204
| 31.446809 | 71 | 0.750338 |
61d210a06894e407303586520efa2e44fe445461 | 11,283 | py | Python | run.py | Acforest/LogPrompt | 199766cea9988bc6e8b1c71352b090da68bbb71d | [
"Apache-2.0"
] | null | null | null | run.py | Acforest/LogPrompt | 199766cea9988bc6e8b1c71352b090da68bbb71d | [
"Apache-2.0"
] | null | null | null | run.py | Acforest/LogPrompt | 199766cea9988bc6e8b1c71352b090da68bbb71d | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to train and evaluate either a regular supervised model or a PET/iPET model on
one of the supported tasks and datasets.
"""
import os
import log
import pet
import torch
import argparse
from pet.config import load_configs
from pet.tasks import PROCESSORS, UNLABELED_SET, TRAIN_SET, DEV_SET, TEST_SET, METRICS, DEFAULT_METRICS, load_examples
from pet.utils import eq_div
from pet.wrapper import WRAPPER_TYPES, MODEL_CLASSES
# Root logger for this command-line entry point.
logger = log.get_logger('root')
def main():
    """Parse command-line arguments, load the task datasets and launch
    PET training and/or evaluation via ``pet.train_pet``."""
    parser = argparse.ArgumentParser(description="Command line interface for PET/iPET")

    # Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data directory, which should contain the data files for the task")
    parser.add_argument("--model_type", default=None, type=str, required=True, choices=MODEL_CLASSES.keys(),
                        help="The type of the pretrained language model to use")
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to the pre-trained model or shortcut name")
    parser.add_argument("--task_name", default=None, type=str, required=True, choices=PROCESSORS.keys(),
                        help="The name of the task to train/evaluate on")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written")

    # PET-specific optional parameters
    parser.add_argument("--wrapper_type", default="mlm", choices=WRAPPER_TYPES,
                        help="The wrapper type. Set this to 'mlm' for a masked language model like BERT or to 'plm' "
                             "for a permuted language model like XLNet")
    parser.add_argument("--pattern_ids", default=[0], type=int, nargs='+',
                        help="The ids of the PVPs to be used")
    parser.add_argument("--lm_training", action='store_true',
                        help="Whether to use language modeling as auxiliary task")
    parser.add_argument("--alpha", default=0.9999, type=float,
                        help="Weighting term for the auxiliary language modeling task")
    parser.add_argument("--temperature", default=2, type=float,
                        help="Temperature used for combining PVPs")
    parser.add_argument("--verbalizer_file", default=None,
                        help="The path to a file to override default verbalizers")
    parser.add_argument("--reduction", default='wmean', choices=['wmean', 'mean'],
                        help="Reduction strategy for merging predictions from multiple PET models. Select either "
                             "uniform weighting (mean) or weighting based on train set accuracy (wmean)")
    parser.add_argument("--decoding_strategy", default='default', choices=['default', 'ltr', 'parallel'],
                        help="The decoding strategy with multiple masks")
    parser.add_argument("--no_distillation", action='store_true',
                        help="If set to true, no distillation is performed")
    parser.add_argument("--repetitions", default=3, type=int,
                        help="The number of times to repeat training and testing with different seeds")
    parser.add_argument("--max_seq_length", default=256, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded")
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for evaluation")
    parser.add_argument("--per_gpu_unlabeled_batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for auxiliary language modeling examples")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass")
    parser.add_argument("--num_train_epochs", default=3, type=float,
                        help="Total number of training epochs to perform")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform, override num_train_epochs")

    # Other optional parameters
    parser.add_argument("--train_examples", default=-1, type=int,
                        help="The total number of train examples to use, where -1 equals all examples")
    parser.add_argument("--eval_examples", default=-1, type=int,
                        help="The total number of evaluation examples to use, where -1 equals all examples")
    parser.add_argument("--dev_examples", default=-1, type=int,
                        help="The total number of development examples to use, where -1 equals all examples")
    parser.add_argument("--unlabeled_examples", default=-1, type=int,
                        help="The total number of unlabeled examples to use, where -1 equals all examples")
    parser.add_argument("--split_examples_evenly", action='store_true',
                        help="If true, train examples are not chosen randomly, but split evenly across all labels")
    parser.add_argument("--cache_dir", default="pretrained", type=str,
                        help="Where to store the pre-trained models downloaded")
    parser.add_argument("--learning_rate", default=1e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.01, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")
    parser.add_argument("--early_stop_epochs", default=5, type=int,
                        help="Threshold epochs for early stop")
    parser.add_argument("--logging_steps", type=int, default=50,
                        help="Log every X updates steps")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument("--overwrite_output_dir", action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument("--seed", type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to perform training")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to perform evaluation")
    parser.add_argument("--priming", action='store_true',
                        help="Whether to use priming for evaluation")
    parser.add_argument("--eval_set", choices=['dev', 'test'], default='test',
                        help="Whether to perform evaluation on the dev set or the test set")
    parser.add_argument("--embed_size", default=128, type=int, help="The embedding size of prompt")
    parser.add_argument("--prompt_encoder_type", type=str, default="lstm", choices=['lstm', 'mlp', 'manual'],
                        help="The type of encoder")
    parser.add_argument("--eval_every_step", default=20, type=int, help="Evaluate between two `eval_every_step` steps")

    args = parser.parse_args()
    logger.info("Parameters: {}".format(args))

    # Refuse to clobber an existing non-empty output dir unless explicitly allowed.
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) \
            and args.do_train and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))

    assert args.do_train or args.do_eval, "`do_train` and `do_eval` should be at least true for one"

    # Setup CUDA, GPU & distributed training
    args.device = "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
    args.n_gpu = torch.cuda.device_count()

    # Prepare task
    if args.task_name not in PROCESSORS:
        raise ValueError("Task '{}' not found".format(args.task_name))
    processor = PROCESSORS[args.task_name]()
    args.label_list = processor.get_labels()

    # Either cap totals directly (*_ex) or distribute the cap evenly over the
    # labels (*_ex_per_label); exactly one of the two is passed downstream.
    train_ex_per_label, eval_ex_per_label, dev_ex_per_label = None, None, None
    train_ex, eval_ex, dev_ex = args.train_examples, args.eval_examples, args.dev_examples
    if args.split_examples_evenly:
        train_ex_per_label = eq_div(args.train_examples, len(args.label_list)) if args.train_examples != -1 else -1
        eval_ex_per_label = eq_div(args.eval_examples, len(args.label_list)) if args.eval_examples != -1 else -1
        dev_ex_per_label = eq_div(args.dev_examples, len(args.label_list)) if args.dev_examples != -1 else -1
        train_ex, eval_ex, dev_ex = None, None, None

    eval_set = TEST_SET if args.eval_set == 'test' else DEV_SET

    train_data = load_examples(
        args.task_name, args.data_dir, TRAIN_SET, num_examples=train_ex, num_examples_per_label=train_ex_per_label)
    eval_data = load_examples(
        args.task_name, args.data_dir, eval_set, num_examples=eval_ex, num_examples_per_label=eval_ex_per_label)
    dev_data = load_examples(
        args.task_name, args.data_dir, DEV_SET, num_examples=dev_ex, num_examples_per_label=dev_ex_per_label)
    unlabeled_data = load_examples(
        args.task_name, args.data_dir, UNLABELED_SET, num_examples=args.unlabeled_examples)

    args.metrics = METRICS.get(args.task_name, DEFAULT_METRICS)

    pet_model_cfg, pet_train_cfg, pet_eval_cfg = load_configs(args)

    pet.train_pet(train_data=train_data,
                  eval_data=eval_data,
                  dev_data=dev_data,
                  unlabeled_data=unlabeled_data,
                  model_config=pet_model_cfg,
                  train_config=pet_train_cfg,
                  eval_config=pet_eval_cfg,
                  do_train=args.do_train,
                  do_eval=args.do_eval,
                  pattern_ids=args.pattern_ids,
                  output_dir=args.output_dir,
                  repetitions=args.repetitions,
                  reduction=args.reduction,
                  no_distillation=args.no_distillation,
                  seed=args.seed)


if __name__ == "__main__":
    main()
| 58.46114 | 119 | 0.660197 |
61d24122d7792980c0b72c95b9dc3ec6c9efd631 | 2,282 | py | Python | data/external/repositories_2to3/253384/national_data_science_bowl_2-master/alexcode/code/model.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/253384/national_data_science_bowl_2-master/alexcode/code/model.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/253384/national_data_science_bowl_2-master/alexcode/code/model.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z |
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.core import Activation, Dense, Flatten, Dropout
from keras.optimizers import Adam
from keras.regularizers import l2
from keras import backend as K
def center_normalize(x):
    """
    Custom activation for online sample-wise center and std. normalization
    """
    mean = K.mean(x)
    std = K.std(x)
    return (x - mean) / std
def get_model():
    """Build and compile the CNN regressor: (45, 64, 64) input -> 1 scalar."""
    model = Sequential()
    # Per-sample standardization done inside the network (see center_normalize).
    model.add(Activation(activation=center_normalize, input_shape=(45, 64, 64)))

    # Block 1: two 3x3 convs (64 filters), pad then halve spatial dims.
    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.25))

    # Block 2: same structure with 96 filters.
    model.add(Convolution2D(96, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(96, 3, 3, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.25))

    # Block 3: two 2x2 convs (128 filters), no extra padding.
    model.add(Convolution2D(128, 2, 2, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(128, 2, 2, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.25))

    # Block 4: two 2x2 convs (256 filters).
    model.add(Convolution2D(256, 2, 2, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(256, 2, 2, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.25))

    # Block 5: single 2x2 conv (512 filters).
    model.add(Convolution2D(512, 2, 2, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.25))

    # Regression head: L2-regularized dense layer, then a single linear output.
    model.add(Flatten())
    model.add(Dense(1024, W_regularizer=l2(3e-3)))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))

    adam = Adam(lr=0.0001)
    # NOTE(review): 'rmse' is only a valid loss name in very old Keras
    # releases (border_mode/W_regularizer suggest Keras <= 1.x); confirm
    # against the pinned Keras version, otherwise use 'mse'.
    model.compile(optimizer=adam, loss='rmse')
    return model
| 34.575758 | 82 | 0.659509 |
61d29e48cb817ece86e476bffbf91b00d5532c33 | 8,685 | py | Python | BuildDeb.py | KOLANICH/GraalVM_deb_packages_CI | f41786b4daa11efebe24402f5000111137365b4f | [
"Apache-2.0",
"Unlicense"
] | null | null | null | BuildDeb.py | KOLANICH/GraalVM_deb_packages_CI | f41786b4daa11efebe24402f5000111137365b4f | [
"Apache-2.0",
"Unlicense"
] | null | null | null | BuildDeb.py | KOLANICH/GraalVM_deb_packages_CI | f41786b4daa11efebe24402f5000111137365b4f | [
"Apache-2.0",
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import sys
import struct
import re
import os
from itertools import chain
import warnings
import tarfile
import sh
from tqdm import tqdm
from pydebhelper import *
from getLatestVersionAndURLWithGitHubAPI import getTargets
def genGraalProvides(start=6, end=8):  # java 12 still not supported yet
    """Build the debian ``Provides:`` list for the GraalVM package.

    Covers generic JRE/compiler names plus, for every Java major version
    in [start, end], the openjdk/java runtime and sdk virtual packages.
    """
    provides = ["default-jre", "default-jre-headless", "java-compiler"]
    for version in range(start, end + 1):
        v = str(version)
        provides.extend([
            "openjdk-" + v + "-jre",
            "openjdk-" + v + "-jre-headless",
            "java" + v + "-runtime",
            "java" + v + "-runtime-headless",
            "java" + v + "-sdk-headless",
        ])
    return provides
# Packaging manifest: for each GraalVM sub-package, its debian description,
# homepage, and the files to "rip" out of the unpacked GraalVM tree.
# "bin" entries are looked up under both bin/ and jre/bin/; "other" entries
# are raw paths relative to the unpacked root (see ripGraalPackage).
config = OrderedDict()
config["llvm"] = {
    "descriptionLong": "LLVM engine for GraalVM",
    "homepage": "https://www.graalvm.org/docs/reference-manual/languages/llvm/",
    "rip": {
        "bin": ["lli"],
        "other": ["jre/languages/llvm"]
    }
}
config["js"] = {
    "descriptionLong": "JavaScript engine & node.js runtime for GraalVM",
    "homepage": "https://www.graalvm.org/docs/reference-manual/languages/js/",
    "rip": {
        "bin": ["js", "node", "npm"],
        "other": ["jre/languages/js", "jre/lib/graalvm/graaljs-launcher.jar"]
    }
}
config["python"] = {
    "descriptionLong": "python runtime for GraalVM",
    "homepage": "https://www.graalvm.org/docs/reference-manual/languages/python/",
    "rip": {
        "bin": ["graalpython"],
        "other": ["jre/languages/python", "jre/lib/graalvm/graalpython-launcher.jar", "LICENSE_GRAALPYTHON", "jre/languages/python/LICENSE_GRAALPYTHON"]
    }
}
config["ruby"] = {
    "descriptionLong": "ruby runtime for GraalVM",
    "homepage": "https://www.graalvm.org/docs/reference-manual/languages/ruby/",
    "rip": {
        "bin": ["truffleruby", "ruby", "bundle", "bundler", "gem", "irb", "rake", "rdoc", "ri"],
        "other": ["jre/languages/ruby", "jre/lib/boot/truffleruby-services.jar", "jre/lib/graalvm/truffleruby-launcher.jar", "LICENSE_TRUFFLERUBY.md", "3rd_party_licenses_truffleruby.txt"]
    }
}
config["r"] = {
    "descriptionLong": "R runtime for GraalVM",
    "homepage": "https://www.graalvm.org/docs/reference-manual/languages/R/",
    "rip": {
        "bin": ["R", "Rscript"],
        "other": ["jre/languages/R", "LICENSE_FASTR", "3rd_party_licenses_fastr.txt"]
    }
}
config["gu"] = {
    "descriptionLong": "Package manager for GraalVM",
    "homepage": "https://www.graalvm.org/docs/reference-manual/graal-updater/",
    "rip": {
        "bin": ["gu"],
        "other": ["jre/lib/installer", "bin/gu"]
    }
}
config["polyglot"] = {
    "descriptionLong": "Polyglot for GraalVM",
    "homepage": "https://www.graalvm.org/docs/reference-manual/polyglot/",
    "rip": {
        "bin": ["polyglot"],
        "other": ["jre/lib/polyglot"]
    }
}
config["samples"] = {
    "descriptionLong": "Example code for GraalVM",
    "homepage": "https://www.graalvm.org/",
    "rip": {
        "other": ["sample"]
    }
}
config["visualvm"] = {
    "descriptionLong": "VisualVM for GraalVM",
    "homepage": "https://www.graalvm.org/docs/reference-manual/tools/#heap-viewer",
    "rip": {
        "bin": ["jvisualvm"],
        "other": ["lib/visualvm"]
    }
}
def removeUnneededSources(unpackedDir):
    """Delete Java source archives (src.zip / *.src.zip) anywhere under
    *unpackedDir* to slim down the binary packages."""
    doomed = list(unpackedDir.glob("**/src.zip"))
    doomed += unpackedDir.glob("**/*.src.zip")
    for sourceArchive in doomed:
        sourceArchive.unlink()
def ripGraalPackage(unpackedDir, packagesDir, version, maintainer, builtDir):
    """Split the unpacked GraalVM tree into one debian package per component
    (per the module-level ``config`` manifest) plus the main ``graalvm`` one.

    Returns the list of built Package objects (components first, main last).
    """
    mainPackageName = "graalvm"
    systemPrefix = "usr/lib/jvm/graalvm-ce-amd64"
    removeUnneededSources(unpackedDir)
    results = []
    for pkgPostfix, pkgCfg in config.items():
        # Shallow-copy the manifest entry so the module-level config is untouched.
        pkgCfg = type(pkgCfg)(pkgCfg)
        rip = pkgCfg["rip"]
        del pkgCfg["rip"]
        with Package(mainPackageName + "-" + pkgPostfix, packagesDir, version=version, section="java", maintainer=maintainer, builtDir=builtDir, **pkgCfg) as pkg:
            if "other" in rip:
                for el in rip["other"]:
                    pkg.rip(unpackedDir / el, systemPrefix + "/" + el)
            if "bin" in rip:
                for el in rip["bin"]:
                    # Launchers may live under bin/ and/or jre/bin/; take both.
                    a = "bin/" + el
                    aUnp = unpackedDir / a
                    if aUnp.exists() or aUnp.is_symlink():
                        pkg.rip(aUnp, systemPrefix + "/" + a)
                    else:
                        warnings.warn(str(aUnp) + " doesn't exist")
                    b = "jre/" + a
                    bUnp = unpackedDir / b
                    # BUG FIX: the original tested `aUnp` here (copy-paste),
                    # so the jre/bin variant was ripped or warned about based
                    # on the wrong path's existence.
                    if bUnp.exists() or bUnp.is_symlink():
                        pkg.rip(bUnp, systemPrefix + "/" + b)
                    else:
                        warnings.warn(str(bUnp) + " doesn't exist")
        results.append(pkg)
    # Everything still left in the tree goes into the main graalvm package.
    with Package(mainPackageName, packagesDir, version=version, section="java", homepage="https://github.com/oracle/graal/releases", provides=genGraalProvides(), descriptionShort="graalvm", descriptionLong="GraalVM is a high-performance, embeddable, polyglot virtual machine for running applications written in JavaScript, Python, Ruby, R, JVM-based languages like Java, Scala, Kotlin, and LLVM-based languages such as C and C++. \nAdditionally, GraalVM allows efficient interoperability between programming languages and compiling Java applications ahead-of-time into native executables for faster startup time and lower memory overhead.", maintainer=maintainer, builtDir=builtDir) as graalVM:
        graalVM.rip(unpackedDir, systemPrefix)
    results.append(graalVM)
    return results
def isSubdir(parent: Path, child: Path) -> bool:
    """Return True if *child* (after resolution) is *parent* itself or lies
    inside it; False otherwise.

    Both paths are made absolute and resolved first, so ``..`` components
    and symlinks are collapsed before the containment check.
    """
    parent = parent.absolute().resolve()
    try:
        child = child.absolute().resolve().relative_to(parent)
    except ValueError:
        # BUG FIX: relative_to() raises ValueError when child is not located
        # under parent; the original let that propagate instead of returning
        # False, crashing the path-traversal check it implements.
        return False
    for p in child.parts:
        if p == "..":
            return False
    return True
def unpack(archPath, extrDir):
    """Extract the .tar.gz at *archPath* into *extrDir* with a progress bar."""
    extrDir = extrDir.resolve()
    packedSize = archPath.stat().st_size
    with archPath.open("rb") as arch:
        arch.seek(packedSize - 4)
        # gzip trailer ISIZE field: uncompressed size modulo 2**32 — used only
        # as the progress-bar total (archives > 4 GiB would under-report).
        unpackedSize = struct.unpack("<I", arch.read(4))[0]
    with tarfile.open(archPath, "r:gz") as arch:
        with tqdm(total=unpackedSize, unit="B", unit_divisor=1024, unit_scale=True) as pb:
            for f in arch:
                fp = (extrDir / f.name).absolute()
                # Path-traversal guard: only extract members staying inside extrDir.
                if isSubdir(extrDir, fp):
                    # Replace any pre-existing file/symlink so re-runs are idempotent.
                    if fp.is_file() or fp.is_symlink():
                        fp.unlink()
                    fp.parent.mkdir(parents=True, exist_ok=True)
                    # NOTE(review): symlink members are extracted as-is — their
                    # link *targets* are not validated against extrDir; confirm
                    # the upstream tarballs are trusted.
                    arch.extract(f, extrDir, set_attrs=True)
                    pb.set_postfix(file=str(fp.relative_to(extrDir)), refresh=False)
                    pb.update(f.size)
# /proc/<pid>/fd lets the sandboxed aria2c read from our anonymous pipe (see download()).
currentProcFileDescriptors = Path("/proc") / str(os.getpid()) / "fd"
# External tools run inside firejail; keep our fd directory visible to them.
fj = sh.firejail.bake(noblacklist=str(currentProcFileDescriptors), _fg=True)
# Preconfigured aria2c: resumable, 16-way parallel downloads with preallocation.
aria2c = fj.aria2c.bake(_fg=True, **{"continue": "true", "check-certificate": "true", "enable-mmap": "true", "optimize-concurrent-downloads": "true", "j": 16, "x": 16, "file-allocation": "falloc"})
def download(targets):
    """Download all *targets* with aria2c, feeding it a generated input file
    through an anonymous pipe exposed via /proc/<pid>/fd.

    Parameters
    ----------
    targets : mapping
        {destination path: source URI}.
    """
    args = []
    for dst, uri in targets.items():
        # aria2c input-file format: a URI line followed by an indented "out=" option.
        args += [uri, linesep, " ", "out=", str(dst), linesep]
    pO, pI = os.pipe()
    with os.fdopen(pI, "w") as pIF:
        pIF.write("".join(args))
        pIF.flush()
    try:
        aria2c(**{"input-file": str(currentProcFileDescriptors / str(pO))})
    finally:
        os.close(pO)
        try:
            os.close(pI)
        except OSError:
            # pI was already closed by the fdopen context manager above;
            # the original used a bare `except:` here, which would also
            # swallow KeyboardInterrupt/SystemExit.
            pass
# Matches GraalVM release tags like "vm-1.0.0-rc16", capturing the version.
# NOTE(review): the inner "(?:-rc\d+)" part is NOT optional, so plain tags
# such as "vm-19.0.0" do not match at all and would be skipped — presumably
# "(?:-rc\d+)?" was intended; confirm against the repository's tag scheme.
vmTagRx = re.compile("^vm-((?:\\d+\\.){2}\\d+(?:-rc\\d+))?$")
# Release-title and asset-name fragments used to select linux-amd64 artifacts.
vmTitleMarker = "GraalVM Community Edition .+$"
platformMarker = "linux-amd64"
versionFileNameMarker = "[\\w\\.-]+"
releaseFileNameMarker = versionFileNameMarker + "-" + platformMarker
def getLatestGraalVMRelease():
    """Return the newest GraalVM CE GitHub release shipping a linux-amd64 tarball."""
    titleRx = re.compile("^" + vmTitleMarker)
    tarballRx = re.compile("^" + releaseFileNameMarker + "\\.tar\\.gz$")
    return max(getTargets("oracle/graal", titleRx, vmTagRx, tarballRx))
def getLatestGraalRuntimeRelease(repoPath):
    """Return the newest release of *repoPath* shipping a linux-amd64
    installable-ce jar (a GraalVM language-runtime component)."""
    titleRx = re.compile(".+- " + vmTitleMarker)
    jarRx = re.compile(".+installable-ce-" + releaseFileNameMarker + "\\.jar$")
    return max(getTargets(repoPath, titleRx, vmTagRx, jarRx))
def doBuild():
    """End-to-end build: download the latest GraalVM and language runtimes,
    unpack, install the runtimes with `gu`, rip into debian packages and
    publish them into the apt repository tree."""
    thisDir = Path(".")
    downloadDir = Path(thisDir / "downloads")
    archPath = Path(downloadDir / "graalvm-github.tar.gz")
    unpackDir = thisDir / "graalvm-unpacked"
    packagesRootsDir = thisDir / "packagesRoots")
    builtDir = thisDir / "packages"
    repoDir = thisDir / "public" / "repo"
    selT = getLatestGraalVMRelease()
    print("Selected release:", selT, file=sys.stderr)
    # Language runtimes distributed as separate installable jars.
    runtimesRepos = {"python": "graalvm/graalpython", "ruby": "oracle/truffleruby", "R": "oracle/fastr"}
    runtimeReleases = {k: getLatestGraalRuntimeRelease(v) for k, v in runtimesRepos.items()}
    runtimeFiles = {(downloadDir / (k + ".jar")): v.uri for k, v in runtimeReleases.items()}
    downloadTargets = {archPath: selT.uri, **runtimeFiles}
    download(downloadTargets)
    unpack(archPath, unpackDir)
    graalUnpackedRoot = unpackDir / ("graalvm-ce-" + selT.version)
    # Install the downloaded runtime jars locally (-L) with GraalVM's updater.
    guCmd = fj.bake(str(graalUnpackedRoot / "bin/gu"), _fg=True)
    guCmd("-L", "install", *runtimeFiles.keys())
    builtDir.mkdir(parents=True, exist_ok=True)
    maintainer = Maintainer()
    pkgs = ripGraalPackage(graalUnpackedRoot, packagesRootsDir, selT.version, maintainer=maintainer, builtDir=builtDir)
    for pkg in pkgs:
        pkg.build()
    with Repo(root=repoDir, descr=maintainer.name + "'s repo for apt with GraalVM binary packages, built from the official builds on GitHub") as r:
        for pkg in pkgs:
            r += pkg
        print(r.packages2add)


if __name__ == "__main__":
    doBuild()
61d2ae9ec01343c7273afc66fcb5912f5895801a | 6,267 | py | Python | mergify_engine/utils.py | Madhu-1/mergify-engine | 9ca4f4697cc825230b1584f5587f10393cabc971 | [
"Apache-2.0"
] | null | null | null | mergify_engine/utils.py | Madhu-1/mergify-engine | 9ca4f4697cc825230b1584f5587f10393cabc971 | [
"Apache-2.0"
] | null | null | null | mergify_engine/utils.py | Madhu-1/mergify-engine | 9ca4f4697cc825230b1584f5587f10393cabc971 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
#
# Copyright © 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import hashlib
import hmac
import logging
import shutil
import subprocess
import sys
import tempfile
from billiard import current_process
import celery.app.log
import daiquiri
import github
import redis
from mergify_engine import config
# Module-level logger tagged with this module's name.
LOG = daiquiri.getLogger(__name__)
# Process-wide redis client for the cache storage, created lazily below.
# FIX: the original preceded this with a module-level `global` statement,
# which is a no-op at module scope and was removed.
REDIS_CONNECTION_CACHE = None


def get_redis_for_cache():
    """Return the process-wide redis client for the cache storage,
    creating and naming it on first use."""
    global REDIS_CONNECTION_CACHE
    if REDIS_CONNECTION_CACHE is None:
        REDIS_CONNECTION_CACHE = redis.StrictRedis.from_url(
            config.STORAGE_URL, decode_responses=True,
        )
        p = current_process()
        # Name the connection after the worker process (visible in CLIENT LIST).
        REDIS_CONNECTION_CACHE.client_setname("cache:%s" % p.name)
    return REDIS_CONNECTION_CACHE
# Process-wide redis client for the HTTP cache, created lazily below.
# FIX: the original preceded this with a module-level `global` statement,
# which is a no-op at module scope and was removed.
REDIS_CONNECTION_HTTP_CACHE = None


def get_redis_for_http_cache():
    """Return the process-wide redis client for the HTTP cache storage,
    creating and naming it on first use."""
    global REDIS_CONNECTION_HTTP_CACHE
    if REDIS_CONNECTION_HTTP_CACHE is None:
        REDIS_CONNECTION_HTTP_CACHE = redis.StrictRedis.from_url(config.HTTP_CACHE_URL)
        p = current_process()
        # Name the connection after the worker process (visible in CLIENT LIST).
        REDIS_CONNECTION_HTTP_CACHE.client_setname("http-cache:%s" % p.name)
    return REDIS_CONNECTION_HTTP_CACHE
def utcnow():
    """Return the current time as a timezone-aware UTC datetime."""
    return datetime.datetime.now(datetime.timezone.utc)
def unicode_truncate(s, length, encoding="utf-8"):
    """Truncate *s* so that its encoded form fits in *length* bytes.

    A character whose encoding would be cut in half is dropped entirely
    (decode errors are ignored).

    :param s: The string to truncate.
    :param length: The length in number of bytes — not characters.
    """
    encoded = s.encode(encoding)[:length]
    return encoded.decode(encoding, errors="ignore")
class CustomFormatter(
    daiquiri.formatter.ColorExtrasFormatter, celery.app.log.TaskFormatter
):
    # Combines daiquiri's colored "extras" rendering with celery's task-aware
    # formatter so a single log line can carry structured extras and the
    # current task id (see CELERY_EXTRAS_FORMAT / setup_logging below).
    pass
# Log-line format consumed by CustomFormatter: timestamp, pid, level,
# celery task id, logger name + daiquiri extras, then the message.
CELERY_EXTRAS_FORMAT = (
    "%(asctime)s [%(process)d] %(color)s%(levelname)-8.8s "
    "[%(task_id)s] "
    "%(name)s%(extras)s: %(message)s%(color_stop)s"
)
def GithubPullRequestLog(self):
    # Bound as a property on PyGithub's PullRequest (see patch below): gives
    # every pull request a logger pre-tagged with owner/repo/branch/PR extras.
    return daiquiri.getLogger(
        __name__,
        gh_owner=self.base.user.login,
        gh_repo=self.base.repo.name,
        gh_private=self.base.repo.private,
        gh_branch=self.base.ref,
        gh_pull=self.number,
        gh_pull_url=self.html_url,
        # "merged" wins; otherwise GitHub's mergeable_state, or "none" if unset.
        gh_pull_state=("merged" if self.merged else (self.mergeable_state or "none")),
    )


# Monkey-patch: expose the tagged logger as `pull_request.log` on all PyGithub PRs.
github.PullRequest.PullRequest.log = property(GithubPullRequestLog)
def setup_logging():
    """Configure daiquiri outputs (stdout/datadog) and per-library log levels."""
    outputs = []
    if config.LOG_STDOUT:
        outputs.append(
            daiquiri.output.Stream(
                sys.stdout,
                formatter=CustomFormatter(fmt=CELERY_EXTRAS_FORMAT),
                level=config.LOG_STDOUT_LEVEL,
            )
        )
    if config.LOG_DATADOG:
        outputs.append(daiquiri.output.Datadog())
    daiquiri.setup(
        outputs=outputs, level=(logging.DEBUG if config.DEBUG else logging.INFO),
    )
    # Quiet the chattiest third-party loggers.
    daiquiri.set_default_log_levels(
        [
            ("celery", "INFO"),
            ("kombu", "WARN"),
            ("github.Requester", "WARN"),
            ("urllib3.connectionpool", "WARN"),
            ("urllib3.util.retry", "WARN"),
            ("vcr", "WARN"),
            ("httpx", "WARN"),
            ("cachecontrol", "WARN"),
        ]
    )
    config.log()
def compute_hmac(data):
    """Return the hex SHA1-HMAC of *data* (bytes), keyed with the webhook secret."""
    secret = config.WEBHOOK_SECRET.encode("utf8")
    mac = hmac.new(secret, msg=data, digestmod=hashlib.sha1)
    return str(mac.hexdigest())
def get_github_pulls_from_sha(repo, sha):
    """List the pull requests of *repo* that contain commit *sha*.

    Returns [] when the commit is unknown or the API rejects it (404/422).
    """
    try:
        return list(
            github.PaginatedList.PaginatedList(
                github.PullRequest.PullRequest,
                repo._requester,
                "%s/commits/%s/pulls" % (repo.url, sha),
                None,
                # The commits->pulls endpoint requires this preview media type.
                headers={"Accept": "application/vnd.github.groot-preview+json"},
            )
        )
    except github.GithubException as e:
        if e.status in [404, 422]:
            return []
        raise
class Gitter(object):
    """Run git commands inside a dedicated throwaway working directory."""

    def __init__(self):
        # One temporary work tree per instance; removed by cleanup().
        self.tmp = tempfile.mkdtemp(prefix="mergify-gitter")
        LOG.info("working in: %s", self.tmp)

    def __call__(self, *args, **kwargs):  # pragma: no cover
        """Execute ``git *args`` in the work tree and return its combined output."""
        LOG.info("calling: %s", " ".join(args))
        kwargs["cwd"] = self.tmp
        kwargs["stderr"] = subprocess.STDOUT
        try:
            return subprocess.check_output(["git"] + list(args), **kwargs)
        except subprocess.CalledProcessError as e:
            # Surface the failing command's output before propagating.
            LOG.info("output: %s", e.output)
            raise

    def cleanup(self):
        """Stop this instance's credential-cache daemon and delete the work tree."""
        LOG.info("cleaning: %s", self.tmp)
        try:
            self("credential-cache", "--socket=%s/.git/creds/socket" % self.tmp, "exit")
        except subprocess.CalledProcessError:  # pragma: no cover
            LOG.warning("git credential-cache exit fail")
        shutil.rmtree(self.tmp)

    def configure(self):
        """Set the bot identity and enable a per-instance credential cache."""
        self("config", "user.name", "%s-bot" % config.CONTEXT)
        self("config", "user.email", config.GIT_EMAIL)
        # Use one git cache daemon per Gitter
        self("config", "credential.useHttpPath", "true")
        self(
            "config",
            "credential.helper",
            "cache --timeout=300 --socket=%s/.git/creds/socket" % self.tmp,
        )

    def add_cred(self, username, password, path):
        """Pre-approve credentials for one repository path in the cache daemon."""
        domain = config.GITHUB_DOMAIN
        self(
            "credential",
            "approve",
            input=(
                "url=https://%s:%s@%s/%s\n\n" % (username, password, domain, path)
            ).encode("utf8"),
        )
@contextlib.contextmanager
def ignore_client_side_error():
    """Context manager swallowing GitHub 4xx (client-side) errors.

    Server-side (5xx) and non-GitHub exceptions still propagate.
    """
    try:
        yield
    except github.GithubException as e:
        if 400 <= e.status < 500:
            return
        raise
def Github(*args, **kwargs):
    """Build a PyGithub client pinned to the configured GitHub API domain."""
    api_base = "https://api.%s" % config.GITHUB_DOMAIN
    kwargs["base_url"] = api_base
    return github.Github(*args, **kwargs)
| 27.977679 | 88 | 0.6322 |
61d440e6d71c032e6b0102e0319c9ad174f35ff4 | 1,750 | py | Python | milefrienddb/models/vehicles.py | jcrjaci/mil_test | ed54f55c5aacd8ffd110b7c173422dbd0cac631f | [
"MIT"
] | null | null | null | milefrienddb/models/vehicles.py | jcrjaci/mil_test | ed54f55c5aacd8ffd110b7c173422dbd0cac631f | [
"MIT"
] | null | null | null | milefrienddb/models/vehicles.py | jcrjaci/mil_test | ed54f55c5aacd8ffd110b7c173422dbd0cac631f | [
"MIT"
] | null | null | null | """Vehicle's app models."""
import uuid
from django.db import models
from .clients import Client
class Vehicle(models.Model):
    """Model representing a vehicle."""

    # Upload targets for the certificate/photo file fields below.
    road_worthiness_path = 'vehicles/certs/road_worthiness'
    ownership_path = 'vehicles/certs/ownership'
    photo_path = 'vehicles/photos'

    # Random UUID primary key, fixed at creation time.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Owning client; nullable. NOTE(review): no on_delete argument — this
    # assumes Django < 2.0 (where it defaulted to CASCADE); confirm.
    client = models.ForeignKey(Client, null=True)
    make = models.CharField(max_length=20)
    model = models.CharField(max_length=20)
    year = models.IntegerField(null=True)
    license_plate_number = models.CharField(max_length=20)
    # Identifier of the attached GPS/telematics tracker device.
    tracker_id = models.CharField(max_length=64)
    car_value = models.FloatField(null=True)
    # Uploaded certificates and photo.
    cert_road_worthiness = models.FileField(upload_to=road_worthiness_path)
    cert_ownership = models.FileField(upload_to=ownership_path)
    policy_number = models.CharField(max_length=255)
    photo = models.FileField(upload_to=photo_path)
    # Insurance details.
    date_insurance = models.DateTimeField(null=True)
    premium_paid = models.FloatField(null=True)
    bonus_paid = models.FloatField(null=True)
    net_premium = models.FloatField(null=True)
    # Accumulated usage counters (units per field names: meters / minutes).
    driven_meters = models.IntegerField(default=0)
    driven_minutes = models.IntegerField(default=0)
    total_fuel_consumption = models.FloatField(null=True, blank=True)
    car_health = models.TextField(null=True)
    # Auto-managed audit timestamps.
    date_created = models.DateTimeField(auto_now_add=True)
    date_updated = models.DateTimeField(auto_now=True)

    def __str__(self):
        """String representation of the object."""
        return "{0}, {1}, {2}".format(self.make, self.model, self.license_plate_number)

    class Meta:
        db_table = 'vehicles_vehicle'
        app_label = 'milefrienddb'
| 38.043478 | 87 | 0.737714 |
61d6182a3cde9be8c7c4791931417d4e0d9e7b55 | 187 | py | Python | ejercicio_4.py | Laurardila440/taller-de-secuencias | 9db216d2431661e0777273fc5b8360a316d7dbd2 | [
"Apache-2.0"
] | null | null | null | ejercicio_4.py | Laurardila440/taller-de-secuencias | 9db216d2431661e0777273fc5b8360a316d7dbd2 | [
"Apache-2.0"
] | null | null | null | ejercicio_4.py | Laurardila440/taller-de-secuencias | 9db216d2431661e0777273fc5b8360a316d7dbd2 | [
"Apache-2.0"
] | null | null | null | """
Entradas
compra --> float --> c
Salidas
total a pagar con descuento --> float --> total
"""
def compute_total(purchase, discount_rate=0.15):
    """Return the amount to pay after applying *discount_rate* to *purchase*."""
    discount = purchase * discount_rate
    return purchase - discount


if __name__ == "__main__":
    # Read the purchase amount, apply the 15% discount and show the total.
    c = float(input("digite compra"))
    total = compute_total(c)
    print("el total a pagar es de :" + str(total))
| 14.384615 | 44 | 0.641711 |
61d6aa3833e84422d5bd54157900ea1d35ffca0b | 878 | py | Python | 429.py | geethakamath18/Leetcode | 8e55e0a47ee35ed100b30dda6682c7ce1033d4b2 | [
"MIT"
] | null | null | null | 429.py | geethakamath18/Leetcode | 8e55e0a47ee35ed100b30dda6682c7ce1033d4b2 | [
"MIT"
] | null | null | null | 429.py | geethakamath18/Leetcode | 8e55e0a47ee35ed100b30dda6682c7ce1033d4b2 | [
"MIT"
] | null | null | null | #LeetCode problem 429: N-ary Tree Level Order Traversal
"""
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
class Solution:
    """Level-order traversal of an N-ary tree (LeetCode 429)."""

    def levelOrder(self, root: 'Node') -> List[List[int]]:
        """Return node values grouped by depth, top to bottom.

        Collects each level with a separate pre-order pass, so the overall
        cost is O(n * h) for n nodes and height h. An empty tree yields [].
        """
        res = []
        h = self.getHeight(root)
        for i in range(1, h + 1):
            a = []
            self.getLevelOrder(root, a, i)
            res.append(a)
        return res

    def getHeight(self, root: 'Node') -> int:
        """Return the height of the tree (0 for an empty tree)."""
        if root is None:
            return 0
        m = 1
        for child in root.children:
            m = max(self.getHeight(child) + 1, m)
        return m

    def getLevelOrder(self, root: 'Node', l: List, level: int) -> None:
        """Append the values of all nodes at 1-indexed depth ``level`` to ``l``."""
        if level == 1:
            l.append(root.val)
            # Fix: stop here. The original fell through and kept recursing,
            # pointlessly walking the entire subtree below every reported node.
            return
        for child in root.children:
            self.getLevelOrder(child, l, level - 1)
| 26.606061 | 61 | 0.544419 |
61d7cc4850de782acf97ce8fd6bae60d5d5eb06f | 544 | py | Python | PyhonServer/app-client.py | sixfourtwo/auhack19 | 65b94c6cbdbfbd50e355c12b8ca2792b3b086321 | [
"Apache-2.0"
] | null | null | null | PyhonServer/app-client.py | sixfourtwo/auhack19 | 65b94c6cbdbfbd50e355c12b8ca2792b3b086321 | [
"Apache-2.0"
] | null | null | null | PyhonServer/app-client.py | sixfourtwo/auhack19 | 65b94c6cbdbfbd50e355c12b8ca2792b3b086321 | [
"Apache-2.0"
] | null | null | null | # importing the requests library
import requests
import json
# api-endpoint
URL = "http://127.0.0.1:80/water_mark"
# defining a params dict for the parameters to be sent to the API
# data is picture data
# tagString is the text to embed into picture.
data = {
"data":"This is the original text",
"tagString":" Yesyesyes"
}
PARAMS = json.dumps(data)
rPost = requests.post(url = URL, data = PARAMS) # kør det med JSON
data1 = json.loads(rPost.text)
#print("waterMarked data: " + rPost.text )
print("DATA: \n" + data1["data"])
| 22.666667 | 66 | 0.6875 |
61d90f523acdcf1af2ba8df7242ffe2e2fdeac93 | 9,827 | py | Python | memnet.py | 404akhan/memnet | a8cf9e0a480575d9d36de6fa3357f667d64e0b05 | [
"BSD-3-Clause"
] | 1 | 2018-02-01T05:17:13.000Z | 2018-02-01T05:17:13.000Z | memnet.py | 404akhan/memnet | a8cf9e0a480575d9d36de6fa3357f667d64e0b05 | [
"BSD-3-Clause"
] | null | null | null | memnet.py | 404akhan/memnet | a8cf9e0a480575d9d36de6fa3357f667d64e0b05 | [
"BSD-3-Clause"
] | null | null | null | import torch
import torch.nn.functional as F
import torch.nn.init as init
from torch import nn, autograd
from torch.utils.data import DataLoader
from babi import BabiDataset, pad_collate
from torch.nn.utils import clip_grad_norm
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.fastest = True
class MemoryCell(nn.Module):
    """Entity-network memory cell: gated, key-conditioned updates of memory slots.

    For each input sentence, every slot computes a sigmoid gate from its
    agreement with the sentence, blends in a PReLU candidate built from the
    U/V/W projections, and re-normalizes to unit L2 norm.
    """

    def __init__(self, num_mem_slots, embed_dim):
        super(MemoryCell, self).__init__()
        self.num_mem_slots = num_mem_slots
        self.embed_dim = embed_dim
        # Memory update linear layers (creation order kept stable so seeded
        # initialization stays reproducible).
        self.U = nn.Linear(embed_dim, embed_dim)
        self.V = nn.Linear(embed_dim, embed_dim, bias=False)
        self.W = nn.Linear(embed_dim, embed_dim, bias=False)
        self.prelu_memory = nn.PReLU(init=1)
        init.xavier_normal(self.U.weight)
        init.xavier_normal(self.V.weight)
        init.xavier_normal(self.W.weight)

    def forward(self, inputs, keys):
        """Run the cell over a sequence of sentence embeddings.

        inputs: iterable of (batch, embed_dim) sentence tensors (iterated on
        dim 0); keys/memories: (batch * num_mem_slots, embed_dim).
        Returns the final memories with the same shape as ``keys``.
        """
        memories = keys
        for sentence in inputs:
            # Tile the sentence so it lines up with every memory slot.
            sentence = sentence.unsqueeze(1).repeat(1, self.num_mem_slots, 1)
            sentence = sentence.view_as(memories)
            # Fix: keepdim=True — on modern PyTorch sum() drops the reduced
            # dim, and a (N,) tensor cannot expand_as a (N, embed) tensor.
            memory_gates = F.sigmoid((sentence * (memories + keys)).sum(dim=-1, keepdim=True))
            memory_gates = memory_gates.expand_as(memories)
            candidate_memories = self.prelu_memory(self.U(memories) + self.V(sentence) + self.W(keys))
            updated_memories = memories + memory_gates * candidate_memories
            # Re-normalize each slot to unit length (keepdim for the same reason).
            updated_memories = updated_memories / (
                updated_memories.norm(p=2, dim=-1, keepdim=True).expand_as(updated_memories) + 1e-12)
            memories = updated_memories
        return memories
class RecurrentEntityNetwork(nn.Module):
    """Recurrent Entity Network over bAbI-style stories.

    Embeds word-level contexts/questions, pools them with learned position
    masks, runs MemoryCell over the sentence sequence, attends to the memory
    slots with the question, and scores the vocabulary.

    NOTE(review): ``forward`` hard-codes ``.cuda()`` for the key tensor, so
    this module only runs on a GPU as written.
    """

    def __init__(self, hidden_dim, max_num_sentences=150, vocab_size=50):
        super(RecurrentEntityNetwork, self).__init__()
        self.max_num_sentences = max_num_sentences
        self.embed_dim = hidden_dim
        self.num_mem_slots = 20
        self.vocab_size = vocab_size
        # Learned per-position pooling masks (reset to all-ones at the bottom).
        self.memory_mask = nn.Parameter(torch.randn(max_num_sentences, 1))
        self.question_mask = nn.Parameter(torch.randn(max_num_sentences, 1))
        # Rows beyond vocab_size hold the trainable memory-slot key embeddings.
        self.embedding = nn.Embedding(vocab_size + self.num_mem_slots, hidden_dim, padding_idx=0)
        init.uniform(self.embedding.weight, a=-(3 ** 0.5), b=3 ** 0.5)
        self.cell = MemoryCell(self.num_mem_slots, hidden_dim)
        # Fully connected linear layers.
        self.C = nn.Linear(hidden_dim, hidden_dim)
        self.H = nn.Linear(hidden_dim, hidden_dim, bias=False)
        self.Z = nn.Linear(hidden_dim, vocab_size, bias=False)
        self.prelu_outputs = nn.ReLU()
        # Initialize weights.
        init.xavier_normal(self.C.weight)
        init.xavier_normal(self.H.weight)
        init.xavier_normal(self.Z.weight)
        self.memory_mask.data.fill_(1)
        self.question_mask.data.fill_(1)

    def forward(self, contexts, questions):
        """Score each vocabulary entry as the answer for the given batch.

        Presumably contexts is (batch, sentences, words) and questions is
        (batch, words) of token ids — confirm against the data loader.
        """
        batch_size, context_length, context_num_words = contexts.size()
        _, question_length = questions.size()
        # List of sentence embeddings for every story in a batch. (num. sentences, batch size, encoder dim.)
        contexts = self.embedding(contexts.view(batch_size, -1))
        contexts = contexts.view(batch_size, context_length, context_num_words, -1)
        questions = self.embedding(questions)
        memory_mask = self.memory_mask[:context_length].unsqueeze(0).unsqueeze(2).expand(*contexts.size())
        question_mask = self.question_mask[:question_length].unsqueeze(0).expand(*questions.size())
        # NOTE(review): .squeeze().t() assumes the pooled tensor is exactly 2-D
        # here; a batch of 1 would squeeze away the batch axis — confirm upstream.
        memory_inputs = torch.sum(contexts * memory_mask, dim=2).squeeze().t()
        question_inputs = torch.sum(questions * question_mask, dim=1).squeeze()
        # Compute memory updates.
        keys = torch.arange(self.vocab_size, self.vocab_size + self.num_mem_slots)
        keys = torch.autograd.Variable(keys.unsqueeze(0).expand(batch_size, self.num_mem_slots).long().cuda())
        keys = self.embedding(keys).view(batch_size * self.num_mem_slots, -1)
        network_graph = self.cell(memory_inputs, keys)
        network_graph = self.C(network_graph).view(batch_size, self.num_mem_slots, self.embed_dim)
        # Apply attention to the entire acyclic graph using the questions.
        attention_energies = network_graph * question_inputs.unsqueeze(1).expand_as(network_graph)
        attention_energies = attention_energies.sum(dim=-1)
        # NOTE(review): F.softmax without dim= relies on legacy implicit-dim
        # behaviour of old PyTorch — confirm the intended axis before upgrading.
        attention_weights = F.softmax(attention_energies).expand_as(network_graph)
        attended_network_graph = (network_graph * attention_weights).sum(dim=1).squeeze()
        # Condition the fully-connected layer using the questions.
        outputs = self.prelu_outputs(question_inputs + self.H(attended_network_graph))
        outputs = self.Z(outputs)
        return outputs
HIDDEN_DIM = 100
BATCH_SIZE = 100
NUM_EPOCHS = 250
LOG_FILE = "memnet.txt"

if __name__ == '__main__':
    # Train the entity network on bAbI with L2 regularization, early stopping
    # on validation accuracy, and a per-epoch test evaluation. Requires CUDA.
    import copy  # needed for weight snapshots below

    dataset = BabiDataset()
    vocab_size = len(dataset.QA.VOCAB)
    criterion = nn.CrossEntropyLoss(size_average=False)
    model = RecurrentEntityNetwork(HIDDEN_DIM, 130, vocab_size)
    model.cuda()
    early_stopping_counter = 0
    best_accuracy = 0
    # Fix: initialize best_state so the test phase cannot hit a NameError
    # before the first validation improvement; deep-copy because state_dict()
    # aliases the live parameter tensors.
    best_state = copy.deepcopy(model.state_dict())
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
    for epoch in range(NUM_EPOCHS):
        dataset.set_mode('train')
        train_loader = DataLoader(
            dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=pad_collate
        )
        model.train()
        if early_stopping_counter < 20:
            total_accuracy = 0
            num_batches = 0
            for batch_idx, data in enumerate(train_loader):
                optimizer.zero_grad()
                contexts, questions, answers = data
                contexts = autograd.Variable(contexts.long().cuda())
                questions = autograd.Variable(questions.long().cuda())
                answers = autograd.Variable(answers.cuda())
                outputs = model(contexts, questions)
                # L2 penalty over all parameters.
                l2_loss = 0
                for name, param in model.named_parameters():
                    l2_loss += 0.001 * torch.sum(param * param)
                loss = criterion(outputs, answers) + l2_loss
                predictions = F.softmax(outputs).data.max(1)[1]
                correct = predictions.eq(answers.data).cpu().sum()
                acc = correct * 100. / len(contexts)
                loss.backward()
                clip_grad_norm(model.parameters(), 40)
                total_accuracy += acc
                num_batches += 1
                if batch_idx % 20 == 0:
                    print('[Epoch %d] [Training] loss : %f, acc : %f, batch_idx : %d' % (
                        epoch, loss.data[0], total_accuracy / num_batches, batch_idx
                    ))
                optimizer.step()
            # Validation pass (weights are not updated here).
            dataset.set_mode('valid')
            valid_loader = DataLoader(
                dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=pad_collate
            )
            model.eval()
            total_accuracy = 0
            num_batches = 0
            for batch_idx, data in enumerate(valid_loader):
                contexts, questions, answers = data
                contexts = autograd.Variable(contexts.long().cuda())
                questions = autograd.Variable(questions.long().cuda())
                answers = autograd.Variable(answers.cuda())
                outputs = model(contexts, questions)
                l2_loss = 0
                for name, param in model.named_parameters():
                    l2_loss += 0.001 * torch.sum(param * param)
                loss = criterion(outputs, answers) + l2_loss
                predictions = F.softmax(outputs).data.max(1)[1]
                correct = predictions.eq(answers.data).cpu().sum()
                acc = correct * 100. / len(contexts)
                total_accuracy += acc
                num_batches += 1
            total_accuracy = total_accuracy / num_batches
            if total_accuracy > best_accuracy:
                best_accuracy = total_accuracy
                # Fix: deep-copy — without it "best" silently tracked the
                # current weights as training continued.
                best_state = copy.deepcopy(model.state_dict())
                early_stopping_counter = 0
            else:
                early_stopping_counter += 1
            print('[Epoch %d] [Validate] Accuracy : %f' % (epoch, total_accuracy))
            with open(LOG_FILE, 'a') as fp:
                fp.write('[Epoch %d] [Validate] Accuracy : %f' % (epoch, total_accuracy) + '\n')
            # Fix: accuracy is on a 0-100 scale, so a perfect score is 100.0;
            # the original compared against 1.0 and could effectively never stop.
            if total_accuracy == 100.0:
                break
        else:
            print('Early Stopping at Epoch %d, Valid Accuracy : %f' % (epoch, best_accuracy))
            break
        dataset.set_mode('test')
        test_loader = DataLoader(
            dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=pad_collate
        )
        test_acc = 0
        num_batches = 0
        # Fix: the original ran `model.state_dict().update(best_state)` every
        # batch, which mutates a temporary dict and never touches the model.
        # Evaluate the best weights, then restore the in-progress ones.
        current_state = copy.deepcopy(model.state_dict())
        model.load_state_dict(best_state)
        for batch_idx, data in enumerate(test_loader):
            contexts, questions, answers = data
            contexts = autograd.Variable(contexts.long().cuda())
            questions = autograd.Variable(questions.long().cuda())
            answers = autograd.Variable(answers.cuda())
            outputs = model(contexts, questions)
            l2_loss = 0
            for name, param in model.named_parameters():
                l2_loss += 0.001 * torch.sum(param * param)
            loss = criterion(outputs, answers) + l2_loss
            predictions = F.softmax(outputs).data.max(1)[1]
            correct = predictions.eq(answers.data).cpu().sum()
            acc = correct * 100. / len(contexts)
            test_acc += acc
            num_batches += 1
        model.load_state_dict(current_state)  # resume training from where we left off
        print('[Epoch %d] [Test] Accuracy : %f' % (epoch, test_acc / num_batches))
        with open(LOG_FILE, 'a') as fp:
            fp.write('[Epoch %d] [Test] Accuracy : %f' % (epoch, test_acc / num_batches) + '\n')
61d93349709f00bb603d8566d8afdb83080026fb | 3,444 | py | Python | tests/test_tba.py | StanfordAHA/Lake | 34df001db107e1a0824b7fdb05b9f2145bf49a3e | [
"BSD-3-Clause"
] | 11 | 2019-10-14T02:05:38.000Z | 2022-03-10T14:10:22.000Z | tests/test_tba.py | StanfordAHA/Lake | 34df001db107e1a0824b7fdb05b9f2145bf49a3e | [
"BSD-3-Clause"
] | 29 | 2019-09-02T05:49:40.000Z | 2022-02-26T00:57:54.000Z | tests/test_tba.py | StanfordAHA/Lake | 34df001db107e1a0824b7fdb05b9f2145bf49a3e | [
"BSD-3-Clause"
] | 1 | 2021-04-16T20:26:13.000Z | 2021-04-16T20:26:13.000Z | from lake.models.tba_model import TBAModel
from lake.modules.transpose_buffer_aggregation import TransposeBufferAggregation
from lake.passes.passes import lift_config_reg
import magma as m
from magma import *
import fault
import tempfile
import kratos as k
import random as rand
import pytest
def test_tba(word_width=16,
             fetch_width=4,
             num_tb=1,
             tb_height=1,
             max_range=5,
             max_range_inner=5):
    """Co-simulate TransposeBufferAggregation RTL against the TBAModel golden model.

    Configures both identically, drives random SRAM data and valid/ack signals
    for ``num_iters`` cycles, and checks the DUT's valid and data outputs
    against the model on every cycle (fault + verilator).
    """
    # Software golden model.
    model_tba = TBAModel(word_width,
                         fetch_width,
                         num_tb,
                         tb_height,
                         max_range,
                         max_range_inner)
    # Access-pattern configuration, mirrored into the DUT registers below.
    new_config = {}
    new_config["range_outer"] = 5
    new_config["range_inner"] = 3
    new_config["stride"] = 2
    new_config["indices"] = [0, 1, 2]
    new_config["dimensionality"] = 2
    new_config["tb_height"] = 1
    new_config["starting_addr"] = 0
    model_tba.set_config(new_config=new_config)
    # Hardware generator under test (kratos -> magma).
    dut = TransposeBufferAggregation(word_width,
                                     fetch_width,
                                     num_tb,
                                     tb_height,
                                     max_range,
                                     max_range_inner,
                                     max_stride=5,
                                     tb_iterator_support=2)
    lift_config_reg(dut.internal_generator)
    magma_dut = k.util.to_magma(dut, flatten_array=True, check_flip_flop_always_ff=False)
    tester = fault.Tester(magma_dut, magma_dut.clk)
    # configuration registers
    tester.circuit.tb_0_indices_0 = 0
    tester.circuit.tb_0_indices_1 = 1
    tester.circuit.tb_0_indices_2 = 2
    tester.circuit.tb_0_range_outer = 5
    tester.circuit.tb_0_range_inner = 3
    tester.circuit.tb_0_stride = 2
    tester.circuit.tb_0_dimensionality = 2
    tester.circuit.tb_0_tb_height = 1
    tester.circuit.tb_0_starting_addr = 0
    # Reset sequence: pulse rst_n low for two steps, then release.
    tester.circuit.clk = 0
    tester.circuit.rst_n = 1
    tester.step(2)
    tester.circuit.rst_n = 0
    tester.step(2)
    tester.circuit.tba_ren = 1
    tester.circuit.rst_n = 1
    # Deterministic stimulus.
    rand.seed(0)
    num_iters = 100
    for i in range(num_iters):
        # Random fetch-wide input word.
        data = []
        for j in range(fetch_width):
            data.append(rand.randint(0, 2**word_width - 1))
        for j in range(fetch_width):
            setattr(tester.circuit, f"SRAM_to_tb_data_{j}", data[j])
        valid_data = rand.randint(0, 1)
        tester.circuit.valid_data = valid_data
        mem_valid_data = rand.randint(0, 1)
        tester.circuit.mem_valid_data = mem_valid_data
        tb_index_for_data = 0
        tester.circuit.tb_index_for_data = tb_index_for_data
        ack_in = valid_data
        tester.circuit.ack_in = ack_in
        # Golden-model step with identical inputs.
        model_data, model_valid = \
            model_tba.tba_main(data, valid_data, ack_in, tb_index_for_data, 1, mem_valid_data)
        tester.eval()
        tester.circuit.tb_to_interconnect_valid.expect(model_valid)
        if model_valid:
            tester.circuit.tb_to_interconnect_data.expect(model_data[0])
        tester.step(2)
    # Compile and run the testbench under verilator in a scratch directory.
    with tempfile.TemporaryDirectory() as tempdir:
        tester.compile_and_run(target="verilator",
                               directory=tempdir,
                               magma_output="verilog",
                               flags=["-Wno-fatal"])


if __name__ == "__main__":
    test_tba()
| 29.947826 | 94 | 0.594948 |
61da398102287561106f2583dbf3dd6a0d400ea3 | 1,442 | py | Python | 2018/02/py/run.py | Bigsby/aoc | 409fefbb0467628fa298288064acb622bb53ee58 | [
"CC0-1.0"
] | 1 | 2021-06-11T17:24:05.000Z | 2021-06-11T17:24:05.000Z | 2018/02/py/run.py | Bigsby/aoc | 409fefbb0467628fa298288064acb622bb53ee58 | [
"CC0-1.0"
] | null | null | null | 2018/02/py/run.py | Bigsby/aoc | 409fefbb0467628fa298288064acb622bb53ee58 | [
"CC0-1.0"
] | null | null | null | #! /usr/bin/python3
import sys, os, time
from typing import List, Tuple
from itertools import combinations
def part1(ids: List[str]) -> int:
    """Checksum: (# ids with a letter appearing exactly twice) * (# with exactly thrice)."""
    pairs = 0
    triples = 0
    for box_id in ids:
        letter_counts = {box_id.count(letter) for letter in box_id}
        if 2 in letter_counts:
            pairs += 1
        if 3 in letter_counts:
            triples += 1
    return pairs * triples
def part2(ids: List[str]) -> str:
    """Find the two ids differing at exactly one position; return their common letters."""
    for first, second in combinations(ids, 2):
        diff_positions = [pos for pos in range(len(first)) if first[pos] != second[pos]]
        if len(diff_positions) == 1:
            where = diff_positions[0]
            return first[:where] + first[where + 1:]
    raise Exception("Ids differencing 1 not found")
def solve(ids: List[str]) -> Tuple[int, str]:
    """Run both puzzle parts on the same input and return their answers."""
    return part1(ids), part2(ids)
def get_input(file_path: str) -> List[str]:
    """Read the puzzle input file and return its lines, stripped of whitespace."""
    if not os.path.isfile(file_path):
        raise FileNotFoundError(file_path)
    with open(file_path, "r") as handle:
        return [line.strip() for line in handle]
def main():
    """CLI entry point: read the input file named in argv[1], print both answers."""
    if len(sys.argv) != 2:
        raise Exception("Please, add input file path as parameter")
    started = time.perf_counter()
    part1_result, part2_result = solve(get_input(sys.argv[1]))
    elapsed = time.perf_counter() - started
    print("P1:", part1_result)
    print("P2:", part2_result)
    print()
    print(f"Time: {elapsed:.7f}")


if __name__ == "__main__":
    main()
61da655b21d56bf52e1b1c392472699b90dc9b53 | 275 | py | Python | text_directions.py | ErnestoPena/Game-Of-Life.-Python | 046bca4ae1c69e08efed4f45057b097653410ec8 | [
"MIT"
] | null | null | null | text_directions.py | ErnestoPena/Game-Of-Life.-Python | 046bca4ae1c69e08efed4f45057b097653410ec8 | [
"MIT"
] | null | null | null | text_directions.py | ErnestoPena/Game-Of-Life.-Python | 046bca4ae1c69e08efed4f45057b097653410ec8 | [
"MIT"
] | null | null | null | import pygame
def print(text, window):
    """Render *text* in 18pt yellow Times New Roman and blit it onto *window*.

    NOTE(review): this intentionally shadows the builtin ``print`` within
    this module.
    """
    font = pygame.font.SysFont('Times New Roman', 18)
    rendered = font.render(text, 1, (255, 255, 0))
    # Center horizontally inside a 200px-wide box anchored at x=1000, y~680.
    x = 1000 + (200/2 - rendered.get_width()/2)
    y = 680 + (rendered.get_height()/2)
    window.blit(rendered, (x, y))
61db9a4dde565ed6cc9ccd45b6a858a56b15b618 | 515 | py | Python | brummi/config.py | fredreichbier/brummi | d833e6dc0b74c8bddea225c785b3cba463b13ecd | [
"MIT"
] | 1 | 2015-11-05T04:35:07.000Z | 2015-11-05T04:35:07.000Z | brummi/config.py | fredreichbier/brummi | d833e6dc0b74c8bddea225c785b3cba463b13ecd | [
"MIT"
] | null | null | null | brummi/config.py | fredreichbier/brummi | d833e6dc0b74c8bddea225c785b3cba463b13ecd | [
"MIT"
] | null | null | null | import pkg_resources
from . import BrummiRepository
# Default option values; `templates` resolves to the package's bundled
# template directory at import time.
DEFAULTS = {
    'templates': pkg_resources.resource_filename('brummi', 'templates'),
    'out_path': 'docs',
}
class Config(object):
    """Launch options merged over the module defaults."""

    def __init__(self, options):
        merged = DEFAULTS.copy()
        merged.update(options)
        self.options = merged

    def launch(self):
        """Build every module of the repository described by the options."""
        repo = BrummiRepository(
            self.options['ooc_path'],
            self.options['templates'],
            self.options['out_path']
        )
        repo.build_all_modules()
| 23.409091 | 72 | 0.615534 |
61de22931c74120ebd50c4d032782c041e459df7 | 730 | py | Python | kittiground/__init__.py | JeremyBYU/polylidar-kitti | bfad0dc4a74e136d2841dccf3ccc05d982f18a8e | [
"MIT"
] | null | null | null | kittiground/__init__.py | JeremyBYU/polylidar-kitti | bfad0dc4a74e136d2841dccf3ccc05d982f18a8e | [
"MIT"
] | null | null | null | kittiground/__init__.py | JeremyBYU/polylidar-kitti | bfad0dc4a74e136d2841dccf3ccc05d982f18a8e | [
"MIT"
] | null | null | null | from pathlib import Path
import numpy as np
# Package-relative paths.
THIS_FILE = Path(__file__)
THIS_DIR = THIS_FILE.parent
# Default YAML configuration shipped inside this package.
DEFAULT_CONFIG_FILE = THIS_DIR / 'config' / 'default.yaml'
# Width/height of the visual screens
IMG_WIDTH = 1242
IMG_HEIGHT = 375
# INTRINISCS = np.array([[649.51905284, 0.00000000, 620.50000000],
#                        [0.00000000, 649.51905284, 374.50000000],
#                        [0.00000000, 0.00000000, 1.00000000]])
# 4x4 homogeneous transform (bottom row [0, 0, 0, 1]); presumably camera
# extrinsics — which frames it maps between is not shown here, verify upstream.
EXTRINSICS = np.array([[0.99960128, 0.00806920, -0.02705864, -0.07041882],
                       [-0.01559983, -0.64094650, -0.76742702, 7.50137898],
                       [-0.02353566, 0.76754314, -0.64056507, 8.23519670],
                       [0.00000000, 0.00000000, 0.00000000, 1.00000000]])
| 38.421053 | 75 | 0.610959 |
61df694948c2ba5c7d34c79e97268eab5f090a30 | 3,272 | py | Python | palette/core/color_transfer.py | SuziKim/PaletteSelection | cfc0052996b5c8dc1da2d6e30798dd1fed138ebe | [
"MIT"
] | 23 | 2015-08-25T12:31:44.000Z | 2021-12-15T03:18:12.000Z | palette/core/color_transfer.py | SuziKim/PaletteSelection | cfc0052996b5c8dc1da2d6e30798dd1fed138ebe | [
"MIT"
] | null | null | null | palette/core/color_transfer.py | SuziKim/PaletteSelection | cfc0052996b5c8dc1da2d6e30798dd1fed138ebe | [
"MIT"
] | 7 | 2017-07-27T10:57:36.000Z | 2022-02-22T06:51:44.000Z | # -*- coding: utf-8 -*-
## @package palette.core.color_transfer
#
# Color transfer.
# @author tody
# @date 2015/09/16
import numpy as np
from scipy.interpolate import Rbf
import matplotlib.pyplot as plt
from palette.core.lab_slices import LabSlice, LabSlicePlot, Lab2rgb_py
## Color transfer for ab coordinates.
class ABTransfer:
    """Maps ab chroma coordinates through a pair of RBF interpolators."""

    ## Constructor
    #  @param abs_original original ab coordinates.
    #  @param abs_edited   edited ab coordinates.
    def __init__(self, abs_original, abs_edited):
        src = np.array(abs_original)
        dst = np.array(abs_edited)
        # One radial-basis interpolator per output channel (a, then b).
        self._rbf = [Rbf(src[:, 0], src[:, 1], dst[:, channel]) for channel in range(2)]

    ## Color transfer for ab coordinates.
    def transfer(self, ab_original):
        a, b = ab_original[0], ab_original[1]
        return np.array([interpolator(a, b) for interpolator in self._rbf])
## Simple plotter for ABTransfer.
class ABTransferPlot:
    ## Constructor
    #  @param abs_original  original ab coordinates.
    #  @param abs_edited    edited ab coordinates.
    #  @param L             target L coordinate.
    #  @param abs_animation list of ab coordinates for plot animation (may be empty).
    def __init__(self, abs_original, abs_edited, L=50, abs_animation=None):
        self._slice = LabSlice(func=Lab2rgb_py)
        self._slice_plot = LabSlicePlot(self._slice)
        self._slice_plot.plot(L)
        self._abs_original = abs_original
        self._abs_edited = abs_edited
        # Fix: the original used a mutable default ([]) and then indexed
        # self._abs_animation[0] unconditionally in _plot, so constructing
        # the plot without animation frames raised IndexError.
        self._abs_animation = [] if abs_animation is None else abs_animation
        self._transfer = ABTransfer(abs_original, abs_edited)
        self._plot()

    ## Animation function for matplotlib.
    def animationFunc(self, step, *args):
        if not self._abs_animation:
            # Nothing to animate (also avoids a modulo-by-zero below).
            return self._blend_plot
        ab_id = step % len(self._abs_animation)
        ab_original = self._abs_animation[ab_id]
        xy_original, xy_edited = self._blendResult(ab_original)
        self._setArrow(self._blend_plot, xy_original, xy_edited)
        return self._blend_plot

    ## Draw an arrow for every original->edited pair, plus the blend marker.
    def _plot(self):
        xys_original = [self._slice.ab2xy(ab_original) for ab_original in self._abs_original]
        xys_edited = [self._slice.ab2xy(ab_edited) for ab_edited in self._abs_edited]
        for xy_original, xy_edited in zip(xys_original, xys_edited):
            self._arrow(xy_original, xy_edited)
        if self._abs_animation:
            xy_original, xy_edited = self._blendResult(self._abs_animation[0])
            self._blend_plot = self._arrow(xy_original, xy_edited, color=[0.7, 0.5, 0.4])
        else:
            self._blend_plot = None

    ## Draw a line from ps to pe with a dot at ps; returns the two artists.
    def _arrow(self, ps, pe, color=[1, 1, 1]):
        xs = [ps[0], pe[0]]
        ys = [ps[1], pe[1]]
        return [plt.plot(xs, ys, '-', color=color, linewidth=2, alpha=0.8)[0],
                plt.plot(ps[0], ps[1], 'o', color=color, linewidth=2, alpha=0.8)[0]]

    ## Move an existing arrow's artists to a new ps->pe segment.
    def _setArrow(self, arrow_plot, ps, pe):
        xs = [ps[0], pe[0]]
        ys = [ps[1], pe[1]]
        arrow_plot[0].set_data(xs, ys)
        arrow_plot[1].set_data(ps[0], ps[1])

    ## Transfer one ab pair and return both endpoints in plot coordinates.
    def _blendResult(self, ab_original):
        ab_edited = self._transfer.transfer(ab_original)
        xy_original = self._slice.ab2xy(ab_original)
        xy_edited = self._slice.ab2xy(ab_edited)
        return xy_original, xy_edited
| 37.181818 | 93 | 0.652812 |
61dfafddb5a99f013e5962a29c6779ac49a5f150 | 1,447 | py | Python | CursoEmVideoPython/desafio95.py | miguelabreuss/scripts_python | cf33934731a9d1b731672d4309aaea0a24ae151a | [
"MIT"
] | null | null | null | CursoEmVideoPython/desafio95.py | miguelabreuss/scripts_python | cf33934731a9d1b731672d4309aaea0a24ae151a | [
"MIT"
] | 1 | 2020-07-04T16:27:25.000Z | 2020-07-04T16:27:25.000Z | CursoEmVideoPython/desafio95.py | miguelabreuss/scripts_python | cf33934731a9d1b731672d4309aaea0a24ae151a | [
"MIT"
] | null | null | null | scoult = dict()
# Interactive per-player match statistics (course exercise; prompts and dict
# keys are intentionally in Portuguese). The working record dict ``scoult``
# is created on the line just above.
gols = list()   # goals per match for the player currently being entered
time = list()   # the "team": one dict per player (NOTE(review): shadows the stdlib module name)
temp = 0
# Data-entry loop: one player per iteration.
while True:
    scoult['Jogador'] = str(input('Qual o nome do jogador: '))
    scoult['Número partidas'] = int(input('Quantas partidas foram jogadas? '))
    for i in range(0,scoult['Número partidas']):
        gols.append(int(input(f'Quantos gols foram marcados na partida {i+1} de {scoult["Jogador"]}? ')))
    scoult['Gols'] = gols[:]
    # Accumulate the player's total goals.
    for i in range(0,scoult['Número partidas']):
        if i == 0:
            scoult['Total de gols'] = gols[i]
        else:
            scoult['Total de gols'] += gols[i]
    time.append(scoult.copy())
    gols.clear()
    if str(input('Deseja continuar [S/N]? ')) in 'Nn':
        break
# Summary table.
print('-' * 50)
print('-' * 50)
print('{:^50}'.format('TABELO PERFORMANCE'))
print('-' * 50)
print('{:<5}{:<15}{:<25}{:<5}'.format('cod', 'Jogador', 'Gols', 'Total'))
for e in time:
    print('{:<5}{:<15}{:<25}{:<5}'.format(temp, e['Jogador'], str(e['Gols']), e['Total de gols']))
    temp += 1
print('-' * 50)
# Detail loop: 999 is the quit sentinel.
# NOTE(review): "aual" in the prompt below is a typo for "qual" (user-facing
# string, left untouched here).
while True:
    temp = int(input('De aual jogador você deseja mais detalhes? [cod] 999 p/ sair. '))
    if temp == 999:
        break
    else:
        print(f'-- Performance do jogador: {time[temp]["Jogador"]}')
        for i in range(0,time[temp]["Número partidas"]):
            print(f' => Na partida {i+1} {time[temp]["Jogador"]} marcou {time[temp]["Gols"][i]} vez(es).')
        print(f'Foi um total de {time[temp]["Total de gols"]} gols')
61dfc58457362e0a41be0f73d8c2ed155141035c | 428 | py | Python | Section07_Bridge/script-Bridge.py | enriqueescobar-askida/Kinito.Python | e4c5521e771c4de0ceaf81776a4a61f7de01edb4 | [
"MIT"
] | 1 | 2020-10-20T07:41:51.000Z | 2020-10-20T07:41:51.000Z | Section07_Bridge/script-Bridge.py | enriqueescobar-askida/Kinito.Python | e4c5521e771c4de0ceaf81776a4a61f7de01edb4 | [
"MIT"
] | null | null | null | Section07_Bridge/script-Bridge.py | enriqueescobar-askida/Kinito.Python | e4c5521e771c4de0ceaf81776a4a61f7de01edb4 | [
"MIT"
] | null | null | null | # Circles and squares
# Each can be rendered in vector or raster form
from Section07_Bridge.Brigde.Circle import Circle
from Section07_Bridge.Brigde.RasterRenderer import RasterRenderer
from Section07_Bridge.Brigde.VectorRenderer import VectorRenderer
if __name__ == '__main__':
raster = RasterRenderer()
vector = VectorRenderer()
circle = Circle(vector, 5)
circle.draw()
circle.resize(2)
circle.draw()
| 30.571429 | 65 | 0.766355 |
61e1ff665914cfb40790ee569edb6f9cb201dad5 | 3,668 | py | Python | Algorithms/On-Policy/A2C/DISCOVER_A2C.py | baturaysaglam/DISCOVER | 423158c84a5935ca5755ccad06ea5fe20fb57d76 | [
"MIT"
] | null | null | null | Algorithms/On-Policy/A2C/DISCOVER_A2C.py | baturaysaglam/DISCOVER | 423158c84a5935ca5755ccad06ea5fe20fb57d76 | [
"MIT"
] | null | null | null | Algorithms/On-Policy/A2C/DISCOVER_A2C.py | baturaysaglam/DISCOVER | 423158c84a5935ca5755ccad06ea5fe20fb57d76 | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from utils import init
class Explorer(nn.Module):
    """State-conditioned exploration-direction network (DISCOVER).

    Three tanh-activated linear layers; the output is scaled by
    ``max_action`` and the squared exploration-regularization weight.
    """

    def __init__(self, state_dim, max_action, exp_regularization):
        super(Explorer, self).__init__()
        # Orthogonal-init wrapper from the project's utils module.
        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0), np.sqrt(2))
        self.l1 = init_(nn.Linear(state_dim, 64))
        self.l2 = init_(nn.Linear(64, 64))
        self.l3 = init_(nn.Linear(64, state_dim))
        self.max_action = max_action
        self.exp_regularization = exp_regularization

    def forward(self, state):
        hidden = torch.tanh(self.l1(state))
        hidden = torch.tanh(self.l2(hidden))
        return self.max_action * torch.tanh(self.l3(hidden)) * self.exp_regularization ** 2
class DISCOVER_A2C():
    """A2C agent augmented with a DISCOVER exploration network.

    Wraps a given actor-critic ``policy`` (RMSprop-optimized, standard A2C
    losses) plus an ``Explorer`` whose parameters are trained with Adam to
    *increase* the critic's value loss on its proposed directions (see the
    negated loss in ``update_parameters``).
    """

    def __init__(self,
                 state_dim,
                 max_action,
                 exp_regularization,
                 policy,
                 value_loss_coef,
                 entropy_coef,
                 learning_rate=None,
                 adam_eps=None,
                 alpha=None,
                 max_grad_norm=None,
                 device=None):
        self.policy = policy
        self.explorer = Explorer(state_dim, max_action, exp_regularization).to(device)
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        self.max_grad_norm = max_grad_norm
        # Same learning rate for both optimizers; RMSprop for the policy
        # (A2C convention), Adam for the explorer.
        self.optimizer = optim.RMSprop(policy.parameters(), learning_rate, eps=adam_eps, alpha=alpha)
        self.explorer_optimizer = optim.Adam(self.explorer.parameters(), lr=learning_rate, eps=adam_eps)

    def explore(self, inputs):
        """Return the explorer's proposed exploration direction for *inputs*."""
        return self.explorer(inputs)

    def update_parameters(self, rollouts):
        """One A2C policy/critic update from *rollouts*, then one explorer update.

        NOTE(review): ``rollouts`` is a project rollout-storage object; the
        field semantics (obs, exploration_directions, masks, returns, ...)
        are assumed from usage below — confirm against its definition.
        """
        obs_shape = rollouts.obs.size()[2:]
        action_shape = rollouts.actions.size()[-1]
        num_steps, num_processes, _ = rollouts.rewards.size()
        values, action_log_probs, dist_entropy, _ = self.policy.evaluate_actions(
            rollouts.obs[:-1].view(-1, *obs_shape),
            rollouts.exploration_directions.view(-1, *obs_shape),
            rollouts.recurrent_hidden_states[0].view(-1, self.policy.recurrent_hidden_state_size),
            rollouts.masks[:-1].view(-1, 1),
            rollouts.actions.view(-1, action_shape))
        values = values.view(num_steps, num_processes, 1)
        action_log_probs = action_log_probs.view(num_steps, num_processes, 1)
        advantages = rollouts.returns[:-1] - values
        value_loss = advantages.pow(2).mean()
        action_loss = -(advantages.detach() * action_log_probs).mean()
        self.optimizer.zero_grad()
        (value_loss * self.value_loss_coef + action_loss - dist_entropy * self.entropy_coef).backward()
        nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
        self.optimizer.step()
        # Compute the explorer loss
        values, action_log_probs, dist_entropy, _ = self.policy.evaluate_actions(
            rollouts.obs[:-1].view(-1, *obs_shape),
            self.explorer(rollouts.obs[:-1].view(-1, *obs_shape)),
            rollouts.recurrent_hidden_states[0].view(-1, self.policy.recurrent_hidden_state_size),
            rollouts.masks[:-1].view(-1, 1),
            rollouts.actions.view(-1, action_shape))
        values = values.view(num_steps, num_processes, 1)
        advantages = rollouts.returns[:-1] - values
        value_loss = advantages.pow(2).mean()
        self.explorer_optimizer.zero_grad()
        # Gradient *ascent* on the value loss w.r.t. the explorer parameters.
        (-value_loss * self.value_loss_coef).backward()
        nn.utils.clip_grad_norm_(self.explorer.parameters(), self.max_grad_norm)
        self.explorer_optimizer.step()
| 37.050505 | 104 | 0.638768 |
61e3abea3e991562a75549fe727c93817d1999de | 3,400 | py | Python | user/beaninfo_Global.py | dvdrm/gd | c004724344577bb608fa0611d10c16b211995f72 | [
"Apache-2.0"
] | null | null | null | user/beaninfo_Global.py | dvdrm/gd | c004724344577bb608fa0611d10c16b211995f72 | [
"Apache-2.0"
] | null | null | null | user/beaninfo_Global.py | dvdrm/gd | c004724344577bb608fa0611d10c16b211995f72 | [
"Apache-2.0"
] | null | null | null | from telethon import events, Button
from .login import user
from .. import jdbot
from ..bot.utils import cmd, TASK_CMD,split_list, press_event
from ..diy.utils import read, write
import asyncio
import re
@user.on(events.NewMessage(pattern=r'^setbd', outgoing=True))
async def SetBeanDetailInfo(event):
    """Handle ``setbd <n>``: persist ``BOTShowTopNum="<n>"`` into the config file."""
    try:
        msg_text = event.raw_text.split(' ')
        if len(msg_text) == 2:
            text = msg_text[-1]
        else:
            text = None
        if text is None:
            await event.edit('请输入正确的格式: setbd 屏蔽京豆数量')
            return
        key = "BOTShowTopNum"
        kv = f'{key}="{text}"'
        change = ""
        configs = read("str")
        if kv not in configs:
            if key in configs:
                # Key already present with a different value: rewrite in place.
                configs = re.sub(f'{key}=("|\').*("|\')', kv, configs)
                write(configs)
            else:
                # Key absent: append a fresh export line.
                configs = read("str")
                configs += f'export {key}="{text}"\n'
                write(configs)
            change = f'已替换屏蔽京豆数为{text}'
        else:
            change = f'设定没有改变,想好再来.'
        await event.edit(change)
    except Exception as e:
        # Fix: the original error path referenced `os`, `traceback`, `logger`
        # and `chat_id`, none of which exist in this module, so any failure
        # raised a NameError instead of being reported. Import what is needed
        # locally and reply to the chat that issued the command.
        import os
        import traceback
        title = "【💥错误💥】"
        name = "文件名:" + os.path.split(__file__)[-1].split(".")[0]
        function = "函数名:" + e.__traceback__.tb_frame.f_code.co_name
        details = "错误详情:第 " + str(e.__traceback__.tb_lineno) + " 行"
        tip = '建议百度/谷歌进行查询'
        await jdbot.send_message(event.chat_id, f"{title}\n\n{name}\n{function}\n错误原因:{str(e)}\n{details}\n{traceback.format_exc()}\n{tip}")
@user.on(events.NewMessage(pattern=r'^bd', outgoing=True))
async def CCBeanDetailInfo(event):
    """Handle ``bd <account>``: store BOTCHECKCODE, run the asset-query script,
    and forward matching output lines to the chat in chunks."""
    msg_text = event.raw_text.split(' ')
    if len(msg_text) == 2:
        text = msg_text[-1]
    else:
        text = None
    if text is None:
        await event.edit('请指定要查询的账号,格式: cb 1 或 cb ptpin')
        return
    key = "BOTCHECKCODE"
    kv = f'{key}="{text}"'
    change = ""  # NOTE: accumulated but never shown (kept for parity with setbd)
    configs = read("str")
    intcount = 0
    if kv not in configs:
        if key in configs:
            configs = re.sub(f'{key}=("|\').*("|\')', kv, configs)
            change += f"【替换】环境变量:`{kv}`\n"
            write(configs)
        else:
            configs = read("str")
            configs += f'export {key}="{text}"\n'
            change += f"【新增】环境变量:`{kv}`\n"
            write(configs)
    await event.edit('开始查询账号'+text+'的资产,请稍后...')
    cmdtext = "task /ql/data/scripts/jk_script/bot_jd_bean_info_QL.js now"
    p = await asyncio.create_subprocess_shell(
        cmdtext, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
    res_bytes, res_err = await p.communicate()
    res = res_bytes.decode('utf-8')
    txt = res.split('\n')
    strReturn = ""
    await event.delete()
    if res:
        for line in txt:
            if "【" in line and "🔔" not in line:
                strReturn = strReturn + line + '\n'
                # Fix: the counter was never incremented, so the chunked-send
                # branch below was dead code and very long results were sent
                # as one oversized message.
                intcount += 1
            if intcount == 100:
                intcount = 0
                if strReturn:
                    await user.send_message(event.chat_id, strReturn)
                    strReturn = ""
    else:
        await user.send_message(event.chat_id, '查询失败!')
    if strReturn:
        await user.send_message(event.chat_id, strReturn)
| 33.009709 | 134 | 0.516471 |
61e4ce6929b3cf5b02ae69957ea9065425f62a24 | 2,409 | py | Python | samples/boxcheck.py | cg2v/box_requests | 024141a263c8918962957b75d208c532c7b853f0 | [
"Apache-2.0"
] | null | null | null | samples/boxcheck.py | cg2v/box_requests | 024141a263c8918962957b75d208c532c7b853f0 | [
"Apache-2.0"
] | null | null | null | samples/boxcheck.py | cg2v/box_requests | 024141a263c8918962957b75d208c532c7b853f0 | [
"Apache-2.0"
] | 1 | 2018-10-23T15:48:00.000Z | 2018-10-23T15:48:00.000Z | #!/usr/bin/python
import box_requests
import requests
import os
import sys
import time
import socket
import optparse
import logging
def checktime(path, days):
    """Return True if *path* exists and was modified within the last *days* days."""
    try:
        mtime = os.stat(path).st_mtime
    except OSError:
        # A missing or unreadable file counts as "not fresh".
        return False
    return (time.time() - mtime) < (days * 86400)
def logfailure(msg):
    """Print a human-readable failure notice to stderr, then re-raise the
    exception currently being handled.

    Must be called from inside an ``except`` block: the trailing bare
    ``raise`` re-raises the active exception with its traceback intact.
    (Python 2 print syntax; this script targets Python 2.)
    """
    print >> sys.stderr, "Box API credentials expire if they are not used for a two week"
    print >> sys.stderr, "period. The process that attempts to keep them fresh on"
    print >> sys.stderr, "{0} failed. Details follow:".format(socket.gethostname())
    print >> sys.stderr
    if msg:
        # Optional extra detail about the specific failure.
        print >> sys.stderr, msg
        print >> sys.stderr
    raise
# Command-line options: -v prints the Box login on success, -d enables
# debug logging.
o=optparse.OptionParser()
o.add_option('-v', '--verbose', action="store_true", dest="verbose",
             default=False, help="Display username on success")
o.add_option('-d', '--debug', action="store_true", dest="debug",
             default=False, help="Enable debug logging")
# Root logger writing to stderr with a detailed format.
rl=logging.getLogger()
sh=logging.StreamHandler(sys.stderr)
fm=logging.Formatter("%(asctime)s %(name)s [%(levelname)s] %(filename)s:%(lineno)d:%(funcName)s %(message)s")
sh.setFormatter(fm)
rl.addHandler(sh)
(options, args) = o.parse_args()
if options.debug:
    sh.setLevel(logging.DEBUG)
    rl.setLevel(logging.DEBUG)
try:
    # Exercise the stored Box credentials so they stay fresh; any failure
    # path goes through logfailure(), which prints a notice and re-raises.
    with box_requests.boxsession("/var/local/box/boxtoken.dat") as bs:
        ok=False
        try:
            resp=bs.request("GET", "/2.0/users/me")
        except requests.ConnectionError:
            # Only complain if the token file has not been refreshed for
            # a week (transient network problems are tolerated).
            if not checktime("/var/local/box/boxtoken.dat", 7):
                logfailure("Some sort of network problem occured, and has prevented the refresh\nprocess for several days.")
        except requests.Timeout:
            if not checktime("/var/local/box/boxtoken.dat", 7):
                logfailure("Some sort of network problem occured, and has prevented the refresh\nprocess for several days.")
        except ValueError:
            logfailure("This failure seems to be due to a programming error")
        except box_requests.BoxAPIError:
            logfailure("Box rejected the credentials, they may already be invalid")
        except:
            # Unknown failure: report without extra detail.
            logfailure(None)
        else:
            if options.verbose:
                print "Current user is {0}".format(resp["login"])
except OSError:
    logfailure("The credentials are missing or could not be loaded")
except box_requests.BoxTokenError:
    logfailure("The credentials are missing or could not be loaded")
finally:
    rl.removeHandler(sh)
| 33.929577 | 120 | 0.688252 |
61e6e408c9d358e1ba90de75b214eb2a33ce5303 | 2,663 | py | Python | sourcecode/src/vx/lha/Main.py | ivarvb/LHA | b3b7613180d533468edf762195922b73c70c525c | [
"MIT"
] | null | null | null | sourcecode/src/vx/lha/Main.py | ivarvb/LHA | b3b7613180d533468edf762195922b73c70c525c | [
"MIT"
] | null | null | null | sourcecode/src/vx/lha/Main.py | ivarvb/LHA | b3b7613180d533468edf762195922b73c70c525c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: Ivar
"""
from Description import *
from Classification import *
if __name__ == "__main__":
    # BUG FIX: the original first assigned inputdir/outputdir for
    # .../dataset_1 and then unconditionally reassigned them for dataset_2
    # below, so the dataset_1 assignments (and their extra Util.now() call)
    # were dead code and have been removed.

    # Feature-extraction jobs: RAD descriptors at three window sizes, then
    # LBP descriptors at three tile sizes for radius 5 and radius 10.
    # Each entry carries its own mask directory, subset and erode setting.
    template = [
        {
            "name":"RAD",
            "imagedir":"images_cleaned",
            "maskdir":"seg/seg_window",
            "masksubsetdir":"100",
            "parameters":{"tile_size":100},
            "erode":[30]
        },
        {
            "name":"RAD",
            "imagedir":"images_cleaned",
            "maskdir":"seg/seg_window",
            "masksubsetdir":"200",
            "parameters":{"tile_size":200},
            "erode":[30]
        },
        {
            "name":"RAD",
            "imagedir":"images_cleaned",
            "maskdir":"seg/seg_window",
            "masksubsetdir":"300",
            "parameters":{"tile_size":300},
            "erode":[30]
        },
        {
            "name":"LBP",
            "imagedir":"images_cleaned",
            "maskdir":"masks",
            "masksubsetdir":"100",
            "parameters":{"tile_size":100, "radius":5},
            "erode":[30]
        },
        {
            "name":"LBP",
            "imagedir":"images_cleaned",
            "maskdir":"masks",
            "masksubsetdir":"200",
            "parameters":{"tile_size":200, "radius":5},
            "erode":[30]
        },
        {
            "name":"LBP",
            "imagedir":"images_cleaned",
            "maskdir":"masks",
            "masksubsetdir":"300",
            "parameters":{"tile_size":300, "radius":5},
            "erode":[30]
        },
        {
            "name":"LBP",
            "imagedir":"images_cleaned",
            "maskdir":"masks",
            "masksubsetdir":"100",
            "parameters":{"tile_size":100, "radius":10},
            "erode":[30]
        },
        {
            "name":"LBP",
            "imagedir":"images_cleaned",
            "maskdir":"masks",
            "masksubsetdir":"200",
            "parameters":{"tile_size":200, "radius":10},
            "erode":[30]
        },
        {
            "name":"LBP",
            "imagedir":"images_cleaned",
            "maskdir":"masks",
            "masksubsetdir":"300",
            "parameters":{"tile_size":300, "radius":10},
            "erode":[30]
        },
    ]

    # Output directory is timestamped so repeated runs do not collide.
    inputdir = "../../../../data/LHA/dataset_2"
    outputdir = inputdir+"/csv/exp/"+Util.now()

    # Extract descriptors, then run classification on the produced CSVs.
    Description.start(inputdir, outputdir, template)
    Classification.start(outputdir, outputdir)

    print("Complete in {}".format(outputdir))
| 23.156522 | 56 | 0.437852 |
61e6fadc19dca2b7aaa1c0e67b41806d94ed6219 | 12,263 | py | Python | pyemits/core/ml/regression/trainer.py | thompson0012/PyEmits | 9cb6fbf27ca7e8952ed5aca26118055e04492c23 | [
"Apache-2.0"
] | 6 | 2021-10-21T14:13:25.000Z | 2021-12-26T12:22:51.000Z | pyemits/core/ml/regression/trainer.py | thompson0012/PyEmits | 9cb6fbf27ca7e8952ed5aca26118055e04492c23 | [
"Apache-2.0"
] | null | null | null | pyemits/core/ml/regression/trainer.py | thompson0012/PyEmits | 9cb6fbf27ca7e8952ed5aca26118055e04492c23 | [
"Apache-2.0"
] | null | null | null | from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import ElasticNet, Ridge, Lasso, BayesianRidge, HuberRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from pyemits.core.ml.base import BaseTrainer, BaseWrapper, NeuralNetworkWrapperBase
from pyemits.common.config_model import BaseConfig, KerasSequentialConfig, TorchLightningSequentialConfig
from pyemits.common.data_model import RegressionDataModel
from pyemits.common.py_native_dtype import SliceableDeque
from pyemits.common.validation import raise_if_value_not_contains
from typing import List, Dict, Optional, Union, Any
from pyemits.core.ml.regression.nn import TorchLightningWrapper
# Registry mapping short algorithm names (as accepted by the trainers) to
# their regressor classes from sklearn / xgboost / lightgbm.
RegModelContainer = {
    'RF': RandomForestRegressor,
    'GBDT': GradientBoostingRegressor,
    # 'HGBDT': HistGradientBoostingRegressor,
    'AdaBoost': AdaBoostRegressor,
    'MLP': MLPRegressor,
    'ElasticNet': ElasticNet,
    'Ridge': Ridge,
    'Lasso': Lasso,
    'BayesianRidge': BayesianRidge,
    'Huber': HuberRegressor,
    'XGBoost': XGBRegressor,
    'LightGBM': LGBMRegressor
}
def _get_reg_model(algo_or_wrapper: Union[str, BaseWrapper]):
    """Resolve an algorithm specifier to a model class or wrapper.

    :param algo_or_wrapper: either a key of ``RegModelContainer`` (e.g.
        'RF', 'XGBoost'), resolved to the corresponding regressor class,
        or an already-built :class:`BaseWrapper`, returned unchanged.
    :raises TypeError: for any other input. (The original implicitly
        returned ``None`` here, deferring the failure to a confusing
        crash later on; an explicit error is raised instead.)
    """
    if isinstance(algo_or_wrapper, str):
        return RegModelContainer[algo_or_wrapper]
    # return wrapper model
    if isinstance(algo_or_wrapper, BaseWrapper):
        return algo_or_wrapper
    raise TypeError('algo must be a RegModelContainer key or a BaseWrapper instance')
def fill_algo_config_clf(clf_or_wrapper,
                         algo_config: Optional[BaseConfig] = None):
    """Instantiate/configure a classifier (or NN wrapper) from its config.

    For neural-network wrappers the layers and compile options from the
    config are applied to the wrapped model object; for sklearn-style
    classes the config is unpacked into the constructor. Returns the
    ready-to-fit classifier/wrapper.
    """
    # nn wrapper
    if isinstance(clf_or_wrapper, NeuralNetworkWrapperBase):
        # have algo config
        if algo_config is not None:
            # if keras model object
            if isinstance(algo_config, KerasSequentialConfig):
                for i in algo_config.layer:
                    clf_or_wrapper.model_obj.add(i)
                clf_or_wrapper.model_obj.compile(**algo_config.compile)
                return clf_or_wrapper
            elif isinstance(algo_config, TorchLightningSequentialConfig):
                clf_or_wrapper: TorchLightningWrapper
                # layers are registered on the blank model by 1-based position
                for nos, layer in enumerate(algo_config.layer, 1):
                    clf_or_wrapper.add_layer2blank_model(str(nos), layer)
                return clf_or_wrapper
            # not support pytorch, mxnet model right now
            raise TypeError('now only support KerasSequentialConfig')
        # no algo config
        return clf_or_wrapper
    # sklearn clf path
    if algo_config is None:
        return clf_or_wrapper()  # activate
    else:
        return clf_or_wrapper(**dict(algo_config))
def fill_fit_config_clf(clf_or_wrapper,
                        X,
                        y,
                        fit_config: Optional[Union[BaseConfig, Dict]] = None,
                        ):
    """Fit a configured classifier/wrapper on (X, y), applying fit_config.

    Torch-lightning wrappers are fed DataLoaders built by
    ``torchlighting_data_helper``; keras wrappers and sklearn-style
    estimators are fitted on the raw arrays. ``fit_config`` may be None,
    a BaseConfig, or a plain dict of keyword arguments for ``fit``.
    """
    from pyemits.core.ml.regression.nn import torchlighting_data_helper
    # nn wrapper
    if isinstance(clf_or_wrapper, NeuralNetworkWrapperBase):
        dl_train, dl_val = torchlighting_data_helper(X, y)
        if fit_config is None:
            # pytorch_lightning path
            if isinstance(clf_or_wrapper, TorchLightningWrapper):
                return clf_or_wrapper.fit(dl_train, dl_val)
            # keras path
            return clf_or_wrapper.fit(X, y)
        if isinstance(fit_config, BaseConfig):
            if isinstance(clf_or_wrapper, TorchLightningWrapper):
                return clf_or_wrapper.fit(dl_train, dl_val, **dict(fit_config))
            # keras path
            return clf_or_wrapper.fit(X, y, **dict(fit_config))
        elif isinstance(fit_config, Dict):
            if isinstance(clf_or_wrapper, TorchLightningWrapper):
                return clf_or_wrapper.fit(dl_train, dl_val, **fit_config)
            # keras path
            return clf_or_wrapper.fit(X, y, **fit_config)
    # sklearn/xgboost/lightgbm clf
    else:
        if fit_config is None:
            return clf_or_wrapper.fit(X, y)
        else:
            assert isinstance(fit_config, BaseConfig), "fig_config type not matched"
            return clf_or_wrapper.fit(X, y, **dict(fit_config))
class RegTrainer(BaseTrainer):
    """All-in-one regression trainer covering sklearn/xgboost/lightgbm
    classes (by name) and keras / pytorch-lightning wrapper objects."""

    def __init__(self,
                 algo: List[Union[str, Any]],
                 algo_config: List[Optional[BaseConfig]],
                 raw_data_model: RegressionDataModel,
                 other_config: Dict[str, Union[List, BaseConfig, Any]] = {}):
        """
        universal class for regression model training,
        all-in-one training including sklearn, xgboost, lightgbm, keras, pytorch_lightning

        you are not required to fill the algo config if you have idea on algo_config
        the algo config is designed for people to config their model based on the configuration that provided in config_model
        so that people can easily config their model during creation

        for Pytorch_lightning user, pls configured your model before use this. at that moment, no algo_config is

        Parameters
        ----------
        algo: List[str]
            the machine learning algorithm, any machine learning model that have fit/predict can used in here

        algo_config: List[BaseConfig] or List[None]
            the respective config model of algo

        raw_data_model: RegressionDataModel
            data model obj, stores data and meta data

        other_config: BaseConfig
            other global config, shall be used in its sub-class
        """
        super(RegTrainer, self).__init__(algo, algo_config)
        # raise_if_value_not_contains(algo, list(RegModelContainer.keys()))
        # NOTE(review): `other_config={}` is a shared mutable default
        # argument; safe only as long as callers never mutate it in place.
        self.raw_data_model = raw_data_model
        self.other_config = other_config
        # filled by _fit() with (algo-name, fitted-model) tuples
        self.clf_models = SliceableDeque()
        self._is_algo_valid()
        self._is_algo_config_valid()

    def _is_algo_valid(self):
        # Every algo entry must be a known container key or an NN wrapper.
        for item in self._algo:
            if not isinstance(item, (str, NeuralNetworkWrapperBase)):
                raise TypeError('must be str or WrapperBase')
            if isinstance(item, str):
                raise_if_value_not_contains([item], list(RegModelContainer.keys()))

    def _is_algo_config_valid(self):
        # Config entries may be None, a BaseConfig, or a plain dict.
        for item in self._algo_config:
            if item is None:
                continue  # skip to next loop
            if not isinstance(item, (BaseConfig, Dict)):
                raise TypeError('Only accept ConfigBase or Dict as input')
            # no checking when model is object, which directly passing it

    def is_config_exists(self, config_key: str):
        """Return True when other_config has a non-None entry for config_key."""
        config_item = self.other_config.get(config_key, None)
        if config_item is None:
            return False
        return True

    def get_fill_fit_config(self):
        """Return a per-algo list of fit configs; pads with None when the
        caller supplied no 'fit_config' in other_config."""
        fit_config = self.other_config.get('fit_config', None)
        if isinstance(fit_config, list):
            assert len(fit_config) == len(self._algo), 'length not matched'
            return fit_config
        elif fit_config is None:
            fit_config_ = []  # rename variable
            for i in range(len(self._algo)):
                fit_config_.append(None)
            fit_config = fit_config_  # pointer,
            return fit_config
        else:
            raise TypeError('fit config not a list type')

    def _fit(self):
        # Train each algo on the full data set and collect the fitted models.
        X = self.raw_data_model.X_data
        y = self.raw_data_model.y_data
        # make sure y is 1D array in RegTrainer

        fit_config = self.get_fill_fit_config()
        for n, (algo, algo_config) in enumerate(zip(self._algo, self._algo_config)):
            clf = fill_algo_config_clf(_get_reg_model(algo), algo_config)
            fill_fit_config_clf(clf, X, y, fit_config[n])
            self.clf_models.append((str(algo), clf))
        return
class ParallelRegTrainer(RegTrainer):
    """RegTrainer variant that fits each algorithm in a separate joblib
    worker process (one single-algo RegTrainer per worker)."""

    def __init__(self,
                 algo: List[str],
                 algo_config: List[BaseConfig],
                 raw_data_model: RegressionDataModel,
                 other_config: Dict[str, Union[List, BaseConfig, Any]] = {}):
        """
        handy function to realize parallel training

        Parameters
        ----------
        algo: List[str]
            the machine learning algorithm, any machine learning model that have fit/predict can used in here

        algo_config: List[BaseConfig] or List[None]
            the respective config model of algo

        raw_data_model: RegressionDataModel
            data model obj, stores data and meta data

        other_config: BaseConfig
            other global config, shall be used in its sub-class
        """
        super(ParallelRegTrainer, self).__init__(algo, algo_config, raw_data_model, other_config)

    def _fit(self):
        from joblib import Parallel, delayed
        # -1 = use all available cores
        parallel = Parallel(n_jobs=-1)

        def _get_fitted_trainer(algo: List,
                                algo_config: List[BaseConfig],
                                raw_data_model: RegressionDataModel,
                                other_config: Dict[str, BaseConfig] = {}):
            # One worker fits a single-algo RegTrainer and returns it.
            trainer = RegTrainer(algo, algo_config, raw_data_model, other_config)
            trainer.fit()  # fit config auto filled by RegTrainer, no need to handle
            return trainer

        out: List[RegTrainer] = parallel(
            delayed(_get_fitted_trainer)([algo_], [algo_config_], self.raw_data_model, self.other_config) for
            algo_, algo_config_ in
            zip(self._algo, self._algo_config))
        # Collect each sub-trainer's clf_models deque.
        for obj in out:
            self.clf_models.append(obj.clf_models)
        return

    def fit(self):
        return self._fit()
class MultiOutputRegTrainer(RegTrainer):
    """
    machine learning based multioutput regression trainer

    bring forecasting power into machine learning model,
    forecasting is not only the power of deep learning
    """

    def __init__(self,
                 algo: List[Union[str, Any]],
                 algo_config: List[Optional[BaseConfig]],
                 raw_data_model: RegressionDataModel,
                 other_config: Dict[str, Union[List, BaseConfig, Any]] = {},
                 parallel_n_jobs: int = -1):
        super(MultiOutputRegTrainer, self).__init__(algo, algo_config, raw_data_model, other_config)
        # forwarded to sklearn's MultiOutputRegressor (-1 = all cores)
        self.parallel_n_jobs = parallel_n_jobs

    def _fit(self):
        fit_config = self.get_fill_fit_config()
        from sklearn.multioutput import MultiOutputRegressor
        X = self.raw_data_model.X_data
        y = self.raw_data_model.y_data
        for n, (algo, algo_config) in enumerate(zip(self._algo, self._algo_config)):
            clf = fill_algo_config_clf(_get_reg_model(algo), algo_config)  # clf already activated
            # Wrap the single-output estimator so it fits one model per
            # target column of y.
            clf = MultiOutputRegressor(estimator=clf, n_jobs=self.parallel_n_jobs)
            fill_fit_config_clf(clf, X, y, fit_config[n])
            self.clf_models.append((str(algo), clf))
        return
class KFoldCVTrainer(RegTrainer):
    """K-fold cross-validation trainer: trains all algorithms (in parallel)
    on the training indices of every fold and records the fold splits."""

    def __init__(self,
                 algo: List[Union[str, Any]],
                 algo_config: List[Optional[BaseConfig]],
                 raw_data_model: RegressionDataModel,
                 other_config: Dict[str, Union[List, BaseConfig, Any]] = {},
                 ):
        super(KFoldCVTrainer, self).__init__(algo, algo_config, raw_data_model, other_config)

    def _fit(self):
        from pyemits.core.ml.cross_validation import KFoldCV
        # Optional 'kfold_config' entry in other_config customises the split.
        kfold_config = self.other_config.get('kfold_config', None)
        if kfold_config is not None:
            kfold_cv = KFoldCV(self.raw_data_model, kfold_config)
        else:
            kfold_cv = KFoldCV(self.raw_data_model)
        splitted_kfold = kfold_cv.split()
        for n, item in enumerate(splitted_kfold):
            # Record which (train, test) index pair made up this fold.
            self._meta_data_model.add_meta_data('kfold_record', [item])
            train_idx = item[0]
            test_idx = item[1]
            X_ = self.raw_data_model.X_data[train_idx]
            y_ = self.raw_data_model.y_data[train_idx]
            sliced_data_model = RegressionDataModel(X_, y_)
            trainer = ParallelRegTrainer(self._algo, self._algo_config, sliced_data_model,
                                         other_config=self.other_config)
            trainer.fit()
            self.clf_models.append((f'kfold_{n}', trainer.clf_models))
        return
| 40.471947 | 125 | 0.644133 |
61e7231e5da397e138846e32322894665e310b28 | 7,092 | py | Python | network_core/network_graph.py | markusgl/SocialCompanion | e816af21c600b33dbcac25d088d4d75957d0349a | [
"MIT"
] | 2 | 2018-12-21T12:55:21.000Z | 2019-05-29T06:35:58.000Z | network_core/network_graph.py | markusgl/SocialCompanion | e816af21c600b33dbcac25d088d4d75957d0349a | [
"MIT"
] | 8 | 2019-12-16T21:08:36.000Z | 2021-03-31T18:58:35.000Z | network_core/network_graph.py | markusgl/SocialCompanion | e816af21c600b33dbcac25d088d4d75957d0349a | [
"MIT"
] | null | null | null | """
knowledge graph representation using neo4j
this class uses py2neo with will be the final version
"""
import os
import json
from py2neo import Graph, Relationship, NodeMatcher, Node
from network_core.ogm.node_objects import Me, Contact, Misc
# Node labels used when creating graph nodes.
USERTYPE = "User"
CONTACTTYPE = "Contact"
# Directory of this module; used to locate neo4j_creds.json.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# German relationship keywords -> neo4j relationship type names.
relationships = {'freund': 'FRIEND',
                 'schwester': 'SISTER',
                 'bruder': 'BROTHER',
                 'mutter': 'MOTHER',
                 'vater': 'FATHER',
                 'tochter': 'DAUGHTER',
                 'sohn': 'SON',
                 'enkel': 'GRANDCHILD',
                 'enkelin': 'GRANDCHILD'}
class NetworkGraph:
    """py2neo-backed knowledge graph of the user's social network.

    Stores a central ``Me`` node plus ``Contact``/``Misc`` nodes and the
    relationships between them (German keywords, see the module-level
    ``relationships`` dict) in a local neo4j instance.
    """

    def __init__(self):
        # Credentials are kept out of the source tree in neo4j_creds.json
        # next to this package.
        path = os.path.realpath(ROOT_DIR + '/neo4j_creds.json')
        with open(path) as f:
            data = json.load(f)
            username = data['username']
            password = data['password']
        self.graph = Graph(host="localhost", username=username, password=password)

    def add_node_by_name(self, name, age=None, gender=None, node_type="PERSON"):
        """Create and return a new node; the reserved name 'USER' gets the
        label 'user' instead of *node_type*."""
        if name == 'USER':
            node_type = 'user'
        node = Node(node_type, name=name, age=age, gender=gender)
        self.graph.create(node)
        return node

    def get_node_by_name(self, name):
        """Return the first node whose ``name`` property matches, or None."""
        matcher = NodeMatcher(self.graph)
        node = matcher.match(name=name).first()
        return node

    def add_relationship(self, node1, node2, rel_type='KNOWS'):
        """Create a *rel_type* relationship between two nodes identified by
        name, creating either node first if it does not exist yet."""
        first_node = self.get_node_by_name(node1)
        second_node = self.get_node_by_name(node2)
        if not first_node:
            first_node = self.add_node_by_name(node1)
        if not second_node:
            second_node = self.add_node_by_name(node2)
        self.graph.create(Relationship(first_node, rel_type, second_node))

    def add_rel_tuple(self, ent1, ent2):
        """Create two Misc nodes named *ent1*/*ent2* and link them to each
        other via their ``related_ent`` relationship."""
        # define nodes
        node1 = Misc()
        node1.name = ent1
        node2 = Misc()
        node2.name = ent2
        # add relationship to nodes
        node1.related_ent.add(node2)
        node2.related_ent.add(node1)
        # save to neo4j
        self.graph.create(node1)
        self.graph.create(node2)

    def search_node_by_name(self, node_name):
        """Return the ``name`` property of the matching node (whitespace
        stripped from the search term), or None when not found."""
        # replace white spaces
        _node_name = node_name.replace(" ", "")
        # BUG FIX: the original query returned the whole node ('RETURN n;')
        # but then indexed the result with the key 'n.name', which can never
        # exist; return the property directly so the key matches.
        query = 'MATCH (n) WHERE n.name={node_name} RETURN n.name;'
        result = self.graph.run(query,
                                node_name=_node_name,
                                ).data()
        if result:
            node = result[0]['n.name']
        else:
            node = None
        return node

    def add_me_w_firstname(self, username, age="", gender=""):
        """Create and push a central 'Me' node keyed by first name.

        :param username: first name of the central user (title-cased)
        :return: the pushed Me object (see ogm pkg)
        """
        # OGM
        me = Me()
        me.firstname = username.title()
        me.lastname = ""
        me.age = age
        me.gender = gender
        self.graph.push(me)
        return me

    def add_me_w_lastname(self, username, age="", gender=""):
        """Create and push a central 'Me' node keyed by last name.

        :param username: last name of the central user (title-cased)
        :return: the pushed Me object (see ogm pkg)
        """
        # OGM
        me = Me()
        me.firstname = ""
        me.lastname = username.title()
        me.age = age
        me.gender = gender
        self.graph.push(me)
        return me

    def get_me_by_firstname(self, me_name):
        """Return a Me object populated with the stored firstname, or None
        when no Me node with that first name exists."""
        # Parameterised query (the original built it by string concatenation).
        result = self.graph.run('MATCH (n:Me) WHERE n.firstname={name} RETURN n.firstname',
                                name=me_name.title()).data()
        me = Me()
        if result:
            me.firstname = result[0]['n.firstname']
            return me
        else:
            return None

    def get_me_by_lastname(self, me_name):
        """Return a Me object populated with the stored lastname, or None
        when no Me node with that last name exists."""
        # Parameterised query (the original built it by string concatenation).
        result = self.graph.run('MATCH (n:Me) WHERE n.lastname={name} RETURN n.lastname',
                                name=me_name.title()).data()
        me = Me()
        if result:
            # BUG FIX: the original assigned the lastname value to
            # me.firstname; store it in the matching attribute.
            me.lastname = result[0]['n.lastname']
            return me
        else:
            return None

    def add_contact(self, me_name, contactname, relationship):
        """Attach a new Contact to the central 'Me' user.

        :param me_name: first name of the central user
        :param contactname: first name of the new contact
        :param relationship: a German keyword from the module-level
            ``relationships`` dict ('freund', 'bruder', ...)
        """
        # select central user 'Me'
        me = self.get_me_by_firstname(me_name)
        contact = Contact()
        contact.firstname = contactname
        # BUG FIX: the original translated the German keyword to its English
        # relationship type *before* these comparisons, so no branch could
        # ever match and the contact was never linked. Compare the raw
        # keyword instead.
        if relationship == 'freund':
            me.friend.add(contact)
            contact.friend.add(me)
        elif relationship == 'bruder':
            me.brother.add(contact)
            contact.brother.add(me)
        elif relationship == 'schwester':
            me.sister.add(contact)
            contact.sister.add(me)
        elif relationship == 'mutter':
            me.mother.add(contact)
        elif relationship == 'vater':
            me.father.add(contact)
        elif relationship == 'sohn':
            me.son.add(contact)
        elif relationship == 'tochter':  # original had a typo: 'tocher'
            me.daughter.add(contact)
        # TODO other relationships (enkel/enkelin)
        self.graph.push(me)

    def search_relationship_by_contactname(self, me_name, contact_name):
        """Return the relationship type between 'Me' and the named contact,
        or None when no such pair is linked."""
        mename = me_name.replace(" ", "")
        contactname = contact_name.replace(" ", "")
        query = 'MATCH (n:Me)-[r]->(c:Contact) WHERE n.firstname={me_name} AND c.firstname={contactname} RETURN type(r);'
        result = self.graph.run(query,
                                me_name=mename,
                                contactname=contactname
                                ).data()
        if result:
            relationship = result[0]['type(r)']
        else:
            relationship = None
        return relationship

    def search_contactname_by_relationship(self, me_name, relationship):
        """Return the firstname of the first Contact linked via the given
        German relationship keyword, or None when unknown or absent.

        NOTE(review): *me_name* is not used to narrow the match — the query
        matches any Me node; confirm whether per-user filtering is needed.
        """
        # BUG FIX: the original used relationships[relationship], which
        # raised KeyError for unknown keywords and made the guard below
        # unreachable; .get() lets unknown keywords fall through to None.
        rel_type = relationships.get(relationship)
        if not rel_type:
            return None
        # Relationship types cannot be query parameters in Cypher; rel_type
        # comes from the fixed module-level dict, so concatenation is safe.
        result = self.graph.run('MATCH (u:Me)-[:' + rel_type + ']->(c:Contact) RETURN c.firstname;').data()
        if result:
            contactname = result[0]['c.firstname']
        else:
            contactname = None
        return contactname
| 30.437768 | 129 | 0.563593 |
61e83a0c30e6a67dbfeb574d16e0f027af82160b | 538 | py | Python | randutils/lists.py | JoelLefkowitz/randutils | 91bfb6a56676675edb241f11b602a46880520c72 | [
"MIT"
] | 1 | 2021-08-03T17:34:31.000Z | 2021-08-03T17:34:31.000Z | randutils/lists.py | JoelLefkowitz/randutils | 91bfb6a56676675edb241f11b602a46880520c72 | [
"MIT"
] | null | null | null | randutils/lists.py | JoelLefkowitz/randutils | 91bfb6a56676675edb241f11b602a46880520c72 | [
"MIT"
] | null | null | null | import numpy as np
from .chance import by_chance
from .exceptions import EmptyListError
def pop_random_entry(lst):
    """Remove and return a uniformly random element of *lst* (mutates it).

    Raises EmptyListError when *lst* is empty.
    """
    if not lst:
        raise EmptyListError
    return lst.pop(np.random.randint(0, len(lst)))
def pick_random_entry(lst):
    """Return (without removing) a uniformly random element of *lst*.

    Raises EmptyListError when *lst* is empty.
    """
    if not lst:
        raise EmptyListError
    return lst[np.random.randint(0, len(lst))]
def randomly_filter(lst, weight=0.1):
    """Keep each element of *lst* independently when by_chance(weight)
    returns true; drops the rest."""
    return list(filter(lambda _item: by_chance(weight), lst))
def scramble(lst):
    """Return a new list with the elements of *lst* in random order.

    BUG FIX: the original keyed the sort on ``random.random()`` but this
    module never imports ``random``, so every call raised NameError. The
    already-imported numpy RNG is used instead.
    """
    return sorted(lst, key=lambda _item: np.random.random())
| 18.551724 | 53 | 0.689591 |
61ea28b84ee81d7761635919c06d71cde4b781c4 | 2,355 | py | Python | src/train_and_evaluate.py | rajeevteejwal/mlops_wine_quality | 970ce27712932ca535309230da69fc5c29d82c38 | [
"MIT"
] | null | null | null | src/train_and_evaluate.py | rajeevteejwal/mlops_wine_quality | 970ce27712932ca535309230da69fc5c29d82c38 | [
"MIT"
] | null | null | null | src/train_and_evaluate.py | rajeevteejwal/mlops_wine_quality | 970ce27712932ca535309230da69fc5c29d82c38 | [
"MIT"
] | null | null | null | import os
import pandas as pd
from sklearn.linear_model import ElasticNet
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
import argparse
import numpy as np
import json
import joblib
from get_data import read_config
def evaluate_metrics(actual, pred):
    """Return the (r2, rmse, mae) regression metrics for the given
    true/predicted values, using the sklearn metric functions."""
    rmse = np.sqrt(mean_squared_error(actual, pred))
    return r2_score(actual, pred), rmse, mean_absolute_error(actual, pred)
def train_and_evaluate(config_path):
    """Train an ElasticNet on the configured train split, print metrics on
    the test split, persist scores/params as JSON, and save the fitted
    model with joblib.

    :param config_path: path to the YAML params file understood by
        ``read_config`` (split paths, target column, model params, report
        and model output locations).
    """
    config = read_config(config_path)
    train_data_path = config["split_data"]["train_path"]
    test_data_path = config["split_data"]["test_path"]
    output_col = config["base"]["target_col"]
    random_state = config["base"]["random_state"]

    train_dataset = pd.read_csv(train_data_path,sep=",", encoding="utf-8")
    test_dataset = pd.read_csv(test_data_path,sep=",", encoding="utf-8")

    # Split features from the target column on both sets.
    y_train = train_dataset[[output_col]]
    x_train = train_dataset.drop([output_col],axis=1)

    y_test = test_dataset[[output_col]]
    x_test = test_dataset.drop([output_col],axis=1)

    alpha = config["estimators"]["ElasticNet"]["params"]["alpha"]
    l1_ratio = config["estimators"]["ElasticNet"]["params"]["l1_ratio"]

    lr = ElasticNet(alpha=alpha,l1_ratio=l1_ratio,random_state=random_state)
    lr.fit(x_train,y_train)

    prediction = lr.predict(x_test)
    r2, rmse, mae = evaluate_metrics(y_test,prediction)

    # NOTE(review): the message below is missing its closing ')'.
    print(f"ElasticNet model (alpha: {alpha}, l1_ratio: {l1_ratio}")
    print(f" RMSE: {rmse}")
    print(f" MAE: {mae}")
    print(f" R2 Score: {r2}")

    # Write metric and parameter reports as JSON.
    scores_file = config["reports"]["scores"]
    params_file = config["reports"]["params"]

    with open(scores_file,"w") as f:
        scores = {
            "r2":r2,
            "rmse":rmse,
            "mae":mae
        }
        json.dump(scores,f,indent=4)

    with open(params_file,"w") as f:
        params = {
            "alpha":alpha,
            "l1_ratio":l1_ratio
        }
        json.dump(params,f,indent=4)

    # Persist the fitted model for later prediction/serving.
    model_dir = config["model_dir"]
    os.makedirs(model_dir,exist_ok=True)
    model_path = os.path.join(model_dir,"model.joblib")
    joblib.dump(lr,model_path)
if __name__ == '__main__':
    # Command-line entry point: read the params file location and train.
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", default="params.yaml")
    cli_args = parser.parse_args()
    train_and_evaluate(config_path=cli_args.config)
| 31.824324 | 77 | 0.675159 |
61ebdb6920b4b4c3e3a8b0b2f9c1a74ed61083fb | 961 | py | Python | examples/plot_magnitudes.py | zsiciarz/pygcvs | ed5522ab9cf9237592a6af7a0bc8cad079afeb67 | [
"MIT"
] | null | null | null | examples/plot_magnitudes.py | zsiciarz/pygcvs | ed5522ab9cf9237592a6af7a0bc8cad079afeb67 | [
"MIT"
] | null | null | null | examples/plot_magnitudes.py | zsiciarz/pygcvs | ed5522ab9cf9237592a6af7a0bc8cad079afeb67 | [
"MIT"
] | null | null | null | """
Visualisation of maximum/minimum magnitude for GCVS stars.
"""
import sys
import matplotlib.pyplot as plot
from pygcvs import read_gcvs
if __name__ == '__main__':
    # The GCVS catalogue file (iii.dat) is the single required argument.
    try:
        gcvs_file = sys.argv[1]
    except IndexError:
        print('Usage: python plot_magnitudes.py <path to iii.dat>')
    else:
        # Collect (min, max) magnitude pairs for stars that have both values.
        min_magnitudes = []
        max_magnitudes = []
        for star in read_gcvs(gcvs_file):
            if star['min_magnitude'] and star['max_magnitude']:
                min_magnitudes.append(star['min_magnitude'])
                max_magnitudes.append(star['max_magnitude'])
        # Scatter plot: one red dot per star.
        plot.title('GCVS variable star magnitudes')
        plot.plot(min_magnitudes, max_magnitudes, 'ro')
        plot.xlabel('Min magnitude')
        plot.ylabel('Max magnitude')
        # invert axes because brightest stars have lowest magnitude value
        plot.gca().invert_xaxis()
        plot.gca().invert_yaxis()
        plot.savefig('magnitudes.png')
| 30.03125 | 73 | 0.64204 |
61ebe9703928c7c7be701af932bf4a612970dd3f | 382 | py | Python | InvenTree/stock/migrations/0028_auto_20200421_0724.py | ArakniD/InvenTree | 0ebf2ebd832b2d736e895abe054ca56bfd1cc477 | [
"MIT"
] | 656 | 2017-03-29T22:06:14.000Z | 2022-03-30T11:23:52.000Z | InvenTree/stock/migrations/0028_auto_20200421_0724.py | ArakniD/InvenTree | 0ebf2ebd832b2d736e895abe054ca56bfd1cc477 | [
"MIT"
] | 1,545 | 2017-04-10T23:26:04.000Z | 2022-03-31T18:32:10.000Z | InvenTree/stock/migrations/0028_auto_20200421_0724.py | fablabbcn/InvenTree | 1d7ea7716cc96c6ffd151c822b01cd1fb5dcfecd | [
"MIT"
] | 196 | 2017-03-28T03:06:21.000Z | 2022-03-28T11:53:29.000Z | # Generated by Django 3.0.5 on 2020-04-21 07:24
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames the StockItem field
    # 'sales_order' to 'sales_order_line'.

    dependencies = [
        ('stock', '0027_stockitem_sales_order'),
    ]

    operations = [
        migrations.RenameField(
            model_name='stockitem',
            old_name='sales_order',
            new_name='sales_order_line',
        ),
    ]
| 20.105263 | 48 | 0.604712 |
61ec2ee4a4b5c284984cd0be3baf3b3ee50702c4 | 1,595 | py | Python | weideshop/urls.py | michaelgichia/weideshop | 01a408b358b9ad7d52747b42c36dc16206b4b915 | [
"BSD-2-Clause"
] | null | null | null | weideshop/urls.py | michaelgichia/weideshop | 01a408b358b9ad7d52747b42c36dc16206b4b915 | [
"BSD-2-Clause"
] | null | null | null | weideshop/urls.py | michaelgichia/weideshop | 01a408b358b9ad7d52747b42c36dc16206b4b915 | [
"BSD-2-Clause"
] | null | null | null | """weideshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from weideshop.products.views import CatalogueListView,CatalogueDetailView
from weideshop.public.views import IndexView
# Root URL routes for the weideshop project.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$',IndexView.as_view(), name='home'),
    url(r'^catalog/$', CatalogueListView.as_view(), name='catalogue'),
    # url(r'^catalog/(?P<product_slug>[-\w]+)/$', CatalogueDetailView.as_view(), name='detail'),
    url(r'^category/', include('weideshop.products.urls', namespace='products-app', app_name='products')),
]
# Serve static and media files through Django (development-style setup).
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # django-debug-toolbar routes are only mounted when DEBUG is on.
    import debug_toolbar
    urlpatterns += [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ]
61ed3298ce258d1708cb601b97ca2bb3d32448c9 | 18,023 | py | Python | netor/tinydb/scripts/netorconf.py | aegiacometti/neto | 4169a93a4d789facfe9a41d214b1a6c15e8f2fb9 | [
"Apache-2.0"
] | 1 | 2020-01-02T04:31:11.000Z | 2020-01-02T04:31:11.000Z | netor/tinydb/scripts/netorconf.py | aegiacometti/neto | 4169a93a4d789facfe9a41d214b1a6c15e8f2fb9 | [
"Apache-2.0"
] | null | null | null | netor/tinydb/scripts/netorconf.py | aegiacometti/neto | 4169a93a4d789facfe9a41d214b1a6c15e8f2fb9 | [
"Apache-2.0"
] | 1 | 2021-02-23T04:34:48.000Z | 2021-02-23T04:34:48.000Z | #!/usr/bin/env python3
import os
import sys
import configparser
import fileinput
import netorlogging
import datetime
from shutil import copyfile
def _netor_config():
    """
    It is used for updating the Netor home directory in the configuration files and scripts.
    This is useful, if you want to have 2 working installations of Netor in completely independent directories.

    It will update the ``NETOR_HOME_DIRECTORY`` variable in the ``netor.conf`` file,
    and also in the following Netor python scripts which then works with the TinyDB:

    # netor/tinydb/scripts/listdb.py
    # netor/tinydb/scripts/pushcustdb.py
    # netor/tinydb/scripts/worker.py
    # netor/tinydb/scripts/switchdb.py

    Later it will also update the ``hosts_file`` variable in the following bash scripts:

    # bin/netor-ping
    # bin/netor-traceroute

    :return: nothing
    """
    # The $NETOR environment variable is the single source of truth for the
    # installation directory.
    _NETOR_HOME_DIRECTORY = os.getenv('NETOR')
    config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
    netor_config_path_name = _NETOR_HOME_DIRECTORY + "netor/netor.config"
    config.read(netor_config_path_name)

    if os.path.isdir(_NETOR_HOME_DIRECTORY):
        answer = input("\nDefault \"$NETOR/netor\" directory found at:\n" + str(_NETOR_HOME_DIRECTORY) +
                       "\nDo you want to keep it (y/n): ").lower()
        if answer == "y":
            # Keep the current directory: rewrite netor.config, then update
            # the Ansible and TinyDB-related files to match.
            print("Keeping same configuration\n")
            try:
                config['Netor']['netor_home_directory'] = _NETOR_HOME_DIRECTORY
            except KeyError:
                # The [Netor] section is missing -> config was never cloned.
                print("\nConfiguration files do no exist, clone the previous directory before start the changes\n")
                sys.exit(1)
            with open(netor_config_path_name, 'w') as configfile:
                config.write(configfile)
            _update_ansible(_NETOR_HOME_DIRECTORY)
            tinydb_log_file = config['TinyDB']['tinydb_log_file']
            _update_config(tinydb_log_file, __file__, _NETOR_HOME_DIRECTORY)
            sys.exit()
        elif answer == "n":
            # Changing the directory is a manual step: the user must edit
            # /etc/environment and rerun this script.
            print('If you want to change the $NETOR directory, you must first update the $NETOR environment variable')
            print('Set $NETOR environment value by adding/changing the line at the end of the file /etc/environment')
            print('NETOR=\"/my/dir/netor/\"')
            print('Restart the system and execute this script again')
        else:
            # NOTE(review): '/n' in the message below looks like a typo
            # for '\n' (runtime string, left unchanged here).
            print("Invalid option/n")
            sys.exit()
    else:
        # $NETOR does not point at an existing directory.
        print("\nDefault \"$NETOR/netor\" NOT found")
        print('Set $NETOR environment value by adding/changing the line at the end of the file /etc/environment')
        print('NETOR=\"/my/dir/netor/\"')
        print('Restart the system and execute this script again')
def _update_ansible(netor_home_directory):
    """
    Update Ansible configuration files.

    :param netor_home_directory: Netor home directory used for updating the configuration files
    :return: nothing
    """
    config_file = os.environ['HOME'] + '/.ansible.cfg'
    inventory_path = netor_home_directory + 'netor/ansible/hosts'
    # (search text, replacement text) pairs, applied in the same order as
    # the original individual calls.
    substitutions = (
        ('#inventory ', '= ' + inventory_path),
        ('transport', ' = paramiko'),
        ('host_key_auto_add', ' = True'),
        ('host_key_checking', ' = False'),
        ('inventory = ', inventory_path),
    )
    for search_text, replacement in substitutions:
        replace_static_vars_scripts(config_file, search_text, replacement, '', '')
    print('\nNetor home directory replaced in Ansible.')
def _backup_filename(new_netor_home_directory, filename):
"""
Create a backup of the specified configuration file
:param new_netor_home_directory: it is the actual new Neto home directory to be updated on files
:param filename: file name to backup
:return: nothing
"""
print('\nBacking up ' + filename + ' to ' + new_netor_home_directory + 'netor/salt/backup/')
source = new_netor_home_directory + 'netor/salt/config/' + filename
destination = new_netor_home_directory + 'netor/salt/backup/' + filename + "_" + \
datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
copyfile(source, destination)
def _create_master_config_file(new_netor_home_directory, filename):
"""
Create new Salt master configuration file.
:param new_netor_home_directory: it is the actual new Neto home directory to be updated on files
:param filename: filename to backup
:return: nothing
"""
full_path_filename = new_netor_home_directory + 'netor/salt/config/' + filename
file = open(full_path_filename, '+w')
file.write('# for salt-sproxy\n')
file.write('use_existing_proxy: true\n')
file.write('##### Large-scale tuning settings #####\n')
file.write('##########################################\n')
file.write('#max_open_files: 100000\n')
file.write('\n')
file.write('##### Security settings #####\n')
file.write('# Enable "open mode", this mode still maintains encryption, but turns off\n')
file.write('# authentication, this is only intended for highly secure environments or for\n')
file.write('# the situation where your keys end up in a bad state. If you run in open mode\n')
file.write('# you do so at your own risk!\n')
file.write('open_mode: True\n')
file.write('\n')
file.write('# Enable auto_accept, this setting will automatically accept all incoming\n')
file.write('# public keys from the minions. Note that this is insecure.\n')
file.write('auto_accept: True\n')
file.write('\n')
file.write('# The path to the master\'s configuration file.\n')
file.write('conf_file: ' + new_netor_home_directory + 'netor/salt/config/master\n')
file.write('\n')
file.write('# Directory used to store public key data:\n')
file.write('pki_dir: ' + new_netor_home_directory + 'netor/salt/config/pki/master\n')
file.write('\n')
file.write('##### File Server settings #####\n')
file.write('file_roots:\n')
file.write(' base:\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/states/\n')
file.write('\n')
file.write('##### Pillar settings #####\n')
file.write('pillar_roots:\n')
file.write(' base:\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/states/\n')
file.write('engines:\n')
file.write(' - slack:\n')
file.write(' token: YOUR-TOKEN-GOES-HERE\n')
file.write(' control: true\n')
file.write(' fire_all: False\n')
file.write('######## CREATE YOUR OWN POLICIES FOR COMMAND PERMISSIONS ########\n')
file.write(' groups:\n')
file.write(' default:\n')
file.write(' users:\n')
file.write(' - \'*\'\n')
file.write(' commands:\n')
file.write(' - \'*\'\n')
file.close()
def _update_master_config_file(new_netor_home_directory, filename):
    """
    Update Salt master configuration file.

    Currently "update" means: back up the existing file, then regenerate it
    from scratch with the new home directory baked into every path.
    :param new_netor_home_directory: Location where the file is located
    :param filename: file name
    :return: nothing
    """
    _backup_filename(new_netor_home_directory, filename)
    # pending to develop update of the file with the new directory
    _create_master_config_file(new_netor_home_directory, filename)
def _create_minion_config_file(new_netor_home_directory, filename):
"""
Create Salt minion configuration file.
:param new_netor_home_directory: Location where the file will be located
:param filename: file name
:return: nothing
"""
full_path_filename = new_netor_home_directory + 'netor/salt/config/' + filename
file = open(full_path_filename, '+w')
file.write('##### Primary configuration settings #####\n')
file.write('master: localhost\n')
file.write('\n')
file.write('# The path to the minion\'s configuration file.\n')
file.write('conf_file: ' + new_netor_home_directory + 'netor/salt/config/minion\n')
file.write('# The directory to store the pki information in\n')
file.write('pki_dir: ' + new_netor_home_directory + 'netor/salt/config/pki/minion\n')
file.write('\n')
file.write('##### File Directory Settings #####\n')
file.write('file_roots:\n')
file.write(' base:\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
file.write(' - ' + new_netor_home_directory + 'neto/salt/config/pillar/states/\n')
file.write('\n')
file.write('pillar_roots:\n')
file.write(' base:\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/ states /\n')
file.write('\n')
file.write('###### Security settings #####\n')
file.write('# Enable "open mode", this mode still maintains encryption, but turns off\n')
file.write('# authentication, this is only intended for highly secure environments or for\n')
file.write('# the situation where your keys end up in a bad state. If you run in open mode\n')
file.write('# you do so at your own risk!\n')
file.write('open_mode: True\n')
file.close()
def _update_minion_config_file(new_netor_home_directory, filename):
    """
    Update Salt minion configuration file.

    Currently "update" means: back up the existing file, then regenerate it
    from scratch with the new home directory baked into every path.
    :param new_netor_home_directory: Location where the file is located
    :param filename: file name
    :return: nothing
    """
    _backup_filename(new_netor_home_directory, filename)
    # pending to develop update of the file with the new directory
    _create_minion_config_file(new_netor_home_directory, filename)
def _create_proxy_config_file(new_netor_home_directory, filename):
"""
Create Salt proxy configuration file.
:param new_netor_home_directory: Location where the file will be located
:param filename: file name
:return:
"""
full_path_filename = new_netor_home_directory + 'netor/salt/config/' + filename
file = open(full_path_filename, '+w')
file.write('##### Primary configuration settings #####\n')
file.write('\n')
file.write('master: localhost\n')
file.write('conf_file: ' + new_netor_home_directory + 'netor/salt/config/proxy\n')
file.write('mine_enabled: true # not required, but nice to have\n')
file.write('mine_functions:\n')
file.write(' net.ipaddrs: []\n')
file.write(' net.lldp: []\n')
file.write(' net.mac: []\n')
file.write(' net.arp: []\n')
file.write(' net.interfaces: []\n')
file.write('mine_interval: 5\n')
file.write('\n')
file.write('###### Thread settings #####\n')
file.write('multiprocessing: false\n')
file.write('\n')
file.write('##### File Directory Settings #####\n')
file.write('file_roots:\n')
file.write(' base:\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
file.write('pillar_roots:\n')
file.write(' base:\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
file.write('\n')
file.write('###### Security settings #####\n')
file.write('###########################################\n')
file.write('# Enable "open mode", this mode still maintains encryption, but turns off\n')
file.write('# authentication, this is only intended for highly secure environments or for\n')
file.write('# the situation where your keys end up in a bad state. If you run in open mode\n')
file.write('# you do so at your own risk!\n')
file.write('open_mode: True\n')
file.write('# The directory to store the pki information in\n')
file.write('pki_dir: ' + new_netor_home_directory + 'netor/salt/config/pki/proxy # not required - this separates '
'the proxy keys into a different directory\n')
file.close()
def _update_proxy_config_file(new_netor_home_directory, filename):
    """
    Update Salt proxy configuration file.

    Currently "update" means: back up the existing file, then regenerate it
    from scratch with the new home directory baked into every path.
    :param new_netor_home_directory: Directory where the file is located
    :param filename: file name
    :return: nothing
    """
    _backup_filename(new_netor_home_directory, filename)
    # pending to develop update of the file with the new directory
    _create_proxy_config_file(new_netor_home_directory, filename)
def _file_update_redirect(new_netor_home_directory, filename):
    """
    Update the configuration files.

    Dispatches to the master/minion/proxy updater based on the file name.
    :param new_netor_home_directory: Directory where the files are located
    :param filename: file name to update
    :return: nothing
    """
    # First matching keyword wins, so 'master' takes precedence over
    # 'minion'/'proxy' if several appear in the name.
    handlers = (
        ('master', _update_master_config_file),
        ('minion', _update_minion_config_file),
        ('proxy', _update_proxy_config_file),
    )
    for keyword, handler in handlers:
        if keyword in filename:
            handler(new_netor_home_directory, filename)
            return
    print('\nError while checking Salt master, minion and proxy configuration files')
    sys.exit(1)
def _file_create_redirect(new_netor_home_directory, filename):
    """
    Create the configuration files.

    Dispatches to the master/minion/proxy creator based on the file name.
    :param new_netor_home_directory: it is the actual new Neto home directory where to create the file
    :param filename: file name to create
    :return: nothing
    """
    # First matching keyword wins, mirroring _file_update_redirect.
    creators = (
        ('master', _create_master_config_file),
        ('minion', _create_minion_config_file),
        ('proxy', _create_proxy_config_file),
    )
    for keyword, creator in creators:
        if keyword in filename:
            creator(new_netor_home_directory, filename)
            return
    print('\nError while checking Salt master, minion and proxy configuration files')
    sys.exit(1)
def _create_update_master_minion_proxy(new_netor_home_directory, filename):
    """
    Update or create (if do not exists) Salt configuration files.
    :param new_netor_home_directory: it is the actual new Neto home directory to used in the process
    :param filename: file name to update
    :return: nothing
    """
    # Bug fix: the config files are created and backed up under
    # netor/salt/config/ (see _backup_filename and the _create_* helpers),
    # but the existence check probed netor/salt/, so an existing file was
    # never detected and the update/backup path was unreachable.
    full_salt_config_filename = new_netor_home_directory + 'netor/salt/config/' + filename
    if os.path.isfile(full_salt_config_filename):
        _file_update_redirect(new_netor_home_directory, filename)
    else:
        _file_create_redirect(new_netor_home_directory, filename)
def _update_config(tinydb_log_file, __file__, new_netor_home_directory):
    """
    Execute the actual updates in the files. Salt master, minion and proxy.

    Regenerates (or backs up and regenerates) the three Salt configuration
    files, prints follow-up instructions for the operator, and records the
    operation in the TinyDB log.
    :param tinydb_log_file: the filename to send the logging message after the operation is completed
    :param __file__: script name who is sending the message to log
        (NOTE(review): this parameter shadows the module-level ``__file__``
        attribute inside the function - consider renaming it.)
    :param new_netor_home_directory: it is the actual new Neto home directory to be updated on files
    :return: nothing
    """
    _create_update_master_minion_proxy(new_netor_home_directory, 'master')
    _create_update_master_minion_proxy(new_netor_home_directory, 'minion')
    _create_update_master_minion_proxy(new_netor_home_directory, 'proxy')
    print('\nNetor home directory replaced in salt master, minion and proxy.')
    # Operator guidance: PATH and NETOR environment updates cannot be done
    # from here, so print the manual steps.
    print("\nAdd or modified if necessary " + new_netor_home_directory + "bin to your .profile")
    print("  vi $HOME/.profile")
    print("  PATH=\"$PATH:" + new_netor_home_directory + "bin\n")
    print("\nAdd or modified if necessary " + new_netor_home_directory + " to /etc/environment")
    print("  sudo vi /etc/environment")
    print("  NETOR=\"$PATH:" + new_netor_home_directory)
    print("\nLogoff session or restart system, and login again.")
    print("\nATTENTION: If you are using Salt restart the daemons with \"netor-salt-restart\"\n")
    netorlogging.log_msg(tinydb_log_file, __file__,
                         "Netconf executed. Neto.config and static vars in scripts updated. ")
def replace_static_vars_scripts(filename, search, replace, delimiter, extra):
    """
    Replace line by line the ``NETOR_HOME_DIRECTORY`` static variable in scripts.

    Every line containing ``search`` is replaced wholesale with
    ``search + delimiter + replace + extra + delimiter``.
    :param filename: filename to review
    :param search: search pattern to look for
    :param replace: patter to replace
    :param delimiter: to add a delimiter surrounding the path names
    :param extra: add extra path information
    :return: nothing
    """
    # Line written in place of any line containing the search pattern.
    replacement_line = search + delimiter + replace + extra + delimiter
    try:
        # fileinput with inplace=True redirects stdout into the file.
        for current_line in fileinput.input(filename, inplace=True):
            if search in current_line:
                print(replacement_line, end="\n")
            else:
                print(current_line, end="")
    except FileNotFoundError:
        print("\nERROR File not found " + filename)
        print("Manually find systemd folder and file " + filename.split("/")[-1] +
              " and modify the parameter \"" + search + "\" in the file to point to " + replace + "\n")
    except PermissionError:
        print("\nERROR Permission denied to modify file " + filename)
        print("Manually modify the parameter -\"" + search + "\" in the file to point to " + replace)
def check_netor_config(netor_home_directory):
    """
    Verifies if the ``netor.config`` file exists in the file tree.

    Exits the process with status 1 when the home directory or the config
    file is missing; returns silently otherwise.
    :param netor_home_directory: to verify if the netor home directory and file exists
    :return: nothing
    """
    config_file = netor_home_directory + "netor/netor.config"
    if not (os.path.isdir(netor_home_directory) and os.path.isfile(config_file)):
        print("Neto home directory or config file not found.\nRun configuration script (netor-config).")
        sys.exit(1)
if __name__ == '__main__':
    # Entry point: run the interactive configuration flow, then print a
    # trailing blank line.
    _netor_config()
    print()
| 42.011655 | 119 | 0.668146 |
61edb2c25c99c318b707a55fcdfcaaf007b47999 | 4,780 | py | Python | test/api/mutations/test_check_repository_by_commit.py | uliana291/the-zoo | a15a4162c39553abe91224f4feff5d3b66f9413e | [
"MIT"
] | null | null | null | test/api/mutations/test_check_repository_by_commit.py | uliana291/the-zoo | a15a4162c39553abe91224f4feff5d3b66f9413e | [
"MIT"
] | null | null | null | test/api/mutations/test_check_repository_by_commit.py | uliana291/the-zoo | a15a4162c39553abe91224f4feff5d3b66f9413e | [
"MIT"
] | null | null | null | import pytest
from zoo.auditing.models import Issue
from zoo.auditing.check_discovery import Effort, Kind, Severity
pytestmark = pytest.mark.django_db
@pytest.fixture
def scenario(mocker, repository_factory, issue_factory, check_factory, fake_path):
    """Build a repository with issues covering every (namespace, status) combo.

    Namespaces 'A' and 'B' get pre-existing Issue rows; namespace 'C' has
    none (its kinds are only reported by the patched checks).  Yields
    ``(repository, sha)``; on teardown asserts the repository was downloaded
    exactly once for that sha.
    """
    owner, name, sha = "games", "lemmings", "GINLNNIIJL"
    repository = repository_factory(id=42, owner=owner, name=name, remote_id=3)
    kinds = {}
    # One Kind per (namespace, id); severity/effort vary to exercise the enums.
    for namespace, id, status, severity, effort in [
        ("A", "new", Issue.Status.NEW, Severity.UNDEFINED, Effort.UNDEFINED),
        ("A", "fixed", Issue.Status.FIXED, Severity.ADVICE, Effort.LOW),
        ("A", "wontfix", Issue.Status.WONTFIX, Severity.WARNING, Effort.MEDIUM),
        ("A", "not-found", Issue.Status.NOT_FOUND, Severity.CRITICAL, Effort.HIGH),
        ("A", "reopened", Issue.Status.REOPENED, Severity.UNDEFINED, Effort.UNDEFINED),
        ("B", "new", Issue.Status.NEW, Severity.ADVICE, Effort.LOW),
        ("B", "fixed", Issue.Status.FIXED, Severity.WARNING, Effort.MEDIUM),
        ("B", "wontfix", Issue.Status.WONTFIX, Severity.CRITICAL, Effort.HIGH),
        ("B", "not-found", Issue.Status.NOT_FOUND, Severity.ADVICE, Effort.LOW),
        ("B", "reopened", Issue.Status.REOPENED, Severity.UNDEFINED, Effort.HIGH),
        ("C", "is-found", Issue.Status.NEW, Severity.CRITICAL, Effort.HIGH),
        ("C", "not-found", Issue.Status.NOT_FOUND, Severity.WARNING, Effort.LOW),
    ]:
        kind = Kind(
            category="tests",
            namespace=namespace,
            id=id,
            severity=severity,
            effort=effort,
            title=f"Title for {namespace}:{id}",
            description=f"Description for {namespace}:{id} | Status: {{was}} -> {{is}}",
        )
        kinds[kind.key] = kind
        # Namespace 'C' deliberately has no pre-existing issues.
        if namespace != "C":
            issue_factory(repository=repository, kind_key=kind.key, status=status.value)
    # Fake checks: second argument is "found?", third the message details.
    checks = [
        # known issues, found
        check_factory("A:new", True, {"was": "new", "is": "known"}),
        check_factory("A:fixed", True, {"was": "fixed", "is": "reopened"}),
        check_factory("A:wontfix", True, {"was": "wontfix", "is": "wontfix"}),
        check_factory("A:not-found", True, {"was": "not-found", "is": "new"}),
        check_factory("A:reopened", True, {"was": "reopened", "is": "known"}),
        # known issues, not found
        check_factory("B:new", False, {"was": "new", "is": "fixed"}),
        check_factory("B:fixed", False, {"was": "fixed", "is": "not-found"}),
        check_factory("B:wontfix", False, {"was": "wontfix", "is": "fixed"}),
        check_factory("B:not-found", False, {"was": "not-found", "is": "not-found"}),
        check_factory("B:reopened", False, {"was": "reopened", "is": "fixed"}),
        # new issues
        check_factory("C:is-found", True),
        check_factory("C:not-found", False),
    ]
    mocker.patch("zoo.api.mutations.CHECKS", checks)
    mocker.patch("zoo.auditing.check_discovery.KINDS", kinds)
    # Avoid any real clone: pretend the repo was downloaded to fake_path.
    m_download_repository = mocker.patch(
        "zoo.api.mutations.download_repository", return_value=fake_path
    )
    yield repository, sha
    m_download_repository.assert_called_once_with(repository, mocker.ANY, sha=sha)
# GraphQL mutation shared by the tests below: run the checks for a commit
# and select every field of each produced check result.
query = """
mutation test ($input: CheckRepositoryByCommitInput!) {
checkRepositoryByCommit (input: $input) {
allCheckResults {
isFound
kindKey
status
details
severity
effort
title
description
}
}
}
"""
def test_unknown_repository(snapshot, call_api):
    """The mutation responds gracefully for a repository we do not know."""
    # Renamed 'input' -> 'variables': the original shadowed the builtin.
    variables = {"owner": "games", "name": "doom", "sha": "IDKFA"}
    response = call_api(query, variables)
    snapshot.assert_match(response)
def test_all_results(scenario, snapshot, call_api):
    """All check results (found or not) are returned for the scenario repo."""
    repository, sha = scenario
    # Renamed 'input' -> 'variables': the original shadowed the builtin.
    variables = {"owner": repository.owner, "name": repository.name, "sha": sha}
    response = call_api(query, variables)
    snapshot.assert_match(response)
def test_only_found(scenario, snapshot, call_api):
    """With onlyFound=True only the positively-found results come back."""
    repository, sha = scenario
    # Renamed 'input' -> 'variables': the original shadowed the builtin.
    variables = {
        "owner": repository.owner,
        "name": repository.name,
        "sha": sha,
        "onlyFound": True,
    }
    response = call_api(query, variables)
    snapshot.assert_match(response)
def test_with_repository(scenario, snapshot, call_api):
    """The mutation can also return the repository node itself."""
    repository, sha = scenario
    # Local query selecting repository fields instead of check results
    # (intentionally shadows the module-level 'query').
    query = """
mutation test ($input: CheckRepositoryByCommitInput!) {
checkRepositoryByCommit (input: $input) {
repository {
id
owner
name
url
remoteId
}
}
}
"""
    # Renamed 'input' -> 'variables': the original shadowed the builtin.
    variables = {"owner": repository.owner, "name": repository.name, "sha": sha}
    response = call_api(query, variables)
    snapshot.assert_match(response)
| 34.142857 | 88 | 0.604393 |
61ee17d15d59c91dd4a80c2ec70be31d3dc1095f | 134 | py | Python | start/hello_world.py | nguyenductamlhp/tensorflow_demo | 7c4b55dff80dd435806a1b22dee6eb32ae39c02d | [
"MIT"
] | null | null | null | start/hello_world.py | nguyenductamlhp/tensorflow_demo | 7c4b55dff80dd435806a1b22dee6eb32ae39c02d | [
"MIT"
] | null | null | null | start/hello_world.py | nguyenductamlhp/tensorflow_demo | 7c4b55dff80dd435806a1b22dee6eb32ae39c02d | [
"MIT"
] | 1 | 2018-11-05T06:40:09.000Z | 2018-11-05T06:40:09.000Z | # -*- coding: utf-8 -*-
import tensorflow as tf

# TensorFlow 1.x "hello world": build a constant op, then evaluate it in a
# Session.  tf.Session was removed in TF 2.x, so this script needs TF 1.x
# (or tf.compat.v1).
hello = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print(sess.run(hello))
| 19.142857 | 41 | 0.671642 |
61f0c4fd22f5b70221a5b58b1db5553ecb4e26b8 | 755 | py | Python | behavioral/command/logic/generators/trader_generator.py | Kozak24/Patterns | 351d5c11f7c64ce5d58db37b6715fc8f7d31945a | [
"MIT"
] | null | null | null | behavioral/command/logic/generators/trader_generator.py | Kozak24/Patterns | 351d5c11f7c64ce5d58db37b6715fc8f7d31945a | [
"MIT"
] | null | null | null | behavioral/command/logic/generators/trader_generator.py | Kozak24/Patterns | 351d5c11f7c64ce5d58db37b6715fc8f7d31945a | [
"MIT"
] | null | null | null | from random import randint
from typing import Optional
from behavioral.command.data import Trader
from behavioral.command.logic.generators import ItemsGenerator
class TraderGenerator:
__MIN_GOLD = 0
__MAX_GOLD = 450
def __init__(self) -> None:
self._items_generator = ItemsGenerator()
def _generate_random_gold(self) -> int:
return randint(self.__MIN_GOLD, self.__MAX_GOLD)
def generate_trader(self, name: str, items_amount: int, gold: Optional[int] = None) -> Trader:
items_generator = ItemsGenerator()
gold = gold if gold is not None else self._generate_random_gold()
items = items_generator.generate_items(items_amount)
trader = Trader(name, items, gold)
return trader
| 30.2 | 98 | 0.717881 |
61f42117264c7b5f1ae0b590ff1b7ddfa85808ba | 11,119 | py | Python | test3.py | dg1223/GestureRecognition | 07078b0b8340c8b94f42414efe0ed36158e8c0ea | [
"MIT"
] | 2 | 2019-02-12T17:59:41.000Z | 2019-10-27T03:36:08.000Z | test3.py | dg1223/GestureRecognition | 07078b0b8340c8b94f42414efe0ed36158e8c0ea | [
"MIT"
] | null | null | null | test3.py | dg1223/GestureRecognition | 07078b0b8340c8b94f42414efe0ed36158e8c0ea | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 29 00:07:11 2015
@author: Shamir
"""
def CalculateValidData():
    """Return the number of non-NaN datapoints in the current row.

    Relies on module globals: readFile (pandas DataFrame) and m (row index).
    """
    row = readFile.values[m]
    number_of_nan = len(row[pandas.isnull(row)])
    return len(row) - number_of_nan
for i in range(len(os.listdir(sourcePath))): # we have 6 files corresponding to 6 gestures
print 'i = ', i
gesture = os.listdir(sourcePath)[i] # Jab, Uppercut, Throw, Jets, Block, Asgard
#dataset = os.listdir(sourcePath + gesture)[0] # Train, Cross Validation, Test
copy = False
AngVel_array = []
for k in range(len(os.listdir(sourcePath + gesture))):
sensor = os.listdir(sourcePath + gesture)[k] # Sensor15, Sensor16, Sensor17, Sensor18, Sensor19
sensorFolder = os.listdir(sourcePath + gesture + backslash + sensor)
print sensorFolder
for l in range(len(sensorFolder)):
csvfile = sourcePath + gesture + backslash + sensor + backslash + sensorFolder[l] # full filepath
readFile = pandas.read_csv(csvfile, header = None)
readFile.values[1:] = readFile.values[1:].astype(float)
velocityAlpha = ['Precession_' + sensor[6:]]
velocityBeta = ['Nutation_' + sensor[6:]]
velocityGamma = ['Spin_' + sensor[6:]]
#print velocityAlpha
velocityAlpha = np.asarray(velocityAlpha)
velocityBeta = np.asarray(velocityBeta)
velocityGamma = np.asarray(velocityGamma)
#time = np.shape(readFile.values)[1] / frequency_euc
if copy == True:
print 'This is the If phase'
for m in range(1, len(readFile.values)): # for every two files ???
## need to add code to check if number_of_rows matches
precession, nutation, spin = 0, 0, 0
for n in range(0, np.shape(readFile.values)[1] - 5, 3):
alpha = n
beta = n + 1
gamma = n + 2
alphaNext = n + 3
betaNext = n + 4
gammaNext = n + 5
try:
precession += euclidean(readFile.values[m, alpha], readFile.values[m, alphaNext])
#print 'precession = ', precession
nutation += euclidean(readFile.values[m, beta], readFile.values[m, betaNext])
spin += euclidean(readFile.values[m, gamma], readFile.values[m, gammaNext])
except ValueError:
#print '1st catch (copy = True) at file, m, n = ', csvfile[-6:], m, n
break
valid_data = CalculateValidData() # Exclude missing values (we exclude 6 more values to remain within a safer margin)
time = valid_data / frequency_euc
precessionVelocity = precession/time
#print 'precessionVelocity = ', precessionVelocity
nutationVelocity = nutation/time
spinVelocity = spin/time
for n in range(0, np.shape(readFile.values)[1] - 3, 3):
alpha = n
beta = n + 1
gamma = n + 2
try:
readFile.values[m, alpha] = (precessionVelocity * np.sin(readFile.values[m, gamma]) * np.sin(readFile.values[m, beta])) + (nutationVelocity * np.cos(readFile.values[m, gamma])) # alpha component
readFile.values[m, beta] = (precessionVelocity * np.cos(readFile.values[m, gamma]) * np.sin(readFile.values[m, beta])) - (nutationVelocity * np.sin(readFile.values[m, gamma])) # beta component
readFile.values[m, beta] = (precessionVelocity * np.cos(readFile.values[m, beta])) * spinVelocity # gamma compomemt
except ValueError:
#print '2nd catch (copy = True) at file, m, n = ', csvfile[-6:], m, n
continue
averageAlpha = np.sum(readFile.values[m, range(0, valid_data, 3)]) / time
averageBeta = np.sum(readFile.values[m, range(1, valid_data, 3)]) / time
averageGamma = np.sum(readFile.values[m, range(2, valid_data, 3)]) / time
velocityAlpha = np.vstack((velocityAlpha, averageAlpha))
#print 'filename, m, velocityAlpha = ', csvfile[-6:], m, velocityAlpha
velocityBeta = np.vstack((velocityBeta, averageBeta))
velocityGamma = np.vstack((velocityGamma, averageGamma))
columnSize = len(velocityAlpha)
angular_velocity = np.zeros((len(velocityAlpha), 3))
angular_velocity = angular_velocity.astype(str) # to avoid string to float conversion error
# Return the column vectors in a single 2D array
angular_velocity[:,0] = velocityAlpha.reshape(1, columnSize)
angular_velocity[:,1] = velocityBeta.reshape (1, columnSize)
angular_velocity[:,2] = velocityGamma.reshape(1, columnSize)
AngVel_array = np.hstack((AngVel_array, angular_velocity))
#print 'AngVel_array = ', AngVel_array
else:
print 'This is the Else phase'
for m in range(1, len(readFile.values)): # for every two files
## need to add code to check if number_of_rows matches
precession, nutation, spin = 0, 0, 0
for n in range(0, np.shape(readFile.values)[1] - 5, 3):
alpha = n
beta = n + 1
gamma = n + 2
alphaNext = n + 3
betaNext = n + 4
gammaNext = n + 5
try:
precession += euclidean(readFile.values[m, alpha], readFile.values[m, alphaNext])
nutation += euclidean(readFile.values[m, beta], readFile.values[m, betaNext])
spin += euclidean(readFile.values[m, gamma], readFile.values[m, gammaNext])
except ValueError:
#print '1st catch (copy = False) at print file, m, n = ', csvfile[-6:], m, n
continue
valid_data = CalculateValidData()
time = valid_data / frequency_euc
precessionVelocity = precession/time
nutationVelocity = nutation/time
spinVelocity = spin/time
#print 'precession,nutation,spinVelocity = ', precessionVelocity, nutationVelocity, spinVelocity
for n in range(0, np.shape(readFile.values)[1] - 3, 3):
alpha = n
beta = n + 1
gamma = n + 2
try:
readFile.values[m, alpha] = (precessionVelocity * np.sin(readFile.values[m, gamma]) * np.sin(readFile.values[m, beta])) + (nutationVelocity * np.cos(readFile.values[m, gamma])) # alpha component
readFile.values[m, beta] = (precessionVelocity * np.cos(readFile.values[m, gamma]) * np.sin(readFile.values[m, beta])) - (nutationVelocity * np.sin(readFile.values[m, gamma])) # beta component
readFile.values[m, beta] = (precessionVelocity * np.cos(readFile.values[m, beta])) * spinVelocity # gamma compomemt
except ValueError:
#print '2nd catch (copy = True) at file, m, n = ', csvfile[-6:], m, n
continue
averageAlpha = np.sum(readFile.values[m, range(0, valid_data, 3)]) / time
#print 'averageAlpha = ', averageAlpha
averageBeta = np.sum(readFile.values[m, range(1, valid_data, 3)]) / time
averageGamma = np.sum(readFile.values[m, range(2, valid_data, 3)]) / time
velocityAlpha = np.vstack((velocityAlpha, averageAlpha))
#print 'filename, m, velocityAlpha = ', csvfile[-6:], m, velocityAlpha
velocityBeta = np.vstack((velocityBeta, averageBeta))
velocityGamma = np.vstack((velocityGamma, averageGamma))
columnSize = len(velocityAlpha)
angular_velocity = np.zeros((len(velocityAlpha), 3))
angular_velocity = angular_velocity.astype(str)
# Return the column vectors in a single 2D array
angular_velocity[:,0] = velocityAlpha.reshape(1, columnSize)
angular_velocity[:,1] = velocityBeta.reshape (1, columnSize)
angular_velocity[:,2] = velocityGamma.reshape(1, columnSize)
AngVel_array = angular_velocity.copy()
#print 'AngVel_array = ', AngVel_array
copy = True
# Create complete file structure/dataframe
if i == 0:
fullFile4 = DataFrame(AngVel_array)
else:
AngVel_array = DataFrame(AngVel_array)
fullFile4 = pandas.concat([fullFile4, AngVel_array], join = 'inner')
| 63.176136 | 229 | 0.461462 |
61f65e88bb74b76264401d01893c2004742b5044 | 1,919 | py | Python | build.py | micklenguyen/hw2-scripting | 3603a2c4d7518890eacc4f071f347f90dd295ee6 | [
"MIT"
] | null | null | null | build.py | micklenguyen/hw2-scripting | 3603a2c4d7518890eacc4f071f347f90dd295ee6 | [
"MIT"
] | null | null | null | build.py | micklenguyen/hw2-scripting | 3603a2c4d7518890eacc4f071f347f90dd295ee6 | [
"MIT"
] | null | null | null | def main():
    # Discover every content page, render each through the base template and
    # write the finished HTML file into docs/.
    content_pages = auto_populate_content_files()
    for page in content_pages:
        filepath = page['filepath']
        output = page['output']
        title = page['title']
        # Read content of html pages
        content = open(filepath).read()
        # Render base.html with this page's content ('finshed_page' is a
        # pre-existing misspelling, kept as-is).
        finshed_page = apply_template(content, title, content_pages)
        write_html(output, finshed_page)
def auto_populate_content_files():
    """Scan content/*.html and describe each page for the build.

    :return: list of dicts with keys ``filepath``, ``title``, ``output``
        (destination under docs/) and ``filename``.
    """
    import glob
    import os

    pages = []
    for file_path in glob.glob("content/*.html"):
        # e.g. content/resume.html -> resume.html -> ("resume", ".html").
        # (The original computed basename twice and had a no-op
        # 'file_path = file_path' assignment - both removed.)
        file_name = os.path.basename(file_path)
        name_only, _extension = os.path.splitext(file_name)
        pages.append({
            "filepath": file_path,
            "title": name_only,
            "output": "docs/" + file_name,
            "filename": file_name
        })
    return pages
from jinja2 import Template
# Read base.html and save to template
template_html = open("templates/base.html").read()
new_template = Template(template_html)
finished_page = new_template.render(
title=title,
content=content,
pages=pages,
)
return finished_page
def write_html(output, finshed_page):
# Writes complete html files
open(output, "w+").write(finshed_page)
if __name__ == "__main__":
    # Build the static site when run as a script.
    main()
61f94a0bece7deb448882a08f6a458e64ef93c8e | 35,113 | py | Python | src/jote/jote.py | InformaticsMatters/data-manager-job-tester | f8915e005f16685d159535a2455628eb1d7ac518 | [
"MIT"
] | null | null | null | src/jote/jote.py | InformaticsMatters/data-manager-job-tester | f8915e005f16685d159535a2455628eb1d7ac518 | [
"MIT"
] | 1 | 2022-01-28T10:06:28.000Z | 2022-01-31T14:51:52.000Z | src/jote/jote.py | InformaticsMatters/data-manager-job-tester | f8915e005f16685d159535a2455628eb1d7ac518 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Informatics Matters Job Tester (JOTE).
Get help running this utility with 'jote --help'
"""
import argparse
import os
import shutil
import stat
from stat import S_IRGRP, S_IRUSR, S_IWGRP, S_IWUSR
import subprocess
import sys
from typing import Any, Dict, List, Optional, Tuple
from munch import DefaultMunch
import yaml
from yamllint import linter
from yamllint.config import YamlLintConfig
from decoder import decoder
from .compose import get_test_root, INSTANCE_DIRECTORY, DEFAULT_TEST_TIMEOUT_M
from .compose import Compose
# Where can we expect to find Job definitions?
_DEFINITION_DIRECTORY: str = "data-manager"
# What's the default manifest file?
_DEFAULT_MANIFEST: str = os.path.join(_DEFINITION_DIRECTORY, "manifest.yaml")
# Where can we expect to find test data?
_DATA_DIRECTORY: str = "data"
# Our yamllint configuration file,
# expected in the same directory as this module.
_YAMLLINT_FILE: str = os.path.join(os.path.dirname(__file__), "jote.yamllint")
# Read the version file (a single stripped line next to this module).
_VERSION_FILE: str = os.path.join(os.path.dirname(__file__), "VERSION")
with open(_VERSION_FILE, "r", encoding="utf-8") as file_handle:
    _VERSION = file_handle.read().strip()
# Job image types (lower-case)
_IMAGE_TYPE_SIMPLE: str = "simple"
_IMAGE_TYPE_NEXTFLOW: str = "nextflow"
_DEFAULT_IMAGE_TYPE: str = _IMAGE_TYPE_SIMPLE
# User HOME directory.
# Used to check for nextflow files if nextflow is executed.
# The user CANNOT have any of their own nextflow config.
_USR_HOME: str = os.environ.get("HOME", "")
def _print_test_banner(collection: str, job_name: str, job_test_name: str) -> None:
print(" ---")
print(f"+ collection={collection} job={job_name} test={job_test_name}")
def _lint(definition_filename: str) -> bool:
    """Lints the provided job definition file.

    Returns False when the yamllint configuration is missing or the file
    produces any yamllint errors (which are printed); True otherwise.
    """
    if not os.path.isfile(_YAMLLINT_FILE):
        print(f"! The yamllint file ({_YAMLLINT_FILE}) is missing")
        return False

    with open(definition_filename, "rt", encoding="UTF-8") as definition_file:
        errors = linter.run(definition_file, YamlLintConfig(file=_YAMLLINT_FILE))
        if errors:
            # We're given a 'generator' and we don't know if there are errors
            # until we iterate over it. So here we print an initial error
            # message on the first error.
            found_errors: bool = False
            for error in errors:
                if not found_errors:
                    # Bug fix: report the file *name*; the original
                    # interpolated the file object, printing its repr.
                    print(f'! Job definition "{definition_filename}" fails yamllint:')
                    found_errors = True
                print(error)
            if found_errors:
                return False

    return True
def _validate_schema(definition_filename: str) -> bool:
    """Checks the Job Definition against the decoder's schema."""
    with open(definition_filename, "rt", encoding="UTF-8") as definition_file:
        job_def: Optional[Dict[str, Any]] = yaml.load(
            definition_file, Loader=yaml.FullLoader
        )
    assert job_def

    # A non-None response from the decoder is an error description.
    error: Optional[str] = decoder.validate_job_schema(job_def)
    if not error:
        return True

    print(
        f'! Job definition "{definition_filename}"' " does not comply with schema"
    )
    print("! Full response follows:")
    print(error)
    return False
def _validate_manifest_schema(manifest_filename: str) -> bool:
    """Checks the Manifest against the decoder's schema."""
    with open(manifest_filename, "rt", encoding="UTF-8") as definition_file:
        job_def: Optional[Dict[str, Any]] = yaml.load(
            definition_file, Loader=yaml.FullLoader
        )
    assert job_def

    # A non-None response from the decoder is an error description.
    error: Optional[str] = decoder.validate_manifest_schema(job_def)
    if not error:
        return True

    print(f'! Manifest "{manifest_filename}"' " does not comply with schema")
    print("! Full response follows:")
    print(error)
    return False
def _check_cwd() -> bool:
    """Sanity-checks the current working directory.

    Both the job-definition directory and the test-data directory
    must exist where jote is executed.
    """
    for required_directory in (_DEFINITION_DIRECTORY, _DATA_DIRECTORY):
        if os.path.isdir(required_directory):
            continue
        print(f'! Expected directory "{required_directory}"' " but it is not here")
        return False
    return True
def _load(manifest_filename: str, skip_lint: bool) -> Tuple[List[DefaultMunch], int]:
    """Loads definition files listed in the manifest
    and extracts the definitions that contain at least one test. The
    definition blocks for those that have tests (ignored or otherwise)
    are returned along with a count of the number of tests found
    (ignored or otherwise).

    If there was a problem loading the files an empty list and
    -ve count is returned.
    """
    # Prefix manifest filename with definition directory if required...
    manifest_path: str = (
        manifest_filename
        if manifest_filename.startswith(f"{_DEFINITION_DIRECTORY}/")
        else os.path.join(_DEFINITION_DIRECTORY, manifest_filename)
    )
    if not os.path.isfile(manifest_path):
        print(f'! The manifest file is missing ("{manifest_path}")')
        return [], -1
    if not _validate_manifest_schema(manifest_path):
        return [], -1
    with open(manifest_path, "r", encoding="UTF-8") as manifest_file:
        manifest: Dict[str, Any] = yaml.load(manifest_file, Loader=yaml.FullLoader)

    # Bug fix: an empty manifest previously fell through to use an
    # unassigned 'manifest_munch' (NameError). Treat it as "no tests".
    if not manifest:
        return [], 0
    manifest_munch: DefaultMunch = DefaultMunch.fromDict(manifest)

    # Iterate through the named files...
    job_definitions: List[DefaultMunch] = []
    num_tests: int = 0
    for jd_filename in manifest_munch["job-definition-files"]:
        # Does the definition comply with the schema?
        # No options here - it must.
        jd_path: str = os.path.join(_DEFINITION_DIRECTORY, jd_filename)
        if not _validate_schema(jd_path):
            return [], -1
        # YAML-lint the definition?
        if not skip_lint:
            if not _lint(jd_path):
                return [], -2
        with open(jd_path, "r", encoding="UTF-8") as jd_file:
            job_def: Dict[str, Any] = yaml.load(jd_file, Loader=yaml.FullLoader)
        if job_def:
            jd_munch: DefaultMunch = DefaultMunch.fromDict(job_def)
            # Bug fix: count THIS file's tests separately - the original used
            # the cumulative total, so a test-less definition following a
            # tested one was wrongly included in the returned list.
            file_num_tests: int = 0
            for jd_name in jd_munch.jobs:
                if jd_munch.jobs[jd_name].tests:
                    file_num_tests += len(jd_munch.jobs[jd_name].tests)
            if file_num_tests:
                jd_munch.definition_filename = jd_filename
                job_definitions.append(jd_munch)
            num_tests += file_num_tests
    return job_definitions, num_tests
def _copy_inputs(test_inputs: List[str], project_path: str) -> bool:
    """Copies each declared test input file into the test's project directory.

    Every input must live under the repository 'data' directory and must
    exist; anything else is reported as a failure.
    """
    print(f'# Copying inputs (from "${{PWD}}/{_DATA_DIRECTORY}")...')

    required_prefix: str = f"{_DATA_DIRECTORY}/"
    for input_file in test_inputs:
        print(f"# + {input_file}")

        if not input_file.startswith(required_prefix):
            print("! FAILURE")
            print(f'! Input file {input_file} must start with "{required_prefix}"')
            return False
        if not os.path.isfile(input_file):
            print("! FAILURE")
            print(f"! Missing input file {input_file} ({input_file})")
            return False

        # Looks fine - copy it into the project.
        shutil.copy(input_file, project_path)

    print("# Copied")
    return True
def _check_exists(name: str, path: str, expected: bool, fix_permissions: bool) -> bool:
exists: bool = os.path.exists(path)
if expected and not exists:
print(f"# exists ({expected}) [FAILED]")
print("! FAILURE")
print(f'! Check exists "{name}" (does not exist)')
return False
if not expected and exists:
print(f"# exists ({expected}) [FAILED]")
print("! FAILURE")
print(f'! Check does not exist "{name}" (exists)')
return False
# File exists or does not exist, as expected.
# If it exists we check its 'user' and 'group' read and write permission.
#
# If 'fix_permissions' is True (i.e. the DM is expected to fix (group) permissions)
# the group permissions are expected to be incorrect. If False
# then the group permissions are expected to be correct/
if exists:
stat_info: os.stat_result = os.stat(path)
# Check user permissions
file_mode: int = stat_info.st_mode
if file_mode & S_IRUSR == 0 or file_mode & S_IWUSR == 0:
print("! FAILURE")
print(
f'! "{name}" exists but has incorrect user permissions'
f" ({stat.filemode(file_mode)})"
)
return False
# Check group permissions
if file_mode & S_IRGRP == 0 or file_mode & S_IWGRP == 0:
# Incorrect permissions.
if not fix_permissions:
# And not told to fix them!
print("! FAILURE")
print(
f'! "{name}" exists but has incorrect group permissions (fix-permissions=False)'
f" ({stat.filemode(file_mode)})"
)
return False
else:
# Correct group permissions.
if fix_permissions:
# But told to fix them!
print("! FAILURE")
print(
f'! "{name}" exists but has correct group permissions (fix-permissions=True)'
f" ({stat.filemode(file_mode)})"
)
return False
print(f"# exists ({expected}) [OK]")
return True
def _check_line_count(name: str, path: str, expected: int) -> bool:
line_count: int = 0
with open(path, "rt", encoding="UTF-8") as check_file:
for _ in check_file:
line_count += 1
if line_count != expected:
print(f"# lineCount ({line_count}) [FAILED]")
print("! FAILURE")
print(f"! Check lineCount {name}" f" (found {line_count}, expected {expected})")
return False
print(f"# lineCount ({line_count}) [OK]")
return True
def _check(
    t_compose: Compose, output_checks: DefaultMunch, fix_permissions: bool
) -> bool:
    """Runs the declared checks against the Job's output files.

    We currently support 'exists' and 'lineCount'.
    If 'fix_permissions' is True we error if the permissions are correct,
    if False we error if the permissions are not correct.
    """
    assert t_compose
    assert isinstance(t_compose, Compose)
    assert output_checks
    assert isinstance(output_checks, List)

    print("# Checking...")

    for output_check in output_checks:
        output_name: str = output_check.name
        print(f"# - {output_name}")
        expected_file: str = os.path.join(
            t_compose.get_test_project_path(), output_name
        )

        for check in output_check.checks:
            # Each check is a single-key mapping; the key names the check.
            check_type: str = next(iter(check.keys()))
            if check_type == "exists":
                check_ok = _check_exists(
                    output_name, expected_file, check.exists, fix_permissions
                )
            elif check_type == "lineCount":
                check_ok = _check_line_count(output_name, expected_file, check.lineCount)
            else:
                print("! FAILURE")
                print(f"! Unknown output check type ({check_type})")
                return False
            if not check_ok:
                return False

    print("# Checked")
    return True
def _run_nextflow(
    command: str, project_path: str, timeout_minutes: int = DEFAULT_TEST_TIMEOUT_M
) -> Tuple[int, str, str]:
    """Runs nextflow in the project directory returning the exit code,
    stdout and stderr.

    A test that exceeds 'timeout_minutes' is now reported as a failure
    (non-zero exit code) instead of crashing the whole test run.
    """
    assert command
    assert project_path

    # The user cannot have a nextflow config in their home directory.
    # Nextflow looks here and any config will be merged with the test config.
    if _USR_HOME:
        home_config: str = os.path.join(_USR_HOME, ".nextflow", "config")
        if os.path.exists(home_config) and os.path.isfile(home_config):
            print("! FAILURE")
            print(
                "! A nextflow test but"
                f" you have your own config file ({home_config})"
            )
            print("! You cannot test Jobs and have your own config file")
            return 1, "", ""

    print('# Executing the test ("nextflow")...')
    print(f'# Execution directory is "{project_path}"')
    try:
        # 'cwd=' runs the child in the project directory without
        # mutating this process's working directory (the previous
        # os.chdir() dance was not exception-safe for callers).
        test = subprocess.run(
            command,
            shell=True,
            check=False,
            capture_output=True,
            timeout=timeout_minutes * 60,
            cwd=project_path,
        )
    except subprocess.TimeoutExpired as timeout_error:
        # Bug fix: a timeout previously propagated out of this function
        # and aborted jote. Report it as a failed test instead.
        out: str = (timeout_error.stdout or b"").decode("utf-8")
        err: str = (timeout_error.stderr or b"").decode("utf-8")
        print(f"! Test timed out (timeout-minutes={timeout_minutes})")
        return 1, out, err

    return test.returncode, test.stdout.decode("utf-8"), test.stderr.decode("utf-8")
def _test(
    args: argparse.Namespace,
    filename: str,
    collection: str,
    job: str,
    job_definition: DefaultMunch,
) -> Tuple[int, int, int, int]:
    """Runs the tests for a specific Job definition returning the number
    of tests passed, skipped (due to run-level), ignored and failed.

    'filename' is the definition file the Job came from and is printed
    in the per-test report.
    """
    assert job_definition
    assert isinstance(job_definition, DefaultMunch)

    # The test status, assume success
    tests_passed: int = 0
    tests_skipped: int = 0
    tests_ignored: int = 0
    tests_failed: int = 0

    if args.image_tag:
        print(f"W Replacing image tag. Using '{args.image_tag}'")
        job_image: str = f"{job_definition.image.name}:{args.image_tag}"
    else:
        job_image = f"{job_definition.image.name}:{job_definition.image.tag}"
    job_image_memory: str = job_definition.image["memory"]
    if job_image_memory is None:
        job_image_memory = "1Gi"
    job_image_cores: int = job_definition.image["cores"]
    if job_image_cores is None:
        job_image_cores = 1
    job_project_directory: str = job_definition.image["project-directory"]
    job_working_directory: str = job_definition.image["working-directory"]
    if "type" in job_definition.image:
        job_image_type: str = job_definition.image["type"].lower()
    else:
        job_image_type = _DEFAULT_IMAGE_TYPE
    # Does the image need the (group write) permissions
    # of files it creates fixing? Default is 'no'.
    # If 'yes' (true) the DM is expected to fix the permissions of the
    # generated files once the job has finished.
    job_image_fix_permissions: bool = False
    if "fix-permissions" in job_definition.image:
        job_image_fix_permissions = job_definition.image["fix-permissions"]

    for job_test_name in job_definition.tests:
        # If a job test has been named,
        # skip this test if it doesn't match.
        # We do not include this test in the count.
        if args.test and not args.test == job_test_name:
            continue

        _print_test_banner(collection, job, job_test_name)

        # The status changes to False if any
        # part of this block fails.
        test_status: bool = True

        # Bug fix: the f-string had no placeholder (it printed the literal
        # text "(unknown)") and the 'filename' parameter went unused.
        print(f"> definition filename={filename}")

        # Does the test have an 'ignore' declaration?
        # Obey it unless the test is named explicitly -
        # i.e. if the user has named a specific test, run it.
        if "ignore" in job_definition.tests[job_test_name]:
            if args.test:
                print("W Ignoring the ignore: property (told to run this test)")
            else:
                print('W Ignoring test (found "ignore")')
                tests_ignored += 1
                continue

        # Does the test have a 'run-level' declaration?
        # If so, is it higher than the run-level specified?
        if args.test:
            print("W Ignoring any run-level check (told to run this test)")
        else:
            if "run-level" in job_definition.tests[job_test_name]:
                run_level = job_definition.tests[job_test_name]["run-level"]
                print(f"> run-level={run_level}")
                if run_level > args.run_level:
                    print(f'W Skipping test (test is "run-level: {run_level}")')
                    tests_skipped += 1
                    continue
            else:
                print("> run-level=Undefined")

        # Render the command for this test.
        # First extract the variables and values from 'options'
        # and then 'inputs'.
        job_variables: Dict[str, Any] = {}
        for variable in job_definition.tests[job_test_name].options:
            job_variables[variable] = job_definition.tests[job_test_name].options[
                variable
            ]

        # If the option variable's declaration is 'multiple'
        # it must be handled as a list, e.g. it might be declared like this: -
        #
        # The double-comment is used
        # to avoid mypy getting upset by the 'type' line...
        #
        # # properties:
        # #   fragments:
        # #     title: Fragment molecules
        # #     multiple: true
        # #     mime-types:
        # #     - chemical/x-mdl-molfile
        # #     type: file
        #
        # We only pass the basename of the input to the command decoding
        # i.e. strip the source directory.

        # A list of input files (relative to this directory)
        # We populate this with everything we find declared as an input
        input_files: List[str] = []
        # Process every 'input'
        if job_definition.tests[job_test_name].inputs:
            for variable in job_definition.tests[job_test_name].inputs:
                # Test variable must be known as an input or option.
                # Is the variable an option (otherwise it's an input)
                variable_is_option: bool = False
                variable_is_input: bool = False
                if variable in job_definition.variables.options.properties:
                    variable_is_option = True
                elif variable in job_definition.variables.inputs.properties:
                    variable_is_input = True
                if not variable_is_option and not variable_is_input:
                    print("! FAILURE")
                    print(
                        f"! Test variable ({variable})"
                        + " not declared as input or option"
                    )
                    # Record but do no further processing
                    tests_failed += 1
                    test_status = False
                # Is it declared as a list?
                value_is_list: bool = False
                if variable_is_option:
                    if job_definition.variables.options.properties[variable].multiple:
                        value_is_list = True
                else:
                    if job_definition.variables.inputs.properties[variable].multiple:
                        value_is_list = True
                # Add each value or just one value
                # (depending on whether it's a list)
                if value_is_list:
                    job_variables[variable] = []
                    for value in job_definition.tests[job_test_name].inputs[variable]:
                        job_variables[variable].append(os.path.basename(value))
                        input_files.append(value)
                else:
                    value = job_definition.tests[job_test_name].inputs[variable]
                    job_variables[variable] = os.path.basename(value)
                    input_files.append(value)

        decoded_command: str = ""
        test_environment: Dict[str, str] = {}
        if test_status:
            # Jote injects Job variables that are expected.
            # 'DM_' variables are injected by the Data Manager,
            # other are injected by Jote.
            # - DM_INSTANCE_DIRECTORY
            job_variables["DM_INSTANCE_DIRECTORY"] = INSTANCE_DIRECTORY
            # - CODE_DIRECTORY
            job_variables["CODE_DIRECTORY"] = os.getcwd()

            # Has the user defined any environment variables in the test?
            # If so they must exist, although we don't care about their value.
            # Extract them here to pass to the test.
            if "environment" in job_definition.tests[job_test_name]:
                for env_name in job_definition.tests[job_test_name].environment:
                    env_value: Optional[str] = os.environ.get(env_name, None)
                    if env_value is None:
                        print("! FAILURE")
                        print("! Test environment variable is not defined")
                        print(f"! variable={env_name}")
                        # Record but do no further processing
                        tests_failed += 1
                        test_status = False
                        break
                    test_environment[env_name] = env_value

        if test_status:
            # Get the raw (encoded) command from the job definition...
            raw_command: str = job_definition.command
            # Decode it using our variables...
            decoded_command, test_status = decoder.decode(
                raw_command,
                job_variables,
                "command",
                decoder.TextEncoding.JINJA2_3_0,
            )
            if not test_status:
                print("! FAILURE")
                print("! Failed to render command")
                print(f"! error={decoded_command}")
                # Record but do no further processing
                tests_failed += 1
                test_status = False

        # Create the test directories, docker-compose file
        # and copy inputs...
        t_compose: Optional[Compose] = None
        job_command: str = ""
        project_path: str = ""
        if test_status:
            # The command must not contain new-lines.
            # So split then join the command.
            assert decoded_command
            job_command = "".join(decoded_command.splitlines())
            print(f"> image={job_image}")
            print(f"> image-type={job_image_type}")
            print(f"> command={job_command}")

            # Create the project
            t_compose = Compose(
                collection,
                job,
                job_test_name,
                job_image,
                job_image_type,
                job_image_memory,
                job_image_cores,
                job_project_directory,
                job_working_directory,
                job_command,
                test_environment,
                args.run_as_user,
            )
            project_path = t_compose.create()

            if input_files:
                # Copy the data into the test's project directory.
                # Data's expected to be found in the Job's 'inputs'.
                test_status = _copy_inputs(input_files, project_path)

        # Run the container
        if test_status and not args.dry_run:
            timeout_minutes: int = DEFAULT_TEST_TIMEOUT_M
            if "timeout-minutes" in job_definition.tests[job_test_name]:
                timeout_minutes = job_definition.tests[job_test_name]["timeout-minutes"]
            exit_code: int = 0
            out: str = ""
            err: str = ""
            if job_image_type in [_IMAGE_TYPE_SIMPLE]:
                # Run the image container
                assert t_compose
                exit_code, out, err = t_compose.run(timeout_minutes)
            elif job_image_type in [_IMAGE_TYPE_NEXTFLOW]:
                # Run nextflow directly
                assert job_command
                assert project_path
                exit_code, out, err = _run_nextflow(
                    job_command, project_path, timeout_minutes
                )
            else:
                print("! FAILURE")
                # Bug fix: the message was missing its closing parenthesis.
                print(f"! unsupported image-type ({job_image_type})")
                test_status = False

            if test_status:
                expected_exit_code: int = job_definition.tests[
                    job_test_name
                ].checks.exitCode
                if exit_code != expected_exit_code:
                    print("! FAILURE")
                    print(
                        f"! exit_code={exit_code}"
                        f" expected_exit_code={expected_exit_code}"
                    )
                    print("! Test stdout follows...")
                    print(out)
                    print("! Test stderr follows...")
                    print(err)
                    test_status = False
                if args.verbose:
                    print(out)

        # Inspect the results
        # (only if successful so far)
        if (
            test_status
            and not args.dry_run
            and job_definition.tests[job_test_name].checks.outputs
        ):
            assert t_compose
            test_status = _check(
                t_compose,
                job_definition.tests[job_test_name].checks.outputs,
                job_image_fix_permissions,
            )

        # Clean-up
        if test_status and not args.keep_results:
            assert t_compose
            t_compose.delete()

        # Count?
        if test_status:
            print("- SUCCESS")
            tests_passed += 1
        else:
            tests_failed += 1

        # Told to stop on first failure?
        if not test_status and args.exit_on_failure:
            break

    return tests_passed, tests_skipped, tests_ignored, tests_failed
def _wipe() -> None:
    """Wipes the results of all tests (removes the whole test root)."""
    root_directory: str = get_test_root()
    if not os.path.isdir(root_directory):
        return
    shutil.rmtree(root_directory)
def arg_check_run_level(value: str) -> int:
    """An argparse type checker: a run-level is an int from 1 to 100."""
    run_level = int(value)
    if run_level < 1:
        raise argparse.ArgumentTypeError("Minimum value is 1")
    if 100 < run_level:
        raise argparse.ArgumentTypeError("Maximum value is 100")
    return run_level
def arg_check_run_as_user(value: str) -> int:
    """An argparse type checker: a user ID is an int from 0 to 65535."""
    user_id = int(value)
    if user_id < 0:
        raise argparse.ArgumentTypeError("Minimum value is 0")
    if 65_535 < user_id:
        raise argparse.ArgumentTypeError("Maximum value is 65535")
    return user_id
# -----------------------------------------------------------------------------
# main
# -----------------------------------------------------------------------------
def main() -> int:
    """The console script entry-point. Called when jote is executed
    or from __main__.py, which is used by the installed console script.

    Returns 0 on success, non-zero on failure (argparse.error() may also
    exit the process directly with status 2).
    """
    # Build a command-line parser
    # and process the command-line...
    arg_parser: argparse.ArgumentParser = argparse.ArgumentParser(
        description="Data Manager Job Tester",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    arg_parser.add_argument(
        "-m",
        "--manifest",
        help="The manifest file.",
        default=_DEFAULT_MANIFEST,
        type=str,
    )
    arg_parser.add_argument(
        "-c",
        "--collection",
        help="The Job collection to test. If not"
        " specified the Jobs in all collections"
        " will be candidates for testing.",
    )
    arg_parser.add_argument(
        "-j",
        "--job",
        help="The Job to test. If specified the collection"
        " is required. If not specified all the Jobs"
        " that match the collection will be"
        " candidates for testing.",
    )
    arg_parser.add_argument(
        "--image-tag",
        help="An image tag to use rather then the one defined in the job definition.",
    )
    arg_parser.add_argument(
        "-t",
        "--test",
        help="A specific test to run. If specified the job"
        " is required. If not specified all the Tests"
        " that match the collection will be"
        " candidates for testing.",
    )
    arg_parser.add_argument(
        "-r",
        "--run-level",
        help="The run-level of the tests you want to"
        " execute. All tests at or below this level"
        " will be executed, a value from 1 to 100",
        default=1,
        type=arg_check_run_level,
    )
    arg_parser.add_argument(
        "-u",
        "--run-as-user",
        help="A user ID to run the tests as. If not set"
        " your user ID is used to run the test"
        " containers.",
        type=arg_check_run_as_user,
    )
    arg_parser.add_argument(
        "-d",
        "--dry-run",
        action="store_true",
        help="Setting this flag will result in jote"
        " simply parsing the Job definitions"
        " but not running any of the tests."
        " It can be used to check the syntax of"
        " your definition file and its test commands"
        " and data.",
    )
    arg_parser.add_argument(
        "-k",
        "--keep-results",
        action="store_true",
        help="Normally all material created to run each"
        " test is removed when the test is"
        " successful",
    )
    arg_parser.add_argument(
        "-v", "--verbose", action="store_true", help="Displays test stdout"
    )
    arg_parser.add_argument(
        "--version", action="store_true", help="Displays jote version"
    )
    arg_parser.add_argument(
        "-x",
        "--exit-on-failure",
        action="store_true",
        help="Normally jote reports test failures but"
        " continues with the next test."
        " Setting this flag will force jote to"
        " stop when it encounters the first failure",
    )
    arg_parser.add_argument(
        "-s",
        "--skip-lint",
        action="store_true",
        help="Normally jote runs the job definition"
        " files against the prevailing lint"
        " configuration of the repository under test."
        " Using this flag skips that step",
    )
    arg_parser.add_argument(
        "-w",
        "--wipe",
        action="store_true",
        help="Wipe does not run any tests, it simply"
        " wipes the repository clean of jote"
        " test material. It would be wise"
        " to run this once you have finished testing."
        " Using this negates the effect of any other"
        " option.",
    )
    arg_parser.add_argument(
        "-a",
        "--allow-no-tests",
        action="store_true",
        help="Normally jote expects to run tests"
        " and if you have no tests jote will fail."
        " To prevent jote complaining about the lack"
        " of tests you can use this option.",
    )

    args: argparse.Namespace = arg_parser.parse_args()

    # If a version's been asked for act on it and then leave
    if args.version:
        print(_VERSION)
        return 0

    if args.test and args.job is None:
        arg_parser.error("--test requires --job")
    if args.job and args.collection is None:
        arg_parser.error("--job requires --collection")
    if args.wipe and args.keep_results:
        arg_parser.error("Cannot use --wipe and --keep-results")

    # Args are OK if we get here.
    total_passed_count: int = 0
    total_skipped_count: int = 0
    total_ignore_count: int = 0
    total_failed_count: int = 0

    # Check CWD
    if not _check_cwd():
        print("! FAILURE")
        print("! The directory does not look correct")
        arg_parser.error("Done (FAILURE)")

    # Told to wipe?
    # If so wipe, and leave.
    if args.wipe:
        _wipe()
        print("Done [Wiped]")
        return 0

    print(f'# Using manifest "{args.manifest}"')

    # Load all the files we can and then run the tests.
    job_definitions, num_tests = _load(args.manifest, args.skip_lint)
    if num_tests < 0:
        print("! FAILURE")
        print("! Definition file has failed yamllint")
        arg_parser.error("Done (FAILURE)")

    msg: str = "test" if num_tests == 1 else "tests"
    print(f"# Found {num_tests} {msg}")
    if args.collection:
        print(f'# Limiting to Collection "{args.collection}"')
    if args.job:
        print(f'# Limiting to Job "{args.job}"')
    if args.test:
        print(f'# Limiting to Test "{args.test}"')

    if job_definitions:
        # There is at least one job-definition with a test
        # Now process all the Jobs that have tests...
        for job_definition in job_definitions:
            # If a collection's been named,
            # skip this file if it's not the named collection
            collection: str = job_definition.collection
            if args.collection and not args.collection == collection:
                continue

            # Bug fix: initialise per definition. 'num_failed' was only
            # assigned inside the per-job branch below, so the loop-level
            # exit-on-failure check could read a stale value from a previous
            # definition, or raise NameError if no job ever matched.
            num_failed: int = 0
            for job_name in job_definition.jobs:
                # If a Job's been named,
                # skip this test if the job does not match
                if args.job and not args.job == job_name:
                    continue
                if job_definition.jobs[job_name].tests:
                    num_passed, num_skipped, num_ignored, num_failed = _test(
                        args,
                        job_definition.definition_filename,
                        collection,
                        job_name,
                        job_definition.jobs[job_name],
                    )
                    total_passed_count += num_passed
                    total_skipped_count += num_skipped
                    total_ignore_count += num_ignored
                    total_failed_count += num_failed
                    # Break out of this loop if told to stop on failures
                    if num_failed > 0 and args.exit_on_failure:
                        break
            # Break out of this loop if told to stop on failures
            if num_failed > 0 and args.exit_on_failure:
                break

    # Success or failure?
    # It's an error to find no tests.
    print(" ---")
    dry_run: str = "[DRY RUN]" if args.dry_run else ""
    summary: str = (
        f"passed={total_passed_count}"
        f" skipped={total_skipped_count}"
        f" ignored={total_ignore_count}"
        f" failed={total_failed_count}"
    )
    failed: bool = False
    if total_failed_count:
        # NOTE: arg_parser.error() exits the process (status 2),
        # so the 'failed' assignments below it are defensive only.
        arg_parser.error(f"Done (FAILURE) {summary} {dry_run}")
        failed = True
    elif total_passed_count == 0 and not args.allow_no_tests:
        arg_parser.error(
            f"Done (FAILURE) {summary}" f" (at least one test must pass)" f" {dry_run}"
        )
        failed = True
    else:
        print(f"Done (OK) {summary} {dry_run}")

    # Automatically wipe.
    # If there have been no failures
    # and not told to keep directories.
    if total_failed_count == 0 and not args.keep_results:
        _wipe()

    return 1 if failed else 0
# -----------------------------------------------------------------------------
# MAIN
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    # Console-script entry: propagate a non-zero exit code to the shell.
    _RET_VAL: int = main()
    if _RET_VAL != 0:
        sys.exit(_RET_VAL)
| 35.183367 | 100 | 0.584741 |
61f95f027b40f870a2f775166934b53fdd79358c | 18,636 | py | Python | src/cred_manage/bitwarden.py | areese801/cred_manage | 01f08ddc0b954c36e27ce1d6407f087c8aff0d4f | [
"MIT"
] | null | null | null | src/cred_manage/bitwarden.py | areese801/cred_manage | 01f08ddc0b954c36e27ce1d6407f087c8aff0d4f | [
"MIT"
] | null | null | null | src/cred_manage/bitwarden.py | areese801/cred_manage | 01f08ddc0b954c36e27ce1d6407f087c8aff0d4f | [
"MIT"
] | null | null | null | """
Subclass of the BaseCredContainer used for reading secrets from bitwarden password manager.
This class wraps the bitwarden CLI. See: https://bitwarden.com/help/article/cli/#using-an-api-key
Note that only the Enterprise version of bitwarden can (supported) hit the REST API.
In contrast, the API key that can be found under the "My Account" page can be used to log into the cli tool
"""
from cred_manage.flat_file import FlatFileCredContainer
from cred_manage.base_cred_container import CredContainerBase
import json
import getpass
import os
import subprocess
import uuid
from shutil import which
from packaging import version
def make_bitwarden_container(api_key_flat_file:str = '/.credentials/bw_api.json'):
    """
    Factory function: builds a BitwardenCredContainer from the JSON API-key
    details held in a (permission-restricted) flat file.
    See 'View API Key' button at https://vault.bitwarden.com/#/settings/account

    Args:
        api_key_flat_file (str): The flat file that contains the API details.

    Returns:
        BitwardenCredContainer
    """
    # The API key flat file must actually exist.
    if not os.path.isfile(api_key_flat_file):
        raise FileNotFoundError(f"Cannot read the bitwarden API key out of the file '{api_key_flat_file}' because it does not exist!")

    # FlatFileCredContainer refuses files with overly-broad permissions.
    api_key_container = FlatFileCredContainer(
        file_path=api_key_flat_file,
        allow_broad_permissions=False)
    api_key_details = json.loads(api_key_container.read())

    return BitwardenCredContainer(**api_key_details)
class BitwardenCredContainer(CredContainerBase):
"""
A credential container for interacting with bitwarden
Args:
CredContainerBase ([type]): [description]
"""
    def __init__(self, client_id:str = None, client_secret:str = None, session_key:str = None, **kwargs) -> None:
        """
        Init method for the BitwardenCredContainer.

        Verifies the 'bw' CLI is installed and recent enough, sets the
        BW_CLIENTID/BW_CLIENTSECRET environment variables the CLI reads,
        then drives the login/unlock flow until a valid session key is held.

        Args:
            client_id (string): Bitwarden API client id
            client_secret (string): Bitwarden API client secret
            session_key (string): If passed, should correspond with a currently valid session key that corresponds with the '--session'
                argument for any command and/or the BW_SESSION environment variable. Ultimately, this is the value we're after for any
                subsequent interactions with the cli. Thus, if supplied (and valid) this is really the only arg we need.
            **kwargs: Any extra keys (e.g. 'email_address' from a JSON config file) are pinned to self verbatim.
        """
        # We won't get far at all if the bw tool isn't installed.
        which_bw = which('bw')
        if which_bw is None:
            raise FileNotFoundError(f"This program wraps the bitwarden cli tool, 'bw', but it doesn't seem to be installed (or is not on PATH). Please fix that and try again. See: https://bitwarden.com/help/article/cli/")
        # We also need that bw needs to be at least a specific version
        minimum_bw_version="1.18.1"
        valid_version_installed = self._check_bw_version_is_valid(minimum_required_version=minimum_bw_version)
        if valid_version_installed is False:
            raise FileNotFoundError(f"The 'bw' command line is installed, but the version is too old. Version {minimum_bw_version} or greater is required. Please upgrade using your OS package manager. Type 'bw --version' to check your version")
        # Pin client id and client secret to self
        self.client_id = client_id
        self.client_secret = client_secret
        self.session_key = session_key
        # Just for a stab in the dark , see if BW_SESSION is set and if so, set the value to self.session_key
        # If it's invalid, it's not a big deal because get_auth_status (which wraps get_bitwarden_status) will return 'locked'
        if 'BW_SESSION' in os.environ:
            self.session_key = os.getenv('BW_SESSION')
        # Do validations
        if session_key is None:
            # Then we've got to have the client id and secret
            if self.client_id is None or self.client_secret is None:
                raise ValueError(f"If not instantiating with a session key, client_id and client_secret arguments must be supplied")
        # Pin other arbitrary stuff to self (without clobbering anything already set)
        for k in kwargs.keys():
            if not hasattr(self, k):
                setattr(self, k, kwargs[k])
        # Set environment variables that the BW CLI looks for to skip prompt for credentials
        os.environ['BW_CLIENTID'] = self.client_id
        os.environ['BW_CLIENTSECRET'] = self.client_secret
        # Get context about email address (prompts interactively if not supplied)
        if not hasattr(self, 'email_address'):
            self.email_address = input("Bitwarden account email: ")
            print("If you instantiated via a JSON config file, you can avoid this message in the future by adding the key 'email_address'")
        # Do the login flow. This will ultimately pin the value for self.session_key if we didn't have a valid one already
        if self._get_auth_status() != 'unlocked':
            self._do_auth_and_unlock_flow()
        # At this point we should be unlocked for sure. If not, we've failed miserably
        if self._get_auth_status() != 'unlocked':
            raise ValueError(f"The bitwarden vault should be unlocked now using the session key but it still isn't. Something bad happened. There might be a bug in this program. Please troubleshoot.\nSession Key: {self.session_key}")
        # Load the vault and pin it to self. Note that this will pin the vault with all passwords redacted
        self.vault_contents = None
        self._load_vault() # Sets self.vault_contents
def _check_bw_version_is_valid(self, minimum_required_version:str):
"""
Checks the version of bitwarden. We need 1.18.1 or higher to leverage reading password out of environment variables via --passwordenv
This method does not use the _do_bw_command() helper function because that passes the session key which we may not have yet
Args:
minimum_required_version (str): A minimum required version string. Like "1.18.1"
Raises:
ValueError: If the 'bw --version' command results in an error for some reason
Returns:
[Boolean]: A flag telling us if the installed version is recent enough or not
"""
# Get the bitwarden version from the command line.
# We're purposely avoiding the _do_bw_command() method here.
cmd = "bw --version"
result = subprocess.run(cmd, shell=True, capture_output=True)
return_code = result.returncode
std_out = result.stdout.decode('utf-8').strip()
std_err = result.stderr.decode('utf-8').strip()
if return_code != 0:
raise ValueError(f"The command '{cmd}' resulted in a non-zero exit code of {return_code}.\n{std_err}")
else:
bw_version_string = std_out.strip()
# Is it recent enough?
valid_version = version.parse(bw_version_string) >= version.parse(minimum_required_version) # Returns bool
return valid_version
    def _do_auth_and_unlock_flow(self):
        """
        Gently guides us through the necessary steps to get the vault into an unlocked state.
        We need to go from 'unauthenticated' --> 'locked' --> 'unlocked'.

        Prompts interactively for the master password (via getpass) and keeps it
        only in a randomly-named environment variable, which is removed again in
        the `finally` block regardless of success or failure.
        """
        auth_status = self._get_auth_status()
        # Bail out if we're already unlocked
        if auth_status == 'unlocked':
            return
        # We've got some auth and/or unlocking to do. Put the password into a randomly
        # named environment variable so it never appears on a command line.
        rand_variable_name = str(uuid.uuid1()).upper()
        os.environ[rand_variable_name] = getpass.getpass("Bitwarden Master Password: ")
        try:
            # Walk the state machine until the vault is unlocked (or an unknown state appears)
            while auth_status != 'unlocked':
                auth_status = self._get_auth_status()
                if auth_status == 'unauthenticated':
                    # Let's get authenticated
                    self._log_in(password_environemt_variable=rand_variable_name)
                elif auth_status == 'locked':
                    # We are authenticated (That is, bitwarden is pointing to our account), but the vault is locked
                    self._unlock(password_environemt_variable=rand_variable_name) # This method pins session_key to self
                elif auth_status == 'unlocked':
                    # We are authenticated and the vault is unlocked. We can interact with it now
                    print("The vault is now unlocked.")
                    break
                else:
                    raise ValueError(f"There is no handling for the status '{auth_status}'")
        finally:
            # Always scrub the password from the environment
            del os.environ[rand_variable_name] # Implicitly calls unsetenv
def _log_in(self, password_environemt_variable:str):
"""
Walks is through the login process. For details, see 'bw login --help'
Args:
password_environemt_variable (string): The name of an environment variable which contains our master password
"""
client_secret = self.client_secret
email = self.email_address
# Now log in and point to the environment variable
print ("Logging into Bitwarden...")
cmd = f"bw login {self.email_address} --passwordenv {password_environemt_variable} --apikey {self.client_secret}"
self._do_bw_command(command=cmd)
def _unlock(self, password_environemt_variable:str):
"""
Unlocks the vault after having previously logged in. This action returns a session key
Args:
password_environemt_variable (string): The name of an environment variable which contains our master password
"""
print ("Unlocking Bitwarden Vault...")
cmd = f"bw unlock --passwordenv {password_environemt_variable} --raw" #The raw flag simply prints the session key that we should use for subsequent requests
session_key = self._do_bw_command(command=cmd)
self.session_key = session_key # This can be set in the env var BW_SESSION or passed with a '--session' argument with any bw command
def _get_bitwarden_status(self):
"""
Issues the 'bitwarden status' command, which returns a JSON object we can use to tell if we're logged in or not
"""
# Do we already have a session key?
if self.session_key is not None and self.session_key != '':
session_key_part = f" --session '{self.session_key}'"
else:
session_key_part = ""
cmd = f"bw status{session_key_part}"
ret_val = self._do_bw_command(command=cmd)
return json.loads(ret_val)
def _get_auth_status(self):
"""
Returns the authentication status which according to 'bw status --help' should be one of these:
"unauthenticated", "locked", "unlocked"
"""
return self._get_bitwarden_status()['status']
def _retrieve_session_key(self):
"""
Issues the command 'bw login --raw' which causes authentication to happen and returns a session key to be used for subsequent requests
"""
# Get the status
self._get_bitwarden_status()
command = "bw login --raw"
result = self._do_bw_command(command=command)
print(f"Instantiated {type(self)} for username (email address) {self.username}")
def get_cred(self, guid:str):
"""
A wrapper around the get_credentials_by_guid method.
Why? Because this method is defined in the superclass and is intended to be overridden.
That's why. Of course, it's perfectly fine to just call get_credentials_by_guid
Args:
guid (str): The Guid which we care to seek
Returns:
[dict]: Dict Containing the username and password in question
"""
return self.get_credentials_by_guid(guid=guid) # Raises exceptions as needed
    def set_cred(self):
        # Not implemented here; defer to the superclass behaviour
        return super().set_cred()
    def delete_cred(self):
        # Not implemented here; defer to the superclass behaviour
        return super().delete_cred()
def _load_vault(self, force_reload=False):
"""
Gets the entire vault, removes all passwords and pins it to self for some client side interrogation
Args:
force_reload (bool, optional): If True, causes a refresh from bw if the vault is already pinned to self. Defaults to False.
Returns:
[dict]: A dictionary with the complete contents of the vault and given value at the path i['login']['password'] will be removed
"""
# Short circuit?
if force_reload is False and self.vault_contents is not None:
return self.vault_contents
# Get everything in the vault
print("Synchronizing Vault")
self._do_bw_command('bw sync')
print("Getting all vault items. Passwords will be redacted.")
vault_items_as_string = self._do_bw_command('bw list items')
vault_items = json.loads(vault_items_as_string)
# Just to be safe. Get rid of the vault as string.
vault_items_as_string = ''
del vault_items_as_string
# Drop all passwords from the json blob, just for good measure. If we actually want a password, we'll get it from the vault again
for i in vault_items:
login = i.get('login')
if login is not None:
if type(login) is dict and 'password' in login.keys():
login['password'] = '<password removed>'
# Just to be safe, again.
ret_val = vault_items.copy()
vault_items = {}
del vault_items
self.vault_contents = ret_val
return ret_val
def _do_bw_command(self, command:str, raise_exceptions_on_non_zero=True):
"""
Helper method. Does a bitwarden cli command and passes the results back
Args:
command (string): The command to pass to the bw cli
raise_exceptions_on_non_zero (bool, optional): Controls exception raising if the command returns a non-zero code. Defaults to True.
"""
session_key_part = f'--session "{self.session_key}"'
cmd = command
if session_key_part not in cmd:
cmd = f"{cmd} {session_key_part}"
result = subprocess.run(cmd, shell=True, capture_output=True)
return_code = result.returncode
std_out = result.stdout.decode('utf-8')
std_err = result.stderr.decode('utf-8')
# Raise an exception as necessary
if return_code != 0 and raise_exceptions_on_non_zero is True:
raise Exception(f"The bw cli returned a non-0 exit code for the command: '{cmd.replace(self.session_key, '<session_key>')}'\n{std_err}")
return std_out
def print_items(self):
"""
Prints the ID and Name of each item from the vault. Useful mostly for figuring out the GUID of a given object
"""
vault_contents = self.vault_contents
for item in vault_contents:
object_type = item['object']
object_id = item['id']
object_name = item['name']
s = f"Object ID: {object_id}\tObject Type: {object_type}\tObject Name: {object_name}"
if object_type != 'item':
raise ValueError(f"Encountered a non 'item' object type in the vault. This is unexpected. {s}")
print(s)
    def get_vault_item_by_guid(self, guid:str):
        """
        Gets the item (JSON object) for a given GUID from the vault.

        The returned item includes the real password (the copy pinned to self has
        been redacted previously), because it is fetched fresh via 'bw get item'.

        Args:
            guid (str): The GUID of the vault item to fetch

        Returns:
            [dict]: The full vault item, including its un-redacted password

        Raises:
            ValueError: If no item with this GUID exists in the local vault copy
        """
        # Load the vault that is pinned to self
        if self.vault_contents is None:
            self._load_vault()
        # Try to find the item within the vault pinned to self. This saves an unnecessary trip to to BW over the internet if it isn't there
        sought_item = None
        for item in self.vault_contents:
            if item['id'] == guid:
                sought_item = item
                break
        # Raise an exception if we didn't find the item
        if sought_item is None:
            raise ValueError(f"The item with GUID {guid} was not found in the vault.")
        # Fetch the single item again so we get the real (non-redacted) password
        item_with_password = json.loads(self._do_bw_command(f"bw get item {guid}"))
        return item_with_password
def get_credentials_by_guid(self, guid:str):
"""
Returns the username and password (beneath the 'login' key) for a given item from the vault
This function simply wraps get_vault_item_by_guid, which will raise exceptions is the item is not in the vault
Args:
guid (str): The GUID for the item we want to retreive credentials for
"""
item_with_password = self.get_vault_item_by_guid(guid=guid)
login = item_with_password['login']
username = login['username']
password = login['password']
return dict(username=username, password=password)
def get_username_by_guid(self, guid:str):
"""
Returns the username for a given vault item by GUID. Since we'll have the vault (without passwords) pinned to self already
We can just read that rather than pinging bitwarden again
Args:
guid (str): [description]
"""
# Load the vault that is pinned to self
if self.vault_contents is None:
self._load_vault()
# Try to find the item within the vault pinned to self. This saves an unnecessary trip to to BW over the internet if it isn't there
sought_item = None
for item in self.vault_contents:
if item['id'] == guid:
sought_item = item
break
# Raise an exception if we didn't find the item
if sought_item is None:
raise ValueError(f"The item with GUID {guid} was not found in the vault.")
return sought_item['login']['username']
def get_password_by_guid(self, guid:str):
"""
Returns the password for a given item by GUID. Wraps the get_credentials_by_guid method which in turn wraps get_vault_item_by_guid
(which raises exceptions if something is missing)
Args:
guid (str): [description]
"""
creds = self.get_credentials_by_guid(guid=guid)
return creds['password']
| 41.972973 | 247 | 0.6481 |
61f9d61ddf16dfe982de5cd443717f5e39b05a82 | 7,027 | py | Python | transforms/waveform.py | koukyo1994/kaggle-rfcx | c3573d014d99312b58882e7b939de6c1055129b1 | [
"MIT"
] | 6 | 2021-02-18T05:18:17.000Z | 2022-02-19T02:49:32.000Z | transforms/waveform.py | koukyo1994/kaggle-rfcx | c3573d014d99312b58882e7b939de6c1055129b1 | [
"MIT"
] | null | null | null | transforms/waveform.py | koukyo1994/kaggle-rfcx | c3573d014d99312b58882e7b939de6c1055129b1 | [
"MIT"
] | 2 | 2021-02-18T11:31:50.000Z | 2022-02-19T02:49:07.000Z | import colorednoise as cn
import librosa
import numpy as np
def get_waveform_transforms(config: dict, phase: str):
    """
    Build the waveform augmentation pipeline configured for `phase`.

    Each configured transform name is looked up in this module's globals and
    instantiated with its optional params. Returns a Compose over the found
    transforms, or None when nothing is configured or nothing matched.
    """
    transforms = config.get("transforms")
    if transforms is None or transforms[phase] is None:
        return None
    instantiated = []
    for conf in transforms[phase]:
        cls = globals().get(conf["name"])
        if cls is None:
            continue  # unknown transform names are silently skipped
        params = conf["params"] if conf.get("params") is not None else {}
        instantiated.append(cls(**params))
    return Compose(instantiated) if instantiated else None
class Compose:
    """Apply a sequence of waveform transforms, feeding each output to the next."""

    def __init__(self, transforms: list):
        self.transforms = transforms

    def __call__(self, y: np.ndarray):
        out = y
        for transform in self.transforms:
            out = transform(out)
        return out
class OneOf:
    """Apply exactly one transform, chosen uniformly at random from the list."""

    def __init__(self, transforms: list):
        self.transforms = transforms

    def __call__(self, y: np.ndarray):
        chosen = self.transforms[np.random.choice(len(self.transforms))]
        return chosen(y)
class AudioTransform:
    """
    Base class for waveform augmentations.

    Subclasses implement apply(); __call__ runs it unconditionally when
    always_apply is True, otherwise with probability p.
    """

    def __init__(self, always_apply=False, p=0.5):
        self.always_apply = always_apply
        self.p = p

    def __call__(self, y: np.ndarray):
        # Note: the random draw only happens when always_apply is False
        if self.always_apply or np.random.rand() < self.p:
            return self.apply(y)
        return y

    def apply(self, y: np.ndarray):
        raise NotImplementedError
class Normalize:
    """Scale a waveform so its peak absolute amplitude becomes 1."""

    def __call__(self, y: np.ndarray):
        peak = np.abs(y).max()
        scaled = y * 1 / peak
        return np.asfortranarray(scaled)
class NewNormalize:
    """
    Normalize a waveform by removing its mean and scaling the peak absolute
    amplitude to 1.
    """

    def __call__(self, y: np.ndarray):
        y_mm = y - y.mean()
        # Bug fix: np.ndarray has no .abs() method -- the original `y_mm.abs()`
        # raised AttributeError. Use np.abs() instead.
        return y_mm / np.abs(y_mm).max()
class LibrosaNormalize:
    # Thin wrapper delegating peak normalization to librosa.util.normalize
    def __call__(self, y: np.ndarray):
        return librosa.util.normalize(y)
class GaussianNoiseSNR(AudioTransform):
    """
    Add white (Gaussian) noise at a signal-to-noise ratio drawn uniformly from
    [min_snr, max_snr] dB.
    """

    def __init__(self, always_apply=False, p=0.5, min_snr=5.0, max_snr=20.0, **kwargs):
        super().__init__(always_apply, p)
        self.min_snr = min_snr
        self.max_snr = max_snr

    def apply(self, y: np.ndarray, **params):
        snr = np.random.uniform(self.min_snr, self.max_snr)
        signal_peak = np.sqrt(y ** 2).max()
        # Target noise amplitude implied by the drawn SNR (dB -> linear ratio)
        target_noise_peak = signal_peak / (10 ** (snr / 20))
        noise = np.random.randn(len(y))
        noise_peak = np.sqrt(noise ** 2).max()
        return (y + noise * 1 / noise_peak * target_noise_peak).astype(y.dtype)
class PinkNoiseSNR(AudioTransform):
    # Adds 1/f (pink) noise at an SNR drawn uniformly from [min_snr, max_snr] dB
    def __init__(self, always_apply=False, p=0.5, min_snr=5.0, max_snr=20.0, **kwargs):
        super().__init__(always_apply, p)
        self.min_snr = min_snr
        self.max_snr = max_snr
    def apply(self, y: np.ndarray, **params):
        # Draw the SNR, then scale pink noise so its peak matches the implied amplitude
        snr = np.random.uniform(self.min_snr, self.max_snr)
        a_signal = np.sqrt(y ** 2).max()
        a_noise = a_signal / (10 ** (snr / 20))
        # powerlaw_psd_gaussian(1, n) produces pink (1/f) noise via colorednoise
        pink_noise = cn.powerlaw_psd_gaussian(1, len(y))
        a_pink = np.sqrt(pink_noise ** 2).max()
        augmented = (y + pink_noise * 1 / a_pink * a_noise).astype(y.dtype)
        return augmented
class PitchShift(AudioTransform):
    # Shifts pitch by a random whole number of semitones in [-max_steps, max_steps)
    def __init__(self, always_apply=False, p=0.5, max_steps=5, sr=32000):
        super().__init__(always_apply, p)
        self.max_steps = max_steps
        self.sr = sr
    def apply(self, y: np.ndarray, **params):
        # NOTE(review): randint's upper bound is exclusive, so +max_steps is never
        # drawn -- confirm whether the range should be symmetric.
        n_steps = np.random.randint(-self.max_steps, self.max_steps)
        augmented = librosa.effects.pitch_shift(y, sr=self.sr, n_steps=n_steps)
        return augmented
class Identity(AudioTransform):
    """No-op augmentation: returns the waveform unchanged."""

    def __init__(self, always_apply=False, p=0.5):
        super().__init__(always_apply=always_apply, p=p)

    def apply(self, y: np.ndarray, **params):
        return y
class PitchUp(AudioTransform):
    # Shifts pitch upward by a random number of semitones in [0, max_steps)
    def __init__(self, always_apply=False, p=0.5, max_steps=5, sr=32000):
        super().__init__(always_apply=always_apply, p=p)
        self.max_steps = max_steps
        self.sr = sr
    def apply(self, y: np.ndarray, **params):
        # NOTE(review): 0 (no shift) can be drawn; upper bound is exclusive
        n_steps = np.random.randint(0, self.max_steps)
        augmented = librosa.effects.pitch_shift(y, sr=self.sr, n_steps=n_steps)
        return augmented
class PitchDown(AudioTransform):
    # Shifts pitch downward by a random number of semitones in [-max_steps, 0)
    def __init__(self, always_apply=False, p=0.5, max_steps=5, sr=32000):
        super().__init__(always_apply=always_apply, p=p)
        self.max_steps = max_steps
        self.sr = sr
    def apply(self, y: np.ndarray, **params):
        # Always a strictly negative shift of at least one semitone
        n_steps = np.random.randint(-self.max_steps, 0)
        augmented = librosa.effects.pitch_shift(y, sr=self.sr, n_steps=n_steps)
        return augmented
class TimeStretch(AudioTransform):
    # Time-stretches the waveform by a random rate in (0, max_rate)
    def __init__(self, always_apply=False, p=0.5, max_rate=1.2):
        super().__init__(always_apply, p)
        self.max_rate = max_rate
    def apply(self, y: np.ndarray, **params):
        # NOTE(review): rates near 0 blow up the output length enormously --
        # confirm the intended lower bound. Also, librosa >= 0.10 requires
        # `rate` as a keyword argument (librosa.effects.time_stretch(y, rate=rate)).
        rate = np.random.uniform(0, self.max_rate)
        augmented = librosa.effects.time_stretch(y, rate)
        return augmented
class TimeShift(AudioTransform):
    """
    Circularly shift the waveform by a random offset of up to max_shift_second
    seconds in either direction.

    padding_mode 'replace' keeps the wrapped-around samples; 'zero' silences the
    region exposed by the shift.
    """

    def __init__(self, always_apply=False, p=0.5, max_shift_second=2, sr=32000, padding_mode="replace"):
        super().__init__(always_apply, p)
        assert padding_mode in ["replace", "zero"], "`padding_mode` must be either 'replace' or 'zero'"
        self.max_shift_second = max_shift_second
        self.sr = sr
        self.padding_mode = padding_mode

    def apply(self, y: np.ndarray, **params):
        max_samples = self.sr * self.max_shift_second
        shift = np.random.randint(-max_samples, max_samples)
        shifted = np.roll(y, shift)
        if self.padding_mode == "zero":
            # NOTE(review): when shift == 0 the else-branch zeroes the whole
            # signal (shifted[0:] = 0) -- confirm this is intended.
            if shift > 0:
                shifted[:shift] = 0
            else:
                shifted[shift:] = 0
        return shifted
class VolumeControl(AudioTransform):
    """
    Randomly change the volume by up to +/- db_limit dB.

    mode:
        'uniform' -- constant gain over the whole clip
        'fade'    -- gain ramps linearly (fade-out envelope)
        'cosine'  -- gain follows a cosine envelope
        'sine'    -- gain follows a sine envelope
    """

    def __init__(self, always_apply=False, p=0.5, db_limit=10, mode="uniform"):
        super().__init__(always_apply, p)
        # Fixed: the allowed-mode list contained a duplicate "fade"; it now lists
        # exactly the four documented modes (same membership as before).
        assert mode in ["uniform", "fade", "cosine", "sine"], \
            "`mode` must be one of 'uniform', 'fade', 'cosine', 'sine'"
        self.db_limit = db_limit
        self.mode = mode

    def apply(self, y: np.ndarray, **params):
        db = np.random.uniform(-self.db_limit, self.db_limit)
        if self.mode == "uniform":
            db_translated = 10 ** (db / 20)
        elif self.mode == "fade":
            # Linear ramp from 1 down to 0 across the clip
            lin = np.arange(len(y))[::-1] / (len(y) - 1)
            db_translated = 10 ** (db * lin / 20)
        elif self.mode == "cosine":
            cosine = np.cos(np.arange(len(y)) / len(y) * np.pi * 2)
            db_translated = 10 ** (db * cosine / 20)
        else:
            # 'sine' -- the only remaining valid mode
            sine = np.sin(np.arange(len(y)) / len(y) * np.pi * 2)
            db_translated = 10 ** (db * sine / 20)
        return y * db_translated
| 31.231111 | 104 | 0.605237 |
61fa26c1e849bd6b6249a17cb7a588c5997af757 | 438 | py | Python | Back/generalchatroom/models.py | sadeghjafari5528/404- | 0499b93cc473ec4def96d95364180eb4f4dafb11 | [
"MIT"
] | null | null | null | Back/generalchatroom/models.py | sadeghjafari5528/404- | 0499b93cc473ec4def96d95364180eb4f4dafb11 | [
"MIT"
] | 1 | 2020-12-27T14:59:35.000Z | 2020-12-27T14:59:35.000Z | Back/generalchatroom/models.py | sadeghjafari5528/404- | 0499b93cc473ec4def96d95364180eb4f4dafb11 | [
"MIT"
] | 2 | 2020-10-30T08:08:32.000Z | 2020-10-30T20:47:51.000Z | from django.db import models
from registeration.models import User
from chatroom.models import Chatroom
class Message(models.Model):
    """A chat message posted in a chatroom, optionally replying to another message."""
    # Deleting the chatroom deletes its messages
    chatroom = models.ForeignKey(Chatroom, on_delete=models.CASCADE)
    # Deleting the user keeps the message, with user set to NULL
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    text = models.TextField()
    # Self-reference for threaded replies; NOTE(review): camelCase field name is
    # unconventional for Django models -- confirm before renaming (migration needed)
    parentMessage = models.ForeignKey("self", null=True, on_delete=models.SET_NULL)
    # Indexed to support ordering/filtering messages by time
    time = models.DateTimeField(db_index=True)
61fa91668b7e930a4d4c6429b8910bfdb88b86e5 | 1,095 | py | Python | plyplus/test/test_trees.py | rubycandy/test-plyplus | ced9377e6c26dcf308dd9f480411af9c8dbe9c56 | [
"MIT"
] | 169 | 2015-01-16T12:48:23.000Z | 2021-12-09T16:00:13.000Z | plyplus/test/test_trees.py | rubycandy/test-plyplus | ced9377e6c26dcf308dd9f480411af9c8dbe9c56 | [
"MIT"
] | 26 | 2015-01-23T16:30:28.000Z | 2018-07-07T09:14:18.000Z | plyplus/test/test_trees.py | rubycandy/test-plyplus | ced9377e6c26dcf308dd9f480411af9c8dbe9c56 | [
"MIT"
] | 53 | 2015-01-22T20:20:10.000Z | 2021-12-05T13:39:57.000Z | from __future__ import absolute_import
import unittest
import logging
import copy
import pickle
from plyplus.plyplus import STree
# Configure root logging once at import time for this test module
logging.basicConfig(level=logging.INFO)
class TestSTrees(unittest.TestCase):
    """Unit tests for plyplus.STree copying, parent calculation and pickling."""

    def setUp(self):
        # Root 'a' with three leaf children: (b, 'x'), (c, 'y'), (d, 'z')
        self.tree1 = STree('a', [STree(head, tail) for head, tail in zip('bcd', 'xyz')])

    def test_deepcopy(self):
        assert copy.deepcopy(self.tree1) == self.tree1

    def test_parents(self):
        tree = copy.deepcopy(self.tree1)
        tree.calc_parents()
        for index, child in enumerate(tree.tail):
            assert child.parent() == tree
            assert child.index_in_parent == index

    def test_pickle(self):
        tree = copy.deepcopy(self.tree1)
        assert pickle.loads(pickle.dumps(tree)) == tree

    def test_pickle_with_parents(self):
        tree = copy.deepcopy(self.tree1)
        tree.calc_parents()
        restored = pickle.loads(pickle.dumps(tree))
        assert restored == tree
        for index, child in enumerate(restored.tail):
            assert child.parent() == restored
            assert child.index_in_parent == index
if __name__ == '__main__':
    # Allow running this test module directly
    unittest.main()
| 24.886364 | 76 | 0.613699 |
61fae1b5b671ac52f912549b4f9c186cb38b0495 | 1,563 | py | Python | misaligned.py | clean-code-craft-tcq-2/test-failer-in-py-yashaswin-mayya | 1861f2db11a508e9c1e2f7ce351d11d87c0c734c | [
"MIT"
] | null | null | null | misaligned.py | clean-code-craft-tcq-2/test-failer-in-py-yashaswin-mayya | 1861f2db11a508e9c1e2f7ce351d11d87c0c734c | [
"MIT"
] | null | null | null | misaligned.py | clean-code-craft-tcq-2/test-failer-in-py-yashaswin-mayya | 1861f2db11a508e9c1e2f7ce351d11d87c0c734c | [
"MIT"
] | null | null | null |
MAJOR_COLORS = ["White", "Red", "Black", "Yellow", "Violet"]
MINOR_COLORS = ["Blue", "Orange", "Green", "Brown", "Slate"]
def get_color_from_pair_number(pair_number):
    """Map a 1-based wire-pair number (1..25) to its (major, minor) color pair."""
    index = pair_number - 1  # convert to 0-based before splitting
    major, minor = divmod(index, len(MINOR_COLORS))
    return MAJOR_COLORS[major], MINOR_COLORS[minor]
def print_color_map():
    """Print the full 25-pair number-to-color reference table."""
    for major in range(5):
        for minor in range(5):
            pair_number = major * 5 + minor + 1  # pair numbers are 1-based
            major_color, minor_color = get_color_from_pair_number(pair_number)
            print(f'{pair_number} | {major_color} | {minor_color}')
# Bug fix: print_color_map() was also called unconditionally here at import time,
# duplicating the call under the __main__ guard below; the stray call is removed.
def test_color_map(test_paid_number, expected_major_colour, expected_minor_color):
    # NOTE(review): 'test_paid_number' looks like a typo for 'test_pair_number'
    assert(get_color_from_pair_number(test_paid_number) == (expected_major_colour, expected_minor_color))
#testing each of 25 color pairs
if __name__ == '__main__':
    print_color_map()
    # NOTE(review): despite the comment above, only the first 13 of the 25
    # pairs are actually asserted here
    test_color_map(1, 'White', 'Blue')
    test_color_map(2, 'White', 'Orange')
    test_color_map(3, 'White', 'Green')
    test_color_map(4, 'White', 'Brown')
    test_color_map(5, 'White', 'Slate')
    test_color_map(6, 'Red', 'Blue')
    test_color_map(7, 'Red', 'Orange')
    test_color_map(8, 'Red', 'Green')
    test_color_map(9, 'Red', 'Brown')
    test_color_map(10, 'Red', 'Slate')
    test_color_map(11, 'Black', 'Blue')
    test_color_map(12, 'Black', 'Orange')
    test_color_map(13, 'Black', 'Green')
    print("All is well (maybe!)\n")
| 36.348837 | 129 | 0.685861 |
61fe5553a131ad8494dec157c4505511e27beecb | 611 | py | Python | examples/embed_cmd.py | bentettmar/discord.py-self_embed | 4253ea7977b17972de2e15de3606a183f70b22b0 | [
"MIT"
] | 2 | 2022-03-31T04:06:05.000Z | 2022-03-31T16:39:40.000Z | examples/embed_cmd.py | bentettmar/discord.py-self_embed | 4253ea7977b17972de2e15de3606a183f70b22b0 | [
"MIT"
] | 3 | 2022-03-29T11:58:16.000Z | 2022-03-31T16:41:13.000Z | examples/embed_cmd.py | bentettmar/discord.py-self_embed | 4253ea7977b17972de2e15de3606a183f70b22b0 | [
"MIT"
] | null | null | null | import discord_self_embed
from discord.ext import commands
bot = commands.Bot(command_prefix=".", self_bot=True)
@bot.event
async def on_ready():
print("ready")
@bot.command(name="embed")
async def embed_cmd(ctx):
embed = discord_self_embed.Embed("discord.py-self_embed", description="A way for selfbots to send embeds again.", colour="ff0000", url="https://github.com/bentettmar/discord.py-self_embed")
embed.set_author("Ben Tettmar")
await ctx.send(embed.generate_url(hide_url=True)) # You can also send the embed converted to a string which will auto hide the url.
bot.run("TOKEN_HERE")
| 33.944444 | 193 | 0.749591 |
1101b9ca063e23e2fd57ae664425f377c0723f09 | 8,823 | py | Python | analysis.py | liunx/lmms | ea54f64934d90887a38446ef02ed2baed91548db | [
"MIT"
] | null | null | null | analysis.py | liunx/lmms | ea54f64934d90887a38446ef02ed2baed91548db | [
"MIT"
] | null | null | null | analysis.py | liunx/lmms | ea54f64934d90887a38446ef02ed2baed91548db | [
"MIT"
] | null | null | null | import re
import copy
from operator import itemgetter
import music21 as m21
class Core:
    """
    Base class that parses a token-based melody notation into a normalized
    "noteset" (list of dicts with offset/midi/len/tie), plus side tables for
    roman numerals, styles, emotions and instructions keyed by offset.

    Subclasses must implement analysis(), which __init__ invokes (template
    method pattern).
    """
    # Ticks per whole note; a quarter note ('4') is therefore 48 ticks
    meter_len = 192
    # MIDI numbers of the natural pitches in the central octave
    notes = {'C': 60, 'D': 62, 'E': 64, 'F': 65, 'G': 67, 'A': 69, 'B': 71}
    # General MIDI percussion key map (note number -> instrument name)
    percussion = {
        35: 'AcousticBassDrum', 36: 'BassDrum1', 37: 'SideStick', 38: 'AcousticSnare',
        39: 'HandClap', 40: 'ElectricSnare', 41: 'LowFloorTom', 42: 'ClosedHiHat',
        43: 'HighFloorTom', 44: 'PedalHi-Hat', 45: 'LowTom', 46: 'OpenHi-Hat',
        47: 'Low-MidTom', 48: 'Hi-MidTom', 49: 'CrashCymbal1', 50: 'HighTom',
        51: 'RideCymbal1', 52: 'ChineseCymbal', 53: 'RideBell', 54: 'Tambourine',
        55: 'SplashCymbal', 56: 'Cowbell', 57: 'CrashCymbal2', 58: 'Vibraslap',
        59: 'RideCymbal2', 60: 'HiBongo', 61: 'LowBongo', 62: 'MuteHiConga',
        63: 'OpenHiConga', 64: 'LowConga', 65: 'HighTimbale', 66: 'LowTimbale',
        67: 'HighAgogo', 68: 'LowAgogo', 69: 'Cabasa', 70: 'Maracas', 71: 'ShortWhistle',
        72: 'LongWhistle', 73: 'ShortGuiro', 74: 'LongGuiro', 75: 'Claves', 76: 'HiWoodBlock',
        77: 'LowWoodBlock', 78: 'MuteCuica', 79: 'OpenCuica', 80: 'MuteTriangle', 81: 'OpenTriangle'}
    def __init__(self, staff, data):
        """
        Args:
            staff (dict): Must contain 'timesign' and 'key' entries (pinned at offset 0).
            data (list): Token stream; deep-copied before analysis so the caller's
                list is never mutated.
        """
        self.total_len = 0
        self.noteset = []
        self.roman_numerals = []
        self.instructions = {}
        self.styles = {}
        self.emotions = {}
        self.time_signs = {0: staff['timesign']}
        self.keys = {0: staff['key']}
        # Template method: analysis() is provided by the subclass
        self.analysis(copy.deepcopy(data))
    def show_noteset(self):
        # Debug helper: dump every parsed note dict
        print("==== total notes ====")
        for i in self.noteset:
            print(i)
    def note_midi(self, note):
        """
        Convert a note token (e.g. "c'4", "CC8", "B-2") to a MIDI number.
        Lowercase letters go up an octave per apostrophe; repeated uppercase
        letters go down an octave each; '-' flattens, '#' sharpens.
        """
        step = note[0].upper()
        midi = self.notes[step]
        if note[0].islower():
            midi += 12 * note.count("'")
        else:
            # e.g. "CC" counts the step letter twice -> one octave down
            midi -= 12 * note.count(step)
        if note.count('-') > 0:
            alter = note.count('-')
            midi -= alter
        elif note.count('#') > 0:
            alter = note.count('#')
            midi += alter
        return midi
    def note_len(self, note):
        """
        Return the duration (in ticks) encoded by the token's number and dots,
        e.g. "c4." -> 48 + 24 = 72. Returns 0 for tokens that do not match.
        """
        num = 0
        dot = 0
        # Rest & Notation
        m = re.match(r'([a-grA-GR\'#-]+)(\d+)([.]*)', note)
        if not m:
            return 0
        num = int(m.group(2))
        dot = m.group(3).count('.')
        n1 = self.meter_len / num
        curr = n1
        # Each dot adds half of the previous added value
        for _ in range(dot):
            n1 += curr / 2
            curr = curr / 2
        return n1
    def to_note(self, note, offset):
        """Build a note dict {offset, midi, tie} for a token at the given offset."""
        d = {}
        d['offset'] = offset
        midi = self.note_midi(note)
        d['midi'] = midi
        # '~' marks the note as tied to the next note of the same pitch
        if note.count('~') > 0:
            d['tie'] = 1
        else:
            d['tie'] = 0
        return d
    def is_note(self, note):
        # A note/rest token is letters (+ octave/accidental marks) followed by digits
        m = re.match(r'[a-grA-GR\'#-]+\d+', note)
        if not m:
            return False
        return True
    def divide_keyword(self, n, offset):
        """
        Route a non-note keyword token into the matching side table:
        '!!' instruction, '$$' style, '!' roman numeral, '*' emotion.
        """
        if n.startswith('!!'):
            # NOTE(review): local `d` is built but unused in this branch
            d = {'offset': offset, 'instruction': n[2:]}
            self.instructions[offset] = n[2:]
        elif n.startswith('$$'):
            self.styles[offset] = n[2:]
        elif n.startswith('!'):
            d = {'offset': offset, 'roman_numeral': n[1:]}
            self.roman_numerals.append(d)
        elif n.startswith('*'):
            self.emotions[offset] = n[1:]
        else:
            raise ValueError("Unknown keyword: {}!".format(n))
    def to_noteset(self, data):
        """
        Walk the token stream, translating notes, chords, triplets, rests and
        keywords into self.noteset and the side tables. Sets self.total_len to
        the final running offset (in ticks).
        """
        offset = 0
        _len = 0
        for n in data:
            # chord | trip
            if type(n) == list:
                if n[0] == 'chord':
                    # All chord members share the offset; time advances once
                    _len = self.note_len(n[-1])
                    for _n in n[1:]:
                        d = self.to_note(_n, offset)
                        d['len'] = _len
                        self.noteset.append(d)
                    offset += _len
                elif n[0] == 'tripchord':
                    # Triplet chord: each member lasts 2/3 of the notated length
                    _len = self.note_len(n[-1]) * 2 / 3
                    for _n in n[1:]:
                        d = self.to_note(_n, offset)
                        d['len'] = _len
                        self.noteset.append(d)
                    offset += _len
                elif n[0] == 'trip':
                    # Triplet run: each element (rests included) advances time
                    _len = self.note_len(n[-1]) * 2 / 3
                    for _n in n[1:]:
                        if _n[0] != 'r':
                            d = self.to_note(_n, offset)
                            d['len'] = _len
                            self.noteset.append(d)
                        offset += _len
                else:
                    raise ValueError("Unknown keyword: {}!".format(n[0]))
            else:
                # skip keywords
                if not self.is_note(n):
                    self.divide_keyword(n, offset)
                    continue
                # skip Rest note (still advances the running offset)
                if n[0].upper() == 'R':
                    _len = self.note_len(n)
                    offset += _len
                    continue
                d = self.to_note(n, offset)
                _len = self.note_len(n)
                offset += _len
                d['len'] = _len
                self.noteset.append(d)
        self.total_len = offset
    def _tie(self, nset, i):
        """
        Find the note that continues `nset` (same pitch, starting exactly where
        nset ends), merge its length into nset and mark it dropped. Recurses when
        the continuation is itself tied onward.
        """
        _len = len(self.noteset)
        while i < _len:
            _nset = self.noteset[i]
            if _nset['midi'] == nset['midi'] and (nset['offset'] + nset['len']) == _nset['offset']:
                if _nset['tie'] > 0:
                    # The continuation is tied further: resolve it first so its
                    # accumulated length is absorbed here
                    self._tie(_nset, i)
                    nset['tie'] = 0
                    nset['len'] += _nset['len']
                    _nset['drop'] = 1
                else:
                    nset['tie'] = 0
                    nset['len'] += _nset['len']
                    _nset['drop'] = 1
                break
            i += 1
    def update_tie(self):
        """Merge all tied notes into single long notes and drop the absorbed ones."""
        _noteset = []
        _noteset_len = len(self.noteset)
        i = 0
        while i < _noteset_len:
            nset = self.noteset[i]
            if nset.get('drop'):
                i += 1
                continue
            if nset['tie'] > 0:
                self._tie(nset, i)
            i += 1
        # Keep only the surviving (non-dropped) notes
        for i in self.noteset:
            if i.get('drop'):
                continue
            _noteset.append(i)
        self.noteset = _noteset
    def update_roman_numeral(self):
        """
        Give each roman numeral entry a 'len' spanning up to the next entry (the
        last one runs to total_len). 'N' entries are dropped.
        """
        # get the total length of notesets
        if not self.total_len > 0:
            return
        _len = len(self.roman_numerals)
        if _len == 0:
            return
        i = 0
        while i < _len:
            rn = self.roman_numerals[i]
            if rn['roman_numeral'] == 'N':
                # 'N' means "no chord": mark for removal
                rn['drop'] = 1
                i += 1
                continue
            if (i + 1) == _len:
                rn['len'] = self.total_len - rn['offset']
                break
            _rn = self.roman_numerals[i + 1]
            rn['len'] = _rn['offset'] - rn['offset']
            i += 1
        # rm dropped set
        l = []
        for i in self.roman_numerals:
            if 'drop' in i:
                continue
            l.append(i)
        self.roman_numerals = l
    def analysis(self, data):
        # Abstract: subclasses define the concrete analysis pipeline
        raise NotImplementedError
class Analysis(Core):
    """Concrete Core subclass that runs the full pipeline and exposes results as a dict."""

    def __init__(self, staff, data):
        super().__init__(staff, data)

    def reform_roman_numeral(self):
        # Re-key the roman numeral list by offset for direct lookup
        return {entry['offset']: entry for entry in self.roman_numerals}

    def analysis(self, data):
        # Pipeline: parse tokens -> merge tied notes -> compute roman numeral spans
        self.to_noteset(data)
        self.update_tie()
        self.update_roman_numeral()

    def get_result(self):
        """Bundle every derived structure into a single result dict."""
        return {
            'noteset': self.noteset,
            'styles': self.styles,
            'roman_numerals': self.reform_roman_numeral(),
            'emotions': self.emotions,
            'instructions': self.instructions,
            'total_len': self.total_len,
            'time_signs': self.time_signs,
            'keys': self.keys,
        }
if __name__ == "__main__":
    # Assorted sample token streams exercising chords, triplets, ties and keywords
    data = ['C4~', ['chord', 'E4~', 'G4~'], [
        'chord', 'E4~', 'G4~'], ['chord', 'E4', 'G4']]
    data2 = ['C4', ['trip', 'C4', 'E4', 'G4']]
    data3 = ['C4~', 'C4', 'E4~', 'E4']
    data4 = ['CC8', 'r8', 'DD8', 'CC8', 'CC8', 'r8', 'DD8', 'r8']
    data5 = [
        'c2', '!up', '!good', 'c4.', 'c8', 'c2', '!happy', 'c2', 'c1~', 'c1', 'G2', 'c4.', 'c8', 'c1', 'G2', 'd4.',
        'B8', 'c1', 'G2', 'c4.', 'e8', 'g2', 'e4', 'c4', 'd2', 'c4.', 'd8', 'd1', 'G2', 'c4.',
        'c8', 'c1', 'G2', 'd4.', 'B8', 'c1', 'G2', 'c4.', 'e8', 'g2', 'e4', 'c4', 'f2', 'e4.',
        'd8', 'c1', 'r1', 'r1', 'r1', 'r1']
    data6 = ['!I', 'R1', '!II', 'R1', '!III', '!IV', '!V', '!VI', '!VII']
    data7 = ['$$pop', 'r1', '!I', 'r1', '*happy', '!IV',
             '!V7', '!i', '!Isus4', '!!ts_44', '!!to_D']
    #rym = Rhythm(data)
    #bt = Beats(data4)
    # Bug fix: `Melody` is not defined anywhere in this module (NameError), and an
    # empty staff dict would raise KeyError in Core.__init__ (which requires
    # 'timesign' and 'key'). Use the defined Analysis class with a minimal staff.
    ml = Analysis({'timesign': '4/4', 'key': 'C'}, data7)
    # ml.show_noteset()
11025303e524cbae387748d4c806d2a09276590a | 6,302 | py | Python | tests/server/utils.py | csadorf/aiida-optimade | 99ee1113cfc109a40a83bb43af8d07ce7e1601e6 | [
"MIT"
] | null | null | null | tests/server/utils.py | csadorf/aiida-optimade | 99ee1113cfc109a40a83bb43af8d07ce7e1601e6 | [
"MIT"
] | null | null | null | tests/server/utils.py | csadorf/aiida-optimade | 99ee1113cfc109a40a83bb43af8d07ce7e1601e6 | [
"MIT"
] | null | null | null | # pylint: disable=no-name-in-module,too-many-arguments
import json
import re
import typing
from urllib.parse import urlparse
import warnings
from requests import Response
from fastapi.testclient import TestClient
from pydantic import BaseModel
import pytest
from starlette import testclient
from optimade import __api_version__
from optimade.models import ResponseMeta
class OptimadeTestClient(TestClient):
    """Special OPTIMADE edition of FastAPI's (Starlette's) TestClient

    This is needed, since `urllib.parse.urljoin` removes paths from the passed
    `base_url`.
    So this will prepend any requests with the MAJOR OPTIMADE version path.
    """

    def __init__(
        self,
        app: typing.Union[testclient.ASGI2App, testclient.ASGI3App],
        base_url: str = "http://example.org",
        raise_server_exceptions: bool = True,
        root_path: str = "",
        version: str = "",
    ) -> None:
        super(OptimadeTestClient, self).__init__(
            app=app,
            base_url=base_url,
            raise_server_exceptions=raise_server_exceptions,
            root_path=root_path,
        )
        if version:
            # Bug fix: the previous implementation prepended "/v" first and then
            # matched a pattern anchored at "v", so any input not already starting
            # with "v" (e.g. "1.0") always failed validation and fell back to the
            # default. Validate the slash-stripped, "v"-prefixed form instead, and
            # escape the "." (it previously matched any character).
            stripped = version.lstrip("/")
            if not stripped.startswith("v"):
                stripped = f"v{stripped}"
            if re.fullmatch(r"v[0-9](\.[0-9]+){0,2}", stripped) is None:
                warnings.warn(
                    f"Invalid version passed to client: '{version}'. "
                    f"Will use the default: '/v{__api_version__.split('.')[0]}'"
                )
                stripped = f"v{__api_version__.split('.')[0]}"
            # Normalized to "/vMAJOR[.MINOR[.PATCH]]" so request() composes valid paths
            version = f"/{stripped}"
        self.version = version

    def request(  # pylint: disable=too-many-locals
        self,
        method: str,
        url: str,
        params: testclient.Params = None,
        data: testclient.DataType = None,
        headers: typing.MutableMapping[str, str] = None,
        cookies: testclient.Cookies = None,
        files: testclient.FileType = None,
        auth: testclient.AuthType = None,
        timeout: testclient.TimeOut = None,
        allow_redirects: bool = None,
        proxies: typing.MutableMapping[str, str] = None,
        hooks: typing.Any = None,
        stream: bool = None,
        verify: typing.Union[bool, str] = None,
        cert: typing.Union[str, typing.Tuple[str, str]] = None,
        json: typing.Any = None,  # pylint: disable=redefined-outer-name
    ) -> Response:
        """Prepend the version path to relative URLs that do not already carry one."""
        if (
            re.match(r"/?v[0-9](.[0-9]){0,2}/", url) is None
            and not urlparse(url).scheme
        ):
            while url.startswith("/"):
                url = url[1:]
            url = f"{self.version}/{url}"
        return super().request(
            method=method,
            url=url,
            params=params,
            data=data,
            headers=headers,
            cookies=cookies,
            files=files,
            auth=auth,
            timeout=timeout,
            allow_redirects=allow_redirects,
            proxies=proxies,
            hooks=hooks,
            stream=stream,
            verify=verify,
            cert=cert,
            json=json,
        )
class EndpointTests:
    """Base class carrying the checks shared by all endpoint test suites."""

    request_str: str = None
    response_cls: BaseModel = None

    response = None
    json_response = None

    @pytest.fixture(autouse=True)
    def get_response(self, client):
        """Fetch the endpoint once per test; clear the stashed responses afterwards."""
        self.response = client.get(self.request_str)
        self.json_response = self.response.json()
        yield
        self.response = None
        self.json_response = None

    @staticmethod
    def check_keys(keys: list, response_subset: typing.Iterable):
        """Utility function to help validate dict keys"""
        for key in keys:
            assert (
                key in response_subset
            ), f"{key} missing from response {response_subset}"

    def test_response_okay(self):
        """The request must have returned HTTP 200."""
        assert self.response.status_code == 200, (
            f"Request to {self.request_str} failed: "
            f"{json.dumps(self.json_response, indent=2)}"
        )

    def test_meta_response(self):
        """Validate the `meta` property against the OPTIMADE ResponseMeta model."""
        assert "meta" in self.json_response
        required = ResponseMeta.schema()["required"]
        optional = list(
            set(ResponseMeta.schema()["properties"].keys()) - set(required)
        )
        implemented_optional = ["data_available", "implementation"]
        self.check_keys(required, self.json_response["meta"])
        self.check_keys(implemented_optional, optional)
        self.check_keys(implemented_optional, self.json_response["meta"])

    def test_serialize_response(self):
        """The JSON payload must round-trip through the declared pydantic model."""
        assert self.response_cls is not None, "Response class unset for this endpoint"
        self.response_cls(**self.json_response)  # pylint: disable=not-callable
self.response_cls(**self.json_response) # pylint: disable=not-callable
def client_factory():
    """Return a factory for TestClients of the OPTIMADE server.

    The returned callable accepts an optional URL ``version`` prefix and a
    flag controlling whether server exceptions propagate into the test.
    """
    from aiida_optimade.main import APP

    def inner(
        version: typing.Optional[str] = None, raise_server_exceptions: bool = True
    ) -> OptimadeTestClient:
        """Instantiate an OptimadeTestClient, optionally pinned to a version."""
        # Build the kwargs once instead of duplicating the constructor call
        # for the with/without-version cases (previous code repeated it).
        kwargs = {
            "base_url": "http://example.org",
            "raise_server_exceptions": raise_server_exceptions,
        }
        if version:
            # Only forward `version` when truthy so the client falls back to
            # its own default prefix otherwise.
            kwargs["version"] = version
        return OptimadeTestClient(APP, **kwargs)

    return inner
class NoJsonEndpointTests:
    """A simplified mixin class for tests on non-JSON endpoints."""
    # Endpoint path queried by the fixture.
    request_str: typing.Optional[str] = None
    # Response model class; not used here directly -- presumably for subclasses.
    response_cls: typing.Optional[BaseModel] = None
    # Populated per-test by the autouse fixture below; no .json() decoding here.
    response: typing.Optional[Response] = None
    @pytest.fixture(autouse=True)
    def get_response(self, client):
        """Get response from client"""
        self.response = client.get(self.request_str)
        yield
        # Reset so state never leaks between tests on the shared instance.
        self.response = None
    def test_response_okay(self):
        """Make sure the response was successful"""
        assert (
            self.response.status_code == 200
        ), f"Request to {self.request_str} failed: {self.response.content}"
| 32.65285 | 86 | 0.60822 |
11026c0c5eee347310533201a00163d72346ee00 | 3,673 | py | Python | super_topic/main.py | susmote/WeiboTools | 659232b4525bcbedf350da1127d382ff6c6e9e71 | [
"MIT"
] | 3 | 2018-11-11T22:07:23.000Z | 2019-03-08T08:20:31.000Z | super_topic/main.py | susmote/WeiboTools | 659232b4525bcbedf350da1127d382ff6c6e9e71 | [
"MIT"
] | null | null | null | super_topic/main.py | susmote/WeiboTools | 659232b4525bcbedf350da1127d382ff6c6e9e71 | [
"MIT"
] | 1 | 2021-08-31T06:44:54.000Z | 2021-08-31T06:44:54.000Z | # -*- coding: utf-8 -*-
"""
Created on 2018/11/5
@author: susmote
"""
import time
import requests
import json
# Check in ("sign in") to every Weibo "super topic" the account follows.
if __name__ == '__main__':
    username = input("请输入用户名: ")
    password = input("请输入密码: ")
    login_url = "https://passport.weibo.cn/sso/login"
    headers = {
        "Referer": "https://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=https%3A%2F%2Fm.weibo.cn%2F"
    }
    session = requests.session()
    # Form fields expected by the mobile-web SSO login endpoint.
    login_post_data = {
        "username": username,
        "password": password,
        "savestate": "1",
        "r": "https://m.weibo.cn/",
        "ec": "0",
        "pagerefer": "https://m.weibo.cn/login?backURL=https%253A%252F%252Fm.weibo.cn%252F",
        "entry": "mweibo",
        "wentry": "",
        "loginfrom": "",
        "client_id": "",
        "code": "",
        "qq": "",
        "mainpageflag": "1",
        "hff": "",
        "hfp": ""
    }
    login_page_res = session.post(login_url, data=login_post_data, headers=headers)
    login_page_res_json = json.loads(login_page_res.text)
    # Ask the API config endpoint whether the session is now authenticated.
    judge_login_res = session.get("https://m.weibo.cn/api/config").text
    judge_login_res_json = json.loads(judge_login_res)
    cookie_str = ''
    if judge_login_res_json["data"]["login"]:  # JSON boolean; plain truthiness test
        print(1, "自动登录成功")
        # Serialise the session cookies into a single "key=value;" header string.
        # (Hoisted: get_dict() used to be re-invoked on every loop iteration.)
        cookies = session.cookies.get_dict()
        for key, value in cookies.items():
            cookie_str += key + '=' + value + ';'
    else:
        if login_page_res_json["msg"] == "用户名或密码错误":
            print("用户名或密码错误")
            exit()
        else:
            print(login_page_res_json)
            print("不能直接登录,需要进行手势验证码验证")
            exit()
    followtopic_list = []
    url = "https://m.weibo.cn/api/container/getIndex?containerid=100803_-_followsuper"
    session = requests.session()
    headers = {
        "Host": "m.weibo.cn",
        "Referer": "https://m.weibo.cn",
        "Cookie": cookie_str
    }
    # First page of followed super topics; card_type "8" marks a topic card.
    followtopic_res = session.get(url, headers=headers)
    followtopic_res_json = json.loads(followtopic_res.text)
    for card in followtopic_res_json["data"]["cards"][0]["card_group"]:
        if card["card_type"] == "8":
            followtopic_list.append(card)
    # Fetch the second page when the API reports one via since_id.
    if followtopic_res_json["data"]["cardlistInfo"]["since_id"] != "":
        followtopic_url = url + "&since_id=" + followtopic_res_json["data"]["cardlistInfo"]["since_id"]
        res = session.get(followtopic_url, headers=headers)
        res_json = json.loads(res.text)
        # NOTE(review): the -1 skips the last card of this page, unlike the
        # first-page loop above -- presumably dropping a trailing non-topic
        # card; confirm against the live API response.
        for i in range(0, len(res_json["data"]["cards"][0]["card_group"]) - 1):
            if res_json["data"]["cards"][0]["card_group"][i]["card_type"] == "8":
                followtopic_list.append(res_json["data"]["cards"][0]["card_group"][i])
    for topic in followtopic_list:
        print(topic["title_sub"])
        # A fresh "st" token must be fetched for every check-in POST.
        st_url = "https://m.weibo.cn/api/config"
        login_data = session.get(st_url, headers=headers).text
        login_data_json = json.loads(login_data)["data"]
        postdata = {
            "st": login_data_json["st"]
        }
        scheme = topic["buttons"][0]["scheme"]
        # `scheme` is either a check-in URL path or the literal JSON boolean
        # false; keep the == comparison so only the boolean skips the topic.
        if scheme == False:  # noqa: E712
            continue
        checkin_url = "https://m.weibo.cn" + str(scheme)
        print(checkin_url)
        res = session.post(checkin_url, data=postdata, headers=headers)
        res_json = json.loads(res.text)
        if res_json["ok"] == 1:
            print("签到成功 " + res_json["data"]["msg"])
        else:
            # BUG FIX: res_json is a dict; concatenating it to a str raised
            # TypeError on every failed check-in. Stringify it first.
            print("签到失败 " + str(res_json))
11028d4ec017320409e77b44e5459cd4e2c1cd81 | 1,163 | py | Python | websupportsk_ddns/notifiers.py | JozefGalbicka/websupportsk-ddns | 8fe1408121dc5f14f42e6603d9a50bcaa5afabee | [
"MIT"
] | 2 | 2021-07-28T09:09:58.000Z | 2021-07-28T10:28:45.000Z | websupportsk_ddns/notifiers.py | JozefGalbicka/websupportsk-ddns | 8fe1408121dc5f14f42e6603d9a50bcaa5afabee | [
"MIT"
] | 1 | 2021-11-14T11:31:38.000Z | 2021-11-19T22:38:44.000Z | websupportsk_ddns/notifiers.py | JozefGalbicka/websupportsk-ddns | 8fe1408121dc5f14f42e6603d9a50bcaa5afabee | [
"MIT"
] | null | null | null | import requests
import logging
logger = logging.getLogger(__name__)
def send_notifications(notifiers, message):
    """Broadcast *message* through every notifier backend in *notifiers*."""
    for backend in notifiers:
        backend.send_notification(message)
class Pushover:
    """Notifier backend that pushes messages via the Pushover REST API."""

    def __init__(self, api_token, user_key):
        """Store credentials and the fixed Pushover messages endpoint."""
        self.api_token = api_token
        self.user_key = user_key
        self.url = "https://api.pushover.net/1/messages.json"

    def send_notification(self, text):
        """POST *text* to Pushover and log the API's reply."""
        payload = {
            "token": self.api_token,
            "user": self.user_key,
            "message": text,
        }
        response = requests.post(self.url, data=payload)
        logger.debug(f"Pushover notification response: {response.text}")
        # Pushover reports failures via an "errors" array in the JSON body.
        if "errors" in response.text:
            logger.error(f"Pushover error occured: {response.text}")
class Gotify:
    """Notifier backend that pushes messages to a self-hosted Gotify server."""

    def __init__(self, url, api_token):
        """Build the message endpoint for the Gotify host at *url*."""
        self.api_token = api_token
        self.url = f"http://{url}/message?token={api_token}"

    def send_notification(self, text):
        """POST *text* to the Gotify message endpoint and log the reply."""
        response = requests.post(self.url, data={
            "message": text
        })
        logger.debug(f"Gotify notification response: {response.text}")
        # Gotify reports failures via an "error" field in the JSON body.
        if "error" in response.text:
            logger.error(f"Gotify error occured: {response.text}")
110368e70ed99472870504d326991f7e709f610a | 311 | py | Python | apps/xero_workspace/job_urls.py | akshay-codemonk/fyle-xero | a040dab12282a9a64ca01aca2dc09f8bb7eaa0f6 | [
"MIT"
] | null | null | null | apps/xero_workspace/job_urls.py | akshay-codemonk/fyle-xero | a040dab12282a9a64ca01aca2dc09f8bb7eaa0f6 | [
"MIT"
] | null | null | null | apps/xero_workspace/job_urls.py | akshay-codemonk/fyle-xero | a040dab12282a9a64ca01aca2dc09f8bb7eaa0f6 | [
"MIT"
] | null | null | null | from django.urls import path, include
from apps.xero_workspace.views import ScheduleSyncView
urlpatterns = [
path('<int:workspace_id>/expense_group/', include('apps.fyle_expense.job_urls')),
path('<int:workspace_id>/settings/schedule/trigger/', ScheduleSyncView.as_view(), name="schedule_trigger"),
]
| 34.555556 | 111 | 0.768489 |
1106ac76603f4bd71050edfab94366e8d6245198 | 43,058 | py | Python | tests/test_decision_maker/test_base.py | cyenyxe/agents-aea | c2aec9127028ae13def3f69fbc80d35400de1565 | [
"Apache-2.0"
] | null | null | null | tests/test_decision_maker/test_base.py | cyenyxe/agents-aea | c2aec9127028ae13def3f69fbc80d35400de1565 | [
"Apache-2.0"
] | 1 | 2020-02-21T14:28:13.000Z | 2020-03-05T14:53:53.000Z | tests/test_decision_maker/test_base.py | cyenyxe/agents-aea | c2aec9127028ae13def3f69fbc80d35400de1565 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains tests for decision_maker."""
import os
import time
from queue import Queue
from unittest import mock
import pytest
from web3.auto import Web3
import aea
import aea.decision_maker.base
from aea.configurations.base import PublicId
from aea.crypto.ethereum import ETHEREUM
from aea.crypto.fetchai import DEFAULT_FETCHAI_CONFIG
from aea.crypto.ledger_apis import LedgerApis
from aea.crypto.wallet import FETCHAI, Wallet
from aea.decision_maker.base import DecisionMaker, OwnershipState, Preferences
from aea.decision_maker.base import LedgerStateProxy
from aea.decision_maker.messages.base import InternalMessage
from aea.decision_maker.messages.state_update import StateUpdateMessage
from aea.decision_maker.messages.transaction import TransactionMessage
from aea.mail.base import Multiplexer, OutBox
from aea.protocols.default.message import DefaultMessage
from ..conftest import CUR_PATH, DUMMY_CONNECTION_PUBLIC_ID, DummyConnection
MAX_REACTIONS = 10
class TestOwnershipState:
    """Test the base.py for DecisionMaker."""
    # NOTE: ownership_state is shared class-level state; tests below re-init it.
    @classmethod
    def setup_class(cls):
        """Initialise the class."""
        cls.ownership_state = OwnershipState()
    def test_properties(self):
        """Test the assertion error for *_holdings."""
        # Accessing holdings before init() must raise AssertionError.
        with pytest.raises(AssertionError):
            self.ownership_state.amount_by_currency_id
        with pytest.raises(AssertionError):
            self.ownership_state.quantities_by_good_id
    def test_initialisation(self):
        """Test the initialisation of the ownership_state."""
        currency_endowment = {"FET": 100}
        good_endowment = {"good_id": 2}
        self.ownership_state.init(
            amount_by_currency_id=currency_endowment,
            quantities_by_good_id=good_endowment,
        )
        assert self.ownership_state.amount_by_currency_id is not None
        assert self.ownership_state.quantities_by_good_id is not None
        assert self.ownership_state.is_initialized
    def test_body(self):
        """Test the setter for the body."""
        # NOTE(review): this exercises InternalMessage (get/set/unset) rather
        # than OwnershipState -- possibly misplaced in this test class.
        msg = InternalMessage()
        msg.body = {"test_key": "test_value"}
        other_msg = InternalMessage(body={"test_key": "test_value"})
        assert msg == other_msg, "Messages should be equal."
        assert str(msg) == "InternalMessage(test_key=test_value)"
        assert msg._body is not None
        msg.body = {"Test": "My_test"}
        assert msg._body == {
            "Test": "My_test"
        }, "Message body must be equal with the above dictionary."
        msg.set("Test", 2)
        assert msg._body["Test"] == 2, "body['Test'] should be equal to 2."
        msg.unset("Test")
        assert "Test" not in msg._body.keys(), "Test should not exist."
    def test_transaction_is_affordable_agent_is_buyer(self):
        """Check if the agent has the money to cover the sender_amount (the agent=sender is the buyer)."""
        currency_endowment = {"FET": 100}
        good_endowment = {"good_id": 20}
        self.ownership_state.init(
            amount_by_currency_id=currency_endowment,
            quantities_by_good_id=good_endowment,
        )
        # Negative tx amount: the sender pays 1 FET for 10 goods.
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id="transaction0",
            tx_sender_addr="agent_1",
            tx_counterparty_addr="pk",
            tx_amount_by_currency_id={"FET": -1},
            tx_sender_fee=0,
            tx_counterparty_fee=0,
            tx_quantities_by_good_id={"good_id": 10},
            info={"some_info_key": "some_info_value"},
            ledger_id="fetchai",
            tx_nonce="transaction nonce",
        )
        assert self.ownership_state.is_affordable_transaction(
            tx_message=tx_message
        ), "We should have the money for the transaction!"
    def test_transaction_is_affordable_there_is_no_wealth(self):
        """Reject the transaction when there is no wealth exchange."""
        currency_endowment = {"FET": 0}
        good_endowment = {"good_id": 0}
        self.ownership_state.init(
            amount_by_currency_id=currency_endowment,
            quantities_by_good_id=good_endowment,
        )
        # Zero-amount, zero-quantity transaction: nothing is exchanged.
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id="transaction0",
            tx_sender_addr="agent_1",
            tx_counterparty_addr="pk",
            tx_amount_by_currency_id={"FET": 0},
            tx_sender_fee=0,
            tx_counterparty_fee=0,
            tx_quantities_by_good_id={"good_id": 0},
            info={"some_info_key": "some_info_value"},
            ledger_id="fetchai",
            tx_nonce="transaction nonce",
        )
        assert not self.ownership_state.is_affordable_transaction(
            tx_message=tx_message
        ), "We must reject the transaction."
    def tests_transaction_is_affordable_agent_is_the_seller(self):
        """Check if the agent has the goods (the agent=sender is the seller)."""
        currency_endowment = {"FET": 0}
        good_endowment = {"good_id": 0}
        self.ownership_state.init(
            amount_by_currency_id=currency_endowment,
            quantities_by_good_id=good_endowment,
        )
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id="transaction0",
            tx_sender_addr="agent_1",
            tx_counterparty_addr="pk",
            tx_amount_by_currency_id={"FET": 10},
            tx_sender_fee=0,
            tx_counterparty_fee=0,
            tx_quantities_by_good_id={"good_id": 0},
            info={"some_info_key": "some_info_value"},
            ledger_id="fetchai",
            tx_nonce="transaction nonce",
        )
        # NOTE(review): the assertion expects the transaction to be affordable,
        # but the failure message says "reject" -- likely a copy-paste slip.
        assert self.ownership_state.is_affordable_transaction(
            tx_message=tx_message
        ), "We must reject the transaction."
    def tests_transaction_is_affordable_else_statement(self):
        """Check that the function returns false if we cannot satisfy any if/elif statements."""
        currency_endowment = {"FET": 0}
        good_endowment = {"good_id": 0}
        self.ownership_state.init(
            amount_by_currency_id=currency_endowment,
            quantities_by_good_id=good_endowment,
        )
        # Receives both money and goods: falls through to the final branch.
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id="transaction0",
            tx_sender_addr="agent_1",
            tx_counterparty_addr="pk",
            tx_amount_by_currency_id={"FET": 10},
            tx_sender_fee=0,
            tx_counterparty_fee=0,
            tx_quantities_by_good_id={"good_id": 50},
            info={"some_info_key": "some_info_value"},
            ledger_id="fetchai",
            tx_nonce="transaction nonce",
        )
        assert not self.ownership_state.is_affordable_transaction(
            tx_message=tx_message
        ), "We must reject the transaction."
    def test_apply(self):
        """Test the apply function."""
        currency_endowment = {"FET": 100}
        good_endowment = {"good_id": 2}
        self.ownership_state.init(
            amount_by_currency_id=currency_endowment,
            quantities_by_good_id=good_endowment,
        )
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id="transaction0",
            tx_sender_addr="agent_1",
            tx_counterparty_addr="pk",
            tx_amount_by_currency_id={"FET": -20},
            tx_sender_fee=5,
            tx_counterparty_fee=0,
            tx_quantities_by_good_id={"good_id": 10},
            info={"some_info_key": "some_info_value"},
            ledger_id="fetchai",
            tx_nonce="transaction nonce",
        )
        list_of_transactions = [tx_message]
        state = self.ownership_state
        # apply_transactions must return a new, different state object.
        new_state = self.ownership_state.apply_transactions(
            transactions=list_of_transactions
        )
        assert (
            state != new_state
        ), "after applying a list_of_transactions must have a different state!"
    def test_transaction_update(self):
        """Test the transaction update when sending tokens."""
        currency_endowment = {"FET": 100}
        good_endowment = {"good_id": 20}
        self.ownership_state.init(
            amount_by_currency_id=currency_endowment,
            quantities_by_good_id=good_endowment,
        )
        assert self.ownership_state.amount_by_currency_id == currency_endowment
        assert self.ownership_state.quantities_by_good_id == good_endowment
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id="transaction0",
            tx_sender_addr="agent_1",
            tx_counterparty_addr="pk",
            tx_amount_by_currency_id={"FET": -20},
            tx_sender_fee=5,
            tx_counterparty_fee=0,
            tx_quantities_by_good_id={"good_id": 10},
            info={"some_info_key": "some_info_value"},
            ledger_id="fetchai",
            tx_nonce="transaction nonce",
        )
        self.ownership_state._update(tx_message=tx_message)
        # 100 - 20 (amount) - 5 (sender fee) = 75; 20 + 10 goods = 30.
        expected_amount_by_currency_id = {"FET": 75}
        expected_quantities_by_good_id = {"good_id": 30}
        assert (
            self.ownership_state.amount_by_currency_id == expected_amount_by_currency_id
        )
        assert (
            self.ownership_state.quantities_by_good_id == expected_quantities_by_good_id
        )
    def test_transaction_update_receive(self):
        """Test the transaction update when receiving tokens."""
        currency_endowment = {"FET": 75}
        good_endowment = {"good_id": 30}
        self.ownership_state.init(
            amount_by_currency_id=currency_endowment,
            quantities_by_good_id=good_endowment,
        )
        assert self.ownership_state.amount_by_currency_id == currency_endowment
        assert self.ownership_state.quantities_by_good_id == good_endowment
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id="transaction0",
            tx_sender_addr="agent_1",
            tx_counterparty_addr="pk",
            tx_amount_by_currency_id={"FET": 20},
            tx_sender_fee=5,
            tx_counterparty_fee=0,
            tx_quantities_by_good_id={"good_id": -10},
            info={"some_info_key": "some_info_value"},
            ledger_id="fetchai",
            tx_nonce="transaction nonce",
        )
        self.ownership_state._update(tx_message=tx_message)
        # 75 + 20 - 5 (sender fee) = 90; 30 - 10 goods = 20.
        expected_amount_by_currency_id = {"FET": 90}
        expected_quantities_by_good_id = {"good_id": 20}
        assert (
            self.ownership_state.amount_by_currency_id == expected_amount_by_currency_id
        )
        assert (
            self.ownership_state.quantities_by_good_id == expected_quantities_by_good_id
        )
class TestPreferencesDecisionMaker:
    """Test the preferences."""
    @classmethod
    def setup_class(cls):
        """Initialise the class."""
        # Shared fixtures: endowments and utility/exchange parameters used
        # across the tests below.
        cls.preferences = Preferences()
        cls.ownership_state = OwnershipState()
        cls.good_holdings = {"good_id": 2}
        cls.currency_holdings = {"FET": 100}
        cls.utility_params = {"good_id": 20.0}
        cls.exchange_params = {"FET": 10.0}
        cls.tx_fee = 9
    def test_preferences_properties(self):
        """Test the properties of the preferences class."""
        # Accessing parameters before init() must raise AssertionError.
        with pytest.raises(AssertionError):
            self.preferences.exchange_params_by_currency_id
        with pytest.raises(AssertionError):
            self.preferences.utility_params_by_good_id
    def test_preferences_init(self):
        """Test the preferences init()."""
        self.preferences.init(
            exchange_params_by_currency_id=self.exchange_params,
            utility_params_by_good_id=self.utility_params,
            tx_fee=self.tx_fee,
        )
        assert self.preferences.utility_params_by_good_id is not None
        assert self.preferences.exchange_params_by_currency_id is not None
        # tx_fee of 9 appears to be split 4 (seller) / 5 (buyer) -- TODO
        # confirm the split rule in Preferences.init.
        assert self.preferences.transaction_fees["seller_tx_fee"] == 4
        assert self.preferences.transaction_fees["buyer_tx_fee"] == 5
        assert self.preferences.is_initialized
    def test_logarithmic_utility(self):
        """Calculate the logarithmic utility and checks that it is not none.."""
        self.preferences.init(
            utility_params_by_good_id=self.utility_params,
            exchange_params_by_currency_id=self.exchange_params,
            tx_fee=self.tx_fee,
        )
        log_utility = self.preferences.logarithmic_utility(
            quantities_by_good_id=self.good_holdings
        )
        assert log_utility is not None, "Log_utility must not be none."
    def test_linear_utility(self):
        """Calculate the linear_utility and checks that it is not none."""
        # Relies on preferences already initialised by an earlier test.
        linear_utility = self.preferences.linear_utility(
            amount_by_currency_id=self.currency_holdings
        )
        assert linear_utility is not None, "Linear utility must not be none."
    def test_get_score(self):
        """Calculate the score."""
        score = self.preferences.get_score(
            quantities_by_good_id=self.good_holdings,
            amount_by_currency_id=self.currency_holdings,
        )
        linear_utility = self.preferences.linear_utility(
            amount_by_currency_id=self.currency_holdings
        )
        log_utility = self.preferences.logarithmic_utility(
            quantities_by_good_id=self.good_holdings
        )
        assert (
            score == log_utility + linear_utility
        ), "The score must be equal to the sum of log_utility and linear_utility."
    def test_marginal_utility(self):
        """Test the marginal utility."""
        delta_good_holdings = {"good_id": 1}
        delta_currency_holdings = {"FET": -5}
        self.ownership_state.init(
            amount_by_currency_id=self.currency_holdings,
            quantities_by_good_id=self.good_holdings,
        )
        marginal_utility = self.preferences.marginal_utility(
            ownership_state=self.ownership_state,
            delta_quantities_by_good_id=delta_good_holdings,
            delta_amount_by_currency_id=delta_currency_holdings,
        )
        assert marginal_utility is not None, "Marginal utility must not be none."
    def test_score_diff_from_transaction(self):
        """Test the difference between the scores."""
        good_holdings = {"good_id": 2}
        currency_holdings = {"FET": 100}
        utility_params = {"good_id": 20.0}
        exchange_params = {"FET": 10.0}
        tx_fee = 3
        self.ownership_state.init(
            amount_by_currency_id=currency_holdings, quantities_by_good_id=good_holdings
        )
        self.preferences.init(
            utility_params_by_good_id=utility_params,
            exchange_params_by_currency_id=exchange_params,
            tx_fee=tx_fee,
        )
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id="transaction0",
            tx_sender_addr="agent_1",
            tx_counterparty_addr="pk",
            tx_amount_by_currency_id={"FET": -20},
            tx_sender_fee=self.preferences.transaction_fees["seller_tx_fee"],
            tx_counterparty_fee=self.preferences.transaction_fees["buyer_tx_fee"],
            tx_quantities_by_good_id={"good_id": 10},
            info={"some_info_key": "some_info_value"},
            ledger_id="fetchai",
            tx_nonce="transaction nonce",
        )
        # Recompute the expected score delta by hand and compare it with the
        # value returned by get_score_diff_from_transaction.
        cur_score = self.preferences.get_score(
            quantities_by_good_id=good_holdings, amount_by_currency_id=currency_holdings
        )
        new_state = self.ownership_state.apply_transactions([tx_message])
        new_score = self.preferences.get_score(
            quantities_by_good_id=new_state.quantities_by_good_id,
            amount_by_currency_id=new_state.amount_by_currency_id,
        )
        dif_scores = new_score - cur_score
        score_difference = self.preferences.get_score_diff_from_transaction(
            ownership_state=self.ownership_state, tx_message=tx_message
        )
        assert (
            score_difference == dif_scores
        ), "The calculated difference must be equal to the return difference from the function."
    @classmethod
    def teardown_class(cls):
        """Teardown any state that was previously setup with a call to setup_class."""
class TestDecisionMaker:
"""Test the decision maker."""
    @classmethod
    def _patch_logger(cls):
        """Replace aea.decision_maker.base.logger.warning with a MagicMock.

        Stores the patcher on the class so it can later be exited, and the
        mock so tests can assert on warning calls.
        """
        cls.patch_logger_warning = mock.patch.object(
            aea.decision_maker.base.logger, "warning"
        )
        cls.mocked_logger_warning = cls.patch_logger_warning.__enter__()
@classmethod
def _unpatch_logger(cls):
cls.mocked_logger_warning.__exit__()
    @classmethod
    def setup_class(cls):
        """Initialise the decision maker."""
        cls._patch_logger()
        # Wire a DecisionMaker to a dummy connection via a real Multiplexer.
        cls.multiplexer = Multiplexer(
            [DummyConnection(connection_id=DUMMY_CONNECTION_PUBLIC_ID)]
        )
        cls.outbox = OutBox(cls.multiplexer)
        private_key_pem_path = os.path.join(CUR_PATH, "data", "fet_private_key.txt")
        # NOTE(review): the ETHEREUM wallet entry reuses the FETCHAI key file --
        # presumably acceptable for these tests, but confirm.
        eth_private_key_pem_path = os.path.join(CUR_PATH, "data", "fet_private_key.txt")
        cls.wallet = Wallet(
            {FETCHAI: private_key_pem_path, ETHEREUM: eth_private_key_pem_path}
        )
        cls.ledger_apis = LedgerApis({FETCHAI: DEFAULT_FETCHAI_CONFIG}, FETCHAI)
        cls.agent_name = "test"
        cls.ownership_state = OwnershipState()
        cls.preferences = Preferences()
        cls.decision_maker = DecisionMaker(
            agent_name=cls.agent_name,
            max_reactions=MAX_REACTIONS,
            outbox=cls.outbox,
            wallet=cls.wallet,
            ledger_apis=cls.ledger_apis,
        )
        cls.multiplexer.connect()
        # Common transaction fixture values reused by the tests below.
        cls.tx_id = "transaction0"
        cls.tx_sender_addr = "agent_1"
        cls.tx_counterparty_addr = "pk"
        cls.info = {"some_info_key": "some_info_value"}
        cls.ledger_id = "fetchai"
        # Starts the decision maker's background thread.
        cls.decision_maker.start()
    def test_properties(self):
        """Test the properties of the decision maker."""
        assert self.decision_maker.outbox.empty()
        assert isinstance(self.decision_maker.message_in_queue, Queue)
        assert isinstance(self.decision_maker.message_out_queue, Queue)
        assert isinstance(self.decision_maker.ledger_apis, LedgerApis)
        assert isinstance(self.outbox, OutBox)
    def test_decision_maker_execute(self):
        """Test the execute method."""
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id=self.tx_id,
            tx_sender_addr=self.tx_sender_addr,
            tx_counterparty_addr=self.tx_counterparty_addr,
            tx_amount_by_currency_id={"FET": -20},
            tx_sender_fee=0,
            tx_counterparty_fee=0,
            tx_quantities_by_good_id={"good_id": 10},
            info=self.info,
            ledger_id=self.ledger_id,
            tx_nonce="Transaction nonce",
        )
        self.decision_maker.message_in_queue.put_nowait(tx_message)
        # test that after a while the queue has been consumed.
        # (sleeps give the decision maker's background thread time to react)
        time.sleep(0.5)
        assert self.decision_maker.message_in_queue.empty()
        time.sleep(0.5)
        assert not self.decision_maker.message_out_queue.empty()
        # TODO test the content of the response.
        response = self.decision_maker.message_out_queue.get()  # noqa
    def test_decision_maker_handle_state_update_initialize(self):
        """Test the handle method for a stateUpdate message with Initialize performative."""
        good_holdings = {"good_id": 2}
        currency_holdings = {"FET": 100}
        utility_params = {"good_id": 20.0}
        exchange_params = {"FET": 10.0}
        tx_fee = 1
        state_update_message = StateUpdateMessage(
            performative=StateUpdateMessage.Performative.INITIALIZE,
            amount_by_currency_id=currency_holdings,
            quantities_by_good_id=good_holdings,
            exchange_params_by_currency_id=exchange_params,
            utility_params_by_good_id=utility_params,
            tx_fee=tx_fee,
        )
        self.decision_maker.handle(state_update_message)
        # INITIALIZE must populate both ownership state and preferences.
        assert self.decision_maker.ownership_state.amount_by_currency_id is not None
        assert self.decision_maker.ownership_state.quantities_by_good_id is not None
        assert (
            self.decision_maker.preferences.exchange_params_by_currency_id is not None
        )
        assert self.decision_maker.preferences.utility_params_by_good_id is not None
    def test_decision_maker_handle_update_apply(self):
        """Test the handle method for a stateUpdate message with APPLY performative."""
        # NOTE(review): these holdings mirror the values sent by the INITIALIZE
        # test above -- this test depends on that test running first.
        good_holdings = {"good_id": 2}
        currency_holdings = {"FET": 100}
        currency_deltas = {"FET": -10}
        good_deltas = {"good_id": 1}
        state_update_message = StateUpdateMessage(
            performative=StateUpdateMessage.Performative.APPLY,
            amount_by_currency_id=currency_deltas,
            quantities_by_good_id=good_deltas,
        )
        self.decision_maker.handle(state_update_message)
        # Expected = initial holdings plus the deltas, merged over all keys.
        expected_amount_by_currency_id = {
            key: currency_holdings.get(key, 0) + currency_deltas.get(key, 0)
            for key in set(currency_holdings) | set(currency_deltas)
        }
        expected_quantities_by_good_id = {
            key: good_holdings.get(key, 0) + good_deltas.get(key, 0)
            for key in set(good_holdings) | set(good_deltas)
        }
        assert (
            self.decision_maker.ownership_state.amount_by_currency_id
            == expected_amount_by_currency_id
        ), "The amount_by_currency_id must be equal with the expected amount."
        assert (
            self.decision_maker.ownership_state.quantities_by_good_id
            == expected_quantities_by_good_id
        )
    # TODO this used to work with the testnet
    def test_decision_maker_handle_tx_message(self):
        """Test the handle tx message method."""
        assert self.decision_maker.message_out_queue.empty()
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id=self.tx_id,
            tx_sender_addr=self.tx_sender_addr,
            tx_counterparty_addr=self.tx_counterparty_addr,
            tx_amount_by_currency_id={"FET": -2},
            tx_sender_fee=0,
            tx_counterparty_fee=0,
            tx_quantities_by_good_id={"good_id": 10},
            info=self.info,
            ledger_id=self.ledger_id,
            tx_nonce="Transaction nonce",
        )
        # Stub out the ledger so no real network calls are made.
        with mock.patch.object(
            self.decision_maker.ledger_apis, "token_balance", return_value=1000000
        ):
            with mock.patch.object(
                self.decision_maker.ledger_apis,
                "transfer",
                return_value="This is a test digest",
            ):
                self.decision_maker.handle(tx_message)
                assert not self.decision_maker.message_out_queue.empty()
    def test_decision_maker_handle_unknown_tx_message(self):
        """Test the handle tx message method."""
        patch_logger_error = mock.patch.object(aea.decision_maker.base.logger, "error")
        mocked_logger_error = patch_logger_error.__enter__()
        # Bypass message validation so an unsupported ledger_id gets through.
        with mock.patch(
            "aea.decision_maker.messages.transaction.TransactionMessage._is_consistent",
            return_value=True,
        ):
            tx_message = TransactionMessage(
                performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
                skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
                tx_id=self.tx_id,
                tx_sender_addr=self.tx_sender_addr,
                tx_counterparty_addr=self.tx_counterparty_addr,
                tx_amount_by_currency_id={"FET": -2},
                tx_sender_fee=0,
                tx_counterparty_fee=0,
                tx_quantities_by_good_id={"good_id": 10},
                info=self.info,
                ledger_id="bitcoin",
            )
            self.decision_maker.handle(tx_message)
            # The unsupported ledger must be reported via logger.error.
            mocked_logger_error.assert_called_with(
                "[test]: ledger_id=bitcoin is not supported"
            )
    def test_decision_maker_handle_tx_message_not_ready(self):
        """Test that the decision maker is not ready to pursuit the goals.Cannot handle the message."""
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id=self.tx_id,
            tx_sender_addr=self.tx_sender_addr,
            tx_counterparty_addr=self.tx_counterparty_addr,
            tx_amount_by_currency_id={"FET": -2},
            tx_sender_fee=0,
            tx_counterparty_fee=0,
            tx_quantities_by_good_id={"good_id": 10},
            info=self.info,
            ledger_id=self.ledger_id,
            tx_nonce="Transaction nonce",
        )
        with mock.patch.object(
            self.decision_maker.ledger_apis, "token_balance", return_value=1000000
        ):
            with mock.patch.object(
                self.decision_maker.ledger_apis,
                "transfer",
                return_value="This is a test digest",
            ):
                # Force the readiness flag to "not ready" while handling.
                with mock.patch(
                    "aea.decision_maker.base.GoalPursuitReadiness.Status"
                ) as mocked_status:
                    mocked_status.READY.value = False
                    self.decision_maker.handle(tx_message)
                    assert not self.decision_maker.goal_pursuit_readiness.is_ready
        # Handling a second, valid message still produces an output message.
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id=self.tx_id,
            tx_sender_addr=self.tx_sender_addr,
            tx_counterparty_addr=self.tx_counterparty_addr,
            tx_amount_by_currency_id={"FET": -2},
            tx_sender_fee=0,
            tx_counterparty_fee=0,
            tx_quantities_by_good_id={"good_id": 10},
            info=self.info,
            ledger_id=self.ledger_id,
            tx_nonce="transaction nonce",
        )
        self.decision_maker.handle(tx_message)
        assert not self.decision_maker.message_out_queue.empty()
    # NOTE(review): "hand" in the name looks like a typo for "handle"; left
    # unchanged because renaming would alter pytest selection.
    def test_decision_maker_hand_tx_ready_for_signing(self):
        """Test that the decision maker can handle a message that is ready for signing."""
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SIGNING,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id=self.tx_id,
            tx_sender_addr=self.tx_sender_addr,
            tx_counterparty_addr=self.tx_counterparty_addr,
            tx_amount_by_currency_id={"FET": -20},
            tx_sender_fee=0,
            tx_counterparty_fee=0,
            tx_quantities_by_good_id={"good_id": 0},
            ledger_id=self.ledger_id,
            info=self.info,
            signing_payload={"key": b"some_bytes"},
        )
        self.decision_maker.handle(tx_message)
        assert not self.decision_maker.message_out_queue.empty()
    def test_decision_maker_handle_tx_message_acceptable_for_settlement(self):
        """Test that a tx_message is acceptable for settlement."""
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id=self.tx_id,
            tx_sender_addr=self.tx_sender_addr,
            tx_counterparty_addr=self.tx_counterparty_addr,
            tx_amount_by_currency_id={"FET": -2},
            tx_sender_fee=0,
            tx_counterparty_fee=0,
            tx_quantities_by_good_id={"good_id": 10},
            info=self.info,
            ledger_id=self.ledger_id,
            tx_nonce="Transaction nonce",
        )
        # Force the accept path and a successful settlement digest.
        with mock.patch.object(
            self.decision_maker, "_is_acceptable_for_settlement", return_value=True
        ):
            with mock.patch.object(
                self.decision_maker, "_settle_tx", return_value="tx_digest"
            ):
                self.decision_maker.handle(tx_message)
                assert not self.decision_maker.message_out_queue.empty()
def test_decision_maker_tx_message_is_not_acceptable_for_settlement(self):
    """Test that a tx_message is not acceptable for settlement."""
    # NOTE(review): despite the name, _is_acceptable_for_settlement is mocked to
    # True here; the "not acceptable" outcome comes from _settle_tx returning None.
    tx_message = TransactionMessage(
        performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
        skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
        tx_id=self.tx_id,
        tx_sender_addr=self.tx_sender_addr,
        tx_counterparty_addr=self.tx_counterparty_addr,
        tx_amount_by_currency_id={"FET": -2},
        tx_sender_fee=0,
        tx_counterparty_fee=0,
        tx_quantities_by_good_id={"good_id": 10},
        ledger_id=self.ledger_id,
        info=self.info,
        tx_nonce="Transaction nonce",
    )
    with mock.patch.object(
        self.decision_maker, "_is_acceptable_for_settlement", return_value=True
    ):
        with mock.patch.object(
            self.decision_maker, "_settle_tx", return_value=None
        ):
            self.decision_maker.handle(tx_message)
    # Even on a failed settlement a response message is queued.
    assert not self.decision_maker.message_out_queue.empty()
def test_decision_maker_execute_w_wrong_input(self):
    """Test the execute method with wrong input."""
    # A DefaultMessage is not of the internal protocol the decision maker expects.
    default_message = DefaultMessage(
        type=DefaultMessage.Type.BYTES, content=b"hello"
    )
    self.decision_maker.message_in_queue.put_nowait(default_message)
    # Give the decision maker's worker loop time to pick the message up.
    time.sleep(0.5)
    self.mocked_logger_warning.assert_called_with(
        "[{}]: Message received by the decision maker is not of protocol_id=internal.".format(
            self.agent_name
        )
    )
def test_is_affordable_off_chain(self):
    """Test the off_chain message."""
    tx_message = TransactionMessage(
        performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
        skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
        tx_id=self.tx_id,
        tx_sender_addr=self.tx_sender_addr,
        tx_counterparty_addr=self.tx_counterparty_addr,
        tx_amount_by_currency_id={"FET": -20},
        tx_sender_fee=0,
        tx_counterparty_fee=0,
        tx_quantities_by_good_id={"good_id": 10},
        ledger_id="off_chain",
        info=self.info,
        tx_nonce="Transaction nonce",
    )
    # Off-chain transactions are treated as affordable without a ledger balance check.
    assert self.decision_maker._is_affordable(tx_message)
def test_is_not_affordable_ledger_state_proxy(self):
    """Test that the tx_message is not affordable with initialized ledger_state_proxy."""
    # Bypass consistency checks so the unsupported ledger id "bitcoin" is accepted.
    with mock.patch(
        "aea.decision_maker.messages.transaction.TransactionMessage._is_consistent",
        return_value=True,
    ):
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id=self.tx_id,
            tx_sender_addr=self.tx_sender_addr,
            tx_counterparty_addr=self.tx_counterparty_addr,
            tx_amount_by_currency_id={"FET": -20},
            tx_sender_fee=0,
            tx_counterparty_fee=0,
            tx_quantities_by_good_id={"good_id": 10},
            ledger_id="bitcoin",
            info=self.info,
        )
        var = self.decision_maker._is_affordable(tx_message)
        assert not var
def test_is_affordable_ledger_state_proxy(self):
    """Test that the tx_message is affordable with initialized ledger_state_proxy."""
    tx_message = TransactionMessage(
        performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
        skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
        tx_id=self.tx_id,
        tx_sender_addr=self.tx_sender_addr,
        tx_counterparty_addr=self.tx_counterparty_addr,
        tx_amount_by_currency_id={"FET": -20},
        tx_sender_fee=0,
        tx_counterparty_fee=0,
        tx_quantities_by_good_id={"good_id": 10},
        ledger_id=self.ledger_id,
        info=self.info,
        tx_nonce="Transaction nonce",
    )
    with mock.patch.object(
        self.decision_maker, "_is_acceptable_for_settlement", return_value=True
    ):
        with mock.patch.object(
            self.decision_maker, "_settle_tx", return_value="tx_digest"
        ):
            self.decision_maker._is_affordable(tx_message)
    # NOTE(review): asserts queue state from earlier handling rather than the
    # return value of _is_affordable — consider asserting the result directly.
    assert not self.decision_maker.message_out_queue.empty()
def test_settle_tx_off_chain(self):
    """Test the off_chain message."""
    tx_message = TransactionMessage(
        performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
        skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
        tx_id=self.tx_id,
        tx_sender_addr=self.tx_sender_addr,
        tx_counterparty_addr=self.tx_counterparty_addr,
        tx_amount_by_currency_id={"FET": -20},
        tx_sender_fee=0,
        tx_counterparty_fee=0,
        tx_quantities_by_good_id={"good_id": 10},
        ledger_id="off_chain",
        info=self.info,
        tx_nonce="Transaction nonce",
    )
    # Off-chain settlement returns a fixed sentinel digest instead of a ledger tx id.
    tx_digest = self.decision_maker._settle_tx(tx_message)
    assert tx_digest == "off_chain_settlement"
def test_settle_tx_known_chain(self):
    """Test settling a transaction on a known (on-chain) ledger id."""
    tx_message = TransactionMessage(
        performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
        skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
        tx_id=self.tx_id,
        tx_sender_addr=self.tx_sender_addr,
        tx_counterparty_addr=self.tx_counterparty_addr,
        tx_amount_by_currency_id={"FET": -20},
        tx_sender_fee=0,
        tx_counterparty_fee=0,
        tx_quantities_by_good_id={"good_id": 10},
        ledger_id=self.ledger_id,
        info=self.info,
        tx_nonce="Transaction nonce",
    )
    # The digest returned by _settle_tx is whatever ledger_apis.transfer returns.
    with mock.patch.object(
        self.decision_maker.ledger_apis, "transfer", return_value="tx_digest"
    ):
        tx_digest = self.decision_maker._settle_tx(tx_message)
    assert tx_digest == "tx_digest"
def test_is_utility_enhancing(self):
    """Test the utility enhancing for off_chain message."""
    tx_message = TransactionMessage(
        performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
        skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
        tx_id=self.tx_id,
        tx_sender_addr=self.tx_sender_addr,
        tx_counterparty_addr=self.tx_counterparty_addr,
        tx_amount_by_currency_id={"FET": -20},
        tx_sender_fee=0,
        tx_counterparty_fee=0,
        tx_quantities_by_good_id={"good_id": 10},
        ledger_id="off_chain",
        info=self.info,
        tx_nonce="Transaction nonce",
    )
    # With no good quantities tracked, the check defaults to utility-enhancing.
    self.decision_maker.ownership_state._quantities_by_good_id = None
    assert self.decision_maker._is_utility_enhancing(tx_message)
def test_sign_tx_fetchai(self):
    """Test the private function sign_tx of the decision maker for fetchai ledger_id."""
    # keccak hash stands in for an arbitrary transaction hash to be signed
    tx_hash = Web3.keccak(text="some_bytes")
    tx_message = TransactionMessage(
        performative=TransactionMessage.Performative.PROPOSE_FOR_SIGNING,
        skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
        tx_id=self.tx_id,
        tx_sender_addr=self.tx_sender_addr,
        tx_counterparty_addr=self.tx_counterparty_addr,
        tx_amount_by_currency_id={"FET": -20},
        tx_sender_fee=0,
        tx_counterparty_fee=0,
        tx_quantities_by_good_id={"good_id": 0},
        ledger_id=self.ledger_id,
        info=self.info,
        signing_payload={"tx_hash": tx_hash},
    )
    tx_signature = self.decision_maker._sign_tx(tx_message)
    assert tx_signature is not None
def test_sign_tx_fetchai_is_acceptable_for_signing(self):
    """Test the private function sign_tx of the decision maker for fetchai ledger_id."""
    # NOTE(review): body is identical to test_sign_tx_fetchai above; presumably this
    # was meant to exercise _is_acceptable_for_signing — verify against upstream.
    tx_hash = Web3.keccak(text="some_bytes")
    tx_message = TransactionMessage(
        performative=TransactionMessage.Performative.PROPOSE_FOR_SIGNING,
        skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
        tx_id=self.tx_id,
        tx_sender_addr=self.tx_sender_addr,
        tx_counterparty_addr=self.tx_counterparty_addr,
        tx_amount_by_currency_id={"FET": -20},
        tx_sender_fee=0,
        tx_counterparty_fee=0,
        tx_quantities_by_good_id={"good_id": 0},
        ledger_id=self.ledger_id,
        info=self.info,
        signing_payload={"tx_hash": tx_hash},
    )
    tx_signature = self.decision_maker._sign_tx(tx_message)
    assert tx_signature is not None
# NOTE(review): method name has a typo ("sing" -> "sign"); kept as-is since the
# name is how pytest and any CI filters reference this test.
def test_sing_tx_offchain(self):
    """Test the private function sign_tx for the offchain ledger_id."""
    tx_hash = Web3.keccak(text="some_bytes")
    tx_message = TransactionMessage(
        performative=TransactionMessage.Performative.PROPOSE_FOR_SIGNING,
        skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
        tx_id=self.tx_id,
        tx_sender_addr=self.tx_sender_addr,
        tx_counterparty_addr=self.tx_counterparty_addr,
        tx_amount_by_currency_id={"FET": -20},
        tx_sender_fee=0,
        tx_counterparty_fee=0,
        tx_quantities_by_good_id={"good_id": 0},
        ledger_id="off_chain",
        info=self.info,
        signing_payload={"tx_hash": tx_hash},
    )
    tx_signature = self.decision_maker._sign_tx(tx_message)
    assert tx_signature is not None
@classmethod
def teardown_class(cls):
    """Tear the tests down: restore the logger, then release networking and the worker."""
    cls._unpatch_logger()
    cls.multiplexer.disconnect()
    cls.decision_maker.stop()
class TestLedgerStateProxy:
    """Test the Ledger State Proxy."""

    @classmethod
    def setup_class(cls):
        """Set up the test."""
        cls.ledger_apis = LedgerApis({FETCHAI: DEFAULT_FETCHAI_CONFIG}, FETCHAI)
        cls.ledger_state_proxy = LedgerStateProxy(ledger_apis=cls.ledger_apis)

    def test_ledger_apis(self):
        """Test the returned ledger_apis."""
        assert self.ledger_state_proxy.ledger_apis == self.ledger_apis, "Must be equal."

    def test_transaction_is_not_affordable(self):
        """Test if the transaction is affordable on the ledger."""
        # Outgoing amount (-20) with a mocked balance of 0 must be unaffordable.
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id="transaction0",
            tx_sender_addr="agent_1",
            tx_counterparty_addr="pk",
            tx_amount_by_currency_id={"FET": -20},
            tx_sender_fee=0,
            tx_counterparty_fee=0,
            tx_quantities_by_good_id={"good_id": 10},
            ledger_id="off_chain",
            info={"some_info_key": "some_info_value"},
            tx_nonce="Transaction nonce",
        )
        with mock.patch.object(
            self.ledger_state_proxy.ledger_apis, "token_balance", return_value=0
        ):
            result = self.ledger_state_proxy.is_affordable_transaction(
                tx_message=tx_message
            )
        assert not result

    def test_transaction_is_affordable(self):
        """Test if the transaction is affordable on the ledger."""
        # Incoming amount (+20) is affordable even with zero balance.
        tx_message = TransactionMessage(
            performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
            skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
            tx_id="transaction0",
            tx_sender_addr="agent_1",
            tx_counterparty_addr="pk",
            tx_amount_by_currency_id={"FET": 20},
            tx_sender_fee=5,
            tx_counterparty_fee=0,
            tx_quantities_by_good_id={"good_id": 10},
            ledger_id="off_chain",
            info={"some_info_key": "some_info_value"},
            tx_nonce="Transaction nonce",
        )
        with mock.patch.object(
            self.ledger_state_proxy.ledger_apis, "token_balance", return_value=0
        ):
            result = self.ledger_state_proxy.is_affordable_transaction(
                tx_message=tx_message
            )
        assert result
| 41.925998 | 106 | 0.641042 |
11070c63ba36e05b385352144090c398a2ed7415 | 15,806 | py | Python | code/plotting/plot_lsst.py | modichirag/21cm_cleaning | 1615fea4e2d617bb6ef00770a49698901227daa8 | [
"MIT"
] | 1 | 2019-08-27T10:05:41.000Z | 2019-08-27T10:05:41.000Z | code/plotting/plot_lsst.py | modichirag/21cm_cleaning | 1615fea4e2d617bb6ef00770a49698901227daa8 | [
"MIT"
] | null | null | null | code/plotting/plot_lsst.py | modichirag/21cm_cleaning | 1615fea4e2d617bb6ef00770a49698901227daa8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Plots the power spectra and Fourier-space biases for the HI.
#
import warnings
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
#warnings.filterwarnings("ignore")
if rank!=0: warnings.filterwarnings("ignore")
import numpy as np
import os, sys
import matplotlib.pyplot as plt
from pmesh.pm import ParticleMesh
from scipy.interpolate import InterpolatedUnivariateSpline as ius
from nbodykit.lab import BigFileMesh, BigFileCatalog, FFTPower
from nbodykit.cosmology import Planck15, EHPower, Cosmology
sys.path.append('../utils/')
sys.path.append('../recon/')
sys.path.append('../recon/cosmo4d/')
from cosmo4d.pmeshengine import nyquist_mask
from lab import mapbias as mapp
from lab import mapnoise as mapn
from lab import report as rp
from lab import dg
from getbiasparams import getbias
import tools
#
from matplotlib import rc, rcParams, font_manager
rcParams['font.family'] = 'serif'
fsize = 12-1
fontmanage = font_manager.FontProperties(family='serif', style='normal',
size=fsize, weight='normal', stretch='normal')
font = {'family': fontmanage.get_family()[0],
'style': fontmanage.get_style(),
'weight': fontmanage.get_weight(),
'size': fontmanage.get_size(),
}
#
import argparse
parser = argparse.ArgumentParser()
#parser.add_argument('-m', '--model', help='model name to use')
parser.add_argument('-a', '--aa', help='scale factor', default=0.5000, type=float)
parser.add_argument('-l', '--bs', help='boxsize', default=1024, type=float)
parser.add_argument('-n', '--nmesh', help='nmesh', default=256, type=int)
parser.add_argument('-t', '--angle', help='angle of the wedge', default=50, type=float)
parser.add_argument('-k', '--kmin', help='kmin of the wedge', default=0.03, type=float)
parser.add_argument( '--pp', help='upsample', default=1)
args = parser.parse_args()
figpath = './figs/'
dpath = '../../data/'
bs, nc, aa = args.bs, args.nmesh, args.aa
nc2 = nc*2
zz = 1/aa- 1
kmin = args.kmin
ang = args.angle
if args.pp: pm = ParticleMesh(BoxSize=bs, Nmesh=[nc2, nc2, nc2])
else: pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])
rank = pm.comm.rank
##
def save2dphoto(Nmu=4, numd=1e-2, aa=None, scatter=False):
    """Measure the 2D auto power of a mock photometric sample and its cross power
    with the noisy data and the reconstructed field; write the results to disk.

    Nmu     : number of mu bins for the 2D power spectra
    numd    : number density of the halo sample; must be <= 1e-2
    aa      : scale factor; None loops over the default list of scale factors
    scatter : if True, scatter positions along the LOS by a Gaussian photo-z
              error; otherwise apply the equivalent smoothing in Fourier space
    """
    if numd > 1e-2:
        print('Too high number density')
        sys.exit()
    num = int(numd*bs**3)
    if aa is None: aas = [0.3333, 0.2000, 0.1429]
    else: aas = [aa]
    for ia, aa in enumerate(aas):
        zz = 1/aa-1
        # photo-z error (in box length units) as a function of redshift
        sigz = lambda z : 120*((1+z)/5)**-0.5
        ##
        cat = BigFileCatalog('/global/cscratch1/sd/chmodi/m3127/H1mass/highres/10240-9100/fastpm_%0.4f/Hcat-Numd-%04d/'%(aa, 1e-2*1e4))
        if scatter:
            # displace positions along the z-axis by Gaussian photo-z scatter
            pos = cat['Position'][:num].compute()
            dz = np.random.normal(0, sigz(zz), size=pos[:, -1].size)
            pos[:, -1] += dz
            layout = pm.decompose(pos)
            hmesh = pm.paint(pos, layout=layout)
        else:
            pos = cat['Position'][:num].compute()
            layout = pm.decompose(pos)
            hmesh = pm.paint(pos, layout=layout)
        def tf(k): #Photoz smoothing
            # Gaussian damping along the line of sight (mu = k_z / |k|)
            kmesh = sum(ki ** 2 for ki in k)**0.5
            kmesh[kmesh == 0] = 1
            mumesh = k[2]/kmesh
            weights = np.exp(-kmesh**2 * mumesh**2 * sigz(zz)**2/2.)
            return weights
        hmesh /= hmesh.cmean()
        if not scatter:
            # apply the photo-z smoothing kernel in Fourier space instead
            hmeshc = hmesh.r2c()
            hmeshc.apply(lambda k, v: nyquist_mask(tf(k), v) * v, out=Ellipsis)
            hmesh = hmeshc.c2r()
        ph = FFTPower(hmesh, mode='2d', Nmu=Nmu).power
        #
        # Loop over wedge (opt/pess) and thermal noise (opt/pess/reas) scenarios.
        for iw, wopt in enumerate(['opt', 'pess']):
            #for iw, wopt in enumerate(['opt']):
            for it, thopt in enumerate(['opt', 'pess', 'reas']):
                #for it, thopt in enumerate([ 'reas']):
                if rank == 0: print(aa, wopt, thopt)
                angle = np.round(mapn.wedge(zz, att=wopt, angle=True), 0)
                #dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/fastpm_%0.4f/wedge_kmin%0.2f_ang%0.1f/'%(aa, 0.03, angle)
                # NOTE: this local `dpath` shadows the module-level data path.
                dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/fastpm_%0.4f/wedge_kmin%0.2f_%s/'%(aa, 0.03, wopt)
                dpath += 'L%04d-N%04d-R//thermal-%s-hex/ZA/opt_s999_h1massA_fourier_rsdpos/'%(bs, nc, thopt)
                if scatter: ofolder = '../../data/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/photog-Numd%04d-Nmu%d/'%(wopt, thopt, numd*1e4, Nmu)
                else: ofolder = '../../data/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/photo-Numd%04d-Nmu%d/'%(wopt, thopt, numd*1e4, Nmu)
                try: os.makedirs(ofolder)
                except: pass
                if rank == 0: print(ofolder)
                if args.pp:
                    datapp = BigFileMesh(dpath+'/dataw_up/', 'mapp').paint()
                    bpaths = [dpath+'upsample2/%d-0.00//best-fit'%nc2] + [dpath + 'upsample2/%d-0.00//%04d/fit_p/'%(nc2,i) for i in range(100, 50, -20)]
                else:
                    datapp = BigFileMesh(dpath+'/dataw/', 'mapp').paint()
                    bpaths = [dpath+'%d-0.00//best-fit'%nc] + [dpath + '%d-0.00//%04d/fit_p/'%(nc,i) for i in range(100, 50, -20)]
                # pick the first reconstruction output that exists on disk
                for path in bpaths:
                    if os.path.isdir(path):
                        break
                if rank == 0: print(path)
                bfit = BigFileMesh(path, 'mapp').paint()
                # cross spectra of the photometric mesh with recon and noisy data
                pxrh = FFTPower(hmesh, second=bfit, mode='2d', Nmu=Nmu).power
                pxwh = FFTPower(hmesh, second=datapp, mode='2d', Nmu=Nmu).power
                fname = ofolder + 'photo-L%04d_%0.4f.txt'%(bs, aa)
                if args.pp : fname = fname[:-4] + '-up.txt'
                np.savetxt(fname, ph['power'].real)
                fname = ofolder + 'xdataw-L%04d_%0.4f.txt'%(bs, aa)
                if args.pp : fname = fname[:-4] + '-up.txt'
                np.savetxt(fname, pxwh['power'].real)
                fname = ofolder + 'xrecon-L%04d_%0.4f.txt'%(bs, aa)
                if args.pp : fname = fname[:-4] + '-up.txt'
                np.savetxt(fname, pxrh['power'].real)
def make_plot(Nmu=4, wopt='opt', thopt='reas'):
    """Plot the predicted cross-correlation rho^2 (left panel) and the variance of
    the cross spectrum (right panel) per mu bin, and save the figure as a PDF."""
    # photo-z error and the resulting LOS damping kernel
    sigz = lambda z : 120*((1+z)/5)**-0.5
    nbar = 10**-2.5
    b = 3.2
    Dphoto = lambda k, mu, z: np.exp(-k**2 * mu**2 * sigz(z)**2/2.)
    kk = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-opt/thermal-reas-hex/Nmu%d/recon-L%04d_%0.4f-up-k.txt'%(Nmu, bs, aa))
    try: modes = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-opt/thermal-reas-hex/Nmu%d/recon-L%04d_%0.4f-up-modes.txt'%(Nmu, bs, aa))
    except:
        # mode counts not cached yet: recompute from the data and save for reuse
        datap = mapp.Observable.load('/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/fastpm_%0.4f/wedge_kmin0.03_opt/L%04d-N0256-R/thermal-reas-hex/ZA/opt_s999_h1massA_fourier_rsdpos/datap_up/'%(aa, bs))
        tmp = FFTPower(datap.mapp, mode='2d', Nmu=Nmu).power
        modes = tmp['modes'].astype('float64')
        np.savetxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/recon-L%04d_%0.4f-up-modes.txt'%(wopt, thopt, Nmu, bs, aa), modes)
    # auto/cross spectra of the reconstructed (pm1, xm) and noisy (pm1w, xmw) fields
    pm1 = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/recon-L%04d_%0.4f-up-pm1.txt'%(wopt, thopt, Nmu, bs, aa))
    pm2 = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/recon-L%04d_%0.4f-up-pm2.txt'%(wopt, thopt, Nmu, bs, aa))
    xm = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/recon-L%04d_%0.4f-up-xm.txt'%(wopt, thopt, Nmu, bs, aa))
    xmw = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/dataw-L%04d_%0.4f-up-xm.txt'%(wopt, thopt, Nmu, bs, aa))
    pm1w = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/dataw-L%04d_%0.4f-up-pm1.txt'%(wopt, thopt, Nmu, bs, aa))
    mubins = np.linspace(0, 1, kk.shape[1]+1)
    mu = (mubins[1:] + mubins[:-1])*0.5
    pkd = np.loadtxt(dpath + '/pklin_%0.4f.txt'%aa)
    # pk = np.loadtxt(dpath + '/pklin_1.0000.txt')
    ipkd = ius(pkd[:, 0], pkd[:, 1])
    # cross-correlation coefficients of recon (rr) and noisy data (rrw)
    rr = xm/(pm1*pm2)**0.5
    rrw = xmw/(pm1w*pm2)**0.5
    pkd = ipkd(kk)
    fac = b**2*Dphoto(kk, mu, zz)**2 *nbar*pkd
    rhosq = rr**2*fac/(1+fac)
    rhosqw = rrw**2*fac/(1+fac)
    fig, ax = plt.subplots(1, 2, figsize=(10, 4))
    for i in range(mu.size):
        # split the legend labels between the two panels
        lbl1, lbl2 = None, None
        if i < mu.size//2: lbl1 = '$\mu$=%0.3f-%0.3f'%(mubins[i], mubins[i+1])
        else: lbl2 = '$\mu$=%0.3f-%0.3f'%(mubins[i], mubins[i+1])
        ax[0].plot(kk[:, i], rhosq[:, i], 'C%d'%i, label=lbl1, lw=2)
        ax[1].plot(kk[:, i], modes[:, i]**-1*(1+rhosq[:, i]**-1), 'C%d'%i, label=lbl2, lw=2)
        ax[0].plot(kk[:, i], rhosqw[:, i], 'C%d--'%i, alpha=0.5)
        ax[1].plot(kk[:, i], modes[:, i]**-1*(1+rhosqw[:, i]**-1), 'C%d--'%i, alpha=0.5)
        ax[0].plot(kk[:, 0], Dphoto(kk[:, 0], mu[i], zz)**2, 'C%d'%i, lw=1, alpha=1, ls=":")
    ax[1].set_ylim(1e-3, 100)
    ax[1].set_yscale('log')
    ax[1].axhline(1, color='k', ls="--")
    ax[0].set_ylabel(r'$\rho^2$', fontdict=font)
    #ax[1].set_ylabel(r'$N^{-1}(1+\rho^{-2})$', fontsize=14)
    ax[1].set_ylabel(r'Var$(P_\times)/P_\times^2$', fontdict=font)
    ax[0].legend(prop=fontmanage, loc=1)
    ax[1].legend(prop=fontmanage, loc=4)
    for axis in ax[:]: axis.set_xlabel(r'$k\quad [h\,{\rm Mpc}^{-1}]$', fontdict=font)
    for axis in ax.flatten():
        #axis.axhline(1, color='k', ls=':')
        axis.set_xscale('log')
        axis.grid(which='both', lw=0.2, alpha=0.2, color='gray')
    # Put on some more labels.
    for axis in ax.flatten():
        for tick in axis.xaxis.get_major_ticks():
            tick.label.set_fontproperties(fontmanage)
        for tick in axis.yaxis.get_major_ticks():
            tick.label.set_fontproperties(fontmanage)
    ##and finish
    plt.tight_layout(rect=[0, 0, 1, 0.95])
    if rank == 0 and not args.pp: plt.savefig(figpath + '/photo_z%d_L%04d-Nmu%d.pdf'%(zz*10, bs, Nmu))
    if rank == 0 and args.pp: plt.savefig(figpath + '/photo_z%d_L%04d-Nmu%d-up.pdf'%(zz*10, bs, Nmu))
def make_plot_data(aa, numd, Nmu=8, wopt='opt', thopt='reas', scatter=False):
    """Compare measured rho^2 (from the saved cross spectra of save2dphoto)
    against the theoretical prediction, per mu bin, and save the figure."""
    #
    mubins = np.linspace(0, 1, Nmu+1)
    mu = (mubins[1:] + mubins[:-1])*0.5
    kk = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-opt/thermal-reas-hex/Nmu%d/recon-L%04d_%0.4f-up-k.txt'%(Nmu, bs, aa))
    modes = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-opt/thermal-reas-hex/Nmu%d/recon-L%04d_%0.4f-up-modes.txt'%(Nmu, bs, aa))
    # NOTE(review): pr and pm1 load the same file — one of the two is redundant.
    pr = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/recon-L%04d_%0.4f-up-pm1.txt'%(wopt, thopt, Nmu, bs, aa))
    pw = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/dataw-L%04d_%0.4f-up-pm1.txt'%(wopt, thopt, Nmu, bs, aa))
    pm1 = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/recon-L%04d_%0.4f-up-pm1.txt'%(wopt, thopt, Nmu, bs, aa))
    pm2 = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/recon-L%04d_%0.4f-up-pm2.txt'%(wopt, thopt, Nmu, bs, aa))
    xm = np.loadtxt(dpath + '/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/recon-L%04d_%0.4f-up-xm.txt'%(wopt, thopt, Nmu, bs, aa))
    rr = xm/(pm1*pm2)**0.5
    pkd = np.loadtxt(dpath + '/pklin_%0.4f.txt'%aa)
    ipkd = ius(pkd[:, 0], pkd[:, 1])
    pkd = ipkd(kk)
    if scatter : ofolder = '../../data/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/photog-Numd%04d-Nmu%d/'%(wopt, thopt, numd*1e4, Nmu)
    else: ofolder = '../../data/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/photo-Numd%04d-Nmu%d/'%(wopt, thopt, numd*1e4, Nmu)
    print(ofolder)
    #get data
    fname = ofolder + 'photo-L%04d_%0.4f.txt'%(bs, aa)
    if args.pp : fname = fname[:-4] + '-up.txt'
    ph = np.loadtxt(fname)
    # add shot noise to the photometric auto power
    ph += 1/numd
    fname = ofolder + 'xrecon-L%04d_%0.4f.txt'%(bs, aa)
    if args.pp : fname = fname[:-4] + '-up.txt'
    pxrh = np.loadtxt(fname)
    fname = ofolder + 'xdataw-L%04d_%0.4f.txt'%(bs, aa)
    if args.pp : fname = fname[:-4] + '-up.txt'
    pxwh = np.loadtxt(fname)
    # measured rho^2 against recon (rhosq) and against noisy data (rhosqw)
    rhosq = pxrh**2/ph/pr
    rhosqw = pxwh**2/ph/pw
    #get theory
    sigz = lambda z : 120*((1+z)/5)**-0.5
    Dphoto = lambda k, mu, z: np.exp(-k**2 * mu**2 * sigz(z)**2/2.)
    nbar = 10**-2.5
    b = 3.2
    def iget(ii, k=1):
        # spline interpolator of the cross-correlation over mu at fixed k-bin ii
        yy = rr[ii]
        mask = ~np.isnan(yy)
        return ius(mu[mask], yy[mask], k=k)
    mus = np.linspace(0, 1, 500)
    rhosqmu = np.zeros((kk.shape[0], mus.size))
    for ik, kv in enumerate(kk[:, -1]):
        fac = b**2*Dphoto(kv, mus, zz)**2 *nbar*ipkd(kv)
        try: rhosqmu[ik] = iget(ik)(mus)**2*fac/(1+fac)
        except Exception as e: print(ik, e)
    # average the fine-grained prediction back onto the measurement's mu bins
    rhosqav = np.zeros((kk.shape[0], mu.size))
    for i in range(mu.size):
        mask = (mus > mubins[i]) & (mus < mubins[i+1])
        rhosqav[: ,i] = np.trapz(rhosqmu[:, mask], mus[mask])/(mubins[i+1]-mubins[i])
    #make figure
    fig, ax = plt.subplots(1, 2, figsize=(10, 4))
    for i in range(mu.size):
        lbl1, lbl2 = None, None
        # NOTE(review): `i <= mu.size` is always True (cf. `i < mu.size//2` in
        # make_plot); all mu labels end up on the left panel.
        if i <= mu.size: lbl1 = '$\mu$=%0.3f-%0.3f'%(mubins[i], mubins[i+1])
        #else: lbl2 = '$\mu$=%0.3f-%0.3f'%(mubins[i], mubins[i+1])
        if i ==0: lbl2 = r'Recon$_{\rm Sim}$'
        ax[0].plot(kk[:, i], rhosq[:, i], 'C%d'%i, label=lbl1)
        ax[1].plot(kk[:, i], modes[:, i]**-1*(1+rhosq[:, i]**-1), 'C%d'%i, label=lbl2)
        #ax[0].plot(kk[:, i], rhosqw[:, i], 'C%d--'%i, alpha=0.4)
        if i ==0: lbl2 = r'Noisy$_{\rm Sim}$'
        ax[1].plot(kk[:, i], modes[:, i]**-1*(1+rhosqw[:, i]**-1), 'C%d:'%i, alpha=1, lw=0.5, label=lbl2)
        ax[0].plot(kk[:, i], rhosqav[:, i], 'C%d--'%i, alpha=1, lw=1)
        if i ==0: lbl2 = r'Recon$_{\rm Pred}$'
        ax[1].plot(kk[:, i], modes[:, i]**-1*(1+rhosqav[:, i]**-1), 'C%d--'%i, alpha=1, lw=1, label=lbl2)
        if i ==0: lbl0 = r'$D_{\rm photo}^2$'
        else: lbl0 = None
        ax[0].plot(kk[:, 0], Dphoto(kk[:, 0], mu[i], zz)**2, 'C%d'%i, lw=0.5, alpha=1, ls=":", label=lbl0)
    #
    ax[0].set_ylim(-.05, 1.1)
    ax[1].set_ylim(9e-4, 100)
    ax[1].set_yscale('log')
    ax[1].axhline(1, color='k', ls="--")
    ax[0].set_ylabel(r'$\rho^2$', fontdict=font)
    #ax[1].set_ylabel(r'$N^{-1}(1+\rho^{-2})$', fontsize=14)
    ax[1].set_ylabel(r'Var$(P_\times)/P_\times^2$', fontdict=font)
    ax[0].legend(prop=fontmanage, loc=1, ncol=1)
    ax[1].legend(prop=fontmanage, loc=3, ncol=1)
    for axis in ax[:]: axis.set_xlabel(r'$k\quad [h\,{\rm Mpc}^{-1}]$', fontdict=font)
    for axis in ax.flatten():
        #axis.axhline(1, color='k', ls=':')
        axis.set_xscale('log')
        axis.grid(which='both', lw=0.2, alpha=0.2, color='gray')
    # Put on some more labels.
    for axis in ax.flatten():
        for tick in axis.xaxis.get_major_ticks():
            tick.label.set_fontproperties(fontmanage)
        for tick in axis.yaxis.get_major_ticks():
            tick.label.set_fontproperties(fontmanage)
    # and finish
    plt.tight_layout(rect=[0, 0, 1, 0.95])
    if rank == 0 and not args.pp: plt.savefig(figpath + '/photod_z%d_L%04d-Nmu%d.pdf'%(zz*10, bs, Nmu))
    if rank == 0 and args.pp:
        if scatter : plt.savefig(figpath + '/photodg_z%d_L%04d-Nmu%d-up.pdf'%(zz*10, bs, Nmu))
        else : plt.savefig(figpath + '/photod_z%d_L%04d-Nmu%d-up.pdf'%(zz*10, bs, Nmu))
################
if __name__=="__main__":
    # Measurement passes (run once to populate the data folders), kept for reference:
    #save2dphoto(Nmu=4, numd=10**-2.5, aa=0.2000)
    #save2dphoto(Nmu=8, numd=10**-2.5, aa=0.2000)
    #save2dphoto(Nmu=4, numd=10**-2.5, aa=0.2000, scatter=True)
    #save2dphoto(Nmu=8, numd=10**-2.5, aa=0.2000, scatter=True)
    #make_plot(Nmu=4)
    #make_plot(Nmu=8)
    # Plot data-vs-theory comparison for both photo-z treatments and mu binnings.
    make_plot_data(aa=0.2000, numd=10**-2.5, Nmu=8)
    make_plot_data(aa=0.2000, numd=10**-2.5, Nmu=8, scatter=True)
    make_plot_data(aa=0.2000, numd=10**-2.5, Nmu=4)
    make_plot_data(aa=0.2000, numd=10**-2.5, Nmu=4, scatter=True)
    #
| 41.704485 | 210 | 0.575161 |
1107964a13a8c587e9dedd0f0fb6a2581ecb0887 | 3,999 | py | Python | ndfinance/strategies/basic/__init__.py | gomtinQQ/NDFinance | 522bf0486e5f5337c522d0e34b088f386c7c3290 | [
"MIT"
] | 35 | 2020-09-26T16:31:45.000Z | 2022-01-01T12:12:21.000Z | ndfinance/strategies/basic/__init__.py | gomtinQQ/NDFinance | 522bf0486e5f5337c522d0e34b088f386c7c3290 | [
"MIT"
] | 1 | 2020-09-27T08:54:23.000Z | 2020-09-27T08:54:23.000Z | ndfinance/strategies/basic/__init__.py | gomtinQQ/NDFinance | 522bf0486e5f5337c522d0e34b088f386c7c3290 | [
"MIT"
] | 8 | 2020-10-06T23:51:22.000Z | 2022-02-16T12:11:10.000Z | from ndfinance.strategies import Strategy, PeriodicRebalancingStrategy
from ndfinance.brokers.base import order
from ndfinance.brokers.base.order import *
from ndfinance.strategies.utils import *
class SameWeightBuyHold(Strategy):
    """Buy every available asset once with equal weights, then hold."""

    def __init__(self):
        super().__init__()
        self.ordered = False  # flips to True after the one-time buy

    def _logic(self):
        # Orders are submitted only on the very first call.
        if self.ordered:
            return
        target_weight = 1 / len(self.broker.assets)
        portfolio_value = self.broker.portfolio.portfolio_value
        for asset in self.broker.assets.values():
            self.broker.order(order.Weight(asset, portfolio_value, 1, target_weight))
        self.ordered = True
class SameWeightBuynRebalance(PeriodicRebalancingStrategy):
    """Hold all assets at equal weights, rebalancing every rebalance_period."""

    def __init__(self, rebalance_period):
        super().__init__(rebalance_period)

    def register_engine(self, *args, **kwargs):
        super().register_engine(*args, **kwargs)
        # Equal weight per asset, fixed at registration time.
        n_assets = len(self.broker.assets.keys())
        self.weights = [1 / n_assets] * n_assets
        return self

    def _logic(self):
        self.broker.order(
            order.Rebalance(tickers=self.broker.assets.keys(), weights=self.weights)
        )
class OscillatorStrategy(Strategy):
    """Oscillator breakout strategy: enter long/short on threshold crossings of
    an oscillator indicator, exit on overbought/oversold crossings; optional
    time-cut, n-percent position sizing, and stop-loss overlays."""

    def __init__(self, breakout_threshold, oversold_threshold, overbought_threshold, osc_label,
                 use_short=False, use_time_cut=False, timecut_params=None, use_n_perc_rule=False, n_perc_params=None,
                 use_stop_loss=False, stop_loss_params=None, *args, **kwargs):
        super(OscillatorStrategy, self).__init__()
        self.use_short = use_short
        self.breakout_threshold = breakout_threshold
        self.oversold_threshold = oversold_threshold
        self.overbought_threshold = overbought_threshold
        self.osc_label = osc_label  # column label of the oscillator in the data provider
        self.use_time_cut = use_time_cut
        self.timecut_params = timecut_params
        self.use_n_perc_rule = use_n_perc_rule
        self.n_perc_params = n_perc_params
        self.use_stop_loss = use_stop_loss
        self.stop_loss_params = stop_loss_params

    def register_engine(self, *args, **kwargs):
        super(OscillatorStrategy, self).register_engine(*args, **kwargs)
        # single-asset strategy: trade the first registered ticker
        self.ticker = list(self.broker.assets.keys())[0]
        return self

    def _logic(self):
        # last two oscillator values: previous bar and current bar
        indicator_ = self.data_provider.get_ohlcvt(self.ticker, self.osc_label, n=2)
        indicator = indicator_[-1]
        indicator_prev = indicator_[0]
        # NOTE: `&` is used as boolean AND throughout; operands are plain bools here.
        if not self.broker.portfolio.positions:
            ordered = True
            # position sizing: n-percent rule or full portfolio value
            value = apply_n_percent_rule(self.broker.portfolio.portfolio_value, **self.n_perc_params) \
                if self.use_n_perc_rule else self.broker.portfolio.portfolio_value
            if (indicator >= self.breakout_threshold) & (indicator_prev < self.breakout_threshold):
                # upward crossing of the breakout threshold -> open long
                self.broker.order(Weight(self.broker.assets[self.ticker], value, 1, 1))
            elif (((indicator <= self.breakout_threshold) & (indicator_prev > self.breakout_threshold)) & self.use_short):
                # downward crossing -> open short (only if shorting enabled)
                self.broker.order(Weight(self.broker.assets[self.ticker], value, -1, 1))
            else:
                ordered = False
            if ordered & self.use_time_cut:
                self.broker.order(TimeCutClose(self.broker.assets[self.ticker], self.indexer.timestamp, **self.timecut_params))
            if ordered & self.use_stop_loss:
                self.broker.order(StopLoss(self.broker.assets[self.ticker], **self.stop_loss_params))
        elif self.broker.portfolio.positions[self.ticker].side == 1:
            # exit long when the oscillator drops back through the overbought level
            if (indicator <= self.overbought_threshold) & (indicator_prev > self.overbought_threshold):
                self.broker.order(Close(self.broker.assets[self.ticker]))
        elif self.broker.portfolio.positions[self.ticker].side == -1:
            # exit short when the oscillator rises back through the oversold level
            if (indicator >= self.oversold_threshold) & (indicator_prev < self.oversold_threshold):
                self.broker.order(Close(self.broker.assets[self.ticker]))
| 44.932584 | 127 | 0.68017 |
1107cf1f8235f1761f09ec217b10ad75f1307704 | 1,662 | py | Python | 06_Trees/shortest_unique_prefix.py | Sheetal0601/InterviewBit | 72ba1507278dafac6e5fb81da20d372e3d141348 | [
"MIT"
] | 61 | 2018-02-18T08:16:31.000Z | 2022-02-17T17:18:57.000Z | 06_Trees/shortest_unique_prefix.py | Sheetal0601/InterviewBit | 72ba1507278dafac6e5fb81da20d372e3d141348 | [
"MIT"
] | 1 | 2018-02-23T20:06:18.000Z | 2019-12-29T18:52:20.000Z | 06_Trees/shortest_unique_prefix.py | Sheetal0601/InterviewBit | 72ba1507278dafac6e5fb81da20d372e3d141348 | [
"MIT"
] | 30 | 2018-03-28T19:02:23.000Z | 2021-07-06T20:00:14.000Z | # Shortest Unique Prefix
# https://www.interviewbit.com/problems/shortest-unique-prefix/
#
# Find shortest unique prefix to represent each word in the list.
#
# Example:
#
# Input: [zebra, dog, duck, dove]
# Output: {z, dog, du, dov}
# where we can see that
# zebra = z
# dog = dog
# duck = du
# dove = dov
# NOTE : Assume that no word is prefix of another. In other words, the representation is always possible.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class TrieNode:
    """One trie node: child links keyed by character, plus a pass-through counter.

    `chars` and `count` are accessed directly by `Trie`, so they are part of
    this class's public contract.
    """

    def __init__(self):
        self.chars = {}  # char -> TrieNode
        self.count = 0   # how many inserted words pass through this node

    def __contains__(self, item):
        """Return True when a child node exists for character `item`."""
        return item in self.chars.keys()

    def __getitem__(self, index):
        """Return the child node for `index`; raises KeyError when missing."""
        return self.chars[index]

    def __setitem__(self, index, item):
        """Attach `item` as the child for character `index`."""
        self.chars[index] = item
class Trie:
    """Prefix tree over inserted words, supporting shortest-unique-prefix queries.

    Assumes (per the problem statement) that no inserted word is a prefix of
    another, so a unique prefix always exists.
    """

    def __init__(self):
        self.root = TrieNode()

    def __iadd__(self, word):
        """Insert `word`, bumping the pass-through count of each node on its path."""
        node = self.root
        for ch in word:
            if ch not in node:
                node[ch] = TrieNode()
            node = node[ch]
            node.count += 1
        return self

    def uniquePrefix(self, word):
        """Return the shortest prefix of `word` that no other inserted word shares."""
        node = self.root
        for depth, ch in enumerate(word, start=1):
            node = node[ch]
            # count == 1 means only this word passes through: prefix is unique.
            if node.count == 1:
                return word[:depth]
        return word
class Solution:
    # @param A : list of strings
    # @return a list of strings
    def prefix(self, A):
        """Return the shortest unique prefix for every word in A, in order."""
        trie = Trie()
        for w in A:
            trie += w
        return list(map(trie.uniquePrefix, A))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # | 24.086957 | 106 | 0.496992 |
110af0aa9cc468fbee2f90b29540e3ee61251308 | 1,975 | py | Python | daemon.py | hletrd/TRIPOL_polarizer | 124d202bf876635bd402306fb5d7572fd45ce599 | [
"MIT"
] | null | null | null | daemon.py | hletrd/TRIPOL_polarizer | 124d202bf876635bd402306fb5d7572fd45ce599 | [
"MIT"
] | null | null | null | daemon.py | hletrd/TRIPOL_polarizer | 124d202bf876635bd402306fb5d7572fd45ce599 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, send_from_directory
import serial
import serial.tools.list_ports
import threading
app = Flask(__name__)
def run_server():
    """Start the Flask development server on the configured address/port."""
    app.run(host=bind_ip, debug=True, port=bind_port)


@app.route('/')
def index():
    """Render the main page with the currently available serial ports."""
    return render_template('_basic.html', ports=serialhandler.get_port_list())


@app.route('/get/angle/now')
def get_angle():
    """Return the last angle reported by the device, as text."""
    return str(serialhandler.angle_now)


@app.route('/get/angle/to')
def get_angle_to():
    """Return the last commanded target angle, as text."""
    return str(serialhandler.angle_to)


@app.route('/open/<path:port>')
def open_serial(port):
    """Open the given serial port (leading character of the URL segment is dropped)."""
    serialhandler.connect(port[1:])
    return '1'


@app.route('/move/<string:angle>')
def move_angle(angle):
    """Command a move to `angle` if it is within [0, 360]; '1' on success, '0' otherwise."""
    if (360 >= float(angle) >= 0):
        serialhandler.move_angle(str(float(angle)))
        return '1'
    return '0'


@app.route('/static/<path:path>')
def send_static(path):
    """Serve files from the static directory (inline, not as download)."""
    return send_from_directory('static', path, as_attachment=False)
class SerialHandler(object):
    """Owns the serial link to the polarizer controller.

    Periodically polls the port (via a self-rescheduling threading.Timer) and
    parses ``angpos:``/``speed:`` status records into ``angle_now``.
    """

    def __init__(self):
        self.Serial = serial.Serial()
        self.Serial.baudrate = 115200
        self.Serial.timeout = 0.1
        self.angle_now = 0.0   # last angle reported by the device
        self.angle_to = '0.0'  # last commanded target angle (string)
        self.q = ''            # accumulated, not-yet-parsed serial input

    def get_port_list(self):
        """Return the list of serial ports available on this machine."""
        return serial.tools.list_ports.comports()

    def connect(self, port):
        """Open `port` and start the periodic read loop."""
        self.Serial.port = port
        self.Serial.open()
        threading.Timer(0.2, self.read_serial).start()

    def move_angle(self, angle):
        """Send the target `angle` (string) to the device and remember it."""
        self.Serial.write(angle.encode('utf-8'))
        self.angle_to = angle

    def read_serial(self):
        """Drain the serial input buffer and parse the latest status record.

        Reschedules itself every 0.2 s.
        """
        threading.Timer(0.2, self.read_serial).start()
        try:
            # pySerial >= 3.0 exposes the buffer size as the `in_waiting`
            # property.  (The original code misspelled it `in_wating`, which
            # always raised AttributeError and silently fell back to the
            # deprecated inWaiting() path via a bare except.)
            while self.Serial.in_waiting > 0:
                self.q += self.Serial.read().decode('utf-8')
        except AttributeError:
            # Fallback for older pySerial releases (inWaiting() method).
            while self.Serial.inWaiting() > 0:
                self.q += self.Serial.read(1).decode('utf-8')
        # Records are separated by blank lines; only the newest one matters.
        splitted = self.q.split('\n\n')
        last = splitted[-1]
        if 'angpos:' in last and 'speed:' in last:
            self.q = ''
            self.angle_now = float(last.split('angpos:')[1].split('\n')[0])
if __name__ == '__main__':
    # Serve the control UI on localhost only; change bind_ip to expose it.
    bind_ip = '127.0.0.1'
    bind_port = 8000
    serialhandler = SerialHandler()
    run_server()
110bd5c7d26cb1039d7248113ac5574d56217da1 | 134 | py | Python | tests/agave_mock_server/wsgi.py | vdjserver/agave-cli | 4ad0826779cd760eb8a102978c456fee214ce8a3 | [
"BSD-3-Clause"
] | null | null | null | tests/agave_mock_server/wsgi.py | vdjserver/agave-cli | 4ad0826779cd760eb8a102978c456fee214ce8a3 | [
"BSD-3-Clause"
] | 1 | 2019-06-11T21:48:12.000Z | 2019-06-11T22:11:35.000Z | tests/agave_mock_server/wsgi.py | vdjserver/agave-cli | 4ad0826779cd760eb8a102978c456fee214ce8a3 | [
"BSD-3-Clause"
] | null | null | null | from agave_mock_server import app as application
if __name__ == "__main__":
    # Standalone dev run with an ad-hoc self-signed TLS certificate.
    application.run(host="0.0.0.0", ssl_context="adhoc")
| 26.8 | 56 | 0.738806 |
110ec99e58e5ce9d328a5556af8ee117cc5ebd9a | 3,304 | py | Python | src/utils.py | senadkurtisi/neural-style-transfer | 0048d8b184959de095f0821f63205c8ce3ff2dff | [
"MIT"
] | null | null | null | src/utils.py | senadkurtisi/neural-style-transfer | 0048d8b184959de095f0821f63205c8ce3ff2dff | [
"MIT"
] | null | null | null | src/utils.py | senadkurtisi/neural-style-transfer | 0048d8b184959de095f0821f63205c8ce3ff2dff | [
"MIT"
] | null | null | null | from PIL import Image
import numpy as np
import torch
import torchvision.transforms.transforms as transforms
import os
from config import cfg
def preprocess_img(img_path):
    """ Loads the desired image and prepares it
        for VGG19 model.

    Parameters:
        img_path: path to the image

    Returns:
        processed: loaded image after preprocessing, shape (1, 3, H, W)
    """
    # Resize, swap RGB->BGR (the [2, 1, 0] index) and scale to 0..255 after
    # mean subtraction -- presumably matching Caffe-style VGG weights; confirm.
    prep = transforms.Compose([transforms.Resize((cfg.IMG_SIZE, cfg.IMG_SIZE)),
                               transforms.ToTensor(),
                               transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])]),
                               transforms.Normalize(mean=[0.40760392, 0.45795686, 0.48501961],
                                                    std=[1, 1, 1]),
                               transforms.Lambda(lambda x: x.mul_(255)),
                               ])
    img = Image.open(img_path)
    processed = prep(img)
    if cfg.cuda:
        processed = processed.cuda()
    # Add the batch dimension expected by the network.
    return processed.unsqueeze(0)
def get_init_img(mode='noise', source_img=None):
    """ Constructs the initial image for the NST algorithm.

    Parameters:
        mode: how to initialize the image? {'noise', 'other'}
        source_img: image used for initialization if @mode is set to 'other'

    Returns:
        opt_image: initialized image tensor with requires_grad=True
    """
    assert mode in ['noise', 'other'], f"{mode} is and illegal initialization mode!"
    if mode == 'style' or mode == 'other':
        assert (source_img is not None), f"Can't initialize from {mode}!"

    if mode == 'noise':
        # BUG FIX: the original created the noise tensor only when cfg.cuda was
        # True and fell through with `pass` otherwise, leaving opt_image
        # undefined on CPU.  Build the noise unconditionally, then move it to
        # the GPU when requested.
        noise = np.random.normal(loc=0, scale=90.,
                                 size=(1, 3, cfg.IMG_SIZE,
                                       cfg.IMG_SIZE)).astype(np.float32)
        opt_image = torch.from_numpy(noise).float()
        if cfg.cuda:
            opt_image = opt_image.cuda()
    else:
        opt_image = (source_img.detach()).clone()

    # Make sure that gradients are being calculated for this image
    # during the forward pass
    opt_image.requires_grad = True
    return opt_image
def gram_matrix(x):
    """Compute the batched Gram matrix of the feature maps in *x*.

    Parameters:
        x: feature maps of shape (batch, channels, height, width)

    Returns:
        Tensor of shape (batch, channels, channels), normalized by h*w
    """
    batch, channels, height, width = x.size()
    flat = x.view(batch, channels, height * width)
    gram = torch.bmm(flat, flat.transpose(1, 2))
    return gram / (height * width)
def postprocess(img):
    """ Prepares the image for display and saving.

    Undoes the scaling/mean-shift applied at preprocessing time, swaps the
    channels back to RGB, clips to [0, 1] and converts to a PIL image.
    """
    postp = transforms.Compose([transforms.Lambda(lambda x: x.mul_(1. / 255)),
                                transforms.Normalize(mean=[-0.40760392, -0.45795686, -0.48501961],
                                                     std=[1, 1, 1]),
                                transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])]),  # turn to RGB
                                ])
    img = postp(img)
    # In order to have more visually appealing images
    # We need to clip the pixel values
    img[img > 1] = 1
    img[img < 0] = 0
    img = transforms.ToPILImage()(img)
    return img
def get_file_name(path):
    """Return the bare filename of *path*: no directory, no final extension."""
    filename = os.path.basename(path)
    stem, _extension = os.path.splitext(filename)
    return stem
| 29.765766 | 106 | 0.553874 |
1111178ffeca17f97dbc94edf513c3e6554c30c4 | 5,179 | py | Python | myfunc.py | dedo94/GUIGG | be4d6243ee9abcfaf42ab9aec6cd87f8e2149d4d | [
"MIT"
] | 1 | 2019-02-15T22:38:40.000Z | 2019-02-15T22:38:40.000Z | myfunc.py | dedo94/GUIGG | be4d6243ee9abcfaf42ab9aec6cd87f8e2149d4d | [
"MIT"
] | null | null | null | myfunc.py | dedo94/GUIGG | be4d6243ee9abcfaf42ab9aec6cd87f8e2149d4d | [
"MIT"
] | null | null | null | import os
import platform
import sys
from os.path import relpath
sys.path.append('/usr/local/bin/dot')
sys.path.append('/usr/bin/dot')
from graphviz import Digraph
# struttura dati
class node:
    """One node of the graph: an id, an instruction string and successor ids.

    When the instruction looks like 'sender -> receiver: message' (exactly
    one '->' and one ':'), the three parts are parsed into snd/recv/msg;
    otherwise all three are set to the string "null".
    """
    def __init__(self, id, istruction, *nxt_node):
        self.id = id
        self.ist = istruction
        self.next_node = list(nxt_node)
        text = str(istruction)
        if text.count("->") == 1 and text.count(":") == 1:
            sender, remainder = text.split("->")
            receiver, message = remainder.split(":")
            self.snd = sender.strip()
            self.recv = receiver.strip()
            self.msg = message.strip()
        else:
            self.snd = self.recv = self.msg = "null"
# definisce il path
def pathfy(filepath):
    """Return *filepath* expressed relative to this module's directory."""
    program_dir = os.path.dirname(os.path.abspath(__file__))
    return relpath(filepath, program_dir)
# data una struttura dati ed un id, restituisce la sua posizione
def id_to_pos(str_gr, id_node):
    """Return the index of the first node whose id equals *id_node*.

    Returns None implicitly when no node matches.
    """
    for position, entry in enumerate(str_gr):
        if entry.id == id_node:
            return position
# data una struttura dati ed una istruzione, restituisce la posizione
def ist_to_pos(str_gr, ist):
    """Return the index of the first node whose instruction equals *ist*.

    Returns None implicitly when no node matches.
    """
    for position, entry in enumerate(str_gr):
        if entry.ist == ist:
            return position
# data una struttura dati ed una istruzione, restituisce il suo id
def ist_to_id(str_gr, ist):
    """Return the id of the first node whose instruction equals *ist*.

    Returns None when no node matches.
    """
    return next((entry.id for entry in str_gr if entry.ist == ist), None)
# data una istruzione ed un numero di partenza, riassegna tutti gli id a partire dal numero dato
def reassign_id(str_gr, start_id):
    """Return a copy of *str_gr* with every id shifted up by *start_id*.

    Nodes whose instruction is "start" are dropped; successor ids are
    shifted by the same offset.
    """
    shifted = []
    for original in str_gr:
        if original.ist == "start":
            continue
        fresh = node(int(original.id) + start_id, original.ist)
        fresh.next_node.extend(int(nxt) + start_id for nxt in original.next_node)
        shifted.append(fresh)
    return shifted
# data una struttura e una istruzione, restituisce il predecessore
def prec_node(graph, node_ist):
    """Return the ids of every node whose next_node list references the node
    carrying instruction *node_ist* (duplicates preserved).

    Prints a warning and returns None when no node carries that instruction.
    """
    target_id = -1
    for entry in graph:
        if entry.ist == node_ist:
            target_id = entry.id
    if target_id == -1:
        print("Can't find node in that graph")
    else:
        return [entry.id for entry in graph
                for successor in entry.next_node if successor == target_id]
# restituisce l'id massimo contenuto in una struttura
def max_id(str_gr):
    """Return one more than the largest (non-negative) id found in *str_gr*."""
    highest = 0
    for entry in str_gr:
        highest = max(highest, int(entry.id))
    return highest + 1
# stampa una struttura
def print_str(struct_gr, space):
    """Dump every node's id, instruction and successor list to stdout.

    When *space* == 1 a '---' separator is printed before each node.
    """
    for entry in struct_gr:
        if space == 1:
            print("---")
        print(entry.id)
        print(entry.ist)
        print(entry.next_node)
# data una struttura ed un id restituisce la posizione
def find_pos(gr, id):
    """Return the index of the first node whose id equals *id* (int compare).

    Returns None implicitly when no node matches.
    """
    for position, entry in enumerate(gr):
        if int(entry.id) == int(id):
            return position
| 43.158333 | 181 | 0.448542 |
1111834ed10ea00b3973a4e7b45b84a2fd41c455 | 2,466 | py | Python | EllucianEthosPythonClient/ChangeNotificationUtils.py | rmetcalf9/EllucainEthosPythonClient | 6913322b1e583f655f67399f2baa763833583c27 | [
"MIT"
] | 1 | 2021-02-09T22:05:50.000Z | 2021-02-09T22:05:50.000Z | EllucianEthosPythonClient/ChangeNotificationUtils.py | rmetcalf9/EllucainEthosPythonClient | 6913322b1e583f655f67399f2baa763833583c27 | [
"MIT"
] | 1 | 2020-07-02T11:44:54.000Z | 2020-07-02T11:45:38.000Z | EllucianEthosPythonClient/ChangeNotificationUtils.py | rmetcalf9/EllucainEthosPythonClient | 6913322b1e583f655f67399f2baa763833583c27 | [
"MIT"
] | 1 | 2021-01-13T21:35:11.000Z | 2021-01-13T21:35:11.000Z | from .ChangeNotificationMessage import ChangeNotificationMessage
import json
def requestBatchOfPagesAndReturnRemainingCountLib(
  pageLimit,
  lastProcessedID,
  clientAPIInstance,
  loginSession,
  processIndividualMessage
):
  """Fetch one page of change notifications from /consume.

  Calls *processIndividualMessage* once per notification and returns the
  value of the 'x-remaining' response header (messages left on the queue).
  Raises via clientAPIInstance.raiseResponseException on non-200 responses.
  """
  query = {"limit": str(pageLimit)}
  if lastProcessedID is not None:
    query["lastProcessedID"] = lastProcessedID

  response = clientAPIInstance.sendGetRequest(
    url="/consume",
    params=query,
    loginSession=loginSession,
    injectHeadersFn=None
  )
  if response.status_code != 200:
    clientAPIInstance.raiseResponseException(response)

  remainingMessages = int(response.headers["x-remaining"])
  for entry in json.loads(response.content):
    notification = ChangeNotificationMessage(dict=entry, clientAPIInstance=clientAPIInstance)
    processIndividualMessage(changeNotification=notification)
  return remainingMessages
class ChangeNotificationIterator:
  """Iterator over change-notification messages from the /consume endpoint.

  Pulls pages of *pageLimit* messages, issuing at most *maxRequests* page
  fetches per iteration pass, and yields one message at a time.
  """
  clientAPIInstance = None
  loginSession = None
  pageLimit = None
  maxRequests = None

  # iteration state
  requestsRemaining = None
  curIdx = None
  curResultList = None

  def __init__(self, loginSession, pageLimit, maxRequests, clientAPIInstance):
    self.clientAPIInstance = clientAPIInstance
    self.loginSession = loginSession
    self.pageLimit = pageLimit
    self.maxRequests = maxRequests
    self.requestsRemaining = self.maxRequests
    self.curIdx = 0
    self.curResultList = []

  def __iter__(self):
    # Restart iteration from a clean state each time iter() is called.
    self.requestsRemaining = self.maxRequests
    self.curIdx = 0
    self.curResultList = []
    return self

  def loadNewPageOfResults(self):
    """Fetch the next page of notifications into curResultList."""
    self.curIdx = 0
    self.curResultList = []
    def processIndividualMessage(changeNotification):
      self.curResultList.append(changeNotification)
    # NOTE(review): lastProcessedID is always None here, so every fetch reads
    # from the queue head -- presumably fine for consume semantics; confirm.
    requestBatchOfPagesAndReturnRemainingCountLib(
      pageLimit=self.pageLimit,
      clientAPIInstance=self.clientAPIInstance,
      loginSession=self.loginSession,
      processIndividualMessage=processIndividualMessage,
      lastProcessedID=None
    )

  def __next__(self):
    if self.curIdx >= len(self.curResultList):
      if self.requestsRemaining==0:
        raise StopIteration
      self.requestsRemaining -= 1
      self.loadNewPageOfResults()
      if self.curIdx >= len(self.curResultList):
        # We tried getting a new page but there are still not results
        # so terminate
        raise StopIteration
    retVal = self.curResultList[self.curIdx]
    self.curIdx += 1
    return retVal
| 27.098901 | 103 | 0.744931 |
1112b034f98c7dc617526ab156487a42f2db45b6 | 1,821 | py | Python | schafkopf/players/models/evaluate_calssifier.py | Taschee/schafkopf | 96c5b9199d9260b4fdd74de8a6e54805b407407b | [
"MIT"
] | 10 | 2018-07-30T14:02:25.000Z | 2022-01-19T23:48:31.000Z | schafkopf/players/models/evaluate_calssifier.py | TimiH/schafkopf-1 | deafaa28d6cba866d097b4347dd84ce37b3b594d | [
"MIT"
] | 1 | 2018-08-12T07:25:51.000Z | 2018-08-27T21:04:04.000Z | schafkopf/players/models/evaluate_calssifier.py | Taschee/schafkopf | 96c5b9199d9260b4fdd74de8a6e54805b407407b | [
"MIT"
] | 2 | 2019-01-23T10:02:57.000Z | 2019-08-26T22:05:52.000Z | import keras
import numpy as np
from schafkopf.players.data.load_data import load_data_bidding
from schafkopf.players.data.encodings import decode_on_hot_hand
import matplotlib.pyplot as plt
# Load the held-out and training bidding datasets (pickled feature/label pairs).
x_test, y_test = load_data_bidding('../data/test_data.p')
x_train, y_train = load_data_bidding('../data/train_data.p')
modelpath = "bigger_classifier50.hdf5"
model = keras.models.load_model(modelpath)
predictions = model.predict_classes(x_test)
false_pred_list = []
# All (predicted, true) class pairs for the 9-class bidding problem.
pairs = [(i, j) for i in range(9) for j in range(9)]
false_counts = {pair: 0 for pair in pairs}
for pred, x, y in zip(predictions, x_test, y_test):
    # Labels are one-hot; recover the class index of the true label.
    y_ind = np.where(y == 1)[0][0]
    if pred != y_ind:
        false_pred_list.append((pred, y_ind))
        print('Predicted {} instead of {}'.format(pred, y_ind))
        print('Hand : ', decode_on_hot_hand(x))
num_false = len(false_pred_list)
print('Number of false predictions : ', num_false)
for pair in false_pred_list:
    false_counts[pair] += 1
# Scatter plot acting as a confusion chart: marker size grows with the number
# of misclassifications for that (predicted, true) pair.
fig, ax = plt.subplots(1, 1)
tick_labels = ['No game', 'Partner, bells', 'Partner, Leaves', 'Partner, Acorns',
               'Wenz', 'Solo, Bells', 'Solo, Hearts', 'Solo, Leaves', 'Solo, Acorns']
for y_pred, y_true in pairs:
    plt.scatter(y_pred, y_true, s=3*false_counts[(y_pred, y_true)], c='blue', alpha=0.6)
ax.set_xticks(np.arange(0, 9, 1))
ax.set_xticklabels(tick_labels, rotation='vertical', fontsize=11)
ax.set_yticks(np.arange(0, 9, 1))
ax.set_yticklabels(tick_labels, rotation='horizontal', fontsize=11)
ax.set_xlabel('Bidding network', fontsize=13)
ax.set_ylabel('Human player', fontsize=13)
ax.axis('equal')
plt.tight_layout()
plt.show()
test_scores = model.evaluate(x_test, y_test)
val_scores = model.evaluate(x_train, y_train)
print('Total Test accuracy : ', test_scores[1])
print('Total Train accuracy : ', val_scores[1])
| 29.852459 | 88 | 0.713344 |
1112cf8fd2ea3b082bd270c70d54466062312420 | 8,338 | py | Python | personnages3d/filtre.py | mxbossard/personnages3d | 87c2ab8dc9b502c0074f1dec04b832803dee1462 | [
"Apache-2.0"
] | null | null | null | personnages3d/filtre.py | mxbossard/personnages3d | 87c2ab8dc9b502c0074f1dec04b832803dee1462 | [
"Apache-2.0"
] | null | null | null | personnages3d/filtre.py | mxbossard/personnages3d | 87c2ab8dc9b502c0074f1dec04b832803dee1462 | [
"Apache-2.0"
] | 1 | 2021-10-01T02:03:42.000Z | 2021-10-01T02:03:42.000Z |
"""Ce script est un exemple de matplotlib"""
import numpy as np
def moving_average(x, n, type='simple'):
    """
    Compute an n-period weighted average over *x* and return its final value.

    type is 'simple' (uniform weights) | 'exponential' (exp-decaying weights)
    """
    values = np.asarray(x)
    if type == 'simple':
        kernel = np.ones(n)
    else:
        kernel = np.exp(np.linspace(-1., 0., n))
    kernel = kernel / kernel.sum()
    smoothed = np.convolve(values, kernel, mode='full')[:len(values)]
    # The first n entries are edge artifacts; flatten them to the n-th value.
    smoothed[:n] = smoothed[n]
    return smoothed[-1]
if __name__ == '__main__':
data = [[1.0], [1.12], [1.17], [1.18], [1.26], [1.33], [1.37], [1.4], [1.46],
[1.49], [1.51], [1.52], [1.53], [1.51], [1.52], [1.52], [1.52], [1.51],
[1.52], [1.51], [1.51], [1.51], [1.51], [1.52], [1.52], [1.54], [1.56],
[1.59], [1.64], [1.69], [1.71], [1.73], [1.78], [1.83], [1.88], [1.94],
[1.96], [2.01], [2.04], [2.1], [2.13], [2.16], [2.2], [2.27], [2.3],
[2.37], [2.41], [2.46], [2.54], [2.58], [2.64], [2.67], [2.72], [2.76],
[2.84], [2.89], [2.93], [2.97], [3.03], [3.08], [3.1], [3.15], [3.18],
[3.24], [3.31], [3.33], [3.37], [3.42], [3.42], [3.44], [3.46], [3.51],
[3.51], [3.55], [3.59], [3.6], [3.64], [3.63], [3.66], [3.65], [3.66],
[3.69], [3.69], [3.71], [3.77], [3.76], [3.79], [3.84], [3.86], [3.94],
[4.0], [4.08], [4.05], [4.11], [4.16], [4.24], [4.27], [4.35], [4.35],
[4.42], [4.43], [4.47], [4.54], [4.57], [4.62], [4.64], [4.68], [4.74],
[4.79], [4.84], [4.93], [4.95], [5.08], [5.15], [5.21], [5.25], [5.3],
[5.35], [5.39], [5.46], [5.55], [5.57], [5.6], [5.7], [5.73], [5.77],
[5.95], [5.93], [6.06], [6.1], [6.08], [6.13], [6.14], [6.17], [6.25],
[6.36], [6.44], [6.61], [6.58], [6.59], [6.71], [6.75], [6.78], [6.8],
[6.85], [6.84], [6.95], [6.9], [7.03], [7.03], [7.12], [7.17], [7.13],
[7.11], [7.22], [7.25], [7.29], [7.4], [7.42], [7.44], [7.48], [7.46],
[7.49], [7.39], [7.56], [7.64], [7.6], [7.64], [7.65], [7.63], [7.66],
[7.6], [7.66], [7.62], [7.54], [7.44], [7.43], [7.38], [7.39], [7.22],
[7.19], [7.04], [6.97], [6.92], [6.83], [6.83], [6.64], [6.58], [6.52],
[6.44], [6.24], [6.24], [6.08], [5.99], [5.93], [5.89], [5.74], [5.65],
[5.6], [5.51], [5.46], [5.39], [5.26], [5.2], [5.08], [4.94], [4.81],
[4.75], [4.71], [4.66], [4.56], [4.44], [4.39], [4.33], [4.18], [4.15],
[4.04], [3.96], [3.84], [3.75], [3.65], [3.58], [3.52], [3.46], [3.34],
[3.29], [3.18], [3.11], [3.01], [2.97], [2.87], [2.79], [2.69], [2.59],
[2.52], [2.44], [2.39], [2.29], [2.22], [2.15], [2.08], [2.03], [1.92],
[1.82], [1.74], [1.68], [1.62], [1.55], [1.46], [1.42], [1.33], [1.27],
[1.19], [1.14], [1.08], [1.03], [1.0], [0.93], [0.9], [0.85], [0.81],
[0.78], [0.77], [0.73], [0.72], [0.69], [0.68], [0.66], [0.66], [0.65],
[0.64], [0.64], [0.63], [0.63], [0.62], [0.61], [0.59], [0.59], [0.57],
[0.56], [0.57], [0.56], [0.56], [0.55], [0.61], [0.61], [0.61], [0.61],
[0.59], [0.61], [0.61], [0.61], [0.59], [0.59], [0.57], [0.55], [0.56],
[0.56], [0.57], [0.57], [0.6], [0.56], [0.57], [0.57], [0.57], [0.56],
[0.56], [0.57], [0.58], [0.57], [0.56], [0.55], [0.58], [0.57], [0.75],
[0.74], [0.56], [0.56], [0.61], [0.55], [0.68], [0.55], [0.93], [0.71],
[0.7], [0.74], [0.7], [0.69], [0.69], [0.52], [0.69], [0.52], [0.67],
[0.52], [0.52], [0.52], [0.72], [0.65], [0.7], [0.71], [0.82], [0.93],
[0.55], [0.74], [0.72], [0.75], [0.57], [0.58], [0.63], [0.63], [0.65],
[0.69], [0.72], [0.79], [0.82], [0.83], [0.89], [0.94], [0.96], [1.03],
[1.07], [1.11], [1.13], [1.17], [1.21], [1.23], [1.29], [1.32], [1.36],
[1.41], [1.46], [1.5], [1.55], [1.58], [1.62], [1.65], [1.72], [1.74],
[1.78], [1.82], [1.88], [1.91], [1.96], [2.01], [2.06], [2.11], [2.17],
[2.24], [2.27], [2.31], [2.35], [2.4], [2.45], [2.48], [2.56], [2.59],
[2.65], [2.68], [2.74], [2.78], [2.83], [2.86], [2.92], [2.98], [3.01],
[3.1], [3.11], [3.18], [3.24], [3.26], [3.31], [3.42], [3.51], [3.56],
[3.61], [3.68], [3.67], [3.78], [3.82], [3.83], [3.89], [3.93], [4.03],
[4.06], [4.11], [4.19], [4.25], [4.27], [4.36], [4.42], [4.48], [4.53],
[4.6], [4.62], [4.66], [4.76], [4.81], [4.91], [5.04], [4.96], [5.07],
[5.23], [5.26], [5.33], [5.34], [5.43], [5.43], [5.53], [5.58], [5.61],
[5.68], [5.72], [5.77], [5.9], [5.95], [6.04], [6.17], [6.15], [6.27],
[6.21], [6.3], [6.36], [6.39], [6.45], [6.54], [6.7], [6.84], [6.78],
[6.95], [6.83], [6.91], [6.89], [7.04], [7.22], [7.39], [7.42], [7.38],
[7.36], [7.38], [7.45], [7.49], [7.51], [7.38], [7.44], [7.43], [7.43],
[7.37], [7.4], [7.36], [7.43], [7.29], [7.26], [7.12], [7.08], [6.96],
[6.84], [6.91], [6.78], [6.77], [6.72], [6.63], [6.62], [6.55], [6.51],
[6.39], [6.26], [6.07], [6.02], [5.87], [5.85], [5.79], [5.74], [5.72],
[5.63], [5.58], [5.51], [5.47], [5.38], [5.3], [5.25], [5.14], [5.08],
[5.06], [4.95], [4.98], [4.96], [4.84], [4.83], [4.78], [4.75], [4.69],
[4.65], [4.54], [4.47], [4.4], [4.35], [4.5], [4.21], [4.14], [4.1],
[4.01], [3.95], [3.86], [3.74], [3.69], [3.63], [3.58], [3.5], [3.46],
[3.4], [3.34], [3.27], [3.21], [3.15], [3.02], [2.99], [2.94], [2.85],
[2.78], [2.7], [2.65], [2.61], [2.54], [2.5], [2.45], [2.4], [2.31],
[2.24], [2.19], [2.16], [2.09], [2.01], [1.95], [1.91], [1.89], [1.83],
[1.77], [1.73], [1.7], [1.64], [1.58], [1.52], [1.48], [1.43], [1.4],
[1.36], [1.34], [1.31], [1.27], [1.27], [1.23], [1.22], [1.2], [1.2],
[1.29], [1.27], [1.26], [1.3], [1.35], [1.4], [1.42], [1.45], [1.49],
[1.54], [1.64], [1.69], [1.78], [1.82], [1.88], [1.93], [1.96], [1.98],
[2.04], [2.1], [2.13], [2.19], [2.26], [2.3], [2.35], [2.42], [2.47],
[2.52], [2.62], [2.68], [2.71], [2.76], [2.79], [2.8], [2.85], [2.89],
[2.94], [3.03], [3.1], [3.17], [3.24], [3.25], [3.3], [3.33], [3.37],
[3.42], [3.45], [3.5], [3.54], [3.59], [3.66], [3.69], [3.78], [3.84],
[3.87], [3.91], [4.01], [4.02], [4.05], [4.1], [4.18], [4.24], [4.32],
[4.41], [4.51], [4.54], [4.67], [4.69], [4.7], [4.73], [4.79], [4.87],
[4.94], [5.07], [5.14], [5.17], [5.22], [5.32], [5.42], [5.49], [5.52],
[5.59], [5.64], [5.68], [5.79], [5.91], [5.83], [5.92], [5.99], [6.05],
[6.09], [6.16], [6.23], [6.43], [6.44], [6.51], [6.52], [6.74], [6.65],
[6.66], [6.62], [6.77], [6.67], [6.84], [6.88], [6.97], [6.87], [6.88],
[7.01], [7.16], [7.16], [7.31], [7.44], [7.4], [7.47], [7.51], [7.55],
[7.49], [7.54], [7.61], [7.6], [7.57], [7.5], [7.71], [7.62], [7.53],
[7.56], [7.53], [7.54], [7.48], [7.49], [7.41], [7.32], [7.22], [7.12],
[7.1], [7.02], [6.91], [6.9], [6.93], [6.85], [6.8], [6.75], [6.6],
[6.62], [6.48], [6.35], [6.27], [6.2], [6.09], [6.14], [5.95], [5.89],
[5.67], [5.62], [5.52], [5.47], [5.39], [5.33], [5.18], [5.11], [5.05],
[4.92], [4.89], [4.75], [4.71], [4.58], [4.49], [4.39], [4.29], [4.22],
[4.13], [4.07], [4.0], [3.88], [3.8], [3.75], [3.63], [3.59], [3.52],
[3.44], [3.34], [3.25], [3.15], [3.06], [3.0], [2.94], [2.83], [2.75],
[2.65], [2.56], [2.51], [2.44], [2.34], [2.27], [2.21], [2.15], [2.09],
[2.01], [1.95], [1.89], [1.83], [1.76], [1.68], [1.64], [1.55], [1.46],
[1.41], [1.34], [1.26], [1.24], [1.15], [1.11], [1.06], [0.99], [0.95],
[0.9], [0.83], [0.79], [0.75], [0.71], [0.67], [0.6], [0.56], [0.55],
[0.56], [0.61], [0.53], [2.49], [1000]]
print(len(data)) # 824
for i in range(750):
d = []
for j in range(10):
d.append(data[i + j])
d = np.ravel(d)
m = moving_average(d, 8, type='simple')
print(m)
| 63.166667 | 83 | 0.333173 |
11149a0fbf72edf9f65a573647189aedcbd0a01f | 309 | py | Python | tests/test_utils.py | wxy6655/pymycobot | 504716257af7b36c9750929ad3b0b7fe96582e14 | [
"MIT"
] | 37 | 2021-01-16T13:57:06.000Z | 2022-03-27T06:16:37.000Z | tests/test_utils.py | wxy6655/pymycobot | 504716257af7b36c9750929ad3b0b7fe96582e14 | [
"MIT"
] | 27 | 2021-01-18T08:09:08.000Z | 2022-03-08T01:25:33.000Z | tests/test_utils.py | wxy6655/pymycobot | 504716257af7b36c9750929ad3b0b7fe96582e14 | [
"MIT"
] | 19 | 2021-01-19T01:52:42.000Z | 2022-02-10T06:44:06.000Z | import os
import sys
# Add relevant ranger module to PATH... there surely is a better way to do this...
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
from pymycobot import utils
# List every serial port pymycobot can see, then try to auto-detect the robot.
port = utils.get_port_list()
print(port)
detect_result = utils.detect_port_of_basic()
print(detect_result)
| 22.071429 | 82 | 0.754045 |
1117ea4f825935be2c13190135ae12facb794dea | 3,347 | py | Python | backend_api/vozila_specials/migrations/0001_initial.py | KoliosterNikolayIliev/vozila_backend | a1c5036a77cb78d7968bbcc6e66e9015c982be8b | [
"MIT"
] | null | null | null | backend_api/vozila_specials/migrations/0001_initial.py | KoliosterNikolayIliev/vozila_backend | a1c5036a77cb78d7968bbcc6e66e9015c982be8b | [
"MIT"
] | 4 | 2021-09-08T09:25:21.000Z | 2022-02-20T12:14:04.000Z | backend_api/vozila_specials/migrations/0001_initial.py | KoliosterNikolayIliev/vozila_backend | a1c5036a77cb78d7968bbcc6e66e9015c982be8b | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-08-04 18:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates ProjectPost plus its LikeP / DisLikeP /
    CommentP relation tables, all keyed to the configured user model."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='ProjectPost',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=30)),
                ('image', models.ImageField(upload_to='images')),
                ('date_created', models.DateTimeField(auto_now_add=True, null=True)),
                ('video', models.URLField(blank=True)),
                # Counters are denormalized and maintained in code, hence not editable.
                ('likes_count', models.IntegerField(default=0, editable=False)),
                ('dislikes_count', models.IntegerField(default=0, editable=False)),
                ('description', models.TextField(blank=True)),
                ('vehicle_type', models.CharField(choices=[('car', 'Car'), ('motorcycle', 'Motorcycle'), ('motor-home', 'Motor-home')], default=None, max_length=50)),
                ('special_post_type', models.CharField(choices=[('rims', 'Rims'), ('electric-mods', 'Electric-mods')], default='Other', max_length=50)),
                ('brands', models.CharField(choices=[('Audi', 'Audi'), ('BMW', 'BMW'), ('Mercedes', 'Mercedes'), ('Renault', 'Renault'), ('Citroen', 'Citroen'), ('Lamborghini', 'Lamborghini'), ('Ferrari', 'Ferrari'), ('Lada', 'Lada')], default=None, max_length=50)),
                ('owner', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='project_posts', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='LikeP',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vozila_specials.projectpost')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='DisLikeP',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vozila_specials.projectpost')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='CommentP',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                ('date_created', models.DateTimeField(auto_now_add=True, null=True)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vozila_specials.projectpost')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 54.868852 | 266 | 0.612788 |
11185bdb3235153c501d87ad0d59a5aa426df74b | 385 | py | Python | inhouse/users/migrations/0011_alter_user_username.py | samuelmmorse/In-House | 9a6e103c0d53598e0b9028754fbc1fdc830cf9bd | [
"Apache-2.0"
] | 1 | 2022-02-03T18:15:09.000Z | 2022-02-03T18:15:09.000Z | inhouse/users/migrations/0011_alter_user_username.py | samuelmmorse/In-House | 9a6e103c0d53598e0b9028754fbc1fdc830cf9bd | [
"Apache-2.0"
] | 37 | 2022-02-02T21:30:16.000Z | 2022-03-08T16:18:48.000Z | inhouse/users/migrations/0011_alter_user_username.py | samuelmmorse/In-House | 9a6e103c0d53598e0b9028754fbc1fdc830cf9bd | [
"Apache-2.0"
] | null | null | null | # Generated by Django 4.0.1 on 2022-02-21 03:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("users", "0010_alter_user_picture"),
]
operations = [
migrations.AlterField(
model_name="user",
name="username",
field=models.CharField(max_length=200),
),
]
| 20.263158 | 51 | 0.597403 |
111a060dfd860a5ffaba0f5cb789e1d77010aef4 | 1,742 | py | Python | PyFlow/Packages/DepthAI_Device/Nodes/NeuralNetwork/NeuralNetworkNode.py | AsherVo/depthai-gui | f6d5da7c00f09239d07ff77dd2e4433d40e43633 | [
"Apache-2.0"
] | 46 | 2021-01-05T13:41:54.000Z | 2022-03-29T09:47:20.000Z | PyFlow/Packages/DepthAI_Device/Nodes/NeuralNetwork/NeuralNetworkNode.py | AsherVo/depthai-gui | f6d5da7c00f09239d07ff77dd2e4433d40e43633 | [
"Apache-2.0"
] | 7 | 2021-01-29T22:26:05.000Z | 2022-02-24T10:16:35.000Z | PyFlow/Packages/DepthAI_Device/Nodes/NeuralNetwork/NeuralNetworkNode.py | AsherVo/depthai-gui | f6d5da7c00f09239d07ff77dd2e4433d40e43633 | [
"Apache-2.0"
] | 10 | 2021-03-11T15:00:40.000Z | 2022-03-24T02:28:39.000Z | from pathlib import Path
from common import DeviceNode, get_property_value
from PyFlow.Core.Common import *
from PyFlow.Core.NodeBase import NodePinsSuggestionsHelper
class NeuralNetworkNode(DeviceNode):
    """PyFlow node wrapping a DepthAI NeuralNetwork pipeline stage.

    Exposes an 'in' pin (any type, multiple connections), a 'blob' string pin
    holding the compiled model path, and an 'out' neural-tensor pin.
    """
    def __init__(self, name):
        super(NeuralNetworkNode, self).__init__(name)
        self.input = self.createInputPin('in', 'AnyPin')
        self.blob = self.createInputPin('blob', 'StringPin')
        self.out = self.createOutputPin('out', 'NeuralTensorPin')
        # Show a file-picker widget for the blob path in the editor.
        self.blob.setInputWidgetVariant("FilePathWidget")
        self.input.enableOptions(PinOptions.AllowAny)
        self.input.enableOptions(PinOptions.AllowMultipleConnections)
        self.out.enableOptions(PinOptions.AllowMultipleConnections)

    @staticmethod
    def pinTypeHints():
        """Advertise the pin data types/structures this node accepts."""
        helper = NodePinsSuggestionsHelper()
        helper.addInputDataType('AnyPin')
        helper.addInputDataType('StringPin')
        helper.addOutputDataType('NeuralTensorPin')
        helper.addInputStruct(StructureType.Multi)
        helper.addOutputStruct(StructureType.Multi)
        return helper

    @staticmethod
    def category():
        """Editor palette category for this node."""
        return 'NeuralNetwork'

    @staticmethod
    def keywords():
        """Search keywords for the node browser (none)."""
        return []

    @staticmethod
    def description():
        return "Description in rst format."

    def build_pipeline(self, pipeline):
        """Create the DepthAI NeuralNetwork stage and register its pins.

        Raises RuntimeError when the 'blob' property is unset or empty.
        """
        detection_nn = pipeline.createNeuralNetwork()
        path = get_property_value(self, "blob")
        if path is None or len(path) == 0:
            raise RuntimeError(f"Blob file path must be set in the {self.name} node")
        detection_nn.setBlobPath(str(Path(path).resolve().absolute()))
        # Map pin names to the underlying DepthAI endpoints for later wiring.
        self.connection_map["out"] = detection_nn.out
        self.connection_map["in"] = detection_nn.input