hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5ed80b7eb69a728a482933d1c1030be79406a25c | 4,090 | py | Python | examples/adafruit_io_mqtt/adafruit_io_groups.py | ryanplusplus/Adafruit_CircuitPython_AdafruitIO | 5efcef473a7223e537c1910179da7fffd90a9185 | [
"Unlicense",
"MIT-0",
"MIT"
] | 47 | 2021-02-15T23:02:36.000Z | 2022-03-04T21:30:03.000Z | examples/adafruit_io_mqtt/adafruit_io_groups.py | ryanplusplus/Adafruit_CircuitPython_AdafruitIO | 5efcef473a7223e537c1910179da7fffd90a9185 | [
"Unlicense",
"MIT-0",
"MIT"
] | 43 | 2019-02-22T03:20:22.000Z | 2022-01-14T15:44:19.000Z | examples/adafruit_io_mqtt/adafruit_io_groups.py | ryanplusplus/Adafruit_CircuitPython_AdafruitIO | 5efcef473a7223e537c1910179da7fffd90a9185 | [
"Unlicense",
"MIT-0",
"MIT"
] | 28 | 2019-02-19T21:35:20.000Z | 2022-03-27T17:43:31.000Z | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
# Subscribing to an Adafruit IO Group
# and Publishing to the feeds in the group
import time
from random import randint
import board
import busio
from digitalio import DigitalInOut
from adafruit_esp32spi import adafruit_esp32spi
from adafruit_esp32spi import adafruit_esp32spi_wifimanager
import adafruit_esp32spi.adafruit_esp32spi_socket as socket
import neopixel
import adafruit_minimqtt.adafruit_minimqtt as MQTT
from adafruit_io.adafruit_io import IO_MQTT
### WiFi ###
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
# If you are using a board with pre-defined ESP32 Pins:
esp32_cs = DigitalInOut(board.ESP_CS)
esp32_ready = DigitalInOut(board.ESP_BUSY)
esp32_reset = DigitalInOut(board.ESP_RESET)
# If you have an externally connected ESP32:
# esp32_cs = DigitalInOut(board.D9)
# esp32_ready = DigitalInOut(board.D10)
# esp32_reset = DigitalInOut(board.D5)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
"""Use below for Most Boards"""
status_light = neopixel.NeoPixel(
board.NEOPIXEL, 1, brightness=0.2
) # Uncomment for Most Boards
"""Uncomment below for ItsyBitsy M4"""
# status_light = dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1, brightness=0.2)
# Uncomment below for an externally defined RGB LED
# import adafruit_rgbled
# from adafruit_esp32spi import PWMOut
# RED_LED = PWMOut.PWMOut(esp, 26)
# GREEN_LED = PWMOut.PWMOut(esp, 27)
# BLUE_LED = PWMOut.PWMOut(esp, 25)
# status_light = adafruit_rgbled.RGBLED(RED_LED, BLUE_LED, GREEN_LED)
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)
# Define callback functions which will be called when certain events happen.
# pylint: disable=unused-argument
def connected(client):
# Connected function will be called when the client is connected to Adafruit IO.
# This is a good place to subscribe to feed changes. The client parameter
# passed to this function is the Adafruit IO MQTT client so you can make
# calls against it easily.
print("Connected to Adafruit IO!")
# Subscribe to Group
io.subscribe(group_key=group_name)
# pylint: disable=unused-argument
def disconnected(client):
# Disconnected function will be called when the client disconnects.
print("Disconnected from Adafruit IO!")
# pylint: disable=unused-argument
def message(client, feed_id, payload):
# Message function will be called when a subscribed feed has a new value.
# The feed_id parameter identifies the feed, and the payload parameter has
# the new value.
print("Feed {0} received new value: {1}".format(feed_id, payload))
# Connect to WiFi
print("Connecting to WiFi...")
wifi.connect()
print("Connected!")
# Initialize MQTT interface with the esp interface
MQTT.set_socket(socket, esp)
# Initialize a new MQTT Client object
mqtt_client = MQTT.MQTT(
broker="io.adafruit.com",
username=secrets["aio_username"],
password=secrets["aio_key"],
)
# Initialize an Adafruit IO MQTT Client
io = IO_MQTT(mqtt_client)
# Connect the callback methods defined above to Adafruit IO
io.on_connect = connected
io.on_disconnect = disconnected
io.on_message = message
# Group name
group_name = "weatherstation"
# Feeds within the group
temp_feed = "weatherstation.temperature"
humid_feed = "weatherstation.humidity"
# Connect to Adafruit IO
print("Connecting to Adafruit IO...")
io.connect()
print("Publishing new messages to group feeds every 5 seconds...")
while True:
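    # io.loop() services the MQTT connection and dispatches any pending messages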
io.loop()
temp_reading = randint(0, 100)
print("Publishing value {0} to feed: {1}".format(temp_reading, temp_feed))
io.publish(temp_feed, temp_reading)
humid_reading = randint(0, 100)
print("Publishing value {0} to feed: {1}".format(humid_reading, humid_feed))
io.publish(humid_feed, humid_reading)
time.sleep(5)
| 31.705426 | 88 | 0.763081 | 587 | 4,090 | 5.194208 | 0.315162 | 0.036077 | 0.019679 | 0.02099 | 0.135126 | 0.083306 | 0.055756 | 0.03411 | 0.03411 | 0.03411 | 0 | 0.023926 | 0.151834 | 4,090 | 128 | 89 | 31.953125 | 0.855001 | 0.424939 | 0 | 0 | 0 | 0 | 0.190072 | 0.021914 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0.016667 | 0.216667 | 0 | 0.266667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ed8398d08d23ce47895ea9d5f261522c86dd1a4 | 1,082 | py | Python | setup.py | duanyytop/ckb-sdk-python | 4c7b1676fbb130abfe795290ebba32c1d6d97a08 | [
"MIT"
] | 1 | 2021-09-17T06:05:20.000Z | 2021-09-17T06:05:20.000Z | setup.py | duanyytop/ckb-sdk-python | 4c7b1676fbb130abfe795290ebba32c1d6d97a08 | [
"MIT"
] | null | null | null | setup.py | duanyytop/ckb-sdk-python | 4c7b1676fbb130abfe795290ebba32c1d6d97a08 | [
"MIT"
] | null | null | null | import os
import io
from setuptools import setup, find_packages
HERE = os.path.dirname(os.path.realpath(__file__))
README = os.path.join(HERE, 'README.md')
with io.open(README, encoding='utf-8') as f:
long_description = f.read()
VERSION = os.path.join(HERE, 'ckb_toolkit', 'version.py')
with io.open(VERSION, encoding='utf-8') as f:
package = {}
exec(f.read(), package)
version = package['VERSION']
setup(name='ckb-toolkit',
version=version,
description='Nervos CKB Toolkit',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/duanyytop/ckb-python-toolkit',
author='dylan',
author_email='dylan@nervina.io',
license='MIT',
packages=find_packages(),
install_requires=['jsonrpcclient[requests]',
'typing-extensions', 'coincurve'],
scripts=[],
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
],
include_package_data=True,
)
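# With this setup.py in place, a local (editable) install is simply: pip install -e .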
| 29.243243 | 60 | 0.641405 | 127 | 1,082 | 5.322835 | 0.566929 | 0.035503 | 0.029586 | 0.04142 | 0.044379 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003529 | 0.214418 | 1,082 | 36 | 61 | 30.055556 | 0.791765 | 0 | 0 | 0 | 0 | 0 | 0.245841 | 0.021257 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.09375 | 0 | 0.09375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ed86b6aaa7016a6e2d535463b8dced4480c51e4 | 4,672 | py | Python | ocbot/web/routes_slack.py | OperationCode/operationcode_pyback | c4b8d466baaef16f005c35b290cb48e597a65a32 | [
"MIT"
] | 16 | 2018-04-03T17:19:51.000Z | 2020-09-12T16:00:43.000Z | ocbot/web/routes_slack.py | OperationCode/operation_code_pybot | c4b8d466baaef16f005c35b290cb48e597a65a32 | [
"MIT"
] | 34 | 2017-10-31T15:53:15.000Z | 2018-03-02T17:24:13.000Z | ocbot/web/routes_slack.py | OperationCode/operationcode_pyback | c4b8d466baaef16f005c35b290cb48e597a65a32 | [
"MIT"
] | 10 | 2018-03-24T01:58:06.000Z | 2021-01-21T06:50:54.000Z | import logging
import threading
from flask import request, make_response, json, redirect, url_for
from config.configs import configs
from ocbot import app
from ocbot.pipeline.routing import RoutingHandler
from ocbot.pipeline.slash_command_handlers.log_handlers import can_view_logs, get_temporary_url, handle_log_view
from ocbot.pipeline.slash_command_handlers.lunch_handler import create_lunch_event
from ocbot.pipeline.slash_command_handlers.testgreet_handler import can_test, create_testgreet_event
from ocbot.web.route_decorators import url_verification, validate_response
VERIFICATION_TOKEN = configs['VERIFICATION_TOKEN']
logger = logging.getLogger(__name__)
logger.level = logging.DEBUG
@app.route('/event_endpoint', methods=['POST'])
@url_verification
@validate_response('token', VERIFICATION_TOKEN, 'json')
def events_route():
"""
    Any event-based response will get routed here.
    Decorators first make sure this is a verified route and that it isn't a challenge event.
    Lastly, the event data is forwarded to the route director.
"""
response_data = request.get_json()
logger.debug(f'Event received: {json.dumps(response_data)}')
route_id = response_data['event']['type']
threading.Thread(target=RoutingHandler,
kwargs={"json_data": response_data, 'route_id': route_id}).start()
return make_response('', 200)
@app.route("/user_interaction", methods=['POST'])
@validate_response('token', VERIFICATION_TOKEN, 'form')
def interaction_route():
"""
Receives request from slack interactive messages.
These are the messages that contain key: 'token_id'
"""
data = json.loads(request.form['payload'])
logger.info(f"Interaction received: {data}")
route_id = data['callback_id']
threading.Thread(target=RoutingHandler,
kwargs={'json_data': data, 'route_id': route_id}).start()
return make_response('', 200)
@app.route("/zap_airtable_endpoint", methods=['POST'])
def zap_endpoint():
"""
Endpoint for Zapier to send events when a new
mentor request in submitted in airtable
"""
data = request.get_json()
logger.info(f'Zapier event received: {data}')
threading.Thread(target=RoutingHandler,
kwargs={'json_data': data, 'route_id': 'new_airtable_request'}).start()
return make_response((json.dumps({'status': 'ok'}), 200))
@app.route('/test/testgreet', methods=['POST'])
@validate_response('token', VERIFICATION_TOKEN, 'values')
def test_greet():
"""
Endpoint for simulating a Slack 'team_join' event.
Sends the notification to whichever channel the user
was in when running the slash-command
"""
req = request.values
logger.info(f"testgreet received from {req['user_name']} : {req}")
if not can_test(req['user_id']):
logger.info(f"{req['user_name']} attempted to testgreet and was denied")
return make_response("You are not authorized to do that.", 200)
event = create_testgreet_event(req)
threading.Thread(target=RoutingHandler,
kwargs={'json_data': event, 'route_id': 'team_join'}).start()
return make_response('Test completed.', 200)
@app.route("/get_logs", methods=['POST'])
@validate_response('token', VERIFICATION_TOKEN, 'values')
def get_logs():
"""
Endpoint used by Slack /logs command
"""
req = request.values
logger.info(f'Log request received: {req}')
if not can_view_logs(req['user_id']):
logger.info(f"{req['user_name']} attempted to view logs and was denied")
return make_response("You are not authorized to do that.", 200)
url = get_temporary_url(req['user_id'], req['text'])
logger.info(f"Created log URL for {req['user_name']} : {url.url}")
return make_response(f'{request.host_url}logs/{url.url}', 200)
@app.route('/lunch', methods=['POST'])
@validate_response('token', VERIFICATION_TOKEN, 'values')
def random_lunch():
"""
Endpoint for getting random lunch event.
Sends the notification to whichever channel the user
was in when running the slash-command
"""
req = request.values
logger.info(f"Lunch request received from {req['user_name']} : {req}")
lunch_val = create_lunch_event(req)
return make_response(lunch_val, 200)
@app.route("/logs/<variable>")
def show_logs(variable):
"""
Routes user to log
:param variable: Randomly generated string
"""
return handle_log_view(variable)
@app.route('/options_load', methods=['POST'])
def options_route():
"""
Can provide dynamically created options for interactive messages.
Currently unused.
"""
return redirect(url_for('HTTP404'))
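
# Note: the event/interaction routes above return HTTP 200 immediately and hand
# the real work to RoutingHandler on a background thread, which keeps Slack's
# 3-second response deadline satisfied.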
| 34.607407 | 112 | 0.704623 | 611 | 4,672 | 5.206219 | 0.252046 | 0.033952 | 0.045269 | 0.05187 | 0.388871 | 0.361836 | 0.310594 | 0.253694 | 0.253694 | 0.198994 | 0 | 0.006968 | 0.170591 | 4,672 | 134 | 113 | 34.865672 | 0.813935 | 0.175086 | 0 | 0.186667 | 0 | 0 | 0.23883 | 0.021933 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106667 | false | 0 | 0.133333 | 0 | 0.373333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d57ec22c55b9fe1a9b170349aaaf1d68422b908 | 2,936 | py | Python | searchconsole/account.py | symbiotech/google-searchconsole | 0f3a18c58fc74b91c6ad9e08b442ba80f0c2502e | [
"MIT"
] | null | null | null | searchconsole/account.py | symbiotech/google-searchconsole | 0f3a18c58fc74b91c6ad9e08b442ba80f0c2502e | [
"MIT"
] | null | null | null | searchconsole/account.py | symbiotech/google-searchconsole | 0f3a18c58fc74b91c6ad9e08b442ba80f0c2502e | [
"MIT"
] | null | null | null | # encoding: utf-8
from . import query
class Account:
"""
An account can be associated with a number of web
properties.
You should navigate to a web property to run queries.
Usage:
>>> import searchconsole
>>> account = searchconsole.authenticate(
... client_config='auth/client_secrets.json',
... credentials='auth/credentials.dat'
... )
>>> account
<searchconsole.account.Account(client_id='...')>
>>> account[0]
<searchconsole.account.WebProperty(url='...')>
>>> account[www_webproperty_com]
<searchconsole.account.WebProperty(url='...')>
"""
def __init__(self, service, credentials):
self.service = service
self.credentials = credentials
@property
def webproperties(self):
"""
A list of all web properties associated with this account. You may
select a specific web property using an index or by indexing the
account directly with the properties exact URI.
Usage:
>>> account.webproperties[0]
<searchconsole.account.WebProperty(url='...')>
"""
raw_properties = self.service.sites().list().execute().get(
'siteEntry', [])
return [WebProperty(raw, self) for raw in raw_properties]
def __getitem__(self, item):
if isinstance(item, str):
properties = [p for p in self.webproperties if p.url == item]
web_property = properties[0] if properties else None
else:
web_property = self.webproperties[item]
return web_property
def __repr__(self):
if hasattr(self.credentials, 'client_id'):
repr_str = "client_id='{}'".format(self.credentials.client_id)
else:
repr_str = "email='{}'".format(self.credentials.signer_email)
return "<searchconsole.account.Account({})>".format(repr_str)
class WebProperty:
"""
A web property is a particular website you're tracking
in Google Search Console. You will use a web property
to make your Search Analytics queries.
Usage:
>>> webproperty = account[www_webproperty_com]
>>> webproperty.query.range(start='today', days=-7).dimension('date').get()
<searchconsole.query.Report(rows=...)>
"""
permission_levels = {
'siteFullUser': 1,
'siteOwner': 2,
'siteRestrictedUser': 3,
'siteUnverifiedUser': 4
}
def __init__(self, raw, account):
self.account = account
self.raw = raw
self.url = raw['siteUrl']
self.permission = raw['permissionLevel']
self.query = query.Query(self)
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.__dict__ == other.__dict__
return False
def __repr__(self):
return "<searchconsole.account.WebProperty(url='{site_url}')>".format(
site_url=self.url
)
| 29.959184 | 79 | 0.61921 | 320 | 2,936 | 5.5 | 0.378125 | 0.04375 | 0.070455 | 0.077273 | 0.039773 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00414 | 0.259537 | 2,936 | 97 | 80 | 30.268041 | 0.805428 | 0.371935 | 0 | 0.090909 | 0 | 0 | 0.12515 | 0.052695 | 0 | 0 | 0 | 0 | 0 | 1 | 0.159091 | false | 0 | 0.022727 | 0.022727 | 0.386364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d5944cf18ff876b7a19f160fdee473e30de7ffb | 2,546 | py | Python | src/pytorch_adapt/frameworks/lightning/lightning.py | MarkusSagen/pytorch-adapt | 947b9f1b748d2078cecbf4a00c34f73108d9ecde | [
"MIT"
] | 1 | 2021-12-15T19:36:01.000Z | 2021-12-15T19:36:01.000Z | src/pytorch_adapt/frameworks/lightning/lightning.py | MarkusSagen/pytorch-adapt | 947b9f1b748d2078cecbf4a00c34f73108d9ecde | [
"MIT"
] | null | null | null | src/pytorch_adapt/frameworks/lightning/lightning.py | MarkusSagen/pytorch-adapt | 947b9f1b748d2078cecbf4a00c34f73108d9ecde | [
"MIT"
] | null | null | null | from collections import defaultdict
import pytorch_lightning as pl
import torch
from ...containers import Optimizers
from .. import utils as f_utils
def set_adapter_optimizers_to_pl(adapter, pl_optimizers):
if isinstance(adapter.optimizers, Optimizers):
keys = adapter.optimizers.keys()
adapter.optimizers = {k: v for k, v in zip(keys, pl_optimizers)}
def single_dataloader_collect(outputs):
outputs_as_dict = defaultdict(list)
for x in outputs:
for k, v in x.items():
outputs_as_dict[k].append(v)
return {k: torch.cat(v) for k, v in outputs_as_dict.items()}
def multi_dataloader_collect(outputs):
return [single_dataloader_collect(x) for x in outputs]
class Lightning(pl.LightningModule):
def __init__(self, adapter, validator=None):
super().__init__()
self.models = torch.nn.ModuleDict(adapter.models)
self.misc = torch.nn.ModuleDict(adapter.misc)
adapter.models = self.models
adapter.misc = self.misc
self.validator = validator
self.adapter = adapter
self.automatic_optimization = False
def forward(self, x, domain=None):
return self.adapter.inference(x, domain=domain)
def training_step(self, batch, batch_idx):
set_adapter_optimizers_to_pl(self.adapter, self.optimizers())
losses = self.adapter.training_step(
batch,
custom_backward=self.manual_backward,
)
for k, v in losses.items():
self.log(k, v)
def validation_step(self, batch, batch_idx, dataloader_idx=0):
return f_utils.collector_step(self, batch, f_utils.create_output_dict)
def validation_epoch_end(self, outputs):
required_data = self.validator.required_data
if len(required_data) > 1:
outputs = multi_dataloader_collect(outputs)
data = {k: v for k, v in zip(required_data, outputs)}
else:
outputs = single_dataloader_collect(outputs)
data = {required_data[0]: outputs}
score = self.validator.score(**data)
self.log("validation_score", score)
def configure_optimizers(self):
optimizers = list(self.adapter.optimizers.values())
lr_schedulers = []
for interval in ["epoch", "step"]:
for v in self.adapter.lr_schedulers.filter_by_scheduler_type(
f"per_{interval}"
):
lr_schedulers.append({"lr_scheduler": v, "interval": interval})
return optimizers, lr_schedulers
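
# Sketch of intended use (the adapter, validator, and datamodule objects are
# assumed to come from elsewhere in pytorch_adapt / the user's code):
#   module = Lightning(adapter, validator=validator)
#   pl.Trainer(max_epochs=10).fit(module, datamodule=datamodule)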
| 34.405405 | 79 | 0.66143 | 317 | 2,546 | 5.097792 | 0.271293 | 0.009901 | 0.01547 | 0.021658 | 0.075495 | 0.014851 | 0.014851 | 0 | 0 | 0 | 0 | 0.001553 | 0.241163 | 2,546 | 73 | 80 | 34.876712 | 0.834886 | 0 | 0 | 0 | 0 | 0 | 0.023174 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155172 | false | 0 | 0.086207 | 0.051724 | 0.344828 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d5a9be19d355c1b2904b2d86faf394e45e5df77 | 5,740 | py | Python | libvvtest/outpututils.py | rrdrake/vvtest | 875aaa81d2fac5986554b7b4fbeb2aebb3b57ac6 | [
"Unlicense"
] | 7 | 2021-03-26T21:33:09.000Z | 2022-03-01T17:06:55.000Z | libvvtest/outpututils.py | rrdrake/vvtest | 875aaa81d2fac5986554b7b4fbeb2aebb3b57ac6 | [
"Unlicense"
] | null | null | null | libvvtest/outpututils.py | rrdrake/vvtest | 875aaa81d2fac5986554b7b4fbeb2aebb3b57ac6 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# Copyright 2018 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
import os, sys
from os.path import join as pjoin
import time
import traceback
from . import pathutil
def XstatusString( tcase, test_dir, cwd ):
"""
Returns a formatted string containing the job and its status.
"""
ref = tcase.getSpec()
s = ''
skipreason = None
if tcase.getStat().skipTest():
skipreason = tcase.getStat().getReasonForSkipTest()
if skipreason:
s += '%-8s' % 'skip'
else:
s += '%-8s' % tcase.getStat().getResultStatus()
s += ' %7s' % format_test_run_time( tcase )
s += ' %14s' % format_test_run_date( tcase )
s += ' ' + location_display_string( ref, test_dir, cwd )
if skipreason:
s += ' skip_reason="'+skipreason+'"'
return s
def location_display_string( tspec, test_dir, cwd ):
""
displ = tspec.getDisplayString()
loc = pathutil.relative_execute_directory( displ, test_dir, cwd )
tid = tspec.getTestID()
if tid.computeExecuteDirectory( shorten=True ) != \
tid.computeExecuteDirectory( shorten=False ):
xdir = tspec.getExecuteDirectory()
xdir = pathutil.relative_execute_directory( xdir, test_dir, cwd )
loc = xdir + ' -> ' + loc
return loc
def get_log_file_path( testdir, tspec ):
""
fn = tspec.getLogFilename()
return pjoin( testdir, tspec.getExecuteDirectory(), fn )
def get_test_command_line( logfilename ):
""
cmdmark = 'Command : '
if os.path.isfile( logfilename ):
cmd = '*** could not get command line from log file: '+logfilename
try:
with open( logfilename, 'rt' ) as fp:
for line in fp:
if line.startswith( cmdmark ):
cmd = line.split( cmdmark, 1 )[1].strip()
break
except Exception:
pass
else:
cmd = '*** unable to get command line (log file missing): '+logfilename
return cmd
def file_read_with_limit( filename, max_KB ):
""
maxsize = max( 128, max_KB * 1024 )
fsz = os.path.getsize( filename )
buf = ''
if fsz < maxsize:
with open( filename, 'r' ) as fp:
buf = fp.read()
else:
hdr = int( float(maxsize) * 0.20 + 0.5 )
bot = fsz - int( float(maxsize) * 0.70 + 0.5 )
with open( filename, 'r' ) as fp:
buf = fp.read( hdr )
buf += '\n\n*** the middle of this file has been removed ***\n\n'
fp.seek( bot )
buf += fp.read()
return buf
def make_date_stamp( testdate, optrdate, timefmt="%Y_%m_%d" ):
""
if optrdate != None:
if type( optrdate ) == type(''):
datestr = optrdate
else:
tup = time.localtime( optrdate )
datestr = time.strftime( timefmt, tup )
else:
tup = time.localtime( testdate )
datestr = time.strftime( timefmt, tup )
return datestr
def partition_tests_by_result( tcaseL ):
""
parts = { 'fail':[], 'timeout':[], 'diff':[],
'pass':[], 'notrun':[], 'notdone':[],
'skip':[] }
for tcase in tcaseL:
if tcase.getStat().skipTest():
parts[ 'skip' ].append( tcase )
else:
result = tcase.getStat().getResultStatus()
parts[ result ].append( tcase )
return parts
def results_summary_string( testparts ):
""
sumL = []
for result in [ 'pass', 'fail', 'diff', 'timeout',
'notdone', 'notrun', 'skip' ]:
sumL.append( result+'='+str( len( testparts[result] ) ) )
return ', '.join( sumL )
def format_test_run_date( tcase ):
""
xdate = tcase.getStat().getStartDate( 0 )
if xdate > 0:
return time.strftime( "%m/%d %H:%M:%S", time.localtime(xdate) )
else:
return ''
def format_test_run_time( tcase ):
""
xtime = tcase.getStat().getRuntime( -1 )
if xtime < 0:
return ''
else:
return colon_separated_time( xtime )
def colon_separated_time( nseconds ):
""
hr,mn,sc = get_hour_minute_second( nseconds )
if hr == 0:
return '%d:%02d' % (mn,sc)
return '%d:%02d:%02d' % (hr,mn,sc)
def get_hour_minute_second( nseconds ):
""
h = int( nseconds / 3600 )
m = int( ( nseconds - 3600*h ) / 60 )
s = int( ( nseconds - 3600*h - 60*m ) + 0.5 )
return h,m,s
def pretty_time( nseconds ):
""
h,m,s = get_hour_minute_second( nseconds )
sh = str(h)+'h'
sm = str(m)+'m'
if h == 0 and m == 0 and s == 0:
s = 1
ss = str(s) + 's'
if h > 0: return sh+' '+sm+' '+ss
if m > 0: return sm+' '+ss
return ss
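
# e.g. colon_separated_time(3725) -> '1:02:05', pretty_time(3725) -> '1h 2m 5s'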
def print3( *args ):
sys.stdout.write( ' '.join( [ str(arg) for arg in args ] ) + '\n' )
sys.stdout.flush()
def capture_traceback( excinfo ):
"""
This should be called in an except block of a try/except, and the argument
should be sys.exc_info(). It extracts and formats the traceback for the
exception. Returns a pair ( the exception string, the full traceback ).
"""
xt,xv,xtb = excinfo
# the "exception only" function may return multiple lines, but the last
# line is always the exception description
xsL = traceback.format_exception_only( xt, xv )
xs = xsL[-1]
tb = 'Traceback (most recent call last):\n' + \
''.join( traceback.format_list(
traceback.extract_stack()[:-2] +
traceback.extract_tb( xtb ) ) ) + ''.join( xsL )
return xs,tb
| 26.451613 | 79 | 0.566551 | 702 | 5,740 | 4.537037 | 0.347578 | 0.026374 | 0.015699 | 0.017896 | 0.101413 | 0.018838 | 0.018838 | 0.018838 | 0.018838 | 0 | 0 | 0.018182 | 0.300523 | 5,740 | 216 | 80 | 26.574074 | 0.775093 | 0.106446 | 0 | 0.201342 | 0 | 0 | 0.074336 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.100671 | false | 0.020134 | 0.033557 | 0 | 0.248322 | 0.006711 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d5baa45120b260610692f203c5d97f0a7321995 | 13,956 | py | Python | bay/plugins/build.py | eventbrite/bay | 3a157457a55713067213aeafe385a1cc49aa2698 | [
"Apache-2.0"
] | 22 | 2017-03-01T19:52:30.000Z | 2020-09-09T16:27:21.000Z | bay/plugins/build.py | eventbrite/bay | 3a157457a55713067213aeafe385a1cc49aa2698 | [
"Apache-2.0"
] | 25 | 2017-02-23T23:08:56.000Z | 2019-10-09T00:02:52.000Z | bay/plugins/build.py | eventbrite/bay | 3a157457a55713067213aeafe385a1cc49aa2698 | [
"Apache-2.0"
] | 4 | 2017-03-12T03:51:17.000Z | 2021-09-08T11:36:36.000Z | import attr
import click
import datetime
import sys
from docker.errors import NotFound
from .base import BasePlugin
from ..cli.colors import CYAN, GREEN, RED, remove_ansi
from ..cli.argument_types import ContainerType, HostType
from ..cli.tasks import Task
from ..constants import PluginHook
from ..docker.build import Builder
from ..docker.introspect import FormationIntrospector
from ..docker.runner import FormationRunner
from ..exceptions import BuildFailureError, ImagePullFailure
from .gc import GarbageCollector
from ..utils.sorting import dependency_sort
def _get_providers(app):
providers = {}
for container in app.containers:
provides_volume = container.extra_data.get("provides-volume", None)
if provides_volume:
providers[provides_volume] = container
return providers
def _handle_build_failure(app, logfile_name):
click.echo(RED("Build failed! Last 15 lines of log:"))
# TODO: More efficient tailing
lines = []
with open(logfile_name, "r") as fh:
for line in fh:
lines = lines[-14:] + [line]
for line in lines:
click.echo(" " + remove_ansi(line).rstrip())
click.echo("See full build log at {log}".format(
log=click.format_filename(logfile_name)),
err=True
)
app.run_hooks(PluginHook.DOCKER_FAILURE)
sys.exit(1)
@attr.s
class BuildPlugin(BasePlugin):
"""
Plugin for building containers.
"""
provides = ["build"]
def load(self):
self.add_command(build)
self.add_catalog_type("registry")
self.add_hook(PluginHook.PRE_RUN_CONTAINER, self.pre_start)
self.add_hook(PluginHook.POST_BUILD, self.post_build)
def pre_start(self, host, instance, task):
"""
Safety net to stop you booting volume-providing containers normally,
and to catch and build volume containers if they're needed
"""
# Safety net
if instance.container.extra_data.get("provides-volume", None):
raise ValueError("You cannot run a volume-providing container {}".format(instance.container.name))
# If the container has named volumes, see if they're provided by anything else
# and if so, if they're built.
# First, collect what volumes are provided by what containers
providers = _get_providers(self.app)
# Now see if any of the volumes we're trying to add need it
for _, volume in instance.container.named_volumes.items():
name = volume.source
if name in providers:
# Alright, this is one that could be provided. Does it already exist?
try:
host.client.inspect_volume(name)
except NotFound:
# Aha! Build it!
try:
logfile_name = self.app.config.get_path(
'bay',
'build_log_path',
self.app,
)
Builder(
host,
providers[name],
self.app,
parent_task=task,
logfile_name=logfile_name,
verbose=True,
).build()
except BuildFailureError:
_handle_build_failure(self.app, logfile_name)
def post_build(self, host, container, task):
"""
Intercepts builds of volume-providing containers and unpacks them.
Volumes are stored with the ID of the corresponding volume-providing image. This will only run the container
        to recreate the volume if the image's ID (hash) has changed.
"""
image_details = host.client.inspect_image(container.image_name_tagged)
provides_volume = container.extra_data.get("provides-volume", None)
def should_extract_volume():
if not provides_volume:
return False
try:
volume_details = host.client.inspect_volume(provides_volume)
except NotFound:
return True
labels = volume_details.get("Labels") or {}
return labels.get("build_id") != image_details["Id"]
if should_extract_volume():
# Stop all containers that have the volume mounted
formation = FormationIntrospector(host, self.app.containers).introspect()
# Keep track of instances to remove after they are stopped
instances_to_remove = formation.get_instances_using_volume(provides_volume)
if instances_to_remove:
formation.remove_instances(instances_to_remove)
stop_task = Task("Stopping containers", parent=task)
FormationRunner(self.app, host, formation, stop_task).run()
stop_task.finish(status="Done", status_flavor=Task.FLAVOR_GOOD)
remove_task = Task("Removing containers", parent=task)
for instance in instances_to_remove:
host.client.remove_container(instance.name)
remove_task.update(status="Removed {}".format(instance.name))
remove_task.finish(status="Done", status_flavor=Task.FLAVOR_GOOD)
# Prune any orphan stopped containers, so we don't get conflict errors
GarbageCollector(host).gc_containers(task)
volume_task = Task("(Re)creating volume {}".format(provides_volume), parent=task)
# Recreate the volume with the new image ID
try:
host.client.remove_volume(provides_volume)
volume_task.update(status="Removed {}. Recreating".format(provides_volume))
except NotFound:
volume_task.update(status="Volume {} not found. Creating")
host.client.create_volume(provides_volume, labels={"build_id": image_details["Id"]})
# Configure the container
volume_mountpoints = ["/volume/"]
volume_binds = {provides_volume: {"bind": "/volume/", "mode": "rw"}}
container_pointer = host.client.create_container(
container.image_name,
detach=False,
volumes=volume_mountpoints,
host_config=host.client.create_host_config(
binds=volume_binds,
),
)
# Start it in the foreground so we wait till it exits (detach=False above)
volume_task.update(status="Extracting")
host.client.start(container_pointer)
host.client.wait(container_pointer["Id"])
host.client.remove_container(container_pointer["Id"])
volume_task.update(status="Done", status_flavor=Task.FLAVOR_GOOD)
@click.command()
@click.argument('containers', type=ContainerType(profile=True), nargs=-1)
@click.option('--host', '-h', type=HostType(), default='default')
@click.option('--cache/--no-cache', default=True)
@click.option('--recursive/--one', '-r/-1', default=True)
@click.option('--verbose/--quiet', '-v/-q', default=True)
# TODO: Add a proper requires_docker check
@click.pass_obj
def build(app, containers, host, cache, recursive, verbose):
"""
Build container images, along with its build dependencies.
"""
app.run_hooks(PluginHook.INIT_GROUP_BUILD)
# `bay build` is equivalent to `bay build profile`
if not containers:
containers = [ContainerType.Profile]
logfile_name = app.config.get_path('bay', 'build_log_path', app)
containers_to_pull = []
containers_to_build = []
pulled_containers = set()
failed_pulls = set()
task = Task("Building", parent=app.root_task)
start_time = datetime.datetime.now().replace(microsecond=0)
providers = _get_providers(app)
# Go through the containers, expanding "ContainerType.Profile" into a list
# of default boot containers in the profile.
profile = None
for container in containers:
if container is ContainerType.Profile:
profile = app.profiles[1]
for con in app.containers:
# When building the profile, rebuild system containers too
if app.containers.options(con).get('in_profile') or con.system:
containers_to_pull.append(con)
else:
containers_to_build.append(container)
for volume in container.named_volumes.values():
if volume.source in providers:
containers_to_build.append(providers[volume.source])
# Expand containers_to_pull (At this point just the default boot containers
# from profile) to include runtime dependencies.
containers_to_pull = dependency_sort(containers_to_pull, app.containers.dependencies)
# Expand containers_to_pull to include volumes that are required by all containers in the
# dependency chain.
def container_volume_dependencies(container):
volume_deps = set()
for volume in container.named_volumes.values():
if volume.source in providers:
volume_deps.add(providers[volume.source])
return volume_deps
containers_to_pull = dependency_sort(containers_to_pull, container_volume_dependencies)
if profile is not None and profile.ignore_dependencies:
# List the containers defined in the current profile and its ancestors
profile_containers = set(sum([list(p.containers.keys()) for p in app.profiles[1:]], []))
# If dependencies are ignored, only keep the containers defined in the profile
containers_to_pull = [c for c in containers_to_pull if c.name in profile_containers]
# Try pulling each container to pull, and add it to containers_to_build if
# it fails. If it works, remember we pulled it, so we don't have to pull it
# again later.
for container in containers_to_pull:
try:
host.images.pull_image_version(
app,
container.image_name,
container.image_tag,
parent_task=task,
fail_silently=False,
)
except ImagePullFailure:
failed_pulls.add(container)
containers_to_build.append(container)
else:
pulled_containers.add(container)
ancestors_to_build = []
# For each container to build, find its ancestry, trying to pull each
# ancestor and stopping short if it works.
for container in containers_to_build:
# Always add `container` to final build list, even if recursive is
# False.
ancestors_to_build.append(container)
if recursive:
# We need to look at the ancestry starting from the oldest, up to
# and not including the `container`
ancestry = app.containers.build_ancestry(container)
for ancestor in reversed(ancestry):
try:
# If we've already attempted to pull it and failed, short
# circuit to failure block.
if ancestor in failed_pulls:
raise ImagePullFailure("We've already attempted to pull this image, and failed.")
# Check if we've pulled it already
if ancestor not in pulled_containers:
host.images.pull_image_version(
app,
ancestor.image_name,
ancestor.image_tag,
parent_task=task,
fail_silently=False,
)
except ImagePullFailure:
failed_pulls.add(ancestor)
ancestors_to_build.insert(0, ancestor)
else:
# We've pulled the current ancestor successfully, so skip
# all the older ancestors.
pulled_containers.add(ancestor)
break
# Sort ancestors so we build the most depended on first.
sorted_ancestors_to_build = dependency_sort(ancestors_to_build,
lambda x: [app.containers.build_parent(x)])
# dependency_sort would insert back the pulled containers into the ancestry
# chain, so we only include ones that were in the list before
ancestors_to_build = [
container
for container in sorted_ancestors_to_build
if container in ancestors_to_build
]
task.add_extra_info(
"Order: {order}".format(
order=CYAN(", ".join([container.name for container in ancestors_to_build])),
),
)
app.run_hooks(PluginHook.PRE_GROUP_BUILD, host=host, containers=ancestors_to_build, task=task)
for container in ancestors_to_build:
image_builder = Builder(
host,
container,
app,
parent_task=task,
logfile_name=logfile_name,
docker_cache=cache,
verbose=verbose,
)
try:
image_builder.build()
except BuildFailureError:
app.run_hooks(PluginHook.CONTAINER_FAILURE, host=host, containers=ancestors_to_build, task=task)
_handle_build_failure(app, logfile_name)
app.run_hooks(PluginHook.POST_GROUP_BUILD, host=host, containers=ancestors_to_build, task=task)
task.finish(status="Done", status_flavor=Task.FLAVOR_GOOD)
# Show total build time metric after everything is complete
end_time = datetime.datetime.now().replace(microsecond=0)
time_delta_str = str(end_time - start_time)
if time_delta_str.startswith('0:'):
# no point in showing hours, unless it runs for more than one hour
time_delta_str = time_delta_str[2:]
click.echo("Total build time [{}]".format(GREEN(time_delta_str)))
| 42.036145 | 116 | 0.622743 | 1,609 | 13,956 | 5.234307 | 0.222498 | 0.016623 | 0.024697 | 0.012467 | 0.181192 | 0.153289 | 0.125505 | 0.106626 | 0.074092 | 0.04512 | 0 | 0.001428 | 0.297363 | 13,956 | 331 | 117 | 42.163142 | 0.857434 | 0.193537 | 0 | 0.197425 | 0 | 0 | 0.054451 | 0 | 0 | 0 | 0 | 0.006042 | 0 | 1 | 0.034335 | false | 0.004292 | 0.06867 | 0 | 0.133047 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d5c97ac015a15044eb61b7046d8427fedd68fcd | 889 | py | Python | nablapps/interactive/models/games.py | NablaWebkom/nablaweb | 9247c5e3f7b5d965d9437c74530638f925d0e9c6 | [
"MIT"
] | 1 | 2019-10-07T13:59:19.000Z | 2019-10-07T13:59:19.000Z | nablapps/interactive/models/games.py | NablaWebkom/nablaweb | 9247c5e3f7b5d965d9437c74530638f925d0e9c6 | [
"MIT"
] | 2 | 2019-10-07T14:47:37.000Z | 2019-10-07T14:49:49.000Z | nablapps/interactive/models/games.py | NablaWebkom/nablaweb | 9247c5e3f7b5d965d9437c74530638f925d0e9c6 | [
"MIT"
] | null | null | null | # Models for game_list
from django import template
from django.db import models
register = template.Library()
class Game(models.Model):
class Meta:
ordering = ["index"]
    index = models.IntegerField(help_text="Preferably use 10, 20, 30, etc.")
title = models.TextField()
url = models.TextField(
help_text="Denne lenken må være relativ ettersom den settes sammen med url-en vår. Altså: hvis man vil lenke til https://nabla.no/kodegolf/ må man skrive inn 'kodegolf/' (uten fnutter). (Ikke hele lenken, da får man: https://nabla.no/https://nabla.no/kodegolf/, som ikke fungerer.)"
)
picture = models.ImageField(
upload_to="uploads/game_pictures",
null=True,
blank=True,
verbose_name="Bilde",
help_text="Bruk samme størrelse på alle bilder, helst 770x300 px",
)
def __str__(self):
return self.title
| 32.925926 | 290 | 0.673791 | 121 | 889 | 4.859504 | 0.68595 | 0.040816 | 0.061224 | 0.068027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017291 | 0.219348 | 889 | 26 | 291 | 34.192308 | 0.829971 | 0.022497 | 0 | 0 | 0 | 0.05 | 0.438293 | 0.024221 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0.05 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d5d476ef8064a1f801bca06d8f771721e4ae33b | 288 | py | Python | solution/practice/data-structures/linked-list/print-the-elements-of-a-linked-list/solution.py | satyam857/HackerRank | b6c0c199a5e320b1b59fdedafda2630258314b76 | [
"MIT"
] | null | null | null | solution/practice/data-structures/linked-list/print-the-elements-of-a-linked-list/solution.py | satyam857/HackerRank | b6c0c199a5e320b1b59fdedafda2630258314b76 | [
"MIT"
] | null | null | null | solution/practice/data-structures/linked-list/print-the-elements-of-a-linked-list/solution.py | satyam857/HackerRank | b6c0c199a5e320b1b59fdedafda2630258314b76 | [
"MIT"
] | 1 | 2018-10-22T10:42:45.000Z | 2018-10-22T10:42:45.000Z | # Complete the printLinkedList function below.
#
# For your reference:
#
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
#
#
def printLinkedList(head):
    if head is None:
        return
n = head
    while n is not None:
print(n.data)
n = n.next | 16.941176 | 46 | 0.607639 | 31 | 288 | 5.645161 | 0.645161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.288194 | 288 | 17 | 47 | 16.941176 | 0.853659 | 0.447917 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.142857 | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d5f794c240a2713b4e7ab394e05f2c97c787a51 | 4,945 | py | Python | video/segment.py | ArseniKavalchuk/ffmpeg-hls-view | 943e8eb5de7357c720d332a3ef13be98fd1efdf5 | [
"Apache-2.0"
] | 1 | 2020-10-23T15:56:31.000Z | 2020-10-23T15:56:31.000Z | video/segment.py | ArseniKavalchuk/ffmpeg-rtsp-hls-view | 943e8eb5de7357c720d332a3ef13be98fd1efdf5 | [
"Apache-2.0"
] | null | null | null | video/segment.py | ArseniKavalchuk/ffmpeg-rtsp-hls-view | 943e8eb5de7357c720d332a3ef13be98fd1efdf5 | [
"Apache-2.0"
] | null | null | null | import ffmpeg
import os
import logging
import glob
logger = logging.getLogger('segmenter')
class BaseSegmenter:
def __init__(self, in_src, out_dir, channel_id):
if not channel_id or channel_id <= 0:
raise ValueError("channel_id must be a positive integer")
if not in_src or not out_dir:
raise ValueError("in_src and out_dir are mandatory parameters")
if not os.path.isdir(out_dir):
raise ValueError("out_dir: {} must exist".format(out_dir))
self.channel_id = channel_id
self.in_src = in_src
self.out_dir = out_dir + '/channel_' + str(channel_id)
if not os.path.isdir(self.out_dir):
os.makedirs(self.out_dir)
else:
file_list = glob.glob(self.out_dir + '/*.ts') + \
glob.glob(self.out_dir + '/*.m3u8') + \
glob.glob(self.out_dir + '/*.mp4')
for f in file_list:
os.remove(f)
self.ff_input = ffmpeg.input(in_src, max_delay=500000, rtsp_transport='tcp')
def run(self):
if not self.ff_segment_out:
raise Exception('Command is not ready')
if logger.isEnabledFor(logging.DEBUG):
logger.debug("FFmpeg CLI command: '%s'", " ".join(ffmpeg.compile(self.ff_segment_out)))
return (self.ff_segment_out.run())
def run_async(self):
if not self.ff_segment_out:
raise Exception('Command is not ready')
if logger.isEnabledFor(logging.DEBUG):
logger.debug("FFmpeg CLI command: '%s'", " ".join(ffmpeg.compile(self.ff_segment_out)))
return self.ff_segment_out.run_async()
class CodecCopyHLSFormatSegmenter(BaseSegmenter):
def __init__(self, in_src, out_dir, channel_id):
super().__init__(in_src, out_dir, channel_id)
self.ff_segment_out = ffmpeg.output(
self.ff_input.video,
self.out_dir + '/playlist.m3u8',
format='hls',
vcodec='copy',
# g=25,
hls_flags='delete_segments',
hls_segment_type='fmp4',
hls_init_time=0,
hls_list_size=5,
hls_allow_cache=0,
hls_time=1).overwrite_output()
class CodecX264ResizeHLSFormatSegmenter(BaseSegmenter):
def __init__(self, in_src, out_dir, channel_id):
super().__init__(in_src, out_dir, channel_id)
self.ff_segment_out = (ffmpeg
.filter(
self.ff_input.video,
'scale',
size='320x240',
force_original_aspect_ratio='decrease')
.output(
self.out_dir + '/playlist.m3u8',
format='hls',
preset='ultrafast',
threads=1,
vcodec='libx264',
**{'profile:v':'main'},
x264opts='keyint=25:min-keyint=25:no-scenecut',
flags='+cgop',
hls_flags='delete_segments',
hls_segment_type='mpegts',
hls_init_time=0,
hls_list_size=5,
hls_allow_cache=0,
hls_time=1)
.overwrite_output())
class CodecX264HLSFormatSegmenter(BaseSegmenter):
def __init__(self, in_src, out_dir, channel_id):
super().__init__(in_src, out_dir, channel_id)
self.ff_segment_out = ffmpeg.output(
self.ff_input.video,
self.out_dir + '/playlist.m3u8',
format='hls',
preset='ultrafast',
threads=1,
vcodec='libx264',
**{'profile:v':'main'},
x264opts='keyint=25:min-keyint=25:no-scenecut',
#force_key_frames='expr:gte(t,n_forced*2)',
flags='+cgop',
#framerate=25,
# g=50,
# bufsize='6M',
# video_bitrate='3M',
hls_flags='delete_segments',
            hls_segment_type='mpegts',
#hls_segment_type='fmp4',
hls_init_time=0,
hls_list_size=5,
hls_allow_cache=0,
hls_time=1).overwrite_output()
class CodecX264SegmentFormatSegmenter(BaseSegmenter):
def __init__(self, in_src, out_dir, channel_id):
super().__init__(in_src, out_dir, channel_id)
self.ff_segment_out = ffmpeg.output(
self.ff_input.video,
self.out_dir + '/sample%03d.ts',
vcodec='libx264',
preset='ultrafast',
threads=1,
**{'profile:v':'main'},
x264opts='keyint=25:min-keyint=25:no-scenecut',
flags='+cgop',
g=25,
bufsize='6M',
format='ssegment',
segment_list=self.out_dir + '/playlist.m3u8',
segment_wrap=12,
segment_list_flags='live',
segment_list_size=10,
segment_list_type='m3u8',
segment_time=1).overwrite_output()
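
# Minimal usage sketch (the RTSP URL and output directory are placeholders):
#   seg = CodecCopyHLSFormatSegmenter("rtsp://camera/stream", "/var/hls", 1)
#   process = seg.run_async()  # non-blocking; returns the underlying ffmpeg process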
| 35.833333 | 99 | 0.558342 | 574 | 4,945 | 4.5 | 0.24216 | 0.060395 | 0.042586 | 0.061943 | 0.624855 | 0.583043 | 0.583043 | 0.546651 | 0.546651 | 0.546651 | 0 | 0.030267 | 0.325177 | 4,945 | 138 | 100 | 35.833333 | 0.743782 | 0.025076 | 0 | 0.534483 | 0 | 0 | 0.127545 | 0.021811 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060345 | false | 0 | 0.034483 | 0 | 0.155172 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d632ebfcf390e585a2cf63ec0c29c0477321357 | 1,947 | py | Python | gecosistema_win_service/example.py | valluzzi/gecosistema_win_service | 5bfd2e6e85eddb750bfb7715930ef5612c71a4e1 | [
"MIT"
] | null | null | null | gecosistema_win_service/example.py | valluzzi/gecosistema_win_service | 5bfd2e6e85eddb750bfb7715930ef5612c71a4e1 | [
"MIT"
] | null | null | null | gecosistema_win_service/example.py | valluzzi/gecosistema_win_service | 5bfd2e6e85eddb750bfb7715930ef5612c71a4e1 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------------
# Licence:
# Copyright (c) 2012-2020 Luzzi Valerio
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#
# Name: watchdog.py
# Purpose:
#
# Author: Luzzi Valerio
#
# Created: 29/01/2020
# -------------------------------------------------------------------------------
import os,sys
import datetime
from gecosistema_win_service import *
# Patch
# pip install pywin32
# copy Python37\Lib\site-packages\pywin32_system32\pywintypes37.dll Python37\Lib\site-packages\win32
### Install service
##python example.py stop
##python example.py --startup=auto install
##python example.py start
### Remove service
##python example.py stop
##python example.py remove
class WatchDogSvc(WindowsService):
_svc_name_ = "WatchDogSvc"
_svc_display_name_ = _svc_name_
_svc_description_ = "This is an Example Python Windows Service"
    # Override this method
def run(self):
if self:
now = datetime.datetime.now()
filename = "D:/WatchDogSvc.log"
with open(filename, "a") as stream:
stream.write(now.strftime("running @ %H:%M:%S...\n"))
Run = staticmethod(run)
if __name__ == '__main__':
DEBUG = len(sys.argv)<=1
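    # With no CLI arguments we are in debug mode: Run(False) calls run() with a
    # falsy "self", so the body is skipped and this acts as a simple load test.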
if not DEBUG:
WatchDogSvc.install()
else:
WatchDogSvc.Run(False)
| 27.041667 | 100 | 0.639445 | 239 | 1,947 | 5.108787 | 0.569038 | 0.053235 | 0.061425 | 0.037674 | 0.067158 | 0.067158 | 0.067158 | 0.067158 | 0 | 0 | 0 | 0.019834 | 0.197227 | 1,947 | 71 | 101 | 27.422535 | 0.761356 | 0.609656 | 0 | 0 | 0 | 0 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d65822e35959222811499bf6a00a06314dc624b | 2,706 | py | Python | bib2reSTcitation.py | marvinbernhardt/bib2reSTcitation | b200412f3dccfee5718feb80b48c5cc5c61d884c | [
"MIT"
] | null | null | null | bib2reSTcitation.py | marvinbernhardt/bib2reSTcitation | b200412f3dccfee5718feb80b48c5cc5c61d884c | [
"MIT"
] | null | null | null | bib2reSTcitation.py | marvinbernhardt/bib2reSTcitation | b200412f3dccfee5718feb80b48c5cc5c61d884c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import sys
import re
def displaymatch(match):
if match is None:
return None
return '<Match: %r, groups=%r>' % (match.group(), match.groups())
def bib2rest(input_bibfile, output_txtfile):
fields = ('author', 'title', 'journal', 'year', 'doi')
patterns = {field: re.compile(r"^(?: |\t)*" + field + r"[ \t]*=[ \t]*\{(.+)\}(?: |\t)*,(?: |\t)*$", re.IGNORECASE) for field in fields}
start_pattern = re.compile(r"^(?: |\t)*\@(book|article|incollection|inproceedings)\{(.+), *$")
end_pattern = re.compile("^(?: |\t)*}(?: |\t)*$")
with open(input_bibfile, 'r') as input_handle, open(output_txtfile, 'w') as output_handle:
in_a_bib_block = False
ref_data = {}
output_handle.write("Bibliography\n============\n\n")
for line in input_handle:
if not in_a_bib_block:
if start_pattern.match(line):
matches = start_pattern.match(line)
in_a_bib_block = True
ref_data = {}
ref_data['type'] = matches.group(1)
ref_data['key'] = matches.group(2)
else:
pass
else:
# in a bib block
if end_pattern.match(line):
in_a_bib_block = False
rest_ref_block = '.. [' + ref_data['key'] + '] '
for f, field in enumerate((field for field in fields
if field in ref_data)):
if f > 0:
rest_ref_block += ', '
if field == 'doi':
rest_ref_block += 'https://dx.doi.org/'
rest_ref_block += ref_data[field]
output_handle.write(rest_ref_block + "\n")
for field in fields:
if patterns[field].match(line):
ref_data[field] = patterns[field].match(line).group(1)
else:
pass
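
# A converted entry comes out as a single reST citation line, e.g. (hypothetical
# key and fields): .. [Smith19] J. Smith, An Example Title, Some Journal, 2019, https://dx.doi.org/10.1000/182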
if __name__ == '__main__':
throot = "/".join(sys.path[0].split("/")[:])
parser = argparse.ArgumentParser(description='bib2reSTcitation is a tool to convert bib tex file to reStructuredText Markup citation format.')
parser.add_argument('-o', '--output', help='output file path')
parser.add_argument('-i', '--input', help='input file path')
args = parser.parse_args()
input_file = args.input
if input_file is None:
input_file = 'tex.bib'
output_file = args.output
if output_file is None:
output_file = "references.txt"
bib2rest(input_file, output_file)
| 41 | 146 | 0.518108 | 310 | 2,706 | 4.319355 | 0.329032 | 0.041822 | 0.022405 | 0.041075 | 0.130695 | 0.040329 | 0.040329 | 0 | 0 | 0 | 0 | 0.005028 | 0.338507 | 2,706 | 65 | 147 | 41.630769 | 0.743017 | 0.013304 | 0 | 0.160714 | 0 | 0 | 0.161544 | 0.031859 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0.035714 | 0.053571 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d68fdce50c6e1052d14d5b7d0245789b81e2d22 | 276 | py | Python | python/Data Structures/Arrays/replaceGreatestRight.py | sinderpl/CodingExamples | 9bc59a0345589bf51fc74fe9ad527e9498b9b5c9 | [
"MIT"
] | null | null | null | python/Data Structures/Arrays/replaceGreatestRight.py | sinderpl/CodingExamples | 9bc59a0345589bf51fc74fe9ad527e9498b9b5c9 | [
"MIT"
] | null | null | null | python/Data Structures/Arrays/replaceGreatestRight.py | sinderpl/CodingExamples | 9bc59a0345589bf51fc74fe9ad527e9498b9b5c9 | [
"MIT"
] | null | null | null | class Solution:
def replaceElements(self, arr: List[int]) -> List[int]:
curr = -1
for idx in range(len(arr) -1, -1, -1):
temp = arr[idx]
arr[idx] = curr
if temp > curr:
curr = temp
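        # arr now holds, at each index, the greatest element to its right
        # (last slot becomes -1), e.g. [17,18,5,4,6,1] -> [18,6,6,6,1,-1]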
return arr | 27.6 | 59 | 0.456522 | 34 | 276 | 3.705882 | 0.529412 | 0.111111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025157 | 0.423913 | 276 | 10 | 60 | 27.6 | 0.767296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d6b5bc99fe656be916957a3fe9bf8ca0a97853c | 3,166 | py | Python | singleImageHSV.py | josephwccheng/InvisibleCloakOpenCV | b3abb5a95b8f050e74ee97b701e0ecc51046d66e | [
"MIT"
] | null | null | null | singleImageHSV.py | josephwccheng/InvisibleCloakOpenCV | b3abb5a95b8f050e74ee97b701e0ecc51046d66e | [
"MIT"
] | null | null | null | singleImageHSV.py | josephwccheng/InvisibleCloakOpenCV | b3abb5a95b8f050e74ee97b701e0ecc51046d66e | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import time
import matplotlib.pyplot as plt
#####################################################
#
# Author: Joseph Cheng
# HSV: Hue Saturation Value
# Hue is the color
#   Saturation is the colorfulness (low saturation looks grey)
# Value is the brightness of the pixel
#
#
#####################################################
# STEP 1: Capture and store a background frame
# Creating a VideoCapture object
# This will be used for image acquisition later in the code.
cap = cv2.VideoCapture("JosephInvisible.mp4")
# We give some time for the camera to warm-up!
time.sleep(3)
background = 0
for i in range(30):
ret, background = cap.read()
# Laterally invert the image / flip the image.
background = np.flip(background, axis=1)
# imgPlot = plt.imshow(background)
# plt.show()
#Step 2: Color detection
# Capturing the live frame
# ret, img = cap.read()
img = cv2.imread("ImageForColor.PNG")
# Laterally invert the image / flip the image
img = np.flip(img, axis=1)
# imgPlot = plt.imshow(img)
# plt.show()
# converting from BGR to HSV color space
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
imgPlot = plt.imshow(hsv)
plt.show()
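# Note: OpenCV's HSV uses H in [0, 179] and S, V in [0, 255], so the
# boundaries below are on that scale (not the 0-360 degree hue convention).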
# First Range boundary
lower_first_boundary = np.array([90, 100, 10])
upper_first_boundary = np.array([108, 220, 140])
mask = cv2.inRange(hsv, lower_first_boundary, upper_first_boundary)
# Second Range boundary
# lower_second_boundary = np.array([90, 100, 10])
# upper_second_boundary = np.array([108, 220, 140])
# mask2 = cv2.inRange(hsv, lower_second_boundary, upper_second_boundary)
# Generating the final mask to detect color of my clothing, acts like an or gate
# mask = mask1 + mask2
cv2.imshow("mask", mask)
cv2.waitKey(0)
#####################################################
# Morphological Transformation - basic operations on image shape
# Erosion - erodes away boundaries of foreground object (will only consider 1 if all surrounding are 1)
# Dilation - if any pixel contains 1 everything is 1
# Opening - erosion followed by dilation - useful in removing noise
# Closing - dilation followed by erosion - closing small holes inside the foreground
#####################################################
kernel = np.ones((3, 3), np.uint8)
img_open = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
img_dilate = cv2.morphologyEx(img_open, cv2.MORPH_DILATE, kernel, iterations=1)
# creating an inverted mask to segment out the cloth from the frame
clothing_mask = cv2.bitwise_not(img_dilate)
imgPlot = plt.imshow(clothing_mask)
plt.show()
# Segmenting the cloth out of the frame using bitwise and with the inverted mask
res1 = cv2.bitwise_and(img, img, mask=clothing_mask)
cv2.imshow("res1", res1)
cv2.waitKey(0)
# creating image showing static background frame pixels only for the masked region
# this may fail if the background frame and the captured image differ in size or dtype
#res2 = cv2.bitwise_and(background, background, mask=clothing_mask)
# Generating the final output
# final_output = cv2.addWeighted(res1, 1, res2, 1, 0)
# cv2.imshow("magic", final_output)
# cv2.waitKey(0) | 32.639175 | 104 | 0.679406 | 439 | 3,166 | 4.826879 | 0.403189 | 0.016517 | 0.030203 | 0.021708 | 0.100991 | 0.08117 | 0.058518 | 0 | 0 | 0 | 0 | 0.03308 | 0.169299 | 3,166 | 97 | 105 | 32.639175 | 0.772624 | 0.562539 | 0 | 0.137931 | 0 | 0 | 0.042594 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.137931 | 0 | 0.137931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d6ba03cf017f1a2f946aece4eb5018d6320c47a | 5,371 | py | Python | netharn/layers/perceptron.py | VIAME/netharn | c9491d655c5d91cb0ee6055f30e68282108e6b67 | [
"Apache-2.0"
] | null | null | null | netharn/layers/perceptron.py | VIAME/netharn | c9491d655c5d91cb0ee6055f30e68282108e6b67 | [
"Apache-2.0"
] | null | null | null | netharn/layers/perceptron.py | VIAME/netharn | c9491d655c5d91cb0ee6055f30e68282108e6b67 | [
"Apache-2.0"
] | null | null | null | from netharn.layers import common
from netharn.layers import rectify
from netharn.layers import conv_norm
import numpy as np
class MultiLayerPerceptronNd(common.Module):
"""
A multi-layer perceptron network for n dimensional data
Choose the number and size of the hidden layers, number of output channels,
    whether to use residual connections or not, nonlinearity, normalization,
dropout, and more.
Args:
dim (int): specify if the data is 0, 1, 2, 3, or 4 dimensional.
        in_channels (int): number of input channels
hidden_channels (List[int]): or an int specifying the number of hidden
layers (we choose the channel size to linearly interpolate between
input and output channels)
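            (e.g. in_channels=128, out_channels=2, hidden_channels=3 yields hidden sizes [96, 65, 34])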
        out_channels (int): number of output channels
dropout (float, default=0): amount of dropout to use
norm (str, default='batch'): type of normalization layer
(e.g. batch or group), set to None for no normalization.
noli (str, default='relu'): type of nonlinearity
residual (bool, default=False):
            if True, includes a residual skip connection between inputs and
outputs.
CommandLine:
xdoctest -m ~/code/netharn/netharn/layers/perceptron.py MultiLayerPerceptronNd:0
Example:
>>> from netharn.layers.perceptron import *
>>> import ubelt as ub
>>> self = MultiLayerPerceptronNd(dim=1, in_channels=128, hidden_channels=3, out_channels=2)
>>> print(self)
MultiLayerPerceptronNd...
>>> shape = self.output_shape_for([1, 128, 7])
>>> print('shape = {!r}'.format(shape))
>>> print('shape.hidden = {}'.format(ub.repr2(shape.hidden, nl=1)))
shape = (1, 2, 7)
shape.hidden = {
'hidden0': {'conv': (1, 96, 7), 'norm': (1, 96, 7), 'noli': (1, 96, 7)},
'dropout0': (1, 96, 7),
'hidden1': {'conv': (1, 65, 7), 'norm': (1, 65, 7), 'noli': (1, 65, 7)},
'dropout1': (1, 65, 7),
'hidden2': {'conv': (1, 34, 7), 'norm': (1, 34, 7), 'noli': (1, 34, 7)},
'dropout2': (1, 34, 7),
'output': (1, 2, 7),
}
>>> import netharn as nh
>>> nh.OutputShapeFor(self)._check_consistency([1, 128, 7])
(1, 2, 7)
>>> print('self._hidden_channels = {!r}'.format(self._hidden_channels))
Example:
>>> from netharn.layers.perceptron import *
>>> import ubelt as ub
>>> self = MultiLayerPerceptronNd(0, 128, [256, 64], residual=True,
>>> norm='group', out_channels=2)
>>> print(self)
>>> input_shape = (None, 128)
>>> print(ub.repr2(self.output_shape_for(input_shape).hidden, nl=-1))
Example:
>>> from netharn.layers.perceptron import *
>>> import ubelt as ub
>>> self = MultiLayerPerceptronNd(0, 128, [], residual=False,
>>> norm='group', out_channels=2)
>>> print(self)
>>> input_shape = (None, 128)
>>> print(ub.repr2(self.output_shape_for(input_shape).hidden, nl=-1))
"""
def __init__(self, dim, in_channels, hidden_channels, out_channels,
bias=True, dropout=0, noli='relu', norm='batch',
residual=False):
super(MultiLayerPerceptronNd, self).__init__()
dropout_cls = rectify.rectify_dropout(dim)
conv_cls = rectify.rectify_conv(dim=dim)
curr_in = in_channels
if isinstance(hidden_channels, int):
n = hidden_channels
hidden_channels = np.linspace(in_channels, out_channels, n + 1,
endpoint=False)[1:]
            hidden_channels = hidden_channels.round().astype(int).tolist()  # builtin int: np.int was removed in NumPy 1.24
self._hidden_channels = hidden_channels
hidden = self.hidden = common.Sequential()
for i, curr_out in enumerate(hidden_channels):
layer = conv_norm.ConvNormNd(dim, curr_in, curr_out, kernel_size=1,
bias=False, noli=noli, norm=norm)
hidden.add_module('hidden{}'.format(i), layer)
hidden.add_module('dropout{}'.format(i), dropout_cls(p=dropout))
curr_in = curr_out
outkw = {'bias': bias}
if dim > 0:
outkw['kernel_size'] = 1
self.hidden.add_module('output', conv_cls(curr_in, out_channels, **outkw))
if residual:
self.skip = conv_cls(in_channels, out_channels, **outkw)
else:
self.skip = None
self.in_channels = in_channels
self.out_channels = out_channels
def forward(self, inputs):
outputs = self.hidden(inputs)
if self.skip:
outputs = self.skip(inputs) + outputs
return outputs
def output_shape_for(self, input_shape):
outputs = self.hidden.output_shape_for(input_shape)
if self.skip:
import netharn as nh
skip = nh.OutputShapeFor(self.skip)(input_shape)
outputs.hidden['skip'] = skip
return outputs
def receptive_field_for(self, input_field=None):
import netharn as nh
field = nh.ReceptiveFieldFor(self.hidden)(input_field)
if self.skip:
skip = nh.ReceptiveFieldFor(self.skip)(field)
field.hidden['skip'] = skip
return field
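# Minimal usage sketch (assumes torch is importable; shapes follow the doctests above):
#   mlp = MultiLayerPerceptronNd(dim=0, in_channels=128, hidden_channels=[256, 64], out_channels=2)
#   y = mlp(torch.randn(4, 128))  # -> shape (4, 2)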
| 40.383459 | 100 | 0.582945 | 642 | 5,371 | 4.742991 | 0.228972 | 0.05977 | 0.033498 | 0.02266 | 0.160263 | 0.145484 | 0.145484 | 0.145484 | 0.145484 | 0.145484 | 0 | 0.031341 | 0.293055 | 5,371 | 132 | 101 | 40.689394 | 0.770608 | 0.501024 | 0 | 0.12963 | 0 | 0 | 0.022615 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.111111 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d6e573aac9be9a3ddc8cd881787cdec4d5f95b8 | 2,199 | py | Python | tests/event_broker/test_multi_subscribe.py | filfreire/questions-three | 1d1d621d5647407bf2d1b271e0b9c7c9f1afc5c8 | [
"MIT"
] | 5 | 2019-07-22T06:04:07.000Z | 2021-07-23T06:01:51.000Z | tests/event_broker/test_multi_subscribe.py | filfreire/questions-three | 1d1d621d5647407bf2d1b271e0b9c7c9f1afc5c8 | [
"MIT"
] | 15 | 2020-07-28T17:33:40.000Z | 2021-08-23T17:30:05.000Z | tests/event_broker/test_multi_subscribe.py | filfreire/questions-three | 1d1d621d5647407bf2d1b271e0b9c7c9f1afc5c8 | [
"MIT"
] | 4 | 2019-08-25T22:41:59.000Z | 2020-10-21T14:28:15.000Z | from functools import partial
from unittest import TestCase, main
from expects import expect, be_empty, equal
from twin_sister.expects_matchers import complain
from twin_sister.fakes import FunctionSpy
from questions_three.constants import TestEvent
from questions_three.event_broker import EventBroker
class TestMultiSubscribe(TestCase):
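    """Tests for subscribing a single callback to several events at once."""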
def tearDown(self):
EventBroker.reset()
def test_requires_event_or_events(self):
expect(partial(EventBroker.subscribe, func=lambda **k: None)).to(complain(TypeError))
def test_event_and_events_are_mutually_exclusive(self):
expect(
partial(
EventBroker.subscribe,
event=TestEvent.test_started,
events=(TestEvent.test_ended, TestEvent.test_erred),
func=lambda **k: None,
)
).to(complain(TypeError))
def test_subscribes_to_first(self):
all_events = list(TestEvent)
events = (all_events[1], all_events[3], all_events[5])
spy = FunctionSpy()
EventBroker.subscribe(events=events, func=spy)
EventBroker.publish(event=events[0])
spy.assert_was_called()
def test_subscribes_to_last(self):
all_events = list(TestEvent)
events = (all_events[1], all_events[3], all_events[5])
spy = FunctionSpy()
EventBroker.subscribe(events=events, func=spy)
EventBroker.publish(event=events[-1])
spy.assert_was_called()
def test_does_not_subscribe_to_unspecified(self):
all_events = list(TestEvent)
events = (all_events[1], all_events[3], all_events[5])
spy = FunctionSpy()
EventBroker.subscribe(events=events, func=spy)
EventBroker.publish(event=all_events[2])
expect(spy.call_history).to(be_empty)
def test_publishes_event_type(self):
all_events = list(TestEvent)
events = (all_events[1], all_events[3], all_events[5])
published = events[1]
spy = FunctionSpy()
EventBroker.subscribe(events=events, func=spy)
EventBroker.publish(event=published)
expect(spy["event"]).to(equal(published))
if "__main__" == __name__:
main()
| 33.830769 | 93 | 0.673488 | 261 | 2,199 | 5.425287 | 0.283525 | 0.108051 | 0.036723 | 0.048023 | 0.543785 | 0.491525 | 0.456215 | 0.456215 | 0.456215 | 0.398305 | 0 | 0.00939 | 0.225102 | 2,199 | 64 | 94 | 34.359375 | 0.821596 | 0 | 0 | 0.346154 | 0 | 0 | 0.005912 | 0 | 0 | 0 | 0 | 0 | 0.038462 | 1 | 0.134615 | false | 0 | 0.134615 | 0 | 0.288462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d6e9a3e1c2529b39e7ddfcb30d0694645fa2929 | 5,075 | py | Python | opto/opto/classes/Optimizer.py | tholiao/opto | 370a016a1ccef462b185b0f9538fb33aab81d566 | [
"MIT"
] | 5 | 2019-03-25T17:52:28.000Z | 2022-03-19T06:20:41.000Z | opto/opto/classes/Optimizer.py | tholiao/opto | 370a016a1ccef462b185b0f9538fb33aab81d566 | [
"MIT"
] | null | null | null | opto/opto/classes/Optimizer.py | tholiao/opto | 370a016a1ccef462b185b0f9538fb33aab81d566 | [
"MIT"
] | 6 | 2018-02-19T05:33:40.000Z | 2019-11-21T04:17:22.000Z | # Compatibility Python 2/3
from __future__ import division, print_function, absolute_import
from builtins import range
# ----------------------------------------------------------------------------------------------------------------------
import numpy as np
from dotmap import DotMap
from .Logs import Logs
from timeit import default_timer as timer
import logging
logger = logging.getLogger(__name__)
__author__ = 'Roberto Calandra'
class Optimizer(object):
def __init__(self, task, stopCriteria, parameters=DotMap()):
"""
Abstract class
:param task: R.opto.classes.OptTask -- Task to be optimized.
:param stopCriteria: R.opto.classes.StopCriteria -- The stopping criteria for the optimization
:param parameters: DotMap() or list of additional parameters for the optimizer:
verbosity: 0=nothing, 1=Minimizing, 2=terminated time, 3=progress bar, 4=Iters, 5=more...
visualize:
indent:
"""
# Arguments
self.task = task # Task to be optimized.
self.stopCriteria = stopCriteria # StopCriteria
# Optional
self.verbosity = parameters.get('verbosity', 1)
self.visualize = parameters.get('visualize', 0) # Warning: might substantially reduce performance!
self.indent = parameters.get('indent', 0)
# Internal
self.name = 'Optimizer' # Name of the Optimizer
self.order = 0 # Order of the Optimizer
self.MOO = False # Single or multi-objective optimizer?
# Structure that keep the Logs of the optimization process and related events
self.store_full_x = True
self.store_full_fx = True
self.store_full_gx = parameters.get('store_full_gx', False)
self._startTime = None
self._logs = Logs(store_x=self.store_full_x, store_fx=self.store_full_fx, store_gx=self.store_full_gx)
def optimize(self):
"""
Interface to start the optimization process
:return: optimized parameters (as a 1D np.array)
"""
if self.verbosity > 0:
if self.task.isSOO():
logging.info('%s %d parameters using %s' % ('Minimizing', self.task.get_n_parameters(), self.name)) # TODO: minimizing/maximizing
else:
logging.info('Optimizing %d objective / %d parameters using %s' % (self.task.get_n_objectives(), self.task.get_n_parameters(), self.name))
logging.info('Optimization started')
self._startTime = timer()
out = self._optimize()
# TODO: assert size parameters
# if self.task.get_n_parameters() == 1:
# out = np.array(out).flatten()
# assert out.shape == (self.task.get_n_parameters(),), 'Internal error: wrong dimension'
# else:
# assert out.shape[0] == self.task.get_n_parameters(), 'Internal error: wrong dimension'
end = timer()
logging.info('Optimization completed in %f[s]' % (end - self._startTime))
logging.info('Optimization ended with flag: ') # TODO: self.stopCriteria
return out
def _evaluate(self, x):
"""
Evaluate the objective function and update the logs
:return:
"""
def update_best():
if self.task.isSOO():
if self.task.task == {'minimize'}:
idx = np.argmin(self._logs.get_objectives())
else:
idx = np.argmax(self._logs.get_objectives())
self._logs.data.opt_fx = self._logs.get_objectives()[0, idx]
self._logs.data.opt_x = self._logs.get_parameters()[:, idx]
else:
# MOO
pass
if self.order == 0:
fx = self.task.evaluate(x, order=0)
self._logs.add_evals(x=x.T, fx=fx, time=self.stopCriteria.get_time()) # store logs
update_best()
return fx
if self.order == 1:
fx, gx = self.task.evaluate(x, order=1)
self._logs.add_evals(x=x.T, fx=fx, gx=gx, time=self.stopCriteria.get_time()) # store logs
update_best()
return fx, gx
if self.order == 2:
fx, gx, hx = self.task.evaluate(x, order=2)
self._logs.add_evals(x=x.T, fx=fx, gx=gx, time=self.stopCriteria.get_time()) # store logs
update_best()
return fx, gx, hx
def _optimize(self):
"""
Wrapper optimization process
:return:
"""
raise NotImplementedError('Implement in subclass')
# def get_f(self):
# """
# return the objective function. (LEGACY) please use self._evaluate() instead to get access to the obj.func.
# :return:
# """
# return self.task.f
def get_logs(self):
"""
Return the logs collected during the optimization
:return:
"""
return self._logs
| 39.038462 | 155 | 0.568079 | 581 | 5,075 | 4.817556 | 0.280551 | 0.042872 | 0.027867 | 0.025723 | 0.174705 | 0.143265 | 0.143265 | 0.121829 | 0.121829 | 0.0786 | 0 | 0.006532 | 0.306207 | 5,075 | 129 | 156 | 39.341085 | 0.788412 | 0.314483 | 0 | 0.149254 | 0 | 0 | 0.0816 | 0 | 0 | 0 | 0 | 0.007752 | 0 | 1 | 0.089552 | false | 0.014925 | 0.104478 | 0 | 0.283582 | 0.014925 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d704f2f7940709c3bf91f303ecaf73f3e4a2d3f | 1,359 | py | Python | tests/bugs/core_3675_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2022-02-05T11:37:13.000Z | 2022-02-05T11:37:13.000Z | tests/bugs/core_3675_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-09-03T11:47:00.000Z | 2021-09-03T12:42:10.000Z | tests/bugs/core_3675_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-06-30T14:14:16.000Z | 2021-06-30T14:14:16.000Z | #coding:utf-8
#
# id: bugs.core_3675
# title: CREATE INDEX considers NULL and empty string being the same in compound indices
# description:
# tracker_id: CORE-3675
# min_versions: ['2.5.2']
# versions: 2.5.2
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5.2
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(from_backup='core3675.fbk', init=init_script_1)
test_script_1 = """
show table t;
select * from t;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
F1 VARCHAR(1) Nullable
F2 VARCHAR(1) Nullable
F3 VARCHAR(1) Nullable
F4 VARCHAR(1) Nullable
CONSTRAINT T1_UNQ:
Unique key (F1, F2, F3, F4)
F1 F2 F3 F4
====== ====== ====== ======
a b c d
a <null> c d
a c d
a b <null> d
a b <null>
a b <null>
a b <null> <null>
a <null> <null> <null>
<null> <null> <null> <null>
"""
@pytest.mark.version('>=2.5.2')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
| 24.267857 | 95 | 0.552612 | 189 | 1,359 | 3.783069 | 0.396825 | 0.078322 | 0.083916 | 0.08951 | 0.064336 | 0.064336 | 0 | 0 | 0 | 0 | 0 | 0.06413 | 0.323032 | 1,359 | 55 | 96 | 24.709091 | 0.713043 | 0.197204 | 0 | 0.117647 | 0 | 0 | 0.554217 | 0 | 0 | 0 | 0 | 0 | 0.029412 | 1 | 0.029412 | false | 0 | 0.058824 | 0 | 0.088235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d73a3c5a76c4a1ecfc8ba69c9bdec3726bf3080 | 7,724 | py | Python | analysisCodes/hotrgTc.py | brucelyu/tensorRGflow | e70edb2da11f0be4db2882ddd7a07a8a6e0d4f06 | [
"MIT"
] | 12 | 2021-03-06T10:06:54.000Z | 2022-01-31T14:20:28.000Z | analysisCodes/hotrgTc.py | brucelyu/tensorRGflow | e70edb2da11f0be4db2882ddd7a07a8a6e0d4f06 | [
"MIT"
] | null | null | null | analysisCodes/hotrgTc.py | brucelyu/tensorRGflow | e70edb2da11f0be4db2882ddd7a07a8a6e0d4f06 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : hotrgTc.py
# Author : Xinliang(Bruce) Lyu <lyu@issp.u-tokyo.ac.jp>
# Date : 22.02.2021
# Last Modified Date: 22.02.2021
# Last Modified By : Xinliang(Bruce) Lyu <lyu@issp.u-tokyo.ac.jp>
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 16 16:54:09 2020
Perform an analysis similar to that of Hinczewski and Berker (2008), Fig. 4,
but here we use HOTRG to determine the critical temperature
@author: brucelyu
"""
from HOTRG import normFlowHOTRG
import pylab
import scipy.linalg as LA
import numpy as np
import os
import pickle as pkl
import argparse
# argument parser
parser = argparse.ArgumentParser(
"Generate of flow of tensor represented by its Frobenius norm." +
" HOTRG is applied")
parser.add_argument("--chi", dest = "chi", type = int,
help = "bound dimension (default: 10)",
default = 10)
parser.add_argument("--maxiter", dest = "maxiter", type = int,
help = "maximal HOTRG iteration (default: 35)",
default = 35)
parser.add_argument("--rootiter", dest = "rootiter", type = int,
help = "iteration of finding the Tc (default:21)",
default = 21)
parser.add_argument("--isGilt", help = "whether to use Gilts",
action = "store_true")
parser.add_argument("--isSym", help = "whether to use Z2 symmetric tensor",
action = "store_true")
parser.add_argument("--gilteps", dest = "gilteps", type = float,
help = "a number smaller than which we think the" +
"singluar values for the environment spectrum is zero" +
"(default: 1e-7)",
default = 1e-7)
parser.add_argument("--Tlow", dest = "Tlow", type = float,
help = "Estimated lower bound for critical temperature",
default = 0.989)
parser.add_argument("--Thi", dest = "Thi", type = float,
help = "Estimated higher bound for critical temperature",
default = 1.01)
parser.add_argument("--cgeps", dest = "cgeps", type = float,
help = "a number smaller than which we think the" +
"singluar values for the environment in RG spectrum is zero" +
"(default: 1e-10)",
default = 1e-10)
parser.add_argument("--Ngilt", dest = "Ngilt", type = int,
help = "How many times do we perform Gilt in oneHOTRG",
choices = [1,2], default = 2)
parser.add_argument("--legcut", dest = "legcut", type = int,
help = "number of leg to cut in gilt_hotrgplaq",
choices = [2,4], default = 2)
parser.add_argument("--loopred", dest = "loopred", type = str,
help = "which loop reduction scheme to use",
choices = ["Gilt", "FET"], default = "Gilt")
# This is for the full environment truncation (FET)
parser.add_argument("--chitid", dest = "chitid", type = int,
help = "bound dimension (default: 5)",
default = 5)
# This is to stabilize the flow generated by the Gilt
parser.add_argument("--stbk", dest = "stbk", type = int,
help = "A int after which we will try to stabilize the gilt process",
default = 1000)
# read from argument parser
args = parser.parse_args()
chi = args.chi
iter_max = args.maxiter
iterRoot = args.rootiter
isGilt = args.isGilt
isSym = args.isSym
gilteps = args.gilteps
cgeps = args.cgeps
Ngilt = args.Ngilt
legcut = args.legcut
loopred = args.loopred
chitid = args.chitid
stablek = args.stbk
argsFET = {'chitid':chitid, 'maxiter':40, 'initscheme':'Gilt',
'giltdeg':0.5}
# # input and output file name
# if isGilt:
# if rgver == "old-Gilt-HOTRG":
# figdir = "gilt_hotrg_flow"
# elif rgver == "Gilt-HOTRG-imp":
# figdir = "gilt_hotrg_imp_flow"
# else:
# raise ValueError("--ver argument is not valid.")
# else:
# figdir = "hotrgflow"
# input and output file name
if isGilt:
figdir = "gilt_hotrg{:d}{:d}_flow".format(Ngilt, legcut)
else:
figdir = "hotrg"
# create a directory with the name ?? to save all the figures
# if the directory does not exist
if isGilt:
chieps = "eps{:.0e}_chi{:02d}".format(gilteps, chi)
else:
chieps = "chi{:02d}".format(chi)
savedirectory = "../out/" + figdir + "/" + chieps
if not os.path.exists(savedirectory):
os.makedirs(savedirectory)
Tcfile = savedirectory + "/Tc.pkl"
# read Tc if exists
if not os.path.exists(Tcfile):
Tlow = args.Tlow
Thi = args.Thi
else:
with open(Tcfile,"rb") as f:
Tlow, Thi = pkl.load(f)
# The accuracy of the current estimation of the bisection method
Tdiff = abs(Thi - Tlow) / (Thi + Tlow)
accTc = "{:.2e}".format(Tdiff)
accTc = accTc[-2:]
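# keep only the exponent digits of the relative accuracy, e.g. "1.23e-05" -> "05"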
print("Start the bisection algorithm...")
AnormL = normFlowHOTRG(Tlow,[chi,chi], iter_max, isDisp = False,
isGilt = isGilt, isSym = isSym,
gilt_eps = gilteps, cg_eps = cgeps,
N_gilt = Ngilt, legcut = legcut,
stableStep = stablek)[0]
AnormH = normFlowHOTRG(Thi,[chi,chi], iter_max, isDisp = False,
isGilt = isGilt, isSym = isSym,
gilt_eps = gilteps, cg_eps = cgeps,
N_gilt = Ngilt, legcut = legcut,
stableStep = stablek)[0]
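# Bisection: at each step evaluate the RG flow at the midpoint Ttry and decide
# which half of [Tlow, Thi] contains Tc by checking whether the Ttry flow ends
# up closer to the low-temperature or the high-temperature reference flow.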
for i in range(iterRoot):
print("Performing {}-th iteration to find the Tc".format(i+1))
print("Tc estimated lowbound = {},\n highbound = {}".format(Tlow, Thi))
# Examine the tensor RG flow at Ttry
Ttry = 0.5*(Tlow + Thi)
Tdiff = abs(Thi - Ttry) / Ttry
AnormTry = normFlowHOTRG(Ttry,[chi,chi], iter_max, isDisp = False,
isGilt = isGilt, isSym = isSym,
gilt_eps = gilteps, cg_eps = cgeps,
N_gilt = Ngilt, legcut = legcut,
stableStep = stablek)[0]
# Plot the flow of tensor norm every three steps
if (i+1) % 3 == 0:
pylab.figure()
pylab.title("Difference from Tc = {:.2e}, with $\chi = ${:d}".format(Tdiff,chi))
pylab.plot(AnormL[1:],"bo--",label="low T = {:.10f}".format(Tlow))
pylab.plot(AnormH[1:],"k.-",label="hi T = {:.10f}".format(Thi))
pylab.plot(AnormTry[1:], "gx-.",label="try T = {:.10f}".format(Ttry))
# pylab.ylim([0.10,0.50])
pylab.yscale("log")
pylab.legend()
pylab.xlabel("RG step")
pylab.ylabel("$|A|$")
pylab.savefig(savedirectory + "/chi{:02d}{:02d}.png".format(chi,i+1), dpi=300)
# pylab.show()
# Calculate the "distances" of the Ttry tensor RG flow with
# the original Thi and Tlow flows respectively
if isGilt:
distwithHi = np.abs(AnormTry[-1] - AnormH[-1])
distwithL = np.abs(AnormTry[-1] - AnormL[-1])
else:
distwithHi = LA.norm(np.array(AnormTry[1:]) - np.array(AnormH[1:]))
distwithL = LA.norm(np.array(AnormTry[1:]) - np.array(AnormL[1:]))
# Determine Ttry is Thi or Tlow
if distwithL < distwithHi:
Tlow = Ttry
AnormL = AnormTry.copy()
else:
Thi = Ttry
AnormH = AnormTry.copy()
# save the lower and upper bound of Tc
with open(Tcfile,"wb") as f:
pkl.dump([Tlow,Thi],f)
# Append all figures
# orgfile = savedirectory + '/chi{:02d}*.png '.format(chi)
# tarfile = savedirectory + '/allchi.png'
# os.system('convert ' + orgfile + "-append " + tarfile)
# os.system('rm ' + orgfile)
| 37.862745 | 89 | 0.576256 | 966 | 7,724 | 4.567288 | 0.308489 | 0.028558 | 0.053944 | 0.00884 | 0.236174 | 0.191296 | 0.151405 | 0.137806 | 0.12466 | 0.109701 | 0 | 0.024066 | 0.289876 | 7,724 | 203 | 90 | 38.049261 | 0.78031 | 0.204687 | 0 | 0.176471 | 0 | 0 | 0.217391 | 0.003774 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.051471 | 0 | 0.051471 | 0.022059 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d7412d1cd28b6de9850ed3634e8ab4057bb4f22 | 32,442 | py | Python | src/modeling.py | gengjia0214/jai | 865ec9fdf432288ecab806cd1ecb8a4c747ee689 | [
"BSD-3-Clause"
] | 9 | 2020-02-20T23:14:55.000Z | 2020-07-09T03:28:04.000Z | src/modeling.py | gengjia0214/jai | 865ec9fdf432288ecab806cd1ecb8a4c747ee689 | [
"BSD-3-Clause"
] | null | null | null | src/modeling.py | gengjia0214/jai | 865ec9fdf432288ecab806cd1ecb8a4c747ee689 | [
"BSD-3-Clause"
] | null | null | null | """
Modeling Pipeline
Author: Jia Geng
Email: gjia0214@gmail.com | jxg570@miami.edu
"""
import gc
import os
import random
import copy
import torch
import torch.nn as nn
from sklearn import metrics
from torch.optim.lr_scheduler import *
from torch.optim.optimizer import Optimizer
from datautils.datahandler import DataHandler
from builder import *
from datetime import datetime
from tqdm.notebook import tqdm as tqdm_notebook
from tqdm import tqdm as tqdm_terminal
from functools import partial
import numpy as np
import pickle
def compute_metric(ground_truth: list, prediction: list,):
"""
    Compute a set of performance metrics:
    accuracy, precision, recall and f1 score; each reported by class, micro-averaged, and macro-averaged
:param ground_truth: list of ground truth labels
:param prediction: list of predicted labels
:return: performance metrics by class, micro, macro
"""
# compute performance metrics
acc = metrics.accuracy_score(y_true=ground_truth, y_pred=prediction)
precision_by_class = metrics.precision_score(y_true=ground_truth, y_pred=prediction, average=None, zero_division=0)
precision_micro = metrics.precision_score(y_true=ground_truth, y_pred=prediction, average='micro', zero_division=0)
precision_macro = metrics.precision_score(y_true=ground_truth, y_pred=prediction, average='macro', zero_division=0)
recall_by_class = metrics.recall_score(y_true=ground_truth, y_pred=prediction, average=None, zero_division=0)
recall_micro = metrics.recall_score(y_true=ground_truth, y_pred=prediction, average='micro', zero_division=0)
recall_macro = metrics.recall_score(y_true=ground_truth, y_pred=prediction, average='macro', zero_division=0)
f1_by_class = metrics.f1_score(y_true=ground_truth, y_pred=prediction, average=None, zero_division=0)
f1_micro = metrics.f1_score(y_true=ground_truth, y_pred=prediction, average='micro', zero_division=0)
f1_macro = metrics.f1_score(y_true=ground_truth, y_pred=prediction, average='macro', zero_division=0)
metric_dict = {'accuracy': acc,
'precision': {'by_class': precision_by_class, 'micro': precision_micro, 'macro': precision_macro},
'recall': {'by_class': recall_by_class, 'micro': recall_micro, 'macro': recall_macro},
'f1': {'by_class': f1_by_class, 'micro': f1_micro, 'macro': f1_macro}}
return metric_dict
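# Example (a sketch): compute_metric([0, 1, 1], [0, 1, 0]) reports accuracy 2/3,
# plus per-class / micro / macro precision, recall and f1 in the nested dicts.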
class _Logger:
"""
Logger for collecting the loss, predictions and maintain a confusion matrix during model training/evaluation
"""
def __init__(self, n_classes: int, criteria: str, verbose: bool):
"""
Constructor
:param n_classes: number of classes
:param criteria: criteria for model selection: accuracy, precision, recall and f1 score
:param verbose: whether to print out the logging process, turn on for debugging
"""
        # the model-selection criterion must be one of the supported metrics (top-1 accuracy is always logged)
assert criteria in ['accuracy', 'recall', 'precision', 'f1']
self.criteria = criteria
self.n_classes = n_classes
self.verbose = verbose
# cache best performance
self.best_criteria_metric = -1
self.best_acc = -1
self.best_epoch = -1
# batch & epoch log
self.batch_loss = {'train': [], 'eval': []}
self.epoch_loss = {'train': [], 'eval': []}
self.epoch_perf = {'train': [], 'eval': []} # for storing the acc of each epoch
# best epoch log
# unlock and refresh when meet better epoch
self.best_comprehensive_metric = None
self.best_ground_truths = {'train': [], 'eval': []}
self.best_predictions = {'train': [], 'eval': []}
self.best_data_ids = {'train': [], 'eval': []}
# row index for truth, column index for prediction
# unlock and refresh when meet better epoch
self.best_confusion_matrix = {'train': np.zeros((n_classes, n_classes)),
'eval': np.zeros((n_classes, n_classes))}
# temp pointer only contains at data for the current epoch
self.temp_batch_loss = {'train': [], 'eval': []}
self.temp_ground_truth = {'train': [], 'eval': []}
self.temp_prediction = {'train': [], 'eval': []}
self.temp_confusion_matrix = {'train': np.zeros((n_classes, n_classes)),
'eval': np.zeros((n_classes, n_classes))}
self.temp_data_ids = {'train': [], 'eval': []}
def login_batch(self, phase: str, ground_truth: list, predictions: list, loss: float or None,
data_ids=None):
"""
Login the batch data
:param phase: train or eval
:param ground_truth: ground truth
:param predictions: predicted class
:param loss: batch loss
:param data_ids: data id if provided
:return: void
"""
# performance metrics
for i, (t, p) in enumerate(zip(ground_truth, predictions)):
# update the list container
self.temp_ground_truth[phase].append(t)
self.temp_prediction[phase].append(p)
if data_ids is not None:
self.temp_data_ids[phase].append(data_ids[i])
else:
self.temp_data_ids[phase].append(None)
# row index for truth, column index for prediction
self.temp_confusion_matrix[phase][t][p] += 1
# loss log for the current epoch
# login to the temp as well as the global pointer
# for test model on a testing set and report performance only, loss input should be none
if loss is not None:
self.temp_batch_loss[phase].append(loss)
self.batch_loss[phase].append(loss)
def login_epoch(self, phase: str, criteria: str, epoch: int, evaluation=False):
"""
        Operations to run when an epoch completes
For training phase:
1. update the epoch log
2. calculate the performance
3. reset the temp pointers
For eval phase:
1. update the epoch log
2. calculate the performance
3. update best perf and best epoch when better model find, send the signal to agent
4. reset the temp pointers
5. update the epoch idx
:param phase: train or eval
:param criteria: criteria metric key: accuracy, precision, recall or f1 score
:param epoch: the current epoch index
:param evaluation: whether the agent is doing evaluation on test set
:return: epoch_loss, acc, selected_metric
"""
# flag
find_better_model = False
# update the epoch log
epoch_loss = np.mean(self.temp_batch_loss[phase])
self.epoch_loss[phase].append(epoch_loss)
# compute the performance metrics
perf_metrics = compute_metric(ground_truth=self.temp_ground_truth[phase], prediction=self.temp_prediction[phase])
acc = perf_metrics['accuracy']
selected_metric = perf_metrics[criteria]['macro'] if criteria != 'accuracy' else acc
self.epoch_perf[phase].append(acc)
# for eval phase => reset temp pointer & check whether get better performance
if phase == 'eval':
# update best epoch info
if selected_metric > self.best_criteria_metric or evaluation:
self.best_criteria_metric = selected_metric
self.best_acc = acc
self.best_epoch = epoch
self.best_ground_truths = self.temp_ground_truth
self.best_predictions = self.temp_prediction
self.best_data_ids = self.temp_data_ids
self.best_confusion_matrix = self.temp_confusion_matrix
self.best_comprehensive_metric = perf_metrics # login all metrics into this pointer
find_better_model = True
# reset the temp pointers
self.temp_batch_loss = {'train': [], 'eval': []}
self.temp_ground_truth = {'train': [], 'eval': []}
self.temp_prediction = {'train': [], 'eval': []}
self.temp_data_ids = {'train': [], 'eval': []}
self.temp_confusion_matrix = {'train': np.zeros((self.n_classes, self.n_classes)),
'eval': np.zeros((self.n_classes, self.n_classes))}
# collect the garbage
gc.collect()
return epoch_loss, acc, selected_metric, find_better_model
def load(self, src: dict or str):
"""
Load the logger from dictionary or a pickle file
This is called by the modeling agents
:param src: logger state dict or a pickle file path
:return: void
"""
if isinstance(src, str): # load the pickle file
            if not src.endswith('.pth'):
raise Exception('src path should end with .pth')
with open(src, 'rb') as file:
state_dict = pickle.load(file=file)
elif isinstance(src, dict):
state_dict = src
else:
raise Exception('src need to be either the logger state dict or a path to the pickle file')
for attr, val in state_dict.items():
self.__setattr__(attr, val)
def save(self, dst_pth: str,):
"""
Save the logger state. This method usually won't used by users.
The modeling agents will handle the serialization from surface
:param dst_pth: dst folder
:return: void
"""
        if not dst_pth.endswith('.pth'):
raise Exception('dst_pth should end with .pth')
with open(dst_pth, 'wb') as file:
pickle.dump(obj=self.get_logger_state(), file=file)
def get_logger_state(self):
"""
Get the logger state
:return: logger state dict
"""
return self.__dict__
def get_best_perf(self):
"""
Getter to get the best perf metric, i.e. auc
:return: selected performance metric
"""
return self.best_criteria_metric
def get_best_confusion_matrix(self):
"""
Getter to get the confusion matrix of the best epoch
:return: confusion matrix
"""
return self.best_confusion_matrix
def get_best_predictions(self):
"""
Getter to get the predictions of the best epoch
:return: data_ids, ground truth, and predictions
"""
return self.best_data_ids, self.best_ground_truths, self.best_predictions
def reset(self):
"""
Re-initialize the logger
:return: void
"""
self.__init__(n_classes=self.n_classes, criteria=self.criteria, verbose=self.verbose)
class __BaseAgent:
"""
Agent interface class
Parent class for Trainer and Evaluator
"""
def __init__(self, model: nn.Module or ModelConfig, loss_module: nn.Module or None,
n_classes: int, criteria: str, verbose: bool,
optimizer: partial or None, scheduler: partial or None,
prefix: str, checkpoint_folder: str,
new_head=None, blocks_to_freeze=None,
*args):
"""
Abstract class of Base Agent.
Check child class doc for param details.
"""
if not os.path.isdir(checkpoint_folder):
            raise FileNotFoundError('Directory does not exist: {}'.format(checkpoint_folder))
assert (blocks_to_freeze is None or isinstance(blocks_to_freeze, list)), 'blocks_to_freeze must be a list of block names or None'
# agent type
self.agent = None
self.running_env = 'notebook'
self.pbar = {'notebook': tqdm_notebook, 'terminal': tqdm_terminal}
# key model settings
# set up the model config instance if provided
if isinstance(model, nn.Module):
self.model = model
elif isinstance(model, ModelConfig):
self.model = Builder.assemble(model)
else:
raise Exception('Model must be either an nn.Module or a Config instance')
self.loss_module = loss_module
self.n_classes = n_classes
self.criteria = criteria
self.device = None
# optimizer & scheduler
self.optimizer = optimizer
self.scheduler = scheduler
# check point setting
self.base_epoch = 0
self.checkpoint_folder = checkpoint_folder
self.prefix = prefix
self.manual_load_model_param = False # arg reserved for evaluator
# set up args for transfer learning
# when new_head_args is not None, transfer learning is activated
# the model config instance will be modified during initialization
self.new_head = new_head
self.blocks_to_freeze = blocks_to_freeze if blocks_to_freeze is not None else []
if self.new_head is not None:
# sanity check
error_msg1 = "new_head_args must be a nn.module or a dictionary, use builder.get_head_block_args() to get the arg dict"
assert isinstance(self.new_head, dict) or isinstance(self.new_head, nn.Module), error_msg1
error_msg2 = "When new head is provided, the model arg must be a ModelConfig instead of nn.Module."
            assert isinstance(model, ModelConfig), error_msg2
# initialize logger
self.logger = _Logger(n_classes=n_classes, criteria=criteria, verbose=verbose)
def initialize(self, device: str, pretrained_param_path=None, n_threads=12):
"""
Initialize the model, optimizer and the scheduler.
Put the model on the specified device.
Optionally, load the pretrained params and replace the head module (if new_head is not None) for transfer learning.
:param device: which device to be trained on e.g. 'cpu' - on cpu, 'cuda:0' - on gpu 0, 'cuda:1' - on gpu 1
:param pretrained_param_path: path to pretrained model param
:param n_threads: configure the thread usage, only applicable when using cpu for computing
:return: void
"""
if device == 'cpu':
torch.set_num_threads(n_threads)
self.device = device
# move model to device before initialize the optimizers and scheduler
if self.agent == 'trainer':
self.model = self.model.to(device)
self.loss_module = self.loss_module.to(device)
# initialize the optimizer and scheduler
if self.optimizer is not None:
self.optimizer = self.optimizer(self.model.parameters())
# use scheduler
if self.scheduler is not None:
self.scheduler = self.scheduler(optimizer=self.optimizer)
# load the pretrained param if provided
if pretrained_param_path is not None:
# sanity check
error_msg = 'pretrained_param_path must end with .pth'
                assert isinstance(pretrained_param_path, str) and pretrained_param_path.endswith('.pth'), error_msg
meta_state = torch.load(f=pretrained_param_path, map_location='cpu')
# load the model state
# compatible with pure model state or the logger output
model_state = meta_state['model_state'] if 'model_state' in meta_state else meta_state
self.model.load_state_dict(model_state)
# turn on/off the requires_grads
for (param_name, param) in self.model.named_parameters():
requires_grad = True
for frozen_block in self.blocks_to_freeze:
if param_name.startswith(frozen_block): # decide which block to freeze based on the prefix
requires_grad = False
print("Param {} requires_grad was turned off".format(param_name))
param.requires_grad = requires_grad
# replace the old head with new head
if self.new_head is not None:
# sanity check, the model must have the head_block
error_msg = 'Model does not have the head_block. Head replacement only supports the mech created by the Builder class'
assert 'head_block' in [name[0] for name in self.model.named_modules()], error_msg
# set it to the new head
new_head = AvgPoolFCHead(**self.new_head) if isinstance(self.new_head, dict) else self.new_head
new_head = new_head.to(self.device)
self.model.head_block = new_head
print('Head module has been replaced.')
elif self.agent == 'evaluator':
# load the model
if not self.manual_load_model_param:
self.load_checkpoint()
self.model = self.model.to(device)
def load_model_params(self, model_state_dict: dict):
"""
Load pre-trained model params
:param model_state_dict: model state dict
:return: void
"""
self.model.load_state_dict(model_state_dict)
def load_checkpoint(self):
"""
Load the model check point directly from the checkpoint folder
:return: void
"""
# load metastate
metastate_dict = torch.load(os.path.join(self.checkpoint_folder, "{}_metastate_best.pth".format(self.prefix)),
map_location=torch.device('cpu'))
# load model param
self.model.load_state_dict(metastate_dict['model_state'])
# only load the optimizer, scheduler and logger for trainer
if self.agent == 'trainer':
# load the epoch number
self.base_epoch = metastate_dict['epoch']
# load optimizer and scheduler
if self.optimizer is not None:
self.optimizer: Optimizer
self.optimizer.load_state_dict(metastate_dict['optimizer_state'])
if self.scheduler is not None:
self.scheduler.load_state_dict(metastate_dict['scheduler_state'])
# load the logger
self.logger.load(metastate_dict['logger_state'])
def save_checkpoint(self, *args):
pass
def get_logger(self):
"""
Getter to get the Logger
:return: Logger
"""
return self.logger
def reset_logger(self):
"""
Reset the logger
:return: void
"""
self.logger.reset()
def use_notebook(self):
"""
Switch to notebook mode - affect the tqdm pbar
:return: void
"""
self.running_env = 'notebook'
def use_terminal(self):
"""
Switch to terminal mode - affect the tqdm pbar
:return: void
"""
self.running_env = 'terminal'
class Trainer(__BaseAgent):
"""
TODO: tutorials
TODO: make it able to choose micro/macro for model selection
"""
def __init__(self, model: nn.Module or Builder or ModelConfig, loss_module: nn.Module,
n_classes: int, criteria: str,
optimizer: partial or torch.optim.optimizer.Optimizer, scheduler: partial or None,
prefix: str, checkpoint_folder: str,
new_head=None, blocks_to_freeze=None, verbose=False):
"""
Constructor
:param model: model architecture (nn.Module) or a Builder/Config Instance that builds the model. For transfer learning, pass the Config
Instance.
:param loss_module: loss module
:param n_classes: number of classes
:param criteria: criteria for model selection: accuracy, precision, recall and f1 score (will use macro average)
        :param optimizer: parameter optimizer. DO NOT pass an instance (i.e. DO NOT do optimizer=Adam(...)); instead, pass the Class
        interface (i.e. optimizer=Adam) or, if you need to specify the optimizer params, use partial (e.g. optimizer=partial(Adam,
        lr=1e-5, ...)); instance initialization will be handled by the trainer
:param scheduler: learning rate scheduler. pass the interface instead of the instance
:param prefix: checkpoint naming prefix
:param checkpoint_folder: checkpoint
:param new_head: arg for transfer learning. If pass an nn.Module, the module will replace the current head block. The
:param blocks_to_freeze: arg for transfer learning. Pass the block prefix to freeze the blocks, e.g. ['init_block'] will freeze all
parameters whose name starting with 'init_block'.
:param verbose: whether to print out additional message for debugging
"""
super().__init__(model=model, loss_module=loss_module,
n_classes=n_classes, criteria=criteria, verbose=verbose,
optimizer=optimizer, scheduler=scheduler,
prefix=prefix, checkpoint_folder=checkpoint_folder,
new_head=new_head, blocks_to_freeze=blocks_to_freeze)
self.agent = 'trainer'
def train(self, datahandler: DataHandler, epochs, seed):
"""
Model training pipeline with loss & performance metrics logging & automatic checkpoint
:param datahandler: data handler
:param epochs: total number of training epochs
:param seed: seed for random state
:return:
"""
assert isinstance(self.optimizer, Optimizer), 'Need to call initialize() before train()!'
# fix random state
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# fix the cudnn backend (might break the code if pytorch refactored this)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# memory monitoring
memory_usage = {'train': -1, 'eval': -1}
# placeholder
epoch_recorder = {'train': {},
'eval': {}}
# main loop
for epoch in self.pbar[self.running_env](range(self.base_epoch, epochs), total=epochs - self.base_epoch, desc='Epochs'):
print("=====Start Epoch {}======\n".format(epoch))
for phase in ['train', 'eval']:
                # switch phase setting - only collect the feature/temporal importance during training time
if phase == 'train':
self.model.train()
self.loss_module.train()
else:
self.model.eval()
self.loss_module.eval() # might not be necessary as the loss function does not hold any params
pbar_msg = 'Epoch {} Phase {}'.format(epoch, phase)
for i, mini_batch in self.pbar[self.running_env](enumerate(datahandler[phase]), total=len(datahandler[phase]), desc=pbar_msg):
# zero the gradient
if phase == 'train':
self.optimizer.zero_grad()
# grab X, Y
if isinstance(mini_batch, dict):
X_mini_batch, Y_mini_batch = mini_batch['x'], mini_batch['y']
else:
X_mini_batch, Y_mini_batch = mini_batch
# move to computing device
X_mini_batch = X_mini_batch.to(self.device)
Y_mini_batch = Y_mini_batch.to(self.device)
# enable the gradient flow if in training phase
with torch.set_grad_enabled(phase == 'train'):
# run through model and get the output_scores
output_scores = self.model(X_mini_batch)
# TODO: refactor the loss_module forward pass
loss = self.loss_module(X=output_scores, Y=Y_mini_batch)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
self.optimizer.step()
# batch logging - ground truth, prediction, batch loss
loss_cpu = loss.cpu().detach().tolist()
ground_truth = Y_mini_batch.cpu().detach().view(-1).tolist()
predictions = output_scores.cpu().detach().argmax(-1).view(-1).tolist()
self.logger.login_batch(phase=phase, ground_truth=ground_truth, predictions=predictions,
loss=loss_cpu)
if self.device != 'cpu':
memory_usage[phase] = torch.cuda.memory_allocated(device=self.device)
# clear the memory
del loss
del output_scores
del X_mini_batch
del Y_mini_batch
# ITERATION END - login the compute the performance metrics
epoch_loss, acc, selected_metric, find_better_model = self.logger.login_epoch(phase=phase, criteria=self.criteria, epoch=epoch)
epoch_recorder[phase]['loss'] = np.round(epoch_loss, 4)
epoch_recorder[phase]['acc'] = np.round(acc, 4)
epoch_recorder[phase]['perf'] = np.round(selected_metric, 4)
# EPOCH END - done with train & eval
# report
print("+++ Epoch {} Report +++".format(epoch))
print("Train Loss: {} Train Accuracy: {} Train {}: {}".format(epoch_recorder['train']['loss'], epoch_recorder['train']['acc'],
self.criteria, epoch_recorder['train']['perf']))
print("Eval Loss: {} Eval Accuracy: {} Eval {}: {}".format(epoch_recorder['eval']['loss'], epoch_recorder['eval']['acc'],
self.criteria, epoch_recorder['eval']['perf']))
if self.device != 'cpu':
print("Memory usage during train: {} | Memory usage during eval: {}".format(memory_usage['train'], memory_usage['eval']))
# scheduler step
if self.scheduler is not None:
if isinstance(self.scheduler, ReduceLROnPlateau):
                    self.scheduler.step(epoch_loss)  # `loss` was deleted after the batch loop; step on the epoch loss
else:
self.scheduler.step()
# checkpoint
if find_better_model:
print('+++')
print("Better model found at epoch={}, eval accuracy={}, eval {}={}".format(epoch, acc, self.criteria, selected_metric))
self.save_checkpoint(epoch=epoch, last_epoch=False)
print('Checkpoint saved.')
print('+++')
print("\n======End Epoch {}=======\n".format(epoch))
# for last epoch
if epoch == epochs - 1:
self.save_checkpoint(epoch, last_epoch=True)
def save_checkpoint(self, epoch, last_epoch=False):
"""
Save the model check point
:param epoch: epoch number
:param last_epoch: saving mode for last epoch
:return: void
"""
# epoch number, logger, model, optimizer and scheduler
metastate_dict = {'epoch': epoch + 1,
'logger_state': self.logger.get_logger_state(),
'model_state': self.model.state_dict()}
# optimizer and scheduler
if self.optimizer is not None:
self.optimizer: Optimizer
metastate_dict['optimizer_state'] = self.optimizer.state_dict()
if self.scheduler is not None:
metastate_dict['scheduler_state'] = self.scheduler.state_dict()
# model structure text and training date
string_rep = str(self.model)
with open(os.path.join(self.checkpoint_folder, '{}_model_arch.txt'.format(self.prefix)), 'w') as fp:
now = datetime.now()
now = now.strftime("%m/%d/%Y, %H:%M:%S")
fp.writelines(now + '\n')
fp.writelines(string_rep)
# save the meta state
        if not last_epoch:  # the best epoch so far
            torch.save(metastate_dict, os.path.join(self.checkpoint_folder, "{}_metastate_best.pth".format(self.prefix)))
        else:  # an additional checkpoint for the last epoch
            torch.save(metastate_dict, os.path.join(self.checkpoint_folder, "{}_metastate_last.pth".format(self.prefix)))
class Evaluator(__BaseAgent):
"""
TODO: describe workflow
"""
def __init__(self, model: nn.Module,
n_classes: int, criteria: str,
prefix: str, checkpoint_folder: str,
verbose=False, manual_load_model_param=False):
"""
Constructor
:param model: model architecture
:param n_classes: number of classes
:param criteria: criteria for model selection: accuracy, precision, recall and f1 score (will use macro average)
:param prefix: checkpoint naming prefix
:param checkpoint_folder: checkpoint directory
:param verbose: whether to print out additional message for debugging
:param manual_load_model_param: whether to manually load model param
"""
super().__init__(model=model, loss_module=None,
n_classes=n_classes, criteria=criteria, verbose=verbose,
optimizer=None, scheduler=None,
prefix=prefix, checkpoint_folder=checkpoint_folder)
self.agent = 'evaluator'
self.manual_load_model_param = manual_load_model_param
def evaluate(self, datahandler: DataHandler):
"""
Evaluation on the data.
        The evaluation pipeline computes accuracy, the selected criteria metric, and a confusion matrix.
        :param datahandler: data handler; the data should be loaded on the 'eval' key
        :return: dict with 'performance' (the comprehensive metric dict) and 'confusion_matrix'
"""
# main loop
for i, mini_batch in self.pbar[self.running_env](enumerate(datahandler['eval']), total=len(datahandler['eval']), desc='Evaluation'):
# grab X, Y
X_mini_batch, Y_mini_batch = mini_batch['x'], mini_batch['y']
# information
data_ids = None
if 'info' in mini_batch:
info = mini_batch['info']
if 'id' in info:
data_ids = info['id']
# ground truth
ground_truth = Y_mini_batch.cpu().view(-1).tolist()
# move to computing device
X_mini_batch = X_mini_batch.to(self.device)
# run through model and get the output_scores
output_scores = self.model(X_mini_batch)
prediction = output_scores.detach().argmax(-1).cpu().view(-1).tolist()
# batch logging - ground truth, prediction, batch loss
self.logger.login_batch(phase='eval', ground_truth=ground_truth, predictions=prediction,
loss=None, data_ids=data_ids)
# ITERATION END - login the compute the performance metrics
self.logger.login_epoch(phase='eval', criteria=self.criteria, epoch=0, evaluation=True)
self.report_evaluation_results()
# output
output = {'performance': self.get_comprehensive_metrics(),
'confusion_matrix': self.get_confusion_matrix()}
return output
def report_evaluation_results(self):
"""
Report the evaluation results
:return: void
"""
# report
print("Performance Report {}".format(self.prefix))
print("Accuracy: {}".format(self.logger.best_acc))
print("{} : {}".format(self.criteria.capitalize(), self.logger.best_criteria_metric))
def get_accuracy(self):
"""
Getter to get the evaluation accuracy
:return: void
"""
return self.logger.best_acc
def get_comprehensive_metrics(self):
"""
Getter to get the evaluation auc
:return: performance metric dict
"""
return copy.deepcopy(self.logger.best_comprehensive_metric)
def get_confusion_matrix(self):
"""
Getter to get the confusion matrix
:return: confusion matrix
"""
return self.logger.best_confusion_matrix['eval'].copy()
| 41.699229 | 145 | 0.607885 | 3,887 | 32,442 | 4.904039 | 0.125804 | 0.019043 | 0.007082 | 0.008394 | 0.332809 | 0.259154 | 0.208005 | 0.190903 | 0.164778 | 0.137236 | 0 | 0.003223 | 0.301893 | 32,442 | 777 | 146 | 41.752896 | 0.83844 | 0.268078 | 0 | 0.160221 | 0 | 0.002762 | 0.086806 | 0.00508 | 0 | 0 | 0 | 0.005148 | 0.019337 | 1 | 0.080111 | false | 0.002762 | 0.046961 | 0 | 0.168508 | 0.041436 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d743fc5ff11340168a1f5be064069815e115ae3 | 5,805 | py | Python | cogs/Competitive.py | DevvyDont/CraneDuels | a6c8ed9f2fd5dbc01cc3419856fae3d4a587bdc4 | [
"MIT"
] | null | null | null | cogs/Competitive.py | DevvyDont/CraneDuels | a6c8ed9f2fd5dbc01cc3419856fae3d4a587bdc4 | [
"MIT"
] | null | null | null | cogs/Competitive.py | DevvyDont/CraneDuels | a6c8ed9f2fd5dbc01cc3419856fae3d4a587bdc4 | [
"MIT"
] | null | null | null | from discord import Embed
from discord.errors import InvalidArgument
from discord.ext import commands
import json
import os.path
DEBUG = True
"""
Example of the structure of the json file for storing elos
If we ever want to add a new mode we can just add a new enum to use
self.rank_data = {
MODE_UNO: {
123456789: 420,
987654321: 690
},
MODE_CRANE: {
123456789: 888,
019293848: 1000
}
}
"""
MODE_CRANE = 'craneduels'
MODE_UNO = 'uno'
MODES = {
# MODE_UNO: "Toono",
MODE_CRANE: "Crane Duels"
}
ELO_JSON_PATH = 'config/competitive_rank_data.json'
DEFAULT_ELO = 650
MINIMUM_ELO = 300
SOFT_CAP_ELO = 2000
MAX_ELO = 100000 # Need this just for sanity purposes
RANK_RANGES = (
("Silver", MINIMUM_ELO, 600 ),
("Gold", 601, 950 ),
("Platinum", 951, 1400 ),
("Diamond", 1401, SOFT_CAP_ELO),
("Grandmaster", SOFT_CAP_ELO+1, MAX_ELO )
)
def getRankFromElo(elo: int):
elo = min(elo, MAX_ELO)
elo = max(elo, MINIMUM_ELO)
for rank_tuple in RANK_RANGES:
name, low, high = rank_tuple
if low <= elo <= high:
percent_through_rank = (elo-low)/(high-low)
div = 1
if percent_through_rank > .33:
div += 1
if percent_through_rank > .66:
div += 1
return name, div if elo <= SOFT_CAP_ELO else ""
raise InvalidArgument(f"Received elo out of bounds: {elo}")
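# Examples (a sketch): getRankFromElo(700) -> ("Gold", 1);
# getRankFromElo(2500) -> ("Grandmaster", "") since divisions stop above the soft cap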
class Competitive(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.rank_data = {} # Stores a dict that maps game/mode enum -> another dict thats ID -> elo, example of structure is above class decl
self.init_json_file()
# Checks the json file to make sure all games exist, fixes it if we're missing something
def check_json_file(self):
need_save = False
for mode in MODES:
if mode not in self.rank_data:
print(f"[Competitive.py] json missing mode: {mode}, initializing empty dict")
self.rank_data[mode] = {}
need_save = True
if need_save:
self.save_json_file()
def init_json_file(self):
if not os.path.exists(ELO_JSON_PATH):
with open(ELO_JSON_PATH, 'w') as f:
json.dump({}, f)
with open(ELO_JSON_PATH) as f:
self.rank_data = json.load(f)
print(f"[Competitive.py] Succecssfully loaded ranked data from {ELO_JSON_PATH}")
self.check_json_file()
def save_json_file(self):
with open(ELO_JSON_PATH, 'w') as f:
json.dump(self.rank_data, f, indent=4)
# Used to retrive the ELO of a user, take in discord id and the enum for what mode/game
def get_elo(self, user_id, mode_id):
mode_dict: dict = self.rank_data.get(mode_id)
# If the user isn't in there, they are default elo
return mode_dict.get(str(user_id), DEFAULT_ELO)
# Used to retrieve all ELOs of a user, returns a dict that maps mode id -> elo
def get_all_elos(self, user_id):
ret = {}
for mode, mode_data in self.rank_data.items():
ret[mode] = mode_data.get(str(user_id), DEFAULT_ELO)
return ret
# Updates a users elo in a given mode and changes it by delta, e.g. to increase someones elo by 5 make delta=5
def update_elo(self, user_id, mode_id, delta):
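        # Example (a sketch): update_elo(user_id, MODE_CRANE, +25) raises the stored
        # rating by 25, never dropping below MINIMUM_ELO, and persists the json file.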
old = self.get_elo(user_id, mode_id) # Get elo
elo = old
elo += delta # Update elo
if elo <= MINIMUM_ELO: # Don't let them go below the cap
elo = MINIMUM_ELO
self.rank_data[mode_id][str(user_id)] = elo # Update
self.save_json_file() # Save the json file with our changes
return elo
@commands.command(name='leaderboard')
async def _command_leaderboard(self, ctx):
msg = "```"
for mode, player_data in self.rank_data.items():
msg += f"{MODES[mode]}\n----------------------\n"
if not player_data:
msg += "Nobody is ranked yet :(\n"
ordered = dict(sorted(player_data.items(), key=lambda pelo: pelo[1], reverse=True))
place = 1
for id, elo in ordered.items():
member = await self.bot.fetch_user(int(id))
name = str(member) if member else f"Unknown ({id})"
rank, div = getRankFromElo(elo)
msg += f"{place}: {name} - {rank} {div} ({elo})\n"
place += 1
msg += "----------------------\n\n"
msg += "```"
await ctx.send(msg)
@commands.command(name='rank')
async def _command_rank(self, ctx):
elos = self.get_all_elos(ctx.author.id)
msg = ""
for game, elo in elos.items():
rank, div = getRankFromElo(elo)
msg += f"**{MODES[game]}**: `{rank} {div}` ({elo})\n"
await ctx.send(f"{ctx.author.mention}\n{msg}")
if DEBUG:
@commands.command(name='elochange')
async def _command_elochange(self, ctx, mode=None, delta: int=None):
            if mode is None or delta is None:  # "is None" so a delta of 0 is not mistaken for a missing argument
await ctx.send(f"Command usage: `!elochange [{' | '.join([m for m in MODES.keys()])}] <amount to change by>`")
return
if mode not in MODES:
await ctx.send("Invalid mode: `" + mode + "`")
return
new_elo = self.update_elo(ctx.author.id, mode, delta)
rank, div = getRankFromElo(new_elo)
await ctx.send(f"New elo for `{MODES[mode]}` is `{new_elo} ({rank} {div})`")
def setup(bot):
bot.add_cog(Competitive(bot))
| 30.552632 | 143 | 0.57261 | 801 | 5,805 | 4.002497 | 0.270911 | 0.027449 | 0.03743 | 0.014972 | 0.097629 | 0.091703 | 0.019339 | 0.019339 | 0.019339 | 0.019339 | 0 | 0.024762 | 0.311283 | 5,805 | 189 | 144 | 30.714286 | 0.777139 | 0.115245 | 0 | 0.083333 | 0 | 0.008333 | 0.139569 | 0.025923 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075 | false | 0 | 0.05 | 0 | 0.183333 | 0.016667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d7812855fb69eaa94b6d295abeec4a4d24cc06e | 4,203 | py | Python | discord-bot/music.py | rdebek/discord-bot | 2da5e4f776bb72cf08b7e9cdf0fa99bd62ddd299 | [
"MIT"
] | null | null | null | discord-bot/music.py | rdebek/discord-bot | 2da5e4f776bb72cf08b7e9cdf0fa99bd62ddd299 | [
"MIT"
] | null | null | null | discord-bot/music.py | rdebek/discord-bot | 2da5e4f776bb72cf08b7e9cdf0fa99bd62ddd299 | [
"MIT"
] | null | null | null | from discord.ext import commands
from player import Player
FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
'options': '-vn'}
class Music(commands.Cog):
def __init__(self, bot, color_theme):
self.players = []
self.bot = bot
self.color_theme = color_theme
async def get_player(self, ctx):
for item in self.players:
if item.guild == ctx.guild.id:
return item
player = Player(bot=self.bot, color_theme=self.color_theme, guild=ctx.guild.id)
self.players.append(player)
return player
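    # Hedged note on the design choice above: get_player is a lazy per-guild cache --
    # the first command from a guild constructs its Player, later commands reuse it.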
@commands.command(name="disconnect", aliases=["dc", "leave"])
async def _disconnect(self, ctx):
player = await self.get_player(ctx)
await player._disconnect(ctx)
@commands.command(name="clear")
async def _clear(self, ctx):
player = await self.get_player(ctx)
await player._clear(ctx)
@_disconnect.error
async def disconnect_error(self, ctx, error):
return await ctx.send("Couldn't leave for some reason.")
@commands.command(name="play", aliases=["p"])
async def play(self, ctx, *args):
player = await self.get_player(ctx)
await player.play(ctx, *args)
@play.error
async def error(self, ctx, error):
if "webpage" in str(error):
return await ctx.send("You probably entered a invalid link, try again.")
if "out of range" in str(error):
return await ctx.send("Couldn't find the phrase you're looking for.")
if "attribute" in str(error):
return await ctx.send("Please be a little slower with the commands, I can't keep up.")
else:
raise error
@commands.command(name="volume", aliases=["v"])
async def _volume(self, ctx, volume: int):
player = await self.get_player(ctx)
await player._volume(ctx, volume)
@commands.command(name="queue", aliases=["q"])
async def _queue(self, ctx):
player = await self.get_player(ctx)
await player._queue(ctx)
@commands.command(name="skip", aliases=["s"])
async def _skip(self, ctx, *args):
player = await self.get_player(ctx)
await player._skip(ctx, *args)
@commands.command(name="song", aliases=["now", "name"])
async def _song(self, ctx):
player = await self.get_player(ctx)
await player._song(ctx)
@commands.command(name="lyrics", aliases=[])
async def _lyrics(self, ctx):
player = await self.get_player(ctx)
await player._lyrics(ctx)
@_lyrics.error
async def lyrics_error(self, ctx, error):
if isinstance(error, commands.errors.CommandInvokeError):
return await ctx.send("Lyrics couldn't be found.")
else:
raise error
@commands.command(name="remove", aliases=["rm", "r"])
async def _remove(self, ctx, song_number: int):
player = await self.get_player(ctx)
await player._remove(ctx, song_number)
@commands.command(name="skip_to", aliases=["st"])
async def _skip_to(self, ctx, song_number: int):
player = await self.get_player(ctx)
await player._skip_to(ctx, song_number)
@_remove.error
@_skip_to.error
    async def number_error(self, ctx, error):  # renamed: a second `error` would shadow the play error handler above
if isinstance(error, commands.errors.BadArgument):
await ctx.send("Please give me a valid number ._.")
else:
raise error
@commands.command(name="play_top", aliases=["pt"])
async def _play_top(self, ctx, *args):
player = await self.get_player(ctx)
await player._play_top(ctx, *args)
@commands.command(name="play_skip", aliases=["ps"])
async def _play_skip(self, ctx, *args):
player = await self.get_player(ctx)
await player._play_skip(ctx, *args)
@commands.command(name="shuffle", aliases=["randomize"])
async def _shuffle(self, ctx):
player = await self.get_player(ctx)
await player._shuffle(ctx)
@commands.command(name="pause", aliases=["stop"])
async def _pause(self, ctx):
player = await self.get_player(ctx)
await player._pause(ctx) | 34.735537 | 98 | 0.630502 | 552 | 4,203 | 4.663043 | 0.208333 | 0.059052 | 0.103341 | 0.097902 | 0.458819 | 0.432401 | 0.372183 | 0.325175 | 0.307692 | 0.25136 | 0 | 0.000943 | 0.243398 | 4,203 | 121 | 99 | 34.735537 | 0.808491 | 0 | 0 | 0.22449 | 0 | 0 | 0.113226 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010204 | false | 0 | 0.020408 | 0 | 0.112245 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d7937c2b3a48324e9a6ff27ade9cf323de26ee1 | 3,895 | py | Python | TSP/TSPA.py | hiiroo/ModernHeuristics | 2c02a76a571c1608a2096fed7491f60e0d3f9bde | [
"MIT"
] | null | null | null | TSP/TSPA.py | hiiroo/ModernHeuristics | 2c02a76a571c1608a2096fed7491f60e0d3f9bde | [
"MIT"
] | null | null | null | TSP/TSPA.py | hiiroo/ModernHeuristics | 2c02a76a571c1608a2096fed7491f60e0d3f9bde | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2019 Ali Mert Ceylan, Adapted from original resources provided
by Korhan Karabulut for COMP 5658 Modern Heuristics Course
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from math import exp
from random import random
from tqdm import tqdm
from TSPInstance import TSPInstance
from PermutationGenome import PermutationGenome
class TSPA:
"""
public void run( int tMax, TSPInstance instance ) {
PermutationGenome best = null;
int bestFitness = Integer.MAX_VALUE;
int temperature = tMax + 1;
long startTime = System.currentTimeMillis();
PermutationGenome vc = new PermutationGenome( 52, instance );
for ( int t = 0; t < tMax; t++ ) {
vc.twoOpt();
for ( int i = 0; i < 1000; i++ ) {
if ( vc.getFitness() == 7542 ) {
break;
}
PermutationGenome vn = vc.getRandomNeighbor();
vn.twoOpt();
if ( vn.getFitness() < vc.getFitness() ) {
vc = vn;
if ( vc.getFitness() < bestFitness ) {
bestFitness = vc.getFitness();
best = vc;
}
}
else {
int diff = vc.getFitness() - vn.getFitness();
if ( Math.random() < Math.exp( diff / temperature ) ) {
vc = vn;
}
}
}
temperature--;
}
long elapsedTime = System.currentTimeMillis() - startTime;
System.out.println( "\nSimulated Annealing ran for " + elapsedTime + " milliseconds" );
System.out.println( "Best Fitness: " + bestFitness );
System.out.println( best );
}
"""
@staticmethod
def run(tMax:int, instance:TSPInstance):
best = None
best_fitness = 1e10
temperature = tMax + 1
vc = PermutationGenome.from_instance(52, instance)
for t in tqdm(range(tMax)):
vc.two_opt()
for i in range(1000):
                if vc.fitness() == 7542: break  # fitness is a method; comparing the bound method itself was never True
vn = vc.random_neighbor() #best_neighbor()
vn.two_opt()
if(vn.fitness() < vc.fitness()):
vc = vn
if(vc.fitness() < best_fitness):
best_fitness = vc.fitness()
best = vc
else:
diff = vc.fitness() - vn.fitness()
if(random() < exp(diff/temperature)):
vc = vn
temperature-=1
print("Simulated Annealing")
print(best_fitness)
print(best)
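# Hedged worked example of the Metropolis acceptance step above (numbers are
# illustrative, not from a real run): with vc.fitness()=8000 and vn.fitness()=8100
# the worse neighbor gives diff = -100; at temperature=50,
# exp(-100/50) ~= 0.135, so it is still accepted ~13.5% of the time, and that
# chance shrinks as the temperature counts down toward 0.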
if __name__ == "__main__":
tsp_instance = TSPInstance("berlin52.tsp")
tsp_instance.read_data()
TSPA.run(50, tsp_instance)
| 35.733945 | 95 | 0.576637 | 427 | 3,895 | 5.208431 | 0.409836 | 0.039568 | 0.021583 | 0.007194 | 0.029676 | 0.029676 | 0 | 0 | 0 | 0 | 0 | 0.015625 | 0.342747 | 3,895 | 108 | 96 | 36.064815 | 0.853125 | 0.673684 | 0 | 0.057143 | 0 | 0 | 0.03115 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.142857 | 0 | 0.2 | 0.085714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d79f0e4cbdb7d5315b91cbc4c77238af366e46c | 5,024 | py | Python | theforce/deprecated/regression/kernels.py | changwmyung/AutoForce | c8a429b05685841e1f17e8655b981753e1d086be | [
"MIT"
] | 19 | 2019-10-21T06:56:22.000Z | 2022-03-14T06:43:42.000Z | theforce/deprecated/regression/kernels.py | 17DongGeonKim/AutoForce | c8a429b05685841e1f17e8655b981753e1d086be | [
"MIT"
] | 8 | 2019-10-23T07:48:49.000Z | 2022-03-23T03:47:44.000Z | theforce/deprecated/regression/kernels.py | 17DongGeonKim/AutoForce | c8a429b05685841e1f17e8655b981753e1d086be | [
"MIT"
] | 4 | 2021-09-25T11:28:44.000Z | 2022-02-25T06:50:48.000Z |
import torch
from torch.nn import Module, Parameter
from theforce.regression.algebra import positive, free_form
import warnings
class RBF(Module):
"""
Parameters: scale, variance
"""
def __init__(self, scale, variance):
super(RBF, self).__init__()
self._scale = Parameter(free_form(scale))
self._variance = Parameter(free_form(variance))
def cov_matrix(self, x, xx, d_dtheta=None, wrt=0, sumd=True):
"""
It returns K if d_dtheta=None.
If d_dtheta is given it means that some derivative should be
calculated. "theta" could be x, xx, or sth else.
If d_dtheta = ones_like(x) and wrt=0 (ones_like(xx) and wrt=1)
it returns dK/dx (dK/dxx).
If d_dtheta has a dimensionality greater than 2 it means that
it holds derivative of x (or xx depending on wrt) wrt sth else.
Then, it will return dK/dtheta.
It will sum over the features dimension if sumd=True.
"""
assert wrt == 0 or wrt == 1
scale = positive(self._scale)
r = (x[:, None, ] - xx[None, ]) / scale
cov = (-(r**2).sum(dim=-1)/2).exp() * self.diag()
if d_dtheta is not None:
r = r / scale
for _ in range(d_dtheta.dim()-2):
r = torch.unsqueeze(r, -1)
cov = torch.unsqueeze(cov, -1)
_derivative = (r*torch.unsqueeze(d_dtheta, dim=1-wrt)
) * (-(-1)**(wrt))
if sumd:
cov = cov * _derivative.sum(dim=2)
else:
cov = torch.unsqueeze(cov, dim=2) * _derivative
return cov
def matrices(self, x, xx, d_dx=False, d_dxx=False, d_dxdxx=False):
# covariance matrix
iscale = 1.0/positive(self._scale)
r = (x[:, None, ] - xx[None, ]) * iscale
cov = (-(r**2).sum(dim=-1)/2).exp() * self.diag()
# derivatives
_dx = _dxx = _dxdxx = None
if d_dx or d_dxx or d_dxdxx:
rr = (r*iscale)
if d_dx or d_dxx:
cov_r = cov[..., None] * rr
if d_dx:
_dx = -cov_r
if d_dxx:
_dxx = cov_r
if d_dxdxx:
rirj = -rr[..., None] * rr[..., None, :]
                d = torch.arange(iscale.size(0))  # arange already yields a LongTensor; wrapping it in torch.LongTensor only copies
rirj[..., d, d] = rirj[..., d, d] + iscale**2
_dxdxx = rirj * cov[..., None, None]
return cov, _dx, _dxx, _dxdxx
def diag(self):
return positive(self._variance)
def diag_derivatives(self, d_dtheta):
warnings.warn(
'diag_derivatives in RBF is deprecated! It may be incorrect!')
scale = positive(self._scale)
for _ in range(d_dtheta.dim()-2):
scale = torch.unsqueeze(scale, -1)
return ((d_dtheta / scale)**2).sum() * self.diag()
    def extra_repr(self):
        # nn.Module expects extra_repr to *return* a string; printing returned None
        return 'RBF parameters: scale: {}, variance: {}'.format(
            positive(self._scale).data, positive(self._variance).data)
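# Hedged numeric check of the RBF form above: cov = variance * exp(-||(x - xx)/scale||^2 / 2),
# so with scale=1, variance=1 and a single feature gap of 1.0, K ~= exp(-0.5) ~= 0.607.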
class Cauchy(Module):
def __init__(self, scale, variance):
super(Cauchy, self).__init__()
self._scale = Parameter(free_form(scale))
self._variance = Parameter(free_form(variance))
def matrices(self, x, xx, d_dx=False, d_dxx=False, d_dxdxx=False):
# covariance matrix
scale = positive(self._scale)
r = (x[:, None, ]-xx[None, ])/scale
cov = 1.0/(1.0+(r**2).sum(dim=-1))
# derivatives
_dx = _dxx = _dxdxx = None
if d_dx or d_dxx:
a = (-2*r*cov[..., None]**2/scale) * self.diag()
if d_dx:
_dx = a
if d_dxx:
_dxx = -a
if d_dxdxx:
_dxdxx = -(8*r[..., None, :]*r[..., None]*cov[..., None, None] - 2*torch.eye(scale.size(0))
)*cov[..., None, None]**2/(scale[None, ]*scale[:, None]) * self.diag()
cov = self.diag()*cov
return cov, _dx, _dxx, _dxdxx
def diag(self):
return positive(self._variance)
def test_if_works():
from theforce.regression.algebra import jitcholesky
variance = torch.tensor(1.0)
scale = torch.ones(7)
kern = RBF(scale, variance)
X = torch.rand(1000, 7)
K = kern.cov_matrix(X, X)
L, ridge = jitcholesky(K)
print('ridge for cholesky decomposition: {}\n'.format(ridge))
print(kern, '\n')
#
X1 = torch.rand(19, 7)
X2 = torch.rand(33, 7)
dX = torch.rand(33, 7, 3)
K = kern.cov_matrix(X1, X2, d_dtheta=dX, wrt=1, sumd=False)
print(K.shape)
dX = torch.ones_like(X1)
K = kern.cov_matrix(X1, X2, d_dtheta=dX, wrt=0, sumd=False)
print(K.shape)
a, b, c, d = kern.matrices(X1, X2, True, True, True)
print([p.shape for p in [a, b, c, d]])
#--------------------------------------
kern = Cauchy(scale, variance)
a, b, c, d = kern.matrices(X1, X2, True, True, True)
print([p.shape for p in [a, b, c, d]])
if __name__ == '__main__':
test_if_works()
| 32.205128 | 103 | 0.540406 | 697 | 5,024 | 3.733142 | 0.195122 | 0.016141 | 0.017294 | 0.006149 | 0.39854 | 0.351268 | 0.32475 | 0.32475 | 0.29362 | 0.29362 | 0 | 0.019828 | 0.307325 | 5,024 | 155 | 104 | 32.412903 | 0.727874 | 0.113654 | 0 | 0.371429 | 0 | 0 | 0.034999 | 0 | 0 | 0 | 0 | 0 | 0.009524 | 1 | 0.095238 | false | 0 | 0.047619 | 0.019048 | 0.219048 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d7f4790c6df62a6fbce782e191af0e7f88e6ecd | 21,431 | py | Python | RLBotPack/Kamael/Kamael.py | robbai/RLBotPack | 33afb2e28ff76e6c5774c03be20bdcdd356fb9d0 | [
"MIT"
] | null | null | null | RLBotPack/Kamael/Kamael.py | robbai/RLBotPack | 33afb2e28ff76e6c5774c03be20bdcdd356fb9d0 | [
"MIT"
] | null | null | null | RLBotPack/Kamael/Kamael.py | robbai/RLBotPack | 33afb2e28ff76e6c5774c03be20bdcdd356fb9d0 | [
"MIT"
] | null | null | null | import math
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from rlbot.utils.game_state_util import GameState, BallState, CarState, Physics, Vector3 as vector3, Rotator
from Utilities import *
from States import *
import cProfile, pstats, io
import time
import numpy as np
def profile(fnc):
"""A decorator that uses cProfile to profile a function"""
def inner(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
retval = fnc(*args, **kwargs)
pr.disable()
s = io.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
return retval
return inner
class Kamael(BaseAgent):
def initialize_agent(self):
self.controller_state = None #SimpleControllerState()
self.me = physicsObject()
self.ball = physicsObject()
self.me.team = self.team
self.allies = []
self.enemies = []
self.start = 5
self.flipStart = 0
self.flipping = False
self.controller = None
self.gameInfo = None
self.onSurface = False
self.boosts = []
self.bigBoosts = []
self.fieldInfo = []
self.positions = []
self.time = 0
self.deltaTime = 0
self.maxSpd = 2300
self.ballPred = []
self.oldPreds = []
self.selectedBallPred = None
self.ballDelay = 0.00001
self.renderCalls = []
self.ballPredObj = None
self.forward = True
self.velAngle = 0
self.onWall = False
self.wallLimit = 90
self.stateTimer = 0
self.contested = True
self.flipTimer = 0
self.goalPred = None
self.currentSpd = 1
#octane hitbox
self.hitbox_set = False
self.carLength = 118.007
self.carWidth = 84.2
self.carHeight = 36.159
self.defaultElevation = 17.1
        self.groundCutOff = 120  # 93+(self.carHeight*.8)
self.ballGrounded = False
self.closestEnemyToMe = None
self.closestEnemyToMeDistance = math.inf
self.closestEnemyToBall = None
self.closestEnemyDistances = [0,0,0,0,0]
self.enemyAttacking = False
self.enemyBallInterceptDelay = 0
self.enemyPredTime = 0
self.closestEnemyToBallDistance = math.inf
#self.enemyBallTime = 0
self.enemyTargetVec = Vector([0,0,0])
self.contestedThreshold = 300
self.superSonic = False
self.wallShot = False
self.openGoal = False
self.boostConsumptionRate = 33.3
self.boostAccelerationRate = 991.666
self.allowableJumpDifference = 65
self.singleJumpLimit = 233 + self.defaultElevation + self.allowableJumpDifference #233 = maximum height gained from single jump
self.doubleJumpLimit = 498 + self.defaultElevation+ self.allowableJumpDifference #498 = maximum height gained from double jump
#self.jumpLimit = 280
self.wallShotsEnabled = True
self.DoubleJumpShotsEnabled = True
# if self.team == 1:
# self.DoubleJumpShotsEnabled = False
self.touch = None
self.targetDistance = 1500
self.fpsLimit = 1/120
self.gravity = -650
self.jumpPhysics = physicsObject()
self.hits = []
self.fakeDeltaTime = 1/60
self.accelerationTick = self.boostAccelerationRate*(1/60)
self.currentHit = None
self.resetLimit = 5
self.resetCount = 0
self.resetTimer = 0
self.timid = False
self.dribbling = False
self.goalward = False
self.stubbornessTimer = 0
self.stubbornessMax = 600
        self.stubbornessMin = 0
self.stubborness = self.stubbornessMin
self.activeState = PreemptiveStrike(self)
self.contestedTimeLimit = .5
self.demoSpawns = [[Vector([-2304, -4608,0]),Vector([2304, -4608,0])],[Vector([2304, 4608,0]),Vector([-2304, 4608,0])]]
self.rotationNumber = 1
# self.dtype = [('physics', [('location', '<f4', 3), ('rotation', [('pitch', '<f4'), ('yaw', '<f4'), ('roll', '<f4')]), ('velocity', '<f4', 3), ('angular_velocity', '<f4', 3)]), ('game_seconds', '<f4')]
# self.Dtype = np.dtype([('physics', [('location', '<f4', 3),
# ('rotation', [('pitch', '<f4'), ('yaw', '<f4'), ('roll', '<f4')]),
# ('velocity', '<f4', 3),
# ('angular_velocity', '<f4', 3)]),
# ('game_seconds', '<f4')])
self.reachLength = 120
self.debugging = False
self.angleLimit = 60
#print(self.get_match_settings())
# self.me.velocity = Vector([0,2300,0])
# print("max vels")
# print(jumpSimulatorNormalizing(self,2))
# print(jumpSimulatorNormalizing(self, 2, doubleJump=False))
#
# print("stationary vel")
# self.me.velocity = Vector([0, 0, 0])
# print(jumpSimulatorNormalizing(self, 2))
# print(jumpSimulatorNormalizing(self, 2,doubleJump = False))
def init_match_config(self, match_config: 'MatchConfig'):
self.matchSettings = match_config
def demoRelocation(self,car):
#print("running demo relocation")
if car.team == 0:
if distance2D(self.ball.location,self.demoSpawns[0][0]) < distance2D(self.ball.location,self.demoSpawns[0][1]):
return self.demoSpawns[0][0]
else:
return self.demoSpawns[0][1]
else:
if distance2D(self.ball.location, self.demoSpawns[1][0]) < distance2D(self.ball.location,
self.demoSpawns[1][1]):
return self.demoSpawns[1][0]
else:
return self.demoSpawns[1][1]
def getActiveState(self):
if type(self.activeState) == LeapOfFaith:
return 0
if type(self.activeState) == PreemptiveStrike:
return 1
if type(self.activeState) == GetBoost:
return 2
if type(self.activeState) == GroundAssault:
return 3
if type(self.activeState) == GroundShot:
return 4
if type(self.activeState) == HolyProtector:
return 5
if type(self.activeState) == BlessingOfDexterity:
return 6
def setHalfFlip(self):
self.activeState = BlessingOfDexterity(self)
def setJumpPhysics(self):
car_up = Vector([0, 0, 1]).align_to(self.me.rotation)
self.jumpPhysics.location = self.me.location
self.jumpPhysics.velocity = self.me.velocity + car_up.scale(300)
self.jumpPhysics.avelocity = self.me.avelocity
def calcDeltaV(self, position, time):
carPos = self.me.location
carVel = self.jumpPhysics.velocity
return Vector([
(position[0] - carVel[0] * time - carPos[0]) / (0.5 * time * time),
(position[1] - carVel[1] * time - carPos[1]) / (0.5 * time * time),
(position[2] - carVel[2] * time - carPos[2]) / (0.5 * time * time) - self.gravity,
])
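    # Hedged derivation note for calcDeltaV above: each component solves the
    # kinematics p = p0 + v*t + 0.5*a*t^2 for a, i.e. a = (p - p0 - v*t)/(0.5*t^2);
    # gravity is then subtracted from z so the result is the extra acceleration
    # the car itself must supply to reach `position` in `time`.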
def determineFacing(self):
offset = self.me.location + self.me.velocity.normalize().scale(500)
loc = toLocal(offset,self.me)
angle = correctAngle(math.degrees(math.atan2(loc[1],loc[0])))
if abs(angle) > 90:
if self.currentSpd <= self.stubborness:
self.forward = True
else:
self.forward = False
#self.forward = False
else:
self.forward = True
#self.forward = False
self.velAngle = angle
def setPowershot(self,delay,target):
self.activeState = RighteousVolley(self,delay,target)
def setJumping(self,targetType,target = None):
_time = self.time
if _time - self.flipTimer >= 1.9:
self.activeState = LeapOfFaith(self, targetType,target = target)
self.flipTimer = _time
def setDashing(self,target):
self.activeState = WaveDashing(self,target)
def getCurrentSpd(self):
return self.me.velocity.magnitude()
def updateSelectedBallPrediction(self,ballStruct):
x = physicsObject()
x.location = Vector([ballStruct.physics.location.x, ballStruct.physics.location.y, ballStruct.physics.location.z])
x.velocity = Vector([ballStruct.physics.velocity.x, ballStruct.physics.velocity.y, ballStruct.physics.velocity.z])
x.rotation = Vector([ballStruct.physics.rotation.pitch, ballStruct.physics.rotation.yaw, ballStruct.physics.rotation.roll])
x.avelocity = Vector([ballStruct.physics.angular_velocity.x, ballStruct.physics.angular_velocity.y, ballStruct.physics.angular_velocity.z])
x.local_location = localizeVector(x.location, self.me)
self.ballPredObj = x
def preprocess(self, game):
self.oldPreds = self.ballPred
self.ballPred = self.get_ball_prediction_struct()
self.players = [self.index]
car = game.game_cars[self.index]
self.timid = False
self.deltaTime = clamp(1, self.fpsLimit, game.game_info.seconds_elapsed - self.time)
self.time = game.game_info.seconds_elapsed
self.me.demolished = car.is_demolished
if not car.is_demolished:
self.me.location = Vector([car.physics.location.x, car.physics.location.y, car.physics.location.z])
self.me.velocity = Vector([car.physics.velocity.x, car.physics.velocity.y, car.physics.velocity.z])
self.me.rotation = Vector([car.physics.rotation.pitch, car.physics.rotation.yaw, car.physics.rotation.roll])
self.me.avelocity = Vector([car.physics.angular_velocity.x, car.physics.angular_velocity.y, car.physics.angular_velocity.z])
self.me.boostLevel = car.boost
self.onSurface = car.has_wheel_contact
self.superSonic = car.is_super_sonic
self.currentSpd = clamp(2300, 1, self.getCurrentSpd())
else:
self.me.location = self.demoRelocation(car)
self.me.velocity = Vector([0,0,0])
self.me.rotation = Vector([0,0,0])
self.me.avelocity = Vector([0,0,0])
self.me.boostLevel = 34
self.onSurface = True
self.superSonic = False
self.currentSpd = 0.0001
#print(self.me.rotation[0])
if not self.hitbox_set:
self.fieldInfo = self.get_field_info()
self.carLength = car.hitbox.length
self.carWidth = car.hitbox.width
self.carHeight = car.hitbox.height
self.groundCutOff = 93+(self.carHeight*.72)
self.hitbox_set = True
self.reachLength = 85+(car.hitbox.length*.665)
print(f"Kamael on team {self.team} hitbox (length:{self.carLength} width:{self.carWidth} height:{self.carHeight}) ")
if self.stubbornessTimer > 0:
self.stubbornessTimer -= self.deltaTime
if self.stubbornessTimer <=0:
self.stubborness = self.stubbornessMin
ball = game.game_ball.physics
self.ball.location = Vector([ball.location.x, ball.location.y, ball.location.z])
self.ball.velocity = Vector([ball.velocity.x, ball.velocity.y, ball.velocity.z])
self.ball.rotation = Vector([ball.rotation.pitch, ball.rotation.yaw, ball.rotation.roll])
self.ball.avelocity = Vector([ball.angular_velocity.x, ball.angular_velocity.y, ball.angular_velocity.z])
self.me.matrix = rotator_to_matrix(self.me)
self.ball.local_location = localizeVector(self.ball.location,self.me)
ball.lastTouch = game.game_ball.latest_touch.time_seconds
ball.lastToucher = game.game_ball.latest_touch.player_name
touch = ballTouch(game.game_ball.latest_touch)
if not self.touch:
self.touch = touch
if self.touch != touch:
self.touch = touch
self.allies.clear()
self.enemies.clear()
for i in range(game.num_cars):
if i != self.index:
car = game.game_cars[i]
_obj = physicsObject()
_obj.index = i
_obj.team = car.team
_obj.demolished = car.is_demolished
if not car.is_demolished:
_obj.location = Vector([car.physics.location.x, car.physics.location.y, car.physics.location.z])
_obj.velocity = Vector([car.physics.velocity.x, car.physics.velocity.y, car.physics.velocity.z])
_obj.rotation = Vector([car.physics.rotation.pitch, car.physics.rotation.yaw, car.physics.rotation.roll])
_obj.avelocity = Vector([car.physics.angular_velocity.x, car.physics.angular_velocity.y, car.physics.angular_velocity.z])
_obj.boostLevel = car.boost
_obj.local_location = localizeVector(_obj,self.me)
_obj.onSurface = car.has_wheel_contact
else:
#print(f"relocated demo'd player {car.name}")
_obj.location = self.demoRelocation(car)
_obj.velocity = Vector([0, 0, 0])
_obj.rotation = Vector([0, 0, 0])
_obj.avelocity = Vector([0, 0, 0])
_obj.boostLevel = 33
_obj.onSurface = True
if car.team == self.team:
self.allies.append(_obj)
else:
self.enemies.append(_obj)
self.gameInfo = game.game_info
self.boosts = []
self.bigBoosts = []
for index in range(self.fieldInfo.num_boosts):
packetBoost = game.game_boosts[index]
fieldInfoBoost = self.fieldInfo.boost_pads[index]
boostStatus = False
if packetBoost.timer <=0:
if packetBoost.is_active:
boostStatus = True
boostLocation = [fieldInfoBoost.location.x,fieldInfoBoost.location.y,fieldInfoBoost.location.z]
boost_obj = Boost_obj([fieldInfoBoost.location.x,fieldInfoBoost.location.y,fieldInfoBoost.location.z],fieldInfoBoost.is_full_boost, boostStatus)
self.boosts.append(boost_obj)
if boost_obj.bigBoost:
self.bigBoosts.append(boost_obj)
self.onWall = False
self.wallShot = False
if self.onSurface:
if self.me.location[2] >= self.wallLimit:
self.onWall = True
#if type(self.activeState) != PreemptiveStrike:
self.hits = findHits(self, self.groundCutOff, self.singleJumpLimit,self.doubleJumpLimit) #findHits(self, self.groundCutOff, self.jumpLimit)
# for i in range(1000):
# convertToArray(self)
# for i in range(1000):
# newConvertToArray(self)
#self.ballPred = newConvertToArray(self)
#self.hits = findHits_testing(self, self.groundCutOff, self.jumpLimit)
# print("==========")
# for each in self.hits:
# if each != None:
# print(each)
self.determineFacing()
self.goalPred = None
self.gravity = game.game_info.world_gravity_z
self.dribbling = dirtyCarryCheck(self)
self.findClosestToEnemies()
self.resetCount +=1
if self.resetCount >= self.resetLimit:
findEnemyHits(self)
self.resetCount = 0
else:
self.enemyBallInterceptDelay = self.enemyPredTime - self.time
drawAsterisks(self.enemyTargetVec,self)
def findClosestToEnemies(self):
if len(self.enemies) > 0:
self.closestEnemyToBall, self.closestEnemyToBallDistance = findEnemyClosestToLocation2D(self,self.ball.location)
self.closestEnemyToMe, self.closestEnemyToMeDistance = findEnemyClosestToLocation2D(self, self.me.location)
self.contested = False
self.enemyAttacking = False
if self.closestEnemyToBallDistance <=self.contestedThreshold:
self.contested = True
self.enemyAttacking = True
elif self.enemyAttackingBall():
self.enemyAttacking = True
if self.closestEnemyToBall != None:
closestEnemyToBallTargetDistance = distance2D(self.enemyTargetVec,self.closestEnemyToBall.location)
#self.closestEnemyDistances.append(self.closestEnemyToBallDistance)
self.closestEnemyDistances.append(closestEnemyToBallTargetDistance)
del self.closestEnemyDistances[0]
else:
self.closestEnemyToBall = self.me
self.closestEnemyToMe = self.me
self.closestEnemyToBallDistance = 0
self.closestEnemyToMeDistance = 0
self.contested = False
self.enemyAttacking = False
# if self.enemyBallInterceptDelay != 0:
# print(f"{self.enemyBallInterceptDelay}")
def enemyAttackingBall(self):
current = math.inf
for each in self.closestEnemyDistances:
if each < current:
current = each
else:
return False
return True
def wallHyperSpeedJump(self):
controls = []
timers = []
if self.forward:
throttle = 1
pitch = 1
else:
throttle = -1
pitch = -1
controls.append(SimpleControllerState(jump=True,throttle=throttle))
timers.append(self.fakeDeltaTime*1.5)
controls.append(SimpleControllerState(jump = False,throttle = throttle))
timers.append(self.fakeDeltaTime/2)
controls.append(SimpleControllerState(jump=True,pitch=-pitch,throttle=throttle))
timers.append(self.fakeDeltaTime )
self.activeState = Action_chain(self, controls, timers)
def createJumpChain(self, timeAlloted, targetHeight, jumpSim):
# targetHeight,targetHeightTimer,heightMax,maxHeightTime
controls = []
timers = []
pitch = 0
if jumpSim[2] <= self.singleJumpLimit:
pitch = 1
targetTime = timeAlloted-self.fakeDeltaTime*2
if jumpSim[1] != 0:
targetTime = min(timeAlloted-self.fakeDeltaTime*2,jumpSim[1]-self.fakeDeltaTime*2)
pitch = 0
#print("new timer mechanism")
# if abs(targetHeight - jumpSim[0]) < 10:
# if abs(jumpSim[1] - timeAlloted) <= self.fakeDeltaTime :
#print(f"possibly ideal jump")
#targetTime = jumpSim[1]
if self.debugging:
print(f"{jumpSim[2]} height in {targetTime} current time {self.time}")
# if targetTime != jumpSim[1]:
# print(f"Guess we're winging it! {self.currentHit.pred_vector[2]} {jumpSim[2]}")
#print(targetTime)
controls.append(SimpleControllerState(jump=True,pitch = pitch))
if targetTime > 0.2+self.fakeDeltaTime*2:
targetTime-= self.fakeDeltaTime*2
timers.append(0.2)
else:
timers.append(targetTime - self.fakeDeltaTime*2)
controls.append(SimpleControllerState(jump=False))
timers.append(self.fakeDeltaTime)
if targetHeight < self.singleJumpLimit:
controls.append(SimpleControllerState())
timers.append(clamp(0.6, self.fakeDeltaTime , targetTime - .2))
controls.append(0)
timers.append(self.fakeDeltaTime)
else:
controls.append(SimpleControllerState(jump=True))
timers.append(self.fakeDeltaTime )
self.activeState = Action_chain(self,controls, timers)
#@profile
def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
oldTimer = self.time
self.preprocess(packet)
if len(self.allies) > 0:
newTeamStateManager(self)
else:
soloStateManager_testing(self)
#orientationStateManager(self)
# for i in range(10000):
# timeWithAccelAgentless_jitted(self.currentSpd*1,self.me.boostLevel*1,10000,self.fakeDeltaTime,self.boostConsumptionRate)
# timeWithAccelAgentless_normal(self.currentSpd,self.me.boostLevel,10000,self.fakeDeltaTime,self.boostConsumptionRate)
action = self.activeState.update()
self.controller_state = action
if self.debugging:
self.renderer.begin_rendering()
self.renderer.draw_string_3d(self.me.location.data, 2, 2, str(type(self.activeState).__name__),
self.renderer.white())
# numbers = [f"{x.time_difference():.2f}" if x != None else None for x in self.hits]
# # 0 ground, 1 jumpshot, 2 wallshot , 3 catch canidate,4 double jump shot
# textOutput = f"ground shot:{numbers[0]} jumpshot:{numbers[1]} wallshot:{numbers[2]} highJump:{numbers[3]}"
# self.renderer.draw_string_2d(20, 200, 3, 3, textOutput, self.renderer.white())
for each in self.renderCalls:
each.run()
self.renderer.end_rendering()
self.renderCalls.clear()
return action
| 40.057944 | 210 | 0.604825 | 2,248 | 21,431 | 5.70774 | 0.18194 | 0.017302 | 0.002572 | 0.005611 | 0.237472 | 0.167095 | 0.148936 | 0.117995 | 0.110669 | 0.0929 | 0 | 0.025418 | 0.285894 | 21,431 | 534 | 211 | 40.132959 | 0.81299 | 0.131958 | 0 | 0.164975 | 0 | 0.002538 | 0.010092 | 0.003724 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053299 | false | 0 | 0.022843 | 0.002538 | 0.124365 | 0.010152 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d821dc5c2dd6c4807f6af2be0673c13c7e4abd0 | 7,775 | py | Python | HighLevelPythonInterface/examples/benchmarks/2D/02_run_dtmm_benchmarks.py | warthan07/Nemaktis | 54b1e64c1d40668e6dc22b11eac5487a09b58478 | [
"MIT"
] | 2 | 2020-02-25T08:13:32.000Z | 2022-02-18T12:12:34.000Z | HighLevelPythonInterface/examples/benchmarks/2D/02_run_dtmm_benchmarks.py | warthan07/Nemaktis | 54b1e64c1d40668e6dc22b11eac5487a09b58478 | [
"MIT"
] | null | null | null | HighLevelPythonInterface/examples/benchmarks/2D/02_run_dtmm_benchmarks.py | warthan07/Nemaktis | 54b1e64c1d40668e6dc22b11eac5487a09b58478 | [
"MIT"
] | 1 | 2022-02-18T12:13:40.000Z | 2022-02-18T12:13:40.000Z | import time
import os
import json
import dtmm
dtmm.conf.set_fftlib("mkl_fft")
import numpy as np
import nemaktis as nm
import matplotlib.pyplot as plt
from copy import deepcopy
from propagate_fields import *
########################
# Simulation constants #
########################
ne = 1.75
no = 1.5
wavelength = 0.6
k0 = 2*np.pi/wavelength
Ly = 10
Lz = 10
angle = np.pi/4
def nx(x,y,z):
return np.zeros(x.shape)
def ny(x,y,z):
return -np.sin(angle)*np.ones(x.shape)
def nz(x,y,z):
return np.cos(angle)*np.ones(x.shape)
for diffraction in [1,3,5,7]:
####################
# DTMM simulations #
####################
Ny_vals = np.logspace(np.log10(31), np.log10(1001), 20).astype(int)
dy_vals = Ly/(Ny_vals-1)
elapsed_times = np.zeros(len(Ny_vals))
if not os.path.exists('results'):
os.makedirs('results')
f = open("results/elapsed_times_dtmm_diff=%d.dat" % (diffraction,), "w")
f.write("# Ny\tdy\tt\n")
for iN in range(0,len(Ny_vals)):
Ny = Ny_vals[iN]
Nz = Ny
print("")
print("##################")
print("### Ny=%d ###" % (Ny,))
print("##################")
nfield = nm.DirectorField(
mesh_lengths=(1, Ly, Lz), mesh_dimensions=(1, Ny, Nz))
nfield.init_from_funcs(nx,ny,nz)
dims = nfield.get_mesh_dimensions()
spacings = nfield.get_mesh_spacings()
optical_data = dtmm.director2data(
nfield.vals, no = no, ne = ne, nhost = no,
thickness = spacings[2]/spacings[1]*np.ones(dims[2]))
t0 = time.perf_counter()
gaussian_window = dtmm.window.gaussian_beam(
(dims[1],dims[0]), 1/(np.sqrt(2)*spacings[1]), k0)
field_data_in = dtmm.illumination_data(
(dims[1],dims[0]), [wavelength], pixelsize=spacings[1], n=no,
window = gaussian_window)
field_data_out = dtmm.transfer_field(
field_data_in, optical_data, nin=no, diffraction=diffraction, ret_bulk=True)[0]
elapsed_times[iN] = time.perf_counter()-t0
f.write("%d\t%f\t%f\n" % (Ny,dy_vals[iN],elapsed_times[iN]))
# In order to be consistent with the BPM backend, we absorb the phase factor k0*no*z
z_vals = spacings[2]*np.arange(0,Nz+2)[:,np.newaxis,np.newaxis,np.newaxis,np.newaxis,np.newaxis]
field_data_out *= np.exp(-1j*k0*no*z_vals)
bulk_filename = "results/dtmm_sol_diff=%d_Ny=%d" % (diffraction,Ny,)
print("{ Saving optical fields to "+bulk_filename+".vti }")
lengths = nfield.get_mesh_lengths()
vti_data = vtkImageData()
vti_data.SetDimensions(dims[0], dims[1], dims[2])
vti_data.SetOrigin(-lengths[0]/2, -lengths[1]/2, -lengths[2]/2)
vti_data.SetSpacing(spacings[0], spacings[1], spacings[2])
Np = dims[0]*dims[1]*dims[2]
E_inputX = field_data_out[:-2,0,0,[0,2],:,:].transpose(
(1,0,2,3)).reshape((2,Np)).transpose()
E_inputY = field_data_out[:-2,1,0,[0,2],:,:].transpose(
(1,0,2,3)).reshape((2,Np)).transpose()
E_real_inputX = vn.numpy_to_vtk(np.real(E_inputX))
E_real_inputX.SetName("E_real_inputX_0_0")
vti_data.GetPointData().AddArray(E_real_inputX)
E_imag_inputX = vn.numpy_to_vtk(np.imag(E_inputX))
E_imag_inputX.SetName("E_imag_inputX_0_0")
vti_data.GetPointData().AddArray(E_imag_inputX)
E_real_inputY = vn.numpy_to_vtk(np.real(E_inputY))
E_real_inputY.SetName("E_real_inputY_0_0")
vti_data.GetPointData().AddArray(E_real_inputY)
E_imag_inputY = vn.numpy_to_vtk(np.imag(E_inputY))
E_imag_inputY.SetName("E_imag_inputY_0_0")
vti_data.GetPointData().AddArray(E_imag_inputY)
writer = vtkXMLImageDataWriter()
writer.SetFileName(bulk_filename+".vti")
writer.SetInputData(vti_data)
writer.Write()
f.close()
################################################
# Calculation of exact solution based on FFT #
################################################
print("")
print("#################################")
print("# Calculation of exact solution #")
print("#################################")
optical_axis = np.array([0,-np.sin(angle),np.cos(angle)]).reshape((3,1))
eps = no**2*np.identity(3)+(ne**2-no**2)*np.kron(optical_axis,optical_axis.T)
Ny = Ny_vals[-1]
Nz = Ny
mesh_lengths = [1,Ly,Lz]
mesh_dims = [1,Ny,Nz]
input_field = import_input_field(
"results/dtmm_sol_diff=%d_Ny=%d.vti" % (diffraction,Ny_vals[-1],), mesh_lengths, mesh_dims, pol="Y")
exact_field_vals = propagate_fields(
input_field, mesh_lengths, mesh_dims, wavelength, eps)
z_vals = np.linspace(0,Lz,Nz)[:,np.newaxis,np.newaxis,np.newaxis]
exact_field_vals = np.squeeze(exact_field_vals*np.exp(-1j*k0*no*z_vals))
fft_exact_field_vals = np.fft.fft(exact_field_vals,axis=1)
fft_exact_ampl_vals = np.sqrt(np.sum(np.abs(fft_exact_field_vals)**2,axis=-1))
ys_ref = np.linspace(-Ly/2,Ly,Ny)
zs_ref = np.linspace(0,Lz,Nz)
q_vals = np.tile(2*np.pi*np.fft.fftfreq(Ny,Ly/(Ny-1))/k0, (Nz,1))
NA = 0.4
mask = q_vals**2 < NA**2*(1-(q_vals/no)**2)
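    # Hedged reading of the mask above (my interpretation, not from the source):
    # q_vals are transverse wavevectors normalized by k0, and the condition keeps
    # only Fourier modes inside the collection cone of an NA=0.4 objective (with a
    # correction through the host index no), so the errors below are compared only
    # over modes a real imaging system would capture.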
##########################################
# Calculation of the computational error #
##########################################
class complex_interp:
def __init__(self, zs, ys, vals, kx, ky):
self.interp_real = RectBivariateSpline(zs, ys, np.real(vals), kx=kx, ky=ky)
self.interp_imag = RectBivariateSpline(zs, ys, np.imag(vals), kx=kx, ky=ky)
def __call__(self, zs, ys):
return self.interp_real(zs,ys)+1j*self.interp_imag(zs,ys)
print("")
print("#######################################")
print("# Calculation of computational errors #")
print("#######################################")
max_errors = np.zeros(elapsed_times.shape)
min_errors = np.zeros(elapsed_times.shape)
mean_errors = np.zeros(elapsed_times.shape)
with open("results/errors_and_times_dtmm_2D_diff=%d.dat" % (diffraction,), mode='w') as f:
f.write("# Ny\tdy\tt\terr_mean\terr_min\terr_max\n")
for iN in range(0,len(Ny_vals)):
Ny = Ny_vals[iN]
Nz = Ny
field_vals = import_field("results/dtmm_sol_diff=%d_Ny=%d.vti" % (diffraction,Ny,), pol="Y")[:,:,0,:]
ys = np.linspace(-Ly/2,Ly,Ny)
zs = np.linspace(0,Lz,Nz)
Ex = complex_interp(zs, ys, field_vals[:,:,0], kx=3, ky=3)
Ey = complex_interp(zs, ys, field_vals[:,:,1], kx=3, ky=3)
fft_Ex_err = np.fft.fft(exact_field_vals[:,:,0]-Ex(zs_ref,ys_ref),axis=-1)
fft_Ey_err = np.fft.fft(exact_field_vals[:,:,1]-Ey(zs_ref,ys_ref),axis=-1)
fft_err = \
np.sqrt(np.abs(fft_Ex_err)**2+np.abs(fft_Ey_err)**2) / \
(1e-8+2*np.abs(fft_exact_ampl_vals))
max_errors[iN] = np.max(fft_err[mask])
min_errors[iN] = np.min(fft_err[mask])
mean_errors[iN] = np.mean(fft_err[mask])
f.write("%d\t%f\t%f\t%f\t%f\t%f\n" % (
Ny,dy_vals[iN],elapsed_times[iN],mean_errors[iN],min_errors[iN],max_errors[iN]))
# plt.subplot(1,2,1)
# plt.loglog(dy_vals,mean_errors)
# plt.xlabel("Mesh spacing dy (µm)")
# plt.ylabel("Computational error for DTMM backend")
# plt.subplot(1,2,2)
# plt.imshow(np.abs(exact_field_vals[:,:,1]),extent=(-Ly/2,Ly/2,0,Lz),origin="lower")
# plt.title("Amplitude of Ey")
# plt.colorbar()
# plt.show()
| 36.502347 | 113 | 0.567846 | 1,133 | 7,775 | 3.68932 | 0.19947 | 0.025837 | 0.030144 | 0.025837 | 0.304306 | 0.252153 | 0.183971 | 0.119856 | 0.069378 | 0.069378 | 0 | 0.026316 | 0.222894 | 7,775 | 212 | 114 | 36.674528 | 0.665508 | 0.065595 | 0 | 0.125874 | 0 | 0.006993 | 0.0953 | 0.05546 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034965 | false | 0 | 0.076923 | 0.027972 | 0.146853 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d83510c9b2aea13524aba6e26c438bd0f24e73f | 1,208 | py | Python | tests/test_serializers.py | Alzpeta/oarepo-validate | c3d917a833354bbbf569f2956424a27831e64de5 | [
"MIT"
] | null | null | null | tests/test_serializers.py | Alzpeta/oarepo-validate | c3d917a833354bbbf569f2956424a27831e64de5 | [
"MIT"
] | 3 | 2020-11-05T16:58:38.000Z | 2021-11-16T08:40:46.000Z | tests/test_serializers.py | Alzpeta/oarepo-validate | c3d917a833354bbbf569f2956424a27831e64de5 | [
"MIT"
] | 1 | 2021-09-22T11:59:47.000Z | 2021-09-22T11:59:47.000Z | import uuid
from invenio_pidstore.minters import recid_minter
from invenio_records import Record
from oarepo_validate import JSONSerializer
def test_serializer(app, db):
data = {'test': 'blah'}
record_uuid = uuid.uuid4()
pid = recid_minter(record_uuid, data)
rec = Record.create(data, id_=record_uuid)
serializer = JSONSerializer()
serialized = serializer.transform_record(pid, rec)
serialized.pop('created')
serialized.pop('updated')
assert serialized == {'id': '1', 'links': {}, 'metadata': {'control_number': '1', 'test': 'blah'}, 'revision': 0}
def test_search_serializer(app, db):
data = {'test': 'blah'}
record_uuid = uuid.uuid4()
pid = recid_minter(record_uuid, data)
serializer = JSONSerializer()
serialized = serializer.transform_search_hit(pid, {
'_source': data,
'_version': 0,
'_index': "draft-restoration-restoration-object-v1.0.0-1601321953",
'_type': "_doc",
'_id': str(record_uuid),
'_score': 1,
})
serialized.pop('created')
serialized.pop('updated')
assert serialized == {'id': '1', 'links': {}, 'metadata': {'control_number': '1', 'test': 'blah'}, 'revision': 0}
| 31.789474 | 117 | 0.647351 | 138 | 1,208 | 5.463768 | 0.376812 | 0.079576 | 0.039788 | 0.050398 | 0.61008 | 0.469496 | 0.469496 | 0.469496 | 0.469496 | 0.469496 | 0 | 0.023566 | 0.192053 | 1,208 | 37 | 118 | 32.648649 | 0.748975 | 0 | 0 | 0.466667 | 0 | 0 | 0.191225 | 0.044702 | 0 | 0 | 0 | 0 | 0.066667 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d83672a14d9bf74f5a3f7e74ed8fe2bf08bf3a4 | 3,741 | py | Python | objects/CSCG/_3d/mesh/elements/element/main.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | 1 | 2020-10-14T12:48:35.000Z | 2020-10-14T12:48:35.000Z | objects/CSCG/_3d/mesh/elements/element/main.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | null | null | null | objects/CSCG/_3d/mesh/elements/element/main.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""""""
import sys
if './' not in sys.path: sys.path.append('./')
from screws.freeze.main import FrozenOnly
from objects.CSCG._3d.mesh.elements.element.sub_geometry.sub_geometry import ElementSubGeometry
import numpy as np
from objects.CSCG._3d.mesh.elements.element.sides.main import _3dCSCG_Mesh_Element_Sides
from objects.CSCG._3d.mesh.elements.element.coordinate_transformation import _3dCSCG_Mesh_Element_CT
from objects.CSCG._3d.mesh.elements.element.do import _3dCSCG_MeshElement_Do
from objects.CSCG._3d.mesh.elements.element.IS import _3dCSCG_MeshElement_IS
class _3dCSCG_Mesh_Element(FrozenOnly):
"""The mesh element class"""
def __init__(self, elements, i):
self._elements_ = elements
self._mesh_ = elements._mesh_
self._i_ = i
self._type_wrt_metric_ = None
self._in_region_ = self._mesh_.do.find.region_name_of_element(i)
self._ct_ = None
self._sub_geometry_ = None
self._sides_ = None
self._do_ = None
self._IS_ = None
self._freeze_self_()
@property
def i(self):
"""The global numbering of this element."""
return self._i_
@property
def position(self):
"""The elements.map[i] reflects the position of an element."""
return self._elements_.map[self.i]
@property
def in_region(self):
"""This element is in which domain regions?"""
return self._in_region_
@property
def spacing(self):
"""What is the spacing of this element in the domain regions?
This property basically reflects the relative position of this element in the domain regions.
"""
region, localRegionIndices = self._mesh_.do.find.region_name_and_local_indices_of_element(self.i)
elementsSpacing = self._elements_.spacing[region]
_spacing_ = np.zeros((3,2))
for i in range(3):
_spacing_[i, 0] = elementsSpacing[i][localRegionIndices[i]]
_spacing_[i, 1] = elementsSpacing[i][localRegionIndices[i]+1]
return _spacing_
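    # Hedged reading of `spacing` above: row i of the returned (3, 2) array holds
    # the (start, end) relative coordinates of this element along axis i inside its
    # region -- e.g. the middle of three uniform cells along an axis would read
    # roughly [1/3, 2/3].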
@property
def type_wrt_metric(self):
"""Return an element metric type object reflecting the element type."""
if self._type_wrt_metric_ is None:
self._type_wrt_metric_ = \
self._mesh_.domain.regions[
self.in_region].type_wrt_metric.___CLASSIFY_ELEMENT_of_spacing___(
self.spacing)
return self._type_wrt_metric_
@property
def coordinate_transformation(self):
"""The coordinate transformation object of this element."""
if self._ct_ is None:
self._ct_ = _3dCSCG_Mesh_Element_CT(self)
return self._ct_
@property
def sub_geometry(self):
if self._sub_geometry_ is None:
self._sub_geometry_ = ElementSubGeometry(self)
return self._sub_geometry_
@property
def sides(self):
if self._sides_ is None:
self._sides_ = _3dCSCG_Mesh_Element_Sides(self)
return self._sides_
@property
def do(self):
if self._do_ is None:
self._do_ = _3dCSCG_MeshElement_Do(self)
return self._do_
@property
def IS(self):
if self._IS_ is None:
self._IS_ = _3dCSCG_MeshElement_IS(self)
return self._IS_
if __name__ == '__main__':
# mpiexec -n 5 python objects\CSCG\_3d\mesh\elements\element\main.py
from objects.CSCG._3d.master import MeshGenerator
elements = [2, 2, 2]
mesh = MeshGenerator('crazy', c=0.3, bounds=([0,3], [0,3], [0,3]))(elements)
if 0 in mesh.elements:
e = mesh.elements[0]
ess = e.sides
N = ess['S']
print(N.trace_element) | 32.25 | 105 | 0.65544 | 482 | 3,741 | 4.719917 | 0.197095 | 0.042198 | 0.04 | 0.044835 | 0.141538 | 0.141538 | 0.106374 | 0 | 0 | 0 | 0 | 0.013158 | 0.248329 | 3,741 | 116 | 106 | 32.25 | 0.795875 | 0.139535 | 0 | 0.121951 | 0 | 0 | 0.005696 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134146 | false | 0 | 0.109756 | 0 | 0.378049 | 0.012195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d8382878dbce6ddf7ce8ce123d7a9ded47e0a2c | 2,640 | py | Python | HW4-3/lib/agent/preprocess.py | b05611038/MLDS_2019SPRING | 0591a1a6f461da0a02b9e1b83f37ad3579f36f4d | [
"MIT"
] | 3 | 2019-06-20T06:47:30.000Z | 2021-11-05T03:16:37.000Z | HW4-3/lib/agent/preprocess.py | b05611038/MLDS_2019SPRING | 0591a1a6f461da0a02b9e1b83f37ad3579f36f4d | [
"MIT"
] | null | null | null | HW4-3/lib/agent/preprocess.py | b05611038/MLDS_2019SPRING | 0591a1a6f461da0a02b9e1b83f37ad3579f36f4d | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torchvision
import torchvision.transforms as T
from PIL import Image
# The preprocess dict passed into this class maps implemented method names to booleans:
# {'implemented string': True}
class Transform(object):
def __init__(self, preprocess_dict, device):
        self.implemented_list = self.implemented()
self.preprocess_dict = preprocess_dict
keys = preprocess_dict.keys()
for key in keys:
if key not in self.implemented_list:
                raise KeyError(key, 'is not an implemented observation preprocess method.')
self.device = device
self.transform = self._init_torchvision_method(preprocess_dict)
def __call__(self, observation, memory = None):
if self.preprocess_dict['slice_scoreboard'] == True:
observation = self._slice_scoreboard(observation)
observation = Image.fromarray(observation)
observation = self.transform(observation)
if self.preprocess_dict['gray_scale'] == True:
observation = self._gray_scale(observation)
if self.preprocess_dict['minus_observation'] == True:
observation = self._minus_observation(observation, memory)
return observation
def insert_init_memory(self, observation):
if self.preprocess_dict['slice_scoreboard'] == True:
observation = self._slice_scoreboard(observation)
observation = Image.fromarray(observation)
observation = self.transform(observation)
if self.preprocess_dict['gray_scale'] == True:
observation = self._gray_scale(observation)
return observation
def _init_torchvision_method(self, preprocess_dict):
method = [T.Resize((84, 84)), T.ToTensor()]
return T.Compose(method)
def _gray_scale(self, tensor, r = 0.2126, g = 0.7125, b = 0.0722):
tensor = r * tensor[0, :, :] + g * tensor[1, :, :] + b * tensor[2, :, :]
return tensor.unsqueeze(0)
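    # Hedged note: the r/g/b defaults above approximate the ITU-R BT.709 luma
    # weights (0.2126, 0.7152, 0.0722); the slightly different g=0.7125 is kept
    # as written in the source.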
def _minus_observation(self, observation, memory):
if memory is None:
raise RuntimeError("Please use agent.insert_memory() to insert initial data.")
return observation.to(self.device) - memory
def _slice_scoreboard(self, image):
image = image[24:, :, :]
return image
def image_size(self):
height = 84
length = 84
channel = 3
if self.preprocess_dict['gray_scale'] == True:
channel = 1
return (height, length, channel)
    def implemented(self):
        implemented_list = ['slice_scoreboard', 'gray_scale', 'minus_observation']
        return implemented_list
| 33 | 92 | 0.655303 | 295 | 2,640 | 5.664407 | 0.267797 | 0.100539 | 0.096948 | 0.071813 | 0.300419 | 0.281867 | 0.281867 | 0.262118 | 0.262118 | 0.262118 | 0 | 0.015625 | 0.248485 | 2,640 | 79 | 93 | 33.417722 | 0.826613 | 0.028409 | 0 | 0.267857 | 0 | 0 | 0.090199 | 0.0082 | 0 | 0 | 0 | 0 | 0 | 1 | 0.160714 | false | 0 | 0.089286 | 0 | 0.410714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d88fd8f7b8ecb88133d8ec5e772645139507eaa | 4,557 | py | Python | py/desigal/sky.py | biprateep/DESI-stack | 368e5d93e663b303ce93af55836c94265d5baa3a | [
"MIT"
] | 1 | 2022-01-27T15:46:14.000Z | 2022-01-27T15:46:14.000Z | py/desigal/sky.py | biprateep/DESI-stack | 368e5d93e663b303ce93af55836c94265d5baa3a | [
"MIT"
] | null | null | null | py/desigal/sky.py | biprateep/DESI-stack | 368e5d93e663b303ce93af55836c94265d5baa3a | [
"MIT"
] | null | null | null | import os
import multiprocessing
from pathlib import Path
from joblib import Parallel, delayed
import numpy as np
import desispec.io
import desispec.fluxcalibration
# TODO: parallelization should be done over the target dimension if we decide to run on all targets
def _preprocess_sky_frame(
night, exp, petal, fiber, camera, spectro_redux_path, release, **kwargs
):
exp_path = (
spectro_redux_path / release / "exposures" / str(night) / str(exp).zfill(8)
)
sky_path = exp_path / f"sky-{camera}{petal}-{str(exp).zfill(8)}.fits"
calib_path = exp_path / f"fluxcalib-{camera}{petal}-{str(exp).zfill(8)}.fits"
cframe_path = exp_path / f"cframe-{camera}{petal}-{str(exp).zfill(8)}.fits"
sky_data = desispec.io.read_sky(str(sky_path))
sky_data.fibermap = None # Need to set this for flux calibration function to work
calib_data = desispec.io.read_flux_calibration(str(calib_path))
cframe_data = desispec.io.read_frame(str(cframe_path), skip_resolution=True)
sky_idx = fiber % 500 # find the array idx of the fiber
# The following uses a for loop internally for a part.
# To reduce compute we may want to select specific objects here rather than calibratng the whole
desispec.fluxcalibration.apply_flux_calibration(
sky_data, calib_data
) # modifies sky_data.flux and sky_data.ivar
exp_sky_flux = sky_data.flux[sky_idx]
exp_ivar = cframe_data.ivar[sky_idx]
exp_sky_mask = np.logical_and(sky_data.mask[sky_idx], cframe_data.mask[sky_idx])
return exp_sky_flux, exp_ivar, exp_sky_mask
def get_sky(
spectra=None, fibermap=None, exp_fibermap=None, release="everest", n_workers=-1,
):
"""Get equivalent sky spectra for a set of coadded spectra.
Parameters
----------
spectra : desispec.spectra.Spectra,
coadded spectra as a desispec object, by default None.
fibermap : astropy.Table, optional
        If a spectra object is not provided, fibermap and
        exp_fibermap may be provided instead, by default None.
exp_fibermap : astropy.Table, optional
        If a spectra object is not provided, fibermap and
        exp_fibermap may be provided instead, by default None.
release : str, optional
Name of data release, by default "everest".
n_workers : int, optional
        Number of CPUs to spread the IO over. Values <= 0 will use all available CPUs, by default -1.
Returns
-------
tuple of dictionaries
        sky_flux, sky_mask values for all the spectra, broken down by camera.
"""
if (spectra is None) and (fibermap is None) and (exp_fibermap is None):
raise ValueError("Either spectra or fibermap and exp_fibermap must be provided")
if spectra is not None:
exp_fibermap = spectra.exp_fibermap
fibermap = spectra.fibermap
if (fibermap is None) ^ (exp_fibermap is None):
raise ValueError(
"fibermap and exp_fibermap must be both provided or spectra should be provided"
)
if n_workers <= 0:
n_workers = multiprocessing.cpu_count()
else:
n_workers = min(int(n_workers), multiprocessing.cpu_count())
spectro_redux_path = Path(os.environ["DESI_SPECTRO_REDUX"])
sky_flux = {"b": [], "r": [], "z": []}
sky_mask = {"b": [], "r": [], "z": []}
for target in fibermap["TARGETID"]:
target_table = exp_fibermap[exp_fibermap["TARGETID"] == target]
for camera in ["b", "r", "z"]:
exp_sky_flux, exp_ivar, exp_sky_mask = zip(
*Parallel(n_jobs=n_workers)(
delayed(_preprocess_sky_frame)(
night, exp, petal, fiber, camera, spectro_redux_path, release
)
for night, exp, petal, fiber in zip(
target_table["NIGHT"],
target_table["EXPID"],
target_table["PETAL_LOC"],
target_table["FIBER"],
)
)
)
exp_sky_flux = np.array(exp_sky_flux)
exp_ivar = np.array(exp_ivar)
exp_sky_mask = np.array(exp_sky_mask)
sky_flux[camera].append(
np.sum(exp_sky_flux * exp_ivar, axis=0) / np.sum(exp_ivar, axis=0)
)
sky_mask[camera].append(1 * np.all(exp_sky_mask, axis=0))
sky_flux = {key: np.array(value) for (key, value) in sky_flux.items()}
sky_mask = {key: np.array(value) for (key, value) in sky_mask.items()}
return sky_flux, sky_mask
| 39.284483 | 100 | 0.644942 | 625 | 4,557 | 4.5056 | 0.256 | 0.025568 | 0.021307 | 0.017045 | 0.28196 | 0.241832 | 0.199219 | 0.170455 | 0.151278 | 0.129261 | 0 | 0.004425 | 0.25609 | 4,557 | 115 | 101 | 39.626087 | 0.826254 | 0.254992 | 0 | 0.027397 | 0 | 0 | 0.109195 | 0.04265 | 0 | 0 | 0 | 0.008696 | 0 | 1 | 0.027397 | false | 0 | 0.09589 | 0 | 0.150685 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d8c4d2e42c7e76999617276cbd6cb05860d2fc1 | 2,232 | py | Python | No_0703_Kth Largest Element in a Stream/by_min_heap.py | coderMaruf/leetcode-1 | 20ffe26e43999e44c8acf9800acb371a49bb5853 | [
"MIT"
] | 32 | 2020-01-05T13:37:16.000Z | 2022-03-26T07:27:09.000Z | No_0703_Kth Largest Element in a Stream/by_min_heap.py | coderMaruf/leetcode-1 | 20ffe26e43999e44c8acf9800acb371a49bb5853 | [
"MIT"
] | null | null | null | No_0703_Kth Largest Element in a Stream/by_min_heap.py | coderMaruf/leetcode-1 | 20ffe26e43999e44c8acf9800acb371a49bb5853 | [
"MIT"
] | 8 | 2020-06-18T16:17:27.000Z | 2022-03-15T23:58:18.000Z | '''
Description:
Design a class to find the kth largest element in a stream. Note that it is the kth largest element in the sorted order, not the kth distinct element.
Your KthLargest class will have a constructor which accepts an integer k and an integer array nums, which contains initial elements from the stream. For each call to the method KthLargest.add, return the element representing the kth largest element in the stream.
Example:
int k = 3;
int[] arr = [4,5,8,2];
KthLargest kthLargest = new KthLargest(3, arr);
kthLargest.add(3); // returns 4
kthLargest.add(5); // returns 5
kthLargest.add(10); // returns 5
kthLargest.add(9); // returns 8
kthLargest.add(4); // returns 8
Note:
You may assume that nums' length ≥ k-1 and k ≥ 1.
'''
from typing import List
from heapq import heapify, heappop, heappush, heappushpop
class KthLargest:
def __init__(self, k: int, nums: List[int]):
self.k = k
self.arr = nums
        heapify( self.arr )
        # Keep popping smaller elements till size = k
while len( self.arr ) > self.k:
heappop( self.arr )
def add(self, val: int) -> int:
heap_top = 0
# Always keep heap size = k
# Top element = kth largest element
if len( self.arr ) < self.k:
heappush( self.arr, val)
else:
heappushpop( self.arr, val)
return self.arr[heap_top]
# Your KthLargest object will be instantiated and called as such:
# obj = KthLargest(k, nums)
# param_1 = obj.add(val)
# n : the length of input nums
# k : the value of specified order
## Time Complexity:
#
# O( n log n ) for __init__()
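#   (heapify is O(n); the n-k pops add O((n-k) log n), O(n log n) overall)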
# O( log k ) for add
## Space Complexity:
#
# O( k ) to keep the min-heap
from collections import namedtuple
TestEntry = namedtuple('TestEntry', 'array k add_sequence')
def test_bench():
t = TestEntry( array = [4,5,8,2], k = 3, add_sequence = [3, 5, 10, 9, 4])
obj = KthLargest( k = t.k, nums = t.array )
# expected output:
'''
4
5
5
8
8
'''
for number in t.add_sequence:
print( obj.add( number ) )
return
if __name__ == '__main__':
test_bench() | 22.09901 | 263 | 0.617384 | 327 | 2,232 | 4.146789 | 0.366972 | 0.041298 | 0.050147 | 0.044248 | 0.075221 | 0.036873 | 0 | 0 | 0 | 0 | 0 | 0.023082 | 0.28181 | 2,232 | 101 | 264 | 22.09901 | 0.821585 | 0.517473 | 0 | 0 | 0 | 0 | 0.036634 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.115385 | 0 | 0.346154 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d8ca2861b205d338f99194e7413014a868e02f4 | 1,778 | py | Python | tbot/web/handlers/api/twitch/chat_alerts.py | thomaserlang/tbot | 99cfa204d86ef35cf2cc9482ae5a44abb35b443a | [
"MIT"
] | null | null | null | tbot/web/handlers/api/twitch/chat_alerts.py | thomaserlang/tbot | 99cfa204d86ef35cf2cc9482ae5a44abb35b443a | [
"MIT"
] | 10 | 2022-02-14T11:40:20.000Z | 2022-03-09T22:44:03.000Z | tbot/web/handlers/api/twitch/chat_alerts.py | thomaserlang/tbot | 99cfa204d86ef35cf2cc9482ae5a44abb35b443a | [
"MIT"
] | 1 | 2020-09-19T16:38:24.000Z | 2020-09-19T16:38:24.000Z | import logging, good
from tornado import web
from ..base import Api_handler, Level
class Handler(Api_handler):
__schema__ = good.Schema({
str: [{
'message': good.All(str, good.Length(min=0, max=200)),
good.Optional('min_amount'): good.All(good.Coerce(int), good.Range(min=0, max=1000)),
}],
}, default_keys=good.Optional)
@Level(1)
async def get(self, channel_id):
alerts = await self.db.fetchall(
'SELECT type, message, min_amount FROM twitch_chat_alerts WHERE channel_id=%s',
(channel_id,)
)
grouped_alerts = {}
for a in alerts:
l = grouped_alerts.setdefault(a['type'], [])
l.append({
'message': a['message'],
'min_amount': a['min_amount'] or 0,
})
self.write_object(grouped_alerts)
@Level(1)
async def put(self, channel_id):
data = self.validate()
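        # Replace alerts per alert type: collect the non-empty messages,
        # delete the existing rows for (channel, type), then bulk-insert.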
for key in data:
ins = []
for d in data[key]:
if d['message']:
ins.append((
channel_id,
key,
d['message'],
d.get('min_amount', 0),
))
await self.db.execute(
'DELETE FROM twitch_chat_alerts WHERE channel_id=%s AND type=%s;',
(channel_id, key,)
)
if ins:
await self.db.executemany('''
INSERT INTO twitch_chat_alerts
(channel_id, type, message, min_amount)
VALUES
(%s, %s, %s, %s)
''', ins
)
await self.get(channel_id) | 32.925926 | 97 | 0.468504 | 190 | 1,778 | 4.215789 | 0.363158 | 0.101124 | 0.041199 | 0.034956 | 0.087391 | 0.087391 | 0.087391 | 0.087391 | 0 | 0 | 0 | 0.012536 | 0.41676 | 1,778 | 54 | 98 | 32.925926 | 0.759884 | 0 | 0 | 0.04 | 0 | 0 | 0.238336 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.06 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d8d7f07ee7f1fb1abddfc13378b9331565be05e | 1,342 | py | Python | fraid/rps/viewsets.py | mikaelengstrom/fraid | 3a1cf6ebe39a2105218d8b28eaeec825b9a80858 | [
"MIT"
] | null | null | null | fraid/rps/viewsets.py | mikaelengstrom/fraid | 3a1cf6ebe39a2105218d8b28eaeec825b9a80858 | [
"MIT"
] | null | null | null | fraid/rps/viewsets.py | mikaelengstrom/fraid | 3a1cf6ebe39a2105218d8b28eaeec825b9a80858 | [
"MIT"
] | null | null | null | from rest_framework import (viewsets, mixins, status, response)
from rest_framework.decorators import detail_route
from . import (serializers, models, constants)
class InvalidStringException(Exception):
pass
def str_to_const(string):
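    """Map a throw name (case-insensitive) to its constant, raising
    InvalidStringException for anything other than rock/paper/scissors."""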
_s = string.upper()
if _s == 'ROCK':
return constants.ROCK
elif _s == 'PAPER':
return constants.PAPER
elif _s == 'SCISSORS':
return constants.SCISSORS
else:
raise InvalidStringException
class GameViewSet(mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
queryset = models.Game.objects.all()
serializer_class = serializers.GameSerializer
@detail_route(methods=['post'], url_path='throw')
def throw(self, request, pk=None):
game = self.get_object()
try:
user_throw = str_to_const(request.data['throw'])
except InvalidStringException:
return response.Response({
'error': 'Invalid parameter {}'.format(request.data['throw'])
}, status=status.HTTP_400_BAD_REQUEST)
throw = game.throw(user_throw)
serializer = serializers.ThrowSerializer(instance=throw)
return response.Response(serializer.data, status=status.HTTP_201_CREATED)
| 31.209302 | 81 | 0.662444 | 136 | 1,342 | 6.375 | 0.514706 | 0.051903 | 0.039216 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005894 | 0.241431 | 1,342 | 42 | 82 | 31.952381 | 0.845776 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0.030303 | 0.090909 | 0 | 0.424242 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d8e0ee11e5fb5600838ccf66f631721bafdb54e | 4,989 | py | Python | gestia.py | SunandaS76/GestIA | 277e273e954fdf0fd94aad37ba5770c01f23a5e6 | [
"MIT"
] | null | null | null | gestia.py | SunandaS76/GestIA | 277e273e954fdf0fd94aad37ba5770c01f23a5e6 | [
"MIT"
] | null | null | null | gestia.py | SunandaS76/GestIA | 277e273e954fdf0fd94aad37ba5770c01f23a5e6 | [
"MIT"
] | null | null | null | import sys
import time
import cv2
import keyboard
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from config import *
from detector import *
import json
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self, *args, **kwargs):
# Set up environment
QtWidgets.QMainWindow.__init__(self, *args, **kwargs)
self.setupUi(self)
# Connect application logic
self.startButton.clicked.connect(self.startRecording)
self.stopButton.clicked.connect(self.stopRecording)
self.assignFist.clicked.connect(self.updateFist)
self.assignDaddyF.clicked.connect(self.updateDaddyF)
self.assignPalmO.clicked.connect(self.updatePalmO)
self.assignPalmC.clicked.connect(self.updatePalmC)
self.assignThumbsU.clicked.connect(self.updateThumbsU)
self.assignThumbsD.clicked.connect(self.updateThumbsD)
self.buttonPressed = False
        # Configure timer for webcam capture
self.timer = QtCore.QTimer(self)
self.timer.timeout.connect(self.show_frame)
# Get webcam object
self.webcam = cv2.VideoCapture(0)
ret, self.frame = self.webcam.read()
# GUI enable/disable logic
self.stopButton.setEnabled(False)
# Inference Model Object
self.inferenceObject = InferenceModel()
self.inferenceObject.initialize()
self.actions = None
self.__load_actions()
self.__set_labels()
def stopRecording(self):
self.startButton.setEnabled(True)
self.timer.stop()
self.stopButton.setEnabled(False)
def startRecording(self):
self.startButton.setEnabled(False)
self.stopButton.setEnabled(True)
self.timer.start(1)
def show_frame(self):
# Capture frame from webcam
ret, self.frame = self.webcam.read()
if not ret:
return
        # Inference process
label_detected = self.inferenceObject.processFrame(self.frame)
self.label.setText("Detection: " + label_detected)
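        # Demo key bindings: a detected fist toggles Caps Lock and a
        # closed palm sends the Percent key.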
if (label_detected != "nothing"):
print(label_detected, '\n')
if(label_detected == "fist"):
keyboard.press_and_release('Caps lock')
if(label_detected == "palm_close"):
keyboard.press_and_release("Percent")
# Process image to show on QtGui
image = QtGui.QImage(self.frame, self.frame.shape[1], self.frame.shape[0],
self.frame.shape[1] * self.frame.shape[2],
QtGui.QImage.Format_RGB888)
pixmap = QtGui.QPixmap()
pixmap.convertFromImage(image.rgbSwapped())
# Show image as a pixelmap on labelCam
self.labelCam.setPixmap(pixmap)
    def updateFist(self):
        self.actionFist.setText("PRESS ANY KEY")
        print('fist')
        self.actionFist.setText(self.__update_actions('fist'))
    def updateDaddyF(self):
        self.actionDaddyF.setText("PRESS ANY KEY")
        print('daddy_finger')
        self.actionDaddyF.setText(self.__update_actions('daddy_finger'))
    def updatePalmC(self):
        self.actionPalmC.setText("PRESS ANY KEY")
        print('palm_close')
        self.actionPalmC.setText(self.__update_actions('palm_close'))
    def updatePalmO(self):
        self.actionPalmO.setText("PRESS ANY KEY")
        print('palm_open')
        self.actionPalmO.setText(self.__update_actions('palm_open'))
    def updateThumbsU(self):
        self.actionThumbsU.setText("PRESS ANY KEY")
        print('thumbs_up')
        self.actionThumbsU.setText(self.__update_actions('thumbs_up'))
    def updateThumbsD(self):
        self.actionThumbsD.setText("PRESS ANY KEY")
        print('thumbs_down')
        self.actionThumbsD.setText(self.__update_actions('thumbs_down'))
def __update_actions(self, gesture):
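        # Guard against re-entrant calls while a key is being captured,
        # then block on keyboard.read_key(), persist the new mapping to
        # data/actions.json and return it upper-cased for the UI label.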
if self.buttonPressed:
return self.actions[gesture].upper()
self.buttonPressed = True
action = keyboard.read_key()
if len(action) == 0:
return self.actions[gesture].upper()
self.actions[gesture] = action
self.__save_actions()
self.buttonPressed = False
return action.upper()
def __load_actions(self):
with open('data/actions.json', 'r') as f:
self.actions = json.load(f)
def __save_actions(self):
with open('data/actions.json', 'w') as f:
json.dump(self.actions, f)
def __set_labels(self):
self.actionThumbsD.setText(self.actions['thumbs_down'].upper())
self.actionThumbsU.setText(self.actions['thumbs_up'].upper())
self.actionFist.setText(self.actions['fist'].upper())
self.actionPalmO.setText(self.actions['palm_open'].upper())
self.actionPalmC.setText(self.actions['palm_close'].upper())
self.actionDaddyF.setText(self.actions['daddy_finger'].upper())
if __name__ == "__main__":
app = QtWidgets.QApplication([])
window = MainWindow()
window.show()
app.exec_()
| 33.483221 | 82 | 0.646021 | 550 | 4,989 | 5.710909 | 0.278182 | 0.042025 | 0.045845 | 0.026743 | 0.165552 | 0.128621 | 0.091054 | 0.050939 | 0 | 0 | 0 | 0.005013 | 0.240329 | 4,989 | 148 | 83 | 33.709459 | 0.823747 | 0.052716 | 0 | 0.127273 | 0 | 0 | 0.064687 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.127273 | false | 0 | 0.072727 | 0 | 0.245455 | 0.063636 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d91bbf3521fb204188d37328c0cfa2d88a3d719 | 72,911 | py | Python | tests/3_endpoints/test_3_0_ldp.py | whikloj/lakesuperior | 733ac54e9525dcb7c3161bc70f04415e81650c06 | [
"Apache-2.0"
] | null | null | null | tests/3_endpoints/test_3_0_ldp.py | whikloj/lakesuperior | 733ac54e9525dcb7c3161bc70f04415e81650c06 | [
"Apache-2.0"
] | null | null | null | tests/3_endpoints/test_3_0_ldp.py | whikloj/lakesuperior | 733ac54e9525dcb7c3161bc70f04415e81650c06 | [
"Apache-2.0"
] | null | null | null | import hashlib
import pdb
import pytest
from base64 import b64encode
from datetime import timedelta
from hashlib import sha1, sha256, blake2b
from uuid import uuid4
from werkzeug.http import http_date
import arrow
import requests
from flask import g
from rdflib import Graph
from rdflib.compare import isomorphic
from rdflib.namespace import RDF
from rdflib.term import Literal, URIRef
from lakesuperior import env
from lakesuperior.dictionaries.namespaces import ns_collection as nsc
from lakesuperior.model.ldp.ldpr import Ldpr
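# Digest algorithm configured for the repository, used below for ETag and
# Digest header checks.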
digest_algo = env.app_globals.config['application']['uuid']['algo']
@pytest.fixture(scope='module')
def random_uuid():
return str(uuid4())
@pytest.mark.usefixtures('client_class')
@pytest.mark.usefixtures('db')
class TestLdp:
"""
Test HTTP interaction with LDP endpoint.
"""
def test_get_root_node(self):
"""
Get the root node from two different endpoints.
The test triplestore must be initialized, hence the `db` fixture.
"""
ldp_resp = self.client.get('/ldp')
rest_resp = self.client.get('/rest')
assert ldp_resp.status_code == 200
assert rest_resp.status_code == 200
def test_put_empty_ldpr(self):
"""
Check response headers for a PUT operation with empty payload.
"""
url = f'/ldp/empty-{uuid4()}'
rsp = self.client.put(url)
assert rsp.status_code == 201
assert rsp.data == bytes(url.replace('/ldp', g.webroot), 'utf-8')
def test_put_empty_ldpnr(self):
"""
Check response headers for a PUT operation with empty payload.
Without specifying a MIME type, an LDP-NR should be created.
"""
url = f'/ldp/empty-{uuid4()}'
rsp = self.client.put(url)
get_rsp = self.client.get(url)
assert 'application/octet-stream' in get_rsp.content_type
assert (
'<http://www.w3.org/ns/ldp#NonRDFSource>;rel="type"'
in get_rsp.headers.get_all('link'))
def test_put_empty_ldprs(self):
"""
Check response headers for a PUT operation with empty payload.
Specify MIME type to force LDPC creation.
"""
url = f'/ldp/empty-{uuid4()}'
rsp = self.client.put(url, content_type='text/turtle')
get_rsp = self.client.get(url)
assert 'text/turtle' in get_rsp.content_type
assert (
'<http://www.w3.org/ns/ldp#RDFSource>;rel="type"'
in get_rsp.headers.get_all('link'))
def test_put_existing_ldpnr_empty(self):
"""
Trying to PUT an existing LDP-NR should return a 204 if the LDP
interaction type is the same.
"""
url = f'/ldp/overwrite-{uuid4()}'
self.client.put(url)
put2_resp = self.client.put(url)
assert put2_resp.status_code == 204
def test_put_existing_ldprs_empty(self):
"""
Trying to PUT an existing LDP-RS should return a 204 if the LDP
interaction type is the same.
"""
url = f'/ldp/overwrite-{uuid4()}'
self.client.put(url, content_type='text/turtle')
put2_resp = self.client.put(url, content_type='text/turtle')
with open('tests/data/marcel_duchamp_single_subject.ttl', 'rb') as f:
put2_resp = self.client.put(
url, data=f, content_type='text/turtle')
assert put2_resp.status_code == 204
def test_put_existing_ldpnr_conflict(self):
"""
        Trying to PUT an existing LDP-NR should return a 415 if the LDP
interaction type is different.
"""
url = f'/ldp/overwrite-{uuid4()}'
put1_resp = self.client.put(url)
assert put1_resp.status_code == 201
with open('tests/data/marcel_duchamp_single_subject.ttl', 'rb') as f:
put2_resp = self.client.put(
url, data=f, content_type='text/turtle')
assert put2_resp.status_code == 415
    def test_put_existing_ldprs_conflict(self):
"""
Trying to PUT an existing LDP-RS should return a 415 if the LDP
interaction type is different.
"""
url = f'/ldp/overwrite-{uuid4()}'
put1_resp = self.client.put(url, content_type='text/turtle')
put2_resp = self.client.put(url, data=b'hello')
assert put2_resp.status_code == 415
def test_put_force_ldpnr(self):
"""
Test forcing LDP-NR creation even with an RDF content type.
"""
url = f'/ldp/force_ldpnr-{uuid4()}'
with open('tests/data/marcel_duchamp_single_subject.ttl', 'rb') as f:
rsp = self.client.put(
url, data=f, content_type='text/turtle',
headers={
'link': '<http://www.w3.org/ns/ldp#NonRDFSource>;rel="type"'
})
f.seek(0)
data = f.read()
get_rsp = self.client.get(url)
assert 'text/turtle' in get_rsp.content_type
assert (
'<http://www.w3.org/ns/ldp#NonRDFSource>;rel="type"'
in get_rsp.headers.get_all('link'))
assert get_rsp.data == data
def test_put_tree(self, client):
"""
PUT a resource with several path segments.
The test should create intermediate path segments that are LDPCs,
accessible to PUT or POST.
"""
path = '/ldp/test_tree/a/b/c/d/e/f/g'
self.client.put(path)
assert self.client.get(path).status_code == 200
assert self.client.get('/ldp/test_tree/a/b/c').status_code == 200
assert self.client.post(
'/ldp/test_tree/a/b',
content_type='text/turtle').status_code == 201
with open('tests/data/marcel_duchamp_single_subject.ttl', 'rb') as f:
put_int_resp = self.client.put(
'ldp/test_tree/a', data=f, content_type='text/turtle')
assert put_int_resp.status_code == 204
# @TODO More thorough testing of contents
def test_put_nested_tree(self, client):
"""
Verify that containment is set correctly in nested hierarchies.
First put a new hierarchy and verify that the root node is its
container; then put another hierarchy under it and verify that the
first hierarchy is the container of the second one.
"""
uuid1 = 'test_nested_tree/a/b/c/d'
uuid2 = uuid1 + '/e/f/g'
path1 = '/ldp/' + uuid1
path2 = '/ldp/' + uuid2
self.client.put(path1, content_type='text/turtle')
cont1_data = self.client.get('/ldp').data
gr1 = Graph().parse(data=cont1_data, format='turtle')
assert gr1[ URIRef(g.webroot + '/') : nsc['ldp'].contains : \
URIRef(g.webroot + '/test_nested_tree') ]
self.client.put(path2, content_type='text/turtle')
cont2_data = self.client.get(path1).data
gr2 = Graph().parse(data=cont2_data, format='turtle')
assert gr2[ URIRef(g.webroot + '/' + uuid1) : \
nsc['ldp'].contains : \
URIRef(g.webroot + '/' + uuid1 + '/e') ]
def test_put_ldp_rs(self, client):
"""
PUT a resource with RDF payload and verify.
"""
with open('tests/data/marcel_duchamp_single_subject.ttl', 'rb') as f:
self.client.put('/ldp/ldprs01', data=f, content_type='text/turtle')
resp = self.client.get('/ldp/ldprs01',
headers={'accept' : 'text/turtle'})
assert resp.status_code == 200
gr = Graph().parse(data=resp.data, format='text/turtle')
assert URIRef('http://vocab.getty.edu/ontology#Subject') in \
gr.objects(None, RDF.type)
def test_put_ldp_nr(self, rnd_img):
"""
PUT a resource with binary payload and verify checksums.
"""
rnd_img['content'].seek(0)
resp = self.client.put('/ldp/ldpnr01', data=rnd_img['content'],
headers={
'Content-Type': 'image/png',
'Content-Disposition' : 'attachment; filename={}'.format(
rnd_img['filename'])})
assert resp.status_code == 201
resp = self.client.get(
'/ldp/ldpnr01', headers={'accept' : 'image/png'})
assert resp.status_code == 200
assert sha1(resp.data).hexdigest() == rnd_img['hash']
def test_put_ldp_nr_multipart(self, rnd_img):
"""
PUT a resource with a multipart/form-data payload.
"""
rnd_img['content'].seek(0)
resp = self.client.put(
'/ldp/ldpnr02',
data={
'file': (
rnd_img['content'], rnd_img['filename'],
'image/png',
)
}
)
assert resp.status_code == 201
resp = self.client.get(
'/ldp/ldpnr02', headers={'accept' : 'image/png'})
assert resp.status_code == 200
assert sha1(resp.data).hexdigest() == rnd_img['hash']
def test_get_ldp_nr(self, rnd_img):
"""
        PUT a resource with binary payload and test various retrieval methods.
"""
uid = '/ldpnr03'
path = '/ldp' + uid
content = b'This is some exciting content.'
resp = self.client.put(path, data=content,
headers={
'Content-Type': 'text/plain',
'Content-Disposition' : 'attachment; filename=exciting.txt'})
assert resp.status_code == 201
uri = g.webroot + uid
# Content retrieval methods.
resp_bin1 = self.client.get(path)
assert resp_bin1.status_code == 200
assert resp_bin1.data == content
resp_bin2 = self.client.get(path, headers={'accept' : 'text/plain'})
assert resp_bin2.status_code == 200
assert resp_bin2.data == content
resp_bin3 = self.client.get(path + '/fcr:content')
assert resp_bin3.status_code == 200
assert resp_bin3.data == content
# Metadata retrieval methods.
resp_md1 = self.client.get(path, headers={'accept' : 'text/turtle'})
assert resp_md1.status_code == 200
gr1 = Graph().parse(data=resp_md1.data, format='text/turtle')
assert gr1[ URIRef(uri) : nsc['rdf'].type : nsc['ldp'].Resource]
resp_md2 = self.client.get(path + '/fcr:metadata')
assert resp_md2.status_code == 200
gr2 = Graph().parse(data=resp_md2.data, format='text/turtle')
assert isomorphic(gr1, gr2)
def test_put_ldprs_invalid_rdf(self):
"""
Verify that PUTting invalid RDF body returns HTTP 400.
        However, when forcing LDP-NR, invalid RDF is always accepted.
"""
from lakesuperior.endpoints.ldp import rdf_serializable_mimetypes
rdfstr = b'This is valid RDF because it ends with a period.'
for mt in rdf_serializable_mimetypes:
rsp_notok = self.client.put(
f'/ldp/{uuid4()}', data=rdfstr, content_type=mt)
assert rsp_notok.status_code == 400
rsp_ok = self.client.put(
f'/ldp/{uuid4()}', data=rdfstr, content_type=mt,
headers={
'link': '<http://www.w3.org/ns/ldp#NonRDFSource>;rel="type"'
}
)
assert rsp_ok.status_code == 201
def test_post_ldprs_invalid_rdf(self):
"""
Verify that POSTing invalid RDF body returns HTTP 400.
        However, when forcing LDP-NR, invalid RDF is always accepted.
"""
from lakesuperior.endpoints.ldp import rdf_serializable_mimetypes
rdfstr = b'This is valid RDF because it ends with a period.'
for mt in rdf_serializable_mimetypes:
rsp_notok = self.client.post(
f'/ldp', data=rdfstr, content_type=mt)
assert rsp_notok.status_code == 400
rsp_ok = self.client.post(
f'/ldp', data=rdfstr, content_type=mt,
headers={
'link': '<http://www.w3.org/ns/ldp#NonRDFSource>;rel="type"'
}
)
assert rsp_ok.status_code == 201
def test_metadata_describe_header(self):
"""
Verify that a "describe" Link header is presented for LDP-NR metadata.
"""
uid = f'/{uuid4()}'
self.client.put(f'/ldp{uid}', data=b'ciao')
md_rsp = self.client.get(f'/ldp{uid}/fcr:metadata')
assert (
f'<{g.tbox.uid_to_uri(uid)}>; rel=describes'
in md_rsp.headers.get_all('Link'))
def test_put_mismatched_ldp_rs(self, rnd_img):
"""
Verify MIME type / LDP mismatch.
PUT a LDP-RS, then PUT a LDP-NR on the same location and verify it
fails.
"""
path = '/ldp/' + str(uuid4())
rnd_img['content'].seek(0)
ldp_nr_resp = self.client.put(path, data=rnd_img['content'],
headers={
'Content-Disposition' : 'attachment; filename={}'.format(
rnd_img['filename'])})
assert ldp_nr_resp.status_code == 201
with open('tests/data/marcel_duchamp_single_subject.ttl', 'rb') as f:
ldp_rs_resp = self.client.put(path, data=f,
content_type='text/turtle')
assert ldp_rs_resp.status_code == 415
def test_put_mismatched_ldp_nr(self, rnd_img):
"""
Verify MIME type / LDP mismatch.
PUT a LDP-NR, then PUT a LDP-RS on the same location and verify it
fails.
"""
path = '/ldp/' + str(uuid4())
with open('tests/data/marcel_duchamp_single_subject.ttl', 'rb') as f:
ldp_rs_resp = self.client.put(path, data=f,
content_type='text/turtle')
assert ldp_rs_resp.status_code == 201
rnd_img['content'].seek(0)
ldp_nr_resp = self.client.put(path, data=rnd_img['content'],
headers={
'Content-Disposition' : 'attachment; filename={}'.format(
rnd_img['filename'])})
assert ldp_nr_resp.status_code == 415
def test_missing_reference(self, client):
"""
PUT a resource with RDF payload referencing a non-existing in-repo
resource.
"""
self.client.get('/ldp')
data = '''
PREFIX ns: <http://example.org#>
PREFIX res: <http://example-source.org/res/>
<> ns:p1 res:bogus ;
ns:p2 <{0}> ;
ns:p3 <{0}/> ;
ns:p4 <{0}/nonexistent> .
'''.format(g.webroot)
put_rsp = self.client.put('/ldp/test_missing_ref', data=data, headers={
'content-type': 'text/turtle'})
assert put_rsp.status_code == 201
resp = self.client.get('/ldp/test_missing_ref',
headers={'accept' : 'text/turtle'})
assert resp.status_code == 200
gr = Graph().parse(data=resp.data, format='text/turtle')
assert URIRef('http://example-source.org/res/bogus') in \
gr.objects(None, URIRef('http://example.org#p1'))
assert URIRef(g.webroot + '/') in (
gr.objects(None, URIRef('http://example.org#p2')))
assert URIRef(g.webroot + '/') in (
gr.objects(None, URIRef('http://example.org#p3')))
assert URIRef(g.webroot + '/nonexistent') not in (
gr.objects(None, URIRef('http://example.org#p4')))
def test_post_resource(self, client):
"""
Check response headers for a POST operation with empty payload.
"""
res = self.client.post('/ldp/')
assert res.status_code == 201
assert 'Location' in res.headers
def test_post_ldp_nr(self, rnd_img):
"""
POST a resource with binary payload and verify checksums.
"""
rnd_img['content'].seek(0)
resp = self.client.post('/ldp/', data=rnd_img['content'],
headers={
'slug': 'ldpnr04',
'Content-Type': 'image/png',
'Content-Disposition' : 'attachment; filename={}'.format(
rnd_img['filename'])})
assert resp.status_code == 201
resp = self.client.get(
'/ldp/ldpnr04', headers={'accept' : 'image/png'})
assert resp.status_code == 200
assert sha1(resp.data).hexdigest() == rnd_img['hash']
def test_post_slug(self):
"""
Verify that a POST with slug results in the expected URI only if the
resource does not exist already.
"""
slug01_resp = self.client.post('/ldp', headers={'slug' : 'slug01'})
assert slug01_resp.status_code == 201
assert slug01_resp.headers['location'] == \
g.webroot + '/slug01'
slug02_resp = self.client.post('/ldp', headers={'slug' : 'slug01'})
assert slug02_resp.status_code == 201
assert slug02_resp.headers['location'] != \
g.webroot + '/slug01'
def test_post_404(self):
"""
Verify that a POST to a non-existing parent results in a 404.
"""
assert self.client.post('/ldp/{}'.format(uuid4()))\
.status_code == 404
def test_post_409(self, rnd_img):
"""
Verify that you cannot POST to a binary resource.
"""
rnd_img['content'].seek(0)
self.client.put('/ldp/post_409', data=rnd_img['content'], headers={
'Content-Disposition' : 'attachment; filename={}'.format(
rnd_img['filename'])})
assert self.client.post('/ldp/post_409').status_code == 409
def test_patch_root(self):
"""
Test patching root node.
"""
path = '/ldp/'
self.client.get(path)
uri = g.webroot + '/'
with open('tests/data/sparql_update/simple_insert.sparql') as data:
resp = self.client.patch(path,
data=data,
headers={'content-type' : 'application/sparql-update'})
assert resp.status_code == 204
resp = self.client.get(path)
gr = Graph().parse(data=resp.data, format='text/turtle')
assert gr[ URIRef(uri) : nsc['dc'].title : Literal('Hello') ]
def test_patch(self):
"""
Test patching a resource.
"""
path = '/ldp/test_patch01'
self.client.put(path, content_type='text/turtle')
uri = g.webroot + '/test_patch01'
with open('tests/data/sparql_update/simple_insert.sparql') as data:
resp = self.client.patch(path,
data=data,
headers={'content-type' : 'application/sparql-update'})
assert resp.status_code == 204
resp = self.client.get(path)
gr = Graph().parse(data=resp.data, format='text/turtle')
assert gr[ URIRef(uri) : nsc['dc'].title : Literal('Hello') ]
self.client.patch(path,
data=open('tests/data/sparql_update/delete+insert+where.sparql'),
headers={'content-type' : 'application/sparql-update'})
resp = self.client.get(path)
gr = Graph().parse(data=resp.data, format='text/turtle')
assert gr[ URIRef(uri) : nsc['dc'].title : Literal('Ciao') ]
def test_patch_no_single_subject(self):
"""
Test patching a resource violating the single-subject rule.
"""
path = '/ldp/test_patch_ssr'
self.client.put(path, content_type='text/turtle')
uri = g.webroot + '/test_patch_ssr'
nossr_qry = 'INSERT { <http://bogus.org> a <urn:ns:A> . } WHERE {}'
abs_qry = 'INSERT {{ <{}> a <urn:ns:A> . }} WHERE {{}}'.format(uri)
frag_qry = 'INSERT {{ <{}#frag> a <urn:ns:A> . }} WHERE {{}}'\
.format(uri)
# @TODO Leave commented until a decision is made about SSR.
assert self.client.patch(
path, data=nossr_qry,
headers={'content-type': 'application/sparql-update'}
).status_code == 204
assert self.client.patch(
path, data=abs_qry,
headers={'content-type': 'application/sparql-update'}
).status_code == 204
assert self.client.patch(
path, data=frag_qry,
headers={'content-type': 'application/sparql-update'}
).status_code == 204
def test_patch_ldp_nr_metadata(self):
"""
Test patching a LDP-NR metadata resource from the fcr:metadata URI.
"""
path = '/ldp/ldpnr01'
with open('tests/data/sparql_update/simple_insert.sparql') as data:
self.client.patch(path + '/fcr:metadata',
data=data,
headers={'content-type' : 'application/sparql-update'})
resp = self.client.get(path + '/fcr:metadata')
assert resp.status_code == 200
uri = g.webroot + '/ldpnr01'
gr = Graph().parse(data=resp.data, format='text/turtle')
assert gr[URIRef(uri) : nsc['dc'].title : Literal('Hello')]
with open(
'tests/data/sparql_update/delete+insert+where.sparql') as data:
patch_resp = self.client.patch(path + '/fcr:metadata',
data=data,
headers={'content-type' : 'application/sparql-update'})
assert patch_resp.status_code == 204
resp = self.client.get(path + '/fcr:metadata')
assert resp.status_code == 200
gr = Graph().parse(data=resp.data, format='text/turtle')
assert gr[ URIRef(uri) : nsc['dc'].title : Literal('Ciao') ]
def test_patch_ldpnr(self):
"""
Verify that a direct PATCH to a LDP-NR results in a 415.
"""
with open(
'tests/data/sparql_update/delete+insert+where.sparql') as data:
patch_resp = self.client.patch('/ldp/ldpnr01',
data=data,
headers={'content-type': 'application/sparql-update'})
assert patch_resp.status_code == 415
def test_patch_invalid_mimetype(self, rnd_img):
"""
Verify that a PATCH using anything other than an
`application/sparql-update` MIME type results in an error.
"""
self.client.put(
'/ldp/test_patch_invalid_mimetype', content_type='text/turtle')
rnd_img['content'].seek(0)
ldpnr_resp = self.client.patch('/ldp/ldpnr01/fcr:metadata',
data=rnd_img,
headers={'content-type' : 'image/jpeg'})
ldprs_resp = self.client.patch('/ldp/test_patch_invalid_mimetype',
data=b'Hello, I\'m not a SPARQL update.',
headers={'content-type' : 'text/plain'})
assert ldprs_resp.status_code == ldpnr_resp.status_code == 415
def test_patch_srv_mgd_pred(self, rnd_img):
"""
Verify that adding or removing a server-managed predicate fails.
"""
uid = '/test_patch_sm_pred'
path = f'/ldp{uid}'
self.client.put(path, content_type='text/turtle')
self.client.put(path + '/child1', content_type='text/turtle')
uri = g.webroot + uid
ins_qry1 = f'INSERT {{ <> <{nsc["ldp"].contains}> <http://bogus.com/ext1> . }} WHERE {{}}'
ins_qry2 = (
f'INSERT {{ <> <{nsc["fcrepo"].created}>'
f'"2019-04-01T05:57:36.899033+00:00"^^<{nsc["xsd"].dateTime}> . }}'
'WHERE {}'
)
# The following won't change the graph so it does not raise an error.
ins_qry3 = f'INSERT {{ <> a <{nsc["ldp"].Container}> . }} WHERE {{}}'
del_qry1 = (
f'DELETE {{ <> <{nsc["ldp"].contains}> ?o . }} '
f'WHERE {{ <> <{nsc["ldp"].contains}> ?o . }}'
)
del_qry2 = f'DELETE {{ <> a <{nsc["ldp"].Container}> . }} WHERE {{}}'
# No-op as ins_qry3
del_qry3 = (
f'DELETE {{ <> a <{nsc["ldp"].DirectContainer}> .}} '
'WHERE {}'
)
assert self.client.patch(
path, data=ins_qry1,
headers={'content-type': 'application/sparql-update'}
).status_code == 412
assert self.client.patch(
path, data=ins_qry1,
headers={
'content-type': 'application/sparql-update',
'prefer': 'handling=lenient',
}
).status_code == 204
assert self.client.patch(
path, data=ins_qry2,
headers={'content-type': 'application/sparql-update'}
).status_code == 412
assert self.client.patch(
path, data=ins_qry2,
headers={
'content-type': 'application/sparql-update',
'prefer': 'handling=lenient',
}
).status_code == 204
assert self.client.patch(
path, data=ins_qry3,
headers={'content-type': 'application/sparql-update'}
).status_code == 204
assert self.client.patch(
path, data=del_qry1,
headers={'content-type': 'application/sparql-update'}
).status_code == 412
assert self.client.patch(
path, data=del_qry1,
headers={
'content-type': 'application/sparql-update',
'prefer': 'handling=lenient',
}
).status_code == 204
assert self.client.patch(
path, data=del_qry2,
headers={'content-type': 'application/sparql-update'}
).status_code == 412
assert self.client.patch(
path, data=ins_qry2,
headers={
'content-type': 'application/sparql-update',
'prefer': 'handling=lenient',
}
).status_code == 204
assert self.client.patch(
path, data=del_qry3,
headers={'content-type': 'application/sparql-update'}
).status_code == 204
def test_delete(self):
"""
Test delete response codes.
"""
self.client.put('/ldp/test_delete01')
delete_resp = self.client.delete('/ldp/test_delete01')
assert delete_resp.status_code == 204
bogus_delete_resp = self.client.delete('/ldp/test_delete101')
assert bogus_delete_resp.status_code == 404
def test_tombstone(self):
"""
Test tombstone behaviors.
For POST on a tombstone, check `test_resurrection`.
"""
tstone_resp = self.client.get('/ldp/test_delete01')
assert tstone_resp.status_code == 410
assert tstone_resp.headers['Link'] == \
'<{}/test_delete01/fcr:tombstone>; rel="hasTombstone"'\
.format(g.webroot)
tstone_path = '/ldp/test_delete01/fcr:tombstone'
assert self.client.get(tstone_path).status_code == 405
assert self.client.put(tstone_path).status_code == 405
assert self.client.delete(tstone_path).status_code == 204
assert self.client.get('/ldp/test_delete01').status_code == 404
def test_delete_recursive(self):
"""
Test response codes for resources deleted recursively and their
tombstones.
"""
child_suffixes = ('a', 'a/b', 'a/b/c', 'a1', 'a1/b1')
self.client.put('/ldp/test_delete_recursive01')
for cs in child_suffixes:
self.client.put('/ldp/test_delete_recursive01/{}'.format(cs))
assert self.client.delete(
'/ldp/test_delete_recursive01').status_code == 204
tstone_resp = self.client.get('/ldp/test_delete_recursive01')
assert tstone_resp.status_code == 410
assert tstone_resp.headers['Link'] == \
'<{}/test_delete_recursive01/fcr:tombstone>; rel="hasTombstone"'\
.format(g.webroot)
for cs in child_suffixes:
child_tstone_resp = self.client.get(
'/ldp/test_delete_recursive01/{}'.format(cs))
assert child_tstone_resp.status_code == tstone_resp.status_code
assert 'Link' not in child_tstone_resp.headers.keys()
def test_put_fragments(self):
"""
Test the correct handling of fragment URIs on PUT and GET.
"""
with open('tests/data/fragments.ttl', 'rb') as f:
self.client.put(
'/ldp/test_fragment01', content_type='text/turtle', data=f)
rsp = self.client.get('/ldp/test_fragment01')
gr = Graph().parse(data=rsp.data, format='text/turtle')
assert gr[
URIRef(g.webroot + '/test_fragment01#hash1')
: URIRef('http://ex.org/p2') : URIRef('http://ex.org/o2')]
def test_patch_fragments(self):
"""
Test the correct handling of fragment URIs on PATCH.
"""
self.client.put('/ldp/test_fragment_patch', content_type='text/turtle')
with open('tests/data/fragments_insert.sparql', 'rb') as f:
self.client.patch(
'/ldp/test_fragment_patch',
content_type='application/sparql-update', data=f)
ins_rsp = self.client.get('/ldp/test_fragment_patch')
ins_gr = Graph().parse(data=ins_rsp.data, format='text/turtle')
assert ins_gr[
URIRef(g.webroot + '/test_fragment_patch#hash1234')
: URIRef('http://ex.org/p3') : URIRef('http://ex.org/o3')]
with open('tests/data/fragments_delete.sparql', 'rb') as f:
self.client.patch(
'/ldp/test_fragment_patch',
headers={
'Content-Type' : 'application/sparql-update',
},
data=f
)
del_rsp = self.client.get('/ldp/test_fragment_patch')
del_gr = Graph().parse(data=del_rsp.data, format='text/turtle')
assert not del_gr[
URIRef(g.webroot + '/test_fragment_patch#hash1234')
: URIRef('http://ex.org/p3') : URIRef('http://ex.org/o3')]
@pytest.mark.usefixtures('client_class')
@pytest.mark.usefixtures('db')
class TestMimeType:
"""
Test ``Accept`` headers and input & output formats.
"""
def test_accept(self):
"""
Verify the default serialization method.
"""
accept_list = {
('', 'text/turtle'),
('text/turtle', 'text/turtle'),
('application/rdf+xml', 'application/rdf+xml'),
('application/n-triples', 'application/n-triples'),
('application/bogus', 'text/turtle'),
(
'application/rdf+xml;q=0.5,application/n-triples;q=0.7',
'application/n-triples'),
(
'application/rdf+xml;q=0.5,application/bogus;q=0.7',
'application/rdf+xml'),
('application/rdf+xml;q=0.5,text/n3;q=0.7', 'text/n3'),
(
'application/rdf+xml;q=0.5,application/ld+json;q=0.7',
'application/ld+json'),
}
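        # Each pair is (Accept header sent, expected response MIME type):
        # unsupported types fall back to text/turtle, and among q-weighted
        # alternatives the highest-weighted supported type wins.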
for mimetype, fmt in accept_list:
rsp = self.client.get('/ldp', headers={'Accept': mimetype})
assert rsp.mimetype == fmt
gr = Graph(identifier=g.webroot + '/').parse(
data=rsp.data, format=fmt)
assert nsc['fcrepo'].RepositoryRoot in set(gr.objects())
def test_provided_rdf(self):
"""
        Test several input RDF serialization formats.
"""
self.client.get('/ldp')
gr = Graph()
gr.add((
URIRef(g.webroot + '/test_mimetype'),
nsc['dcterms'].title, Literal('Test MIME type.')))
test_list = {
'application/n-triples',
'application/rdf+xml',
'text/n3',
'text/turtle',
'application/ld+json',
}
for mimetype in test_list:
rdf_data = gr.serialize(format=mimetype)
self.client.put(
'/ldp/test_mimetype', data=rdf_data, content_type=mimetype)
rsp = self.client.get('/ldp/test_mimetype')
rsp_gr = Graph(identifier=g.webroot + '/test_mimetype').parse(
data=rsp.data, format='text/turtle')
assert (
URIRef(g.webroot + '/test_mimetype'),
nsc['dcterms'].title, Literal('Test MIME type.')) in rsp_gr
@pytest.mark.usefixtures('client_class')
class TestDigestHeaders:
"""
Test Digest and ETag headers.
"""
def test_etag_digest(self):
"""
Verify ETag and Digest headers on creation.
The headers must correspond to the checksum of the binary content.
"""
uid = '/test_etag1'
path = '/ldp' + uid
content = uuid4().bytes
content_cksum = hashlib.new(digest_algo, content)
put_rsp = self.client.put(
path, data=content, content_type='text/plain')
assert content_cksum.hexdigest() in \
put_rsp.headers.get('etag').split(',')
assert put_rsp.headers.get('digest') == \
f'{digest_algo.upper()}=' + b64encode(content_cksum.digest()).decode()
get_rsp = self.client.get(path)
assert content_cksum.hexdigest() in \
put_rsp.headers.get('etag').split(',')
assert get_rsp.headers.get('digest') == \
f'{digest_algo.upper()}=' + b64encode(content_cksum.digest()).decode()
def test_etag_ident(self):
"""
Verify that two resources with the same content yield identical ETags.
"""
path1 = f'/ldp/{uuid4()}'
path2 = f'/ldp/{uuid4()}'
content = uuid4().bytes
content_cksum = hashlib.new(digest_algo, content)
self.client.put(path1, data=content, content_type='text/plain')
self.client.put(path2, data=content, content_type='text/plain')
get_rsp1 = self.client.get(path1)
get_rsp2 = self.client.get(path2)
assert get_rsp1.headers.get('etag') == get_rsp2.headers.get('etag')
assert get_rsp1.headers.get('digest') == get_rsp2.headers.get('digest')
def test_etag_diff(self):
"""
Verify that two resources with different content yield different ETags.
"""
path1 = f'/ldp/{uuid4()}'
path2 = f'/ldp/{uuid4()}'
content1 = b'some interesting content.'
content_cksum1 = hashlib.new(digest_algo, content1)
content2 = b'Some great content.'
content_cksum2 = hashlib.new(digest_algo, content2)
self.client.put(path1, data=content1, content_type='text/plain')
self.client.put(path2, data=content2, content_type='text/plain')
get_rsp1 = self.client.get(path1)
get_rsp2 = self.client.get(path2)
assert get_rsp1.headers.get('etag') != get_rsp2.headers.get('etag')
assert get_rsp1.headers.get('digest') != get_rsp2.headers.get('digest')
def test_etag_update(self):
"""
Verify that ETag and digest change when the resource is updated.
The headers should NOT change if the same binary content is
re-submitted.
"""
path = f'/ldp/{uuid4()}'
content1 = uuid4().bytes
content_cksum1 = hashlib.new(digest_algo, content1)
content2 = uuid4().bytes
content_cksum2 = hashlib.new(digest_algo, content2)
self.client.put(path, data=content1, content_type='text/plain')
get_rsp = self.client.get(path)
assert content_cksum1.hexdigest() == \
get_rsp.headers.get('etag').strip('"')
assert get_rsp.headers.get('digest') == \
f'{digest_algo.upper()}=' + b64encode(content_cksum1.digest()).decode()
put_rsp = self.client.put(
path, data=content2, content_type='text/plain')
assert content_cksum2.hexdigest() == \
put_rsp.headers.get('etag').strip('"')
assert put_rsp.headers.get('digest') == \
f'{digest_algo.upper()}=' + b64encode(content_cksum2.digest()).decode()
get_rsp = self.client.get(path)
assert content_cksum2.hexdigest() == \
get_rsp.headers.get('etag').strip('"')
assert get_rsp.headers.get('digest') == \
f'{digest_algo.upper()}=' + b64encode(content_cksum2.digest()).decode()
def test_etag_rdf(self):
"""
Verify that LDP-RS resources don't get an ETag.
TODO This is by design for now; when a reliable hashing method
for a graph is devised, this test should change.
"""
path = f'/ldp/{uuid4()}'
put_rsp = self.client.put(path, content_type='text/turtle')
assert not put_rsp.headers.get('etag')
assert not put_rsp.headers.get('digest')
get_rsp = self.client.get(path)
assert not get_rsp.headers.get('etag')
assert not get_rsp.headers.get('digest')
def test_digest_put(self):
"""
Test the ``Digest`` header with PUT to verify content integrity.
"""
path1 = f'/ldp/{uuid4()}'
path2 = f'/ldp/{uuid4()}'
path3 = f'/ldp/{uuid4()}'
content = uuid4().bytes
content_sha1 = sha1(content).hexdigest()
content_sha256 = sha256(content).hexdigest()
content_blake2b = blake2b(content).hexdigest()
assert self.client.put(path1, data=content, headers={
'digest': 'sha1=abcd'}).status_code == 409
assert self.client.put(path1, data=content, headers={
'digest': f'sha1={content_sha1}'}).status_code == 201
assert self.client.put(path2, data=content, headers={
'digest': f'SHA1={content_sha1}'}).status_code == 201
assert self.client.put(path3, data=content, headers={
'digest': f'SHA256={content_sha256}'}).status_code == 201
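        # path3 already exists at this point, so re-PUTting identical
        # content (verified with a different algorithm) returns 204.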
assert self.client.put(path3, data=content, headers={
'digest': f'blake2b={content_blake2b}'}).status_code == 204
def test_digest_post(self):
"""
Test the ``Digest`` header with POST to verify content integrity.
"""
path = '/ldp'
content = uuid4().bytes
content_sha1 = sha1(content).hexdigest()
content_sha256 = sha256(content).hexdigest()
content_blake2b = blake2b(content).hexdigest()
assert self.client.post(path, data=content, headers={
'digest': 'sha1=abcd'}).status_code == 409
assert self.client.post(path, data=content, headers={
'digest': f'sha1={content_sha1}'}).status_code == 201
assert self.client.post(path, data=content, headers={
'digest': f'SHA1={content_sha1}'}).status_code == 201
assert self.client.post(path, data=content, headers={
'digest': f'SHA256={content_sha256}'}).status_code == 201
assert self.client.post(path, data=content, headers={
'digest': f'blake2b={content_blake2b}'}).status_code == 201
assert self.client.post(path, data=content, headers={
'digest': f'bogusmd={content_blake2b}'}).status_code == 400
bencoded = b64encode(content_blake2b.encode())
assert self.client.post(
path, data=content,
headers={'digest': f'blake2b={bencoded}'}
).status_code == 400
@pytest.mark.usefixtures('client_class')
class TestETagCondHeaders:
"""
Test Digest and ETag headers.
"""
def test_if_match_get(self):
"""
Test the If-Match header on GET requests.
Test providing single and multiple ETags.
"""
path = '/ldp/test_if_match1'
content = uuid4().bytes
content_cksum = hashlib.new(digest_algo, content).hexdigest()
bogus_cksum = uuid4().hex
self.client.put(
path, data=content, headers={'content-type': 'text/plain'})
get_rsp = self.client.get(path, headers={
'if-match': f'"{content_cksum}"'})
assert get_rsp.status_code == 200
get_rsp = self.client.get(path, headers={
'if-match': f'"{bogus_cksum}"'})
assert get_rsp.status_code == 412
get_rsp = self.client.get(path, headers={
'if-match': f'"{content_cksum}", "{bogus_cksum}"'})
assert get_rsp.status_code == 200
def test_if_match_put(self):
"""
Test the If-Match header on PUT requests.
Test providing single and multiple ETags.
"""
path = '/ldp/test_if_match1'
content = uuid4().bytes
content_cksum = hashlib.new(digest_algo, content).hexdigest()
bogus_cksum = uuid4().hex
get_rsp = self.client.get(path)
old_cksum = get_rsp.headers.get('etag')
put_rsp = self.client.put(path, data=content, headers={
'if-match': f'"{content_cksum}"'})
assert put_rsp.status_code == 412
put_rsp = self.client.put(path, data=content, headers={
'if-match': f'"{content_cksum}", "{bogus_cksum}"'})
assert put_rsp.status_code == 412
put_rsp = self.client.put(path, data=content, headers={
'if-match': f'"{old_cksum}", "{bogus_cksum}"'})
assert put_rsp.status_code == 204
# Now contents have changed.
put_rsp = self.client.put(path, data=content, headers={
'if-match': f'"{old_cksum}"'})
assert put_rsp.status_code == 412
put_rsp = self.client.put(path, data=content, headers={
'if-match': f'"{content_cksum}"'})
assert put_rsp.status_code == 204
# Exactly the same content was uploaded, so the ETag should not have
# changed.
put_rsp = self.client.put(path, data=content, headers={
'if-match': f'"{content_cksum}"'})
assert put_rsp.status_code == 204
# Catch-all: Proceed if resource exists at the given location.
put_rsp = self.client.put(path, data=content, headers={
'if-match': '*'})
assert put_rsp.status_code == 204
# This is wrong syntax. It will not update because the literal asterisk
# won't match.
put_rsp = self.client.put(path, data=content, headers={
'if-match': '"*"'})
assert put_rsp.status_code == 412
# Test delete.
del_rsp = self.client.delete(path, headers={
'if-match': f'"{old_cksum}"', 'Prefer': 'no-tombstone'})
assert del_rsp.status_code == 412
del_rsp = self.client.delete(path, headers={
'if-match': f'"{content_cksum}"', 'Prefer': 'no-tombstone'})
assert del_rsp.status_code == 204
put_rsp = self.client.put(path, data=content, headers={
'if-match': '*'})
assert put_rsp.status_code == 412
def test_if_none_match_get(self):
"""
Test the If-None-Match header on GET requests.
Test providing single and multiple ETags.
"""
path = '/ldp/test_if_none_match1'
content = uuid4().bytes
content_cksum = hashlib.new(digest_algo, content).hexdigest()
bogus_cksum = uuid4().hex
self.client.put(
path, data=content, headers={'content-type': 'text/plain'})
get_rsp1 = self.client.get(path, headers={
'if-none-match': f'"{content_cksum}"'})
assert get_rsp1.status_code == 304
get_rsp2 = self.client.get(path, headers={
'if-none-match': f'"{bogus_cksum}"'})
assert get_rsp2.status_code == 200
get_rsp3 = self.client.get(path, headers={
'if-none-match': f'"{content_cksum}", "{bogus_cksum}"'})
assert get_rsp3.status_code == 304
# 404 has precedence on ETag handling.
get_rsp = self.client.get('/ldp/bogus', headers={
'if-none-match': f'"{bogus_cksum}"'})
assert get_rsp.status_code == 404
get_rsp = self.client.get('/ldp/bogus', headers={
'if-none-match': f'"{content_cksum}"'})
assert get_rsp.status_code == 404
def test_if_none_match_put(self):
"""
Test the If-None-Match header on PUT requests.
Test providing single and multiple ETags.
Uses a previously created resource.
"""
path = '/ldp/test_if_none_match1'
content = uuid4().bytes
content_cksum = hashlib.new(digest_algo, content).hexdigest()
bogus_cksum = uuid4().hex
get_rsp = self.client.get(path)
old_cksum = get_rsp.headers.get('etag')
put_rsp = self.client.put(path, data=content, headers={
'if-none-match': f'"{old_cksum}"'})
assert put_rsp.status_code == 412
put_rsp = self.client.put(path, data=content, headers={
'if-none-match': f'"{old_cksum}", "{bogus_cksum}"'})
assert put_rsp.status_code == 412
put_rsp = self.client.put(path, data=content, headers={
'if-none-match': f'"{bogus_cksum}"'})
assert put_rsp.status_code == 204
# Now contents have changed.
put_rsp = self.client.put(path, data=content, headers={
'if-none-match': f'"{content_cksum}"'})
assert put_rsp.status_code == 412
put_rsp = self.client.put(path, data=content, headers={
'if-none-match': f'"{old_cksum}"'})
assert put_rsp.status_code == 204
# Catch-all: fail if any resource exists at the given location.
put_rsp = self.client.put(path, data=content, headers={
'if-none-match': '*'})
assert put_rsp.status_code == 412
# Test delete.
del_rsp = self.client.delete(path, headers={
'if-none-match': f'"{content_cksum}"', 'Prefer': 'no-tombstone'})
assert del_rsp.status_code == 412
del_rsp = self.client.delete(path, headers={
'if-none-match': f'"{bogus_cksum}"', 'Prefer': 'no-tombstone'})
assert del_rsp.status_code == 204
put_rsp = self.client.put(path, data=content, headers={
'if-none-match': '*'})
assert put_rsp.status_code == 201
# This is wrong syntax. It will update because the literal asterisk
# won't match.
put_rsp = self.client.put(path, data=content, headers={
'if-none-match': '"*"'})
assert put_rsp.status_code == 204
def test_etag_notfound(self):
"""
Verify that 404 and 410 have precedence on ETag handling.
"""
path = f'/ldp/{uuid4()}'
bogus_cksum = uuid4().hex
get_rsp = self.client.get(path, headers={
'if-match': f'"{bogus_cksum}"'})
assert get_rsp.status_code == 404
get_rsp = self.client.get(path, headers={
'if-match': '*'})
assert get_rsp.status_code == 404
get_rsp = self.client.get(path, headers={
'if-none-match': f'"{bogus_cksum}"'})
assert get_rsp.status_code == 404
self.client.put(path)
self.client.delete(path)
get_rsp = self.client.get(path, headers={
'if-match': f'"{bogus_cksum}"'})
assert get_rsp.status_code == 410
get_rsp = self.client.get(path, headers={
'if-none-match': f'"{bogus_cksum}"'})
assert get_rsp.status_code == 410
get_rsp = self.client.get(path, headers={
'if-match': '*'})
assert get_rsp.status_code == 410
@pytest.mark.usefixtures('client_class')
class TestModifyTimeCondHeaders:
"""
Test time-related conditional headers.
"""
@pytest.fixture(scope='class')
def timeframe(self):
"""
Times used in these tests: UTC midnight of today, yesterday, tomorrow.
"""
today = arrow.utcnow().floor('day')
yesterday = today.shift(days=-1)
tomorrow = today.shift(days=1)
path = f'/ldp/{uuid4()}'
self.client.put(path)
return path, today, yesterday, tomorrow
def test_nothing(self):
"""
For some reason, without this the fixture won't initialize properly.
"""
self.client.get('/')
def test_if_modified_since(self, timeframe):
"""
Test various uses of the If-Modified-Since header.
"""
path, today, yesterday, tomorrow = timeframe
assert self.client.head(
path, headers={'if-modified-since': http_date(today.timestamp)}
).status_code == 200
assert self.client.get(
path, headers={'if-modified-since': http_date(today.timestamp)}
).status_code == 200
assert self.client.head(
path, headers={'if-modified-since': http_date(yesterday.timestamp)}
).status_code == 200
assert self.client.get(
path, headers={'if-modified-since': http_date(yesterday.timestamp)}
).status_code == 200
assert self.client.head(
path, headers={'if-modified-since': http_date(tomorrow.timestamp)}
).status_code == 304
assert self.client.get(
path, headers={'if-modified-since': http_date(tomorrow.timestamp)}
).status_code == 304
def test_if_unmodified_since(self, timeframe):
"""
Test various uses of the If-Unmodified-Since header.
"""
path, today, yesterday, tomorrow = timeframe
assert self.client.head(
path, headers={'if-unmodified-since': http_date(today.timestamp)}
).status_code == 304
assert self.client.get(
path, headers={'if-unmodified-since': http_date(today.timestamp)}
).status_code == 304
assert self.client.head(
path, headers={'if-unmodified-since': http_date(yesterday.timestamp)}
).status_code == 304
assert self.client.get(
path, headers={'if-unmodified-since': http_date(yesterday.timestamp)}
).status_code == 304
assert self.client.head(
path, headers={'if-unmodified-since': http_date(tomorrow.timestamp)}
).status_code == 200
assert self.client.get(
path, headers={'if-unmodified-since': http_date(tomorrow.timestamp)}
).status_code == 200
def test_time_range(self, timeframe):
"""
Test conditions inside and outside of a time range.
"""
path, today, yesterday, tomorrow = timeframe
# Send me the resource if it has been modified between yesterday
# and tomorrow.
assert self.client.get(path, headers={
'if-modified-since': http_date(yesterday.timestamp),
'if-unmodified-since': http_date(tomorrow.timestamp),
}).status_code == 200
# Send me the resource if it has been modified between today
# and tomorrow.
assert self.client.get(path, headers={
'if-modified-since': http_date(today.timestamp),
'if-unmodified-since': http_date(tomorrow.timestamp),
}).status_code == 200
# Send me the resource if it has been modified between yesterday
# and today.
assert self.client.get(path, headers={
'if-modified-since': http_date(yesterday.timestamp),
'if-unmodified-since': http_date(today.timestamp),
}).status_code == 304
# Send me the resource if it has been modified between two days ago
# and yesterday.
assert self.client.get(path, headers={
'if-modified-since': http_date(yesterday.shift(days=-1).timestamp),
'if-unmodified-since': http_date(yesterday.timestamp),
}).status_code == 304
# Send me the resource if it has been modified between tomorrow
# and two days from today.
assert self.client.get(path, headers={
'if-modified-since': http_date(tomorrow.timestamp),
'if-unmodified-since': http_date(tomorrow.shift(days=1).timestamp),
}).status_code == 304
def test_time_etag_combo(self, timeframe):
"""
Test evaluation priorities among ETag and time headers.
"""
_, today, yesterday, tomorrow = timeframe
path = f'/ldp/{uuid4()}'
content = uuid4().bytes
content_cksum = hashlib.new(digest_algo, content).hexdigest()
bogus_cksum = uuid4().hex
self.client.put(
path, data=content, headers={'content-type': 'text/plain'})
# Negative ETag match wins.
assert self.client.get(path, headers={
'if-match': f'"{bogus_cksum}"',
'if-modified-since': http_date(yesterday.timestamp),
}).status_code == 412
assert self.client.get(path, headers={
'if-match': f'"{bogus_cksum}"',
'if-unmodified-since': http_date(tomorrow.timestamp),
}).status_code == 412
assert self.client.get(path, headers={
'if-none-match': f'"{content_cksum}"',
'if-modified-since': http_date(yesterday.timestamp),
}).status_code == 304
assert self.client.get(path, headers={
'if-none-match': f'"{content_cksum}"',
'if-unmodified-since': http_date(tomorrow.timestamp),
}).status_code == 304
# Positive ETag match wins.
assert self.client.get(path, headers={
'if-match': f'"{content_cksum}"',
'if-unmodified-since': http_date(yesterday.timestamp),
}).status_code == 200
assert self.client.get(path, headers={
'if-match': f'"{content_cksum}"',
'if-modified-since': http_date(tomorrow.timestamp),
}).status_code == 200
assert self.client.get(path, headers={
'if-none-match': f'"{bogus_cksum}"',
'if-unmodified-since': http_date(yesterday.timestamp),
}).status_code == 200
assert self.client.get(path, headers={
'if-none-match': f'"{bogus_cksum}"',
'if-modified-since': http_date(tomorrow.timestamp),
}).status_code == 200
@pytest.mark.usefixtures('client_class')
class TestRange:
"""
Test byte range retrieval.
    This should not need very deep testing since the functionality is
    implemented in Werkzeug/Flask.
"""
@pytest.fixture(scope='class')
def bytestream(self):
"""
Create a sample bytestream with predictable (8x8 bytes) content.
"""
return b''.join([bytes([n] * 8) for n in range(8)])
def test_get_range(self, bytestream):
"""
Get different ranges of the bitstream.
"""
path = '/ldp/test_range'
self.client.put(path, data=bytestream)
# First 8 bytes.
assert self.client.get(
path, headers={'range': 'bytes=0-7'}).data == b'\x00' * 8
# Last 4 bytes of first block, first 4 of second block.
assert self.client.get(
path, headers={'range': 'bytes=4-11'}
).data == b'\x00' * 4 + b'\x01' * 4
# Last 8 bytes.
assert self.client.get(
path, headers={'range': 'bytes=56-'}).data == b'\x07' * 8
def test_fail_ranges(self, bytestream):
"""
Test malformed or unsupported ranges.
"""
path = '/ldp/test_range'
# TODO This shall be a 206 when multiple ranges are supported.
fail_rsp = self.client.get(path, headers={'range': 'bytes=0-1, 7-8'})
assert fail_rsp.status_code == 501
# Bad ranges will be ignored.
for rng in ((10, 4), ('', 3), (3600, 6400)):
            bad_rsp = self.client.get(
                path, headers={'range': f'bytes={rng[0]}-{rng[1]}'})
assert bad_rsp.status_code == 200
assert bad_rsp.data == bytestream
assert int(bad_rsp.headers['content-length']) == len(bytestream)
def test_range_rsp_headers(self, bytestream):
"""
Test various headers for a ranged response.
"""
path = '/ldp/test_range'
start_b = 0
end_b = 7
full_rsp = self.client.get(path)
part_rsp = self.client.get(path, headers={
'range': f'bytes={start_b}-{end_b}'})
for hdr_name in ['etag', 'digest', 'content-type']:
assert part_rsp.headers[hdr_name] == full_rsp.headers[hdr_name]
for hdr in part_rsp.headers['link']:
assert hdr in full_rsp.headers['link']
assert int(part_rsp.headers['content-length']) == end_b - start_b + 1
assert part_rsp.headers['content-range'] == \
f'bytes {start_b}-{end_b} / {len(bytestream)}'
@pytest.mark.usefixtures('client_class')
class TestPrefHeader:
"""
Test various combinations of `Prefer` header.
"""
@pytest.fixture(scope='class')
def cont_structure(self):
"""
Create a container structure to be used for subsequent requests.
"""
parent_path = '/ldp/test_parent'
self.client.put(parent_path, content_type='text/turtle')
self.client.put(parent_path + '/child1', content_type='text/turtle')
self.client.put(parent_path + '/child2', content_type='text/turtle')
self.client.put(parent_path + '/child3', content_type='text/turtle')
return {
'path' : parent_path,
'response' : self.client.get(parent_path),
}
def test_put_prefer_handling(self, random_uuid):
"""
Trying to PUT an existing resource should:
- Return a 204 if the payload is empty
- Return a 204 if the payload is RDF, server-managed triples are
included and the 'Prefer' header is set to 'handling=lenient'
        - Return a 412 (ServerManagedTermError) if the payload is RDF,
          server-managed triples are included, and handling is set to
          'strict' or left unset.
"""
path = '/ldp/put_pref_header01'
assert self.client.put(path, content_type='text/turtle').status_code == 201
assert self.client.get(path).status_code == 200
assert self.client.put(path, content_type='text/turtle').status_code == 204
# Default handling is strict.
with open('tests/data/rdf_payload_w_srv_mgd_trp.ttl', 'rb') as f:
rsp_default = self.client.put(
path,
headers={
'Content-Type' : 'text/turtle',
},
data=f
)
assert rsp_default.status_code == 412
with open('tests/data/rdf_payload_w_srv_mgd_trp.ttl', 'rb') as f:
rsp_len = self.client.put(
path,
headers={
'Prefer' : 'handling=lenient',
'Content-Type' : 'text/turtle',
},
data=f
)
assert rsp_len.status_code == 204
with open('tests/data/rdf_payload_w_srv_mgd_trp.ttl', 'rb') as f:
rsp_strict = self.client.put(
path,
headers={
'Prefer' : 'handling=strict',
'Content-Type' : 'text/turtle',
},
data=f
)
assert rsp_strict.status_code == 412
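    # For reference, the ``Prefer`` values exercised in this class follow
    # RFC 7240 syntax; a minimal sketch of the two flavors used below (the
    # URI is a placeholder):
    #
    #     headers = {'Prefer': 'handling=lenient'}                     # PUT
    #     headers = {'Prefer': 'return=representation; omit="<uri>"'}  # GET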
# @HOLD Embed children is debated.
def _disabled_test_embed_children(self, cont_structure):
"""
verify the "embed children" prefer header.
"""
self.client.get('/ldp')
parent_path = cont_structure['path']
cont_resp = cont_structure['response']
cont_subject = URIRef(g.webroot + '/test_parent')
#minimal_resp = self.client.get(parent_path, headers={
# 'Prefer' : 'return=minimal',
#})
incl_embed_children_resp = self.client.get(parent_path, headers={
'Prefer' : 'return=representation; include={}'\
.format(Ldpr.EMBED_CHILD_RES_URI),
})
omit_embed_children_resp = self.client.get(parent_path, headers={
'Prefer' : 'return=representation; omit={}'\
.format(Ldpr.EMBED_CHILD_RES_URI),
})
default_gr = Graph().parse(data=cont_resp.data, format='turtle')
incl_gr = Graph().parse(
data=incl_embed_children_resp.data, format='turtle')
omit_gr = Graph().parse(
data=omit_embed_children_resp.data, format='turtle')
assert isomorphic(omit_gr, default_gr)
children = set(incl_gr[cont_subject : nsc['ldp'].contains])
assert len(children) == 3
for child_uri in children:
assert set(incl_gr[ child_uri : : ])
assert not set(omit_gr[ child_uri : : ])
def test_return_children(self, cont_structure):
"""
verify the "return children" prefer header.
"""
self.client.get('/ldp')
parent_path = cont_structure['path']
cont_resp = cont_structure['response']
cont_subject = URIRef(g.webroot + '/test_parent')
incl_children_resp = self.client.get(parent_path, headers={
'Prefer' : 'return=representation; include={}'\
.format(Ldpr.RETURN_CHILD_RES_URI),
})
omit_children_resp = self.client.get(parent_path, headers={
'Prefer' : 'return=representation; omit={}'\
.format(Ldpr.RETURN_CHILD_RES_URI),
})
default_gr = Graph().parse(data=cont_resp.data, format='turtle')
incl_gr = Graph().parse(data=incl_children_resp.data, format='turtle')
omit_gr = Graph().parse(data=omit_children_resp.data, format='turtle')
assert isomorphic(incl_gr, default_gr)
children = incl_gr[cont_subject : nsc['ldp'].contains]
for child_uri in children:
assert not omit_gr[cont_subject : nsc['ldp'].contains : child_uri]
def test_inbound_rel(self, cont_structure):
"""
verify the "inbound relationships" prefer header.
"""
self.client.put('/ldp/test_target', content_type='text/turtle')
data = '<> <http://ex.org/ns#shoots> <{}> .'.format(
g.webroot + '/test_target')
self.client.put('/ldp/test_shooter', data=data,
headers={'Content-Type': 'text/turtle'})
cont_resp = self.client.get('/ldp/test_target')
incl_inbound_resp = self.client.get('/ldp/test_target', headers={
'Prefer' : 'return=representation; include="{}"'\
.format(Ldpr.RETURN_INBOUND_REF_URI),
})
omit_inbound_resp = self.client.get('/ldp/test_target', headers={
'Prefer' : 'return=representation; omit="{}"'\
.format(Ldpr.RETURN_INBOUND_REF_URI),
})
default_gr = Graph().parse(data=cont_resp.data, format='turtle')
incl_gr = Graph().parse(data=incl_inbound_resp.data, format='turtle')
omit_gr = Graph().parse(data=omit_inbound_resp.data, format='turtle')
subject = URIRef(g.webroot + '/test_target')
inbd_subject = URIRef(g.webroot + '/test_shooter')
assert isomorphic(omit_gr, default_gr)
assert len(set(incl_gr[inbd_subject : : ])) == 1
assert incl_gr[
inbd_subject : URIRef('http://ex.org/ns#shoots') : subject]
assert not len(set(omit_gr[inbd_subject : :]))
def test_srv_mgd_triples(self, cont_structure):
"""
verify the "server managed triples" prefer header.
"""
self.client.get('/ldp')
parent_path = cont_structure['path']
cont_resp = cont_structure['response']
cont_subject = URIRef(g.webroot + '/test_parent')
incl_srv_mgd_resp = self.client.get(parent_path, headers={
'Prefer' : 'return=representation; include={}'\
.format(Ldpr.RETURN_SRV_MGD_RES_URI),
})
omit_srv_mgd_resp = self.client.get(parent_path, headers={
'Prefer' : 'return=representation; omit={}'\
.format(Ldpr.RETURN_SRV_MGD_RES_URI),
})
default_gr = Graph().parse(data=cont_resp.data, format='turtle')
incl_gr = Graph().parse(data=incl_srv_mgd_resp.data, format='turtle')
omit_gr = Graph().parse(data=omit_srv_mgd_resp.data, format='turtle')
assert isomorphic(incl_gr, default_gr)
for pred in {
nsc['fcrepo'].created,
nsc['fcrepo'].createdBy,
nsc['fcrepo'].lastModified,
nsc['fcrepo'].lastModifiedBy,
nsc['ldp'].contains,
}:
assert set(incl_gr[ cont_subject : pred : ])
assert not set(omit_gr[ cont_subject : pred : ])
        for rdf_type in {
            nsc['fcrepo'].Resource,
            nsc['ldp'].Container,
            nsc['ldp'].Resource,
        }:
            assert incl_gr[ cont_subject : RDF.type : rdf_type ]
            assert not omit_gr[ cont_subject : RDF.type : rdf_type ]
def test_delete_no_tstone(self):
"""
Test the `no-tombstone` Prefer option.
"""
self.client.put('/ldp/test_delete_no_tstone01')
self.client.put('/ldp/test_delete_no_tstone01/a')
self.client.delete('/ldp/test_delete_no_tstone01', headers={
'prefer' : 'no-tombstone'})
resp = self.client.get('/ldp/test_delete_no_tstone01')
assert resp.status_code == 404
child_resp = self.client.get('/ldp/test_delete_no_tstone01/a')
assert child_resp.status_code == 404
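# The tombstone lifecycle asserted above (and resurrected in
# TestVersion.test_resurrection below) boils down to this sketch, with
# illustrative paths:
#
#     client.delete('/ldp/foo')                  # subsequent GET returns 410
#     client.post('/ldp/foo/fcr:tombstone')      # resurrect; GET returns 200
#     client.delete('/ldp/bar', headers={'prefer': 'no-tombstone'})  # GET 404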
#@pytest.mark.usefixtures('client_class')
#@pytest.mark.usefixtures('db')
#class TestDigest:
# """
# Test digest and ETag handling.
# """
# @pytest.mark.skip(reason='TODO Need to implement async digest queue')
# def test_digest_post(self):
# """
# Test ``Digest`` and ``ETag`` headers on resource POST.
# """
# resp = self.client.post('/ldp/')
# assert 'Digest' in resp.headers
# assert 'ETag' in resp.headers
# assert (
# b64encode(bytes.fromhex(
# resp.headers['ETag'].replace('W/', '')
# )).decode('ascii') ==
# resp.headers['Digest'].replace('SHA256=', ''))
#
#
# @pytest.mark.skip(reason='TODO Need to implement async digest queue')
# def test_digest_put(self):
# """
# Test ``Digest`` and ``ETag`` headers on resource PUT.
# """
# resp_put = self.client.put('/ldp/test_digest_put')
# assert 'Digest' in resp_put.headers
# assert 'ETag' in resp_put.headers
# assert (
# b64encode(bytes.fromhex(
# resp_put.headers['ETag'].replace('W/', '')
# )).decode('ascii') ==
# resp_put.headers['Digest'].replace('SHA256=', ''))
#
# resp_get = self.client.get('/ldp/test_digest_put')
# assert 'Digest' in resp_get.headers
# assert 'ETag' in resp_get.headers
# assert (
# b64encode(bytes.fromhex(
# resp_get.headers['ETag'].replace('W/', '')
# )).decode('ascii') ==
# resp_get.headers['Digest'].replace('SHA256=', ''))
#
#
# @pytest.mark.skip(reason='TODO Need to implement async digest queue')
# def test_digest_patch(self):
# """
# Verify that the digest and ETag change on resource change.
# """
# path = '/ldp/test_digest_patch'
# self.client.put(path)
# rsp1 = self.client.get(path)
#
# self.client.patch(
# path, data=b'DELETE {} INSERT {<> a <http://ex.org/Test> .} '
# b'WHERE {}',
# headers={'Content-Type': 'application/sparql-update'})
# rsp2 = self.client.get(path)
#
# assert rsp1.headers['ETag'] != rsp2.headers['ETag']
# assert rsp1.headers['Digest'] != rsp2.headers['Digest']
@pytest.mark.usefixtures('client_class')
@pytest.mark.usefixtures('db')
class TestVersion:
"""
Test version creation, retrieval and deletion.
"""
def test_create_versions(self):
"""
Test that POSTing multiple times to fcr:versions creates the
'hasVersions' triple and yields multiple version snapshots.
"""
self.client.put('/ldp/test_version', content_type='text/turtle')
create_rsp = self.client.post('/ldp/test_version/fcr:versions')
assert create_rsp.status_code == 201
rsrc_rsp = self.client.get('/ldp/test_version')
rsrc_gr = Graph().parse(data=rsrc_rsp.data, format='turtle')
assert len(set(rsrc_gr[: nsc['fcrepo'].hasVersions :])) == 1
info_rsp = self.client.get('/ldp/test_version/fcr:versions')
assert info_rsp.status_code == 200
info_gr = Graph().parse(data=info_rsp.data, format='turtle')
assert len(set(info_gr[: nsc['fcrepo'].hasVersion :])) == 1
self.client.post('/ldp/test_version/fcr:versions')
info2_rsp = self.client.get('/ldp/test_version/fcr:versions')
info2_gr = Graph().parse(data=info2_rsp.data, format='turtle')
assert len(set(info2_gr[: nsc['fcrepo'].hasVersion :])) == 2
def test_version_with_slug(self):
"""
Test a version with a slug.
"""
self.client.put('/ldp/test_version_slug', content_type='text/turtle')
create_rsp = self.client.post('/ldp/test_version_slug/fcr:versions',
headers={'slug' : 'v1'})
new_ver_uri = create_rsp.headers['Location']
assert new_ver_uri == g.webroot + '/test_version_slug/fcr:versions/v1'
info_rsp = self.client.get('/ldp/test_version_slug/fcr:versions')
info_gr = Graph().parse(data=info_rsp.data, format='turtle')
assert info_gr[
URIRef(new_ver_uri) :
nsc['fcrepo'].hasVersionLabel :
Literal('v1')]
def test_dupl_version(self):
"""
Make sure that two POSTs with the same slug result in two different
versions.
"""
path = '/ldp/test_duplicate_slug'
self.client.put(path, content_type='text/turtle')
v1_rsp = self.client.post(path + '/fcr:versions',
headers={'slug' : 'v1'})
v1_uri = v1_rsp.headers['Location']
dup_rsp = self.client.post(path + '/fcr:versions',
headers={'slug' : 'v1'})
dup_uri = dup_rsp.headers['Location']
assert v1_uri != dup_uri
def test_revert_version(self):
"""
Take a version snapshot, update a resource, and then revert to the
        previous version.
"""
rsrc_path = '/ldp/test_revert_version'
payload1 = '<> <urn:demo:p1> <urn:demo:o1> .'
payload2 = '<> <urn:demo:p1> <urn:demo:o2> .'
self.client.put(rsrc_path, headers={
'content-type': 'text/turtle'}, data=payload1)
self.client.post(
rsrc_path + '/fcr:versions', headers={'slug': 'v1'})
v1_rsp = self.client.get(rsrc_path)
v1_gr = Graph().parse(data=v1_rsp.data, format='turtle')
assert v1_gr[
URIRef(g.webroot + '/test_revert_version')
: URIRef('urn:demo:p1')
: URIRef('urn:demo:o1')
]
self.client.put(rsrc_path, headers={
'content-type': 'text/turtle'}, data=payload2)
v2_rsp = self.client.get(rsrc_path)
v2_gr = Graph().parse(data=v2_rsp.data, format='turtle')
assert v2_gr[
URIRef(g.webroot + '/test_revert_version')
: URIRef('urn:demo:p1')
: URIRef('urn:demo:o2')
]
self.client.patch(rsrc_path + '/fcr:versions/v1')
revert_rsp = self.client.get(rsrc_path)
revert_gr = Graph().parse(data=revert_rsp.data, format='turtle')
assert revert_gr[
URIRef(g.webroot + '/test_revert_version')
: URIRef('urn:demo:p1')
: URIRef('urn:demo:o1')
]
def test_resurrection(self):
"""
Delete and then resurrect a resource.
Make sure that the resource is resurrected to the latest version.
"""
path = '/ldp/test_lazarus'
self.client.put(path, content_type='text/turtle')
self.client.post(path + '/fcr:versions', headers={'slug': 'v1'})
self.client.put(
path, headers={'content-type': 'text/turtle'},
data=b'<> <urn:demo:p1> <urn:demo:o1> .')
self.client.post(path + '/fcr:versions', headers={'slug': 'v2'})
self.client.put(
path, headers={'content-type': 'text/turtle'},
data=b'<> <urn:demo:p1> <urn:demo:o2> .')
self.client.delete(path)
assert self.client.get(path).status_code == 410
self.client.post(path + '/fcr:tombstone')
laz_data = self.client.get(path).data
laz_gr = Graph().parse(data=laz_data, format='turtle')
assert laz_gr[
URIRef(g.webroot + '/test_lazarus')
: URIRef('urn:demo:p1')
: URIRef('urn:demo:o2')
]
| 35.205698 | 98 | 0.576635 | 8,745 | 72,911 | 4.65809 | 0.078902 | 0.071683 | 0.037658 | 0.025457 | 0.727237 | 0.666331 | 0.610802 | 0.572211 | 0.532638 | 0.50642 | 0 | 0.020796 | 0.287076 | 72,911 | 2,070 | 99 | 35.222705 | 0.76287 | 0.137647 | 0 | 0.510056 | 0 | 0.002414 | 0.197663 | 0.060299 | 0 | 0 | 0 | 0.002415 | 0.199517 | 1 | 0.060338 | false | 0 | 0.01609 | 0.000805 | 0.086082 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d93229576f67b3cf03c2c7712ccfa02da5e0379 | 2,040 | py | Python | examples/example_026.py | evereux/catia_python | 08948585899b12587b0415ce3c9191a408b34897 | [
"MIT"
] | 90 | 2019-02-21T10:05:28.000Z | 2022-03-19T01:53:41.000Z | examples/example_026.py | evereux/catia_python | 08948585899b12587b0415ce3c9191a408b34897 | [
"MIT"
] | 99 | 2019-05-21T08:29:12.000Z | 2022-03-25T09:55:15.000Z | examples/example_026.py | evereux/catia_python | 08948585899b12587b0415ce3c9191a408b34897 | [
"MIT"
] | 26 | 2019-04-04T06:31:36.000Z | 2022-03-30T07:24:47.000Z | #! /usr/bin/python3.6
"""
Example 26:
Prompt the user to select a product and get its bounding box parameters.
.. warning:
Currently there must be NO other existing Measure Inertias saved
    ANYWHERE in your product tree, as these may be returned instead of the
    product you have selected.
"""
##########################################################
# insert project folder into sys.path so examples can be run.
# for development purposes.
import os
import sys
sys.path.insert(0, os.path.abspath('..\\pycatia'))
##########################################################
import win32con
import win32gui
from pycatia import catia
def close_inertia_window():
# for future debugging from https://stackoverflow.com/questions/55547940/how-to-get-a-list-of-the-name-of-every-open-window
# def winEnumHandler(hwnd, ctx):
# if win32gui.IsWindowVisible(hwnd):
# print(hex(hwnd), win32gui.GetWindowText(hwnd))
#
# win32gui.EnumWindows(winEnumHandler, None)
    # FindWindow returns 0 when no window with this title exists; only post
    # WM_CLOSE if a valid handle was found.
    handle = win32gui.FindWindow(None, "Measure Inertia")
    if handle:
        win32gui.PostMessage(handle, win32con.WM_CLOSE, 0, 0)
caa = catia()
document = caa.active_document
product = document.product
selection = document.selection
selection.clear()
c = True
while c:
input("Selection product to measure.\nPress <ENTER> when selection made.")
selection = document.selection
caa.start_command("Measure Inertia")
parameters = product.parameters
print(f"BBOx = {parameters.item('BBOx').value_as_string()}.")
print(f"BBOy = {parameters.item('BBOy').value_as_string()}.")
print(f"BBOz = {parameters.item('BBOz').value_as_string()}.")
print(f"BBLx = {parameters.item('BBLx').value_as_string()}.")
print(f"BBLy = {parameters.item('BBLy').value_as_string()}.")
print(f"BBLz = {parameters.item('BBLz').value_as_string()}.")
selection.clear()
close_inertia_window()
prompt = input("Continue? (Y/N):")
    # Guard against empty input, which would raise an IndexError on [0].
    if prompt.strip().lower().startswith('n'):
        c = False
| 29.142857 | 127 | 0.645588 | 252 | 2,040 | 5.150794 | 0.507937 | 0.027735 | 0.060092 | 0.069337 | 0.07319 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018969 | 0.173039 | 2,040 | 69 | 128 | 29.565217 | 0.750445 | 0.323039 | 0 | 0.181818 | 0 | 0 | 0.350777 | 0.215863 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.151515 | 0 | 0.181818 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d950659300f78db9a3ef0896e4c8d3f830c4f0e | 7,060 | py | Python | docker/laaws-demo-webui/cgi-bin/auid-to-json.py | lockss/laaws-demo | f7aeb1b3a15f5955ae7ab41b64b5e45d07e0433e | [
"BSD-3-Clause"
] | null | null | null | docker/laaws-demo-webui/cgi-bin/auid-to-json.py | lockss/laaws-demo | f7aeb1b3a15f5955ae7ab41b64b5e45d07e0433e | [
"BSD-3-Clause"
] | null | null | null | docker/laaws-demo-webui/cgi-bin/auid-to-json.py | lockss/laaws-demo | f7aeb1b3a15f5955ae7ab41b64b5e45d07e0433e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# CGI script to extract content of AU as WARC and report result
# in JSON as specified in WASAPI
from warcio.warcwriter import WARCWriter
from warcio.statusandheaders import StatusAndHeaders
from argparse import ArgumentParser
import requests
import json
import tempfile
import re
import cgi
import cgitb
import urllib.parse
import sys
import os.path
import hashlib
from functools import partial
# Disable traceback to requester as output must be JSON
cgitb.enable(display=0, logdir="/usr/local/apache2/logs/cgitb")
message = 'Content-Type:application/json' + '\n\n'
# URL prefix for WSAPI service
service = "http://laaws-repo:8080/repos/"
warcPath1 = "/usr/local/apache2/htdocs/"
warcDir = "warcs/"
warcDirPath = warcPath1 + warcDir
warcFile = tempfile.NamedTemporaryFile(dir=warcDirPath, suffix=".warc.gz", delete=False)
warcName = os.path.basename(warcFile.name)
warcPath = warcDirPath + warcName
warcHost = 'demo.laaws.lockss.org'
repoName = None
# URL prefix for OpenWayback
wayback = "http://laaws-openwayback:8080/wayback/"
ingestdate = "20170201"
# Return a Dictionary with the params for the LAAWS repo request
def makeRepoParamsFromArgs():
parser = ArgumentParser()
parser.add_argument("--artifact", dest="my_artifact", help="Artifact ID")
parser.add_argument("--auid", dest="my_auid", help="AU ID of artifact")
parser.add_argument("--uri", dest="my_uri", help="URI of artifact")
parser.add_argument("--aspect", dest="my_aspect", help="Aspect of artifact")
parser.add_argument("--timestamp", dest="my_timestamp", help="Datetime of artifact")
parser.add_argument("--acquired", dest="my_acquired", help="Datetime acquired")
parser.add_argument("--hash", dest="my_hash", help="Artifact hash")
parser.add_argument("--committed", dest="my_committed", help="False means uncommitted only", default="true")
parser.add_argument("--includeAllAspects", dest="my_includeAllAspects", help="True means include all aspects", default="false")
parser.add_argument("--includeAllVersions", dest="my_includeAllVersions", help="True means include all versions", default="false")
parser.add_argument("--limit", dest="my_limit", help="Count of artifacts for paging")
parser.add_argument("--next_artifact", dest="my_next_artifact", help="Next artifact index")
args = parser.parse_args()
ret = {}
    if args.my_artifact is not None:
        ret['artifact'] = args.my_artifact
    if args.my_auid is not None:
        ret['auid'] = args.my_auid
    if args.my_uri is not None:
        ret['uri'] = args.my_uri
    if args.my_aspect is not None:
        ret['aspect'] = args.my_aspect
    if args.my_timestamp is not None:
        ret['timestamp'] = args.my_timestamp
    if args.my_acquired is not None:
        ret['acquired'] = args.my_acquired
    if args.my_hash is not None:
        ret['hash'] = args.my_hash
    if args.my_committed is not None:
        ret['committed'] = args.my_committed
    if args.my_includeAllAspects is not None:
        ret['includeAllAspects'] = args.my_includeAllAspects
    if args.my_includeAllVersions is not None:
        ret['includeAllVersions'] = args.my_includeAllVersions
    if args.my_limit is not None:
        ret['limit'] = args.my_limit
    if args.my_next_artifact is not None:
        ret['next'] = args.my_next_artifact
    if not ret:
        ret['committed'] = "false"
return ret
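# Example command-line invocation (argument values are illustrative; the AUID
# must match artifacts present in the repository):
#
#     ./auid-to-json.py --auid "<some-auid>" --committed false --limit 10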
def processRepoData(data):
uris = []
items = data['items']
if ( 'auid' in params1 ):
for art in items:
if ( art['auid'] == params1['auid'] ):
uris.append(art['uri'])
else:
for art in items:
uris.append(art['uri'])
return uris
def reportError(report):
descriptor = {
"includes-extras":False,
"files":[
],
"error":report
}
ret = json.dumps(descriptor)
return ret
def writeWarc(uris, warcFile):
ret = ""
sha1 = ""
if len(uris) < 1:
ret = reporError("No URIs to export")
else:
with warcFile as output:
writer = WARCWriter(output, gzip=True)
stem = wayback + ingestdate + '/'
for uri in uris:
owuri = stem + uri
resp = requests.get(owuri, headers={'Accept-Encoding': 'identity'},
stream=True)
if (resp.status_code == 200):
# get raw headers from urllib3
headers_list = resp.raw.headers.items()
http_headers = StatusAndHeaders('200 OK', headers_list,
protocol='HTTP/1.0')
record = writer.create_warc_record(uri, 'response',
payload=resp.raw,
http_headers=http_headers)
writer.write_record(record)
with open(warcFile.name, mode='rb') as f:
            m = hashlib.sha1()
            total_bytes = 0
            for buf in iter(partial(f.read, 4096), b''):
                m.update(buf)
                total_bytes += len(buf)
            sha1 = m.hexdigest()
descriptor = {
"includes-extras":False,
"files":[
{
"checksum":"sha1:" + sha1,
"content-type":"application/warc",
"filename":warcName,
"locations":[
'http://' + warcHost + '/' + warcDir + warcName
],
"size":"{}".format(bytes)
}
]
}
ret = json.dumps(descriptor)
return ret
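# For reference, a successful writeWarc() call returns a WASAPI-style file
# descriptor shaped like the following (values illustrative):
#
#     {"includes-extras": false,
#      "files": [{"checksum": "sha1:<hex digest>",
#                 "content-type": "application/warc",
#                 "filename": "<tmpname>.warc.gz",
#                 "locations": ["http://demo.laaws.lockss.org/warcs/<tmpname>.warc.gz"],
#                 "size": "<byte count>"}]}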
params1 = {}
try:
if(len(sys.argv) > 1):
# Run from command line
params1 = makeRepoParamsFromArgs()
else:
# get data from web page form
input_data=cgi.FieldStorage()
if 'auid' in input_data:
params1['auid'] = input_data['auid'].value
params1['committed'] = "false"
# Find the repo
repoResponse = requests.get(service)
status = repoResponse.status_code
if (status == 200):
repoData = repoResponse.json()
repoName = repoData[0]
else:
message += reportError("Repo request error: {0}\n{1}".format(status,repoResponse))
if (repoName != None):
# List the artifacts
repoResponse = requests.get(service + repoName + "/artifacts/", params=params1)
status = repoResponse.status_code
if(status == 200):
# LAAWS repo request successful
# parse the JSON we got back
repoData = repoResponse.json()
uris = processRepoData(repoData)
message += writeWarc(uris, warcFile)
else:
# LAAWS repo request unsuccessful
message += reportError("repo request error: {0}\n{1}".format(status,repoResponse))
except Exception:
e = sys.exc_info()
message += reportError(cgitb.text(e))
print(message)
| 35.3 | 134 | 0.589802 | 782 | 7,060 | 5.23913 | 0.290281 | 0.035148 | 0.049793 | 0.01855 | 0.132292 | 0.063949 | 0.048816 | 0.029778 | 0.029778 | 0.029778 | 0 | 0.012094 | 0.285552 | 7,060 | 199 | 135 | 35.477387 | 0.800159 | 0.069122 | 0 | 0.170732 | 0 | 0 | 0.173555 | 0.019216 | 0.006098 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0 | 0.085366 | 0 | 0.134146 | 0.006098 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d96171a77724c2daf45636b712ea7b4c2164248 | 1,504 | py | Python | foliofunctions.py | becahp/ibict-folio | 7f82acd1f9c1d37c3858f51540a703c12a9d92ce | [
"Apache-2.0"
] | null | null | null | foliofunctions.py | becahp/ibict-folio | 7f82acd1f9c1d37c3858f51540a703c12a9d92ce | [
"Apache-2.0"
] | null | null | null | foliofunctions.py | becahp/ibict-folio | 7f82acd1f9c1d37c3858f51540a703c12a9d92ce | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Function definition is here
def folio2solr(folioReg):
    # folioReg is the dictionary
    ## Check problematic fields
#contributors
    if len(folioReg['contributors']): # non-empty
auxContributors = folioReg['contributors'][0]['name']
else:
auxContributors = []
#alternativeTitles
    if len(folioReg['alternativeTitles']): # non-empty
auxAltTitles = folioReg['alternativeTitles'][0]['alternativeTitle']
else:
auxAltTitles = []
##publication
    if len(folioReg['publication']): # non-empty
#publisher
if 'publisher' in folioReg['publication'][0]:
auxPublisher = folioReg['publication'][0]['publisher']
else:
auxPublisher = []
#dateOfPublication
if 'dateOfPublication' in folioReg['publication'][0]:
auxDateOfPublication = folioReg['publication'][0]['dateOfPublication']
else:
auxDateOfPublication = []
else:
auxPublisher = []
auxDateOfPublication = []
#Mapping
solrReg = {
'id': folioReg['hrid'],
'title': folioReg['title'],
'title_alt': auxAltTitles,
'edition': folioReg['editions'],
'series': folioReg['series'],
'author': auxContributors, #folioReg['contributors'][0]['name'],
'topic': folioReg['subjects'],
        'publisher': auxPublisher,
'publishDate': auxDateOfPublication,
'language': folioReg['languages']
}
    return solrReg
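# A minimal usage sketch. The record below only carries the keys this function
# actually reads; real FOLIO instance records contain many more fields:
#
#     record = {
#         'hrid': 'in00000001', 'title': 'Example title',
#         'contributors': [], 'alternativeTitles': [], 'publication': [],
#         'editions': [], 'series': [], 'subjects': [], 'languages': ['eng'],
#     }
#     print(folio2solr(record))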
| 29.490196 | 82 | 0.625665 | 121 | 1,504 | 7.768595 | 0.413223 | 0.101064 | 0.085106 | 0.076596 | 0.085106 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006969 | 0.236702 | 1,504 | 50 | 83 | 30.08 | 0.811847 | 0.170213 | 0 | 0.264706 | 0 | 0 | 0.237825 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0 | 0 | 0.058824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d9673abce324fd2c4264d7ff064fc9ecdf36925 | 2,399 | py | Python | civis_jupyter_notebooks/tests/test_git_utils.py | civisanalytics/civis-jupyter-notebook | 16e3fb804c53e7e146aeebe0cc56192ad51dc668 | [
"BSD-3-Clause"
] | 1 | 2020-05-20T10:17:32.000Z | 2020-05-20T10:17:32.000Z | civis_jupyter_notebooks/tests/test_git_utils.py | civisanalytics/civis-jupyter-notebook | 16e3fb804c53e7e146aeebe0cc56192ad51dc668 | [
"BSD-3-Clause"
] | 26 | 2017-09-08T20:59:00.000Z | 2021-08-23T19:42:22.000Z | civis_jupyter_notebooks/tests/test_git_utils.py | civisanalytics/civis-jupyter-notebook | 16e3fb804c53e7e146aeebe0cc56192ad51dc668 | [
"BSD-3-Clause"
] | 4 | 2017-09-13T20:44:09.000Z | 2020-01-22T21:24:53.000Z | import unittest
from unittest.mock import patch, MagicMock
import os
import logging
from git.exc import GitCommandError
from git import Repo
from civis_jupyter_notebooks.git_utils import CivisGit, CivisGitError
REPO_URL = 'http://www.github.com/civisanalytics.foo.git'
REPO_MOUNT_PATH = '/root/work'
GIT_REPO_REF = 'master'
class GitUtilsTest(unittest.TestCase):
def setUp(self):
os.environ['GIT_REPO_URL'] = REPO_URL
os.environ['GIT_REPO_REF'] = GIT_REPO_REF
logging.disable(logging.INFO)
@patch('civis_jupyter_notebooks.git_utils.Repo.clone_from')
def test_clone_repository_throws_error(self, repo_clone):
repo_clone.side_effect = GitCommandError('clone', 'failed')
self.assertRaises(CivisGitError, lambda: CivisGit().clone_repository())
@patch('civis_jupyter_notebooks.git_utils.Repo.clone_from')
def test_clone_repository_succeeds(self, repo_clone):
repo_clone.return_value = MagicMock(spec=Repo)
CivisGit(repo_mount_path=REPO_MOUNT_PATH).clone_repository()
repo_clone.assert_called_with(REPO_URL, REPO_MOUNT_PATH)
repo_clone.return_value.git.checkout.assert_called_with(GIT_REPO_REF)
@patch('os.environ.get')
def test_is_git_enabled_returns_false(self, env):
env.return_value = None
cg = CivisGit()
self.assertFalse(cg.is_git_enabled())
def test_is_git_enabled_returns_true(self):
self.assertTrue(CivisGit().is_git_enabled())
def test_has_uncommitted_changes(self):
def custom_side_effect(arg):
if arg == 'HEAD':
return []
return ['foo.py']
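        # The mock reports no staged changes against HEAD but an unstaged
        # 'foo.py' in the working tree, so the repo counts as dirty.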
cg = CivisGit()
cg.repo = MagicMock(spec=Repo)
cg.repo().index.diff = MagicMock(side_effect=custom_side_effect)
self.assertTrue(cg.has_uncommitted_changes())
def test_has_no_uncommitted_changes(self):
cg = CivisGit()
cg.repo = MagicMock(spec=Repo)
cg.repo().index.diff = MagicMock(return_value=[])
self.assertFalse(cg.has_uncommitted_changes())
def test_has_uncommitted_changes_throws_error(self):
cg = CivisGit()
cg.repo = MagicMock(spec=Repo)
cg.repo().index.diff = MagicMock(side_effect=GitCommandError('diff', 'failed'))
self.assertRaises(CivisGitError, lambda: cg.has_uncommitted_changes())
if __name__ == '__main__':
unittest.main()
| 33.319444 | 87 | 0.706544 | 306 | 2,399 | 5.205882 | 0.267974 | 0.045198 | 0.065913 | 0.045198 | 0.426868 | 0.285625 | 0.252982 | 0.211551 | 0.211551 | 0.211551 | 0 | 0 | 0.186744 | 2,399 | 71 | 88 | 33.788732 | 0.816504 | 0 | 0 | 0.169811 | 0 | 0 | 0.097957 | 0.04085 | 0 | 0 | 0 | 0 | 0.150943 | 1 | 0.169811 | false | 0 | 0.132075 | 0 | 0.358491 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d976185ebb8ba910132efb175704d26165689c5 | 3,417 | py | Python | skeleton.py | Ragnarok7861/4d_plant_registration | 6c2a4fab7857260ecc5f956c717c997bfa25ea9a | [
"MIT"
] | 16 | 2020-09-17T06:36:43.000Z | 2022-02-04T18:35:30.000Z | skeleton.py | Ragnarok7861/4d_plant_registration | 6c2a4fab7857260ecc5f956c717c997bfa25ea9a | [
"MIT"
] | null | null | null | skeleton.py | Ragnarok7861/4d_plant_registration | 6c2a4fab7857260ecc5f956c717c997bfa25ea9a | [
"MIT"
] | 6 | 2021-09-29T08:32:06.000Z | 2022-01-10T20:53:57.000Z | """ Skeleton class for creating and accessing different attributes of a skeleton.
"""
import numpy as np
class Skeleton():
def __init__(self, XYZ = None, edges = None , labels = None):
self._XYZ = XYZ if XYZ is not None else np.empty((0,3))
self._edges = edges if edges is not None else []
self._labels = labels if labels is not None else []
# initialize A matrix
self.__compute_adjacency_matrix__()
def add_vertex(self, vertex):
""" Adds a vertex/node to the skeleton graph
"""
self._XYZ = np.vstack((self._XYZ, vertex))
#update A matrix
self.__compute_adjacency_matrix__()
def add_edge(self, vertex1_id, vertex2_id):
""" Adds an edge to the skeleton graph
"""
edge = np.array([vertex1_id, vertex2_id], dtype=np.uint8)
self._edges.append(edge)
# update A matrix
self.__compute_adjacency_matrix__()
def add_label(self, label):
""" Assigns a label to a vertex/node in the skeleton graph
"""
self._labels.append(label)
def __compute_adjacency_matrix__(self):
""" Computes an adjecency matrix from the edge list.
"""
num_vertices =self._XYZ.shape[0]
self.A = np.zeros([num_vertices, num_vertices], dtype=np.uint8)
if num_vertices > 0:
for e in self._edges:
if e[0] != e[1]:
self.A[e[0], e[1]] = 1
self.A[e[1], e[0]] = 1
def get_sequence(self):
""" Computes a sequence along the skeleton in a depth-first manner.
"""
if self._XYZ.shape[0] > 0:
root_idx = np.argmin(self.XYZ[:, 2])
seq = self.__graph_depth_first_traversal__(root_idx)
else:
seq = None
return seq
def __graph_depth_first_traversal__(self, root_idx, seq=None, old_root_idx=-1):
""" Recursive function to traverse the skeleton graph in depth first manner.
"""
        if seq is None:
seq = []
seq.append(root_idx)
for i in range(self.A.shape[0]):
if i != old_root_idx and self.A[root_idx, i] == 1:
seq = self.__graph_depth_first_traversal__(i, seq, root_idx)
return seq
@property
def XYZ(self):
return self._XYZ
@property
def edges(self):
return self._edges
@property
def labels(self):
return self._labels
@property
def node_count(self):
return self._XYZ.shape[0]
@property
def edge_count(self):
return len(self._edges)
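    # Usage sketch (coordinates and edges are illustrative; get_sequence roots
    # the depth-first traversal at the vertex with the lowest z coordinate):
    #
    #     skel = Skeleton()
    #     skel.add_vertex(np.array([0.0, 0.0, 0.0]))
    #     skel.add_vertex(np.array([0.0, 0.0, 1.0]))
    #     skel.add_edge(0, 1)
    #     seq = skel.get_sequence()  # -> [0, 1]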
@classmethod
def read_graph(cls, filename, matlab_type = False):
""" Read a graph from a text file as a skeleton
"""
# read all vertices and edges
with open(filename, "r") as file:
vertices = []
edges = []
labels = []
for line in file:
data = line.split()
if data[0] == 'v':
v = np.array([float(data[1]), float(data[2]), float(data[3])])
vertices.append(v)
if len(data) > 4:
l = int(float(data[4]))
labels.append(l)
if data[0] == 'e':
if matlab_type:
e = np.array([float(data[1])-1, float(data[2])-1], dtype=np.uint8)
else:
e = np.array([float(data[1]), float(data[2])], dtype=np.uint8)
edges.append(e)
XYZ = np.stack(vertices)
return cls(XYZ, edges, labels)
@classmethod
def copy_skeleton(cls, S):
""" Make a copy of the skeleton
"""
# read all vertices and edges
return cls(S._XYZ.copy(), S._edges.copy(), S._labels.copy()) | 28.957627 | 84 | 0.609014 | 495 | 3,417 | 4.012121 | 0.224242 | 0.031722 | 0.04431 | 0.019637 | 0.164149 | 0.164149 | 0.092145 | 0.092145 | 0.045317 | 0 | 0 | 0.015892 | 0.263389 | 3,417 | 118 | 85 | 28.957627 | 0.773143 | 0.179397 | 0 | 0.177215 | 0 | 0 | 0.001093 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.177215 | false | 0 | 0.012658 | 0.063291 | 0.316456 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d97e17b49711bc26ae6b1e76c4955fa918c05d5 | 1,233 | py | Python | src/com/dao/aclDao.py | amzpiper/synchronize_data | a0ef983a6445ac5b793691dd3d4a86790c0581a7 | [
"Apache-2.0"
] | null | null | null | src/com/dao/aclDao.py | amzpiper/synchronize_data | a0ef983a6445ac5b793691dd3d4a86790c0581a7 | [
"Apache-2.0"
] | null | null | null | src/com/dao/aclDao.py | amzpiper/synchronize_data | a0ef983a6445ac5b793691dd3d4a86790c0581a7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env/python
# -*- coding:utf-8 -*-
# Author:guoyuhang
import json
from com.request import aclRequest
from com.config.config import Config
from com.model import acl
""" 处理把response返回的dict对象中内容处理后放到acl对象数组中 """
class AclDao(object):
def __init__(self) :
self.aclList = []
self.aclReq = aclRequest.AclRequest()
self.config = Config.get_instance()
""" 请求并解析acl的json返回结果,把所有json中的acl整理成acl对象,再存入到1个list中 """
def getAcl(self):
print("Acl is Going to get response ...")
response = self.aclReq.list()
        # When the response indicates an error
if response.get("error","true") != "true":
print("Acle get list Error ...")
raise Exception
print("Acl got response.All is %s..." % response['total_size'])
print("Acl is Creating Sql ...")
for item in response['resources']:
self.aclList.append(acl.AclModel(
item['name'].encode('ascii'),
','.join(item['networks']).encode('ascii'),
item['href'].encode('ascii')))
self.config.acl_count += 1
print("Acl Sql is Create %s ..." % self.config.acl_count)
print("Acl Sql is Done ...")
return self.aclList
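# Usage sketch (assumes the service endpoint configured in aclRequest is
# reachable; the attribute name follows the AclModel constructor arguments):
#
#     dao = AclDao()
#     for item in dao.getAcl():
#         print(item.name)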
| 31.615385 | 71 | 0.587997 | 137 | 1,233 | 5.233577 | 0.510949 | 0.055788 | 0.027894 | 0.050209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003308 | 0.264396 | 1,233 | 38 | 72 | 32.447368 | 0.787211 | 0.054339 | 0 | 0 | 0 | 0 | 0.201887 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.307692 | 0.230769 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d9849a947ec51d625e83e09d1e16e0cc76affe4 | 6,044 | py | Python | webserver/www/api/api_functions.py | HendrikVE/my_pi_drei | 7e0392dced6e54791690929d968195282796cbd8 | [
"MIT"
] | null | null | null | webserver/www/api/api_functions.py | HendrikVE/my_pi_drei | 7e0392dced6e54791690929d968195282796cbd8 | [
"MIT"
] | 1 | 2018-02-03T23:14:38.000Z | 2018-02-03T23:17:06.000Z | webserver/www/api/api_functions.py | HendrikVE/my_pi_drei | 7e0392dced6e54791690929d968195282796cbd8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
* Copyright (C) 2017 Hendrik van Essen
*
* This file is subject to the terms and conditions of the MIT License
* See the file LICENSE in the top level directory for more details.
"""
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
# append root of the python code tree to sys.path so that imports are working
CUR_DIR = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT_DIR = os.path.normpath(os.path.join(CUR_DIR, os.pardir, os.pardir))
sys.path.append(PROJECT_ROOT_DIR)
from hardware.driver_process import RequestDriverProcess
from arduino.dht22.dht22_interface import RequestDataDHT22
from hardware.adafruit_22_display.display import RequestData as RequestDataDisplay
from .api_json_keys import *
API_VERSION = '0.1'
ADDRESS_DHT22 = ('localhost', 7000)
ADDRESS_DISPLAY = ('localhost', 7001)
def __json_result_template__():
# insert only mandatory attributes here
json_result = {
RESULT_KEY_API_VERSION: API_VERSION,
RESULT_KEY_RESULT: None,
RESULT_KEY_ERROR: None,
}
return json_result
########################################################################################################################
# All API-functions need to accept json string request as argument and return a string as result. #
# #
# #
# Create a new api-function with the following template: #
# #
# def get_temperature(request): #
# json_result = __json_result_template__() #
# #
# # try-except-block only needed if REQUEST_KEY_ACTION_ARGUMENTS index is needed #
# try: #
# arguments = request[jk.REQUEST_KEY_ACTION_ARGUMENTS] #
# #
# except KeyError: #
# json_result[jk.RESULT_KEY_ERROR] = 'missing %s' % jk.REQUEST_KEY_ACTION_ARGUMENTS #
# return json_result #
# #
# # your code here #
# #
# return json_result #
# #
########################################################################################################################
def get_temperature(request):
json_dict = __json_result_template__()
rdp = RequestDriverProcess(ADDRESS_DHT22)
try:
argument = request[REQUEST_KEY_ACTION_ARGUMENT]
except KeyError:
json_dict[RESULT_KEY_ERROR] = 'missing key %s' % REQUEST_KEY_ACTION_ARGUMENT
return json_dict
if argument == 'celsius':
try:
temperature = rdp.request(RequestDataDHT22.TEMP_CEL)
except Exception as e:
json_dict[RESULT_KEY_ERROR] = str(e)
return json_dict
elif argument == 'fahrenheit':
try:
temperature = rdp.request(RequestDataDHT22.TEMP_FAH)
except Exception as e:
json_dict[RESULT_KEY_ERROR] = str(e)
return json_dict
else:
json_dict[RESULT_KEY_ERROR] = 'invalid argument: %s' % argument
return json_dict
json_dict[RESULT_KEY_RESULT] = temperature
return json_dict
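# Example request dict accepted by the functions here (the key name comes from
# api_json_keys; the argument value is illustrative):
#
#     request = {REQUEST_KEY_ACTION_ARGUMENT: 'celsius'}
#     result = get_temperature(request)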
def get_heat_index(request):
json_dict = __json_result_template__()
rdp = RequestDriverProcess(ADDRESS_DHT22)
try:
argument = request[REQUEST_KEY_ACTION_ARGUMENT]
except KeyError:
json_dict[RESULT_KEY_ERROR] = 'missing key %s' % REQUEST_KEY_ACTION_ARGUMENT
return json_dict
if argument == 'celsius':
try:
heat_index = rdp.request(RequestDataDHT22.HEAT_INDEX_CEL)
except Exception as e:
json_dict[RESULT_KEY_ERROR] = str(e)
return json_dict
elif argument == 'fahrenheit':
try:
heat_index = rdp.request(RequestDataDHT22.HEAT_INDEX_FAH)
except Exception as e:
json_dict[RESULT_KEY_ERROR] = str(e)
return json_dict
else:
json_dict[RESULT_KEY_ERROR] = 'invalid argument: %s' % argument
return json_dict
json_dict[RESULT_KEY_RESULT] = heat_index
return json_dict
def get_humidity(request):
json_dict = __json_result_template__()
rdp = RequestDriverProcess(ADDRESS_DHT22)
try:
humidity = rdp.request(RequestDataDHT22.HUMIDITY)
except Exception as e:
json_dict[RESULT_KEY_ERROR] = str(e)
return json_dict
json_dict[RESULT_KEY_RESULT] = humidity
return json_dict
| 40.293333 | 120 | 0.458471 | 506 | 6,044 | 5.158103 | 0.27668 | 0.082759 | 0.064368 | 0.078161 | 0.52069 | 0.467433 | 0.436015 | 0.436015 | 0.393487 | 0.393487 | 0 | 0.012309 | 0.448875 | 6,044 | 149 | 121 | 40.563758 | 0.77124 | 0.410324 | 0 | 0.6125 | 0 | 0 | 0.040275 | 0 | 0 | 0 | 0 | 0.006711 | 0 | 1 | 0.05 | false | 0 | 0.0875 | 0 | 0.3 | 0.0125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d9a54468e18f49414d191f9a2a62adbd5c19f14 | 2,072 | py | Python | contents/6_OpenAI_gym/my_DQN_CartPole.py | venaissance/Reinforcement-learning-with-tensorflow | 78f5253d7e2bbd5aff5bacbbafd85ba9dbda686d | [
"MIT"
] | 1 | 2021-06-27T15:10:29.000Z | 2021-06-27T15:10:29.000Z | contents/6_OpenAI_gym/my_DQN_CartPole.py | venaissance/Reinforcement-learning-with-tensorflow | 78f5253d7e2bbd5aff5bacbbafd85ba9dbda686d | [
"MIT"
] | null | null | null | contents/6_OpenAI_gym/my_DQN_CartPole.py | venaissance/Reinforcement-learning-with-tensorflow | 78f5253d7e2bbd5aff5bacbbafd85ba9dbda686d | [
"MIT"
] | null | null | null | """
Deep Q network,
Using:
Tensorflow: 1.0
gym: 0.7.3
"""
import gym
import numpy as np
from my_DQN_brain import DeepQNetwork
import matplotlib.pyplot as plt
env = gym.make('CartPole-v0')
env = env.unwrapped
print(env.action_space)
print(env.observation_space)
print(env.observation_space.high)
print(env.observation_space.low)
RL = DeepQNetwork(n_actions=env.action_space.n, # 2
n_features=env.observation_space.shape[0], # 4
learning_rate=0.01,
e_greedy=0.9,
replace_target_iter=100,
memory_size=2000,
e_greedy_increment=0.001,
)
def training(max_episodes=100):
total_steps = 0
    # Holds the total reward collected in each episode
GLOBAL_RUNNING_R = []
for i_episode in range(max_episodes):
observation = env.reset()
total_reward = 0
while True:
env.render()
action = RL.choose_action(observation)
observation_, reward_, done, info = env.step(action)
x, x_dot, theta, theta_dot = observation_
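            # Reward shaping: r1 rewards keeping the cart near the track
            # center, r2 rewards keeping the pole upright; both are offsets
            # normalized by the environment's failure thresholds.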
r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8
r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5
reward = r1 + r2
RL.store_transition(observation, action, reward, observation_)
total_reward += reward
print('total_steps: ', total_steps)
if total_steps > max_episodes:
RL.learn()
if done:
print('episode: ', i_episode,
'total_reward: ', round(total_reward, 2),
' epsilon: ', round(RL.epsilon, 2))
break
observation = observation_
total_steps += 1
GLOBAL_RUNNING_R.append(total_reward)
    # Plot the relationship between reward and episodes
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
plt.title('DQN Reward')
plt.xlabel('episodes')
plt.ylabel('moving reward')
plt.show()
env.close()
training(200)
| 25.580247 | 95 | 0.578668 | 246 | 2,072 | 4.650407 | 0.439024 | 0.043706 | 0.066434 | 0.062937 | 0.046329 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031294 | 0.321429 | 2,072 | 80 | 96 | 25.9 | 0.782361 | 0.035714 | 0 | 0 | 0 | 0 | 0.044288 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019231 | false | 0 | 0.076923 | 0 | 0.096154 | 0.115385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d9b4f810021d1ba58be710dada119c03e1176aa | 4,098 | py | Python | src/sampling_update/conditionals.py | mfkiwl/precise_gps | e30c6355447424cb69549feb85c9393b10eae7aa | [
"MIT"
] | null | null | null | src/sampling_update/conditionals.py | mfkiwl/precise_gps | e30c6355447424cb69549feb85c9393b10eae7aa | [
"MIT"
] | null | null | null | src/sampling_update/conditionals.py | mfkiwl/precise_gps | e30c6355447424cb69549feb85c9393b10eae7aa | [
"MIT"
] | null | null | null | # Credit to GPflow
import tensorflow as tf
def base_conditional(Kmn, Kmm, Knn, f, *, full_cov=False, q_sqrt=None, white=False, return_Lm=False):
"""
Given a g1 and g2, and distribution p and q such that
p(g2) = N(g2;0,Kmm)
p(g1) = N(g1;0,Knn)
p(g1|g2) = N(g1;0,Knm)
And
q(g2) = N(g2;f,q_sqrt*q_sqrt^T)
This method computes the mean and (co)variance of
q(g1) = \int q(g2) p(g1|g2)
:param Kmn: M x N
:param Kmm: M x M
:param Knn: N x N or N
:param f: M x R
:param full_cov: bool
:param q_sqrt: None or R x M x M (lower triangular)
:param white: bool
:return: N x R or R x N x N
"""
# compute kernel stuff
num_func = tf.shape(f)[1] # R
Lm = tf.linalg.cholesky(Kmm)
# Compute the projection matrix A
A = tf.linalg.triangular_solve(Lm, Kmn, lower=True)
# compute the covariance due to the conditioning
if full_cov:
fvar = Knn - tf.matmul(A, A, transpose_a=True)
fvar = tf.tile(fvar[None, :, :], [num_func, 1, 1]) # R x N x N
else:
fvar = Knn - tf.reduce_sum(tf.square(A), 0)
fvar = tf.tile(fvar[None, :], [num_func, 1]) # R x N
# another backsubstitution in the unwhitened case
if not white:
A = tf.linalg.triangular_solve(tf.transpose(Lm), A, lower=False)
# construct the conditional mean
fmean = tf.matmul(A, f, transpose_a=True)
if q_sqrt is not None:
if q_sqrt.get_shape().ndims == 2:
LTA = A * tf.expand_dims(tf.transpose(q_sqrt), 2) # R x M x N
elif q_sqrt.get_shape().ndims == 3:
L = q_sqrt
A_tiled = tf.tile(tf.expand_dims(A, 0), tf.stack([num_func, 1, 1]))
LTA = tf.matmul(L, A_tiled, transpose_a=True) # R x M x N
else: # pragma: no cover
raise ValueError("Bad dimension for q_sqrt: %s" %
str(q_sqrt.get_shape().ndims))
if full_cov:
fvar = fvar + tf.matmul(LTA, LTA, transpose_a=True) # R x N x N
else:
fvar = fvar + tf.reduce_sum(tf.square(LTA), 1) # R x N
if not full_cov:
fvar = tf.transpose(fvar) # N x R
if return_Lm:
return fmean, fvar, Lm
return fmean, fvar # N x R, R x N x N or N x R
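# In matrix form, with A = Lm^{-1} Kmn and Kmm = Lm Lm^T, the code above
# implements the standard Gaussian conditional (a sketch of the math):
#
#     white:      mean = A^T f,               cov = Knn - A^T A
#     unwhitened: mean = Kmn^T Kmm^{-1} f,    cov = Knn - A^T A
#
# plus the projected q_sqrt term when posterior uncertainty over f is given.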
def conditional(Xnew, X, kern, f, *, full_cov=False, q_sqrt=None, white=False, return_Lm=False):
"""
Given f, representing the GP at the points X, produce the mean and
(co-)variance of the GP at the points Xnew.
Additionally, there may be Gaussian uncertainty about f as represented by
q_sqrt. In this case `f` represents the mean of the distribution and
q_sqrt the square-root of the covariance.
Additionally, the GP may have been centered (whitened) so that
p(v) = N(0, I)
f = L v
thus
p(f) = N(0, LL^T) = N(0, K).
In this case `f` represents the values taken by v.
The method can either return the diagonals of the covariance matrix for
each output (default) or the full covariance matrix (full_cov=True).
We assume R independent GPs, represented by the columns of f (and the
first dimension of q_sqrt).
    :param Xnew: data matrix, size N x D. Evaluate the GP at these new points
    :param X: data points, size M x D.
    :param kern: GPflow kernel.
    :param f: data matrix, M x R, representing the function values at X,
for K functions.
:param q_sqrt: matrix of standard-deviations or Cholesky matrices,
size M x R or R x M x M.
:param white: boolean of whether to use the whitened representation as
described above.
:return:
- mean: N x R
- variance: N x R (full_cov = False), R x N x N (full_cov = True)
"""
num_data = tf.shape(X)[0] # M
Kmm = kern.K(X) + tf.eye(num_data, dtype=tf.float64) * 1e-7
Kmn = kern.K(X, Xnew)
if full_cov:
Knn = kern.K(Xnew)
else:
Knn = kern.K_diag(Xnew)
return base_conditional(Kmn, Kmm, Knn, f, full_cov=full_cov, q_sqrt=q_sqrt, white=white, return_Lm=return_Lm) # N x R, N x R or R x N x N | 37.59633 | 141 | 0.608346 | 715 | 4,098 | 3.404196 | 0.243357 | 0.036976 | 0.00986 | 0.00986 | 0.24281 | 0.147905 | 0.104355 | 0.093673 | 0.049302 | 0.041085 | 0 | 0.012991 | 0.286237 | 4,098 | 109 | 141 | 37.59633 | 0.819145 | 0.491215 | 0 | 0.166667 | 0 | 0 | 0.014941 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.02381 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d9d764fa2fc09c6656c23160c85475a4f88f596 | 389 | py | Python | CloudSchedule/CodeCraft-2021/src/constant.py | RavenLite/HuaweiCodeCraft2021 | a9c87eb4e01d58c2ec1c021c89d7da422553613f | [
"Apache-2.0"
] | 3 | 2022-02-16T09:42:10.000Z | 2022-02-24T02:26:30.000Z | CloudSchedule/CodeCraft-2021/src/constant.py | RavenLite/HuaweiCodeCraft2021 | a9c87eb4e01d58c2ec1c021c89d7da422553613f | [
"Apache-2.0"
] | null | null | null | CloudSchedule/CodeCraft-2021/src/constant.py | RavenLite/HuaweiCodeCraft2021 | a9c87eb4e01d58c2ec1c021c89d7da422553613f | [
"Apache-2.0"
] | null | null | null | # queue item action
ACTION_ADD = "add"
ACTION_DEL = "del"
# server type field
SERVER_TYPE_RATIO = "server_type_ratio"
# vm deployment way
VM_DEPLOYMENT_SINGLE = 0
VM_DEPLOYMENT_DOUBLE = 1
# vm node name
VM_NODE_A = "A"
VM_NODE_B = "B"
VM_NODE_AB = "AB"
# common
NULL_STRING = ""
ZERO_NUM = 0
MIN_VALUE_INITIAL = 100000000
# server default id
VIRTUAL_SERVER_ID = -1
NEW_SERVER_ID = -2
| 15.56 | 39 | 0.74036 | 65 | 389 | 4.061538 | 0.538462 | 0.090909 | 0.113636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043344 | 0.169666 | 389 | 24 | 40 | 16.208333 | 0.773994 | 0.233933 | 0 | 0 | 0 | 0 | 0.092784 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0da15f409411673cc5bb5b14dc4de318260a890e | 1,788 | py | Python | torchimage/random.py | miaotianyi/torchimage | 8b209c7da087bc18fd78dc0013c671e21a338f38 | [
"MIT"
] | 10 | 2021-04-22T08:15:49.000Z | 2022-03-19T01:28:06.000Z | torchimage/random.py | miaotianyi/torchimage | 8b209c7da087bc18fd78dc0013c671e21a338f38 | [
"MIT"
] | null | null | null | torchimage/random.py | miaotianyi/torchimage | 8b209c7da087bc18fd78dc0013c671e21a338f38 | [
"MIT"
] | null | null | null | import torch
from .utils import NdSpec
from .utils.validation import check_axes
def random_crop(x: torch.Tensor, axes, size, *, generator: torch.Generator = None):
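    """
    Randomly crop ``x`` along ``axes`` to the per-axis lengths given by
    ``size``; a ``None`` entry in ``size`` leaves that axis uncropped.
    """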
axes = check_axes(x, axes)
size = NdSpec(size, item_shape=[])
idx = [slice(None)] * x.ndim
for i, a in enumerate(axes):
length = size[i]
if length is None:
continue
        # torch.randint samples from [0, high), so use shape - length + 1 to
        # make every valid start offset (including shape - length) reachable.
        beg = torch.randint(x.shape[a] - length + 1, size=(), generator=generator).item()
end = beg + length
idx[a] = slice(beg, end)
return x[idx]
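# Usage sketch (axis/size semantics follow NdSpec broadcasting; the shapes
# below are illustrative):
#
#     x = torch.rand(1, 3, 64, 64)
#     patch = random_crop(x, axes=(-2, -1), size=32)  # shape (1, 3, 32, 32)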
def add_poisson_gauss_noise(x: torch.Tensor, k, sigma, *, generator: torch.Generator = None) -> torch.Tensor:
"""
Add Poisson-Gaussian noise to a tensor.
Parameters
----------
x : torch.Tensor
Input tensor.
    k : float
        Noise parameter for the Poisson distribution
    sigma : float
        Noise parameter for the Gaussian distribution (standard deviation)
generator : torch.Generator
Torch random number generator
Returns
-------
y : torch.Tensor
Output tensor with noise added
"""
sigma_2 = sigma ** 2
poisson_noise = torch.poisson(x / k, generator=generator)
poisson_noise *= k # inplace to save memory
gaussian_noise = torch.randn(x.shape, dtype=x.dtype, device=x.device, generator=generator)
gaussian_noise *= (sigma_2 ** 0.5) # inplace to save memory
y = poisson_noise
y += gaussian_noise
return y
def add_gauss_noise(x: torch.Tensor, sigma, *, generator: torch.Generator = None) -> torch.Tensor:
sigma_2 = sigma ** 2
gaussian_noise = torch.randn(x.shape, dtype=x.dtype, device=x.device, generator=generator)
gaussian_noise *= (sigma_2 ** 0.5) # inplace to save memory
gaussian_noise += x # inplace
return gaussian_noise
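# Usage sketch for the noise helpers (parameter values are illustrative; a
# seeded generator makes the noise reproducible):
#
#     gen = torch.Generator().manual_seed(0)
#     x = torch.rand(1, 3, 32, 32)
#     noisy = add_poisson_gauss_noise(x, k=0.01, sigma=0.05, generator=gen)
#     noisy2 = add_gauss_noise(x, sigma=0.05, generator=gen)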
| 29.311475 | 109 | 0.644295 | 236 | 1,788 | 4.779661 | 0.271186 | 0.092199 | 0.042553 | 0.071809 | 0.340426 | 0.301418 | 0.27305 | 0.196809 | 0.196809 | 0.196809 | 0 | 0.008136 | 0.243848 | 1,788 | 60 | 110 | 29.8 | 0.826183 | 0.242729 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0da1f2772727d2da5ec5d537da5d7526f0a0bfca | 523 | py | Python | tests/test_core.py | bofm/libextract | 9cf9d55c7f8cd622eab0a50f009385f0a39b1200 | [
"MIT"
] | 502 | 2015-04-18T18:11:07.000Z | 2022-03-04T20:10:11.000Z | tests/test_core.py | bofm/libextract | 9cf9d55c7f8cd622eab0a50f009385f0a39b1200 | [
"MIT"
] | 18 | 2015-04-06T12:21:14.000Z | 2015-04-14T03:53:39.000Z | tests/test_core.py | bofm/libextract | 9cf9d55c7f8cd622eab0a50f009385f0a39b1200 | [
"MIT"
] | 45 | 2015-04-15T13:31:38.000Z | 2021-05-19T09:37:23.000Z | from .fixtures import foo_file
from libextract.core import parse_html, pipeline
def test_parse_html(foo_file):
etree = parse_html(foo_file, encoding='ascii')
divs = etree.xpath('//body/article/div')
for node in divs:
assert node.tag == 'div'
assert node.text == 'foo.'
assert len(divs) == 9
def test_pipeline():
functions = [
lambda x: x + [1],
lambda x: x + [2],
]
assert pipeline([], functions) == [1, 2]
assert pipeline([1], functions) == [1, 1, 2]
| 21.791667 | 50 | 0.59847 | 71 | 523 | 4.295775 | 0.464789 | 0.068852 | 0.078689 | 0.104918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023136 | 0.256214 | 523 | 23 | 51 | 22.73913 | 0.760925 | 0 | 0 | 0 | 0 | 0 | 0.057471 | 0 | 0 | 0 | 0 | 0 | 0.3125 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0da491b9962ecdc77815ce0f838f8cd5faab7f6a | 5,868 | py | Python | retrieve_by_jersey_number_app.py | shivaprasadm26/JerseyNumberDetectionTF20 | 64f8bd63fafb37c1bf2a6054ffba44670b44bb79 | [
"MIT"
] | 1 | 2022-01-02T02:33:54.000Z | 2022-01-02T02:33:54.000Z | retrieve_by_jersey_number_app.py | shivaprasadm26/JerseyNumberDetectionTF20 | 64f8bd63fafb37c1bf2a6054ffba44670b44bb79 | [
"MIT"
] | null | null | null | retrieve_by_jersey_number_app.py | shivaprasadm26/JerseyNumberDetectionTF20 | 64f8bd63fafb37c1bf2a6054ffba44670b44bb79 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 21 14:36:56 2020
@author: user
"""
import glob
import os
import pickle
import shutil
import time
from flask import request
from flask import Flask
import argparse
from predict_jersey_number import detect_person_jersey_no, load_objdetection, load_numberDetection
app = Flask(__name__)
@app.route('/')
def hello():
return "welcome! this service is to retrieve player images by jersey number"
DETECTOR = False
DETECTORNUM = False
CUSTOM = False
def JerseyNumberRetrieve(query_jersey_number,SearchSetParentPath,DestPath,NumImages=10):
    '''
    Get a list of images containing players whose jersey number matches
    query_jersey_number.

    Returns a list whose length is the minimum of the number of detected
    images and NumImages, ordered by decreasing detection confidence.
    Each element is a tuple of the form (image_path, detection_confidence).
    '''
detections_file = os.path.join(SearchSetParentPath,"reference_detections.p")
if not os.path.exists(detections_file):
        filenames = glob.glob(SearchSetParentPath + "/*.jpg", recursive=False)
        filenames.extend(glob.glob(SearchSetParentPath + "/*.png", recursive=False))
        filenames.extend(glob.glob(SearchSetParentPath + "/*.bmp", recursive=False))
        filenames.extend(glob.glob(SearchSetParentPath + "/*.tif", recursive=False))
        filenames.extend(glob.glob(SearchSetParentPath + "/*.tiff", recursive=False))
        filenames.extend(glob.glob(SearchSetParentPath + "/*.JPEG", recursive=False))
reference_detections = []
for count in range(len(filenames)):
image_path = filenames[count]
image_name = (image_path.replace('\\','/')).split('/')[-1]
detections = detect_person_jersey_no(filenames[count],DETECTOR,DETECTORNUM,CUSTOM)
out_dict = {}
out_dict['path'] = image_path
out_dict['name'] = image_name
out_dict['detections'] = detections
reference_detections.append(out_dict)
pickle.dump(reference_detections, open(detections_file, "wb" ))
print("Detections created successfully")
print("==========>>")
else:
reference_detections= pickle.load(open(detections_file, "rb"))
print("Loaded detections from file")
print("==========>>")
matched_files = []
for detection in reference_detections:
file_name = detection['path']
jersey_numbers = detection['detections'][0]
confidence = detection['detections'][1]
for j_count in range(len(jersey_numbers)):
jersey_number=jersey_numbers[j_count]
# print("jersy numbers",jersey_number)
if jersey_number == query_jersey_number:
matched_files.append((file_name,confidence[j_count]))
matched_files.sort(key = lambda x:x[1], reverse = True)
print("Found {} images with jersey number {}".format(len(matched_files),query_jersey_number))
print("==============>>")
if len(matched_files) > NumImages:
matched_files = matched_files[:NumImages]
#dump the images into user folder with folder name same as jersey number
DestPath = DestPath.replace("\\","/")
user_folder = os.path.join(DestPath,str(query_jersey_number))
print("Retrieving top {} images to {} directory".format(len(matched_files), user_folder))
print("===================>>")
if os.path.exists(user_folder):
shutil.rmtree(user_folder)
time.sleep(10)
os.mkdir(user_folder)
for detected_image in matched_files:
image_name = ((detected_image[0]).replace("\\","/")).split('/')[-1]
dest_path = os.path.join(user_folder, image_name)
shutil.copyfile(detected_image[0], dest_path)
print(" {} images are copied to {} successfully".format(len(matched_files),user_folder))
print("==========================>>")
print("done")
return matched_files, user_folder
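# Example request body for the /retrieve_images/ endpoint below (paths are
# illustrative; the DestPath key is an assumption mirroring the function
# signature):
#
#     {"query_jersey_number": "10",
#      "SearchSetParentPath": "/data/match_frames",
#      "DestPath": "/data/retrieved",
#      "NumImages": 10}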
@app.route('/retrieve_images/')
def retrieve_by_jersey_number():
in_json = request.get_json()
query_jersey_number = in_json['query_jersey_number']
SearchSetParentPath = in_json['SearchSetParentPath']
    NumImages = in_json['NumImages']
    # JerseyNumberRetrieve requires a destination path; the 'DestPath' JSON
    # key is an assumption, mirroring the other request fields.
    DestPath = in_json['DestPath']
    matched_list, user_folder = JerseyNumberRetrieve(
        query_jersey_number, SearchSetParentPath, DestPath,
        NumImages=NumImages)
    out_message = " {} images are copied to {} successfully".format(
        len(matched_list), user_folder)
    return out_message
#if __name__ == '__main__':
# app.run()
if __name__ == '__main__':
# Must be loaded only once
DETECTOR, CUSTOM = load_objdetection()
DETECTORNUM = load_numberDetection()
parser = argparse.ArgumentParser(description='retrieve by jersey number.')
parser.add_argument("-q","--query_no", type=int, help='Query Jersey Number', required=True)
parser.add_argument("-s","--search_dir", type=str, help='search set path', required = True)
parser.add_argument("-n","--no_images", type=int, help='Number of images to be retrieved', default = 10)
parser.add_argument("-d","--dest_path", type=str, help='destination path to dump output images', required = True)
args = parser.parse_args()
query_jersey_number = args.query_no
SearchSetParentPath = args.search_dir
NumImages = args.no_images
DestPath = args.dest_path
#print("Modified")
#print(args)SA
#APPi
#matched_list,user_folder = JerseyNumberRetrieve("2","C://Shiva//PP//Rod//Image//2021-05-04","2","C://Shiva//PP//Rod//Image")
matched_list,user_folder = JerseyNumberRetrieve(query_jersey_number=query_jersey_number,SearchSetParentPath =SearchSetParentPath,NumImages=NumImages,DestPath=DestPath)
| 38.860927 | 171 | 0.676721 | 693 | 5,868 | 5.520924 | 0.277056 | 0.069002 | 0.053319 | 0.037899 | 0.20805 | 0.150549 | 0.141662 | 0.053842 | 0 | 0 | 0 | 0.007597 | 0.192399 | 5,868 | 150 | 172 | 39.12 | 0.799747 | 0.131902 | 0 | 0.061224 | 0 | 0 | 0.148067 | 0.014073 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030612 | false | 0 | 0.112245 | 0.010204 | 0.173469 | 0.112245 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0da8d51887cededd42699ffaa6e25ac8a8fcd250 | 1,929 | py | Python | pyembroidery/EmbThreadHus.py | mrjohnmain/pyembroidery | 3acaa543e3ffaad67f83092ab3151fce87c76ec4 | [
"MIT"
] | null | null | null | pyembroidery/EmbThreadHus.py | mrjohnmain/pyembroidery | 3acaa543e3ffaad67f83092ab3151fce87c76ec4 | [
"MIT"
] | null | null | null | pyembroidery/EmbThreadHus.py | mrjohnmain/pyembroidery | 3acaa543e3ffaad67f83092ab3151fce87c76ec4 | [
"MIT"
] | null | null | null | from .EmbThread import EmbThread
def get_thread_set():
return [
EmbThreadHus("#000000", "Black", "026"),
EmbThreadHus("#0000e7", "Blue", "005"),
EmbThreadHus("#00c600", "Green", "002"),
EmbThreadHus("#ff0000", "Red", "014"),
EmbThreadHus("#840084", "Purple", "008"),
EmbThreadHus("#ffff00", "Yellow", "020"),
EmbThreadHus("#848484", "Grey", "024"),
EmbThreadHus("#8484e7", "Light Blue", "006"),
EmbThreadHus("#00ff84", "Light Green", "003"),
EmbThreadHus("#ff7b31", "Orange", "017"),
EmbThreadHus("#ff8ca5", "Pink", "011"),
EmbThreadHus("#845200", "Brown", "028"),
EmbThreadHus("#ffffff", "White", "022"),
EmbThreadHus("#000084", "Dark Blue", "004"),
EmbThreadHus("#008400", "Dark Green", "001"),
EmbThreadHus("#7b0000", "Dark Red", "013"),
EmbThreadHus("#ff6384", "Light Red", "015"),
EmbThreadHus("#522952", "Dark Purple", "007"),
EmbThreadHus("#ff00ff", "Light Purple", "009"),
EmbThreadHus("#ffde00", "Dark Yellow", "019"),
EmbThreadHus("#ffff9c", "Light Yellow", "021"),
EmbThreadHus("#525252", "Dark Grey", "025"),
EmbThreadHus("#d6d6d6", "Light Grey", "023"),
EmbThreadHus("#ff5208", "Dark Orange", "016"),
EmbThreadHus("#ff9c5a", "Light Orange", "018"),
EmbThreadHus("#ff52b5", "Dark Pink", "010"),
EmbThreadHus("#ffc6de", "Light Pink", "012"),
EmbThreadHus("#523100", "Dark Brown", "027"),
EmbThreadHus("#b5a584", "Light Brown", "029")
]
class EmbThreadHus(EmbThread):
def __init__(self, color, description, catalog_number=None):
EmbThread.__init__(self)
self.set(color)
self.description = description
self.catalog_number = catalog_number
self.brand = "Hus"
self.chart = "Hus"
| 41.042553 | 65 | 0.554692 | 174 | 1,929 | 6.074713 | 0.522989 | 0.036897 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.139214 | 0.247797 | 1,929 | 46 | 66 | 41.934783 | 0.589249 | 0 | 0 | 0 | 0 | 0 | 0.283741 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.02439 | 0.02439 | 0.121951 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dae84818582972a573cb9582067de8c07197bac | 852 | py | Python | tests/setup_scripts/rerun_prep.py | yardencsGitHub/vak | 04da97b02ded5acccab437c2538d0a1ded3bef80 | [
"BSD-3-Clause"
] | 1 | 2021-04-27T01:22:38.000Z | 2021-04-27T01:22:38.000Z | tests/setup_scripts/rerun_prep.py | yardencsGitHub/vak | 04da97b02ded5acccab437c2538d0a1ded3bef80 | [
"BSD-3-Clause"
] | null | null | null | tests/setup_scripts/rerun_prep.py | yardencsGitHub/vak | 04da97b02ded5acccab437c2538d0a1ded3bef80 | [
"BSD-3-Clause"
] | null | null | null | from pathlib import Path
import shutil
import vak
HERE = Path(__file__).parent
# convention is that all the config.toml files in setup_scripts/ that should be
# run when setting up for development have filenames of the form 'setup_*_config.toml'
# e.g., 'setup_learncurve_config.toml'
PREP_CONFIGS_TO_RUN = HERE.glob('setup_*_config.toml')
def main():
for toml_path in PREP_CONFIGS_TO_RUN:
if not toml_path.exists():
raise FileNotFoundError(
f'{toml_path} not found')
print(f"preparing datasets for tests using config: {toml_path}")
tmp_toml_path = Path(toml_path.parent).joinpath(f'tmp_{toml_path.name}')
print(f"\tcopying to {tmp_toml_path}")
shutil.copy(src=toml_path, dst=tmp_toml_path)
vak.cli.prep(toml_path=tmp_toml_path)
if __name__ == '__main__':
main()
| 29.37931 | 85 | 0.700704 | 128 | 852 | 4.335938 | 0.484375 | 0.172973 | 0.099099 | 0.057658 | 0.068468 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.199531 | 852 | 28 | 86 | 30.428571 | 0.813783 | 0.230047 | 0 | 0 | 0 | 0 | 0.230061 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.176471 | 0 | 0.235294 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0daf09f9bf36528566ecad4f419efb5d2a980b42 | 1,810 | py | Python | fic_modules/GITMethods.py | Mozilla-GitHub-Standards/4b07a4dfd4387a472231fb57efcde670c9dbb490fb838c59ecfdd33a55e01e42 | d891077a9218444d3948d3c8290e59924d578f99 | [
"MIT"
] | null | null | null | fic_modules/GITMethods.py | Mozilla-GitHub-Standards/4b07a4dfd4387a472231fb57efcde670c9dbb490fb838c59ecfdd33a55e01e42 | d891077a9218444d3948d3c8290e59924d578f99 | [
"MIT"
] | null | null | null | fic_modules/GITMethods.py | Mozilla-GitHub-Standards/4b07a4dfd4387a472231fb57efcde670c9dbb490fb838c59ecfdd33a55e01e42 | d891077a9218444d3948d3c8290e59924d578f99 | [
"MIT"
] | null | null | null | from git import Repo
class Github:
def __init__(self, files, msg, LOGGER, config):
self.files = files
self.msg = msg
self.LOGGER = LOGGER
self.config = config
def git_add(self):
try:
repo = Repo(self.config)
for file in self.files:
repo.git.add(file, update=True)
if not repo.index.diff("HEAD"):
self.LOGGER.info("nothing staged for commit. has the data or files changed?")
        except Exception:
            self.LOGGER.info("Failed to add the files")
def git_commit(self):
self.LOGGER.info("Committing changes with message: %s", self.msg)
repo = Repo(self.config)
commit = repo.index.commit(self.msg)
for patch in repo.commit("HEAD~1").diff(commit, create_patch=True):
self.LOGGER.info(patch)
def git_push(self):
try:
repo = Repo(self.config)
self.LOGGER.info("pushing changes to %s", repo)
push_info = repo.remotes.origin.push(refspec=repo.active_branch)
self.LOGGER.info("Summary of push: {}".format(push_info[0].summary))
        except Exception:
            self.LOGGER.info("Error pushing the data")
        else:
            self.LOGGER.info("Data push from {} succeeded".format("Firefox Infra Changelog"))
def git_pull(self):
try:
repo = Repo(self.config)
self.LOGGER.info("pulling changes from %s", repo)
pull_info = repo.remotes.origin.pull(refspec=repo.active_branch)
self.LOGGER.info("Summary of pull: {}".format(pull_info[0]))
        except Exception:
            self.LOGGER.info("Error pulling the data")
        else:
            self.LOGGER.info("Data pull from {} succeeded".format("Firefox Infra Changelog"))
| 36.938776 | 97 | 0.587293 | 228 | 1,810 | 4.596491 | 0.285088 | 0.124046 | 0.160305 | 0.068702 | 0.373092 | 0.325382 | 0.225191 | 0.162214 | 0.162214 | 0 | 0 | 0.002353 | 0.29558 | 1,810 | 48 | 98 | 37.708333 | 0.819608 | 0 | 0 | 0.285714 | 0 | 0 | 0.195028 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.119048 | false | 0 | 0.02381 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0db27a58d33995cf4b7f72d8f930a14784bafea3 | 1,172 | py | Python | code/emojify.py | namaannn/Emoji-with-Deep-Learning | a40c78361663245194c043b485b0f20561559f99 | [
"Apache-2.0"
] | null | null | null | code/emojify.py | namaannn/Emoji-with-Deep-Learning | a40c78361663245194c043b485b0f20561559f99 | [
"Apache-2.0"
] | null | null | null | code/emojify.py | namaannn/Emoji-with-Deep-Learning | a40c78361663245194c043b485b0f20561559f99 | [
"Apache-2.0"
] | null | null | null | import cv2 as cv
import numpy as np
from tensorflow.keras.models import load_model
model=load_model('facial.h5')
cv.ocl.setUseOpenCL(False)
dic={0:"Angry",1:"Disgusted",2:"Fearful",3:"Happy",4:"Neutral",5:"Sad",6:"Surprised"}
cap = cv.VideoCapture(0)
# Load the Haar cascade once, before the capture loop (the path below is machine-specific)
bounding_box = cv.CascadeClassifier(r'C:\Users\naman\anaconda3\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml')
while True:
    ret, frame = cap.read()
    if not ret:
        break
grey_frame=cv.cvtColor(frame,cv.COLOR_BGR2GRAY)
faces=bounding_box.detectMultiScale(grey_frame,scaleFactor=1.3,minNeighbors=5)
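    # For each detected face: draw a bounding box, crop the grayscale ROI, resize it
    # to the 48x48 input the model expects, and overlay the predicted emotion label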
for (x,y,w,h) in faces:
cv.rectangle(frame, (x,y-50),(x+w, y+h+10),(255,0,0),2)
roi=grey_frame[y:y+h,x:x+w]
cropped=np.expand_dims(np.expand_dims(cv.resize(roi,(48,48)),-1),0)
pred=model.predict(cropped)
maxindex=int(np.argmax(pred))
cv.putText(frame,dic[maxindex],(x+20,y-60),cv.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2,cv.LINE_AA)
cv.imshow('Video',cv.resize(frame,(1200,860),interpolation=cv.INTER_CUBIC))
if cv.waitKey(1)&0xFF==ord('q'):
cap.release()
cv.destroyAllWindows()
break | 45.076923 | 130 | 0.673208 | 189 | 1,172 | 4.089947 | 0.571429 | 0.034929 | 0.031048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.056452 | 0.153584 | 1,172 | 26 | 131 | 45.076923 | 0.722782 | 0 | 0 | 0.076923 | 0 | 0.038462 | 0.128049 | 0.075784 | 0 | 0 | 0.003484 | 0 | 0 | 1 | 0 | false | 0 | 0.115385 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0db309f3af7765ae96862762b146d97c7d0b4df5 | 24,060 | py | Python | mmdet/models/heads/ppyoloe_head.py | jie311/miemiedetection | b0e7a45717fe6c9cf9bf3c0f47d47a2e6c68b1b6 | [
"Apache-2.0"
] | 65 | 2021-12-30T03:30:52.000Z | 2022-03-25T01:44:32.000Z | mmdet/models/heads/ppyoloe_head.py | jie311/miemiedetection | b0e7a45717fe6c9cf9bf3c0f47d47a2e6c68b1b6 | [
"Apache-2.0"
] | 1 | 2021-12-31T01:51:35.000Z | 2022-01-01T14:42:37.000Z | mmdet/models/heads/ppyoloe_head.py | jie311/miemiedetection | b0e7a45717fe6c9cf9bf3c0f47d47a2e6c68b1b6 | [
"Apache-2.0"
] | 7 | 2021-12-31T09:25:06.000Z | 2022-03-10T01:25:09.000Z | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from torch import distributed as dist
import torch.nn.functional as F
import numpy as np
import copy
# from ppdet.modeling.layers import MultiClassNMS
from mmdet.models.assigners.utils import generate_anchors_for_grid_cell
from mmdet.models.backbones.cspresnet import ConvBNLayer
from mmdet.models.bbox_utils import batch_distance2bbox
from mmdet.models.matrix_nms import matrix_nms
from mmdet.models.ops import get_static_shape, paddle_distributed_is_initialized, get_act_fn
from mmdet.models.initializer import bias_init_with_prob, constant_, normal_
from mmdet.models.losses.iou_losses import GIoULoss
from mmdet.utils import my_multiclass_nms, get_world_size
def print_diff(dic, key, tensor):
    if tensor is not None:  # some gradient tensors may be None
aaaaaa1 = dic[key]
aaaaaa2 = tensor.cpu().detach().numpy()
ddd = np.sum((aaaaaa1 - aaaaaa2) ** 2)
print('diff=%.6f (%s)' % (ddd, key))
class ESEAttn(nn.Module):
def __init__(self, feat_channels, act='swish'):
super(ESEAttn, self).__init__()
self.fc = nn.Conv2d(feat_channels, feat_channels, 1)
self.conv = ConvBNLayer(feat_channels, feat_channels, 1, act=act)
self._init_weights()
def _init_weights(self):
normal_(self.fc.weight, std=0.001)
def forward(self, feat, avg_feat):
weight = torch.sigmoid(self.fc(avg_feat))
return self.conv(feat * weight)
def add_param_group(self, param_groups, base_lr, base_wd, need_clip, clip_norm):
self.conv.add_param_group(param_groups, base_lr, base_wd, need_clip, clip_norm)
if self.fc.weight.requires_grad:
param_group_conv_weight = {'params': [self.fc.weight]}
param_group_conv_weight['lr'] = base_lr * 1.0
param_group_conv_weight['base_lr'] = base_lr * 1.0
param_group_conv_weight['weight_decay'] = base_wd
param_group_conv_weight['need_clip'] = need_clip
param_group_conv_weight['clip_norm'] = clip_norm
param_groups.append(param_group_conv_weight)
if self.fc.bias.requires_grad:
param_group_conv_bias = {'params': [self.fc.bias]}
param_group_conv_bias['lr'] = base_lr * 1.0
param_group_conv_bias['base_lr'] = base_lr * 1.0
param_group_conv_bias['weight_decay'] = base_wd
param_group_conv_bias['need_clip'] = need_clip
param_group_conv_bias['clip_norm'] = clip_norm
param_groups.append(param_group_conv_bias)
class PPYOLOEHead(nn.Module):
__shared__ = ['num_classes', 'eval_size', 'trt', 'exclude_nms']
__inject__ = ['static_assigner', 'assigner', 'nms']
def __init__(self,
in_channels=[1024, 512, 256],
num_classes=80,
act='swish',
fpn_strides=(32, 16, 8),
grid_cell_scale=5.0,
grid_cell_offset=0.5,
reg_max=16,
static_assigner_epoch=4,
use_varifocal_loss=True,
static_assigner='ATSSAssigner',
assigner='TaskAlignedAssigner',
nms='MultiClassNMS',
eval_size=None,
loss_weight={
'class': 1.0,
'iou': 2.5,
'dfl': 0.5,
},
trt=False,
nms_cfg=None,
exclude_nms=False):
super(PPYOLOEHead, self).__init__()
assert len(in_channels) > 0, "len(in_channels) should > 0"
self.in_channels = in_channels
self.num_classes = num_classes
self.fpn_strides = fpn_strides
self.grid_cell_scale = grid_cell_scale
self.grid_cell_offset = grid_cell_offset
self.reg_max = reg_max
self.iou_loss = GIoULoss()
self.loss_weight = loss_weight
self.use_varifocal_loss = use_varifocal_loss
self.eval_size = eval_size
self.static_assigner_epoch = static_assigner_epoch
self.static_assigner = static_assigner
self.assigner = assigner
self.nms = nms
# if isinstance(self.nms, MultiClassNMS) and trt:
# self.nms.trt = trt
self.exclude_nms = exclude_nms
self.nms_cfg = nms_cfg
# stem
self.stem_cls = nn.ModuleList()
self.stem_reg = nn.ModuleList()
        act = get_act_fn(act, trt=trt) if act is None or isinstance(act, (str, dict)) else act
for in_c in self.in_channels:
self.stem_cls.append(ESEAttn(in_c, act=act))
self.stem_reg.append(ESEAttn(in_c, act=act))
# pred head
self.pred_cls = nn.ModuleList()
self.pred_reg = nn.ModuleList()
for in_c in self.in_channels:
self.pred_cls.append(
nn.Conv2d(
in_c, self.num_classes, 3, padding=1))
self.pred_reg.append(
nn.Conv2d(
in_c, 4 * (self.reg_max + 1), 3, padding=1))
# projection conv
self.proj_conv = nn.Conv2d(self.reg_max + 1, 1, 1, bias=False)
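        # This 1x1 conv collapses the (reg_max + 1)-bin distribution predicted for each
        # box side into its expected value: its fixed weights are the bin indices
        # 0..reg_max, copied from self.proj in _init_weights (the DFL integral form).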
self._init_weights()
def add_param_group(self, param_groups, base_lr, base_wd, need_clip, clip_norm):
for i in range(len(self.in_channels)):
self.stem_cls[i].add_param_group(param_groups, base_lr, base_wd, need_clip, clip_norm)
self.stem_reg[i].add_param_group(param_groups, base_lr, base_wd, need_clip, clip_norm)
if self.pred_cls[i].weight.requires_grad:
param_group_conv_weight = {'params': [self.pred_cls[i].weight]}
param_group_conv_weight['lr'] = base_lr * 1.0
param_group_conv_weight['base_lr'] = base_lr * 1.0
param_group_conv_weight['weight_decay'] = base_wd
param_group_conv_weight['need_clip'] = need_clip
param_group_conv_weight['clip_norm'] = clip_norm
param_groups.append(param_group_conv_weight)
if self.pred_cls[i].bias.requires_grad:
param_group_conv_bias = {'params': [self.pred_cls[i].bias]}
param_group_conv_bias['lr'] = base_lr * 1.0
param_group_conv_bias['base_lr'] = base_lr * 1.0
param_group_conv_bias['weight_decay'] = base_wd
param_group_conv_bias['need_clip'] = need_clip
param_group_conv_bias['clip_norm'] = clip_norm
param_groups.append(param_group_conv_bias)
if self.pred_reg[i].weight.requires_grad:
param_group_conv_weight2 = {'params': [self.pred_reg[i].weight]}
param_group_conv_weight2['lr'] = base_lr * 1.0
param_group_conv_weight2['base_lr'] = base_lr * 1.0
param_group_conv_weight2['weight_decay'] = base_wd
param_group_conv_weight2['need_clip'] = need_clip
param_group_conv_weight2['clip_norm'] = clip_norm
param_groups.append(param_group_conv_weight2)
if self.pred_reg[i].bias.requires_grad:
param_group_conv_bias2 = {'params': [self.pred_reg[i].bias]}
param_group_conv_bias2['lr'] = base_lr * 1.0
param_group_conv_bias2['base_lr'] = base_lr * 1.0
param_group_conv_bias2['weight_decay'] = base_wd
param_group_conv_bias2['need_clip'] = need_clip
param_group_conv_bias2['clip_norm'] = clip_norm
param_groups.append(param_group_conv_bias2)
@classmethod
def from_config(cls, cfg, input_shape):
return {'in_channels': [i.channels for i in input_shape], }
def _init_weights(self):
bias_cls = bias_init_with_prob(0.01)
for cls_, reg_ in zip(self.pred_cls, self.pred_reg):
constant_(cls_.weight)
constant_(cls_.bias, bias_cls)
constant_(reg_.weight)
constant_(reg_.bias, 1.0)
self.proj = torch.linspace(0, self.reg_max, self.reg_max + 1)
self.proj.requires_grad = False
self.proj_conv.weight.requires_grad_(False)
self.proj_conv.weight.copy_(
self.proj.reshape([1, self.reg_max + 1, 1, 1]))
if self.eval_size:
anchor_points, stride_tensor = self._generate_anchors()
self.register_buffer('anchor_points', anchor_points)
self.register_buffer('stride_tensor', stride_tensor)
def forward_train(self, feats, targets):
anchors, anchor_points, num_anchors_list, stride_tensor = \
generate_anchors_for_grid_cell(
feats, self.fpn_strides, self.grid_cell_scale,
self.grid_cell_offset)
cls_score_list, reg_distri_list = [], []
for i, feat in enumerate(feats):
avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))
cls_logit = self.pred_cls[i](self.stem_cls[i](feat, avg_feat) +
feat)
reg_distri = self.pred_reg[i](self.stem_reg[i](feat, avg_feat))
# cls and reg
cls_score = torch.sigmoid(cls_logit)
cls_score_list.append(cls_score.flatten(2).permute((0, 2, 1)))
reg_distri_list.append(reg_distri.flatten(2).permute((0, 2, 1)))
cls_score_list = torch.cat(cls_score_list, 1)
reg_distri_list = torch.cat(reg_distri_list, 1)
# import numpy as np
# dic = np.load('../aaa.npz')
# cls_score_list = torch.Tensor(dic['cls_score_list'])
# reg_distri_list = torch.Tensor(dic['reg_distri_list'])
# anchors = torch.Tensor(dic['anchors'])
# anchor_points = torch.Tensor(dic['anchor_points'])
# stride_tensor = torch.Tensor(dic['stride_tensor'])
# gt_class = torch.Tensor(dic['gt_class'])
# gt_bbox = torch.Tensor(dic['gt_bbox'])
# pad_gt_mask = torch.Tensor(dic['pad_gt_mask'])
# targets['gt_class'] = gt_class
# targets['gt_bbox'] = gt_bbox
# targets['pad_gt_mask'] = pad_gt_mask
#
# loss = torch.Tensor(dic['loss'])
# loss_cls = torch.Tensor(dic['loss_cls'])
# loss_iou = torch.Tensor(dic['loss_iou'])
# loss_dfl = torch.Tensor(dic['loss_dfl'])
# loss_l1 = torch.Tensor(dic['loss_l1'])
losses = self.get_loss([
cls_score_list, reg_distri_list, anchors, anchor_points,
num_anchors_list, stride_tensor
], targets)
return losses
def _generate_anchors(self, feats=None):
# just use in eval time
anchor_points = []
stride_tensor = []
for i, stride in enumerate(self.fpn_strides):
if feats is not None:
_, _, h, w = feats[i].shape
else:
h = int(self.eval_size[0] / stride)
w = int(self.eval_size[1] / stride)
shift_x = torch.arange(end=w) + self.grid_cell_offset
shift_y = torch.arange(end=h) + self.grid_cell_offset
# shift_y, shift_x = torch.meshgrid(shift_y, shift_x, indexing="ij")
shift_y, shift_x = torch.meshgrid(shift_y, shift_x)
anchor_point = torch.stack([shift_x, shift_y], -1).to(torch.float32)
anchor_points.append(anchor_point.reshape([-1, 2]))
stride_tensor.append(
torch.full(
[h * w, 1], stride, dtype=torch.float32))
anchor_points = torch.cat(anchor_points)
stride_tensor = torch.cat(stride_tensor)
return anchor_points, stride_tensor
def forward_eval(self, feats):
if self.eval_size:
anchor_points, stride_tensor = self.anchor_points, self.stride_tensor
else:
anchor_points, stride_tensor = self._generate_anchors(feats)
cls_score_list, reg_dist_list = [], []
for i, feat in enumerate(feats):
b, _, h, w = feat.shape
l = h * w
avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))
cls_logit = self.pred_cls[i](self.stem_cls[i](feat, avg_feat) +
feat)
reg_dist = self.pred_reg[i](self.stem_reg[i](feat, avg_feat))
reg_dist = reg_dist.reshape([-1, 4, self.reg_max + 1, l])
reg_dist = reg_dist.permute((0, 2, 1, 3))
reg_dist = self.proj_conv(F.softmax(reg_dist, dim=1))
# cls and reg
cls_score = torch.sigmoid(cls_logit)
cls_score_list.append(cls_score.reshape([b, self.num_classes, l]))
reg_dist_list.append(reg_dist.reshape([b, 4, l]))
cls_score_list = torch.cat(cls_score_list, -1) # [N, 80, A]
reg_dist_list = torch.cat(reg_dist_list, -1) # [N, 4, A]
return cls_score_list, reg_dist_list, anchor_points, stride_tensor
def forward(self, feats, targets=None):
assert len(feats) == len(self.fpn_strides), \
"The size of feats is not equal to size of fpn_strides"
if self.training:
return self.forward_train(feats, targets)
else:
return self.forward_eval(feats)
@staticmethod
def _focal_loss(score, label, alpha=0.25, gamma=2.0):
weight = (score - label).pow(gamma)
if alpha > 0:
alpha_t = alpha * label + (1 - alpha) * (1 - label)
weight *= alpha_t
# loss = F.binary_cross_entropy(
# score, label, weight=weight, reduction='sum')
score = score.to(torch.float32)
eps = 1e-9
loss = label * (0 - torch.log(score + eps)) + \
(1.0 - label) * (0 - torch.log(1.0 - score + eps))
loss *= weight
loss = loss.sum()
return loss
@staticmethod
def _varifocal_loss(pred_score, gt_score, label, alpha=0.75, gamma=2.0):
weight = alpha * pred_score.pow(gamma) * (1 - label) + gt_score * label
# loss = F.binary_cross_entropy(
# pred_score, gt_score, weight=weight, reduction='sum')
        # PyTorch's F.binary_cross_entropy() does not propagate gradients through its
        # weight argument, whereas Paddle's does (into pred_score), so binary
        # cross-entropy is implemented manually here.
        # Under mixed-precision training pred_score is torch.float16; cast it to
        # torch.float32 to avoid log(0) = nan.
pred_score = pred_score.to(torch.float32)
eps = 1e-9
loss = gt_score * (0 - torch.log(pred_score + eps)) + \
(1.0 - gt_score) * (0 - torch.log(1.0 - pred_score + eps))
loss *= weight
loss = loss.sum()
return loss
def _bbox_decode(self, anchor_points, pred_dist):
b, l, _ = get_static_shape(pred_dist)
device = pred_dist.device
pred_dist = pred_dist.reshape([b, l, 4, self.reg_max + 1])
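        # softmax over the reg_max + 1 bins, then matmul with self.proj (the bin
        # indices 0..reg_max) takes the expectation, giving continuous l/t/r/b distances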
pred_dist = F.softmax(pred_dist, dim=-1)
pred_dist = pred_dist.matmul(self.proj.to(device))
return batch_distance2bbox(anchor_points, pred_dist)
def _bbox2distance(self, points, bbox):
x1y1, x2y2 = torch.split(bbox, 2, -1)
lt = points - x1y1
rb = x2y2 - points
return torch.cat([lt, rb], -1).clamp(0, self.reg_max - 0.01)
def _df_loss(self, pred_dist, target):
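        # Distribution Focal Loss: each continuous target distance is split between its
        # two neighbouring integer bins, and cross-entropy is applied to both, weighted
        # by how close the target lies to each bin.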
target_left = target.to(torch.int64)
target_right = target_left + 1
weight_left = target_right.to(torch.float32) - target
weight_right = 1 - weight_left
eps = 1e-9
        # Under mixed-precision training pred_dist is torch.float16 while pred_dist_act is torch.float32
pred_dist_act = F.softmax(pred_dist, dim=-1)
target_left_onehot = F.one_hot(target_left, pred_dist_act.shape[-1])
target_right_onehot = F.one_hot(target_right, pred_dist_act.shape[-1])
loss_left = target_left_onehot * (0 - torch.log(pred_dist_act + eps))
loss_right = target_right_onehot * (0 - torch.log(pred_dist_act + eps))
loss_left = loss_left.sum(-1) * weight_left
loss_right = loss_right.sum(-1) * weight_right
return (loss_left + loss_right).mean(-1, keepdim=True)
def _bbox_loss(self, pred_dist, pred_bboxes, anchor_points, assigned_labels,
assigned_bboxes, assigned_scores, assigned_scores_sum):
# select positive samples mask
mask_positive = (assigned_labels != self.num_classes)
num_pos = mask_positive.sum()
# pos/neg loss
if num_pos > 0:
# l1 + iou
bbox_mask = mask_positive.unsqueeze(-1).repeat([1, 1, 4])
pred_bboxes_pos = torch.masked_select(pred_bboxes,
bbox_mask).reshape([-1, 4])
assigned_bboxes_pos = torch.masked_select(
assigned_bboxes, bbox_mask).reshape([-1, 4])
bbox_weight = torch.masked_select(
assigned_scores.sum(-1), mask_positive).unsqueeze(-1)
loss_l1 = F.l1_loss(pred_bboxes_pos, assigned_bboxes_pos)
loss_iou = self.iou_loss(pred_bboxes_pos,
assigned_bboxes_pos) * bbox_weight
loss_iou = loss_iou.sum() / assigned_scores_sum
dist_mask = mask_positive.unsqueeze(-1).repeat(
[1, 1, (self.reg_max + 1) * 4])
pred_dist_pos = torch.masked_select(
pred_dist, dist_mask).reshape([-1, 4, self.reg_max + 1])
assigned_ltrb = self._bbox2distance(anchor_points, assigned_bboxes)
assigned_ltrb_pos = torch.masked_select(
assigned_ltrb, bbox_mask).reshape([-1, 4])
loss_dfl = self._df_loss(pred_dist_pos,
assigned_ltrb_pos) * bbox_weight
loss_dfl = loss_dfl.sum() / assigned_scores_sum
else:
loss_l1 = torch.zeros([]).to(pred_dist.device)
loss_iou = torch.zeros([]).to(pred_dist.device)
loss_dfl = pred_dist.sum() * 0.
# loss_l1 = None
# loss_iou = None
# loss_dfl = None
return loss_l1, loss_iou, loss_dfl
def get_loss(self, head_outs, gt_meta):
pred_scores, pred_distri, anchors,\
anchor_points, num_anchors_list, stride_tensor = head_outs
device = pred_scores.device
anchors = anchors.to(device)
anchor_points = anchor_points.to(device)
stride_tensor = stride_tensor.to(device)
anchor_points_s = anchor_points / stride_tensor
pred_bboxes = self._bbox_decode(anchor_points_s, pred_distri)
gt_labels = gt_meta['gt_class']
gt_labels = gt_labels.to(torch.int64)
gt_bboxes = gt_meta['gt_bbox']
pad_gt_mask = gt_meta['pad_gt_mask']
        # miemie2013: trim away the padded ground truths
num_boxes = pad_gt_mask.sum([1, 2])
num_max_boxes = num_boxes.max().to(torch.int32)
pad_gt_mask = pad_gt_mask[:, :num_max_boxes, :]
gt_labels = gt_labels[:, :num_max_boxes, :]
gt_bboxes = gt_bboxes[:, :num_max_boxes, :]
# label assignment
if gt_meta['epoch_id'] < self.static_assigner_epoch:
assigned_labels, assigned_bboxes, assigned_scores = \
self.static_assigner(
anchors,
num_anchors_list,
gt_labels,
gt_bboxes,
pad_gt_mask,
bg_index=self.num_classes,
pred_bboxes=pred_bboxes.detach() * stride_tensor)
alpha_l = 0.25
# import numpy as np
# dic = np.load('../aa2.npz')
# print_diff(dic, 'assigned_labels', assigned_labels)
# print_diff(dic, 'assigned_bboxes', assigned_bboxes)
# print_diff(dic, 'assigned_scores', assigned_scores)
# assigned_labels = torch.Tensor(dic['assigned_labels']).to(torch.int64)
# assigned_bboxes = torch.Tensor(dic['assigned_bboxes']).to(torch.float32)
# assigned_scores = torch.Tensor(dic['assigned_scores']).to(torch.float32)
# print()
else:
assigned_labels, assigned_bboxes, assigned_scores = \
self.assigner(
pred_scores.detach(),
pred_bboxes.detach() * stride_tensor,
anchor_points,
num_anchors_list,
gt_labels,
gt_bboxes,
pad_gt_mask,
bg_index=self.num_classes)
alpha_l = -1
# rescale bbox
assigned_bboxes /= stride_tensor
# cls loss
if self.use_varifocal_loss:
one_hot_label = F.one_hot(assigned_labels,
self.num_classes + 1)[..., :-1]
loss_cls = self._varifocal_loss(pred_scores, assigned_scores,
one_hot_label)
else:
loss_cls = self._focal_loss(pred_scores, assigned_scores, alpha_l)
        # Average assigned_scores_sum across devices, then clamp it with max(x, 1)
assigned_scores_sum = assigned_scores.sum()
world_size = get_world_size()
if world_size > 1:
dist.all_reduce(assigned_scores_sum, op=dist.ReduceOp.SUM)
assigned_scores_sum = assigned_scores_sum / world_size
assigned_scores_sum = F.relu(assigned_scores_sum - 1.) + 1. # y = max(x, 1)
loss_cls /= assigned_scores_sum
loss_l1, loss_iou, loss_dfl = \
self._bbox_loss(pred_distri, pred_bboxes, anchor_points_s,
assigned_labels, assigned_bboxes, assigned_scores,
assigned_scores_sum)
loss = self.loss_weight['class'] * loss_cls + \
self.loss_weight['iou'] * loss_iou + \
self.loss_weight['dfl'] * loss_dfl
out_dict = {
'total_loss': loss,
'loss_cls': loss_cls,
'loss_iou': loss_iou,
'loss_dfl': loss_dfl,
'loss_l1': loss_l1,
}
return out_dict
def post_process(self, head_outs, scale_factor):
pred_scores, pred_dist, anchor_points, stride_tensor = head_outs
pred_bboxes = batch_distance2bbox(anchor_points, pred_dist.permute((0, 2, 1)))
pred_bboxes *= stride_tensor
# scale bbox to origin
        # torch.split differs slightly from Paddle's: in torch the second argument is the
        # size of each chunk, while in Paddle it is the number of chunks.
scale_y, scale_x = torch.split(scale_factor, 1, -1)
scale_factor = torch.cat(
[scale_x, scale_y, scale_x, scale_y], -1).reshape([-1, 1, 4])
pred_bboxes /= scale_factor # [N, A, 4] pred_scores.shape = [N, 80, A]
if self.exclude_nms:
# `exclude_nms=True` just use in benchmark
return pred_bboxes.sum(), pred_scores.sum()
else:
# nms
preds = []
nms_cfg = copy.deepcopy(self.nms_cfg)
nms_type = nms_cfg.pop('nms_type')
batch_size = pred_bboxes.shape[0]
yolo_scores = pred_scores.permute((0, 2, 1)) # [N, A, 80]
if nms_type == 'matrix_nms':
for i in range(batch_size):
pred = matrix_nms(pred_bboxes[i, :, :], yolo_scores[i, :, :], **nms_cfg)
preds.append(pred)
elif nms_type == 'multiclass_nms':
preds = my_multiclass_nms(pred_bboxes, yolo_scores, **nms_cfg)
return preds
# bbox_pred, bbox_num, _ = self.nms(pred_bboxes, pred_scores)
# return bbox_pred, bbox_num
| 44.555556 | 98 | 0.602037 | 3,118 | 24,060 | 4.310776 | 0.121873 | 0.034968 | 0.043747 | 0.020832 | 0.393349 | 0.319545 | 0.271929 | 0.216725 | 0.18704 | 0.133175 | 0 | 0.018569 | 0.294929 | 24,060 | 539 | 99 | 44.638219 | 0.773756 | 0.123067 | 0 | 0.180488 | 0 | 0 | 0.030645 | 0 | 0 | 0 | 0 | 0 | 0.004878 | 1 | 0.05122 | false | 0 | 0.034146 | 0.002439 | 0.134146 | 0.004878 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0db35958e78ad7e7170322d7f64f1fbd5c07a1be | 10,800 | py | Python | guipyg/gui.py | neccarus/Gui-Pyg | e69eb9e7f75b67b0cb70f211e5b3e602a920502b | [
"BSD-3-Clause"
] | null | null | null | guipyg/gui.py | neccarus/Gui-Pyg | e69eb9e7f75b67b0cb70f211e5b3e602a920502b | [
"BSD-3-Clause"
] | null | null | null | guipyg/gui.py | neccarus/Gui-Pyg | e69eb9e7f75b67b0cb70f211e5b3e602a920502b | [
"BSD-3-Clause"
] | null | null | null | import pygame
import json
from json import JSONEncoder
from .gui_element.button import Button
from .gui_element.element import Element
from .gui_element.toggleable_element import ToggleableElement
from .gui_element.popup import Popup
from .gui_element.element_group import ElementGroup
from .gui_element.text_elements import TextBox
from .gui_element.menu import Menu
from .gui_style.style_item import theme_dict
# TODO: there is probably a better way to store and retrieve these
class_types = {"Element": Element, "Button": Button, "Popup": Popup, "ToggleableElement": ToggleableElement,
"ElementGroup": ElementGroup, "Menu": Menu, "TextBox": TextBox}
class GUI(ElementGroup):
def __init__(self, *args, theme=None, color_key=(0, 0, 0), **kwargs):
# if elements is None:
self.elements = []
super().__init__(*args, **kwargs)
self.hide_text = True
self.set_colorkey(color_key)
self.elements_to_update = self.elements
self.theme = theme # receives a Theme object from style module, used to stylize all elements
self.need_update = True
self.selected_element = None
self.dragging = None
self.is_draggable = False
self.clip_rect = None
# self.set_clip_area()
def apply_theme(self):
if self.theme:
for theme in theme_dict:
if self.theme == theme_dict[theme].theme_name:
# print(f"found theme {theme}")
theme_dict[theme].style_gui(self)
self.elements_to_update = self.elements
self.apply_theme_to_elements(self.elements)
# break
def apply_theme_to_elements(self, elements):
for element in elements:
element.need_update = True
if hasattr(element, "elements"):
self.apply_theme_to_elements(element.elements)
def bring_element_to_front(self, element):
for index, elements in enumerate(self.elements):
if elements == element:
self.elements += [self.elements.pop(index)]
def fill_elements(self, surface):
for element in self.elements_to_update:
element.fill_elements(surface)
self.elements_to_update = []
def set_clip_area(self):
left, top, right, bottom = 0, 0, self.width, self.height
for element in self.elements:
if element.rect.left < left:
left = element.rect.left + element.pos_x
if element.rect.top < top:
top = element.rect.top + element.pos_y
if element.rect.right > right:
right = element.rect.right + element.pos_x
if element.rect.bottom > bottom:
bottom = element.rect.bottom + element.pos_y
self.clip_rect = pygame.Rect(left, top, right - left, bottom - top)
self.set_clip(self.clip_rect)
def update(self,
screen, need_update=False): # TODO: there must be a more efficient way to do this than have every function loop over every element
# screen to blit to
if self.need_update and self.is_active:
self.fill((0, 0, 0))
self.set_clip_area()
# self.draw_drop_shadows(self)
self.fill_elements(screen)
self.draw_text_to_elements()
self.draw_element_border()
self.blit_elements()
screen.blit(self, (self.pos_x, self.pos_y))
self.need_update = need_update
def select_element(self, mouse_pos):
if not self.selected_element:
reversed_elements = self.elements[::-1]
for index, element in enumerate(reversed_elements):
if element.is_visible and element.is_draggable and \
element.rect.collidepoint(element.get_mouse_pos(mouse_pos)):
element.drag_toggle = True
self.selected_element = element
self.dragging = self.selected_element.drag_element(mouse_pos)
self.bring_element_to_front(element)
break
def drag_selected(self): # TODO: Figure out solution to drag elements from inside of other element_groups
if self.selected_element:
self.selected_element.pos_x, self.selected_element.pos_y = next(self.dragging)
if self.selected_element.pos_x < 0:
self.selected_element.pos_x = 0
elif self.selected_element.pos_x + self.selected_element.width > self.width:
self.selected_element.pos_x = self.width - self.selected_element.width
if self.selected_element.pos_y < 0:
self.selected_element.pos_y = 0
elif self.selected_element.pos_y + self.selected_element.height > self.height:
self.selected_element.pos_y = self.height - self.selected_element.height
self.need_update = True
def let_go(self):
if self.selected_element:
self.selected_element.drag_toggle = False
next(self.dragging)
self.selected_element = None
self.dragging = None
def activate_selected(self, mouse_pos, *args,
**kwargs): # TODO: This should be changed to enable keyboard shortcuts to access the functions of any elements
if self.selected_element:
self.selected_element.click(mouse_pos, *args, **kwargs)
self.need_update = True
return True
else:
self.need_update = True
return False
class GUIEncoder(JSONEncoder):
def default(self, o):
if hasattr(o, "function"):
o.function = encode_function(o.function)
# print(o.function)
if hasattr(o, "elements_to_update"):
# o.elements_to_update = None
del o.elements_to_update
if hasattr(o, "__dict__"):
# print(o.__dict__)
return o.__dict__
else:
pass
def encode_function(function):
# if function:
# print(o.function)
# TODO: this method should be tidied up a bit
encoded_function = {'path': function.path, 'module': function.module,
'function': function.function, 'baseclass': function.baseclass,
'target': function.target, 'parent': function.parent.name,
'args': [],
'kwargs': {}}
# print(function.args)
for arg in function.args:
if hasattr(arg, "base_type") and arg.base_type == "Element":
encoded_function['args'] += [arg.name]
else:
encoded_function['args'] += [arg]
    for key, value in function.kwargs.items():
        if key in ('arg', 'kwarg'):
            continue
        if hasattr(value, "base_type") and value.base_type == "Element":
            encoded_function['kwargs'][key] = value.name
        else:
            encoded_function['kwargs'][key] = value
# print(encoded_function)
return encoded_function
# def encode_gui(gui):
# # removed indent to reduce json file size (by quite a bit)
# return json.dumps(gui, skipkeys=True, cls=GUIEncoder)
def save_gui(gui, file):
with open(file, 'w') as w:
# json.dump(encode_gui(gui), w)
json.dump(gui, w, skipkeys=True, cls=GUIEncoder) # , check_circular=False)
def decode_element(element, gui, cls=Element, class_types=None):
# TODO: this should probably be in the 'element' module
if type(element) != dict:
# print("Decode")
element_decode = json.loads(element)
element_obj = cls(**element_decode)
else:
# print(element)
element_obj = cls(**element)
# print(element_obj.__dict__)
if hasattr(element_obj, "elements"):
for index, element in enumerate(element_obj.elements):
element_name = element["class_name"]
            obj = decode_element(element, gui, class_types[element_name], class_types)
print(obj.__dict__)
element_obj.elements[index] = obj
if hasattr(element_obj, "function") and element_obj.function:
print("has a function")
# element_obj.function = decode_function(element_obj.function, element_obj, gui)
decode_function(element_obj.function, element_obj, gui)
if element_obj.base_type == "StoredFunction":
decode_function(element_obj, gui)
# print(element_obj.__dict__)
return element_obj
def decode_function(function, gui):
# TODO: currently functions are either not being decoded, or aren't being attached to their respective elements
if function['parent'] is not None:
# function['parent'] = element
        resolved_args = []
        for arg in function['args']:
            new_arg = arg
            for element in gui.elements:
                new_arg = check_for_element(arg, element)
            resolved_args.append(new_arg)
        # replace the stored args with their resolved counterparts
        function['args'] = resolved_args
        print(function['args'])
for element in gui.elements:
if function['parent'] == element.name:
function_obj = element.StoredFunction(**function)
print(function_obj.__dict__)
# return function_obj
element.function = function_obj
# def decode_function(function, element, gui):
# if function['parent'] is not None:
# function['parent'] = element
#
# for arg in function.args:
# for element in gui.elements:
# new_arg = check_for_element(arg, element)
# function['args'] += [new_arg]
# print(function.args)
# function_obj = element.StoredFunction(**function)
# print(function_obj.__dict__)
# # return function_obj
# element.function = function_obj
def check_for_element(check_for, element):
if check_for in element:
return element
else:
if hasattr(element, "elements"):
check_for_element(check_for, element.elements)
else:
return check_for
def load_gui(file):
with open(file, 'r') as r:
gui_json = json.load(r)
gui = decode_gui(gui_json)
gui.apply_theme()
return gui
def decode_gui(gui):
# gui_decoded = json.loads(gui)
# gui_obj = GUI(**gui_decoded)
gui_obj = GUI(**gui)
for index, element in enumerate(gui_obj.elements):
element_name = element["class_name"]
obj = decode_element(element, gui_obj, class_types[element_name], class_types)
gui_obj.elements[index] = obj
return gui_obj
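# A minimal round-trip sketch (hypothetical file name; assumes `my_gui` is a populated GUI):
# save_gui(my_gui, 'layout.json')
# restored_gui = load_gui('layout.json')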
| 39.130435 | 147 | 0.604074 | 1,274 | 10,800 | 4.903454 | 0.156986 | 0.046102 | 0.072995 | 0.035217 | 0.324476 | 0.237074 | 0.174804 | 0.130943 | 0.103089 | 0.103089 | 0 | 0.001733 | 0.305278 | 10,800 | 275 | 148 | 39.272727 | 0.830868 | 0.168519 | 0 | 0.146597 | 0 | 0 | 0.034554 | 0 | 0 | 0 | 0 | 0.003636 | 0 | 1 | 0.099476 | false | 0.005236 | 0.057592 | 0 | 0.21466 | 0.020942 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0db5b2837cd73476280980cdc40f9d974e4b24bd | 1,645 | py | Python | bin/interactive.py | dianaspencer/multiagent-particle-envs | 3e0dc8c525c4a2f898e89b87b0a036c4a96bc8cb | [
"MIT"
] | null | null | null | bin/interactive.py | dianaspencer/multiagent-particle-envs | 3e0dc8c525c4a2f898e89b87b0a036c4a96bc8cb | [
"MIT"
] | null | null | null | bin/interactive.py | dianaspencer/multiagent-particle-envs | 3e0dc8c525c4a2f898e89b87b0a036c4a96bc8cb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import multiagent.scenarios as scenarios
from multiagent.environment import MultiAgentEnv
from multiagent.policy import InteractivePolicy
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-s', '--scenario', default='simple.py', help='Path of the scenario Python script.')
args = parser.parse_args()
scenario = scenarios.load(args.scenario).Scenario()
world = scenario.make_world()
# Create a multi-agent environment
env = MultiAgentEnv(
world,
reset_callback=scenario.reset_world,
reward_callback=scenario.reward,
observation_callback=scenario.observation,
info_callback=None,
shared_viewer=False
)
# Render call to create viewer window (necessary only for interactive policies)
env.render()
# Create interactive policies for each agent
policies = [InteractivePolicy(env, i) for i in range(env.n)]
obs_n = env.reset()
while True:
# Collect experiences from all agents by querying
# actions from the current policies of each agent.
act_n = []
for i, policy in enumerate(policies):
act_n.append(policy.action(obs_n[i]))
# Perform actions from agents in the environment
obs_n, reward_n, done_n, _ = env.step(act_n)
# Render all agent views
env.render()
# Display rewards
# for agent in env.world.agents:
# print(agent.name + " reward: %0.3f" % env._get_reward(agent))
| 29.909091 | 108 | 0.672948 | 206 | 1,645 | 5.237864 | 0.456311 | 0.044486 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003157 | 0.229787 | 1,645 | 54 | 109 | 30.462963 | 0.848461 | 0.27538 | 0 | 0.066667 | 0 | 0 | 0.055885 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dbc3224340c0d1375e8b28e1e9819ae6bd24084 | 731 | py | Python | scripts/old/filter_by_filter_presence.py | tomarovsky/MACE | eccc050820f4bb9e483d2592e485ee372ac9453b | [
"Apache-2.0"
] | null | null | null | scripts/old/filter_by_filter_presence.py | tomarovsky/MACE | eccc050820f4bb9e483d2592e485ee372ac9453b | [
"Apache-2.0"
] | null | null | null | scripts/old/filter_by_filter_presence.py | tomarovsky/MACE | eccc050820f4bb9e483d2592e485ee372ac9453b | [
"Apache-2.0"
] | 8 | 2018-05-17T05:00:11.000Z | 2022-03-06T04:08:24.000Z | #!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
from MACE.Parsers.VCF import CollectionVCF
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", action="store", dest="input", required=True,
help="Input vcf file with variants")
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix", required=True,
help="Prefix of output files")
args = parser.parse_args()
variants = CollectionVCF(in_file=args.input, from_file=True)
without_filters, with_filters = variants.filter_by_filter_presence()
without_filters.write("%s.without_filters.vcf" % args.output_prefix)
with_filters.write("%s.with_filters.vcf" % args.output_prefix)
| 34.809524 | 97 | 0.72777 | 96 | 731 | 5.3125 | 0.46875 | 0.094118 | 0.066667 | 0.078431 | 0.101961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.134063 | 731 | 20 | 98 | 36.55 | 0.805687 | 0.02736 | 0 | 0 | 0 | 0 | 0.226761 | 0.030986 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dbd78b43d77e2f637add4aadedc2dd45ad3b56e | 1,227 | py | Python | tests/test_analyzer.py | skantar-modoolar/githubrepo | 7a5954dc6383a3c3219bc75c009dde1b2422ec29 | [
"MIT"
] | 31 | 2018-02-23T20:14:40.000Z | 2022-02-13T09:39:41.000Z | tests/test_analyzer.py | skantar-modoolar/githubrepo | 7a5954dc6383a3c3219bc75c009dde1b2422ec29 | [
"MIT"
] | null | null | null | tests/test_analyzer.py | skantar-modoolar/githubrepo | 7a5954dc6383a3c3219bc75c009dde1b2422ec29 | [
"MIT"
] | 12 | 2017-12-24T22:31:16.000Z | 2022-01-27T13:49:43.000Z | import unittest
class SemanticAnalyzerTestCase(unittest.TestCase):
def analyze(self, text):
from interpreter.lexical_analysis.lexer import Lexer
from interpreter.syntax_analysis.parser import Parser
from interpreter.semantic_analysis.analyzer import SemanticAnalyzer
lexer = Lexer(text)
parser = Parser(lexer)
tree = parser.parse()
SemanticAnalyzer.analyze(tree)
def test_analyzer(self):
self.analyze("""
#include <stdio.h>
#include <math.h>
int a, b;
int test(int a){
}
int main(int a){
int b;
int c = a + b;
double d;
scanf("%d %d", &a, &d);
if(a + 5){
c = 2;
}else{
b = 2;
}
int r = test(a);
printf("%d", c + 2);
return 0;
}
""")
# def test_analyzer_with_error(self):
# self.analyze("""
# int a, b;
#
# int main(int a){
#
# }
# """) | 25.5625 | 75 | 0.416463 | 112 | 1,227 | 4.5 | 0.392857 | 0.039683 | 0.059524 | 0.031746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007886 | 0.483293 | 1,227 | 48 | 76 | 25.5625 | 0.787066 | 0.09617 | 0 | 0 | 0 | 0 | 0.523161 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.25 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dbf5574312608332f8313f03b50dab922a2211e | 2,176 | py | Python | pdxpython/ast/php2python.py | dcolish/Presentations | 50697a3f26405a916eab481b8749cfef2fa23d4b | [
"BSD-3-Clause"
] | 2 | 2016-02-28T09:24:58.000Z | 2019-10-22T03:38:18.000Z | pdxpython/ast/php2python.py | dcolish/Presentations | 50697a3f26405a916eab481b8749cfef2fa23d4b | [
"BSD-3-Clause"
] | null | null | null | pdxpython/ast/php2python.py | dcolish/Presentations | 50697a3f26405a916eab481b8749cfef2fa23d4b | [
"BSD-3-Clause"
] | 1 | 2018-08-17T01:28:58.000Z | 2018-08-17T01:28:58.000Z | import ast
# NOTE: this transpiler emits Python 2 AST nodes (e.g. Print), so it targets Python 2.
# `Node` is the base class of the PHP AST nodes consumed below; phply is assumed to be
# the parser in use (its nodes expose the generic() method relied on in eval_node).
from phply.phpast import Node
node_map = {
'Assignment': dict(pynode='Assign',
mappers={'targets': 'node', 'value': 'expr'},
types={'left': 'list'},
args={'ctx': ast.Store(lineno=0, col_offset=0)}),
'Echo': dict(pynode='Print',
mappers={'values': 'nodes'},
                 types={'left': 'list'},
args={'nl': True, 'ctx': ast.Load(lineno=0, col_offset=0)}),
'Variable': dict(pynode='Name',
mappers={'id': 'name'},
types={'left': 'var'},
args={'ctx': ast.Store(lineno=0, col_offset=0)}),
}
body = []
def eval_node(phpnode, ctx=None):
node_type, el = phpnode.generic()
mapdef = node_map[node_type]
py_ast_class = getattr(ast, mapdef['pynode'])
instance = py_ast_class(lineno=0, col_offset=0,
**mapdef.get('args', {}))
if ctx:
instance.ctx = ctx
else:
ctx = getattr(instance, 'ctx')
for key, val in mapdef['mappers'].iteritems():
sub_expr = getattr(phpnode, val)
if isinstance(sub_expr, Node):
if mapdef['types']['left'] == 'list':
sub_expr = [eval_node(sub_expr, ctx)]
else:
sub_expr = eval_node(sub_expr, ctx)
elif isinstance(sub_expr, list):
list_expr = sub_expr
sub_expr = []
for list_node in list_expr:
if not isinstance(list_node, Node):
if isinstance(list_node, str):
sub_expr.append(ast.Str(s=list_node, lineno=0,
col_offset=0))
else:
sub_expr.append(eval_node(list_node, ctx))
elif isinstance(sub_expr, int):
sub_expr = ast.Num(n=sub_expr, lineno=0, col_offset=0)
setattr(instance, key, sub_expr)
return instance
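# `output` is assumed to be the list of PHP AST nodes produced elsewhere; a
# hypothetical way to obtain it with phply would be:
# from phply.phplex import lexer
# from phply.phpparse import make_parser
# with open('input.php') as php_file:
#     output = make_parser().parse(php_file.read(), lexer=lexer)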
for node in output:
body.append(eval_node(node))
expr = ast.Module(body, lineno=0, col_offset=0)
exec(compile(expr, '<string>', mode='exec'))
| 31.085714 | 77 | 0.517463 | 257 | 2,176 | 4.217899 | 0.287938 | 0.096863 | 0.064576 | 0.103321 | 0.225092 | 0.105166 | 0.105166 | 0.059041 | 0.059041 | 0 | 0 | 0.009749 | 0.340074 | 2,176 | 69 | 78 | 31.536232 | 0.745125 | 0 | 0 | 0.096154 | 0 | 0 | 0.068506 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019231 | false | 0 | 0.038462 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dc0056efdee9b1d1bd0f350e44c040b3529806f | 8,178 | py | Python | ali1688shopeehtmlparse.py | zdlgithub/Ali1688Scraper | 0dc183eca536417bf970a7126d7fc03e90890e16 | [
"Apache-2.0"
] | null | null | null | ali1688shopeehtmlparse.py | zdlgithub/Ali1688Scraper | 0dc183eca536417bf970a7126d7fc03e90890e16 | [
"Apache-2.0"
] | null | null | null | ali1688shopeehtmlparse.py | zdlgithub/Ali1688Scraper | 0dc183eca536417bf970a7126d7fc03e90890e16 | [
"Apache-2.0"
] | null | null | null | import settings
from ali1688scraperbrowser import ScraperBrowser
from bs4 import BeautifulSoup
import datetime
from functools import reduce
import re
def get_attr_content(aa, bb):
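    # Cross-combines two option lists (each entry: [name] or [name, price]): names are
    # joined with a comma and any prices present are carried along, so reduce()-ing
    # this over all attribute lists yields their Cartesian product.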
cc = []
for a in aa:
for b in bb:
ctemp = []
tmp0 = a[0] + ',' + b[0]
ctemp.append(tmp0)
tmp1 = ''
if len(a) > 1 and len(b) > 1:
tmp1 = a[1] + ' ' + b[1]
elif len(a) > 1:
tmp1 = a[1]
elif len(b) > 1:
tmp1 = b[1]
ctemp.append(tmp1)
cc.append(ctemp)
return cc
class DougHtmlParse(object):
def __init__(self):
pass
    # Normalise the record format: product_id; category_name; url
def get_product_info(self, product_id, category_name, exclude_img, web_page_url):
soup = None
if settings.use_html_file_or_browser == settings.HtmlParserTool.FILE:
fp = open('ali1688html/' + product_id + '.html', encoding='gb18030')
soup = BeautifulSoup(fp, features='html.parser')
else:
sb = ScraperBrowser()
sb.web_page_url= web_page_url
html_text=sb.get_html_text()
soup = BeautifulSoup(html_text, features='html.parser')
exclude_imgs = []
if exclude_img:
exclude_imgs = exclude_img.split(',')
# print('exclude_imags:',exclude_imgs)
        print('Parsing started...')
shopee_product = []
for r in range(78):
shopee_product.append(None)
# print('shopee_product_len:',len(shopee_product))
shopee_product[0] = category_name
        # Product title
productname = soup.find('h1', class_='d-title')
# print('productname:', productname.get_text())
shopee_product[1] = re.sub(settings.filter_title_name, '', productname.get_text())
        print('Parsing product description...')
        # Find the product description
# psummary=soup.select('div.product-property-main')
psummary = soup.find('div', id='mod-detail-attributes', class_='mod-detail-attributes')
feature_tds = psummary.select('td')
tdi = 0
tds = []
        # flag: skip the next (filtered) value
        f = 0
        # flag: append the next value to the product title
        tf = 0
add_title_str = ''
for td in feature_tds:
tdi += 1
spans = td.get_text()
if spans:
if f == 1:
f = 0
continue
if tf == 1:
tf = 0
add_title_str += spans
if spans in settings.add_title_field_name:
tf = 1
if spans in settings.product_property_field_names:
f = 1
if f == 0:
tds.append(spans)
# print('spans:')
# print(spans.replace('\n','')+':'+str(tdi))
product_desc = reduce(lambda x, y: x + y,
map(lambda x: x + ':' if tds.index(x) % 2 == 0 and x else x + '\n', tds))
# print(product_desc)
shopee_product[1] = re.sub('[/]+', '', add_title_str) + ' ' + shopee_product[1]
shopee_product[2] = product_desc
shopee_product[6] = settings.shopee_tw_default_ship_days
shopee_product[7] = product_id
local_freight = 8
freight_tag = soup.find('div', class_='cost-entries-type')
if freight_tag:
local_freight = int(freight_tag.find('em', class_='value').string)
weight = settings.shopee_tw_default_weight
weight_tag = soup.find('div', class_='attributes-item mod-info kuajing-attribues')
if weight_tag:
w_tag = weight_tag.select('span em')
weight += float(w_tag[0].string.replace(' kg',''))
shopee_product[5] = weight
        # Find the product price and price discounts
# pricetag=soup.find('span', class_='value price-length-6')
# # print(pricetag)
# if not pricetag:
# pricetag=soup.find('div', class_='price-original-sku')
# # print(pricetag)
# productprice=pricetag.get_text()
# if productprice.find('-') < 0:
# print('productprice:', productprice)
# else:
# print('productprice[]:', productprice.split('-')[1].strip())
        print('Parsing product options...')
        # Find the product attributes and their corresponding prices
attrscontent = {}
attr_list = []
attr_one_part = soup.select('div.d-content div.obj-leading')
# print(attr_one_part)
if attr_one_part:
# print('attr_one_part:[dddd]')
for one_part in attr_one_part:
# print(header)
header_tag = one_part.find('span', class_='obj-title')
attrkey = header_tag.string
# print('attrkey:', attrkey)
dllis = []
list_tag = one_part.select('div.obj-content li a')
for lli in list_tag:
# print(lli['title'])
dllis.append([lli['title']])
attrscontent[attrkey] = dllis
attr_list.append(dllis)
# print(attrscontent)
else:
print('attr_one_part:[]')
attr_sku_part = soup.select('div.d-content div.obj-sku')
if attr_sku_part:
for obj_sku in attr_sku_part:
header_tag = obj_sku.find('span', class_='obj-title')
attrkey = header_tag.string
# print('attrkey:', attrkey)
dllis = []
list_tag = obj_sku.select('div.obj-content tr')
for tr in list_tag:
# print(lli['title'])
name_span = tr.find('td', class_='name').find('span')
if name_span.string:
name = name_span.string
else:
name = name_span['title']
# print('name:', name)
price = tr.find('td', class_='price').find('em', class_='value').string
# print('price:', price)
count = tr.find('td', class_='count').find('em', class_='value').string
# print('count:', count)
if count != '0':
dllis.append([name, price, count])
attrscontent[attrkey] = dllis
attr_list.append(dllis)
# print('attrscontent:', attrscontent)
attr_values_array = reduce(get_attr_content, attr_list) if len(attr_list) > 0 else []
# print('attr_values_array:', attr_values_array)
vi = 1
for attr_value in attr_values_array:
if vi > 15:
break
vti = (vi - 1) * 4 + 1
shopee_product[vti + 8] = str(product_id) + '-' + str(vi)
shopee_product[vti + 9] = re.sub('[ ]+', ' ', attr_value[0])
shopee_product[vti + 10] = str(settings.get_sale_price_for_tw(float(attr_value[1]) , local_freight))
shopee_product[vti + 11] = 200
vi += 1
        # Find the product images
productimages = []
# images = soup.select('div.tab-content-container')
# print(images)
        print('Parsing product images...')
ulimages = soup.select('div.tab-content-container div.vertical-img img')
# print(ulimages)
for img in ulimages:
imgurl = img['src']
if 'lazyload.png' in imgurl:
imgurl = img['data-lazy-src']
imgurl = imgurl.replace('.60x60.jpg', '.jpg')
productimages.append(imgurl)
# print(productimages)
mi = 1
mb = 1
for img in productimages:
            # Decide whether this image should be excluded
if str(mi) in exclude_imgs:
# print('Imgs:', mi, '-', exclude_imgs)
mi += 1
continue
if mb > 9:
break
# print('mb:',mb+68)
shopee_product[mb + 68] = img
mi += 1
mb += 1
if settings.use_html_file_or_browser==settings.HtmlParserTool.FILE:
fp.close()
# print('shopee_product:',shopee_product)
        print('Finished parsing product...')
return shopee_product
def __del__(self):
pass
| 37.172727 | 112 | 0.514796 | 905 | 8,178 | 4.451934 | 0.225414 | 0.064532 | 0.016381 | 0.011914 | 0.182179 | 0.150906 | 0.110697 | 0.110697 | 0.095309 | 0.065525 | 0 | 0.018018 | 0.362069 | 8,178 | 219 | 113 | 37.342466 | 0.754265 | 0.153827 | 0 | 0.158228 | 0 | 0 | 0.075826 | 0.009751 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025316 | false | 0.012658 | 0.037975 | 0 | 0.082278 | 0.037975 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dc06fc3dbe0b9ac91b605cf4ba54edd77f86c7d | 2,454 | py | Python | misc_utils/scripts/combined/combine_datasets.py | shiningsunnyday/ssd_keras | 3a2ea0ceaa676f59b1787ad5f5e8337520c7c056 | [
"Apache-2.0"
] | null | null | null | misc_utils/scripts/combined/combine_datasets.py | shiningsunnyday/ssd_keras | 3a2ea0ceaa676f59b1787ad5f5e8337520c7c056 | [
"Apache-2.0"
] | null | null | null | misc_utils/scripts/combined/combine_datasets.py | shiningsunnyday/ssd_keras | 3a2ea0ceaa676f59b1787ad5f5e8337520c7c056 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
belga_train_path = '../../../../datasets/belgas/belgas_relabelled.csv'
logos_train_path = '../../../../datasets/LogosInTheWild-v2/LogosClean/commonformat/ImageSets/logos_top_train.csv'
flickr_train_path = '../../../../datasets/FlickrLogos_47/flickr_train_labels.csv'
belga_test_path = '../../../../datasets/belgas/belgas_relabelled_test.csv'
logos_test_path = '../../../../datasets/LogosInTheWild-v2/LogosClean/commonformat/ImageSets/logos_top_test.csv'
flickr_test_path = '../../../../datasets/FlickrLogos_47/flickr_val_labels.csv'
belga_train_csv = pd.read_csv(belga_train_path)
belga_train_csv['frame'] = pd.Series(['belgas_images/{}'.format(frame) for frame in belga_train_csv['frame']])
logos_train_csv = pd.read_csv(logos_train_path)
logos_train_csv['frame'] = pd.Series(['LogosInTheWild-v2/LogosClean/voc_format/{}'.format(frame) for frame in logos_train_csv['frame']])
flickr_train_csv = pd.read_csv(flickr_train_path)
flickr_train_csv['frame'] = pd.Series(['FlickrLogos_47/{}'.format(frame) for frame in flickr_train_csv['frame']])
belga_test_csv = pd.read_csv(belga_test_path)
belga_test_csv['frame'] = pd.Series(['belgas_images/{}'.format(frame) for frame in belga_test_csv['frame']])
logos_test_csv = pd.read_csv(logos_test_path)
logos_test_csv['frame'] = pd.Series(['LogosInTheWild-v2/LogosClean/voc_format/{}'.format(frame) for frame in logos_test_csv['frame']])
flickr_test_csv = pd.read_csv(flickr_test_path)
flickr_test_csv['frame'] = pd.Series(['FlickrLogos_47/{}'.format(frame) for frame in flickr_test_csv['frame']])
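# Shift class ids so the three datasets occupy disjoint, consecutive label ranges:
# logos keeps its ids, belga starts after logos' max id, and flickr after belga's.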
belga_train_csv['class_id'] = belga_train_csv['class_id'] + np.max(logos_train_csv['class_id']) + 1
flickr_train_csv['class_id'] = flickr_train_csv['class_id'] + np.max(belga_train_csv['class_id']) + 1
belga_test_csv['class_id'] = belga_test_csv['class_id'] + np.max(logos_test_csv['class_id']) + 1
flickr_test_csv['class_id'] = flickr_test_csv['class_id'] + np.max(belga_test_csv['class_id']) + 1
assert np.max(flickr_train_csv['class_id']) == np.max(flickr_test_csv['class_id'])
train_combined = logos_train_csv.append(belga_train_csv).append(flickr_train_csv).reset_index(drop=True)
test_combined = logos_test_csv.append(belga_test_csv).append(flickr_test_csv).reset_index(drop=True)
train_combined.to_csv('../../../../datasets/combined_train.csv',index=False)
test_combined.to_csv('../../../../datasets/combined_test.csv',index=False) | 70.114286 | 137 | 0.757539 | 374 | 2,454 | 4.572193 | 0.125668 | 0.090058 | 0.081871 | 0.061404 | 0.704094 | 0.373099 | 0.327485 | 0.297076 | 0.297076 | 0.218713 | 0 | 0.00699 | 0.067237 | 2,454 | 35 | 138 | 70.114286 | 0.740061 | 0 | 0 | 0 | 0 | 0 | 0.330855 | 0.232549 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0 | false | 0 | 0.068966 | 0 | 0.068966 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dc3ca7f4bf0bc9234f64614f8a96483cdf2160f | 10,579 | py | Python | patchy/core.py | arogazinsky/django-more | 2eb309b383b1c5b001a0596f0f3ecf2c6910a821 | [
"BSD-3-Clause"
] | 32 | 2017-10-19T10:38:14.000Z | 2021-11-30T22:31:39.000Z | patchy/core.py | OakNorthAI/django-more | 627448bb280674370d088ceb26bcbb0994665d79 | [
"BSD-3-Clause"
] | 6 | 2017-10-25T14:21:53.000Z | 2020-09-17T07:02:34.000Z | patchy/core.py | OakNorthAI/django-more | 627448bb280674370d088ceb26bcbb0994665d79 | [
"BSD-3-Clause"
] | 7 | 2017-12-29T03:18:59.000Z | 2020-09-16T11:52:38.000Z | """ Generic monkey patching functions for doing it (mostly) safely """
import logging
import inspect
from types import MethodType, ModuleType
from collections import abc
from importlib import import_module
from importlib.util import resolve_name
from contextlib import suppress
__all__ = ['patchy', 'super_patchy']
logger = logging.getLogger(__name__)
class ResolveError(Exception):
""" Used to indicate terminal exception during resolve """
pass
class NotFoundError(ResolveError, ImportError):
pass
def patchy(target, source=None):
""" If source is not supplied, auto updates cannot be applied """
if isinstance(target, str):
target = resolve(target)
if isinstance(source, str):
source = resolve(source)
if isinstance(target, ModuleType):
return PatchModule(target, source)
elif isinstance(target, type) and source:
return PatchClass(target, source)
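# Minimal usage sketch (module paths are hypothetical):
#   with patchy('some.package', 'my.patches.some_package') as p:
#       p.auto()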
def super_patchy(*args, do_call=True, **kwargs):
""" super() for patches!
When called from within a patched in function will return or call the
function that it replaced, preserving self/cls arguments
"""
caller_frame = inspect.currentframe().f_back
caller = inspect.getargvalues(caller_frame)
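    # Look up the callable this patch replaced, keyed by the caller's code object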
old_func = patchy_records[caller_frame.f_code]
if caller.args[0] in ['self', 'cls']:
# If caller has the appearance of being bound (to instance or class)
old_func = MethodType(old_func, caller.locals[caller.args[0]])
if do_call:
return old_func(*args, **kwargs)
return old_func
def resolve_exposing(name):
""" Attempt an import but reraise any error other than module not found as terminal to resolving """
try:
return import_module(name)
except ImportError as err:
# Pass any non-suspicious errors on
if err.msg.endswith('is not a package') or err.msg.startswith('No module') and name in err.msg:
raise
# Reraise any suspicious errors as terminal - resolve fails
raise ResolveError('Error importing module {n}'.format(n=name)) from err
def resolve(name, package=None):
""" Turn a dotted name into a module or class reference """
if isinstance(package, str):
package = resolve_exposing(package)
if package:
name = resolve_name('.{}'.format(name), package.__name__)
try:
# Try to get a module
return resolve_exposing(name)
except ImportError as err:
if '.' not in name:
raise NotFoundError('{n} is not a valid module name'.format(n=name)) from err
try:
# Try to get an attribute of a module
mod, attr = name.rsplit('.', maxsplit=1)
package = resolve_exposing(mod)
cls = getattr(package, attr)
assert(isinstance(cls, type))
return cls
except ImportError as err:
raise NotFoundError('{n} is not a valid class or module name'.format(n=name)) from err
except AttributeError as err:
raise NotFoundError('{a} does not exist within {m}'.format(a=attr, m=mod)) from err
except AssertionError as err:
raise ResolveError('{a} in {m} is not a valid class'.format(a=attr, m=mod)) from err
class PatchyRecords(dict):
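    # Maps id(function.__code__) -> the callable it replaced, so patched-in
    # functions can reach their predecessor via super_patchy()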
def __getitem__(self, key):
with suppress(AttributeError):
key = key.__code__
with suppress(KeyError):
return super().__getitem__(id(key))
raise RuntimeError('Patched func cannot find its predecessor')
def __setitem__(self, key, value):
        # Strip built-in decorators (classmethod/staticmethod) down to the raw function
if isinstance(value, (classmethod, staticmethod)):
value = value.__func__
with suppress(AttributeError):
key = key.__code__
return super().__setitem__(id(key), value)
def __delitem__(self, key):
with suppress(AttributeError):
key = key.__code__
return super().__delitem__(id(key))
def __contains__(self, key):
with suppress(AttributeError):
key = key.__code__
return super().__contains__(id(key))
patchy_records = PatchyRecords()
class PatchBase:
allow = set()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def auto(self, source=None, *, allow=None, only_allow=None, merge=True):
""" Apply all attributes of from source to target.
Defaults to merging collections.
"""
if only_allow:
self.allow = only_allow
elif allow:
self.allow = self.allow | allow
attrs = self.get_auto_attrs(source or self.source)
self.apply(dict(attrs), merge=merge)
def add(self, *attrs, **kattrs):
self.apply(attrs, kattrs)
def merge(self, *attrs, **kattrs):
self.apply(attrs, kattrs, merge=True)
def apply(self, attrs=None, kattrs=None, merge=False):
""" Apply new attributes or classes to the target """
for attr in attrs:
kattrs = kattrs or {}
# Treat objects as assigned to their name
if hasattr(attr, "__name__"):
kattrs[attr.__name__] = attr
else:
kattrs[attr] = inspect.getattr_static(self.source, attr)
for attr, value in kattrs.items():
old_value = inspect.getattr_static(self.target, attr, None)
# If callable, preserve old func
if callable(value) and callable(old_value):
# Prevent duplicate patching
if value in patchy_records:
continue
patchy_records[value] = old_value
# Merge collections and classes instead of replacing
if merge:
if isinstance(old_value, abc.Container):
if isinstance(value, abc.Mapping) and isinstance(old_value, abc.MutableMapping):
old_value.update(value)
logger.info('Merging mapping {mod}.{attr}'.format(mod=self.target.__name__, attr=attr))
elif isinstance(value, abc.Sequence) and isinstance(old_value, abc.MutableSequence):
old_value.extend(value)
logger.info('Merging sequence {mod}.{attr}'.format(mod=self.target.__name__, attr=attr))
elif isinstance(value, abc.Set) and isinstance(old_value, abc.MutableSet):
old_value.update(value)
logger.info('Merging set {mod}.{attr}'.format(mod=self.target.__name__, attr=attr))
else:
setattr(self.target, attr, value)
logger.info("Couldn't merge collection {target}.{attr}, replaced instead".format(
target=self.target.__name__,
attr=attr))
continue
elif isinstance(old_value, type):
logger.info('Merging class for {target}.{attr}'.format(
target=self.target.__name__, attr=attr))
self.cls(old_value, value).auto()
continue
logger.info('Setting value {target}.{attr}'.format(target=self.target.__name__, attr=attr))
# Apply patched value
setattr(self.target, attr, value)
def get_attrs(self, source=None, exclude_hidden=True):
# Get all attributes, except hidden if exclude_hidden
# but allowing whitelisted attributes (like __all__)
source = source or self.source
return (
(attr, val)
for attr, val in source.__dict__.items()
if attr in self.allow
or not exclude_hidden
or not attr.startswith('_'))
class PatchModule(PatchBase):
allow = {'__all__'}
def __init__(self, target, source=None, module_sep='_'):
self.target = target
self.source = source
self.module_sep = module_sep
def cls(self, target, source=None):
if isinstance(target, str):
target = resolve(target, package=self.target)
if self.source and source is None:
with suppress(ImportError):
source_str = '{mod}.{cls}'.format(
mod=target.__module__.replace('.', self.module_sep),
cls=target.__name__)
source = resolve(source_str, package=self.source)
if not source:
with suppress(AttributeError):
source = getattr(self.source, target.__name__)
elif isinstance(source, str):
source = resolve(source, package=self.source)
if isinstance(target, type):
return PatchClass(target, source)
raise TypeError('Must be a valid class or class name')
def mod(self, target, source=None):
if isinstance(target, str):
target = resolve(target, package=self.target.__name__)
if isinstance(source, str):
source = resolve(source, package=self.source.__name__)
elif source is None:
            # Deal with nested modules in a package
# Test for corresponding module relative to current source
source_name = target.__name__.replace('.', self.module_sep)
with suppress(ImportError):
source = resolve(source_name, package=self.source.__name__)
if isinstance(target, ModuleType):
if source:
logger.info('Patching {} using {}'.format(target.__name__, source.__name__))
return PatchModule(target, source, self.module_sep)
def get_auto_attrs(self, source=None, exclude_hidden=True):
        # Only auto-apply locally declared objects, or attributes in allow
return (
(attr, val)
for attr, val in self.get_attrs(source, exclude_hidden)
if (hasattr(val, '__module__') and val.__module__ == source.__name__)
or attr in self.allow)
class PatchClass(PatchBase):
allow = {'__init__', '__new__'}
def __init__(self, target, source):
self.target = target
self.source = source
def mod(self):
return self.target.__module__
def get_auto_attrs(self, source=None, exclude_hidden=True):
        # Only auto-apply plain attributes, locally declared objects, or hidden names in allow
return (
(attr, val)
for attr, val in self.get_attrs(source, exclude_hidden)
if not hasattr(val, '__module__')
or val.__module__ == source.__module__
or attr in self.allow)
| 37.647687 | 112 | 0.614519 | 1,235 | 10,579 | 5.046154 | 0.195142 | 0.028883 | 0.015725 | 0.01733 | 0.286264 | 0.238928 | 0.228659 | 0.151797 | 0.137356 | 0.099487 | 0 | 0.0004 | 0.290765 | 10,579 | 280 | 113 | 37.782143 | 0.830201 | 0.125437 | 0 | 0.27 | 0 | 0 | 0.062548 | 0 | 0 | 0 | 0 | 0 | 0.01 | 1 | 0.11 | false | 0.015 | 0.075 | 0.02 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dc4f9d36773ff6ca62f3b1a523b2733e0acae36 | 569 | py | Python | loop-nests/graph-processing/bfs-pull/util/stats_parser.py | NVlabs/data-orchestration | ad86e65fbd99bb7670700cab7c7031bf232c831f | [
"BSD-3-Clause"
] | 5 | 2019-04-13T02:00:35.000Z | 2019-07-22T18:23:25.000Z | loop-nests/graph-processing/bfs-pull/util/stats_parser.py | NVlabs/data-orchestration | ad86e65fbd99bb7670700cab7c7031bf232c831f | [
"BSD-3-Clause"
] | null | null | null | loop-nests/graph-processing/bfs-pull/util/stats_parser.py | NVlabs/data-orchestration | ad86e65fbd99bb7670700cab7c7031bf232c831f | [
"BSD-3-Clause"
] | 2 | 2019-06-23T18:14:21.000Z | 2019-12-29T02:34:33.000Z | import sys
import os
import numpy as np
import helper
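# stats file to parse, given as the first CLI argument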
fname = sys.argv[1]
stats = ['parent', 'frontier', 'TileInOffsets', 'TileSources', 'Totals']
stypes = ['Tensor', 'Offchip']
data = {}
# collect data
for stat in stats:
data[stat] = {}
for stype in stypes:
data[stat][stype] = helper.getData(fname, stat, stype)
## print('***************** Stats **********************')
## print(stats)
## print('**********************************************')
for stat in stats:
for stype in stypes:
print(data[stat][stype], end=' ')
print('')
| 19.62069 | 73 | 0.525483 | 63 | 569 | 4.746032 | 0.444444 | 0.080268 | 0.060201 | 0.093645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002146 | 0.181019 | 569 | 28 | 74 | 20.321429 | 0.639485 | 0.240773 | 0 | 0.25 | 0 | 0 | 0.137116 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dc665111b39744549e1fb53ae5f7e841e340385 | 369 | py | Python | siswa/urls.py | nelsonpythondev/crud-using-widget-tweaks | eb9722e890540a2703068a101546527c7c667db5 | [
"MIT"
] | null | null | null | siswa/urls.py | nelsonpythondev/crud-using-widget-tweaks | eb9722e890540a2703068a101546527c7c667db5 | [
"MIT"
] | null | null | null | siswa/urls.py | nelsonpythondev/crud-using-widget-tweaks | eb9722e890540a2703068a101546527c7c667db5 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
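# CRUD routes for the student ("siswa") records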
urlpatterns = [
path('', views.index, name="home"),
path('listsiswa', views.siswa_view, name="list-siswa"),
path('addsiswa', views.add_siswa, name="add-siswa"),
path('editsiswa/<int:id>', views.edit_siswa, name="edit-siswa"),
path('deletesiswa/<int:id>', views.delete_siswa, name="delete-siswa"),
]
| 33.545455 | 74 | 0.674797 | 50 | 369 | 4.9 | 0.44 | 0.110204 | 0.081633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.130081 | 369 | 10 | 75 | 36.9 | 0.76324 | 0 | 0 | 0 | 0 | 0 | 0.271003 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dc67cec6d004a4498776bf6383786b9d8123e1d | 3,909 | py | Python | name_sub.py | user2589/10707-project | adf1b05e15a3bbc2e7dd8940a2a2bba9e72b5ada | [
"Apache-2.0"
] | null | null | null | name_sub.py | user2589/10707-project | adf1b05e15a3bbc2e7dd8940a2a2bba9e72b5ada | [
"Apache-2.0"
] | null | null | null | name_sub.py | user2589/10707-project | adf1b05e15a3bbc2e7dd8940a2a2bba9e72b5ada | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# coding=utf-8
import argparse
import logging
import os
import numpy as np
import pandas as pd
import string
PRONOUN2GENDER = {
'he': 'male', 'him': 'male', 'his': 'male',
'she': 'female', 'her': 'female', 'hers': 'female'}
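# Neutral replacement-name pairs per gender; transform() picks the first pair
# that does not already occur in the text or collide with A/B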
GENDER2NAME = {
'female': (
('Mary', 'Alice'),
('Mary', 'Elizabeth'),
('Mary', 'Kate'),
('Mary', 'Emma'),
('Mary', 'Olivia'),
('Alice', 'Elizabeth'),
('Alice', 'Kate'),
('Alice', 'Emma'),
('Alice', 'Olivia'),
('Kate', 'Elizabeth'),
('Kate', 'Emma'),
('Kate', 'Olivia'),
('Elizabeth', 'Emma'),
('Elizabeth', 'Olivia'),
('Emma', 'Olivia'),
),
'male': (
('John', 'Michael'),
('John', 'Henry'),
('John', 'James'),
('John', 'David'),
('Michael', 'Henry'),
('Michael', 'James'),
('Michael', 'David'),
('Henry', 'James'),
('Henry', 'David'),
('James', 'David'),
)
}
def replace(text, search, repl, *indexes):
"""
    Replace all occurrences of search with repl and update indexes
"""
search_len = len(search)
len_diff = search_len - len(repl)
while True:
try:
idx = text.index(search)
except ValueError:
return (text, *indexes)
text = text[:idx] + repl + text[idx+search_len:]
indexes = [i if i <= idx else i-len_diff for i in indexes]
def transform(row):
text, po, ao, bo = row['Text'], row['Pronoun-offset'], row['A-offset'], row['B-offset']
for a_repl, b_repl in GENDER2NAME[PRONOUN2GENDER[row['Pronoun'].lower()]]:
if a_repl in text or b_repl in text \
or row['A'] in a_repl or row['B'] in b_repl\
or row['A'] in b_repl or row['B'] in a_repl:
continue
new_row = row.copy()
s = sorted((
(row['A'], 'A', a_repl), (row['B'], 'B', b_repl)),
key=lambda x: -len(x[0]))
# sometimes A is part of B or vice versa.
# So, we need to replace the longest first
for search, key, repl in s:
text, po, ao, bo = replace(text, search, repl, po, ao, bo)
new_row[key] = repl
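        # strip stray markup characters, keeping all character offsets in sync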
for search, repl in (('#', ''), ('`', ''), ('"', ''), ('*', ''),
("--", "-"),
("'''", ""), ("''", ""), (" '", " "), ("' ", " ")):
text, po, ao, bo = replace(text, search, repl, po, ao, bo)
# sanity check:
assert a_repl in text and b_repl in text
new_row['Text'] = text
new_row['Pronoun-offset'] = po
new_row['A-offset'] = ao
new_row['B-offset'] = bo
return new_row
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=""""
Build BERT embeddings for the pronoun resolution project.
    Input file is expected to contain the following columns:
'Text': text of the sequence
'Pronoun-offset': character offset of the pronoun
'Pronoun': actual pronoun
'A', 'B': the alternatives
'A-offset': character offset of the A alternative
'B-offset': character offset of the B alternative
The output is a CSV file with the same columns,
but A and B will be replaced with neutral names
and all indexes will be updated.
""")
parser.add_argument('-i', '--input', default="-",
type=argparse.FileType('r'),
help='Input filename, "-" or skip for stdin')
parser.add_argument('-o', '--output', default="-",
type=argparse.FileType('w'),
help='Output filename, "-" or skip for stdout')
args = parser.parse_args()
pd.read_csv(args.input, sep="\t", index_col=0).apply(
transform, axis=1).to_csv(args.output)
| 31.02381 | 91 | 0.507291 | 463 | 3,909 | 4.200864 | 0.336933 | 0.021594 | 0.015424 | 0.032391 | 0.097172 | 0.038046 | 0.038046 | 0.038046 | 0.038046 | 0.038046 | 0 | 0.003376 | 0.317984 | 3,909 | 125 | 92 | 31.272 | 0.726182 | 0.04835 | 0 | 0.020619 | 0 | 0 | 0.297107 | 0 | 0 | 0 | 0 | 0 | 0.010309 | 1 | 0.020619 | false | 0 | 0.061856 | 0 | 0.103093 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dce53a767ed3bfb1d9e58bb58279706fd6813fa | 378 | py | Python | bot/config.py | famaxth/Russian-Bot-Shop | 0fc1f55878b867e20489bfd115ed0148bd6238e8 | [
"MIT"
] | 13 | 2021-06-02T09:48:23.000Z | 2022-03-28T22:13:02.000Z | bot/config.py | famaxth/Russian-Bot-Shop | 0fc1f55878b867e20489bfd115ed0148bd6238e8 | [
"MIT"
] | 1 | 2020-12-12T00:31:33.000Z | 2020-12-12T00:31:33.000Z | bot/config.py | famaxth/Russian-Bot-Shop | 0fc1f55878b867e20489bfd115ed0148bd6238e8 | [
"MIT"
] | 5 | 2021-06-23T07:00:46.000Z | 2021-12-20T11:25:49.000Z | name = "text" # Имя магазина
token = "text" # Токен бота
admin_id = 777777777 # ID администратора
channel = "@text" # Адрес канала с новостями
op = "@text" # Юзернейм оператора
token_qiwi = "text"
qiwi = "number" # Номер Qiwi кошелька
bitcoin = "text" # Адрес биткоин
logo_stick = "text" # Код стикера
secret_code = "text" # Секретный код
buyer1 = 777777777
| 31.5 | 46 | 0.674603 | 47 | 378 | 5.340426 | 0.702128 | 0.071713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.064407 | 0.219577 | 378 | 11 | 47 | 34.363636 | 0.786441 | 0.383598 | 0 | 0 | 0 | 0 | 0.188679 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dcfc9d940cb87b3c927e8703caf4aa8cf22b376 | 725 | py | Python | SparkPythonCourse/word-count-dataframe.py | leodias-datascience/apache-spark | faecd0e5910b4e780abb7db4e94f83a409e03e34 | [
"Apache-2.0"
] | null | null | null | SparkPythonCourse/word-count-dataframe.py | leodias-datascience/apache-spark | faecd0e5910b4e780abb7db4e94f83a409e03e34 | [
"Apache-2.0"
] | null | null | null | SparkPythonCourse/word-count-dataframe.py | leodias-datascience/apache-spark | faecd0e5910b4e780abb7db4e94f83a409e03e34 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 27 10:13:22 2021
@author: LeonardoDiasdaRosa
"""
from pyspark.sql import SparkSession
from pyspark.sql import functions as func
## Data
DATA_PATH = '../data/'
FILE = "book.txt"
data = DATA_PATH + FILE
# Create a SparkSession
spark = SparkSession.builder.appName("WordCount").getOrCreate()
## Word-count pipeline
inputDF = spark.read.text(data)
words = inputDF.select(func.explode(func.split(inputDF.value, "\\W+")).alias("word"))
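# split each line on non-word characters, then explode to one row per word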
words.filter(words.word != "")
lowerCaseWords = words.select(func.lower(words.word).alias("word"))
wordCount = lowerCaseWords.groupBy("word").count()
wordCountSorted = wordCount.sort("count")
wordCountSorted.show(wordCountSorted.count())
spark.stop() | 23.387097 | 85 | 0.726897 | 92 | 725 | 5.706522 | 0.597826 | 0.041905 | 0.053333 | 0.07619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020062 | 0.106207 | 725 | 31 | 86 | 23.387097 | 0.790123 | 0.169655 | 0 | 0 | 0 | 0 | 0.077966 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dd0991a96f1b462421439b00656979004b9d577 | 2,606 | py | Python | app/tableScraper.py | nyatla/PdfExtractKit | 541cf5a381f5ff0c96e386e1d1cc0fc552ac6845 | [
"MIT"
] | null | null | null | app/tableScraper.py | nyatla/PdfExtractKit | 541cf5a381f5ff0c96e386e1d1cc0fc552ac6845 | [
"MIT"
] | null | null | null | app/tableScraper.py | nyatla/PdfExtractKit | 541cf5a381f5ff0c96e386e1d1cc0fc552ac6845 | [
"MIT"
] | null | null | null | #!/bin/python3
"""
Converts simple tables inside a PDF to JSON.
# python3 table.pdf -o out.json
The output JSON has the following format:
[
{page:int,data:[parsed tables]},...
]
Tables are stored as nested arrays: one per table row, one per column, and one per line of text within each cell.
"""
#%%
from datetime import date
from tqdm import tqdm
from logging import addLevelName
import os,sys
sys.path.append(os.path.join(os.path.dirname("__file__"), '..'))
from typing import Collection,List, Tuple
sys.path.append(os.path.join(os.path.dirname("__file__"), '..'))
from libPdfextractkit import Box, PdfExtractKit,PreviewCanvas,BoxType,BoxSet,TableReader,Page
import argparse
from typing import Collection,Dict,Union
import json
from app.extlibs.libNyatlanPython.syntaxparser import *
from app.extlibs.libNyatlanPython.jsontemplates import DataJson
#%%
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="")
    parser.add_argument('pdf', help='PDF file name', type=str)
    parser.add_argument('--filter', help='string trimming mode', choices=["none","trim","nogap"], type=str, default="none")
    parser.add_argument('--rowmargin', help='merge margin for table detection', type=float, default=1)
    parser.add_argument('--sidemargin', help='merge margin for table detection', type=float, default=3)
    parser.add_argument('--lineseparategap', help='merge margin for table detection', type=float, default=0.1)
    parser.add_argument('--linesidegap', help='merge margin for table detection', type=float, default=3.)
    parser.add_argument('--pages', help='page specifier all|1,2..|1-2|1-|1:2', type=str, default="all")
    parser.add_argument('--description', help='value of the description field', type=str, default="")
    parser.add_argument('-o', help='output file name', type=str, default=None)
args=parser.parse_args()
dest=args.o if args.o is not None else args.pdf+".json"
ftrim=args.filter=="trim" or args.filter=="nogap"
fnogap=args.filter=="nogap"
params=args.__dict__
version="PdfExtractKit.app.tableScraper/0.1"
ret=None
with PdfExtractKit.load(args.pdf) as doc:
print("%s has %d pages."%(args.pdf,len(doc)))
pl=SyntaxParser.toIntList(args.pages,minimum=1,maximum=len(doc))
print("pages: %s"%(str(pl)))
pbar = tqdm(total=len(pl))
data=[]
for p in pl:
            # parse the entire page
tr=TableReader(doc[p-1].extract(),rowmargin=args.rowmargin,sidemargin=args.sidemargin,lineseparategap=args.lineseparategap,linesidegap=args.linesidegap)
data.append({"page":p,"table":tr.toList(ftrim,fnogap)})
pbar.update(1)
pbar.close()
dj=DataJson.create(version,data,description=args.description,params=params)
dj.save(args.pdf+".json" if args.o is None else args.o)
| 35.216216 | 164 | 0.693784 | 335 | 2,606 | 5.307463 | 0.370149 | 0.045557 | 0.086052 | 0.051744 | 0.137233 | 0.103487 | 0.103487 | 0.103487 | 0.103487 | 0.049494 | 0 | 0.008981 | 0.145434 | 2,606 | 73 | 165 | 35.69863 | 0.789403 | 0.070606 | 0 | 0.045455 | 0 | 0 | 0.141838 | 0.014268 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dd18628c57f70a1b34391c7c3b1e07d45c77aea | 1,795 | py | Python | sqlite.py | Ben-Ning/webhook_demo | 4a07f83df7e4d7788656582f68358aab4e30e3a1 | [
"Apache-2.0"
] | null | null | null | sqlite.py | Ben-Ning/webhook_demo | 4a07f83df7e4d7788656582f68358aab4e30e3a1 | [
"Apache-2.0"
] | null | null | null | sqlite.py | Ben-Ning/webhook_demo | 4a07f83df7e4d7788656582f68358aab4e30e3a1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2020
# Chenglin Ning, chenglinning@gmain.com
import logging
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy import create_engine
from sqlalchemy import exc
from sqlalchemy import event
from sqlalchemy.pool import Pool
from sqlalchemy.pool import QueuePool
from sqlalchemy.pool import NullPool
from singleton import singleton_sqlite
from models import Base
import traceback
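# Ping pooled connections on checkout so stale ones are discarded and re-created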
@event.listens_for(Pool, "checkout")
def ping_connection(dbapi_connection, connection_record, connection_proxy):
try:
cursor = dbapi_connection.cursor()
cursor.execute("SELECT 1")
cursor.close()
    except Exception:
raise exc.DisconnectionError()
@singleton_sqlite
class SQLite():
def __init__(self, dbname="master.db"):
self.dbname = dbname
self.dbengine = None
self.dbsession_class = None
def createEngine(self):
db_string = "sqlite:///./%s" % ( self.dbname )
if self.dbengine == None:
self.dbengine = create_engine(db_string, poolclass=NullPool)
if self.dbengine:
logging.info("SQLite connect: %s OK" % (db_string))
else:
logging.info("SQLite connect: %s Failure" % (db_string))
return self.dbengine
def getSessionClass(self):
assert(self.dbengine)
return sessionmaker(bind=self.dbengine)
def getMDBSessionClass(dbname='master.db'):
db = SQLite(dbname=dbname)
db.createEngine()
return db.getSessionClass()
def getMDatabaseInstance(dbname='master.db'):
db = SQLite(dbname=dbname)
db.createEngine()
return db
def getMDatabaseEngine(dbname='master.db'):
db = SQLite(dbname=dbname)
db.createEngine()
return db.dbengine
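# Typical usage (model and instance names are illustrative):
#   Session = getMDBSessionClass('master.db')
#   session = Session()
#   session.add(some_model_instance)
#   session.commit()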
| 28.951613 | 75 | 0.69415 | 209 | 1,795 | 5.866029 | 0.363636 | 0.091354 | 0.045677 | 0.058728 | 0.177814 | 0.137031 | 0.137031 | 0.137031 | 0.137031 | 0.137031 | 0 | 0.007032 | 0.207799 | 1,795 | 61 | 76 | 29.42623 | 0.855134 | 0.046797 | 0 | 0.12 | 0 | 0 | 0.066198 | 0 | 0 | 0 | 0 | 0 | 0.02 | 1 | 0.14 | false | 0 | 0.24 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dd2369e794daf26a5b02e83b43c9d84e97be11a | 2,909 | py | Python | calibrate_cam.py | EsbernTK/first-order-model | 35117e1bb8ad72f9de134e0167defd6c381dcc5d | [
"MIT"
] | null | null | null | calibrate_cam.py | EsbernTK/first-order-model | 35117e1bb8ad72f9de134e0167defd6c381dcc5d | [
"MIT"
] | null | null | null | calibrate_cam.py | EsbernTK/first-order-model | 35117e1bb8ad72f9de134e0167defd6c381dcc5d | [
"MIT"
] | null | null | null | import cv2
from skimage.transform import resize
import os
from demo import key_pressed
if __name__ == '__main__':
frame_dim = 256
vid = cv2.VideoCapture(0)
width = 1920
height = 1080
vid.set(cv2.CAP_PROP_FRAME_WIDTH, width)
vid.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
capname = "cap"
# cv2.namedWindow(capname,cv2.WND_PROP_FULLSCREEN)
cv2.namedWindow(capname, cv2.WND_PROP_AUTOSIZE)
cv2.moveWindow(capname, 0, 0)
#cv2.setWindowProperty(capname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
start_it = 0
inc = 10
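    # step size in pixels for moving/resizing the crop window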
save_file = os.path.abspath("camera_calibration.txt")
wind_x, wind_y, wind_height, wind_width = int(width / 2), int(height / 2), 256, 256
#print(os.path.exists(save_file))
if(os.path.exists(save_file)):
with(open(save_file,"r")) as file:
line = file.read().split(",")
wind_x, wind_y, wind_height, wind_width = line
wind_x, wind_y, wind_height, wind_width = int(wind_x), int(wind_y), int(wind_height), int(wind_width)
#wind_x, wind_y, wind_height, wind_width = 710, 480, 486, 486
pressed = False
while (True):
ret, frame = vid.read()
frame = frame[wind_y:wind_y + wind_height, wind_x:wind_x + wind_width]
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = resize(frame, (frame_dim, frame_dim))
cv2.imshow(capname, frame)
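        # keys: w/a/s/d move the crop window, q/e shrink/grow it, z saves, x quits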
if (key_pressed("x")):
break
elif (key_pressed("w")):
if (not pressed):
pressed = True
wind_y = max(0, wind_y - inc)
elif (key_pressed("s")):
if (not pressed):
pressed = True
wind_y = min(height - wind_height, wind_y + inc)
elif (key_pressed("a")):
if (not pressed):
pressed = True
wind_x = max(0, wind_x - inc)
elif (key_pressed("d")):
if (not pressed):
pressed = True
wind_x = min(width - wind_width, wind_x + inc)
elif (key_pressed("q")):
if (not pressed):
pressed = True
wind_height = max(frame_dim, wind_height - inc)
wind_width = wind_height
elif (key_pressed("e")):
if (not pressed):
pressed = True
wind_height = min(height - wind_y, wind_height + inc)
wind_width = wind_height
elif (key_pressed("z")):
if (not pressed):
pressed = True
print(f"saved file {save_file}")
with(open(save_file,"w")) as f:
f.write(f"{wind_x},{wind_y},{wind_height},{wind_width}")
print(f"{wind_x},{wind_y},{wind_height},{wind_width}")
else:
pressed = False
cv2.waitKey(1)
vid.release()
cv2.destroyAllWindows() | 35.048193 | 113 | 0.564799 | 377 | 2,909 | 4.103448 | 0.238727 | 0.096962 | 0.052359 | 0.077569 | 0.521008 | 0.473174 | 0.292178 | 0.177117 | 0.139625 | 0.05947 | 0 | 0.029234 | 0.317979 | 2,909 | 83 | 114 | 35.048193 | 0.750504 | 0.07494 | 0 | 0.257143 | 0 | 0 | 0.057313 | 0.040938 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.057143 | 0 | 0.057143 | 0.028571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dd3d5da7a826b786172c44a1312296d48423f6a | 2,824 | py | Python | fallball/fallballapp/fixtures/load_data.py | jazz-jack/fallball-service | 12c91bc0162a8d3d2d103acd66df6e72cdf6a8d2 | [
"Apache-2.0"
] | null | null | null | fallball/fallballapp/fixtures/load_data.py | jazz-jack/fallball-service | 12c91bc0162a8d3d2d103acd66df6e72cdf6a8d2 | [
"Apache-2.0"
] | null | null | null | fallball/fallballapp/fixtures/load_data.py | jazz-jack/fallball-service | 12c91bc0162a8d3d2d103acd66df6e72cdf6a8d2 | [
"Apache-2.0"
] | 1 | 2020-06-19T05:00:43.000Z | 2020-06-19T05:00:43.000Z | import binascii
import os
from django.conf import settings
from fallballapp.meta_data import data
from fallballapp.utils import get_app_username
def load_data(apps, schema_editor):
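    # Seed a superuser with a fixed token, a demo application, and the
    # resellers/clients/client-users described by the meta_data templates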
user_admin, created = apps.get_model('auth', 'User').objects.get_or_create(
username='admin',
password='pbkdf2_sha256$24000$ZVxkeukDOSaR$BkbfzKABp5MTWFALbWbggsunbYjTWYn8G/+tWMktZZg=',
is_superuser=True,
is_staff=True)
apps.get_model('authtoken', 'Token').objects.get_or_create(
pk=settings.ADMIN_AUTH_TOKEN,
user=user_admin)
user_app, created = apps.get_model('auth', 'User').objects.get_or_create(
username='new_app',
is_superuser=False,
is_staff=False)
apps.get_model('authtoken', 'Token').objects.get_or_create(
user=user_app,
pk=binascii.hexlify(os.urandom(20)).decode())
app, created = apps.get_model('fallballapp', 'Application').objects.get_or_create(
pk='new_app',
owner=user_app)
for reseller_template in data:
username = get_app_username(app.id, reseller_template['name'])
owner, created = apps.get_model('auth', 'User').objects.get_or_create(username=username)
apps.get_model('authtoken', 'Token').objects.get_or_create(
user=owner,
pk=binascii.hexlify(os.urandom(20)).decode())
params = dict.copy(reseller_template)
params.pop('clients', None)
reseller, created = apps.get_model('fallballapp', 'Reseller').objects.get_or_create(
application=app,
owner=owner,
**params)
if 'clients' in reseller_template:
for client_template in reseller_template['clients']:
params = dict.copy(client_template)
params.pop('users', None)
client = apps.get_model('fallballapp', 'Client').objects.create(
reseller=reseller,
**params)
if 'users' in client_template:
for user_template in client_template['users']:
username = get_app_username(app.id, user_template['user_id'])
owner, created = apps.get_model('auth', 'User').objects.get_or_create(
username=username)
apps.get_model('authtoken', 'Token').objects.get_or_create(
user=owner,
pk=binascii.hexlify(os.urandom(20)).decode())
params = dict.copy(user_template)
params.pop('users', None)
apps.get_model('fallballapp', 'ClientUser').objects.create(
client=client,
owner=owner,
**params)
| 40.342857 | 97 | 0.589235 | 305 | 2,824 | 5.232787 | 0.213115 | 0.052632 | 0.090226 | 0.112782 | 0.47619 | 0.388471 | 0.354637 | 0.333333 | 0.333333 | 0.305764 | 0 | 0.008586 | 0.298867 | 2,824 | 69 | 98 | 40.927536 | 0.797475 | 0 | 0 | 0.280702 | 0 | 0 | 0.111544 | 0.027266 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017544 | false | 0.017544 | 0.087719 | 0 | 0.105263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dd55b3492d17351fe309ec42664f2d185906465 | 1,417 | py | Python | pymc3/step_methods/compound.py | bishax/pymc3 | 7cd66b7ff35659bf6dc7ae100794925e18708781 | [
"Apache-2.0"
] | 4 | 2019-04-03T08:49:07.000Z | 2022-01-05T00:36:29.000Z | pymc3/step_methods/compound.py | bishax/pymc3 | 7cd66b7ff35659bf6dc7ae100794925e18708781 | [
"Apache-2.0"
] | null | null | null | pymc3/step_methods/compound.py | bishax/pymc3 | 7cd66b7ff35659bf6dc7ae100794925e18708781 | [
"Apache-2.0"
] | 1 | 2017-06-12T14:59:34.000Z | 2017-06-12T14:59:34.000Z | '''
Created on Mar 7, 2011
@author: johnsalvatier
'''
class CompoundStep(object):
"""Step method composed of a list of several other step methods applied in sequence."""
def __init__(self, methods):
self.methods = list(methods)
self.generates_stats = any(method.generates_stats for method in self.methods)
self.stats_dtypes = []
for method in self.methods:
if method.generates_stats:
self.stats_dtypes.extend(method.stats_dtypes)
def step(self, point):
if self.generates_stats:
states = []
for method in self.methods:
if method.generates_stats:
point, state = method.step(point)
states.extend(state)
else:
point = method.step(point)
return point, states
else:
for method in self.methods:
point = method.step(point)
return point
@property
def report(self):
reports = []
for method in self.methods:
if hasattr(method, 'report'):
reports.append(method.report)
return _CompoundReport(reports)
class _CompoundReport(object):
def __init__(self, reports):
self._reports = reports
def _finalize(self, strace):
for report in self._reports:
report._finalize(strace)
| 28.34 | 91 | 0.579393 | 152 | 1,417 | 5.256579 | 0.296053 | 0.09637 | 0.068836 | 0.093867 | 0.272841 | 0.217772 | 0.110138 | 0.110138 | 0.110138 | 0 | 0 | 0.005319 | 0.336627 | 1,417 | 49 | 92 | 28.918367 | 0.844681 | 0.090332 | 0 | 0.285714 | 0 | 0 | 0.004702 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dda6198b29e6336d0c0d4ba19bec65d52b9c7a2 | 674 | py | Python | Assignment13_2JSONdataextraction.py | AnkurDesai11/PY4E | bfd185ef89d4b054a2286ca8a6eae476c086b782 | [
"BSD-3-Clause"
] | null | null | null | Assignment13_2JSONdataextraction.py | AnkurDesai11/PY4E | bfd185ef89d4b054a2286ca8a6eae476c086b782 | [
"BSD-3-Clause"
] | null | null | null | Assignment13_2JSONdataextraction.py | AnkurDesai11/PY4E | bfd185ef89d4b054a2286ca8a6eae476c086b782 | [
"BSD-3-Clause"
] | null | null | null | '''
Created on 25 Aug, 2020
@author: ABD
Sample data: http://py4e-data.dr-chuck.net/comments_42.json (Sum=2553)
Actual data: http://py4e-data.dr-chuck.net/comments_896976.json (Sum ends with 62)
'''
import urllib.request, urllib.parse, urllib.error
import ssl
import json
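# ignore SSL certificate errors so the fetch works without a local CA bundle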
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = input('Enter URL- ')
data = urllib.request.urlopen(url, context=ctx).read()
info = json.loads(data)
#print(len(info["comments"]))
numsum = 0
for i in range( 0, len(info["comments"]) ) :
num = float(info["comments"][i]["count"])
numsum = numsum + num
print(int(numsum)) | 28.083333 | 83 | 0.688427 | 103 | 674 | 4.436893 | 0.582524 | 0.078775 | 0.052516 | 0.070022 | 0.148797 | 0.148797 | 0.148797 | 0.148797 | 0 | 0 | 0 | 0.042032 | 0.152819 | 674 | 24 | 84 | 28.083333 | 0.758319 | 0.32641 | 0 | 0 | 0 | 0 | 0.075472 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.214286 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ddaedacd3a784847edbad802242ae23fdc0a158 | 2,027 | py | Python | strands_tweets/scripts/image_tweeter_node.py | LuAPi/strands_social | 488e2d3450df6d5d0dd26994d19077b8259c285e | [
"MIT"
] | null | null | null | strands_tweets/scripts/image_tweeter_node.py | LuAPi/strands_social | 488e2d3450df6d5d0dd26994d19077b8259c285e | [
"MIT"
] | 8 | 2015-02-10T11:07:21.000Z | 2019-10-17T09:41:10.000Z | strands_tweets/scripts/image_tweeter_node.py | LuAPi/strands_social | 488e2d3450df6d5d0dd26994d19077b8259c285e | [
"MIT"
] | 4 | 2015-02-09T16:15:58.000Z | 2019-01-23T14:15:44.000Z | #! /usr/bin/env python
import rospy
import actionlib
from strands_tweets.msg import SendTweetAction, SendTweetGoal, GrabImageThenTweetAction, GrabImageThenTweetResult
from sensor_msgs.msg import Image
class ImageTweeter(object):
def __init__(self) :
rospy.loginfo('Waiting for strands_tweets')
self.tweet_client = actionlib.SimpleActionClient('strands_tweets', SendTweetAction)
self.tweet_client.wait_for_server()
rospy.loginfo(" ... Init done")
self.server = actionlib.SimpleActionServer('strands_image_tweets', GrabImageThenTweetAction, self.execute, False)
self.server.start()
def execute(self, goal):
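        # grab a single frame from the requested image topic, then tweet it with the goal text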
self.image = None
self.sub = rospy.Subscriber(goal.topic, Image, self.image_callback, queue_size=1)
        # the polling is ugly, but it's hard to do things in a ros-time friendly way otherwise
while self.image == None and not rospy.is_shutdown() and not self.server.is_preempt_requested():
rospy.sleep(0.5)
result = GrabImageThenTweetResult(success = False)
if rospy.is_shutdown() or self.server.is_preempt_requested():
self.server.set_preempted(result)
return
# if we got this far we have an image
tweet = SendTweetGoal(text = goal.text, force = goal.force, with_photo = True, photo = self.image )
self.tweet_client.send_goal(tweet)
while not self.tweet_client.wait_for_result(rospy.Duration(0.5)) and not rospy.is_shutdown() and not self.server.is_preempt_requested():
pass
if rospy.is_shutdown() or self.server.is_preempt_requested():
self.server.set_preempted(result)
else:
result.success = True
self.server.set_succeeded(result)
def image_callback(self, message):
self.image = message
self.sub.unregister()
if __name__ == '__main__':
rospy.init_node('image_tweeter_node')
it = ImageTweeter()
rospy.spin() | 34.948276 | 144 | 0.674396 | 247 | 2,027 | 5.336032 | 0.412955 | 0.068285 | 0.045524 | 0.057663 | 0.230653 | 0.197269 | 0.197269 | 0.197269 | 0.197269 | 0.197269 | 0 | 0.003218 | 0.23335 | 2,027 | 58 | 145 | 34.948276 | 0.844916 | 0.070054 | 0 | 0.108108 | 0 | 0 | 0.053107 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0.027027 | 0.108108 | 0 | 0.243243 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ddb9f14ed80c3814d00d8cedd70dbfbb0e10be6 | 1,876 | py | Python | 3.py | Abhilash2015mca/Flask-Restful | 308f7e3887e465361cd7c399c684d70b375752fa | [
"MIT"
] | null | null | null | 3.py | Abhilash2015mca/Flask-Restful | 308f7e3887e465361cd7c399c684d70b375752fa | [
"MIT"
] | null | null | null | 3.py | Abhilash2015mca/Flask-Restful | 308f7e3887e465361cd7c399c684d70b375752fa | [
"MIT"
] | null | null | null | # Avoided iterarion using filter function and added put and delete operation
from flask import Flask, request
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
items = []
class Item(Resource):
def get(self, name):
item = next(filter(lambda x: x['name'] == name, items), None)
return {" item": item}, 200 if item is not None else 404
# print single item
def delete(self, name):
global items
if next(filter(lambda x: x['name'] == name, items), None):
items = list(filter(lambda x: x['name'] != name, items))
return {name: "is deleted"}, 200
return {name: "not found"}, 404
# delete the requested item
class addItem(Resource):
def post(self):
data = request.get_json()
if next(filter(lambda x: x['name'] == data["name"], items), None):
return {" item": "Already Present"}
item = {"name": data["name"], "price": data["price"]}
items.append(item)
return {item["name"]: "inserted"}, 201
# Add a single item at a time and returns that
def put(self):
data = request.get_json()
item = next(filter(lambda x: x['name'] == data["name"], items), None)
if item is None:
item = {"name": data["name"], "price": data["price"]}
items.append(item)
return {item["name"]: "inserted"}, 201
# add item if item is not present
else:
item.update(data)
return {item["name"]: "updated"}, 200
# update item
class itemList(Resource):
def get(self):
return {"items": items}, 200
# returns the full item list
api.add_resource(Item, '/item/<string:name>/')
api.add_resource(addItem, '/add/')
api.add_resource(itemList, '/items/')
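# Example requests against the local dev server (paths as registered above):
#   curl -X POST http://127.0.0.1:5000/add/ -H "Content-Type: application/json" -d '{"name": "pen", "price": 5}'
#   curl http://127.0.0.1:5000/item/pen/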
if __name__ == '__main__':
app.run(debug=True)
| 30.258065 | 77 | 0.579424 | 243 | 1,876 | 4.399177 | 0.283951 | 0.056127 | 0.060804 | 0.065482 | 0.371375 | 0.311506 | 0.311506 | 0.275023 | 0.275023 | 0.209542 | 0 | 0.017634 | 0.27452 | 1,876 | 61 | 78 | 30.754098 | 0.767818 | 0.124733 | 0 | 0.195122 | 0 | 0 | 0.115055 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121951 | false | 0 | 0.04878 | 0.02439 | 0.439024 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ddd0c2f299cd8c68206ef4fe9197b0a0b2718d4 | 3,287 | py | Python | tests/integration_tests/test_node_starts_in_reset_and_converges.py | sspbft/BFTList | d73aee5bd0ab05995509f0fcfaf3c0a5944e617a | [
"MIT"
] | 6 | 2019-11-12T01:45:55.000Z | 2022-03-18T10:57:21.000Z | tests/integration_tests/test_node_starts_in_reset_and_converges.py | practicalbft/BFTList | d73aee5bd0ab05995509f0fcfaf3c0a5944e617a | [
"MIT"
] | 4 | 2019-02-14T10:57:09.000Z | 2019-03-21T15:22:08.000Z | tests/integration_tests/test_node_starts_in_reset_and_converges.py | sspbft/BFTList | d73aee5bd0ab05995509f0fcfaf3c0a5944e617a | [
"MIT"
] | 1 | 2019-04-04T15:09:33.000Z | 2019-04-04T15:09:33.000Z | """
Testing functionality of reseting when no other node is resetting.
Node 0 has gone through a reset without needing it and ended up in phase 1 with RST_PAIR.
Node 0 shall catch up with the other nodes in view (2,2)
"""
# standard
import asyncio
import logging
import unittest
from copy import deepcopy
# local
from . import helpers
from .abstract_integration_test import AbstractIntegrationTest
# globals
F = 1
N = 6
logger = logging.getLogger(__name__)
views = [{"current": -1, "next": 0}, {"current": 2, "next": 2},
{"current": 2, "next": 2}, {"current": 2, "next": 2},
{"current": 2, "next": 2}, {"current": 2, "next": 2}]
phases = [1, 0, 0, 0, 0, 0]
vChanges = [False for i in range(N)]
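# Node 0 starts in phase 1 with default views (mid-reset); nodes 1-5 agree on view 2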
start_state = {}
for i in range(N):
start_state[str(i)] = {
"VIEW_ESTABLISHMENT_MODULE": {
"views": views,
"phs": phases,
"vChange": vChanges
}
}
class TestNodeConvergeAfterReset(AbstractIntegrationTest):
"""Performs health check on all nodes base endpoint (/)."""
async def bootstrap(self):
"""Sets up BFTList for the test."""
helpers.write_state_conf_file(start_state)
return await helpers.launch_bftlist(__name__)
async def validate(self):
calls_left = helpers.MAX_NODE_CALLS
test_result = False
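        # poll every node's /data endpoint until views, phases and vChange match the targets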
while calls_left > 0:
aws = [helpers.GET(i, "/data") for i in helpers.get_nodes()]
checks = []
last_check = calls_left == 1
for a in asyncio.as_completed(aws):
result = await a
data = result["data"]["VIEW_ESTABLISHMENT_MODULE"]
views = data["views"]
phases = data["phs"]
vChange = data["vChange"]
witnesses = data["witnesses"]
witnesses_set = data["witnesses_set"]
vp_target = {"current": 2, "next": 2}
phases_target = [0 for i in range(N)]
vChange_target = [False for i in range(N)]
if last_check:
for i,vp in enumerate(views):
self.assertEqual(vp, vp_target)
self.assertEqual(phases, phases_target)
self.assertEqual(vChange, vChange_target)
else:
for i,vp in enumerate(views):
checks.append(vp == vp_target)
checks.append(phases == phases_target)
checks.append(vChange == vChange_target)
# all checks passed means test passed
if all(checks):
test_result = True
break
# sleep for 2 seconds and the re-try
await asyncio.sleep(2)
calls_left -= 1
        self.assertTrue(test_result)
@helpers.suppress_warnings
def test(self):
logger.info(f"{__name__} starting")
pids = helpers.run_coro(self.bootstrap())
super().set_pids(pids)
helpers.run_coro(self.validate())
logger.info(f"{__name__} finished")
def tearDown(self):
helpers.kill(super().get_pids())
helpers.cleanup()
if __name__ == '__main__':
    unittest.main()
0ddd68a108546a0adfbfa6089b444f8271d6dbbc | 4,723 | py | Python | test_attack.py | leeyegy/pytorch-cifar | a8a39891c7f7a86b636cfc615a431a761dfd74c6 | [
"MIT"
] | null | null | null | test_attack.py | leeyegy/pytorch-cifar | a8a39891c7f7a86b636cfc615a431a761dfd74c6 | [
"MIT"
] | null | null | null | test_attack.py | leeyegy/pytorch-cifar | a8a39891c7f7a86b636cfc615a431a761dfd74c6 | [
"MIT"
] | null | null | null | '''Train CIFAR10 with PyTorch.'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from models import *
from utils import progress_bar
from loss import *
from advertorch.attacks import GradientSignAttack,LinfPGDAttack
from advertorch.context import ctx_noparamgrad_and_eval
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument("--net",default="ResNet18",type=str)
parser.add_argument('--test_model_path', type=str)
parser.add_argument("--loss",type=str,default="CE",choices=["CE","CS"])
parser.add_argument("--attack_method",type=str,default="FGSM",choices=["PGD","FGSM"])
parser.add_argument("--epsilon",type=float,default=0.03137)
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
# model dict
net_dict = {"VGG19":VGG('VGG19'),
"ResNet18": ResNet18(),
"PreActResNet18": PreActResNet18(),
"GoogLeNet":GoogLeNet(),
"DenseNet121":DenseNet121(),
"ResNeXt29_2x64d":ResNeXt29_2x64d(),
"MobileNet":MobileNet(),
"MobileNetV2":MobileNetV2(),
"DPN92": DPN92(),
# "ShuffleNetG2":ShuffleNetG2(),
"SENet18":SENet18(),
"ShuffleNetV2":ShuffleNetV2(1),
"EfficientNetB0":EfficientNetB0(),
"RegNetX_200MF":RegNetX_200MF()
}
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(
root='/home/Leeyegy/.torch/datasets', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(
root='/home/Leeyegy/.torch/datasets', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(
testset, batch_size=100, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck')
# Model
print('==> Building model..')
net = net_dict[args.net]
net = net.to(device)
checkpoint = torch.load(os.path.join(args.test_model_path))
net.load_state_dict(checkpoint['net'])
start_epoch = checkpoint['epoch']
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
# if args.loss == "CS":
# checkpoint = torch.load(os.path.join(args.test_model_path))
# net.load_state_dict(checkpoint['net'])
# elif args.loss == "CE":
# checkpoint = torch.load(os.path.join(args.test_model_path))
# net.load_state_dict(checkpoint['net'])
# # net.load_state_dict(torch.load(os.path.join(args.test_model_path)))
# best_acc = checkpoint['acc']
# # start_epoch = checkpoint['epoch']
criterion = nn.CrossEntropyLoss() if args.loss == "CE" else Cosine_Similarity_Loss()
# define the adversary for the chosen attack method
if args.attack_method == "FGSM":
adversary = GradientSignAttack(net,eps=args.epsilon,loss_fn=criterion,clip_min=0.0,clip_max=1.0)
elif args.attack_method == "PGD":
adversary = LinfPGDAttack(net,eps=args.epsilon,nb_iter=10,eps_iter=0.007,loss_fn=criterion,rand_init=True)
def test():
global best_acc
net.eval()
test_loss = 0
pgd_loss = 0
pgd_correct = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
with torch.no_grad():
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
total += targets.size(0)
correct += get_correct_num(outputs,targets,args.loss)
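        # adversarial pass: craft perturbed inputs (parameter grads disabled during the attack), then score them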
with ctx_noparamgrad_and_eval(net):
pgd_data = adversary.perturb(inputs.clone().detach(), targets)
with torch.no_grad():
outputs = net(pgd_data)
loss = criterion(outputs, targets)
pgd_loss += loss.item()
pgd_correct += get_correct_num(outputs,targets,args.loss)
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d) PgdAcc:%.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total,100.*pgd_correct/total,pgd_correct,total))
test()
| 33.978417 | 126 | 0.680076 | 598 | 4,723 | 5.230769 | 0.311037 | 0.017583 | 0.027174 | 0.019182 | 0.222187 | 0.206841 | 0.190857 | 0.190857 | 0.164003 | 0.11477 | 0 | 0.042092 | 0.170019 | 4,723 | 138 | 127 | 34.224638 | 0.755867 | 0.139953 | 0 | 0.083333 | 0 | 0.010417 | 0.11367 | 0.014364 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010417 | false | 0 | 0.145833 | 0 | 0.15625 | 0.020833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ddda64a86be49c713d7995b9f5330b5684cded0 | 2,327 | py | Python | src/megrok/rdb/tests/initialization.py | prsephton/megrok.rdb | 5e1f5493704159869eac24518b77fc0a06c62c14 | [
"ZPL-2.1"
] | null | null | null | src/megrok/rdb/tests/initialization.py | prsephton/megrok.rdb | 5e1f5493704159869eac24518b77fc0a06c62c14 | [
"ZPL-2.1"
] | null | null | null | src/megrok/rdb/tests/initialization.py | prsephton/megrok.rdb | 5e1f5493704159869eac24518b77fc0a06c62c14 | [
"ZPL-2.1"
] | null | null | null | """
We test an initialization pattern for z3c.saconfig where the metadata
is set up in an event handler.
Let's first grok things::
>>> grok.testing.grok('megrok.rdb.meta')
>>> grok.testing.grok(__name__)
Let's start using the database now::
>>> session = rdb.Session()
>>> philosophy = Department(name='Philosophy')
>>> session.add(philosophy)
>>> logic = Course(name='Logic')
>>> ethics = Course(name='Ethics')
>>> metaphysics = Course(name='Metaphysics')
>>> session.add_all([logic, ethics, metaphysics])
Let's now add them to the courses container::
>>> philosophy.courses.set(logic)
>>> philosophy.courses.set(ethics)
>>> philosophy.courses.set(metaphysics)
We can now verify that the courses are there::
>>> [(course.id, course.name, course.department_id) for course in
... session.query(Course)]
[(1, 'Logic', 1), (2, 'Ethics', 1), (3, 'Metaphysics', 1)]
>>> for key, value in sorted(philosophy.courses.items()):
... print key, value.name, value.department.name
1 Logic Philosophy
2 Ethics Philosophy
3 Metaphysics Philosophy
"""
import grokcore.component as grok
from z3c.saconfig import EngineFactory, GloballyScopedSession
from z3c.saconfig.interfaces import IEngineCreatedEvent
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import Integer, String
from sqlalchemy.orm import relation
from megrok import rdb
TEST_DSN = 'sqlite:///:memory:'
engine_factory = EngineFactory(TEST_DSN)
scoped_session = GloballyScopedSession()
grok.global_utility(engine_factory, direct=True)
grok.global_utility(scoped_session, direct=True)
metadata = rdb.MetaData()
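# tables are created only once the engine exists, via the subscriber below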
@grok.subscribe(IEngineCreatedEvent)
def setUpDatabase(event):
rdb.setupDatabase(metadata)
rdb.metadata(metadata)
class Courses(rdb.Container):
pass
class Department(rdb.Model):
id = Column('id', Integer, primary_key=True)
name = Column('name', String(50))
courses = relation('Course',
backref='department',
collection_class=Courses)
class Course(rdb.Model):
id = Column('id', Integer, primary_key=True)
department_id = Column('department_id', Integer,
ForeignKey('department.id'))
name = Column('name', String(50))
| 27.376471 | 69 | 0.700473 | 285 | 2,327 | 5.652632 | 0.336842 | 0.024829 | 0.037244 | 0.026071 | 0.126629 | 0.099317 | 0.099317 | 0.099317 | 0.048417 | 0 | 0 | 0.008277 | 0.169317 | 2,327 | 84 | 70 | 27.702381 | 0.825142 | 0.49248 | 0 | 0.133333 | 0 | 0 | 0.061433 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0.033333 | 0.233333 | 0 | 0.566667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0de2dcd6a923e5ccc38202d45bae32bf1315f3b6 | 2,200 | py | Python | python/helper/QTPAD_DataPrep.py | noxtoby/dem | 13fcd39194eeab91b276d5f616270c3e132b9070 | [
"MIT"
] | null | null | null | python/helper/QTPAD_DataPrep.py | noxtoby/dem | 13fcd39194eeab91b276d5f616270c3e132b9070 | [
"MIT"
] | null | null | null | python/helper/QTPAD_DataPrep.py | noxtoby/dem | 13fcd39194eeab91b276d5f616270c3e132b9070 | [
"MIT"
] | null | null | null | # Simple wrapper for data preparation
#
# Input: ADNI data as pandas dataframe
# Output: Modified dataframe
# * handle missing data: remove individuals with missing demographics?
# * stables and progressors (clinical diagnosis)
#
# Author: Neil Oxtoby, UCL, January 2018
# Translated and modified from earlier MATLAB code
import numpy as np
import pandas as pd
from .convenience_functions import findStablesAndProgressors
from sklearn.model_selection import StratifiedKFold
def prep_data(df, covariatesFixed = ['AGE','PTGENDER','PTEDUCAT','APOE4'], covariatesDynamic = ['COLPROT'], covariatesMRI = ['ICV','FLDSTRENG','FSVERSION']):
#====== Covariates ======
df_prepped = df.copy()
covs0 = covariatesFixed + covariatesDynamic
#* Fix AGE
print('\n\n****** Converting AGE to AGE + Years_bl (because AGE was actually baseline age in the QT-PAD spreadsheet prepared by Mike Donohue in June 2017) ******\n\n')
df_prepped['AGE.bl'] = df_prepped['AGE']
df_prepped['AGE'] = df_prepped['AGE.bl'] + df_prepped['Years.bl']
#* Identify the rows with missing demographic/covariate data
print('Excluding rows with missing: DX, and fixed covariates {0}\n'.format(covs0))
missingDemographicsCovariates = df_prepped[['DX']+covs0].isnull().any(1)
includedRows = missingDemographicsCovariates == False
df_prepped = df_prepped[includedRows].copy()
#= Numerical diagnosis
DX = df_prepped['DX']
DXnum = 11*(DX=='NL') + 12*(DX=='NL to MCI') + 13*(DX=='NL to Dementia') + 22*(DX=='MCI') + 23*(DX=='MCI to Dementia') + 21*(DX=='MCI to NL') + 32*(DX=='Dementia to MCI') + 33*(DX=='Dementia')
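    # two-digit code: tens digit = previous diagnosis (1 NL, 2 MCI, 3 Dementia), units digit = current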
DXnum[DXnum==0] = np.nan
df_prepped['DXNUM'] = DXnum
#====== Clinical progression: Stable/Progressor ======
rID = df_prepped['RID']
years_bl = df_prepped['Years.bl']
stable, progressor, reverter, mixed, progression_visit, reversion_visit, stable_u, progressor_u, reverter_u, mixed_u, progression_visit_u, reversion_visit_u = findStablesAndProgressors(years_bl,DXnum,rID)
df_prepped.DX_stable = stable
df_prepped.DX_progressor = progressor
df_prepped.DX_progression_visit = progression_visit
return df_prepped
| 48.888889 | 208 | 0.704091 | 280 | 2,200 | 5.396429 | 0.442857 | 0.101257 | 0.0364 | 0.018531 | 0.056916 | 0.040371 | 0 | 0 | 0 | 0 | 0 | 0.016793 | 0.160909 | 2,200 | 44 | 209 | 50 | 0.801733 | 0.216364 | 0 | 0 | 0 | 0.04 | 0.228204 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.16 | 0 | 0.24 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dea26bd304d66b97401c4279b06adff6552c050 | 2,160 | py | Python | setup.py | collective/collective.recipe.beanstalkd | e19d8046f0adabfddf8c030a5eeacf66bca1e5cc | [
"MIT"
] | null | null | null | setup.py | collective/collective.recipe.beanstalkd | e19d8046f0adabfddf8c030a5eeacf66bca1e5cc | [
"MIT"
] | null | null | null | setup.py | collective/collective.recipe.beanstalkd | e19d8046f0adabfddf8c030a5eeacf66bca1e5cc | [
"MIT"
] | null | null | null | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/collective/collective.recipe.beanstalkd
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
def readfile(filename):
with open(path.join(here, filename), encoding='utf-8') as f:
return f.read()
long_description = readfile('README.rst')
long_description += readfile('CHANGES.rst')
setup(
name = "collective.recipe.beanstalkd",
version = "0.1",
description = "Buildout recipe to download, build, install and configure beanstalkd",
long_description=long_description,
classifiers=[
'Framework :: Buildout',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Environment :: No Input/Output (Daemon)',
'Environment :: Web Environment',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Topic :: Communications :: Email',
'Topic :: Software Development',
'Topic :: System :: Networking',
'Topic :: Utilities'
],
keywords='buildout recipe beanstalkd',
author='Noe Nieto',
author_email='nnieto@noenieto.com',
url='https://github.com/collective/collective.recipe.beanstalkd',
license='BSD',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['collective', 'collective.recipe'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'zc.buildout',
# -*- Extra requirements: -*-
'zc.recipe.egg'
],
    entry_points={'zc.buildout': ['default = collective.recipe.beanstalkd:Recipe']},
)
| 34.285714 | 89 | 0.658796 | 224 | 2,160 | 6.272321 | 0.558036 | 0.05694 | 0.074021 | 0.034164 | 0.071174 | 0.071174 | 0.071174 | 0 | 0 | 0 | 0 | 0.003507 | 0.20787 | 2,160 | 62 | 90 | 34.83871 | 0.817651 | 0.116667 | 0 | 0.081633 | 0 | 0 | 0.497104 | 0.033175 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020408 | false | 0 | 0.061224 | 0 | 0.102041 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
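# --- Illustrative usage (editor's addition) ---
# A hypothetical buildout.cfg part consuming this recipe; the part name
# 'beanstalkd' is invented, the recipe name comes from the entry point above:
#
#   [beanstalkd]
#   recipe = collective.recipe.beanstalkd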
0dea59d213130e261db2bc8cf3da1b67cff5451e | 5,187 | py | Python | model/LGNN.py | FeiGSSS/LGNN | d91107dcb2e94622b57afab1bcef90817545d729 | [
"MIT"
] | 2 | 2021-04-02T03:36:26.000Z | 2022-02-22T13:33:05.000Z | model/LGNN.py | FeiGSSS/LGNN | d91107dcb2e94622b57afab1bcef90817545d729 | [
"MIT"
] | null | null | null | model/LGNN.py | FeiGSSS/LGNN | d91107dcb2e94622b57afab1bcef90817545d729 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
'''
@File : LGNN.py
@Time : 2021/03/26 16:55:20
@Author : Fei gao
@Contact : feig@mail.bnu.edu.cn
BNU, Beijing, China
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class LGNN_layer(nn.Module):
def __init__(self, size_Fa,
size_Fb,
feat_dim_in,
feat_dim_line_in,
feat_dim_out,
feat_dim_line_out,
final_layer=False):
super(LGNN_layer, self).__init__()
self.size_Fa = size_Fa
self.size_Fb = size_Fb
self.final_layer = final_layer
self.x2x_1 = nn.Linear(size_Fa*feat_dim_in, feat_dim_out)
self.y2x_1 = nn.Linear(2*feat_dim_line_in, feat_dim_out)
if not self.final_layer:
self.x2x_2 = nn.Linear(size_Fa*feat_dim_in, feat_dim_out)
self.y2x_2 = nn.Linear(2*feat_dim_line_in, feat_dim_out)
self.y2y_1 = nn.Linear(size_Fb*feat_dim_line_in, feat_dim_line_out)
self.y2y_2 = nn.Linear(size_Fb*feat_dim_line_in, feat_dim_line_out)
self.x2y_1 = nn.Linear(4*feat_dim_out, feat_dim_line_out)
self.x2y_2 = nn.Linear(4*feat_dim_out, feat_dim_line_out)
self.bn_x = nn.BatchNorm1d(2*feat_dim_out)
self.bn_y = nn.BatchNorm1d(2*feat_dim_line_out)
def forward(self, inputs):
"""One forward pass step
Args:
            Fa (list): the original graph's sparse operators
            node_feat (Tensor): node features of the original graph
            Fb (list): the line graph's sparse operators
            node_feat_line (Tensor): node features of the line graph
"""
Fa, node_feat, Fb, node_feat_line, Pm, Pd = inputs
assert self.size_Fa == len(Fa)
assert self.size_Fb == len(Fb)
if not self.final_layer:
# Agg to X
Fab = [Pm, Pd]
x2x_by_operator = self.operatorXfeat(Fa, node_feat)
y2x_by_operator = self.operatorXfeat(Fab, node_feat_line)
z = F.relu(self.x2x_1(x2x_by_operator) + self.y2x_1(y2x_by_operator))
z_prime = self.x2x_2(x2x_by_operator) + self.y2x_2(y2x_by_operator)
x = torch.cat((z_prime, z), dim=1)
x = self.bn_x(x)
# Agg to y
Fab_prime = [Pm.t(), Pd.t()]
y2y_by_operator = self.operatorXfeat(Fb, node_feat_line)
x2y_by_operator = self.operatorXfeat(Fab_prime, x)
w = F.relu(self.y2y_1(y2y_by_operator) + self.x2y_1(x2y_by_operator))
w_prime = self.y2y_2(y2y_by_operator) + self.x2y_2(x2y_by_operator)
y = torch.cat((w, w_prime), dim=1)
y = self.bn_y(y)
return [Fa, x, Fb, y, Pm, Pd]
else:
Fab = [Pm, Pd]
x2x_by_operator = self.operatorXfeat(Fa, node_feat)
y2x_by_operator = self.operatorXfeat(Fab, node_feat_line)
z_prime = self.x2x_1(x2x_by_operator) + self.y2x_1(y2x_by_operator)
return z_prime
def operatorXfeat(self, operators_list, feat):
feat_out = []
for A in operators_list:
feat_out.append(torch.sparse.mm(A, feat))
return torch.cat(feat_out, dim=1) # [N, (J+2)*feat_dim]
class LGNN(nn.Module):
def __init__(self, hid_dim, num_layers, J, num_classes, device):
super(LGNN, self).__init__()
self.hid_dim = hid_dim
self.num_layers = num_layers
self.J = J
self.num_classes = num_classes
self.device = device
self.layers = self.build_layers()
self.to(self.device)
def forward(self, inputs):
# Fa, node_feat, Fb, node_feat_line, Pm, Pd = inputs
return self.layers(inputs)
def build_layers(self):
layers = nn.Sequential()
layer = LGNN_layer(size_Fa=self.J+2,
size_Fb=self.J+2,
feat_dim_in=1,
feat_dim_out=int(self.hid_dim/2),
feat_dim_line_in=1,
feat_dim_line_out=int(self.hid_dim/2))
layers.add_module(name="lgnn layer init", module=layer)
        for i in range(self.num_layers):  # plain ints keep the module names clean
layer = LGNN_layer(size_Fa=self.J+2,
size_Fb=self.J+2,
feat_dim_in=self.hid_dim,
feat_dim_out=int(self.hid_dim/2),
feat_dim_line_in=self.hid_dim,
feat_dim_line_out=int(self.hid_dim/2))
layers.add_module(name="lgnn layer {}".format(i), module=layer)
layer = LGNN_layer(size_Fa=self.J+2,
size_Fb=self.J+2,
feat_dim_in=self.hid_dim,
feat_dim_out=self.num_classes,
feat_dim_line_in=self.hid_dim,
feat_dim_line_out=None,
final_layer=True)
layers.add_module(name="lgnn layer final", module=layer)
return layers
| 38.139706 | 81 | 0.555427 | 728 | 5,187 | 3.629121 | 0.163462 | 0.092733 | 0.07078 | 0.047691 | 0.518547 | 0.4243 | 0.411431 | 0.397426 | 0.397426 | 0.397426 | 0 | 0.026796 | 0.345286 | 5,187 | 135 | 82 | 38.422222 | 0.751178 | 0.08984 | 0 | 0.252632 | 0 | 0 | 0.009452 | 0 | 0 | 0 | 0 | 0 | 0.021053 | 1 | 0.063158 | false | 0 | 0.031579 | 0.010526 | 0.168421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
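# --- Illustrative smoke test (editor's addition; a sketch, not from the repo) ---
# Identity operators stand in for Fa/Fb and random incidence matrices for
# Pm/Pd; in the real pipeline these come from the graph and its line graph.
# Sizes follow the Linear layers above (J+2 operators, 1-dim input features).
import torch

def sparse_eye(n):
    idx = torch.arange(n)
    return torch.sparse_coo_tensor(torch.stack([idx, idx]), torch.ones(n), (n, n))

N, M, J, hid = 6, 8, 2, 16                  # graph/line-graph nodes, J, hidden dim
Fa = [sparse_eye(N) for _ in range(J + 2)]  # original-graph operators
Fb = [sparse_eye(M) for _ in range(J + 2)]  # line-graph operators
Pm = torch.rand(N, M).to_sparse()           # placeholder incidence operators
Pd = torch.rand(N, M).to_sparse()
model = LGNN(hid_dim=hid, num_layers=1, J=J, num_classes=3, device='cpu')
out = model([Fa, torch.randn(N, 1), Fb, torch.randn(M, 1), Pm, Pd])
print(out.shape)                            # expected: torch.Size([6, 3])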
0deb78bf2abfa3eb4f5da9d512c50b99330acc10 | 1,682 | py | Python | Cloud/seeder.py | faramarzQ/Cloud_assisted_resource_allocation_using_machine_learning | 1aa8d8e63dcd6053f69e17bf90595a0aa2a7d317 | [
"Apache-2.0"
] | 1 | 2022-03-14T01:44:28.000Z | 2022-03-14T01:44:28.000Z | Cloud/seeder.py | faramarzQ/Cloud_assisted_resource_allocation_using_machine_learning | 1aa8d8e63dcd6053f69e17bf90595a0aa2a7d317 | [
"Apache-2.0"
] | null | null | null | Cloud/seeder.py | faramarzQ/Cloud_assisted_resource_allocation_using_machine_learning | 1aa8d8e63dcd6053f69e17bf90595a0aa2a7d317 | [
"Apache-2.0"
] | 1 | 2021-08-04T08:30:58.000Z | 2021-08-04T08:30:58.000Z | import random
if __name__ == '__main__':
""" the seeder module creates sample data using the given template
the template is in the seeder_data file,
it specifies the attributes and values our data has
output: data.csv
"""
# read template
with open("Cloud/storage/data/seeder_data_template.csv", "r") as file_object:
seeder = file_object.read().split('\n')
with open("Cloud/storage/data/data.csv", "a") as file_object:
# for each row in template, create data in random numbers
# and write the row in a file
        for row in seeder:
            temp = row.split(',')
            # inner counter renamed so it no longer shadows the template row
            for _ in range(random.randint(25, 45)):
file_object.write(temp[0] +',')
if temp[1] == 'low':
file_object.write(str(random.randint(1, 100)) +',')
elif temp[1] == 'high':
file_object.write(str(random.randint(1, 300)) +',')
if temp[2] == 'low':
file_object.write(str(random.randint(1, 10)) +',')
elif temp[2] == 'high':
file_object.write(str(random.randint(11, 30)) +',')
file_object.write(temp[3] +',')
file_object.write(temp[4])
file_object.write('\n')
# read the whole data to manipulate
with open("Cloud/storage/data/data.csv", "r") as file_object:
array = file_object.read().split('\n')
random.shuffle(array)
    array2 = '\n'.join(array) + '\n'
with open("Cloud/storage/data/data.csv", "w") as file_object:
array = file_object.write(array2) | 32.346154 | 81 | 0.548157 | 217 | 1,682 | 4.129032 | 0.322581 | 0.167411 | 0.15067 | 0.089286 | 0.401786 | 0.323661 | 0.263393 | 0.149554 | 0 | 0 | 0 | 0.026064 | 0.315696 | 1,682 | 52 | 82 | 32.346154 | 0.752389 | 0.077883 | 0 | 0 | 0 | 0 | 0.123318 | 0.092676 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.034483 | 0 | 0.034483 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
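# --- Format note (editor's addition) ---
# Each template row parsed by the loop above has five comma-separated fields:
#   <label>,<low|high>,<low|high>,<field4>,<field5>
# A hypothetical row 'taskA,low,high,edge,1' would yield 25-45 output rows of
# the form 'taskA,<1-100>,<11-30>,edge,1'; only the low/high keywords are
# fixed by the code, the other fields are copied through verbatim.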
0debd1322ec02fba09f8ff27e8077a2edc7b8e18 | 485 | py | Python | Difficulty/Medium/78.subsets.py | ryderfang/LeetCode | be9470dee6175bab321f0549f173c0c682dddd77 | [
"MIT"
] | 1 | 2021-05-06T02:27:30.000Z | 2021-05-06T02:27:30.000Z | Difficulty/Medium/78.subsets.py | ryderfang/LeetCode | be9470dee6175bab321f0549f173c0c682dddd77 | [
"MIT"
] | null | null | null | Difficulty/Medium/78.subsets.py | ryderfang/LeetCode | be9470dee6175bab321f0549f173c0c682dddd77 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=78 lang=python3
#
# [78] Subsets
#
# @lc code=start
from typing import List
import itertools
class Solution:
def subsets(self, nums: List[int]) -> List[List[int]]:
sz = len(nums)
ret = [[]]
cnt = 1
while cnt <= sz:
idx = list(itertools.combinations(range(sz), cnt))
for x in idx:
ret.append([nums[i] for i in list(x)])
cnt += 1
return ret
# @lc code=end
| 20.208333 | 62 | 0.519588 | 65 | 485 | 3.876923 | 0.569231 | 0.047619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022152 | 0.348454 | 485 | 23 | 63 | 21.086957 | 0.775316 | 0.156701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
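# --- Illustrative usage (editor's addition) ---
# Quick check of the combination-based enumeration above:
#   Solution().subsets([1, 2, 3])
#   -> [[], [1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]]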
0decd297da4d380687e304aaf258a5a2ae51da22 | 220 | py | Python | BOJ_Solved/BOJ-1546.py | CodingLeeSeungHoon/Python_Algorithm_TeamNote | 1e92986999b45aa9951e12e67b23062e410e9b36 | [
"MIT"
] | 7 | 2021-11-19T14:50:59.000Z | 2022-02-25T20:00:20.000Z | BOJ_Solved/BOJ-1546.py | CodingLeeSeungHoon/Python_Algorithm_TeamNote | 1e92986999b45aa9951e12e67b23062e410e9b36 | [
"MIT"
] | null | null | null | BOJ_Solved/BOJ-1546.py | CodingLeeSeungHoon/Python_Algorithm_TeamNote | 1e92986999b45aa9951e12e67b23062e410e9b36 | [
"MIT"
] | null | null | null | """
BOJ problem 1546: Average
"""
number = int(input())
scores = list(map(int, input().split()))
temp = max(scores)
whole = sum(scores)
print((whole * 100 / temp) / number) | 15.714286 | 42 | 0.577273 | 31 | 220 | 4.096774 | 0.645161 | 0.125984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.048193 | 0.245455 | 220 | 14 | 43 | 15.714286 | 0.716867 | 0.059091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
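# --- Worked example (editor's addition) ---
# Each score is rescaled as score / max * 100 before averaging; summing first
# and dividing once, as above, gives the same result:
#   input "3" and "40 80 60"  ->  (180 * 100 / 80) / 3 = 75.0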
0df1fb92821d42f2413092da69023b3c603ae978 | 2,902 | py | Python | utils/text_width_check.py | boamaod/frogatto | 21eca0e87af51a57298cb7392fcedda2367614aa | [
"CC-BY-3.0"
] | 326 | 2015-01-02T10:15:14.000Z | 2022-03-28T11:53:57.000Z | utils/text_width_check.py | boamaod/frogatto | 21eca0e87af51a57298cb7392fcedda2367614aa | [
"CC-BY-3.0"
] | 418 | 2015-06-01T22:35:46.000Z | 2022-03-28T08:55:48.000Z | utils/text_width_check.py | boamaod/frogatto | 21eca0e87af51a57298cb7392fcedda2367614aa | [
"CC-BY-3.0"
] | 88 | 2015-01-19T20:03:22.000Z | 2022-03-15T17:36:39.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, codecs
#usage: from frogatto's base folder, run:
#utils/text_width_check.py po/(desired file, with a "po" or "pot" extension) [optional:max width]
#e.g.:
#utils/text_width_check.py po/frogatto.pot 360
global MAXWIDTH
MAXWIDTH = 360
def main(catalog):
if catalog.split('.')[-1] == "pot":
check = "msgid"
font = "dialog_font.cfg"
elif catalog.split('.')[-1] == "po":
check = "msgstr"
#look for a specific dialog_font definition for that locale;
#if unavailable, select the default one
font = "dialog_font." + catalog.split('.')[0].split('/')[1] + ".cfg"
if font not in os.listdir('data'):
font = "dialog_font.cfg"
else: return
	#'data' is relative to frogatto's base folder, matching the usage comment
	#above and the os.listdir('data') check
	fontdata = codecs.open("data/" + font, encoding="utf-8").readlines()
fontdata = [x.strip() for x in fontdata]
charwidths = {}
i = 0
#default kerning for fonts
kerning = 2
while i < len(fontdata):
if "kerning:" in fontdata[i]:
kerning = int(fontdata[i].split(":")[1].replace(',',''))
if "chars: \"" in fontdata[i]:
chars = list(fontdata[i].split(":", 1)[1].replace(" \"", "",1).replace("\",", "",1))
elif "width:" in fontdata[i]:
for x in chars:
charwidths[x]=int(fontdata[i].split(":")[1].replace(',',''))
elif "rect:" in fontdata[i]:
width = fontdata[i].replace('[','').replace(']','').split(':')[1].split(',')[:-1]
width = int(width[2]) - int(width[0]) + 1
for x in chars:
charwidths[x]=width
i += 1
f = codecs.open(catalog, encoding="utf-8").readlines()
	#start from the first message line, i.e. the first line starting with "#:"
l = [x[:2] for x in f].index("#:")
msgline = 0
while l < len(f):
if "#:" in f[l] and "#:" not in f[l-1]:
msgline = l
if check in f[l]:
linewidth = checkwidth(getmessage(f[l]), charwidths, kerning)
if linewidth > MAXWIDTH:
printline(f, msgline, linewidth)
if len(getmessage(f[l])) == 0:
l += 1
while l < len(f) and '"' in f[l] and f[l][0] != "m":
linewidth = checkwidth(getmessage(f[l]), charwidths, kerning)
if linewidth > MAXWIDTH:
printline(f, msgline, linewidth)
l += 1
l += 1
def printline(f, start, width):
line = start
sys.stdout.write((str(width) + " pixels:\n").encode("utf-8"))
while len(f[line].strip()) > 0:
sys.stdout.write((f[line].strip()+"\n").encode("utf-8"))
line += 1
sys.stdout.write("\n".encode("utf-8"))
def checkwidth(line, widths, kerning):
result = 0
for x in line:
if x in widths.keys():
result += widths[x] + kerning
return result - kerning
def getmessage(line):
if line[0] == "m":
return line.replace('\\n','').replace('\r','').replace('\n','').split(' ',1)[1][1:-1]
else:
return line.replace('\\n','').replace('\r','').replace('\n','')[1:-1]
if __name__ == "__main__":
if len(sys.argv) == 2:
main(sys.argv[1])
elif len(sys.argv) == 3:
MAXWIDTH = int(sys.argv[2])
main(sys.argv[1])
else:
pass
| 30.547368 | 97 | 0.60062 | 442 | 2,902 | 3.90724 | 0.257919 | 0.031268 | 0.017371 | 0.026057 | 0.251303 | 0.251303 | 0.170237 | 0.147076 | 0.107701 | 0.107701 | 0 | 0.022139 | 0.175052 | 2,902 | 94 | 98 | 30.87234 | 0.699248 | 0.141282 | 0 | 0.223684 | 0 | 0.013158 | 0.082661 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0.013158 | 0.013158 | 0 | 0.105263 | 0.039474 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
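# --- Worked example (editor's addition) ---
# checkwidth() adds kerning after every glyph, then subtracts one kerning:
#   checkwidth(u'ab', {u'a': 5, u'b': 6}, kerning=2) -> (5+2) + (6+2) - 2 = 13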
0df217f11454e76bb3aeb5e62d8886ef5b46e27b | 1,228 | py | Python | xclib/utils/dense.py | iksteen/pyxclib | 2948162dd780f8230a785abfd2ee57e8ab5cc156 | [
"MIT"
] | 1 | 2021-04-20T13:52:55.000Z | 2021-04-20T13:52:55.000Z | xclib/utils/dense.py | iksteen/pyxclib | 2948162dd780f8230a785abfd2ee57e8ab5cc156 | [
"MIT"
] | null | null | null | xclib/utils/dense.py | iksteen/pyxclib | 2948162dd780f8230a785abfd2ee57e8ab5cc156 | [
"MIT"
] | null | null | null | import numpy as np
from .sparse import normalize as _normalize
from .sparse import binarize as _binarize
def compute_dense_features(features, word_embeddings, method='wt_sum',
normalize=True, binarize=False):
"""
Compute dense features as per given sparse features and word embeddings
Arguments:
----------
features: csr_matrix
sparse features
word_embeddings: np.ndarray
dense embedding for each token in vocabulary
method: str, optional (default=wt_sum)
wt_sum or wt_avg
normalize: boolean, optional (default=True)
normalize features
binarize: boolean, optional (default=False)
binarize features
Returns:
--------
document_embeddings: np.ndarray
dense embedding for each document
"""
# convert the features to binary
if binarize:
features = _binarize(features, copy=True)
# l2 normalize
if normalize:
features = _normalize(features, copy=True)
document_embeddings = features @ word_embeddings
if method == 'wt_avg':
temp = np.array(features.sum(axis=1))+1e-5
document_embeddings = document_embeddings/temp
return document_embeddings
| 29.238095 | 75 | 0.672638 | 140 | 1,228 | 5.757143 | 0.385714 | 0.111663 | 0.081886 | 0.059553 | 0.099256 | 0.099256 | 0.099256 | 0 | 0 | 0 | 0 | 0.004353 | 0.251629 | 1,228 | 41 | 76 | 29.95122 | 0.872688 | 0.438925 | 0 | 0 | 0 | 0 | 0.019737 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
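# --- Illustrative usage (editor's addition; assumes xclib/scipy are installed) ---
# Two documents over a 4-token vocabulary with random 3-d token embeddings.
import numpy as np
from scipy.sparse import csr_matrix
features = csr_matrix(np.array([[1., 0., 2., 0.],
                                [0., 1., 0., 1.]]))
word_embeddings = np.random.rand(4, 3)
doc_emb = compute_dense_features(features, word_embeddings, method='wt_avg')
print(doc_emb.shape)  # expected: (2, 3)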
0df3219c9c394d735e2efe83d97bfc2dd7b7f56a | 2,381 | py | Python | examples/sim2real/utils.py | gujralsanyam22/pyrobot | a0448714857b684d8b280f710e9304988524d2e0 | [
"MIT"
] | 2,150 | 2019-06-12T20:55:41.000Z | 2022-03-21T07:14:51.000Z | examples/sim2real/utils.py | gujralsanyam22/pyrobot | a0448714857b684d8b280f710e9304988524d2e0 | [
"MIT"
] | 124 | 2019-06-22T17:12:27.000Z | 2022-02-26T11:43:13.000Z | examples/sim2real/utils.py | gujralsanyam22/pyrobot | a0448714857b684d8b280f710e9304988524d2e0 | [
"MIT"
] | 329 | 2019-06-13T03:03:54.000Z | 2022-03-30T07:04:55.000Z | # The MIT License
# Copyright (c) 2017 OpenAI (http://openai.com)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# the code is from on the publicly available implementation of the TD3 algorithm
# https://github.com/sfujim/TD3
import numpy as np
# Expects tuples of (state, next_state, action, reward, done)
class ReplayBuffer(object):
def __init__(self, max_size=1e6):
self.storage = []
self.max_size = max_size
self.ptr = 0
def add(self, data):
if len(self.storage) == self.max_size:
self.storage[int(self.ptr)] = data
self.ptr = (self.ptr + 1) % self.max_size
else:
self.storage.append(data)
def sample(self, batch_size):
ind = np.random.randint(0, len(self.storage), size=batch_size)
x, y, u, r, d = [], [], [], [], []
for i in ind:
X, Y, U, R, D = self.storage[i]
x.append(np.array(X, copy=False))
y.append(np.array(Y, copy=False))
u.append(np.array(U, copy=False))
r.append(np.array(R, copy=False))
d.append(np.array(D, copy=False))
return (
np.array(x),
np.array(y),
np.array(u),
np.array(r).reshape(-1, 1),
np.array(d).reshape(-1, 1),
)
| 38.403226 | 81 | 0.635867 | 343 | 2,381 | 4.379009 | 0.443149 | 0.046605 | 0.043276 | 0.023968 | 0.035952 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008576 | 0.265435 | 2,381 | 61 | 82 | 39.032787 | 0.8502 | 0.525409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.034483 | 0 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
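# --- Illustrative usage (editor's addition) ---
# Filling the buffer with random transitions and drawing a batch; the state
# and action dimensions are arbitrary here.
import numpy as np
buf = ReplayBuffer(max_size=1000)
for _ in range(32):
    s, s2 = np.random.rand(3), np.random.rand(3)  # state, next_state
    a, r, d = np.random.rand(1), 0.0, 0.0         # action, reward, done flag
    buf.add((s, s2, a, r, d))
x, y, u, r, d = buf.sample(batch_size=8)
print(x.shape, u.shape, r.shape)                  # expected: (8, 3) (8, 1) (8, 1)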
0df4ba5067412628c67412b6dede1e6a1974fd73 | 1,451 | py | Python | pygmentize/floorplan.py | RedlineResearch/floorplan | dbaa7e6649eed18707ff8a27ee3c33fa83b47fcf | [
"BSD-3-Clause",
"MIT"
] | 17 | 2019-10-21T13:09:49.000Z | 2022-01-26T03:06:35.000Z | pygmentize/floorplan.py | RedlineResearch/floorplan | dbaa7e6649eed18707ff8a27ee3c33fa83b47fcf | [
"BSD-3-Clause",
"MIT"
] | 5 | 2019-10-24T23:02:34.000Z | 2020-01-28T02:01:30.000Z | pygmentize/floorplan.py | RedlineResearch/floorplan | dbaa7e6649eed18707ff8a27ee3c33fa83b47fcf | [
"BSD-3-Clause",
"MIT"
] | null | null | null | from pygments.lexer import RegexLexer
from pygments.token import *
import re
__all__ = ['FloorplanLexer']
class FloorplanLexer(RegexLexer):
name = 'Floorplan'
aliases = ['floorplan']
filenames = ['*.flp']
flags = re.MULTILINE | re.UNICODE
reserved = ( 'ptr', 'flags', '_', 'Counter', 'bit'
, 'bits', 'byte', 'bytes', 'sizeof', 'List'
, 'word', 'words', 'page', 'pages'
, 'enum', 'union', 'seq', 'choice')
tokens = {
'root': [
# Whitespace:
(r'\s+', Text),
# Comments:
(r'\/\/.*$', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
# Identifiers:
(r'(%s)\b' % '|'.join(reserved), Keyword.Reserved),
(r'[A-Z]\w*', Keyword.Type),
(r'[a-z_]\w*', Name),
# Operators:
(r'({|}|#|@|\^|=>|\.\.\.|\.|->|:|,|=\||~\||\|)', Operator.Word),
(r'[-+\*\/\[\]\<\>]', Operator),
# Numeric Literals:
(r'0[xX][\da-fA-F]+', Number.Hex), # hex literals
(r'0[bB][01]+', Number.Bin), # binary literals
(r'\d+', Number.Integer), # decimal literals
# Special:
(r'[\(\)]', Operator), # parentheses
],
'comment': [
# Multiline Comments
(r'[^\*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[\*/]', Comment.Multiline),
],
}
| 27.377358 | 74 | 0.456926 | 131 | 1,451 | 5.015267 | 0.564886 | 0.073059 | 0.129376 | 0.012177 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00388 | 0.289456 | 1,451 | 52 | 75 | 27.903846 | 0.633366 | 0.102688 | 0 | 0.057143 | 0 | 0 | 0.225756 | 0.033359 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.085714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
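# --- Illustrative usage (editor's addition) ---
# Highlighting a snippet with the lexer above; the Floorplan source is a
# guess at plausible syntax, based only on the token rules defined here.
from pygments import highlight
from pygments.formatters import TerminalFormatter
src = "Heap -> 2 pages // top-level region\n"
print(highlight(src, FloorplanLexer(), TerminalFormatter()))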
0df593c1def415d2eed2a9f746ea18cf01424f0d | 8,816 | py | Python | blotter/server.py | antequant/blotter | 2c4bbee069fec199d97728a22d0b9263d5a145d9 | [
"MIT"
] | 1 | 2021-05-09T10:49:41.000Z | 2021-05-09T10:49:41.000Z | blotter/server.py | webclinic017/blotter | 2c4bbee069fec199d97728a22d0b9263d5a145d9 | [
"MIT"
] | null | null | null | blotter/server.py | webclinic017/blotter | 2c4bbee069fec199d97728a22d0b9263d5a145d9 | [
"MIT"
] | 3 | 2021-01-26T13:44:15.000Z | 2021-09-11T16:55:23.000Z | import asyncio
import concurrent.futures
from datetime import datetime, timedelta, timezone
from logging import getLogger
from typing import Awaitable, Callable, Iterator, Optional, Tuple, TypeVar
import grpc
from google.cloud import bigquery
import ib_insync
from blotter import blotter_pb2, blotter_pb2_grpc, request_helpers
from blotter.backfill import backfill_bars
from gcloud_service.error_handler import ErrorHandlerConfiguration
from blotter.ib_helpers import IBThread, qualify_contract_specifier
from blotter.options import snapshot_options, start_polling_options
from blotter.polling import PollingID, PollingManager
from blotter.streaming import StreamingID, StreamingManager
logger = getLogger(__name__)
_T = TypeVar("_T")
class Servicer(blotter_pb2_grpc.BlotterServicer):
"""
The implementation of the Blotter RPC service, responsible for handling client requests to start/stop blotting different instruments.
"""
@classmethod
def start(
cls,
port: int,
ib_thread: IBThread,
streaming_manager: StreamingManager,
error_handler: ErrorHandlerConfiguration,
executor: concurrent.futures.ThreadPoolExecutor = concurrent.futures.ThreadPoolExecutor(),
) -> Tuple["Servicer", grpc.Server]:
"""
Instantiates a server, binds it to the given port and begins accepting requests on `executor`.
"""
s = grpc.server(executor)
servicer = cls(ib_thread, streaming_manager, error_handler)
blotter_pb2_grpc.add_BlotterServicer_to_server(servicer, s)
s.add_insecure_port(f"[::]:{port}")
s.start()
return (servicer, s)
def __init__(
self,
ib_thread: IBThread,
streaming_manager: StreamingManager,
error_handler: ErrorHandlerConfiguration,
):
"""
Initializes this handler to invoke ib_insync via the given `ib_thread`.
"""
self._ib_thread = ib_thread
self._streaming_manager = streaming_manager
self._polling_manager = PollingManager(error_handler)
self._error_handler = error_handler
super().__init__()
def resume_streaming(self) -> None:
"""
Resumes any streaming market data queries that were interrupted on previous runs.
"""
streaming_ids = list(self._streaming_manager.resume_streaming(self._ib_thread))
logger.info(f"Resumed streaming IDs {streaming_ids}")
# TODO: Resume things on PollingManager too
def _run_in_ib_thread(
self, fn: Callable[[ib_insync.IB], Awaitable[_T]]
) -> "concurrent.futures.Future[_T]":
"""
Schedules work on the `IBThread` for this service, reporting any exceptions that occur.
"""
fut = self._ib_thread.schedule_async(fn)
def _report_future_exception(future: "concurrent.futures.Future[_T]") -> None:
with self._error_handler(f"Exception thrown in IB thread:"):
future.result()
fut.add_done_callback(_report_future_exception)
return fut
def LoadHistoricalData(
self,
request: blotter_pb2.LoadHistoricalDataRequest,
context: grpc.ServicerContext,
) -> Iterator[blotter_pb2.LoadHistoricalDataResponse]:
logger.info(f"LoadHistoricalData: {request}")
td = request_helpers.duration_timedelta_atleast(request.duration)
end_date = datetime.fromtimestamp(request.endTimestampUTC, tz=timezone.utc)
if td.days <= 10:
duration = request_helpers.duration_str(request.duration)
start_date = end_date - timedelta(seconds=1)
else:
logger.debug(f"Splitting requested duration {td}")
duration = request_helpers.duration_str(
blotter_pb2.Duration(count=10, unit=blotter_pb2.Duration.TimeUnit.DAYS)
)
start_date = end_date - td
async def _backfill(
ib_client: ib_insync.IB,
) -> Tuple[datetime, bigquery.LoadJob]:
nonlocal end_date
logger.info(
f"Backfilling {duration} from {end_date} of {request.contractSpecifier}"
)
return await backfill_bars(
ib_client,
contract_specifier=request.contractSpecifier,
end_date=end_date,
duration=duration,
bar_size=request_helpers.bar_size_str(request.barSize),
bar_source=request_helpers.historical_bar_source_str(request.barSource),
regular_trading_hours_only=request.regularTradingHoursOnly,
error_handler=self._error_handler,
)
while end_date > start_date:
(end_date, job) = self._run_in_ib_thread(_backfill).result()
logger.info(f"BigQuery backfill job launched: {job.job_id}")
yield blotter_pb2.LoadHistoricalDataResponse(backfillJobID=job.job_id)
def StartRealTimeData(
self,
request: blotter_pb2.StartRealTimeDataRequest,
context: grpc.ServicerContext,
) -> blotter_pb2.StartRealTimeDataResponse:
logger.info(f"StartRealTimeData: {request}")
async def _start_stream(ib_client: ib_insync.IB) -> StreamingID:
return await self._streaming_manager.start_stream(
ib_client,
contract=request.contractSpecifier,
bar_source=request_helpers.real_time_bar_source_str(request.barSource),
regular_trading_hours_only=request.regularTradingHoursOnly,
)
streaming_id = self._run_in_ib_thread(_start_stream).result()
logger.debug(f"Real-time bars streaming ID: {streaming_id}")
return blotter_pb2.StartRealTimeDataResponse(requestID=streaming_id)
def CancelRealTimeData(
self,
request: blotter_pb2.CancelRealTimeDataRequest,
context: grpc.ServicerContext,
) -> blotter_pb2.CancelRealTimeDataResponse:
logger.info(f"CancelRealTimeData: {request}")
async def _cancel_stream(ib_client: ib_insync.IB) -> None:
await self._streaming_manager.cancel_stream(
ib_client, streaming_id=StreamingID(request.requestID)
)
self._run_in_ib_thread(_cancel_stream)
return blotter_pb2.CancelRealTimeDataResponse()
def HealthCheck(
self, request: blotter_pb2.HealthCheckRequest, context: grpc.ServicerContext,
) -> blotter_pb2.HealthCheckResponse:
logger.info(f"HealthCheck: {request}")
return blotter_pb2.HealthCheckResponse()
def SnapshotOptionChain(
self,
request: blotter_pb2.SnapshotOptionChainRequest,
context: grpc.ServicerContext,
) -> blotter_pb2.SnapshotOptionChainResponse:
logger.info(f"SnapshotOptionChain: {request}")
async def _snapshot(ib_client: ib_insync.IB) -> bigquery.LoadJob:
return await snapshot_options(
ib_client, request.contractSpecifier, self._error_handler
)
job = self._run_in_ib_thread(_snapshot).result()
logger.info(f"BigQuery import job launched: {job.job_id}")
return blotter_pb2.SnapshotOptionChainResponse(importJobID=job.job_id)
def StartStreamingOptionChain(
self,
request: blotter_pb2.StartStreamingOptionChainRequest,
context: grpc.ServicerContext,
) -> blotter_pb2.StartStreamingOptionChainResponse:
logger.info(f"StartStreamingOptionChain: {request}")
async def _start_polling(ib_client: ib_insync.IB) -> PollingID:
polling_interval = PollingManager.DEFAULT_POLLING_INTERVAL
if request.pollingInterval:
polling_interval = timedelta(seconds=request.pollingInterval)
return await start_polling_options(
self._polling_manager,
polling_interval,
ib_client,
request.contractSpecifier,
)
polling_id = self._run_in_ib_thread(_start_polling).result()
logger.debug(f"Real-time bars streaming ID: {polling_id}")
return blotter_pb2.StartStreamingOptionChainResponse(requestID=polling_id)
def CancelStreamingOptionChain(
self,
request: blotter_pb2.CancelStreamingOptionChainRequest,
context: grpc.ServicerContext,
) -> blotter_pb2.CancelStreamingOptionChainResponse:
logger.info(f"CancelStreamingOptionChain: {request}")
async def _cancel_polling(ib_client: ib_insync.IB) -> None:
await self._polling_manager.cancel_polling(
ib_client, polling_id=PollingID(request.requestID)
)
self._run_in_ib_thread(_cancel_polling)
return blotter_pb2.CancelStreamingOptionChainResponse()
| 37.514894 | 137 | 0.681715 | 894 | 8,816 | 6.440716 | 0.237136 | 0.046891 | 0.021014 | 0.015804 | 0.202501 | 0.122265 | 0.10455 | 0.096214 | 0.0719 | 0.028135 | 0 | 0.00478 | 0.240699 | 8,816 | 234 | 138 | 37.675214 | 0.855393 | 0.05819 | 0 | 0.140351 | 0 | 0 | 0.076754 | 0.01684 | 0 | 0 | 0 | 0.004274 | 0 | 1 | 0.070175 | false | 0 | 0.099415 | 0 | 0.245614 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
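# --- Illustrative client sketch (editor's addition) ---
# Assuming the generated stub follows the usual gRPC naming convention
# (BlotterStub) and a server started via Servicer.start(port, ...) is
# listening on that port:
#
#   channel = grpc.insecure_channel('localhost:50051')  # port is arbitrary here
#   stub = blotter_pb2_grpc.BlotterStub(channel)
#   stub.HealthCheck(blotter_pb2.HealthCheckRequest())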
0df5af911e3f3139f8bc98bc1ee4ef005bead7ae | 5,859 | py | Python | src/ralph/dashboards/renderers.py | DoNnMyTh/ralph | 97b91639fa68965ad3fd9d0d2652a6545a2a5b72 | [
"Apache-2.0"
] | 1,668 | 2015-01-01T12:51:20.000Z | 2022-03-29T09:05:35.000Z | src/ralph/dashboards/renderers.py | hq-git/ralph | e2448caf02d6e5abfd81da2cff92aefe0a534883 | [
"Apache-2.0"
] | 2,314 | 2015-01-02T13:26:26.000Z | 2022-03-29T04:06:03.000Z | src/ralph/dashboards/renderers.py | hq-git/ralph | e2448caf02d6e5abfd81da2cff92aefe0a534883 | [
"Apache-2.0"
] | 534 | 2015-01-05T12:40:28.000Z | 2022-03-29T21:10:12.000Z | import calendar
import json
import logging
from urllib.parse import urlencode
from django.core.urlresolvers import NoReverseMatch, reverse
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from ralph.dashboards.helpers import encode_params, normalize_value
logger = logging.getLogger(__name__)
GRAPH_QUERY_SEP = '|'
def build_filters(labels, value):
params = labels.split(GRAPH_QUERY_SEP)
if len(params) == 1:
return {params[0]: value}
if len(params) == 2:
field, aggr = params
if aggr == 'year':
return {
'{}__gte'.format(field): '{}-01-01'.format(value),
'{}__lte'.format(field): '{}-12-31'.format(value),
}
if aggr == 'month':
year, month = value.split('-')
days_in_month = calendar.monthrange(int(year), int(month))[1]
return {
'{}__gte'.format(field): '{}-01'.format(value),
'{}__lte'.format(field): '{}-{}'.format(value, days_in_month),
}
if aggr == 'day':
return {
'{}__gte'.format(field): '{} 00:00:00'.format(value),
'{}__lte'.format(field): '{} 23:59:59'.format(value),
}
return {}
class ChartistGraphRenderer(object):
"""Renderer for Chartist.js."""
func = None
options = None
template_name = 'dashboard/templatetags/chartist_render_graph.html'
_default_options = {
'distributeSeries': False,
'chartPadding': 20,
'height': '350px'
}
plugins = {'ctBarLabels': {}}
graph_query_sep = GRAPH_QUERY_SEP
def __init__(self, obj):
self.obj = obj
def get_func(self):
if not self.func:
raise NotImplementedError('Specify func attr.')
return self.func
def get_template_name(self):
if not self.template_name:
raise NotImplementedError('Specify template_name attr.')
return self.template_name
def get_options(self, data=None):
options = self._default_options.copy()
if isinstance(self.options, dict):
options.update(self.options)
return options
def _labels2urls(self, model, graph_id, values):
meta = model._meta
base_url = reverse(
"admin:%s_%s_changelist" % (
meta.app_label, meta.model_name
)
)
urls = []
for value in values:
labels = self.obj.params['labels']
url = '?'.join([
base_url,
urlencode({
'graph-query': encode_params({
'pk': graph_id,
'filters': build_filters(
labels=labels,
value=normalize_value(
label=labels.split(GRAPH_QUERY_SEP)[0],
model_class=self.obj.model.model_class(),
value=value,
)
),
})
}),
])
urls.append(url)
return urls
def _series_with_urls(self, series, urls):
series_with_urls = []
for value, url in zip(series, urls):
series_with_urls.append({
'value': value,
'meta': {
'clickUrl': url,
}
})
return series_with_urls
def post_data_hook(self, data):
try:
click_urls = self._labels2urls(
self.obj.changelist_model, self.obj.id, data['labels']
)
data['series'] = self._series_with_urls(
data['series'], click_urls
)
except NoReverseMatch as e:
# graph will be non-clickable when model is not exposed in
# admin
logger.error(e)
return data
def render(self, context):
if not context:
context = {}
error = None
data = {}
try:
data = self.obj.get_data()
data = self.post_data_hook(data)
except Exception as e:
error = str(e)
finally:
options = self.get_options(data)
context.update({
'error': error,
'graph': self.obj,
'options': json.dumps(options),
'options_raw': options,
'func': self.func,
'plugins': self.plugins,
})
context.update(**data)
return mark_safe(render_to_string(self.get_template_name(), context))
class HorizontalBar(ChartistGraphRenderer):
func = 'Bar'
options = {
'horizontalBars': True,
'axisY': {
'offset': 70,
},
'axisX': {
'onlyInteger': True,
}
}
class VerticalBar(ChartistGraphRenderer):
func = 'Bar'
options = {
'axisY': {
'onlyInteger': True,
}
}
class PieChart(ChartistGraphRenderer):
func = 'Pie'
_default_options = {
'distributeSeries': True,
}
options = {
'donut': True,
}
def get_options(self, data):
series = data.get('series')
if series:
self.options['total'] = sum(s['value'] for s in series)
return super().get_options(data)
def include_values_in_labels(self, data):
for idx, pack in enumerate(zip(data['labels'], data['series'])):
label, series = pack
new_label = "{} ({})".format(label, series['value'])
data['labels'][idx] = new_label
return data
def post_data_hook(self, data):
super().post_data_hook(data)
data = self.include_values_in_labels(data)
return data
| 29.00495 | 78 | 0.522103 | 585 | 5,859 | 5.035897 | 0.276923 | 0.019009 | 0.022064 | 0.020367 | 0.10353 | 0.033944 | 0 | 0 | 0 | 0 | 0 | 0.009626 | 0.361666 | 5,859 | 201 | 79 | 29.149254 | 0.778075 | 0.01519 | 0 | 0.139535 | 0 | 0 | 0.087454 | 0.01232 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0 | 0.046512 | 0 | 0.302326 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
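# --- Worked example (editor's addition) ---
# build_filters() turns a 'field|aggregation' label plus a clicked value into
# Django ORM filter kwargs, e.g.:
#   build_filters('created|month', '2020-05')
#   -> {'created__gte': '2020-05-01', 'created__lte': '2020-05-31'}
# ('created' is a hypothetical field name; monthrange(2020, 5)[1] gives 31.)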
0df659907ce974dd2868e7904a1d5d37e351ed0f | 2,094 | py | Python | UI/ui_aliasAndNotes.py | ShowingCloud/Capricorn | cfe521975ca23f7502509595f1fd3f8c6c5e1c55 | [
"MIT"
] | null | null | null | UI/ui_aliasAndNotes.py | ShowingCloud/Capricorn | cfe521975ca23f7502509595f1fd3f8c6c5e1c55 | [
"MIT"
] | null | null | null | UI/ui_aliasAndNotes.py | ShowingCloud/Capricorn | cfe521975ca23f7502509595f1fd3f8c6c5e1c55 | [
"MIT"
] | null | null | null | #coding=utf-8
'''
Created on 2013-3-8
@author: pyroshow
'''
from PySide.QtGui import *
from Models.LocalDB import *
from datetime import datetime
class ModifyAliasNotes(QDialog):
    def __init__(self, sess, UUID, Alias, Notes, parent=None):
QDialog.__init__(self, parent)
self.group = QGroupBox("Modify")
self.aliasLab = QLabel("Alias:")
self.aliasEdit = QLineEdit()
self.notesLab = QLabel("Notes:")
self.notesTxt = QTextEdit()
vbox = QVBoxLayout()
vbox.addWidget(self.aliasLab)
vbox.addWidget(self.aliasEdit)
vbox.addWidget(self.notesLab)
vbox.addWidget(self.notesTxt)
self.group.setLayout(vbox)
self.saveBtn = QPushButton("Save")
self.cancelBtn = QPushButton("Cancel")
btnLayout = QHBoxLayout()
btnLayout.addStretch(1)
btnLayout.addWidget(self.saveBtn)
btnLayout.addWidget(self.cancelBtn)
mainLayout = QVBoxLayout()
mainLayout.addWidget(self.group)
mainLayout.addLayout(btnLayout)
self.setLayout(mainLayout)
self.Alias = Alias
self.Notes = Notes
self.UUID = UUID
self.aliasEdit.setText(self.Alias)
self.notesTxt.setText(self.Notes)
self.saveBtn.clicked.connect(self.saveData)
self.cancelBtn.clicked.connect(self.cancel)
self.sess = sess
def saveData(self):
with self.sess.begin():
            record = self.sess.query(FireworksData).filter_by(UUID=self.UUID).first()
record.CTime = datetime.utcnow()
record.Alias = self.aliasEdit.text()
record.Notes = self.notesTxt.toPlainText()
self.aliasEdit.setText("")
self.notesTxt.setText("")
self.accept()
self.close()
def cancel(self):
self.close()
| 24.635294 | 88 | 0.550143 | 192 | 2,094 | 5.953125 | 0.369792 | 0.079615 | 0.059493 | 0.041995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005852 | 0.347182 | 2,094 | 84 | 89 | 24.928571 | 0.830285 | 0.024355 | 0 | 0.042553 | 0 | 0 | 0.013759 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0 | 0.06383 | 0 | 0.148936 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0df79d19d0d5139b0f49f4ce3430b3e3555f7c4c | 496 | py | Python | sync.py | bathelomeo/asynchronous_operations | 587fed98810ec54d5eec6359e703dd3d4ed5b029 | [
"MIT"
] | null | null | null | sync.py | bathelomeo/asynchronous_operations | 587fed98810ec54d5eec6359e703dd3d4ed5b029 | [
"MIT"
] | null | null | null | sync.py | bathelomeo/asynchronous_operations | 587fed98810ec54d5eec6359e703dd3d4ed5b029 | [
"MIT"
] | null | null | null | from timeit import default_timer
import requests
def load_data(delay):
print(f'Starting {delay} second timer')
    text = requests.get(f'https://httpbin.org/delay/{delay}').text
    print(f'Completed {delay} second timer')
return text
def run_demo():
start_time = default_timer()
two_data = load_data(2)
three_data = load_data(3)
elapsed_time = default_timer() - start_time
    print(f'The operation took {elapsed_time:.2f} seconds')
def main():
run_demo()
main()
| 21.565217 | 67 | 0.693548 | 72 | 496 | 4.583333 | 0.486111 | 0.109091 | 0.09697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007407 | 0.183468 | 496 | 22 | 68 | 22.545455 | 0.807407 | 0 | 0 | 0 | 0 | 0 | 0.272177 | 0.137097 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.125 | 0 | 0.375 | 0.1875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
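# --- Note (editor's addition) ---
# Because load_data(2) and load_data(3) run back to back, this synchronous
# demo takes roughly 2 + 3 = 5 seconds plus network overhead, which is what
# the elapsed-time print reports.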
0df81411a14b3a4796c7dba69369570d42e38320 | 511 | py | Python | setup.py | cdgriffith/pyctf | 55ef40fc784b38afbf377ec0688afa181b74d714 | [
"MIT"
] | 1 | 2020-04-10T00:09:57.000Z | 2020-04-10T00:09:57.000Z | setup.py | cdgriffith/pyctf | 55ef40fc784b38afbf377ec0688afa181b74d714 | [
"MIT"
] | 1 | 2015-07-13T20:04:07.000Z | 2015-07-13T20:04:07.000Z | setup.py | cdgriffith/pyctf | 55ef40fc784b38afbf377ec0688afa181b74d714 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'requirements.txt')) as f:
requirements = f.readlines()
setup(
name="pyctf",
version="0.0",
packages=['pyctf'],
package_dir={'pyctf': 'pyctf'},
include_package_data=True,
install_requires=requirements,
entry_points={
'console_scripts': [
'pyctf_server = pyctf.pyctf_server:main'
]}
) | 22.217391 | 55 | 0.639922 | 63 | 511 | 5 | 0.68254 | 0.057143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007353 | 0.201566 | 511 | 23 | 56 | 22.217391 | 0.764706 | 0.082192 | 0 | 0 | 0 | 0 | 0.196581 | 0.049145 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0df8c6c3e4e25589092f6e714c1d72a9fe4db8d5 | 664 | py | Python | scripts/npc/pipe3.py | pantskun/swordiemen | fc33ffec168e6611587fdc75de8270f6827a4176 | [
"MIT"
] | null | null | null | scripts/npc/pipe3.py | pantskun/swordiemen | fc33ffec168e6611587fdc75de8270f6827a4176 | [
"MIT"
] | null | null | null | scripts/npc/pipe3.py | pantskun/swordiemen | fc33ffec168e6611587fdc75de8270f6827a4176 | [
"MIT"
] | null | null | null | # 3rd Pipe Handle (2111019) | Home of the Missing Alchemist (261000001)
undergroundStudy = 3339
if sm.hasQuest(undergroundStudy) or sm.hasQuestCompleted(undergroundStudy):
# The dummy quest ID 7063 will be used to determine if the pipes are being activated in the right order
# This needs to be activated after the first pipe
pipeStatus = sm.getQRValue(7063)
if pipeStatus == "1":
sm.setQRValue(7063, "3", False)
sm.sendNext("The pipe makes a sharp, shrieking metal noise, and turns a little to the left.")
else:
sm.sendSayOkay("The pipe didn't move one bit.")
else:
sm.sendSayOkay("The pipe didn't move one bit.") | 44.266667 | 107 | 0.707831 | 98 | 664 | 4.795918 | 0.622449 | 0.044681 | 0.07234 | 0.085106 | 0.165957 | 0.165957 | 0.165957 | 0.165957 | 0.165957 | 0.165957 | 0 | 0.06654 | 0.207831 | 664 | 15 | 108 | 44.266667 | 0.826996 | 0.329819 | 0 | 0.4 | 0 | 0 | 0.312217 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
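# --- Note (editor's addition) ---
# The dummy quest record 7063 acts as a small state machine across the pipe
# scripts: this third pipe only reacts when the record is "1" (presumably set
# by the first pipe) and advances it to "3", enforcing the activation order.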
0dfa57d99c628dfc879e102d39f2b537d771fbed | 3,078 | py | Python | scripts/import-politique-de-contribution-open-source.py | pajachiet/numerique.gouv.fr | 0c4b9c0978da0320a7875c3bd54a1d241e3bba71 | [
"MIT"
] | 17 | 2018-10-05T09:17:41.000Z | 2022-01-14T10:19:31.000Z | scripts/import-politique-de-contribution-open-source.py | Indianajaune/numerique.gouv.fr | 0c4b9c0978da0320a7875c3bd54a1d241e3bba71 | [
"MIT"
] | 10 | 2018-10-15T07:50:38.000Z | 2022-02-26T04:18:49.000Z | scripts/import-politique-de-contribution-open-source.py | Indianajaune/numerique.gouv.fr | 0c4b9c0978da0320a7875c3bd54a1d241e3bba71 | [
"MIT"
] | 9 | 2018-10-12T12:43:01.000Z | 2021-11-12T20:34:08.000Z | # -*- coding: UTF-8 -*-
from git import Repo
import shutil
import os
import glob
import re
import yaml
def get_yaml(f):
pointer = f.tell()
if f.readline() != '---\n':
f.seek(pointer)
return ''
readline = iter(f.readline, '')
readline = iter(readline.next, '---\n')
return ''.join(readline)
def supprime_accentuation(ligne):
ligne = ligne.encode('UTF-8')
accents = {
'a': ['à', 'ã', 'á', 'â'],
'e': ['é', 'è', 'ê', 'ë'],
'i': ['î', 'ï'],
'u': ['ù', 'ü', 'û'],
'o': ['ô', 'ö'],
'-': [' ']
}
for (char, accented_chars) in accents.iteritems():
for accented_char in accented_chars:
ligne = ligne.replace(accented_char, char)
return ligne.lower()
def write_yaml_md(dir, page):
page.markdownify_content()
filename = dir+"/"+page.filename
if os.path.exists(filename):
os.remove(filename)
with open(filename, 'a') as the_file:
the_file.write('---\n')
data = dict(
title = page.title,
menu = dict(
title = page.title_menu,
weight = page.title_weight))
yaml.dump(data, the_file, default_flow_style=False)
the_file.write('layout: politique-logiciel-libre\n')
permalink = "/publications/politique-logiciel-libre/"
if page.title_weight != 10:
permalink = permalink + re.sub(r'(.*)\.md',r'\1',page.filename)+'/'
re.purge()
the_file.write('permalink: '+permalink)
the_file.write('\n---\n')
the_file.write(page.content)
class Page:
def __init__(self, filename, content, title, title_menu,title_weight):
self.filename = filename
self.content = content
self.title = title
self.title_menu = title_menu
self.title_weight = title_weight
def markdownify_content(self):
self.content = re.sub(r'({{% question) "(.*)"(\s*%}})',r'### \2', self.content)
re.purge()
self.content = re.sub(r'{{< relref "(\w*)\.md[#\w\-éèà]*"\s*>}}\s*',r'\1', self.content)
re.purge()
self.content = re.sub(r'{{% (\w*) "(.*)" *%}}([\s\S]*?){{% \/\1 %}}',r'*\2*\3', self.content)
re.purge()
self.content = re.sub(r'\* Exemple : <.*\)',r'', self.content)
re.purge()
self.content = re.sub(r'(#+)\s',r'\1# ', self.content)
re.purge()
rw_dir = './repo'
dirpath = os.path.join(rw_dir)
if os.path.exists(dirpath) and os.path.isdir(dirpath):
shutil.rmtree(dirpath)
repo = Repo.clone_from("https://github.com/DISIC/politique-de-contribution-open-source.git", os.path.join(rw_dir), branch='master')
pages = []
for filename in glob.glob(rw_dir+"/*"):
match = re.search('\.*(?<!\.en)(?<!\README)\.md', filename)
if match:
with open(filename) as f:
            config = yaml.safe_load(get_yaml(f))  # safe_load: front matter is plain data
content = f.read()
page = Page(re.sub(r'\.\/.*\/(.*\.md)',r'\1',filename), content, config['title'], config['menu']['main']['name'], config['menu']['main']['weight'])
re.purge()
pages.append(page)
for page in pages:
write_yaml_md('../_politique-logiciel-libre', page)
if os.path.exists(dirpath) and os.path.isdir(dirpath):
shutil.rmtree(dirpath)
| 30.475248 | 154 | 0.594217 | 425 | 3,078 | 4.207059 | 0.308235 | 0.067673 | 0.072707 | 0.044743 | 0.222036 | 0.171141 | 0.159955 | 0.146532 | 0.146532 | 0.068233 | 0 | 0.004796 | 0.187135 | 3,078 | 100 | 155 | 30.78 | 0.709832 | 0.006823 | 0 | 0.127907 | 0 | 0 | 0.159411 | 0.049755 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05814 | false | 0 | 0.069767 | 0 | 0.174419 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
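# --- Worked example (editor's addition) ---
# supprime_accentuation (Python 2, byte-string based) strips accents and
# replaces spaces with hyphens before lowercasing, e.g.:
#   supprime_accentuation(u'Liberté chérie') -> 'liberte-cherie'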
0dfa5e71db2afe7ed5775f869de18eda648f4a6e | 8,741 | py | Python | tools/test_idf_py/test_idf_py.py | dreamcmi/esp-idf | 2024ae943466a967f48607c3bd9c258526f08b10 | [
"Apache-2.0"
] | null | null | null | tools/test_idf_py/test_idf_py.py | dreamcmi/esp-idf | 2024ae943466a967f48607c3bd9c258526f08b10 | [
"Apache-2.0"
] | null | null | null | tools/test_idf_py/test_idf_py.py | dreamcmi/esp-idf | 2024ae943466a967f48607c3bd9c258526f08b10 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# SPDX-FileCopyrightText: 2019-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import os
import subprocess
import sys
from unittest import TestCase, main, mock
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
import idf
except ImportError:
sys.path.append('..')
import idf
current_dir = os.path.dirname(os.path.realpath(__file__))
idf_py_path = os.path.join(current_dir, '..', 'idf.py')
extension_path = os.path.join(current_dir, 'test_idf_extensions', 'test_ext')
link_path = os.path.join(current_dir, '..', 'idf_py_actions', 'test_ext')
class TestWithoutExtensions(TestCase):
@classmethod
def setUpClass(cls):
# Disable the component manager and extra extensions for these tests
cls.env_patcher = mock.patch.dict(os.environ, {
'IDF_COMPONENT_MANAGER': '0',
'IDF_EXTRA_ACTIONS_PATH': '',
})
cls.env_patcher.start()
super().setUpClass()
class TestExtensions(TestWithoutExtensions):
def test_extension_loading(self):
try:
os.symlink(extension_path, link_path)
os.environ['IDF_EXTRA_ACTIONS_PATH'] = os.path.join(current_dir, 'extra_path')
output = subprocess.check_output([sys.executable, idf_py_path, '--help'],
env=os.environ).decode('utf-8', 'ignore')
self.assertIn('--test-extension-option', output)
self.assertIn('test_subcommand', output)
self.assertIn('--some-extension-option', output)
self.assertIn('extra_subcommand', output)
finally:
os.remove(link_path)
def test_extension_execution(self):
try:
os.symlink(extension_path, link_path)
os.environ['IDF_EXTRA_ACTIONS_PATH'] = ';'.join([os.path.join(current_dir, 'extra_path')])
output = subprocess.check_output(
[sys.executable, idf_py_path, '--some-extension-option=awesome', 'test_subcommand', 'extra_subcommand'],
env=os.environ).decode('utf-8', 'ignore')
self.assertIn('!!! From some global callback: awesome', output)
self.assertIn('!!! From some subcommand', output)
self.assertIn('!!! From test global callback: test', output)
self.assertIn('!!! From some subcommand', output)
finally:
os.remove(link_path)
def test_hidden_commands(self):
try:
os.symlink(extension_path, link_path)
os.environ['IDF_EXTRA_ACTIONS_PATH'] = ';'.join([os.path.join(current_dir, 'extra_path')])
output = subprocess.check_output([sys.executable, idf_py_path, '--help'],
env=os.environ).decode('utf-8', 'ignore')
self.assertIn('test_subcommand', output)
self.assertNotIn('hidden_one', output)
finally:
os.remove(link_path)
class TestDependencyManagement(TestWithoutExtensions):
def test_dependencies(self):
result = idf.init_cli()(
args=['--dry-run', 'flash'],
standalone_mode=False,
)
self.assertEqual(['flash'], list(result.keys()))
def test_order_only_dependencies(self):
result = idf.init_cli()(
args=['--dry-run', 'build', 'fullclean', 'all'],
standalone_mode=False,
)
self.assertEqual(['fullclean', 'all'], list(result.keys()))
def test_repeated_dependencies(self):
result = idf.init_cli()(
args=['--dry-run', 'fullclean', 'app', 'fullclean', 'fullclean'],
standalone_mode=False,
)
self.assertEqual(['fullclean', 'app'], list(result.keys()))
def test_complex_case(self):
result = idf.init_cli()(
args=['--dry-run', 'clean', 'monitor', 'clean', 'fullclean', 'flash'],
standalone_mode=False,
)
self.assertEqual(['fullclean', 'clean', 'flash', 'monitor'], list(result.keys()))
    def test_duplicated_commands_warning(self):
capturedOutput = StringIO()
sys.stderr = capturedOutput
idf.init_cli()(
args=['--dry-run', 'clean', 'monitor', 'build', 'clean', 'fullclean', 'all'],
standalone_mode=False,
)
sys.stderr = sys.__stderr__
self.assertIn(
'WARNING: Commands "all", "clean" are found in the list of commands more than once.',
capturedOutput.getvalue())
sys.stderr = capturedOutput
idf.init_cli()(
args=['--dry-run', 'clean', 'clean'],
standalone_mode=False,
)
sys.stderr = sys.__stderr__
self.assertIn(
'WARNING: Command "clean" is found in the list of commands more than once.', capturedOutput.getvalue())
class TestVerboseFlag(TestWithoutExtensions):
def test_verbose_messages(self):
output = subprocess.check_output(
[
sys.executable,
idf_py_path,
'-C%s' % current_dir,
'-v',
'test-verbose',
], env=os.environ).decode('utf-8', 'ignore')
self.assertIn('Verbose mode on', output)
def test_verbose_messages_not_shown_by_default(self):
output = subprocess.check_output(
[
sys.executable,
idf_py_path,
'-C%s' % current_dir,
'test-verbose',
], env=os.environ).decode('utf-8', 'ignore')
self.assertIn('Output from test-verbose', output)
self.assertNotIn('Verbose mode on', output)
class TestGlobalAndSubcommandParameters(TestWithoutExtensions):
def test_set_twice_same_value(self):
"""Can set -D twice: globally and for subcommand if values are the same"""
idf.init_cli()(
args=['--dry-run', '-DAAA=BBB', '-DCCC=EEE', 'build', '-DAAA=BBB', '-DCCC=EEE'],
standalone_mode=False,
)
def test_set_twice_different_values(self):
"""Cannot set -D twice: for command and subcommand of idf.py (with different values)"""
with self.assertRaises(idf.FatalError):
idf.init_cli()(
args=['--dry-run', '-DAAA=BBB', 'build', '-DAAA=EEE', '-DCCC=EEE'],
standalone_mode=False,
)
class TestDeprecations(TestWithoutExtensions):
def test_exit_with_error_for_subcommand(self):
try:
subprocess.check_output(
[sys.executable, idf_py_path, '-C%s' % current_dir, 'test-2'], env=os.environ, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
self.assertIn('Error: Command "test-2" is deprecated and was removed.', e.output.decode('utf-8', 'ignore'))
def test_exit_with_error_for_option(self):
try:
subprocess.check_output(
[sys.executable, idf_py_path, '-C%s' % current_dir, '--test-5=asdf'],
env=os.environ,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
self.assertIn(
'Error: Option "test_5" is deprecated since v2.0 and was removed in v3.0.',
e.output.decode('utf-8', 'ignore'))
def test_deprecation_messages(self):
output = subprocess.check_output(
[
sys.executable,
idf_py_path,
'-C%s' % current_dir,
'--test-0=a',
'--test-1=b',
'--test-2=c',
'--test-3=d',
'test-0',
'--test-sub-0=sa',
'--test-sub-1=sb',
'ta',
'test-1',
],
env=os.environ,
stderr=subprocess.STDOUT).decode('utf-8', 'ignore')
self.assertIn('Warning: Option "test_sub_1" is deprecated and will be removed in future versions.', output)
self.assertIn(
'Warning: Command "test-1" is deprecated and will be removed in future versions. '
'Please use alternative command.', output)
self.assertIn('Warning: Option "test_1" is deprecated and will be removed in future versions.', output)
self.assertIn(
'Warning: Option "test_2" is deprecated and will be removed in future versions. '
'Please update your parameters.', output)
self.assertIn('Warning: Option "test_3" is deprecated and will be removed in future versions.', output)
self.assertNotIn('"test-0" is deprecated', output)
self.assertNotIn('"test_0" is deprecated', output)
if __name__ == '__main__':
main()
| 37.676724 | 120 | 0.588491 | 971 | 8,741 | 5.124614 | 0.205973 | 0.048232 | 0.036174 | 0.038585 | 0.626005 | 0.575362 | 0.483119 | 0.469453 | 0.410571 | 0.358722 | 0 | 0.006676 | 0.280288 | 8,741 | 231 | 121 | 37.839827 | 0.784295 | 0.039355 | 0 | 0.42246 | 0 | 0 | 0.22775 | 0.02219 | 0 | 0 | 0 | 0 | 0.15508 | 1 | 0.085562 | false | 0 | 0.053476 | 0 | 0.171123 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
216ddf3f3bee18f9ef70cbc6eb7c66408981e493 | 938 | py | Python | more_exercises/21.py | ilante/programming_immanuela_englander | 45d51c99b09ae335a67e03ac5ea79fc775bdf0bd | [
"MIT"
] | null | null | null | more_exercises/21.py | ilante/programming_immanuela_englander | 45d51c99b09ae335a67e03ac5ea79fc775bdf0bd | [
"MIT"
] | null | null | null | more_exercises/21.py | ilante/programming_immanuela_englander | 45d51c99b09ae335a67e03ac5ea79fc775bdf0bd | [
"MIT"
] | null | null | null | # 21. rewrite the program from steps 18 to 20 so that it works for reverse complement (i.e. the user types the strings “ATTCGT” and “AAGGAT” and she gets the previous example as result)
#18. print 3 rows: the first and the third for the sequences,
#the second one should contain “|” if the bases are complementary, “X” if not. Example:
#ATTCGT
#||X|X|
#TAGGAA
# now check whether the first string is the reverse complement of the other:
# seq1 is indexed with [i] while seq2 is read from the end, starting at -1
a=input('Give me sequence 1 in capitals')
b=input("Give me another sequence 2 in capitals")
assert len(a) == len(b), "sequences must be the same length"
def comp(seq1,seq2):
compl=[]
for i in range(len(seq1)):
        if (seq1[i] == 'A' and seq2[-1-i] == 'T') or (seq1[i] == 'T' and seq2[-1-i] == 'A') or (seq1[i] == 'G' and seq2[-1-i] == 'C') or (seq1[i] == 'C' and seq2[-1-i] == 'G'):
compl.append('|')
else:
compl.append('X')
return ''.join(compl)
print(a)
print(comp(a, b))
print(b)
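# Expected run for the example in the comment above: entering ATTCGT and
# AAGGAT should print
#   ATTCGT
#   ||X|X|
#   TAGGAA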
| 37.52 | 185 | 0.634328 | 166 | 938 | 3.584337 | 0.506024 | 0.033613 | 0.053782 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031165 | 0.21322 | 938 | 24 | 186 | 39.083333 | 0.775068 | 0.485075 | 0 | 0 | 0 | 0 | 0.164557 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.142857 | 0.214286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
216fa0157d7ec2be565bf762ad39114041c00593 | 929 | py | Python | examples/conjunctive_graphs.py | Letractively/rdflib | b92e162efd61506d19ed25a12960cd32f0c7432e | [
"BSD-3-Clause"
] | null | null | null | examples/conjunctive_graphs.py | Letractively/rdflib | b92e162efd61506d19ed25a12960cd32f0c7432e | [
"BSD-3-Clause"
] | null | null | null | examples/conjunctive_graphs.py | Letractively/rdflib | b92e162efd61506d19ed25a12960cd32f0c7432e | [
"BSD-3-Clause"
] | null | null | null | from rdflib import Namespace, BNode, Literal, URIRef
from rdflib.graph import Graph, ConjunctiveGraph
from rdflib.plugins.memory import IOMemory
ns = Namespace("http://love.com#")
mary = URIRef("http://love.com/lovers/mary#")
john = URIRef("http://love.com/lovers/john#")
cmary = URIRef("http://love.com/lovers/mary#")
cjohn = URIRef("http://love.com/lovers/john#")
store = IOMemory()
g = ConjunctiveGraph(store=store)
g.bind("love", ns)
gmary = Graph(store=store, identifier=cmary)
gmary.add((mary, ns['hasName'], Literal("Mary")))
gmary.add((mary, ns['loves'], john))
gjohn = Graph(store=store, identifier=cjohn)
gjohn.add((john, ns['hasName'], Literal("John")))
# enumerate contexts
for c in g.contexts():
print("-- %s " % c)
# separate graphs
print(gjohn.serialize(format='n3'))
print("===================")
print(gmary.serialize(format='n3'))
print("===================")
# full graph
print(g.serialize(format='n3'))
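# A small extension sketch (not part of the original example): the
# ConjunctiveGraph unions its contexts, so one triple query spans both
# lovers' graphs at once.
for s, p, o in g.triples((None, ns['loves'], None)):
    print("%s loves %s" % (s, o))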
| 24.447368 | 52 | 0.674919 | 125 | 929 | 5.016 | 0.344 | 0.063796 | 0.087719 | 0.108453 | 0.172249 | 0.172249 | 0 | 0 | 0 | 0 | 0 | 0.003559 | 0.092573 | 929 | 37 | 53 | 25.108108 | 0.740214 | 0.046286 | 0 | 0.086957 | 0 | 0 | 0.236693 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.130435 | 0.26087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21706cb4bd6e902ccc7617a54c669f011eefffb2 | 8,158 | py | Python | pyflocker/ciphers/backends/symmetric.py | fossabot/pyflocker | 293df31e32fb796df5c7fba803846c9872e67485 | [
"MIT"
] | null | null | null | pyflocker/ciphers/backends/symmetric.py | fossabot/pyflocker | 293df31e32fb796df5c7fba803846c9872e67485 | [
"MIT"
] | null | null | null | pyflocker/ciphers/backends/symmetric.py | fossabot/pyflocker | 293df31e32fb796df5c7fba803846c9872e67485 | [
"MIT"
] | null | null | null | """Tools for Symmetric ciphers common to all the backends."""
from __future__ import annotations
import hmac
import typing
from functools import partial
from .. import base, exc
class FileCipherWrapper(base.BaseAEADCipher):
"""
    Wraps ciphers that support the BaseAEADCipher interface and provides
    a file encryption and decryption facility on top of them.
"""
def __init__(
self,
cipher: base.BaseAEADCipher,
file: typing.BinaryIO,
offset: int = 0,
):
"""Initialize a file cipher wrapper.
Args:
cipher (:any:`base.BaseAEADCipher`):
A cipher that supports :py:class:`BaseAEADCipher` interface.
file (filelike):
A file or file-like object.
offset (int):
The difference between the length of ``in`` buffer and
``out`` buffer in ``update_into`` method of a BaseAEADCipher.
"""
if not isinstance(cipher, base.BaseAEADCipher):
raise TypeError("cipher must implement BaseAEADCipher interface.")
# the cipher already has an internal context
self._ctx = cipher
self._file = file
self._tag = None
self._encrypting = self._ctx.is_encrypting()
self._offset = offset
def authenticate(self, data):
if self._ctx is None:
raise exc.AlreadyFinalized
return self._ctx.authenticate(data)
def is_encrypting(self):
return self._encrypting
    def update(self, blocksize: int = 16384) -> typing.Optional[bytes]:
"""
Reads at most ``blocksize`` bytes from ``file``, passes through the
cipher and returns the cipher's output.
Args:
blocksize (int): Maximum amount of data to read in a single call.
Returns:
            bytes: Encrypted or decrypted data, or ``None`` once the
            wrapped file is exhausted.
Raises:
AlreadyFinalized: if the cipher has been finalized.
"""
if self._ctx is None:
raise exc.AlreadyFinalized
        if data := self._file.read(blocksize):
            return self._ctx.update(data)
        return None  # end of the wrapped file reached
def update_into(
self,
file: typing.BinaryIO,
tag: typing.Optional[bytes] = None,
blocksize: int = 16384,
) -> None:
"""
        Read from the wrapped source file, pass the data through the cipher
        and write the cipher's output to ``file``. Use this method if you
        want to encrypt/decrypt the wrapped file and write the result to
        another file in a single pass.
This method is very fast
(compared to :py:meth:`FileCipherWrapper.update`) because no
intermediate copies of data are made during the entire operation.
Args:
file (filelike): File to write the output of the cipher into.
tag (bytes-like, None):
The tag to verify decryption. If the file is being decrypted,
this must be passed.
blocksize (int): Maximum amount of data to read in a single call.
Raises:
AlreadyFinalized: if the cipher has been finalized.
ValueError: if the file is being decrypted and tag is not supplied.
"""
if self._ctx is None:
raise exc.AlreadyFinalized
if not self._encrypting and tag is None:
raise ValueError("tag is required for decryption")
buf = memoryview(bytearray(blocksize + self._offset))
rbuf = buf[:blocksize]
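        # ``rbuf`` is a view into ``buf``: once ``update_into`` fills ``buf``
        # with the cipher output, writing ``rbuf`` below emits the
        # transformed bytes without an extra copy.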
# localize variables for better performance
offset = self._offset
write = file.write
reads = iter(partial(self._file.readinto, rbuf), 0)
update = self._ctx.update_into
for i in reads:
if i < blocksize:
rbuf = rbuf[:i]
buf = buf[: i + offset]
update(rbuf, buf)
write(rbuf)
self.finalize(tag)
def finalize(self, tag=None):
if self._ctx is None:
raise exc.AlreadyFinalized
try:
self._ctx.finalize(tag)
finally:
self._tag, self._ctx = self._ctx.calculate_tag(), None
def calculate_tag(self):
if self._ctx is not None:
raise exc.NotFinalized("Cipher has already been finalized.")
return self._tag
StreamCipherWrapper = FileCipherWrapper
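# A minimal usage sketch (hypothetical file names; ``aead_cipher`` stands in
# for any backend object implementing BaseAEADCipher in encryption mode):
#
#     with open('plain.bin', 'rb') as inf, open('enc.bin', 'wb') as outf:
#         enc = FileCipherWrapper(aead_cipher, inf)
#         enc.update_into(outf)
#         tag = enc.calculate_tag()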
class HMACWrapper(base.BaseAEADCipher):
"""
    Wraps a cipher that supports the BaseNonAEADCipher interface and
    provides authentication capability using HMAC (encrypt-then-MAC).
"""
def __init__(
self,
cipher: base.BaseNonAEADCipher,
hkey: bytes,
rand: bytes,
digestmod: typing.Union[str, base.BaseHash] = "sha256",
offset: int = 0,
tag_length: typing.Optional[int] = 16,
):
if not isinstance(cipher, base.BaseNonAEADCipher):
raise TypeError("Only NonAEAD ciphers can be wrapped.")
if isinstance(digestmod, base.BaseHash):
# always use a fresh hash object.
digestmod = digestmod.new()
self._auth = hmac.new(hkey, digestmod=digestmod)
self._auth.update(rand)
self._ctx = self._get_mac_ctx(cipher, self._auth, offset)
self._encrypting = cipher.is_encrypting()
self._len_aad, self._len_ct = 0, 0
self._updated = False
self._tag = None
self._tag_length = (
self._auth.digest_size if tag_length is None else tag_length
)
def is_encrypting(self):
return self._encrypting
def authenticate(self, data):
if self._ctx is None:
raise exc.AlreadyFinalized
if self._updated:
raise TypeError(
"Cannot call authenticate after update/update_into has been"
" called"
)
self._auth.update(data)
self._len_aad += len(data)
def update(self, data):
if self._ctx is None:
raise exc.AlreadyFinalized
self._updated = True
self._len_ct += len(data)
return self._ctx.update(data)
def update_into(self, data, out):
if self._ctx is None:
raise exc.AlreadyFinalized
self._updated = True
self._ctx.update_into(data, out)
self._len_ct += len(data)
def finalize(self, tag=None):
if self._ctx is None:
raise exc.AlreadyFinalized
if not self.is_encrypting():
if tag is None:
raise ValueError("tag is required for decryption")
if len(tag) != self._tag_length:
raise ValueError(
f"Invalid tag length: (required {self._tag_length})"
)
self._auth.update(self._len_aad.to_bytes(8, "little"))
self._auth.update(self._len_ct.to_bytes(8, "little"))
self._ctx = None
if not self._encrypting and not hmac.compare_digest(
self._auth.digest()[: self._tag_length],
tag,
):
raise exc.DecryptionError
def calculate_tag(self):
if self._ctx is not None:
raise exc.NotFinalized
if self.is_encrypting():
return self._auth.digest()[: self._tag_length]
@staticmethod
def _get_mac_ctx(cipher: base.BaseNonAEADCipher, auth, offset):
if cipher.is_encrypting():
return _EncryptionCtx(cipher, auth, offset)
return _DecryptionCtx(cipher, auth)
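# Tag construction, as implied by ``authenticate`` and ``finalize`` above:
# the HMAC runs over  rand || aad || ciphertext || len(aad) || len(ct),
# with both lengths encoded as 8-byte little-endian integers (an
# encrypt-then-MAC layout).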
class _EncryptionCtx:
def __init__(self, cipher: base.BaseNonAEADCipher, auth, offset):
self._ctx = cipher
self._auth = auth
        # slice off the extra ``offset`` bytes when authenticating; ``None``
        # keeps the full buffer when offset is 0 (since -0 is falsy)
        self._offset = -offset or None
def update(self, data):
ctxt = self._ctx.update(data)
self._auth.update(ctxt)
return ctxt
def update_into(self, data, out):
self._ctx.update_into(data, out)
self._auth.update(out[: self._offset])
class _DecryptionCtx:
def __init__(self, cipher: base.BaseNonAEADCipher, auth):
self._ctx = cipher
self._auth = auth
def update(self, data):
self._auth.update(data)
return self._ctx.update(data)
def update_into(self, data, out):
self._auth.update(data)
self._ctx.update_into(data, out)
| 30.554307 | 79 | 0.599779 | 950 | 8,158 | 4.996842 | 0.208421 | 0.041289 | 0.020855 | 0.023173 | 0.391405 | 0.323362 | 0.267327 | 0.232779 | 0.195071 | 0.177586 | 0 | 0.003935 | 0.31466 | 8,158 | 266 | 80 | 30.669173 | 0.845108 | 0.220887 | 0 | 0.417178 | 0 | 0 | 0.051346 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.128834 | false | 0 | 0.030675 | 0.01227 | 0.251534 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2175488fbd9eeb9626a7072d7fe8708e8f6e0714 | 2,350 | py | Python | reactome_gnn/dataset.py | reactome/reactome_gnn | 5e5c0000a971743d8e4aa2f865fde4dd3aa5fa93 | [
"Apache-2.0"
] | 1 | 2021-11-05T19:40:10.000Z | 2021-11-05T19:40:10.000Z | reactome_gnn/dataset.py | reactome/reactome_gnn | 5e5c0000a971743d8e4aa2f865fde4dd3aa5fa93 | [
"Apache-2.0"
] | 12 | 2021-06-20T21:25:35.000Z | 2021-08-11T08:19:02.000Z | reactome_gnn/dataset.py | reactome/reactome_gnn | 5e5c0000a971743d8e4aa2f865fde4dd3aa5fa93 | [
"Apache-2.0"
] | null | null | null | import os
import pickle
import subprocess
import dgl
from dgl.data import DGLDataset
class PathwayDataset(DGLDataset):
"""
    A class that inherits from DGLDataset and extends its functionality
    by adding additional attributes and processing the graphs
    accordingly.
Attributes
----------
root : str
Root directory consisting of other directories where the raw
data can be found, and where all the processing results are
stored.
"""
def __init__(self, root='data'):
"""
Parameters
----------
root : str
Root directory consisting of other directories where the raw
data can be found, and where all the processing results are
stored.
"""
self.root = os.path.abspath(root)
        if 'processed' not in os.listdir(self.root):
            # create the directory that processed graphs are saved into
            subprocess.run('mkdir processed', shell=True, cwd=self.root)
raw_dir = os.path.join(self.root, 'raw')
save_dir = os.path.join(self.root, 'processed')
super().__init__(name='pathway_graph', raw_dir=raw_dir, save_dir=save_dir)
def has_cache(self):
"""Check whether the dataset already exists."""
return len(os.listdir(self.save_dir)) == len(os.listdir(self.raw_dir))
def __len__(self):
return len(os.listdir(self.save_dir))
def __getitem__(self, idx):
names = sorted(os.listdir(self.save_dir))
name = names[idx]
(graph,), _ = dgl.load_graphs(os.path.join(self.save_dir, name))
return graph, name
def process(self):
"""Process the graphs and store them in the 'processed' directory."""
        for graph_file in os.listdir(self.raw_dir):
            graph_path = os.path.join(self.raw_dir, graph_file)
            with open(graph_path, 'rb') as f:
                nx_graph = pickle.load(f)
for node in nx_graph.nodes:
if nx_graph.nodes[node]['significance'] == 'significant':
nx_graph.nodes[node]['significance'] = 1.0
else:
nx_graph.nodes[node]['significance'] = 0.0
dgl_graph = dgl.from_networkx(nx_graph, node_attrs=['weight', 'significance'])
            # strip the raw extension (e.g. '.pkl') and save as a .dgl file
            save_path = os.path.join(self.save_dir, f'{graph_file[:-4]}.dgl')
dgl.save_graphs(save_path, dgl_graph)
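# A minimal usage sketch (assumes pickled networkx graphs with 'weight' and
# 'significance' node attributes under <root>/raw/):
#
#     dataset = PathwayDataset(root='data')
#     graph, name = dataset[0]
#     print(name, graph.ndata['significance'])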
| 35.606061 | 90 | 0.614043 | 304 | 2,350 | 4.575658 | 0.335526 | 0.040259 | 0.056075 | 0.050324 | 0.382459 | 0.264558 | 0.20417 | 0.162473 | 0.162473 | 0.162473 | 0 | 0.002921 | 0.271489 | 2,350 | 65 | 91 | 36.153846 | 0.809579 | 0.253191 | 0 | 0 | 0 | 0 | 0.083843 | 0.012852 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147059 | false | 0 | 0.147059 | 0.029412 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21761ae262f482184353d2351a50a9342cd20302 | 1,024 | py | Python | Binomial/Binomial.py | DakotaKry/algorithims | 16eba338df768a0468f7a4788998faae382500e4 | [
"MIT"
] | null | null | null | Binomial/Binomial.py | DakotaKry/algorithims | 16eba338df768a0468f7a4788998faae382500e4 | [
"MIT"
] | null | null | null | Binomial/Binomial.py | DakotaKry/algorithims | 16eba338df768a0468f7a4788998faae382500e4 | [
"MIT"
] | null | null | null |
class Binomial:
    def __init__(self, xmax, ymax):
        # A[i][j] memoizes the number of monotone lattice paths to (i, j),
        # i.e. the binomial coefficient C(i + j, i).
        self.A = [[None] * ymax for i in range(xmax)]
        self.trace = False
def setTrace(self, trace):
self.trace = trace
    def b(self, x, y):
        # Fills the table bottom-up; runs in O(x*y) time.
        A = self.A
        trace = self.trace
        for i in range(x):
            for j in range(y):
                if A[i][j] is not None:
                    continue  # already memoized
                if i == 0 or j == 0:
                    A[i][j] = 1
                else:
                    A[i][j] = A[i - 1][j] + A[i][j - 1]
                if trace:
                    print(A[i][j])
        if trace:
            print("=" * 5 + str(A[x - 1][y - 1]) + "=" * 5)
        return A[x - 1][y - 1]
def fastRoute(self, xsrc, ysrc, xdest, ydest ):
x = abs( xsrc - xdest )
y = abs( ysrc - ydest )
return self.b( x, y )
    def updateMax(self, xmax, ymax):
        # Grows the memo table to xmax * ymax, copying over any existing
        # entries; runs in O(xmax*ymax) time.
        B = [[None] * ymax for i in range(xmax)]
        for i in range(min(xmax, len(self.A))):
            for j in range(min(ymax, len(self.A[i]))):
                B[i][j] = self.A[i][j]
        self.A = B
binom = Binomial(20,20)
binom.setTrace(True)
binom.fastRoute(1, 1, 8, 4)
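# The call above fills a 7 x 3 corner of the table (tracing each entry as it
# is computed) and should return A[6][2] = C(8, 2) = 28 monotone grid routes.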
| 17.355932 | 49 | 0.508789 | 178 | 1,024 | 2.904494 | 0.258427 | 0.027079 | 0.034816 | 0.06383 | 0.119923 | 0.061896 | 0.061896 | 0 | 0 | 0 | 0 | 0.02521 | 0.302734 | 1,024 | 58 | 50 | 17.655172 | 0.69888 | 0.038086 | 0 | 0.075 | 0 | 0 | 0.002039 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.2 | 0.075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |