hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2176987fdbd61dbebc421fba3bb535bdc9787060 | 957 | py | Python | userlib/utils/__init__.py | WorldMoZara/Pardon-Bot-Community | 541a87b0794ff6ecb57531df99b37bab44d86453 | [
"MIT"
] | 1 | 2022-01-17T00:51:15.000Z | 2022-01-17T00:51:15.000Z | userlib/utils/__init__.py | WorldMoZara/Pardon-Bot-Community | 541a87b0794ff6ecb57531df99b37bab44d86453 | [
"MIT"
] | null | null | null | userlib/utils/__init__.py | WorldMoZara/Pardon-Bot-Community | 541a87b0794ff6ecb57531df99b37bab44d86453 | [
"MIT"
] | null | null | null | import ctypes
import os
import re
import subprocess
QalcExchangeRatesUpdater = ctypes.cdll.LoadLibrary(
os.path.join("/".join(__name__.split("."))
if __name__ != "__main__" else ".",
"qalc-update-exchange-rates.so")
)
qalc_update_exchange_rates = QalcExchangeRatesUpdater._Z20update_exchange_ratev
_fchs_conv = [
[r"小?时", r"h"], [r"(分钟?|mi?n?)", r"min"], [r"秒钟?", r"s"], [r"[天日]", r"d"],
] # 字符转换映射关系
def time2sec(time: str) -> int:
"""Convert time to second(s)."""
time_s = time
for src, dst in _fchs_conv:
# 逐步替换
time_s = re.sub(src, dst, time_s)
proc = subprocess.run(["qalc", "%s to s" % time_s], capture_output=True)
time_po = proc.stdout.decode()
time_po = time_po[time_po.rfind("=") + 1:time_po.rfind("s")].strip()
return int(float(time_po))
def ftouch(fp: str):
subprocess.run(["touch", fp])
def fgetmtime(fp: str):
return os.path.getmtime(fp)
| 25.184211 | 79 | 0.617555 | 137 | 957 | 4.072993 | 0.50365 | 0.064516 | 0.064516 | 0.082437 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005249 | 0.203762 | 957 | 37 | 80 | 25.864865 | 0.727034 | 0.042842 | 0 | 0 | 0 | 0 | 0.093509 | 0.031903 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.16 | 0.04 | 0.36 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2176bd40b94b346745b8e60c8cd96eee5c6444d3 | 2,538 | py | Python | project_test/tests/test_005_resolver.py | MtzwOsk/autobreadcrumbs | 75e2fd1cc74db790c5fdc924d6fa9410ee7caa45 | [
"MIT"
] | null | null | null | project_test/tests/test_005_resolver.py | MtzwOsk/autobreadcrumbs | 75e2fd1cc74db790c5fdc924d6fa9410ee7caa45 | [
"MIT"
] | null | null | null | project_test/tests/test_005_resolver.py | MtzwOsk/autobreadcrumbs | 75e2fd1cc74db790c5fdc924d6fa9410ee7caa45 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from django.core.urlresolvers import reverse
from autobreadcrumbs.resolver import PathBreadcrumbResolver
# Autodiscovering is disabled since it allready have be executed
# previously, see warning from "test_004_autodiscover.py"
# from autobreadcrumbs.discover import autodiscover
# autodiscover()
@pytest.mark.parametrize("path,segments", [
('/foo/', [
'/',
'/foo/',
]),
('foo/', [
'/',
'/foo/',
]),
('/foo', [
'/',
'/foo/',
]),
(u'/télô/你好/', [
'/',
u'/télô/',
u'/télô/你好/',
]),
('/foo/bar/', [
'/',
'/foo/',
'/foo/bar/',
]),
('/foo/bar-mip/flop/', [
'/',
'/foo/',
'/foo/bar-mip/',
'/foo/bar-mip/flop/',
]),
('/foo/bar/foo/zouip/', [
'/',
'/foo/',
'/foo/bar/',
'/foo/bar/foo/',
'/foo/bar/foo/zouip/',
]),
])
def test_cut_path_into_segments(settings, path, segments):
"""Cut a path into segments"""
resolver = PathBreadcrumbResolver(settings.ROOT_URLCONF)
assert resolver.cut(path) == segments
@pytest.mark.parametrize("url,urlcurrent,urltitles", [
(
'/',
'Home',
['Home']
),
(
'/bar/',
'Bar',
['Home', 'Bar']
),
(
'/foo/invisible/chu/',
'Chu',
['Home', 'Foo', 'Chu']
),
(
'/foo/sluggy/plaf/',
'Sluggy',
['Home', 'Foo', 'Sluggy']
),
(
'/foo/date/2016/08/',
'Year month',
['Home', 'Foo', "Year month"]
),
(
'/foo/sub/plop/',
'Plop',
['Home', 'Foo', 'Sub', 'Plop']
),
(
'/foo/controlled-true/yep/',
'Control Yep',
['Home', 'Foo', 'Controlled true', 'Control Yep']
),
(
'/foo/controlled-false/nope/',
'Control Nope',
['Home', 'Foo', 'Control Nope']
),
])
def test_resolving_path(settings, rf, url, urlcurrent, urltitles):
"""Resolve breadcrumbs from a path"""
# Forge a request object from url
request = rf.get(url)
resolver = PathBreadcrumbResolver(settings.ROOT_URLCONF)
results = resolver.resolve(request.path, request=request)
elements = results['autobreadcrumbs_elements']
current = results['autobreadcrumbs_current']
#print [str(item.title) for item in elements]
assert [item.title for item in elements] == urltitles
assert current.title == urlcurrent
| 22.069565 | 66 | 0.509062 | 243 | 2,538 | 5.26749 | 0.36214 | 0.042188 | 0.042188 | 0.028125 | 0.189063 | 0.06875 | 0 | 0 | 0 | 0 | 0 | 0.005627 | 0.299842 | 2,538 | 114 | 67 | 22.263158 | 0.714688 | 0.13357 | 0 | 0.376344 | 0 | 0 | 0.266269 | 0.05637 | 0 | 0 | 0 | 0 | 0.032258 | 1 | 0.021505 | false | 0 | 0.032258 | 0 | 0.053763 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21773447f02a33a437fe0c12beffe1d346edd743 | 7,480 | py | Python | mixer/api.py | ooojustin/mixer.py | 9195ecb3f30772c0e982cd92a27f7f56ac05a2a8 | [
"MIT"
] | 3 | 2019-08-28T00:51:23.000Z | 2020-03-26T01:19:19.000Z | mixer/api.py | ooojustin/mixer.py | 9195ecb3f30772c0e982cd92a27f7f56ac05a2a8 | [
"MIT"
] | null | null | null | mixer/api.py | ooojustin/mixer.py | 9195ecb3f30772c0e982cd92a27f7f56ac05a2a8 | [
"MIT"
] | null | null | null | import aiohttp
import dateutil.parser
import json
from datetime import datetime, timezone, timedelta
from enum import Enum
from . import exceptions as MixerExceptions
from .objects import MixerUser, MixerChannel
class RequestMethod(Enum):
GET = 0
POST = 1
class MixerAPI:
API_URL = "https://mixer.com/api/v1"
API_URL_V2 = "https://mixer.com/api/v2"
def __init__(self, client_id, client_secret):
self.client_id = client_id
self.client_secret = client_secret
self._session = aiohttp.ClientSession(headers = { "Client-ID": self.client_id })
async def close(self):
await self._session.close()
async def request(self, method, url, parse_json = False, **kwargs):
# pick ... based on request type
if method is RequestMethod.GET:
ctx_mgr = self._session.get(url, **kwargs)
elif method is RequestMethod.POST:
data = kwargs.pop("data")
ctx_mgr = self._session.post(url, json = data, **kwargs)
async with ctx_mgr as response:
text = await response.text()
# handle specific response codes
if response.status == 404:
raise MixerExceptions.NotFound(text)
if response.status != 200:
raise MixerExceptions.WebException(response.status, text)
try:
# NOTE: the only exception that will be thrown here is in the .json() func
return await response.json() if parse_json else text
except:
raise RuntimeError("Failed to parse json from response.")
async def get(self, url, **kwargs):
coro = self.request(RequestMethod.GET, url, **kwargs)
return await coro
async def post(self, url, data, **kwargs):
kwargs["data"] = data
coro = self.request(RequestMethod.POST, url, **kwargs)
return await coro
async def get_channel(self, id_or_token):
"""Retrieves a MixerChannel object from username or channel id.
Args:
id_or_token (str): Username (or id) of Mixer channel.
Returns:
:class:`mixer.objects.MixerChannel`: Channel information.
"""
url = "{}/channels/{}".format(self.API_URL, id_or_token)
data = await self.get(url, parse_json = True)
channel = MixerChannel(data)
channel.set_api(self)
return channel
async def get_user(self, user_id):
"""Retrieves a MixerUser object from a user id.
Args:
user_id (int): The unique id of a Mixer user.
Returns:
:class:`mixer.objects.MixerUser`: User information.
"""
url = "{}/users/{}".format(self.API_URL, user_id)
data = await self.get(url, parse_json = True)
user = MixerUser(data)
user.set_api(self)
return user
async def get_shortcode(self, scope = None):
"""Makes a request to begin shortcode oauth process.
Args:
scope (list): A list of scope/permissions to generate token with.
Returns:
dict: Information to proceed with shortcode oauth process."""
url = "{}/oauth/shortcode".format(self.API_URL)
if scope is None: scope = list()
data = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"scope": " ".join(scope)
}
response = await self.post(url, data, parse_json = True)
return response
async def check_shortcode(self, handle):
"""Check a shortcode handle to determine it's status.
Args:
str: Shortcode handle.
Returns:
dict: Shortcode status information.
"""
url = "{}/oauth/shortcode/check/{}".format(self.API_URL, handle)
response = await self.get(url, parse_json = True)
return response
async def get_token(self, code_or_token, refresh = False):
"""Generate/refresh tokens.
Args:
code_or_token (str): Authorization code or refresh token.
refresh (bool): Whether or not a refresh token is provided.
Returns:
dict: New token(s) + information from server.
"""
url = "{}/oauth/token".format(self.API_URL)
data = {
"client_id": self.client_id,
"client_secret": self.client_secret
}
if refresh:
data["grant_type"] = "refresh_token"
data["refresh_token"] = code_or_token
else:
data["grant_type"] = "authorization_code"
data["code"] = code_or_token
response = await self.post(url, data, parse_json = True)
return response # https://pastebin.com/n1Kjjphq
async def check_token(self, token):
"""Gets information about an existing token.
Args:
token (str): An access token or a refresh token.
"""
url = "{}/oauth/token/introspect".format(self.API_URL)
data = { "token": token }
response = await self.post(url, data, parse_json = True)
return response # https://pastebin.com/SEd6Y2Jz
async def get_broadcast(self, channel_id):
"""Gets an active broadcast on a given chanel.
Args:
channel_id (int): Unique channel ID number.
"""
url = "{}/channels/{}/broadcast".format(self.API_URL, channel_id)
response = await self.get(url, parse_json = True)
return response
async def get_uptime(self, channel_id):
"""Gets the uptime of a channels broadcast.
Returns:
datetime.timedelta: Duration of the active broadcast.
None: If the broadcast is not currently online.
"""
# get broadcast and make sure it's online
broadcast = await self.get_broadcast(channel_id)
if "error" in broadcast or not broadcast["online"]:
return None
# determine the streams start time and current time
started = dateutil.parser.parse(broadcast["startedAt"])
now = datetime.now(timezone.utc)
# calculate delta and remove microseconds because they're insignificant
delta = now - started
delta = delta - timedelta(microseconds = delta.microseconds)
return delta
async def get_leaderboard(self, type, channel_id, limit = 10):
# type format: [sparks, embers]-[weekly, monthly, yearly, alltime]
url = "{}/leaderboards/{}/channels/{}?limit={}".format(self.API_URL_V2, type, channel_id, limit)
response = await self.get(url, parse_json = True)
return response
async def get_chatters(self, channel_id):
url = "{}/chats/{}/users".format(self.API_URL_V2, channel_id)
response = await self.get(url, parse_json = True)
return response
async def get_user_services(self, oauth):
# NOTE: requires "user:details:self" scope
await oauth.ensure_active()
url = "{}/users/{}/links".format(self.API_URL, oauth.user_id)
response = await self.get(url, parse_json = True, headers = oauth.header)
return response
async def get_user_service(self, service, oauth):
# NOTE: requires "user:details:self" scope
services = await self.get_user_services(oauth)
check = lambda d: d["service"] == service
filtered = list(filter(check, services))
return filtered[0]["id"] if len(filtered) else None
| 34.62963 | 104 | 0.61123 | 899 | 7,480 | 4.968854 | 0.221357 | 0.028655 | 0.027088 | 0.035818 | 0.225207 | 0.198791 | 0.191404 | 0.151108 | 0.136781 | 0.128274 | 0 | 0.003562 | 0.286898 | 7,480 | 215 | 105 | 34.790698 | 0.833896 | 0.066979 | 0 | 0.194915 | 0 | 0 | 0.085464 | 0.021459 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008475 | false | 0 | 0.059322 | 0 | 0.254237 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
217a1c4d9a24091de6f28191afa50d26888bd024 | 3,983 | py | Python | src/baxter_gui/sonar.py | HumaRobotics/baxter_evalutation_gui | eceea236286dd2548287c8df3bc9cae5800ca23e | [
"BSD-2-Clause"
] | 2 | 2017-04-06T06:14:10.000Z | 2017-10-05T22:35:58.000Z | src/baxter_gui/sonar.py | HumaRobotics/baxter_evalutation_gui | eceea236286dd2548287c8df3bc9cae5800ca23e | [
"BSD-2-Clause"
] | null | null | null | src/baxter_gui/sonar.py | HumaRobotics/baxter_evalutation_gui | eceea236286dd2548287c8df3bc9cae5800ca23e | [
"BSD-2-Clause"
] | 2 | 2017-04-06T06:13:59.000Z | 2017-11-06T12:31:53.000Z | #!/usr/bin/env python
###########################################################################
# This software is graciously provided by HumaRobotics
# under the Simplified BSD License on
# github:
# HumaRobotics is a trademark of Generation Robots.
# www.humarobotics.com
# Copyright (c) 2015, Generation Robots.
# All rights reserved.
# www.generationrobots.com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
#
#############################################################################
import rospy
from std_msgs.msg import UInt16
from sensor_msgs.msg import PointCloud
from threading import Lock
class Sonar:
"""
Enables or disables the sonar sensors
"""
def __init__(self):
self.state = 0
self.distances = [None for x in xrange(12)]
self.mutex = Lock()
self.__sonar_pub = rospy.Publisher("/robot/sonar/head_sonar/set_sonars_enabled",UInt16, queue_size=1)
self.__sonar_sub = rospy.Subscriber("/robot/sonar/head_sonar/state",PointCloud,self.callback,queue_size=1)
def callback(self,msg):
with self.mutex:
self.distances = [None for x in xrange(12)]
for channel in msg.channels:
if channel.name == "SensorId":
sensors = list(channel.values)
if channel.name == "Distance":
for i,sensor in enumerate(sensors):
self.distances[int(sensor)] = channel.values[i]
rospy.sleep(0.2)
def getRanges(self):
with self.mutex:
return self.distances
def checkConnection(self):
"""
Checks if baxter's sonars already subscribed to the publisher
"""
while not rospy.is_shutdown() and self.__sonar_pub.get_num_connections() < 1:
# rospy.logwarn("No subscriber for sonar state found yet")
rospy.sleep(0.01)
rospy.loginfo("Found a subscriber. Changing sonar state")
def enable(self):
"""
Enables all sonar sensors
"""
rospy.loginfo("Enable sonar")
self.checkConnection()
self.state = 4095
self.__sonar_pub.publish(4095)
def disable(self):
"""
Disables all sonar sensors
"""
rospy.loginfo("Disable sonar")
self.checkConnection()
self.state = 0
self.__sonar_pub.publish(0)
if __name__ == '__main__':
rospy.init_node("baxter_sonar")
| 38.669903 | 114 | 0.648004 | 487 | 3,983 | 5.217659 | 0.435318 | 0.01771 | 0.01889 | 0.018103 | 0.144038 | 0.077922 | 0.077922 | 0.077922 | 0.053522 | 0.053522 | 0 | 0.010924 | 0.241526 | 3,983 | 102 | 115 | 39.04902 | 0.830189 | 0.491087 | 0 | 0.2 | 0 | 0 | 0.098964 | 0.040852 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.1 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
217a53f692aa8c3f0575201c87b0e50ccbd4ef2e | 1,647 | py | Python | Dynamic Programming/LongestCommonSubseq.py | Jh123x/COMP-550-Algorithms-and-Analysis | 1bc0b7fb8c48dc52cf89704557795aa87290e88b | [
"MIT"
] | null | null | null | Dynamic Programming/LongestCommonSubseq.py | Jh123x/COMP-550-Algorithms-and-Analysis | 1bc0b7fb8c48dc52cf89704557795aa87290e88b | [
"MIT"
] | null | null | null | Dynamic Programming/LongestCommonSubseq.py | Jh123x/COMP-550-Algorithms-and-Analysis | 1bc0b7fb8c48dc52cf89704557795aa87290e88b | [
"MIT"
] | null | null | null | from typing import List
# Define directions
DIAGONOAL = (-1, -1)
UP = (0, -1)
LEFT = (-1, 0)
def trace_table(backtrack_table, seq1, seq2) -> List[str]:
"""Trace back the backtrack table to find the longest common subsequence"""
i, j = len(seq1)-1, len(seq2)-1
lcs = []
while i >= 0 and j >= 0:
dx, dy = backtrack_table[i][j]
if dx + dy == -2:
lcs.append(seq1[i])
# break
i += dx
j += dy
return lcs[::-1]
def longest_common_subsequence(seq1, seq2):
"""Find the longest common subsequence between 2 strings"""
count_table = [[0] * (len(seq2)+1) for _ in range(len(seq1)+1)]
backtrack_table = [[(0, 0)] * (len(seq2)) for _ in range(len(seq1))]
for i in range(1, len(seq1) + 1):
for j in range(1, len(seq2) + 1):
# If the word is found
if (seq1[i-1] == seq2[j-1]):
count_table[i][j] = count_table[i-1][j-1] + 1
backtrack_table[i-1][j-1] = DIAGONOAL
# If left > up
elif count_table[i-1][j] >= count_table[i][j-1]:
count_table[i][j] = count_table[i-1][j]
backtrack_table[i-1][j-1] = LEFT
# Otherwise up
else:
count_table[i][j] = count_table[i][j-1]
backtrack_table[i-1][j-1] = UP
return count_table[-1][-1], trace_table(backtrack_table, seq1, seq2)
if __name__ == '__main__':
X = ["A", "T", "C", "A", "C", "C", "T", "A", "T", "C", "A", "C", "C", "T"]
Y = ["A", "T", "A", "A", "C", "T", "A", "T", "A", "A", "C", "T"]
print(longest_common_subsequence(X, Y))
| 30.5 | 79 | 0.505161 | 252 | 1,647 | 3.170635 | 0.230159 | 0.090113 | 0.110138 | 0.060075 | 0.43179 | 0.295369 | 0.161452 | 0.067584 | 0.067584 | 0.067584 | 0 | 0.048738 | 0.302368 | 1,647 | 53 | 80 | 31.075472 | 0.646649 | 0.118397 | 0 | 0 | 0 | 0 | 0.02366 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.030303 | 0 | 0.151515 | 0.030303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
217b4a4e53e620a12cb915d6303bb2ebe3952a08 | 1,787 | py | Python | main.py | gyyang/bio-kvm | 1db65ec3250879d16b7906edf4833670d170ec9c | [
"MIT"
] | 2 | 2022-01-01T18:42:50.000Z | 2022-03-13T16:20:12.000Z | main.py | gyyang/bio-kvm | 1db65ec3250879d16b7906edf4833670d170ec9c | [
"MIT"
] | null | null | null | main.py | gyyang/bio-kvm | 1db65ec3250879d16b7906edf4833670d170ec9c | [
"MIT"
] | 1 | 2022-03-18T16:16:32.000Z | 2022-03-18T16:16:32.000Z | """File that summarizes all key results.
To train networks in a specific experiment, run in command line
python main.py --train experiment_name
To analyze results from this experiment
python main.py --analyze experiment_name
To train and analyze all models quickly, run in command line
python main.py --train --analyze
To reproduce the results from paper, run
python main.py --train --analyze
To analyze pretrained networks, run
python main.py --analyze
To run specific experiments (e.g. orn2pn, vary_pn), run
python main.py --train --analyze --experiment orn2pn vary_pn
"""
import os
import platform
import sys
import argparse
from experiment_utils import train_experiment, analyze_experiment
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--device', help='CUDA device number', default=0,
type=int)
parser.add_argument('-t', '--train', help='Training', nargs='+', default=[])
parser.add_argument('-a', '--analyze', help='Analyzing', nargs='+', default=[])
parser.add_argument('--no-general', help='No general analysis',
dest='general', action='store_false')
parser.set_defaults(general=True)
args = parser.parse_args()
# For running from IDE instead of command line
if len(sys.argv) == 1: #no command line parameters passed
args.train = [] #add experiments here
args.analyze = []
for item in args.__dict__.items():
print(item)
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.device)
experiments2train = args.train
experiments2analyze = args.analyze
use_cluster = 'columbia' in platform.node() # on columbia cluster
for experiment in experiments2train:
train_experiment(experiment, use_cluster=use_cluster)
for experiment in experiments2analyze:
analyze_experiment(experiment, general=args.general)
| 31.350877 | 79 | 0.743145 | 241 | 1,787 | 5.406639 | 0.406639 | 0.046048 | 0.055257 | 0.052187 | 0.14505 | 0.100537 | 0.050652 | 0.050652 | 0 | 0 | 0 | 0.005242 | 0.146055 | 1,787 | 56 | 80 | 31.910714 | 0.848624 | 0.386682 | 0 | 0 | 0 | 0 | 0.132597 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.185185 | 0 | 0.185185 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
217c9d04d50b6d94a1a684d457eb2c81ff82f9f5 | 1,216 | py | Python | tests/gsfpy3_08/conftest.py | irewolepeter/gsfpy_USM_Implementation | c4614ac3f7d833eb86ea38c7708108b130f96612 | [
"MIT"
] | 7 | 2020-07-01T07:12:19.000Z | 2022-01-20T20:39:57.000Z | tests/gsfpy3_08/conftest.py | irewolepeter/gsfpy_USM_Implementation | c4614ac3f7d833eb86ea38c7708108b130f96612 | [
"MIT"
] | 36 | 2020-06-23T09:10:15.000Z | 2022-03-22T10:27:58.000Z | tests/gsfpy3_08/conftest.py | irewolepeter/gsfpy_USM_Implementation | c4614ac3f7d833eb86ea38c7708108b130f96612 | [
"MIT"
] | 2 | 2021-02-07T13:21:52.000Z | 2021-06-24T19:16:16.000Z | import shutil
from dataclasses import dataclass
from enum import Enum, auto
from pathlib import Path
import pytest
from pytest_cases import fixture_union
class GsfVersion(Enum):
V03_08 = auto()
@dataclass(frozen=True)
class GsfDatafile:
gsf_version: GsfVersion
path: Path
num_beams: int
GSF_03_08_DATAFILE = GsfDatafile(
GsfVersion.V03_08,
Path(__file__).parent.parent / "test_data" / "GSF3_08_test_file.gsf",
num_beams=432,
)
def _setup_gsf_test_data(src_datafile: GsfDatafile, tmp_path: Path):
tmp_path.mkdir(parents=True, exist_ok=True)
tmp_datafile_path = tmp_path / src_datafile.path.name
shutil.copyfile(src_datafile.path, tmp_datafile_path)
yield GsfDatafile(
src_datafile.gsf_version, tmp_datafile_path, src_datafile.num_beams
)
shutil.rmtree(tmp_path, ignore_errors=True)
@pytest.fixture
def gsf_test_data_03_08(tmp_path):
yield from _setup_gsf_test_data(GSF_03_08_DATAFILE, tmp_path)
fixture_union("gsf_test_data", [gsf_test_data_03_08])
@pytest.fixture(params=[GSF_03_08_DATAFILE])
def gsf_test_data(request, tmp_path):
src_datafile: GsfDatafile = request.param
yield from _setup_gsf_test_data(src_datafile, tmp_path)
| 24.32 | 75 | 0.774671 | 181 | 1,216 | 4.79558 | 0.276243 | 0.073733 | 0.08871 | 0.051843 | 0.135945 | 0.101382 | 0 | 0 | 0 | 0 | 0 | 0.032692 | 0.144737 | 1,216 | 49 | 76 | 24.816327 | 0.801923 | 0 | 0 | 0 | 0 | 0 | 0.035362 | 0.01727 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.176471 | 0 | 0.441176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
217cdf7669f321ac62a49a7990e683d0c9fcc359 | 3,041 | py | Python | third_party/catapult/dashboard/dashboard/edit_test_owners.py | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | [
"BSD-3-Clause"
] | 1 | 2020-09-15T08:43:34.000Z | 2020-09-15T08:43:34.000Z | third_party/catapult/dashboard/dashboard/edit_test_owners.py | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | [
"BSD-3-Clause"
] | null | null | null | third_party/catapult/dashboard/dashboard/edit_test_owners.py | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides the web interface for adding and removing test owners."""
import json
from google.appengine.api import users
from dashboard import request_handler
from dashboard import test_owner
from dashboard import xsrf
class EditTestOwnersHandler(request_handler.RequestHandler):
    """Handles rendering and editing test owners."""

    def get(self):
        """Renders the UI for editing owners.

        If the user is an admin, renders UI with all test suite paths and
        their owners, otherwise renders UI with the list of test suite
        paths owned by the logged-in user.
        """
        user = users.get_current_user()
        if user:
            if users.is_current_user_admin():
                owner_json = self._GetAllOwnerDataJson()
            else:
                owner_json = self._GetOwnerDataForUserJson(user)
        else:
            self.RenderHtml('result.html', {
                'errors': ['Log in to edit test owners.']})
            return
        self.RenderHtml('edit_test_owners.html',
                        {'owner_info': owner_json})

    @xsrf.TokenRequired
    def post(self):
        """Handles updates of test owners.

        Expects 'action' ('add', anything else means remove) and 'item'
        (a test suite path). Non-admin users may only add or remove
        themselves as owner.
        """
        user = users.get_current_user()
        if not user:
            self.ReportError('Must be logged in to edit test owners.', status=403)
            return
        action = self.request.get('action')
        test_suite_path = self.request.get('item')
        if not action or not test_suite_path:
            self.ReportError('Missing required parameters.', status=403)
            return
        owner_email = self.request.get('sub_item')
        if not users.is_current_user_admin():
            # Non-admins can only edit their own ownership entry.
            owner_email = user.email()
        test_suite_path = str(test_suite_path)
        owner_email = str(owner_email) if owner_email else None
        try:
            test_owner.ValidateTestSuitePath(test_suite_path)
            test_owner.ValidateOwnerEmail(owner_email)
        except ValueError as error:
            # Use str(error) instead of the deprecated .message attribute
            # (removed in Python 3).
            self.ReportError(str(error), status=400)
            return
        if action == 'add':
            test_owner.AddOwner(test_suite_path, owner_email)
        else:
            test_owner.RemoveOwner(test_suite_path, owner_email)
        self.response.out.write('{}')

    def _GetOwnerDataForUserJson(self, user):
        """Returns a JSON list of test suite paths owned by |user|."""
        owner_email = user.email()
        test_suite_paths = test_owner.GetTestSuitePaths(owner_email)
        results = [{'name': path} for path in sorted(test_suite_paths)]
        return json.dumps(results)

    def _GetAllOwnerDataJson(self):
        """Returns a JSON list of all test suite paths with their owners."""
        owner_dict = test_owner.GetMasterCachedOwner()
        results = []
        for test_suite_path in sorted(owner_dict):
            owners = owner_dict[test_suite_path]
            results.append({
                'name': test_suite_path,
                'sub_items': [{'name': owner} for owner in owners],
            })
        return json.dumps(results)
| 30.108911 | 76 | 0.680697 | 395 | 3,041 | 5.055696 | 0.321519 | 0.072108 | 0.091137 | 0.027041 | 0.155233 | 0.102654 | 0 | 0 | 0 | 0 | 0 | 0.00552 | 0.225584 | 3,041 | 100 | 77 | 30.41 | 0.842463 | 0.183492 | 0 | 0.271429 | 0 | 0 | 0.079606 | 0.008617 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.071429 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
217dcf9e3ff97f5c6cd6b8522780f4c3bc9a75ab | 1,604 | py | Python | google/cloud/websecurityscanner/v1beta/websecurityscanner-v1beta-py/google/cloud/websecurityscanner_v1beta/types/crawled_url.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/cloud/websecurityscanner/v1beta/websecurityscanner-v1beta-py/google/cloud/websecurityscanner_v1beta/types/crawled_url.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/cloud/websecurityscanner/v1beta/websecurityscanner-v1beta-py/google/cloud/websecurityscanner_v1beta/types/crawled_url.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Proto-plus module descriptor: registers the message types listed in
# |manifest| under the given proto package name.
__protobuf__ = proto.module(
    package='google.cloud.websecurityscanner.v1beta',
    manifest={
        'CrawledUrl',
    },
)
class CrawledUrl(proto.Message):
    r"""A URL discovered while crawling during a ScanRun.

    Web Security Scanner Service crawls the web applications, following
    all links within the scope of sites, to find the URLs to test
    against.

    Attributes:
        http_method (str):
            The http method of the request that was used
            to visit the URL, in uppercase.
        url (str):
            The URL that was crawled.
        body (str):
            The body of the request that was used to
            visit the URL.
    """

    http_method = proto.Field(proto.STRING, number=1)
    url = proto.Field(proto.STRING, number=2)
    body = proto.Field(proto.STRING, number=3)
# Public API: every message name registered in the proto manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
| 27.186441 | 74 | 0.657107 | 214 | 1,604 | 4.859813 | 0.560748 | 0.057692 | 0.043269 | 0.060577 | 0.147115 | 0.069231 | 0.069231 | 0.069231 | 0.069231 | 0.069231 | 0 | 0.011026 | 0.264963 | 1,604 | 58 | 75 | 27.655172 | 0.871077 | 0.662095 | 0 | 0.136364 | 0 | 0 | 0.102564 | 0.081197 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.045455 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
218172bb8e01270d10bfb5ce4012d602f95f1b03 | 4,173 | py | Python | src/Chapters/Chapter6.py | Fennec2000GH/Python-Crash-Course | 421072c13ec4527405297af5fc7920d963cf0fda | [
"MIT"
] | null | null | null | src/Chapters/Chapter6.py | Fennec2000GH/Python-Crash-Course | 421072c13ec4527405297af5fc7920d963cf0fda | [
"MIT"
] | null | null | null | src/Chapters/Chapter6.py | Fennec2000GH/Python-Crash-Course | 421072c13ec4527405297af5fc7920d963cf0fda | [
"MIT"
] | null | null | null | # create dictionary
alien_0 = {'color': 'green', 'points': 5}
print(alien_0['color'])
print(alien_0['points'])
# assign variable to value in dictionary
new_points = alien_0['points']
print(f'You just earned {new_points} points!')
print(alien_0)
# adding new key-value pair
alien_0['x_position'] = 0
alien_0['y_position'] = 25
print(alien_0)
# starting with empty dictionary
alien_0 = {}
alien_0['color'] = 'green'
alien_0['points'] = 5
print(alien_0)
# modifying values in dictionary
alien_0['color'] = 'green'
print(f"The alien is {alien_0['color']}.")
alien_0['color'] = 'yellow'
print(f"The alien is now {alien_0['color']}.")
# more practice
alien_0 = {'x_position': 0, 'y_position': 25, 'speed': 'medium'}
print(f"Original position: {alien_0['x_position']}")
# Move the alien to the right.
# Determine how far to move the alien based on its current speed.
if alien_0['speed'] == 'slow':
x_increment = 1
elif alien_0['speed'] == 'medium':
x_increment = 2
else:
# This must be a fast alien.
x_increment = 3
# The new position is the old position plus the increment. v
alien_0['x_position'] = alien_0['x_position'] + x_increment
print(f"New position: {alien_0['x_position']}")
alien_0['speed'] = 'fast'
# removing key-value pairs
alien_0 = {'color': 'green', 'points': 5}
print(alien_0)
del alien_0['points']
print(alien_0)
# dictionary of similar objects
favorite_languages = {
'jen': 'python',
'sarah': 'c',
'edward': 'ruby',
'phil': 'python'
}
language = favorite_languages['sarah'].title()
print(f"Sarah's favorite language is {language}.")
# accesssing values in dictionary
alien_0 = {'color': 'green', 'speed': 'slow'}
point_value = alien_0.get('points', 'No point values assigned.')
print(point_value)
# looping through dictionary
# both keys and values
user_0 = {
'username': 'efermi',
'first': 'enrico',
'last': 'fermi'
}
for key, value in user_0.items():
print(f'\nKey: {key}')
print(f'Value: {value}')
for name, language in favorite_languages.items():
print(f"{name.title()}'s favorite language is {language.title()}.")
# keys only
for name in favorite_languages.keys():
print(name.title())
friends = ['phil', 'sarah']
for name in favorite_languages.keys():
print(name.title())
if name in friends:
language = favorite_languages[name].title()
print(f'\t{name.title()}, I see you love {language}!')
if 'erin' not in favorite_languages.keys():
print('Erin, please take our poll!')
# sorted keys
for name in sorted(favorite_languages.keys()):
print(f'{name.title()}, thank you for taking the poll.')
# values only
print("The following languages have been mentioned:")
for language in favorite_languages.values():
print(language.title())
# using set
for language in set(favorite_languages.values()):
print(language.title())
# nesting
# list of dictionaries
alien_0 = {'color': 'green', 'points': 5}
alien_1 = {'color': 'yellow', 'points': 10}
alien_2 = {'color': 'red', 'points': 15}
aliens = [alien_0, alien_1, alien_2]
for alien in aliens:
print(alien)
# list in dicitonary
pizza = {
'crust': 'thick',
'toppings': ['mushrooms', 'extra cheese']
}
print(f"You ordered a {pizza['crust']}-crust pizza with the following toppings:")
for topping in pizza['toppings']:
print("\t" + topping)
favorite_languages = {
'jen': ['python', 'ruby'],
'sarah': ['c'],
'edward': ['ruby', 'go'], 'phil': ['python', 'haskell']
}
for name, languages in favorite_languages.items():
print(f"\n{name.title()}'s favorite languages are:")
for language in languages:
print(f"\t{language.title()}")
# dictionary in dictionary
users = {
'aeinstein': {
'first': 'albert',
'last': 'einstein',
'location': 'princeton'
},
'mcurie': {
'first': 'marie',
'last': 'curie',
'location': 'paris'
}
}
for username, user_info in users.items():
print(f"\nUsername: {username}")
full_name = f"{user_info['first']}" f"{user_info['last']}"
location = user_info['location']
print(f"\tFull name: {full_name.title()}")
print(f"\tLocation: {location.title()}")
| 26.08125 | 81 | 0.654685 | 579 | 4,173 | 4.594128 | 0.278066 | 0.07218 | 0.041353 | 0.03609 | 0.242481 | 0.16203 | 0.084211 | 0.058647 | 0.058647 | 0 | 0 | 0.015928 | 0.172538 | 4,173 | 159 | 82 | 26.245283 | 0.754416 | 0.141145 | 0 | 0.163636 | 0 | 0 | 0.362844 | 0.019112 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.3 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2181c2135fcc99d7c1b53305b3e3de21dd2e227e | 6,936 | py | Python | MWW-inventory/cooperage.py | ProsperousHeart/Training-Events | 71aa6fae8e77f61924619407f34c39b3ace17fe4 | [
"MIT"
] | 6 | 2019-04-17T17:41:36.000Z | 2021-02-10T20:50:59.000Z | MWW-inventory/cooperage.py | ProsperousHeart/Training-Events | 71aa6fae8e77f61924619407f34c39b3ace17fe4 | [
"MIT"
] | 2 | 2018-11-27T06:33:06.000Z | 2019-02-20T04:35:49.000Z | MWW-inventory/cooperage.py | ProsperousHeart/Personal-Projects | 71aa6fae8e77f61924619407f34c39b3ace17fe4 | [
"MIT"
] | 3 | 2019-04-23T03:26:55.000Z | 2020-04-17T14:38:39.000Z | import cooper_item
import logging # https://docs.python.org/3/library/logging.html
import pprint
import table_mod as tables
from money import Money
# create_item = cooperidge_item.CooperageItem
# https://docs.python.org/3/library/logging.html#logrecord-attributes
#fmt = '%(asctime)s | %(levelname)s %(funcname)s at line %(lineno)s: %(message)s'
#logging.basicConfig(format=fmt, filename='CooperAppLogs.log', filemode='w',
# level=logging.DEBUG, datefmt='%m/%d/%Y %I:%M:%S %p')
# fmt = '%(asctime)s | %(levelname)s - %(module)s line %(lineno)d, %(funcName)s: %(message)s'
fmt = '%(asctime)s | %(levelname)s - %(module)s.py [%(lineno)d]: %(message)s'
# fmt = '%(created)f | %(levelname)s: %(message)s'
logging.basicConfig(format=fmt, filename='CooperAppLogs.log', filemode='w',
level=logging.DEBUG, datefmt='%m/%d/%Y %I:%M:%S %p')
# removing filemode will append logs vs overwrite
logger = logging.getLogger(__name__)
pp = pprint.PrettyPrinter(indent=4)
# base_wood = [{'at_cost': Money(amount=3.65, currency='USA'),
base_wood = [{'at_cost': round(3.65, 2),
'hard_vs_soft': 'medium',
# 'markup': Money(amount=0.00, currency='USA'),
'markup': 0.00,
'vip_bool': False,
'wood_name': 'birch'},
# {'at_cost': Money(amount=2.50, currency='USA'),
{'at_cost': round(2.50, 2),
'hard_vs_soft': 'hard',
# 'markup': Money(amount=0.00, currency='USA'),
'markup': round(0.00, 2),
'vip_bool': False,
'wood_name': 'oak'},
# {'at_cost': Money(amount=2.25, currency='USA'),
{'at_cost': round(2.25, 2),
'hard_vs_soft': 'soft',
# 'markup': Money(amount=0.00, currency='USA'),
'markup': round(0.00, 2),
'vip_bool': False,
'wood_name': 'pine'}]
class entry_exit(object):
    """
    Decorator that logs entry to and exit from the wrapped callable.

    Based on:
    http://python-3-patterns-idioms-test.readthedocs.io/en/latest/PythonDecorators.html#slightly-more-useful

    Fixed over the original version: arguments are forwarded to the wrapped
    function and its return value is propagated, so the decorator works on
    functions with any signature (the original __call__ took no arguments
    and silently discarded the result).
    """

    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kwargs):
        log = logging.getLogger(__name__)
        log.debug('Starting {} for cooper_item.py file'.format(self.func.__name__))
        result = self.func(*args, **kwargs)
        log.debug('Completing {} for cooper_item.py file'.format(self.func.__name__))
        return result
# return entry_exit
# @entry_exit
def create_piece(item_num):
    """Build and return a fresh CooperageItem numbered *item_num*."""
    piece = cooper_item.CooperageItem(sum_num=item_num)
    logger.debug('Returning new cooper_item via create_piece()')
    return piece
# Create options
# - create new cooper_item (cup, sign, furniture, etc)
# - create new location (where they take the cups to)
def db_manip(sql_cnxn, to_do: str, data, table_name: str = None):
    """Dispatch a database operation.

    Parameters
    ----------
    sql_cnxn : open database connection
    to_do : operation name; only 'add' (case-insensitive) is implemented
    data : payload handed to the table helper
    table_name : target table name; current tables are 'wood' and 'items'
    """
    logger.warning('Check and ensure incoming parameters match expectations')
    if to_do.lower() != 'add':
        # Other operations (update/delete/...) are not implemented yet.
        logger.error('Called on a function not yet written!')
        return
    logger.info('Adding item to DB table: {}'.format(table_name))
    tables.add_to_table(sql_cnxn, table_name, data)
# viewing options
# - all items
# - just cups
# - just Furniture
# - just signs
# - wood inventory
# - sold
# - to sell
# - what items are in what location
# update cooperage cooperidge_item:
# - add final piece number
# - update artwork
# - update price sold
# - update notes
# - update _completion_date
# - update handle side, staves #, size if Wood_Cup
# exportation ability
# - export to Excel locally
# - export to Google excel
# also looking for ability to backup DB locally???
if __name__ == "__main__":
    """
    This function ensures that if this file is run, it is read as a source file.
    """
    logger.debug('Starting __init__ for cooper_app.py file')
    logger.warning('Need to create a different DB log for JUST changes...')
    logger.warning('All setter methods will eventually need to also update the DB')
    # Database needs ...
    # - DB check & create if none found
    # - add to DB
    # - update in DB (per piece, as well as wood resources)
    logger.debug('Checking for local DB...')
    sql_cnxn = tables.create_connection('cooperage')
    tables.check_4_tbls(sql_cnxn, base_wood, cooper_item.CooperageItem())
    # item_list = []
    piece_num = 1
    logger.info('Creating new item...')
    item = create_piece(piece_num)
    logger.info('New item created!')
    print('New item created - adding to DB!')
    # pp.pprint(item.get_dict())
    # replacing internal data pieces with adding to a DB
    # item_list.append(item)
    logger.debug('Calling sql_cnxn() for ADD...')
    # db_manip(sql_cnxn, 'add', 'items', item)
    rtn_tuple = tables.add_to_table(sql_cnxn, 'items', item.get_txtDict())
    # NOTE(review): add_to_table appears to return a tuple whose first
    # element is truthy on failure — confirm against table_mod.
    if rtn_tuple[0] == True:
        print('ERROR: There was an error adding item to the DB.')
    piece_num += 1
    print('===' * 5)
    logger.info('Updating at_cost...')
    logger.warning('Need to update DB not just local variable.')
    # item.at_cost(4.69)
    item.at_cost = 4.69
    print("Updated data after changing at_cost to $4.69:\n{}".format(pprint.pformat(item.get_dict())))
    print('===' * 5)
    # Second item: same create-and-insert flow as above.
    logger.info('Creating new item...')
    item = create_piece(piece_num)
    print('New item created - adding to DB!')
    # pp.pprint(item.get_dict())
    # replacing internal data pieces with adding to a DB
    # item_list.append(item)
    logger.debug('Calling sql_cnxn() for ADD...')
    # db_manip(sql_cnxn, 'add', 'items', item)
    rtn_tuple = tables.add_to_table(sql_cnxn, 'items', item.get_txtDict())
    if rtn_tuple[0] == True:
        print('ERROR: There was an error adding item to the DB.')
    piece_num += 1
    print('===' * 5)
    # print("Hardnesses: {}".format([item.woodType.hard_vs_soft for item in item_list]))
    # print('Need to get Hardnesses of all wood of items in DB')
    logger.warning('Need to get Hardnesses of all wood of items in DB')
    # print("Wood Type: {}".format([item.woodType.wood_name for item in item_list]))
    # print('Need to get names of all wood type of items in DB')
    logger.warning('Need to get names of all wood type of items in DB')
    logger.debug('Starting tests for SQL DB calls...')
    print('Printing full DB:')
    tables.print_tables(sql_cnxn, log_list=True)
    sql_cnxn.close()
    logger.debug('Completing __init__ for cooper_app.py file')
| 34.336634 | 108 | 0.634371 | 965 | 6,936 | 4.40829 | 0.278756 | 0.019746 | 0.009403 | 0.017866 | 0.397038 | 0.372826 | 0.346262 | 0.333098 | 0.307005 | 0.274565 | 0 | 0.011756 | 0.227364 | 6,936 | 201 | 109 | 34.507463 | 0.782049 | 0.397491 | 0 | 0.292683 | 0 | 0.012195 | 0.32667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.060976 | 0 | 0.134146 | 0.146341 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21827a2ebb42f575b8b66013cc122ecef6030ccc | 1,667 | py | Python | homework03/main.py | PatrickKalkman/pirplepython | d622ce4541e679d63d56115e3c6b21d39564993c | [
"MIT"
] | null | null | null | homework03/main.py | PatrickKalkman/pirplepython | d622ce4541e679d63d56115e3c6b21d39564993c | [
"MIT"
] | null | null | null | homework03/main.py | PatrickKalkman/pirplepython | d622ce4541e679d63d56115e3c6b21d39564993c | [
"MIT"
] | null | null | null | """
Python Is Easy course @Pirple.com
Homework Assignment #3: "If" Statements
Patrick Kalkman / patrick@simpletechture.nl
Details:
Create a function that accepts 3 parameters and checks for equality between
any two of them.
Your function should return True if 2 or more of the parameters are equal,
and false is none of them are equal to any of the others.
Extra Credit:
Modify your function so that strings can be compared to integers if they are
equivalent. For example, if the following values are passed to
your function: 6,5,"5"
You should modify it so that it returns true instead of false.
Hint: there's a built in Python function called "int" that will help you
convert strings to Integers.
"""
def areTwoOrMoreInputsEqual(param1, param2, param3):
convertedParam1 = int(param1)
convertedParam2 = int(param2)
convertedParam3 = int(param3)
if convertedParam1 == convertedParam2:
return True
if convertedParam1 == convertedParam3:
return True
if convertedParam2 == convertedParam3:
return True
return False
result = areTwoOrMoreInputsEqual(1, 2, 3)
print(f"(1,2,3) should return False and is {result}")
result = areTwoOrMoreInputsEqual(1, 1, 3)
print(f"(1,1,3) should return True and is {result}")
result = areTwoOrMoreInputsEqual(1, 2, 2)
print(f"(1,2,2) should return True and is {result}")
result = areTwoOrMoreInputsEqual(1, 2, 1)
print(f"(1,2,1) should return True and is {result}")
result = areTwoOrMoreInputsEqual(1, 1, 1)
print(f"(1,1,1) should return True and is {result}")
result = areTwoOrMoreInputsEqual(1, 2, "1")
print(f"(1,2,'1') should return True and is {result}")
| 26.887097 | 77 | 0.728254 | 248 | 1,667 | 4.895161 | 0.354839 | 0.074135 | 0.079077 | 0.070016 | 0.265239 | 0.265239 | 0.265239 | 0.229819 | 0.229819 | 0.229819 | 0 | 0.041789 | 0.181764 | 1,667 | 61 | 78 | 27.327869 | 0.84824 | 0.424115 | 0 | 0.130435 | 0 | 0 | 0.26919 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0 | 0 | 0.217391 | 0.26087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21898a7d154a88e2d3cffd20cd2b52c26a47aa0f | 4,201 | py | Python | compiler.py | Hyper5phere/c-minus-compiler | 2b111f98c9f2b0ba7473f169e91e5ef7373c1929 | [
"MIT"
] | 20 | 2020-09-20T22:58:58.000Z | 2022-03-28T06:35:32.000Z | compiler.py | Hyper5phere/c-minus-compiler | 2b111f98c9f2b0ba7473f169e91e5ef7373c1929 | [
"MIT"
] | null | null | null | compiler.py | Hyper5phere/c-minus-compiler | 2b111f98c9f2b0ba7473f169e91e5ef7373c1929 | [
"MIT"
] | 15 | 2020-10-04T10:56:10.000Z | 2022-03-28T10:11:46.000Z | import os
import sys
import time
import argparse
import subprocess as sp
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(script_dir, "modules"))
from cparser import Parser
from scanner import Scanner, SymbolTableManager
from semantic_analyser import SemanticAnalyser
from code_gen import CodeGen, MemoryManager
# Maximal virtual memory for compiled program process (in bytes).
MAX_VIRTUAL_MEMORY = 50 * 1024 * 1024 # 50 MB
def limit_virtual_memory():
    """Cap this process's address space; used as a subprocess preexec_fn."""
    import resource  # POSIX-only module, hence the local import
    limit = (MAX_VIRTUAL_MEMORY, MAX_VIRTUAL_MEMORY)
    resource.setrlimit(resource.RLIMIT_AS, limit)
def compile(args):
    """Compile args.source_file and optionally execute the produced program.

    Prints errors/success to stdout, writes any requested artifacts
    (AST, symbol table, tokens, error files) and, with args.run, executes
    the generated output via the bundled interpreter binary.
    """
    # NOTE(review): this shadows the builtin compile(); renaming would
    # break the call in __main__, so the name is kept.
    print("Compiling", args.source_file)
    SymbolTableManager.init()
    MemoryManager.init()
    parser = Parser(args.source_file)
    start = time.time()
    parser.parse()
    stop = time.time() - start
    print(f"Compilation took {stop:.6f} s")
    if not SymbolTableManager.error_flag:
        print("Compilation successful!")
    else:
        print("Compilation failed due to the following errors:\n")
        print(parser.scanner.lexical_errors)
        print(parser.syntax_errors)
        print(parser.semantic_analyzer.semantic_errors)
    # Optional artifacts requested on the command line.
    if args.abstract_syntax_tree:
        parser.save_parse_tree()
    if args.symbol_table:
        parser.scanner.save_symbol_table()
    if args.tokens:
        parser.scanner.save_tokens()
    if args.error_files:
        parser.save_syntax_errors()
        parser.scanner.save_lexical_errors()
        parser.semantic_analyzer.save_semantic_errors()
    parser.code_generator.save_output()
    if args.run and not SymbolTableManager.error_flag:
        print("Executing compiled program")
        # Pick the interpreter binary for the current platform.
        if os.name == "nt":
            tester_file = os.path.join(script_dir, "interpreter", "tester_Windows.exe")
        elif os.name == "posix":
            tester_file = os.path.join(script_dir, "interpreter", "tester_Linux.out")
        else:
            tester_file = os.path.join(script_dir, "interpreter", "tester_Mac.out")
        output_file = os.path.join(script_dir, "output", "output.txt")
        output_dir = os.path.dirname(output_file)
        if os.path.exists(output_file):
            # preexec_fn is unavailable on Windows; cap memory elsewhere.
            preexec_fn = limit_virtual_memory if os.name != "nt" else None
            stderr = sp.PIPE if not args.verbose else None
            start = time.time()
            try:
                tester_output = sp.check_output(tester_file, cwd=output_dir,
                                                stderr=stderr, timeout=10,
                                                preexec_fn=preexec_fn).decode("utf-8")
            except sp.TimeoutExpired:
                print("RuntimeError: Execution timed out!")
            else:
                if not args.verbose:
                    # Keep only the interpreter's PRINT lines, stripped of
                    # the PRINT prefix.
                    tester_output = "\n".join([line.replace("PRINT", "").strip()
                                               for line in tester_output.splitlines()
                                               if line.startswith("PRINT")])
                stop = time.time() - start
                print(f"Execution took {stop:.6f} s")
                print("Program output:")
                print(tester_output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Simple C Compiler written in Python')
    parser.add_argument("source_file", help="Path to C source file.")
    parser.add_argument('-r', '--run', action='store_true', help='Run the output program after compilation.')
    parser.add_argument('-v', '--verbose', action='store_true', help='Print all used three address codes.')
    parser.add_argument('-ef', '--error-files', action='store_true', help='Save compilation errors to text files.')
    parser.add_argument('-ast', '--abstract-syntax-tree', action='store_true', help='Save abstract syntax tree into a text file.')
    parser.add_argument('-st', '--symbol-table', action='store_true', help='Save symbol table into a text file.')
    args = parser.parse_args()
    # Bug fix: the original parsed the arguments a second time after this
    # adjustment (discarding it), and replaced a relative source path with
    # abspath(script_dir), losing the filename entirely.  Resolve relative
    # paths against the compiler's own directory instead.
    if not os.path.isabs(args.source_file):
        args.source_file = os.path.join(script_dir, args.source_file)
    compile(args)
| 43.760417 | 130 | 0.645085 | 518 | 4,201 | 5.046332 | 0.305019 | 0.025249 | 0.045524 | 0.043611 | 0.171385 | 0.102142 | 0.075746 | 0.052793 | 0.052793 | 0 | 0 | 0.005648 | 0.241371 | 4,201 | 95 | 131 | 44.221053 | 0.814559 | 0.016425 | 0 | 0.105882 | 0 | 0 | 0.191814 | 0.005328 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023529 | false | 0 | 0.117647 | 0 | 0.141176 | 0.141176 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
218a8e1accf1fbbcec9a82434ec70681538880eb | 4,902 | py | Python | app/main/views.py | adhoadhi/PITCH | e0e02adf32966bcc622aad4b732e7a7d2db041e5 | [
"Unlicense"
] | null | null | null | app/main/views.py | adhoadhi/PITCH | e0e02adf32966bcc622aad4b732e7a7d2db041e5 | [
"Unlicense"
] | null | null | null | app/main/views.py | adhoadhi/PITCH | e0e02adf32966bcc622aad4b732e7a7d2db041e5 | [
"Unlicense"
] | null | null | null | from flask import render_template, request, redirect, url_for, abort
from . import main
from flask_login import login_required, current_user
from ..models import User, PitchCategory, Pitches, Comments
from .forms import UpdateProfile, PitchForm, CommentForm, CategoriesForm
from .. import db, photos
@main.route('/')
def index():
    """Render the landing page listing every pitch category."""
    all_categories = PitchCategory.get_categories()
    return render_template(
        'index.html',
        title='Home- Welcome to the Pitch Website',
        categories=all_categories,
    )
# Route for adding a new pitch
@main.route('/category/pitch/new/<int:id>', methods=['GET', 'POST'])
@login_required
def new_pitch(id):
    """Show the new-pitch form for a category and save valid submissions."""
    category = PitchCategory.query.filter_by(id=id).first()
    if category is None:
        abort(404)
    form = PitchForm()
    if form.validate_on_submit():
        pitch = Pitches(actual_pitch=form.content.data,
                        user_id=current_user.id,
                        category_id=category.id)
        pitch.save_pitch()
        return redirect(url_for('.category', id=category.id))
    return render_template('new_pitch.html', pitch_form=form, category=category)
# Routes for displaying the different pitches
@main.route('/category/new', methods=['GET', 'POST'])
@login_required
def new_category():
    """Show the category-creation form and persist a valid submission."""
    form = CategoriesForm()
    if form.validate_on_submit():
        category = PitchCategory(name=form.name.data)
        category.save_category()
        return redirect(url_for('.index'))
    return render_template('new_category.html', categories_form=form)
@main.route('/category/<int:id>')
def category(id):
    """List all pitches belonging to the chosen category, 404 if unknown."""
    chosen = PitchCategory.query.get(id)
    if chosen is None:
        abort(404)
    return render_template('category.html',
                           category=chosen,
                           pitches=Pitches.get_pitches(id))
@main.route('/pitch/<int:id>', methods=['GET', 'POST'])
@login_required
def single_pitch(id):
    """Show one pitch together with its comments, 404 if unknown."""
    pitch = Pitches.query.get(id)
    if pitch is None:
        abort(404)
    return render_template('pitch.html',
                           pitches=pitch,
                           comment=Comments.get_comments(id))
# Routes for user authentication
@main.route('/user/<uname>')
@login_required
def profile(uname):
    """Render a user's profile page; 404 if the username is unknown."""
    account = User.query.filter_by(username=uname).first()
    if account is None:
        abort(404)
    return render_template("profile/profile.html", user=account)
@main.route('/user/<uname>/update', methods=['GET', 'POST'])
@login_required
def update_profile(uname):
    """Let a user edit their bio; redirects back to the profile on save."""
    account = User.query.filter_by(username=uname).first()
    if account is None:
        abort(404)
    form = UpdateProfile()
    if form.validate_on_submit():
        account.bio = form.bio.data
        db.session.add(account)
        db.session.commit()
        return redirect(url_for('.profile', uname=account.username))
    return render_template('profile/update.html', form=form)
@main.route('/user/<uname>/update/pic', methods=['POST'])
@login_required
def update_pic(uname):
    """Save an uploaded profile photo and record its path on the user."""
    user = User.query.filter_by(username=uname).first()
    if user is None:
        # Guard added for consistency with the other profile views.
        abort(404)
    if 'photo' in request.files:
        filename = photos.save(request.files['photo'])
        # Bug fix: the original assigned the literal 'photos/(unknown)'
        # and never used the saved filename.
        path = f'photos/{filename}'
        user.profile_pic_path = path
        db.session.commit()
    return redirect(url_for('main.profile', uname=uname))
# Route to add commments.
@main.route('/pitch/new/<int:id>', methods=['GET', 'POST'])
@login_required
def new_comment(id):
    """Show the comment form for a pitch and save valid submissions."""
    pitch = Pitches.query.filter_by(id=id).first()
    if pitch is None:
        abort(404)
    form = CommentForm()
    if form.validate_on_submit():
        comment = Comments(comment_id=form.comment_id.data,
                           user_id=current_user.id,
                           pitches_id=pitch.id)
        comment.save_comment()
        return redirect(url_for('.category', id=pitch.category_id))
    return render_template('comment.html', comment_form=form)
return render_template('comment.html', comment_form=form)
@main.route('/like/<id>')
@login_required
def like(id):
    """Record a like for pitch |id| by the current user (idempotent)."""
    # NOTE(review): Likes is not imported at the top of this module; it
    # must be imported from ..models for this view to work.
    existing = Likes.query.filter(Likes.users_id == current_user.id,
                                  Likes.post_id == id).first()
    if existing:
        return url_for('main.new_pitch', id=id)
    # Bug fix: the original omitted post_id (so the like was never tied to
    # a pitch) and passed the Likes.post_id Column object as the url id.
    Likes(users_id=current_user.id, post_id=id).save()
    return url_for('main.new_pitch', id=id)
@main.route('/dislike/<id>')
@login_required
def dislike(id):
    """Record a dislike for pitch |id| by the current user (idempotent)."""
    # NOTE(review): Dislikes is not imported at the top of this module; it
    # must be imported from ..models for this view to work.
    existing = Dislikes.query.filter(Dislikes.users_id == current_user.id,
                                     Dislikes.post_id == id).first()
    if existing:
        return url_for('main.new_pitch', id=id)
    # Bug fix: redirect with the actual pitch id rather than the
    # Dislikes.post_id Column object.
    Dislikes(users_id=current_user.id, post_id=id).save()
    return url_for('main.new_pitch', id=id)
| 29.709091 | 91 | 0.695838 | 669 | 4,902 | 4.946188 | 0.16293 | 0.029918 | 0.043518 | 0.036265 | 0.365065 | 0.297975 | 0.22061 | 0.174977 | 0.1644 | 0.139015 | 0 | 0.00443 | 0.171155 | 4,902 | 164 | 92 | 29.890244 | 0.809993 | 0.088739 | 0 | 0.311927 | 0 | 0 | 0.11528 | 0.011824 | 0 | 0 | 0 | 0 | 0 | 1 | 0.100917 | false | 0 | 0.055046 | 0 | 0.311927 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
218b6c65cc4bb546fc7b94bf21f63dd98d3f4aef | 859 | py | Python | shifts/urls.py | Beshutaf/shift-report | d8784ba3901fcbbbe20b31d7b9435d8eb808bdc5 | [
"MIT"
] | null | null | null | shifts/urls.py | Beshutaf/shift-report | d8784ba3901fcbbbe20b31d7b9435d8eb808bdc5 | [
"MIT"
] | null | null | null | shifts/urls.py | Beshutaf/shift-report | d8784ba3901fcbbbe20b31d7b9435d8eb808bdc5 | [
"MIT"
] | null | null | null | from django.conf.urls import include, url
from django.contrib import admin
from django.template.response import TemplateResponse
from django.views.generic import TemplateView, RedirectView
from django.contrib.staticfiles.views import serve
from shifts import settings
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='index.html'), name="home"),
url(r'^', include('shift_report.urls')),
url(r'^favicon.ico$', RedirectView.as_view(url=settings.STATIC_URL + 'favicon.ico')),
url(r'^robots.txt$',
lambda request: TemplateResponse(
request,
template='robots.txt',
content_type='text/plain',
)
),
url(r'^admin/', admin.site.urls),
# url('^', include('django.contrib.auth.urls')),
]
if settings.DEBUG:
urlpatterns += [
url(r'^(?P<path>.*)$', serve),
] | 29.62069 | 89 | 0.657742 | 103 | 859 | 5.427184 | 0.456311 | 0.042934 | 0.060823 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.187427 | 859 | 29 | 90 | 29.62069 | 0.80086 | 0.053551 | 0 | 0 | 0 | 0 | 0.1367 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.26087 | 0 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
218dc3061d5bd2699c54d77094a85e1b5564ed76 | 3,388 | py | Python | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/pyNN/neuron/nineml.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | 2 | 2020-11-01T13:22:11.000Z | 2020-11-01T13:22:20.000Z | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/pyNN/neuron/nineml.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | null | null | null | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/pyNN/neuron/nineml.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | null | null | null | """
Support cell types defined in 9ML with NEURON.
Requires the 9ml2nmodl script to be on the path.
Classes:
NineMLCell - a single neuron instance
NineMLCellType - base class for cell types, not used directly
Functions:
nineml_cell_type - return a new NineMLCellType subclass
Constants:
NMODL_DIR - subdirectory to which NMODL mechanisms will be written
:copyright: Copyright 2006-2013 by the PyNN team, see AUTHORS.
:license: CeCILL, see LICENSE for details.
"""
from __future__ import absolute_import # Not compatible with Python 2.4
import subprocess
import neuron
from pyNN.nineml.cells import join, _add_prefix, _build_nineml_celltype, NineMLCellType
import logging
import os
h = neuron.h
logger = logging.getLogger("PyNN")
NMODL_DIR = "nineml_mechanisms"
class NineMLCell(object):
    """A single neuron instance built from a 9ML component type."""

    def __init__(self, **parameters):
        self.type = parameters.pop("type")
        self.source_section = h.Section()
        # Instantiate the generated NMODL mechanism at the middle (0.5) of
        # the section; remaining keyword parameters set mechanism fields.
        self.source = getattr(h, self.type.model_name)(0.5, sec=self.source_section)
        for param, value in parameters.items():
            setattr(self.source, param, value)
        # for recording
        self.spike_times = h.Vector(0)
        self.traces = {}
        self.recording_time = False

    def __getattr__(self, name):
        # Invoked only when normal attribute lookup fails; forwards
        # synapse-type names to the source mechanism.
        try:
            return self.__getattribute__(name)
        except AttributeError:
            if name in self.type.synapse_types:
                return self.source  # source is also target
            else:
                raise AttributeError("'NineMLCell' object has no attribute or synapse type '%s'" % name)

    def record(self, active):
        # Start (active=True) or reset (active=False) spike-time recording.
        if active:
            rec = h.NetCon(self.source, None)
            rec.record(self.spike_times)
        else:
            self.spike_times = h.Vector(0)

    def memb_init(self):
        # this is a bit of a hack
        # Copy any '<var>_init' attribute values onto the mechanism before
        # a simulation starts.
        for var in self.type.recordable:
            if hasattr(self, "%s_init" % var):
                initial_value = getattr(self, "%s_init" % var)
                logger.debug("Initialising %s to %g" % (var, initial_value))
                setattr(self.source, var, initial_value)
def _compile_nmodl(nineml_component, weight_variables): # weight variables should really be within component
"""
Generate NMODL code for the 9ML component, run "nrnivmodl" and then load
the mechanisms into NEURON.
"""
if not os.path.exists(NMODL_DIR):
os.makedirs(NMODL_DIR)
cwd = os.getcwd()
os.chdir(NMODL_DIR)
xml_file = "%s.xml" % nineml_component.name
logger.debug("Writing NineML component to %s" % xml_file)
nineml_component.write(xml_file)
nineml2nmodl = __import__("9ml2nmodl")
nineml2nmodl.write_nmodl(xml_file, weight_variables) # weight variables should really come from xml file
p = subprocess.check_call(["nrnivmodl"])
os.chdir(cwd)
neuron.load_mechanisms(NMODL_DIR)
def nineml_cell_type(name, neuron_model, port_map={}, weight_variables={}, **synapse_models):
"""
Return a new NineMLCellType subclass.
"""
return _build_nineml_celltype(name, (NineMLCellType,),
{'neuron_model': neuron_model,
'synapse_models': synapse_models,
'port_map': port_map,
'weight_variables': weight_variables})
| 33.88 | 108 | 0.645809 | 413 | 3,388 | 5.104116 | 0.394673 | 0.033207 | 0.019924 | 0.042695 | 0.091082 | 0.060721 | 0 | 0 | 0 | 0 | 0 | 0.008839 | 0.265348 | 3,388 | 99 | 109 | 34.222222 | 0.838088 | 0.242916 | 0 | 0.068966 | 0 | 0 | 0.087803 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.12069 | 0 | 0.293103 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
218e022befdbcc3b099f59fc82691e5701aaf7ed | 693 | py | Python | Network/perf.py | LunarWatcher/NN-chatbot-legacy | 50e6d124cb3dbab7d79be3cc61b1c4aa45cbcc68 | [
"Apache-2.0"
] | null | null | null | Network/perf.py | LunarWatcher/NN-chatbot-legacy | 50e6d124cb3dbab7d79be3cc61b1c4aa45cbcc68 | [
"Apache-2.0"
] | null | null | null | Network/perf.py | LunarWatcher/NN-chatbot-legacy | 50e6d124cb3dbab7d79be3cc61b1c4aa45cbcc68 | [
"Apache-2.0"
] | null | null | null | from time import *
def ifOrTuple():
boolVal = False
t = time()
for i in range(10000000):
"test" if boolVal else "testFalse"
print("Average: {}".format(time() - t))
combined = 0.0
t = time()
for i in range(10000000):
("testFalse", "test")[boolVal]
print("Average: {}".format(time() - t))
def updateOrManual():
t = time()
x = {}
for i in range(10000000):
x[i] = i
print("Average: {}".format(time() - t))
t = time()
for i in range(10000000):
x.update({i: i})
print("Average: {}".format(time() - t))
if __name__ == "__main__":
ifOrTuple()
print("####")
updateOrManual()
print("####") | 21 | 43 | 0.52381 | 83 | 693 | 4.277108 | 0.325301 | 0.056338 | 0.067606 | 0.123944 | 0.532394 | 0.402817 | 0.343662 | 0 | 0 | 0 | 0 | 0.068136 | 0.279942 | 693 | 33 | 44 | 21 | 0.643287 | 0 | 0 | 0.518519 | 0 | 0 | 0.123919 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.037037 | 0 | 0.111111 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
218ed007c0e968a23bde2bf831865f422fd7eba5 | 5,404 | py | Python | loris/app/views/fly.py | gucky92/loris | 6f54b7d791d473f52690380d71da0acc0352d954 | [
"MIT"
] | 1 | 2021-08-01T02:02:54.000Z | 2021-08-01T02:02:54.000Z | loris/app/views/fly.py | gucky92/loris | 6f54b7d791d473f52690380d71da0acc0352d954 | [
"MIT"
] | null | null | null | loris/app/views/fly.py | gucky92/loris | 6f54b7d791d473f52690380d71da0acc0352d954 | [
"MIT"
] | 3 | 2020-03-31T10:26:46.000Z | 2021-08-02T00:12:54.000Z | """fly specific views
"""
import os
from flask import render_template, request, flash, url_for, redirect, \
send_from_directory, session
from functools import wraps
from flask_login import current_user, login_user, login_required, logout_user
import datajoint as dj
import pandas as pd
from loris import config
from loris.app.app import app
from loris.app.templates import form_template, joined_table_template
from loris.app.forms.dynamic_form import DynamicForm
from loris.app.forms.fixed import (
dynamic_jointablesform, dynamic_settingstableform, LoginForm,
PasswordForm, dynamic_tablecreationform
)
from loris.app.utils import draw_helper, get_jsontable, user_has_permission
from loris.utils import save_join
from loris.app.login import User
from loris.database.users import grantuser, change_password
from loris.io import string_load, string_dump
@app.route('/genotype', methods=['GET', 'POST'])
@login_required
def genotype():
schema = 'subjects'
table = 'FlyGenotype'
subtable = None
edit_url = url_for('genotype')
overwrite_url = url_for('genotype')
return form_template(
schema, table, subtable, edit_url, overwrite_url, page='genotype',
override_permissions=True
)
@app.route('/stock', methods=['GET', 'POST'])
@login_required
def stock():
schema = 'subjects'
table = 'FlyStock'
subtable = None
edit_url = url_for('stock')
overwrite_url = url_for('stock')
return form_template(
schema, table, subtable, edit_url, overwrite_url, page='stock',
join_tables=[getattr(config['schemata'][schema], 'FlyGenotype')],
joined_name='stockgenotype',
override_permissions=True,
)
@app.route('/cross', methods=['GET', 'POST'])
@login_required
def cross():
schema = 'subjects'
table = 'FlyCross'
subtable = None
edit_url = url_for('cross')
overwrite_url = url_for('cross')
load_url = url_for('crossload')
return form_template(
schema, table, subtable, edit_url, overwrite_url, page='cross',
join_tables=[getattr(config['schemata'][schema], 'FlyGenotype')],
joined_name='crossgenotype',
load_url=load_url
)
@app.route('/crossload', methods=['GET', 'POST'])
@login_required
def crossload():
_id = string_load(request.args.get('_id', string_dump(None)))
if _id is None or not isinstance(_id, dict) or 'cross_id' not in _id:
flash('No cross_id was given for loading FlyCross', 'error')
return redirect(url_for('cross'))
# combine tables
cross_table = getattr(config['schemata']['subjects'], 'FlyCross')
genotype_table = getattr(config['schemata']['subjects'], 'FlyGenotype')
# fetch data
joined_table = save_join([cross_table, genotype_table])
data = (joined_table & _id).fetch1()
image = data['cross_schema']
if image is not None:
image = os.path.abspath(image)
return render_template(
'pages/crossload.html',
cross_id=_id['cross_id'],
image=image,
experimenter=data['experimenter'],
chromosome=f"{data['chr1']}; {data['chr2']}; {data['chr3']}",
comments=data['comments']
)
@app.route('/entersubject', methods=['GET', 'POST'])
@login_required
def entersubject():
schema = 'subjects'
table = 'FlySubject'
subtable = None
edit_url = url_for('entersubject')
overwrite_url = url_for('entersubject')
return form_template(
schema, table, subtable, edit_url, overwrite_url, page='entersubject',
join_tables=[getattr(config['schemata'][schema], 'FlyGenotype')],
joined_name='subjectgenotype'
)
@app.route('/stockgenotype', methods=['GET', 'POST'])
@login_required
def stockgenotype():
"""join various tables in the database
"""
delete_url = url_for(
'delete', schema='subjects', table='FlyStock', subtable=None)
return joined_table_template(
['subjects.fly_genotype', 'subjects.fly_stock'],
'Stock + Genotype Table',
'stock',
edit_url=url_for('stock'),
delete_url=delete_url
)
@app.route('/crossgenotype', methods=['GET', 'POST'])
@login_required
def crossgenotype():
"""join various tables in the database
"""
delete_url = url_for(
'delete', schema='subjects', table='FlyCross', subtable=None)
return joined_table_template(
['subjects.fly_genotype', 'subjects.fly_cross'],
'Cross + Genotype Table',
'cross',
edit_url=url_for('cross'),
load_url=url_for('crossload'),
delete_url=delete_url
)
@app.route('/subjectgenotype', methods=['GET', 'POST'])
@login_required
def subjectgenotype():
"""join various tables in the database
"""
delete_url = url_for(
'delete', schema='subjects', table='FlySubject', subtable=None)
return joined_table_template(
['subjects.fly_genotype', 'subjects.fly_subject'],
'Subject + Genotype Table',
'entersubject',
edit_url=url_for('entersubject'),
delete_url=delete_url
)
@app.route('/stockcrossgenotype', methods=['GET', 'POST'])
@login_required
def stockcrossgenotype():
"""join various tables in the database
"""
return joined_table_template(
['subjects.fly_genotype', 'subjects.fly_stock', 'subjects.fly_cross'],
'Stock + Cross + Genotype Table',
'#',
)
| 28.898396 | 78 | 0.67154 | 632 | 5,404 | 5.531646 | 0.204114 | 0.030892 | 0.04119 | 0.048913 | 0.5 | 0.443078 | 0.275172 | 0.275172 | 0.275172 | 0.204805 | 0 | 0.000921 | 0.196151 | 5,404 | 186 | 79 | 29.053763 | 0.803867 | 0.038675 | 0 | 0.241135 | 0 | 0 | 0.2036 | 0.016257 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0.014184 | 0.113475 | 0 | 0.248227 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
219016ed9395191a9d92995122f8e4aa0387fd7a | 555 | py | Python | coroutines/coroutine_finally.py | lisboaxd/python_shards | 42c28288da7f08565de43a2f118216c28c07ce92 | [
"MIT"
] | null | null | null | coroutines/coroutine_finally.py | lisboaxd/python_shards | 42c28288da7f08565de43a2f118216c28c07ce92 | [
"MIT"
] | null | null | null | coroutines/coroutine_finally.py | lisboaxd/python_shards | 42c28288da7f08565de43a2f118216c28c07ce92 | [
"MIT"
] | null | null | null | class DemoException(Exception):
'''A kind of exception to demonstrate'''
def demonstrate_exc_finally():
print('-> coroutine started')
try:
while True:
try:
x = yield
except DemoException:
print('*** DemoExcepetion handled. Continuining...')
else:
print('-> Coroutine receied: {!r}'.format(x))
finally:
print('-> coroutine ending')
coro_finally = demonstrate_exc_finally()
next(coro_finally)
coro_finally.send(20)
coro_finally.send(50)
| 24.130435 | 68 | 0.590991 | 55 | 555 | 5.818182 | 0.6 | 0.1375 | 0.13125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010152 | 0.29009 | 555 | 22 | 69 | 25.227273 | 0.80203 | 0.061261 | 0 | 0.117647 | 0 | 0 | 0.209709 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0 | 0 | 0.117647 | 0.235294 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21938c91ae398fd3303b1f93d2ae749d6ad8be19 | 640 | py | Python | Lab Tests/2019.1 Lab Test/Solutions/q2a.py | alphatrl/IS111 | f09fc47f5321dc4d79b9dde97399109e2a368443 | [
"MIT"
] | null | null | null | Lab Tests/2019.1 Lab Test/Solutions/q2a.py | alphatrl/IS111 | f09fc47f5321dc4d79b9dde97399109e2a368443 | [
"MIT"
] | null | null | null | Lab Tests/2019.1 Lab Test/Solutions/q2a.py | alphatrl/IS111 | f09fc47f5321dc4d79b9dde97399109e2a368443 | [
"MIT"
] | null | null | null | def get_multiples_of(num_list, n):
'''
This function returns the number of numbers in num_list that are multiples of n.
If there is no number in num_list that is a multiple of n, the function returns 0.
Parameters:
- num_list, a list of positive integers; list may be empty
- n, a positive integer
'''
# write your answer between #start and #end
#start
number_multiples = 0
if len(num_list) == 0:
return number_multiples
for number in num_list:
if number % n == 0:
number_multiples += 1
return number_multiples
#end | 26.666667 | 87 | 0.6125 | 91 | 640 | 4.175824 | 0.450549 | 0.110526 | 0.071053 | 0.068421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011655 | 0.329688 | 640 | 24 | 88 | 26.666667 | 0.874126 | 0.496875 | 0 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2195736268951f73a7aaddba531d710135929ebd | 8,230 | py | Python | nipyapi/nifi/models/connection_diagnostics_snapshot_dto.py | Jimvin/nipyapi | 826beac376d4321bd2d69491f09086474c7e7bfb | [
"Apache-2.0"
] | 199 | 2017-08-24T12:19:41.000Z | 2022-03-20T14:50:17.000Z | nipyapi/nifi/models/connection_diagnostics_snapshot_dto.py | Jimvin/nipyapi | 826beac376d4321bd2d69491f09086474c7e7bfb | [
"Apache-2.0"
] | 275 | 2017-08-28T21:21:49.000Z | 2022-03-29T17:57:26.000Z | nipyapi/nifi/models/connection_diagnostics_snapshot_dto.py | Jimvin/nipyapi | 826beac376d4321bd2d69491f09086474c7e7bfb | [
"Apache-2.0"
] | 73 | 2017-09-07T10:13:56.000Z | 2022-02-28T10:37:21.000Z | # coding: utf-8
"""
NiFi Rest API
The Rest API provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.15.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ConnectionDiagnosticsSnapshotDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'total_flow_file_count': 'int',
'total_byte_count': 'int',
'node_identifier': 'str',
'local_queue_partition': 'LocalQueuePartitionDTO',
'remote_queue_partitions': 'list[RemoteQueuePartitionDTO]'
}
attribute_map = {
'total_flow_file_count': 'totalFlowFileCount',
'total_byte_count': 'totalByteCount',
'node_identifier': 'nodeIdentifier',
'local_queue_partition': 'localQueuePartition',
'remote_queue_partitions': 'remoteQueuePartitions'
}
def __init__(self, total_flow_file_count=None, total_byte_count=None, node_identifier=None, local_queue_partition=None, remote_queue_partitions=None):
"""
ConnectionDiagnosticsSnapshotDTO - a model defined in Swagger
"""
self._total_flow_file_count = None
self._total_byte_count = None
self._node_identifier = None
self._local_queue_partition = None
self._remote_queue_partitions = None
if total_flow_file_count is not None:
self.total_flow_file_count = total_flow_file_count
if total_byte_count is not None:
self.total_byte_count = total_byte_count
if node_identifier is not None:
self.node_identifier = node_identifier
if local_queue_partition is not None:
self.local_queue_partition = local_queue_partition
if remote_queue_partitions is not None:
self.remote_queue_partitions = remote_queue_partitions
@property
def total_flow_file_count(self):
"""
Gets the total_flow_file_count of this ConnectionDiagnosticsSnapshotDTO.
Total number of FlowFiles owned by the Connection
:return: The total_flow_file_count of this ConnectionDiagnosticsSnapshotDTO.
:rtype: int
"""
return self._total_flow_file_count
@total_flow_file_count.setter
def total_flow_file_count(self, total_flow_file_count):
"""
Sets the total_flow_file_count of this ConnectionDiagnosticsSnapshotDTO.
Total number of FlowFiles owned by the Connection
:param total_flow_file_count: The total_flow_file_count of this ConnectionDiagnosticsSnapshotDTO.
:type: int
"""
self._total_flow_file_count = total_flow_file_count
@property
def total_byte_count(self):
"""
Gets the total_byte_count of this ConnectionDiagnosticsSnapshotDTO.
Total number of bytes that make up the content for the FlowFiles owned by this Connection
:return: The total_byte_count of this ConnectionDiagnosticsSnapshotDTO.
:rtype: int
"""
return self._total_byte_count
@total_byte_count.setter
def total_byte_count(self, total_byte_count):
"""
Sets the total_byte_count of this ConnectionDiagnosticsSnapshotDTO.
Total number of bytes that make up the content for the FlowFiles owned by this Connection
:param total_byte_count: The total_byte_count of this ConnectionDiagnosticsSnapshotDTO.
:type: int
"""
self._total_byte_count = total_byte_count
@property
def node_identifier(self):
"""
Gets the node_identifier of this ConnectionDiagnosticsSnapshotDTO.
The Node Identifier that this information pertains to
:return: The node_identifier of this ConnectionDiagnosticsSnapshotDTO.
:rtype: str
"""
return self._node_identifier
@node_identifier.setter
def node_identifier(self, node_identifier):
"""
Sets the node_identifier of this ConnectionDiagnosticsSnapshotDTO.
The Node Identifier that this information pertains to
:param node_identifier: The node_identifier of this ConnectionDiagnosticsSnapshotDTO.
:type: str
"""
self._node_identifier = node_identifier
@property
def local_queue_partition(self):
"""
Gets the local_queue_partition of this ConnectionDiagnosticsSnapshotDTO.
The local queue partition, from which components can pull FlowFiles on this node.
:return: The local_queue_partition of this ConnectionDiagnosticsSnapshotDTO.
:rtype: LocalQueuePartitionDTO
"""
return self._local_queue_partition
@local_queue_partition.setter
def local_queue_partition(self, local_queue_partition):
"""
Sets the local_queue_partition of this ConnectionDiagnosticsSnapshotDTO.
The local queue partition, from which components can pull FlowFiles on this node.
:param local_queue_partition: The local_queue_partition of this ConnectionDiagnosticsSnapshotDTO.
:type: LocalQueuePartitionDTO
"""
self._local_queue_partition = local_queue_partition
@property
def remote_queue_partitions(self):
"""
Gets the remote_queue_partitions of this ConnectionDiagnosticsSnapshotDTO.
:return: The remote_queue_partitions of this ConnectionDiagnosticsSnapshotDTO.
:rtype: list[RemoteQueuePartitionDTO]
"""
return self._remote_queue_partitions
@remote_queue_partitions.setter
def remote_queue_partitions(self, remote_queue_partitions):
"""
Sets the remote_queue_partitions of this ConnectionDiagnosticsSnapshotDTO.
:param remote_queue_partitions: The remote_queue_partitions of this ConnectionDiagnosticsSnapshotDTO.
:type: list[RemoteQueuePartitionDTO]
"""
self._remote_queue_partitions = remote_queue_partitions
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ConnectionDiagnosticsSnapshotDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 34.872881 | 478 | 0.655286 | 898 | 8,230 | 5.732739 | 0.194878 | 0.05711 | 0.077506 | 0.066434 | 0.539239 | 0.470474 | 0.423854 | 0.281274 | 0.239899 | 0.192696 | 0 | 0.001354 | 0.281896 | 8,230 | 235 | 479 | 35.021277 | 0.869712 | 0.402916 | 0 | 0.074468 | 0 | 0 | 0.089526 | 0.050374 | 0 | 0 | 0 | 0 | 0 | 1 | 0.170213 | false | 0 | 0.031915 | 0 | 0.351064 | 0.010638 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2195ef8dc9eef8e0680f99fe7fef7e19264acba5 | 11,726 | py | Python | blue_st_examples/read_sensors.py | cchangeur/BlueSTSDK_Python | e5c6e4bc5a58680bad0d867633dd9d92012b9baf | [
"BSD-3-Clause"
] | null | null | null | blue_st_examples/read_sensors.py | cchangeur/BlueSTSDK_Python | e5c6e4bc5a58680bad0d867633dd9d92012b9baf | [
"BSD-3-Clause"
] | null | null | null | blue_st_examples/read_sensors.py | cchangeur/BlueSTSDK_Python | e5c6e4bc5a58680bad0d867633dd9d92012b9baf | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# IMPORT
from __future__ import print_function
import sys
import os
import time
from abc import abstractmethod
from blue_st_sdk.manager import Manager
from blue_st_sdk.manager import ManagerListener
from blue_st_sdk.node import NodeListener
from blue_st_sdk.feature import FeatureListener
from blue_st_sdk.features.audio.adpcm.feature_audio_adpcm import FeatureAudioADPCM
from blue_st_sdk.features.audio.adpcm.feature_audio_adpcm_sync import FeatureAudioADPCMSync
# PRECONDITIONS
#
# In case you want to modify the SDK, clone the repository and add the location
# of the "BlueSTSDK_Python" folder to the "PYTHONPATH" environment variable.
#
# On Linux:
# export PYTHONPATH=/home/<user>/BlueSTSDK_Python
# CONSTANTS
# Presentation message.
INTRO = """##############
# Start Gyro #
##############"""
# Bluetooth Scanning time in seconds (optional).
SCANNING_TIME_s = 2 #5
# Mac adress to auto connect
MAC_AUTO_CONNEXION = "cd:26:fb:e4:6d:f1"
# Feature to auto start : "Temperature", "Humidity", "Pressure", "Magnetometer", "Gyroscope", "Accelerometer", "Proximity", "Audio & Sync", "Switch"
FEATURE_AUTO_START = ["Gyroscope", "Proximity"] # or ["Temperature", "Humidity", "Pressure"] or [] to use manualy
# FUNCTIONS
#
# Printing intro.
#
def print_intro():
print('\n' + INTRO + '\n')
# INTERFACES
#
# Implementation of the interface used by the Manager class to notify that a new
# node has been discovered or that the scanning starts/stops.
#
class MyManagerListener(ManagerListener):
#
# This method is called whenever a discovery process starts or stops.
#
# @param manager Manager instance that starts/stops the process.
# @param enabled True if a new discovery starts, False otherwise.
#
def on_discovery_change(self, manager, enabled):
print('[+] Discovery %s.' % ('started' if enabled else 'stopped'))
if not enabled:
print()
#
# This method is called whenever a new node is discovered.
#
# @param manager Manager instance that discovers the node.
# @param node New node discovered.
#
def on_node_discovered(self, manager, node):
print('[+] New device discovered: %s.' % (node.get_name()))
#
# Implementation of the interface used by the Node class to notify that a node
# has updated its status.
#
class MyNodeListener(NodeListener):
#
# To be called whenever a node connects to a host.
#
# @param node Node that has connected to a host.
#
def on_connect(self, node):
print('[+] Device %s connected.' % (node.get_name()))
#
# To be called whenever a node disconnects from a host.
#
# @param node Node that has disconnected from a host.
# @param unexpected True if the disconnection is unexpected, False otherwise
# (called by the user).
#
def on_disconnect(self, node, unexpected=False):
print('[+] Device %s disconnected%s.' % \
(node.get_name(), ' unexpectedly' if unexpected else ''))
if unexpected:
# Exiting.
print('\n[+] Exiting...\n')
sys.exit(0)
#
# Implementation of the interface used by the Feature class to notify that a
# feature has updated its data.
#
class MyFeatureListener(FeatureListener):
#
# To be called whenever the feature updates its data.
#
# @param feature Feature that has updated.
# @param sample Data extracted from the feature.
#
def on_update(self, feature, sample):
print(feature)
#TODO data output (fifo ? ZMQ ?) :
timestamp = sample.get_timestamp()
if feature.get_name() == "Temperature":
out_temp = sample.get_data()
elif feature.get_name() == "Humidity":
out_temp = sample.get_data()
elif feature.get_name() == "Pressure":
out_temp = sample.get_data()
elif feature.get_name() == "Magnetometer":
out_temp_x = sample.get_data()[0]
out_temp_y = sample.get_data()[1]
out_temp_z = sample.get_data()[2]
elif feature.get_name() == "Gyroscope":
out_temp_x = sample.get_data()[0]
out_temp_y = sample.get_data()[1]
out_temp_z = sample.get_data()[2]
elif feature.get_name() == "Accelerometer":
out_temp_x = sample.get_data()[0]
out_temp_y = sample.get_data()[1]
out_temp_z = sample.get_data()[2]
elif feature.get_name() == "Proximity":
out_temp = sample.get_data()
elif feature.get_name() == "Audio & Sync":
pass
elif feature.get_name() == "Switch":
pass
# MAIN APPLICATION
#
# Main application.
#
def main(argv):
# Printing intro.
print_intro()
try:
# Creating Bluetooth Manager.
manager = Manager.instance()
manager_listener = MyManagerListener()
manager.add_listener(manager_listener)
while True:
discovered_devices_once = []
no_connect = True
no_feature_select = True
feature_selected = []
# Asynchronous discovery of Bluetooth devices.
print('[+] Scanning Bluetooth devices...\n')
manager.start_discovery()
timeout = time.time() + SCANNING_TIME_s
while no_connect:
time.sleep(0.01)
# Getting discovered devices.
discovered_devices = manager.get_nodes()
i = 1
for device in discovered_devices:
if device.get_tag() not in discovered_devices_once:
print('[+] %s: [%s]' % (device.get_name(), device.get_tag()))
discovered_devices_once.append(device.get_tag())
# Autoconnection management
if device.get_tag() == MAC_AUTO_CONNEXION:
print('[+] Device MAC address match')
no_connect = False
choice = i
i += 1
# Timeout management
if time.time() > timeout:
break
manager.stop_discovery()
# Selecting a device.
while no_connect:
print('[+] Available Bluetooth devices:')
i = 1
for device in discovered_devices:
print('[+] %d) %s: [%s]' % (i, device.get_name(), device.get_tag()))
i += 1
choice = int(input("\nSelect a device to connect to (\'0\' to quit): "))
if choice >= 0 and choice <= len(discovered_devices):
no_connect = False
if choice == 0:
# Exiting.
manager.remove_listener(manager_listener)
print('[+] Exiting...\n')
sys.exit(0)
device = discovered_devices[choice - 1]
node_listener = MyNodeListener()
device.add_listener(node_listener)
# Connecting to the device.
print('[+] Connecting to %s...' % (device.get_name()))
if not device.connect():
print('[+] Connection failed.\n')
continue
while True:
# Getting features.
features = device.get_features()
print('\n[+] Features:')
i = 1
for feature in features:
if feature.get_name() in FEATURE_AUTO_START:
print('[+] Feature matching - %s' % (feature.get_name()))
choice = i
feature_selected.append(i)
no_feature_select = False
if isinstance(feature, FeatureAudioADPCM):
audio_feature = feature
print('[+] %d,%d) %s' % (i,i+1, "Audio & Sync"))
i+=1
elif isinstance(feature, FeatureAudioADPCMSync):
audio_sync_feature = feature
else:
print('[+] %d) %s' % (i, feature.get_name()))
i+=1
# Selecting a feature.
while no_feature_select:
choice = int(input('\nSelect a feature ''(\'0\' to disconnect): '))
if choice >= 0 and choice <= len(features):
feature_selected.append(choice)
no_feature_select = False
if len(feature_selected) == 0:
# Disconnecting from the device.
print('\n[+] Disconnecting from %s...' % (device.get_name()))
if not device.disconnect():
print('[+] Disconnection failed.\n')
continue
device.remove_listener(node_listener)
# Resetting discovery.
manager.reset_discovery()
# Going back to the list of devices.
break
for feature_id in feature_selected:
feature = features[feature_id - 1]
# Enabling notifications.
feature_listener = MyFeatureListener()
feature.add_listener(feature_listener)
device.enable_notifications(feature)
# Handling audio case (both audio features have to be enabled).
if isinstance(feature, FeatureAudioADPCM):
audio_sync_feature_listener = MyFeatureListener()
audio_sync_feature.add_listener(audio_sync_feature_listener)
device.enable_notifications(audio_sync_feature)
elif isinstance(feature, FeatureAudioADPCMSync):
audio_feature_listener = MyFeatureListener()
audio_feature.add_listener(audio_feature_listener)
device.enable_notifications(audio_feature)
# Getting notifications.
while True:
device.wait_for_notifications(10)
#TODO break management
# TODO : clean disabling (array of feature/feature_listener)
# Disabling notifications.
device.disable_notifications(feature)
feature.remove_listener(feature_listener)
# Handling audio case (both audio features have to be disabled).
if isinstance(feature, FeatureAudioADPCM):
device.disable_notifications(audio_sync_feature)
audio_sync_feature.remove_listener(audio_sync_feature_listener)
elif isinstance(feature, FeatureAudioADPCMSync):
device.disable_notifications(audio_feature)
audio_feature.remove_listener(audio_feature_listener)
except KeyboardInterrupt:
try:
# Exiting.
print('\n[+] Exiting...\n')
sys.exit(0)
except SystemExit:
os._exit(0)
if __name__ == "__main__":
main(sys.argv[1:])
| 36.990536 | 149 | 0.541532 | 1,164 | 11,726 | 5.276632 | 0.20189 | 0.021654 | 0.027515 | 0.023445 | 0.281667 | 0.203028 | 0.148649 | 0.104526 | 0.095083 | 0.056659 | 0 | 0.005663 | 0.367474 | 11,726 | 316 | 150 | 37.107595 | 0.822435 | 0.210728 | 0 | 0.295455 | 0 | 0 | 0.084765 | 0 | 0 | 0 | 0 | 0.003165 | 0 | 1 | 0.039773 | false | 0.011364 | 0.0625 | 0 | 0.119318 | 0.147727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
219815a06b42358ab44f5f67029d88d169f0dbbc | 9,934 | py | Python | mnnpy/utils.py | dbrg77/mnnpy | b6fd65aa6c7e2ad72308ba10da810e805b79298f | [
"BSD-3-Clause"
] | null | null | null | mnnpy/utils.py | dbrg77/mnnpy | b6fd65aa6c7e2ad72308ba10da810e805b79298f | [
"BSD-3-Clause"
] | null | null | null | mnnpy/utils.py | dbrg77/mnnpy | b6fd65aa6c7e2ad72308ba10da810e805b79298f | [
"BSD-3-Clause"
] | 1 | 2018-10-16T01:56:34.000Z | 2018-10-16T01:56:34.000Z | import math
import numpy as np
from multiprocessing import Pool
from scipy.spatial import cKDTree
from scipy.linalg import orth
from scipy.linalg.interpolative import svd as rsvd
from scipy.sparse import issparse
from numba import jit, float32, int32, int8
from . import settings
from .irlb import lanczos
@jit(float32[:](float32[:, :]), nogil=True)
def l2_norm(in_matrix):
    """Return the Euclidean (L2) norm of every row of a 2-d float32 matrix.

    :param in_matrix: 2-d array, one row per cell/sample.
    :return: 1-d array of per-row L2 norms.
    """
    return np.linalg.norm(in_matrix, axis=1)
@jit(float32[:, :](float32[:, :], float32[:, :]), nogil=True)
def scale_rows(in_matrix, scale_vector):
    """Element-wise divide *in_matrix* by *scale_vector* (broadcasting).

    Typically used with a column vector of per-row norms, so that each row
    of the result has unit L2 norm.

    :param in_matrix: 2-d array to rescale.
    :param scale_vector: array broadcastable against *in_matrix* (usually
        shape ``(n_rows, 1)``).
    :return: the rescaled 2-d array.
    """
    return in_matrix / scale_vector
@jit(float32[:, :](float32[:, :], float32[:, :]))
def kdist(m, n):
    """Compute the matrix of pairwise inner products between rows of two matrices.

    Note: despite the name, entry ``(i, j)`` is the dot product
    ``<m[i], n[j]>``, not a distance.  The explicit double loop is kept so
    that numba can compile it efficiently.

    :param m: 2-d float32 array.
    :param n: 2-d float32 array with the same number of columns as *m*.
    :return: float32 array of shape ``(m.shape[0], n.shape[0])``.
    """
    rows_m = m.shape[0]
    rows_n = n.shape[0]
    # np.empty is safe here: every entry is assigned in the loops below.
    dist = np.empty((rows_m, rows_n), dtype=np.float32)
    for row in range(rows_m):
        for col in range(rows_n):
            dist[row, col] = np.dot(m[row], n[col])
    return dist
def _pool_map(func, iterable, n_jobs):
    """Apply *func* to each item, with a process pool when
    ``settings.normalization == 'parallel'`` and a plain loop otherwise."""
    if settings.normalization == 'parallel':
        with Pool(n_jobs) as pool:
            return pool.map(func, iterable)
    return [func(item) for item in iterable]


def _pool_starmap(func, iterable, n_jobs):
    """Starmap counterpart of :func:`_pool_map` for argument tuples."""
    if settings.normalization == 'parallel':
        with Pool(n_jobs) as pool:
            return pool.starmap(func, iterable)
    return [func(*args) for args in iterable]


def transform_input_data(datas, cos_norm_in, cos_norm_out, var_index, var_subset, n_jobs):
    """Densify, optionally subset and optionally cosine-normalise the input batches.

    :param datas: list of batch matrices (dense arrays or scipy sparse
        matrices; presumably cells x genes — confirm against callers).
        Sparse inputs are densified; everything is cast to float32.
    :param cos_norm_in: if True, L2-normalise the rows of the (possibly
        gene-subset) matrices used for MNN detection.
    :param cos_norm_out: if True, L2-normalise the rows of the full output
        matrices.
    :param var_index: sequence of variable (gene) names matching the columns
        of each batch; required.
    :param var_subset: optional subset of *var_index* used for MNN detection,
        or None to use all variables.
    :param n_jobs: number of worker processes when parallel normalisation is
        enabled via ``settings.normalization``.
    :return: tuple ``(in_batches, out_batches, var_sub_index, same_set)``
        where *var_sub_index* is the list of column indices of *var_subset*
        (None when no subsetting happened) and *same_set* is True when the
        in- and out-matrices are identical (same normalisation, no subset).
    :raises ValueError: if *var_index* is missing or *var_subset* contains
        names absent from *var_index*.
    """
    datas = [data.toarray().astype(np.float32) if issparse(data)
             else data.astype(np.float32) for data in datas]
    if var_index is None:
        raise ValueError('Argument var_index not provided.')
    if var_subset is not None:
        if set(var_subset) - set(var_index) != set():
            raise ValueError('Some items in var_subset are not in var_index.')
        # A subset equal to the full index is not a real subset.
        do_subset = set(var_index) != set(var_subset)
    else:
        do_subset = False
    same_set = cos_norm_in == cos_norm_out and not do_subset
    if do_subset:
        var_sub_index = [list(var_index).index(var) for var in var_subset]
        in_batches = [data[:, var_sub_index] for data in datas]
    else:
        var_sub_index = None
        in_batches = datas
    # Per-row L2 norms of the (possibly subset) input matrices, as column
    # vectors so they broadcast row-wise in scale_rows.
    in_scaling = _pool_map(l2_norm, in_batches, n_jobs)
    in_scaling = [scaling[:, None] for scaling in in_scaling]
    if cos_norm_in:
        in_batches = _pool_starmap(scale_rows, zip(in_batches, in_scaling), n_jobs)
    if cos_norm_out:
        if not cos_norm_in:
            # Reuse the input-space norms for the output matrices (matches
            # the original control flow; the norms were computed on the
            # possibly-subset in_batches).
            out_batches = _pool_starmap(scale_rows, zip(datas, in_scaling), n_jobs)
        else:
            out_scaling = _pool_map(l2_norm, datas, n_jobs)
            out_scaling = [scaling[:, None] for scaling in out_scaling]
            out_batches = _pool_starmap(scale_rows, zip(datas, out_scaling), n_jobs)
    else:
        # Bug fix: previously out_batches was left unassigned when
        # cos_norm_out was False, raising NameError at the return below.
        out_batches = datas
    return in_batches, out_batches, var_sub_index, same_set
# NOTE(review): the signature lacks nopython=True, so numba falls back to
# object mode here (cKDTree is not numba-compatible) -- confirm intended.
@jit((float32[:, :], float32[:, :], int8, int8, int8))
def find_mutual_nn(data1, data2, k1, k2, n_jobs):
    """Find mutual nearest neighbour (MNN) pairs between two datasets.

    Args:
        data1: (n1, d) float32 matrix.
        data2: (n2, d) float32 matrix.
        k1: neighbours searched in data1 for each data2 row.
        k2: neighbours searched in data2 for each data1 row.
        n_jobs: worker count forwarded to cKDTree.query.

    Returns:
        Two parallel lists (indices into data1, indices into data2); the
        i-th entries of both lists form one mutual pair.
    """
    # k_index_1[j]: indices (into data1) of the k1 nearest rows to data2[j].
    k_index_1 = cKDTree(data1).query(x=data2, k=k1, n_jobs=n_jobs)[1]
    # k_index_2[i]: indices (into data2) of the k2 nearest rows to data1[i].
    k_index_2 = cKDTree(data2).query(x=data1, k=k2, n_jobs=n_jobs)[1]
    mutual_1 = []
    mutual_2 = []
    for index_2 in range(data2.shape[0]):
        for index_1 in k_index_1[index_2]:
            # Keep the pair only when the relation holds in both directions.
            if index_2 in k_index_2[index_1]:
                mutual_1.append(index_1)
                mutual_2.append(index_2)
    return mutual_1, mutual_2
@jit(float32[:, :](float32[:, :], float32[:, :], int32[:], int32[:], float32[:, :], float32))
def compute_correction(data1, data2, mnn1, mnn2, data2_or_raw2, sigma):
    """Compute a smoothed batch-correction vector for every cell in data2.

    Args:
        data1: reference batch (cells x genes).
        data2: batch to be corrected (cells x genes).
        mnn1: indices into data1 of the mutual-nearest-neighbour pairs.
        mnn2: indices into data2 of the mutual-nearest-neighbour pairs.
        data2_or_raw2: representation of data2 used for the Gaussian kernel
            distances (presumably the raw matrix when a reduced one is used
            elsewhere -- confirm at the caller).
        sigma: Gaussian kernel bandwidth.

    Returns:
        (cells2 x genes) float32 matrix of correction vectors.
    """
    # Raw pairwise correction vectors, one per MNN pair.
    vect = data1[mnn1] - data2[mnn2]
    # Cells of data2 participating in at least one pair, with pair counts.
    mnn_index, mnn_count = np.unique(mnn2, return_counts=True)
    vect_reduced = np.zeros((data2.shape[0], vect.shape[1]), dtype=np.float32)
    for index, ve in zip(mnn2, vect):
        vect_reduced[index] += ve
    # Average correction per participating cell.
    vect_avg = np.divide(vect_reduced[mnn_index], mnn_count.astype(np.float32)[:, None])
    # Gaussian kernel weights between every data2 cell and the MNN cells.
    exp_distance = np.exp(-kdist(data2_or_raw2, data2_or_raw2[mnn_index]) / sigma)
    density = np.sum(exp_distance[mnn_index], axis=0)
    mult = np.divide(exp_distance, density)
    total_prob = np.sum(mult, axis=1, keepdims=True)
    output = np.dot(mult, vect_avg)
    # Normalised, kernel-weighted average of the per-cell corrections.
    return np.divide(output, total_prob)
def svd_internal(mat, nu, svd_mode, **kwargs):
    """Run a truncated SVD of ``mat`` using the backend named by ``svd_mode``.

    Args:
        mat: input matrix (converted to float64 for the decomposition).
        nu: requested rank / number of singular vectors.
        svd_mode: one of 'svd', 'rsvd' or 'irlb'.
        kwargs: extra arguments forwarded to the 'irlb' backend.

    Returns:
        Tuple (U, s, V) of the first ``nu`` factors, cast back to float32.

    Raises:
        ValueError: when ``svd_mode`` is not a recognised backend name.
    """
    if svd_mode not in ('svd', 'rsvd', 'irlb'):
        raise ValueError('The svd_mode must be one of \'rsvd\', \'svd\', \'irlb\'.')
    work = mat.astype(np.float64)
    if svd_mode == 'svd':
        # Deterministic path: random projection disabled.
        decomposition = rsvd(work, eps_or_k=nu, rand=False)
    elif svd_mode == 'rsvd':
        decomposition = rsvd(work, eps_or_k=nu)
    else:
        decomposition = lanczos(work, nu, **kwargs)
    return tuple(decomposition[i].astype(np.float32) for i in range(3))
def find_shared_subspace(mat1, mat2, sin_thres=0.05, cos_thres=1 / math.sqrt(2), mat2_vec=False,
                         assume_orthonomal=False, get_angle=True):
    """Compute the overlap between two subspaces and the angle between them.

    Args:
        mat1: (n, p1) matrix whose columns span the first subspace.
        mat2: (n, p2) matrix spanning the second subspace, or a length-n
            vector when ``mat2_vec`` is True.
        sin_thres: singular values above this count as shared dimensions.
        cos_thres: threshold selecting whether the angle is derived from its
            cosine (well separated) or its sine (near-parallel, more stable).
        mat2_vec: when True, promote ``mat2`` to a single-column matrix.
        assume_orthonomal: when True, skip re-orthonormalising the inputs.
        get_angle: when False, skip the angle computation.

    Returns:
        Tuple (angle in degrees or None, number of shared dimensions).
    """
    if mat2_vec:
        mat2 = mat2[:, None]
    if not assume_orthonomal:
        mat1 = orth(mat1)
        mat2 = orth(mat2)
    cross_prod = np.dot(mat1.T, mat2)
    singular = np.linalg.svd(cross_prod)
    shared = sum(singular[1] > sin_thres)
    if not get_angle:
        return None, shared
    costheta = min(singular[1])
    if costheta < cos_thres:
        theta = math.acos(min(1, costheta))
    else:
        # Near-parallel subspaces: measure the spectral norm of the residual
        # of projecting the smaller basis onto the larger one.
        if mat1.shape[1] < mat2.shape[1]:
            sintheta = np.linalg.norm(x=mat1 - np.dot(mat2, cross_prod.T), ord=2)
        else:
            # Bug fix: was ``mat2.T - np.dot(mat1, cross_prod)``. mat2.T has
            # shape (p2, n) while the projection has shape (n, p2); the
            # subtraction broadcast to a meaningless matrix instead of the
            # true residual mat2 - proj(mat2).
            sintheta = np.linalg.norm(x=mat2 - np.dot(mat1, cross_prod), ord=2)
        theta = math.asin(min(1, sintheta))
    return 180 * theta / math.pi, shared
def get_bio_span(exprs, ndim, svd_mode, var_subset=None, **kwargs):
    """Estimate a basis of the biological subspace of an expression matrix.

    Args:
        exprs: (cells x genes) expression matrix.
        ndim: number of singular vectors requested (capped by matrix shape).
        svd_mode: backend name forwarded to ``svd_internal``.
        var_subset: optional positional gene indices the SVD is computed on;
            loadings for the remaining genes are back-projected afterwards.
        kwargs: extra arguments forwarded to ``svd_internal``.

    Returns:
        (genes x ndim) float32 matrix of basis vectors.
    """
    centred = exprs - np.mean(exprs, axis=0)
    if var_subset is not None:
        # Boolean masks: ``keeper`` marks genes inside the subset,
        # ``subsetter`` marks the leftovers.
        subsetter = [True] * centred.shape[1]
        keeper = [False] * centred.shape[1]
        for i in var_subset:
            subsetter[i] = False
            keeper[i] = True
        leftovers = centred[:, subsetter].T
        centred = centred[:, keeper]
    ndim = min(ndim, *centred.shape)
    singular = svd_internal(centred.T, ndim, svd_mode, **kwargs)
    if var_subset is None:
        return singular[0]
    # Re-assemble a full-size basis: subset genes take the left singular
    # vectors directly; leftover genes take their projection (leftovers . V)
    # rescaled by the singular values.
    output = np.zeros((exprs.shape[1], ndim), dtype=np.float32)
    output[keeper,] = singular[0]
    output[subsetter,] = np.divide(np.dot(leftovers, singular[2]), singular[1][range(ndim)])
    return output
def subtract_bio(*spans, correction, var_subset=None):
    """Remove biological components from a batch-correction matrix.

    For each basis in ``spans``, the projection of ``correction`` onto that
    basis is subtracted in place, leaving only technical variation.

    Args:
        spans: orthonormal basis matrices (genes x ndim) to project out.
        correction: (cells x genes) correction matrix; modified in place.
        var_subset: optional gene indices used for the projection magnitude.

    Returns:
        The same ``correction`` array, with the projections removed.
    """
    for basis in spans:
        if var_subset is None:
            magnitude = correction.dot(basis)
        else:
            magnitude = correction[:, var_subset].dot(basis[var_subset, :])
        # In-place subtraction keeps callers' references valid.
        correction -= magnitude.dot(basis.T)
    return correction
def adjust_shift_variance(data1, data2, correction, sigma, n_jobs, var_subset=None):
    """Scale each cell's correction vector by a variance-matching factor.

    Args:
        data1: reference batch (cells x genes).
        data2: batch being corrected (cells x genes).
        correction: per-cell correction vectors for data2.
        sigma: Gaussian kernel bandwidth forwarded to the workers.
        n_jobs: number of pool processes.
        var_subset: optional gene indices restricting the computation.

    Returns:
        ``correction`` multiplied row-wise by the clamped scale factors.
    """
    if var_subset is None:
        vect = correction
    else:
        # Restrict the variance computation to the selected genes only.
        vect = correction[:, var_subset]
        data1 = data1[:, var_subset]
        data2 = data2[:, var_subset]
    # A picklable callable is required so Pool can ship it to the workers.
    worker = adjust_v_worker(data1, data2, sigma)
    chunk = int(data2.shape[0] / n_jobs) + 1
    with Pool(n_jobs) as pool:
        scaling = pool.starmap(worker, zip(data2, vect), chunksize=chunk)
    # Never shrink a correction vector: clamp factors below 1 up to 1.
    scaling = np.fmax(scaling, 1).astype(np.float32)
    return correction * scaling[:, None]
@jit(float32(float32[:, :], float32[:, :], float32[:], float32[:], float32), nogil=True)
def adjust_s_variance(data1, data2, curcell, curvect, sigma):
    """Compute the variance-adjustment scale for one correction vector.

    Projects both batches onto the direction of ``curvect`` and matches the
    kernel-weighted quantile of ``curcell``'s projection in data2 to the same
    quantile in data1, returning the required multiple of ``curvect``.

    Args:
        data1: reference batch (cells x genes).
        data2: batch being corrected (cells x genes).
        curcell: the data2 cell being adjusted.
        curvect: its raw correction vector.
        sigma: Gaussian kernel bandwidth.

    Returns:
        Scale factor such that scale * curvect reaches the matched quantile.
    """
    # Column 0: projection along the correction direction; column 1: weight.
    distance1 = np.zeros((data1.shape[0], 2), dtype=np.float32)
    # NOTE(review): this local shadows the module-level l2_norm helper.
    l2_norm = np.linalg.norm(curvect)
    grad = np.divide(curvect, l2_norm)
    curproj = np.dot(grad, curcell)
    prob2 = 0.
    totalprob2 = 0.
    # Kernel-weighted fraction of data2 cells projecting at or below curcell.
    for samecell in data2:
        sameproj = np.dot(grad, samecell)
        samedist = sq_dist_to_line(curcell, grad, samecell)
        sameprob = np.exp(-samedist / sigma)
        if sameproj <= curproj:
            prob2 += sameprob
        totalprob2 += sameprob
    prob2 /= totalprob2
    totalprob1 = 0.
    # Projections and kernel weights for every data1 cell.
    for other in range(data1.shape[0]):
        othercell = data1[other]
        distance1[other, 0] = np.dot(grad, othercell)
        otherdist = sq_dist_to_line(curcell, grad, othercell)
        weight = np.exp(-otherdist / sigma)
        distance1[other, 1] = weight
        totalprob1 += weight
    # Sort by projection so the cumulative weights trace the weighted CDF.
    distance1 = distance1[distance1[:, 0].argsort()]
    target = prob2 * totalprob1
    cumulative = 0.
    # Default to the largest projection if the target is never exceeded.
    ref_quan = distance1[-1, 0]
    for i in distance1:
        cumulative += i[1]
        if cumulative > target:
            ref_quan = i[0]
            break
    # Convert the projection gap back into multiples of curvect.
    return (ref_quan - curproj) / l2_norm
@jit(float32(float32[:], float32[:], float32[:]), nopython=True)
def sq_dist_to_line(ref, grad, point):
    """Squared distance from ``point`` to the line through ``ref`` along ``grad``.

    ``grad`` is assumed to be unit length; the component of (ref - point)
    parallel to it is removed and the squared norm of the residual returned.
    """
    offset = ref - point
    residual = offset - grad * np.dot(offset, grad)
    return np.dot(residual, residual)
class adjust_v_worker(object):
    """Picklable callable used by ``adjust_shift_variance`` with Pool.starmap.

    A closure cannot be pickled for multiprocessing, so the shared arguments
    are bound as instance attributes instead.
    """
    def __init__(self, data1, data2, sigma):
        # Reference batch, target batch and kernel bandwidth shared by all
        # per-cell invocations.
        self.d1 = data1
        self.d2 = data2
        self.s2 = sigma
    def __call__(self, curcell, curvect):
        """Compute the variance-adjustment scale for one (cell, vector) pair."""
        return adjust_s_variance(self.d1, self.d2, curcell, curvect, self.s2)
def get_so_paths(dir_name):
    """Return paths of compiled extension modules (.so/.pyd) in ``dir_name``.

    Args:
        dir_name: directory name, resolved relative to this file's directory.

    Returns:
        List of joined paths; empty when the directory does not exist.
    """
    dir_name = os.path.join(os.path.dirname(__file__), dir_name)
    list_dir = os.listdir(dir_name) if os.path.isdir(dir_name) else []
    # Bug fix: split('.')[-1] also matched extension-less files literally
    # named 'so' or 'pyd'; endswith checks for a real extension.
    return [os.path.join(dir_name, so_name) for so_name in list_dir
            if so_name.endswith(('.so', '.pyd'))]
# Prefer the compiled Cython implementation of adjust_shift_variance when the
# optional ._utils extension is available; it replaces the pure-python
# function defined above under the same name.
try:
    from ._utils import _adjust_shift_variance as adjust_shift_variance
    # print('Cython module loaded!')
except ImportError:
    # The extension is optional; fall back to the python implementation.
    print('Cython module _utils not initialized. Fallback to python.')
    pass
| 37.628788 | 112 | 0.62875 | 1,433 | 9,934 | 4.166085 | 0.177948 | 0.028643 | 0.031658 | 0.013065 | 0.19866 | 0.142714 | 0.091457 | 0.08392 | 0.076884 | 0.076884 | 0 | 0.034313 | 0.243104 | 9,934 | 263 | 113 | 37.771863 | 0.759675 | 0.00302 | 0 | 0.135965 | 0 | 0 | 0.023124 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070175 | false | 0.004386 | 0.052632 | 0.013158 | 0.201754 | 0.004386 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2199fec3c4a94bc3ee9c0ccd244406c9f8fdfe01 | 15,408 | py | Python | pycharmers/utils/soup_utils.py | iwasakishuto/Py-utils | 5dc1e7d676811c2239e64f500b734bd508335256 | [
"MIT"
] | 2 | 2019-11-13T11:59:30.000Z | 2019-11-17T15:44:09.000Z | pycharmers/utils/soup_utils.py | iwasakishuto/Py-utils | 5dc1e7d676811c2239e64f500b734bd508335256 | [
"MIT"
] | 1 | 2020-09-12T18:00:30.000Z | 2020-09-12T18:00:50.000Z | pycharmers/utils/soup_utils.py | iwasakishuto/Py-utils | 5dc1e7d676811c2239e64f500b734bd508335256 | [
"MIT"
] | 1 | 2021-04-13T16:19:26.000Z | 2021-04-13T16:19:26.000Z | #coding: utf-8
import re
import requests
from bs4 import BeautifulSoup
from .generic_utils import str_strip, handleKeyError
def str2soup(string):
    """Parse ``string`` and strip the wrapper tags html5lib inserts.

    html5lib always wraps fragments in ``<html>``, ``<body>`` and ``<head>``;
    those wrappers are unwrapped so only the original markup remains.

    Args:
        string (str) : an HTML fragment.

    Returns:
        bs4.BeautifulSoup : the parsed fragment without implicit wrappers.

    Examples:
        >>> from pycharmers.utils import str2soup
        >>> str2soup("<title>Python-Charmers</title>")
        <title>Python-Charmers</title>
    """
    soup = BeautifulSoup(markup=string, features="html5lib")
    for wrapper in ("html", "body", "head"):
        tag = getattr(soup, wrapper, None)
        if tag is not None:
            tag.unwrap()
    return soup
def split_section(section, name=None, attrs={}, recursive=True, text=None, **kwargs):
    """ Split ``bs4.BeautifulSoup`` at every tag matching the given filters.

    Args:
        section (bs4.BeautifulSoup) : A data structure representing a parsed HTML or XML document.
        name (str) : A filter on tag name.
        attrs (dict) : A dictionary of filters on attribute values.
        recursive (bool) : If this is True, ``.find`` will perform a recursive search of this PageElement's children. Otherwise, only the direct children will be considered.
        text (str) : An inner text.
        kwargs (dict) : A dictionary of filters on attribute values.

    Returns:
        list : Alternating soup fragments and matched delimiter tags, in
        document order.

    Examples:
        >>> from bs4 import BeautifulSoup
        >>> from pycharmers.utils import split_section
        >>> section = BeautifulSoup(\"\"\"
        ... <section><div><h2>Title</h2><div>
        ... <p>aaaaaaaaaaaaaaaaaaaaaa</p>
        ... <div><img/></div>
        ... <p>bbbbbbbbbbbbbbbbbbbbbb</p>
        ... </div></div></section>
        >>> \"\"\")
        >>> len(split_section(section, name="img"))
        3
    """
    # Work on the raw string form so the document can be cut at the exact
    # character offsets where each delimiter tag appears.
    str_section = str(section)
    page_elements = []
    delimiters = section.find_all(name=name, attrs=attrs, recursive=recursive, text=text, **kwargs)
    # Initialization (Prevent occuring an error when for-loop enter continue at the beginning (i=0))
    end = 0
    for i,delimiter in enumerate(delimiters):
        str_delimiter = str(delimiter)
        # NOTE(review): find() searches from the start of the string, so
        # identical delimiter tags would repeatedly match the first
        # occurrence -- confirm whether duplicates can occur here.
        start = str_section.find(str_delimiter)
        if start==-1:
            continue
        # Fragment between the previous delimiter and this one.
        page_elements.append(str2soup(string=str_section[end:start]))
        page_elements.append(delimiter)
        end = start + len(str_delimiter)
    # Trailing content after the last delimiter.
    page_elements.append(str2soup(string=str_section[end:]))
    return page_elements
def group_soup_with_head(soup, name=None, attrs={}, recursive=True, text=None, **kwargs):
    """ Grouping ``bs4.BeautifulSoup`` based on head.

    Every tag matching the filters starts a new ``<section>``; the content up
    to the next matching head is appended to that section.

    Args:
        soup (bs4.BeautifulSoup) : A data structure representing a parsed HTML or XML document.
        name (str) : A filter on tag name.
        attrs (dict) : A dictionary of filters on attribute values.
        recursive (bool) : If this is True, ``.find`` will perform a recursive search of this PageElement's children. Otherwise, only the direct children will be considered.
        text (str) : An inner text.
        kwargs (dict) : A dictionary of filters on attribute values.

    Returns:
        list : One ``<section>`` element per matched head; empty when no
        head matches.

    Examples:
        >>> from bs4 import BeautifulSoup
        >>> from pycharmers.utils import group_soup_with_head
        >>> section = BeautifulSoup(\"\"\"
        ... <h2>AAA</h2><div><p>aaaa</p></div>
        ... <h2>BBB</h2><div><p>bbbb</p></div>
        >>> \"\"\")
        >>> len(group_soup_with_head(section, name="h2"))
        2
    """
    # Work on the raw string form so bodies can be cut at exact offsets.
    str_soup = str(soup)
    sections = []
    heads = soup.find_all(name=name, attrs=attrs, recursive=recursive, text=text, **kwargs)
    # Initialization (Prevent occuring an error when for-loop enter continue at the beginning (i=0))
    end = 0; section = BeautifulSoup(markup="", features="lxml").new_tag(name="section")
    # NOTE(review): indentation was lost in this copy; the trailing flush is
    # placed inside this guard so no section is emitted when nothing matches
    # -- confirm against the upstream source.
    if len(heads)>0:
        for i,head in enumerate(heads):
            str_head = str(head)
            start = str_soup.find(str_head)
            if start==-1:
                continue
            if i>0:
                # Close the previous section with the body preceding this head.
                body = str2soup(string=str_soup[end:start])
                section.append(body)
                sections.append(section)
            end = start + len(str_head)
            # Open a fresh <section> that begins with the current head.
            section = BeautifulSoup(markup="", features="lxml").new_tag(name="section")
            section.append(head)
        # Flush the last open section with the remaining trailing content.
        body = str2soup(string=str_soup[end:])
        section.append(body)
        sections.append(section)
    return sections
def replace_soup_tag(soup,
                     new_name, new_namespace=None, new_nsprefix=None, new_attrs={}, new_sourceline=None,
                     new_sourcepos=None, new_kwattrs={},
                     old_name=None, old_attrs={}, old_recursive=True, old_text=None, old_limit=None, old_kwargs={}, **kwargs):
    """Replace Old tag with New tag.

    - Args named ``old_XXX`` specify "How to find old tags"
    - Args named ``new_XXX`` specify "How to create new tags"

    Args:
        soup (bs4.BeautifulSoup) : A data structure representing a parsed HTML or XML document; modified in place.
        old_name (str) : A filter on tag name.
        old_attrs (dict) : A dictionary of filters on attribute values.
        old_recursive (bool) : If this is True, ``.find_all`` will perform a recursive search of this PageElement's children. Otherwise, only the direct children will be considered.
        old_text (str) : An inner-text filter for the old tags.
        old_limit (int) : Stop looking after finding this many results.
        old_kwargs (dict) : A dictionary of filters on attribute values.
        new_name (str) : The name of the new Tag.
        new_namespace (str) : The URI of the new Tag's XML namespace, if any.
        new_nsprefix (str) : The prefix for the new Tag's XML namespace, if any.
        new_attrs (dict) : A dictionary of this Tag's attribute values; can be used instead of ``new_kwattrs`` for attributes like 'class' that are reserved words in Python.
        new_sourceline (str) : The line number where this tag was (purportedly) found in its source document.
        new_sourcepos (str) : The character position within ``sourceline`` where this tag was (purportedly) found.
        new_kwattrs (dict) : Keyword arguments for the new Tag's attribute values.

    Returns:
        bs4.BeautifulSoup : the same ``soup`` with the tags replaced.

    Examples:
        >>> from bs4 import BeautifulSoup
        >>> from pycharmers.utils import replace_soup_tag
        >>> section = BeautifulSoup("<h2>AAA</h2><h3>BBB</h3>")
        >>> replace_soup_tag(soup=section, old_name="h3", new_name="h2")
        <html><body><h2>AAA</h2><h2>BBB</h2></body></html>
    """
    # NOTE(review): **kwargs is accepted but never forwarded -- confirm.
    for old in soup.find_all(name=old_name, attrs=old_attrs, recursive=old_recursive, text=old_text, limit=old_limit, **old_kwargs):
        # Build the replacement tag, move the children over, then swap.
        new = BeautifulSoup(markup="", features="lxml").new_tag(name=new_name, namespace=new_namespace, nsprefix=new_nsprefix, attrs=new_attrs, sourceline=new_sourceline, sourcepos=new_sourcepos, **new_kwattrs)
        new.extend(list(old.children))
        old.replace_with(new)
    return soup
def find_target_text(soup, name=None, attrs={}, recursive=True, text=None, default="__NOT_FOUND__", strip=True, **kwargs):
    """Find the first matching element and return its inner text.

    Args:
        soup (bs4.BeautifulSoup) : A parsed HTML or XML document.
        name (str) : A filter on tag name.
        attrs (dict) : A dictionary of filters on attribute values.
        recursive (bool) : When True, ``.find`` searches all descendants;
            otherwise only direct children.
        text (str) : An inner-text filter.
        default (str) : Value returned when no element matches.
        strip (bool) : Whether to clean the found text with ``str_strip``.
        kwargs (dict) : Extra attribute-value filters.

    Returns:
        str : the (optionally stripped) text, or ``default`` when not found.

    Examples:
        >>> find_target_text(soup=section, name="div")
        'aaaaaaaaaaaaaaaaaaaaaa'
        >>> find_target_text(soup=section, name="divdiv", default="not found")
        'not found'
    """
    found = soup.find(name=name, attrs=attrs, recursive=recursive, text=text, **kwargs)
    if found is None:
        return default
    result = found.text
    return str_strip(string=result) if strip else result
def find_all_target_text(soup, name=None, attrs={}, recursive=True, text=None, default="__NOT_FOUND__", strip=True, joint="", **kwargs):
    """Collect the texts of every matching element, joined into one string.

    Args:
        soup (bs4.BeautifulSoup) : A parsed HTML or XML document.
        name (str) : A filter on tag name.
        attrs (dict) : A dictionary of filters on attribute values.
        recursive (bool) : When True, ``.find_all`` searches all descendants;
            otherwise only direct children.
        text (str) : An inner-text filter.
        default (str) : Unused fallback kept for signature compatibility.
        strip (bool) : Whether to clean each text with ``str_strip``.
        joint (str) : Separator inserted between the collected texts.
        kwargs (dict) : Extra attribute-value filters.

    Returns:
        str : all matched texts joined with ``joint``.

    Examples:
        >>> find_all_target_text(soup=section, name="p", class_="lang", joint=", ")
        'Hello, 你好, Hola, Bonjour, こんにちは'
    """
    def _clean(node):
        value = node.text
        return str_strip(string=value) if strip else value
    matches = soup.find_all(name=name, attrs=attrs, recursive=recursive, text=text, **kwargs)
    return joint.join(_clean(match) for match in matches)
def find_target_id(soup, key, name=None, attrs={}, recursive=True, text=None, default=None, strip=True, **kwargs):
    """Find the first matching element and return one of its attributes.

    Args:
        soup (bs4.BeautifulSoup) : A parsed HTML or XML document.
        key (str) : attribute name to read (e.g. ``"id"`` or ``"src"``).
        name (str) : A filter on tag name.
        attrs (dict) : A dictionary of filters on attribute values.
        recursive (bool) : When True, ``.find`` searches all descendants;
            otherwise only direct children.
        text (str) : An inner-text filter.
        default (str) : Value returned when the element or attribute is missing.
        strip (bool) : Whether to clean the attribute value with ``str_strip``.
        kwargs (dict) : Extra attribute-value filters.

    Returns:
        str : the (optionally stripped) attribute value, or ``default``.

    Examples:
        >>> find_target_id(soup=section, name="img", key="id")
        'apple-touch-icon'
    """
    target = soup.find(name=name, attrs=attrs, recursive=recursive, text=text, **kwargs)
    if target is None:
        id_ = default
    else:
        id_ = target.get(key=key, default=default)
        # Bug fix: ``get`` returns ``default`` (None by default) when the
        # attribute is missing; guard so str_strip is never handed None.
        if strip and id_ is not None:
            id_ = str_strip(string=id_)
    return id_
def get_soup(url, driver=None, features="lxml", timeout=1):
    """ Scrape and get page source from ``url``.

    Args:
        url (str) : URL.
        driver (WebDriver) : webdriver. When given, the page is rendered with
            selenium (waited on and scrolled to the bottom) before parsing;
            otherwise a plain ``requests.get`` is used.
        features (str) : Desirable features of the parser to be used. This may be the name of a specific parser ("lxml", "lxml-xml", "html.parser", or "html5lib") or it may be the type of markup to be used ("html", "html5", "xml"). It's recommended that you name a specific parser, so that Beautiful Soup gives you the same results across platforms and virtual environments.
        timeout (int) : Seconds to wait for all elements when rendering with ``driver``.

    Returns:
        BeautifulSoup : A data structure representing a parsed HTML or XML document.
    """
    # Reject unknown parser names early (raises via handleKeyError).
    handleKeyError(lst=["lxml", "lxml-xml", "html.parser", "html5lib", "html", "html5", "xml"], features=features)
    if driver is None:
        html = requests.get(url=url).content
    else:
        # Imported lazily so selenium is only required when a driver is used.
        from .driver_utils import scrollDown, wait_until_all_elements
        driver.get(url)
        wait_until_all_elements(driver=driver, timeout=timeout, verbose=False)
        scrollDown(driver=driver, verbose=False)
        html = driver.page_source.encode("utf-8")
    return BeautifulSoup(markup=html, features=features)
21a00207837c0b03d9e8ba6b3b0c1c8c83c6a9d4 | 8,444 | py | Python | third_party/chromite/cli/cros/lint_autotest.py | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | third_party/chromite/cli/cros/lint_autotest.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | third_party/chromite/cli/cros/lint_autotest.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # -*- coding: utf-8 -*-
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This module is not automatically loaded by the `cros` helper. The filename
# would need a "cros_" prefix to make that happen. It lives here so that it
# is alongside the cros_lint.py file.
#
# For msg namespaces, the 9xxx should generally be reserved for our own use.
"""A lint module loaded by pylint for Autotest linting.
This module patches pylint library functions to suit autotest.
This is loaded by pylint directly via the autotest pylintrc file:
load-plugins=chromite.cli.cros.lint_autotest
"""
from __future__ import print_function
import re
from pylint.checkers import base
from pylint.checkers import BaseChecker
from pylint.checkers import imports
from pylint.checkers import variables
from pylint.interfaces import IAstroidChecker
import logilab.common.modutils
# patch up the logilab module lookup tools to understand autotest_lib.* trash
_ffm = logilab.common.modutils.file_from_modpath


def file_from_modpath(modpath, paths=None, context_file=None):
    """Wrapper around logilab's file_from_modpath that hides autotest_lib.

    Args:
        modpath: module name split on '.'.
        paths: optional list of paths where the module should be searched.
        context_file: path to the file doing the importing.

    Returns:
        The module path as returned by the wrapped logilab function.

    Raises:
        ImportError: if there is no such module.
    """
    # Drop the synthetic 'autotest_lib' package component before delegating.
    effective = modpath[1:] if modpath[0] == "autotest_lib" else modpath
    return _ffm(effective, paths, context_file)
# patch up pylint import checker to handle our importing magic
ROOT_MODULE = 'autotest_lib.'

# A list of modules for pylint to ignore, specifically, these modules
# are imported for their side-effects and are not meant to be used.
_IGNORE_MODULES = (
    'common',
    'frontend_test_utils',
    'setup_django_environment',
    'setup_django_lite_environment',
    'setup_django_readonly_environment',
    'setup_test_environment',
)


def patch_modname(modname):
    """Strip the leading 'autotest_lib' package from a dotted module name.

    Args:
        modname: dotted module name.

    Returns:
        ``modname`` without the prefix when present. For example,
        patch_modname('autotest_lib.foo.bar') == 'foo.bar'
        patch_modname('foo.bar') == 'foo.bar'
    """
    # Matches both 'autotest_lib.' and a bare trailing 'autotest_lib'.
    if modname.startswith(ROOT_MODULE[:-1]):
        return modname[len(ROOT_MODULE):]
    return modname
def patch_consumed_list(to_consume=None, consumed=None):
    """Move side-effect-only modules from the pending dict to the seen dict.

    Autotest relies on importing certain modules solely for their side
    effects, so pylint would otherwise flag them as unused imports. To avoid
    that, transplant those module names into the dictionary of names pylint
    has already seen referenced, before it runs its check.

    Args:
        to_consume: dict of names pylint still expects to see referenced.
        consumed: dict of names pylint has already seen referenced.
    """
    if to_consume is None or consumed is None:
        return
    for ignored in _IGNORE_MODULES:
        if ignored in to_consume:
            # pop() records the entry as consumed and removes it from the
            # pending dict in one step.
            consumed[ignored] = to_consume.pop(ignored)
# This decorator will be used for monkey patching the built-in pylint classes.
def patch_cls(cls):
    """Return a decorator that installs a function as a method of ``cls``.

    Args:
        cls: the class to patch.

    Returns:
        A decorator that sets the decorated function on ``cls`` under the
        function's own name and returns the function unchanged.
    """
    def patcher(method):
        setattr(cls, method.__name__, method)
        # Bug fix: the original returned None, rebinding every decorated
        # name to None at the use site (hence the unused-variable disables);
        # returning the method follows decorator convention and allows
        # stacking.
        return method
    return patcher
def CustomizeImportsChecker():
    """Modifies stock imports checker to suit autotest."""
    checker_cls = imports.ImportsChecker
    stock_visit_from = checker_cls.visit_from

    @patch_cls(checker_cls)
    def visit_from(self, node):  # pylint: disable=unused-variable
        """Strip the autotest_lib prefix before the stock import check runs."""
        node.modname = patch_modname(node.modname)
        return stock_visit_from(self, node)
def CustomizeVariablesChecker():
    """Modifies stock variables checker to suit autotest."""
    cls = variables.VariablesChecker
    old_visit_module = cls.visit_module

    @patch_cls(cls)
    def visit_module(self, node):  # pylint: disable=unused-variable
        """Unflag 'import common'.

        _to_consume eg: [({to reference}, {referenced}, 'scope type')]
        Entries are appended to this list as we drill deeper in scope.
        If we ever come across a module to ignore, we immediately move it
        to the consumed list.

        Args:
            node: node of the ast we're currently checking.
        """
        old_visit_module(self, node)
        # Pop the current scope's entry, move ignored modules into the
        # "already referenced" dict, then restore the entry unchanged.
        # pylint: disable=protected-access
        scoped_names = self._to_consume.pop()
        patch_consumed_list(scoped_names[0], scoped_names[1])
        self._to_consume.append(scoped_names)

    old_visit_from = cls.visit_from

    @patch_cls(cls)
    def visit_from(self, node):  # pylint: disable=unused-variable
        """Patches modnames so pylint understands autotest_lib."""
        node.modname = patch_modname(node.modname)
        return old_visit_from(self, node)
def _ShouldSkipArg(arg):
"""Checks if arg name can be excluded from @param list.
Returns:
True if the argument given by arg is whitelisted, and does
not require a "@param" docstring.
"""
return arg in ('self', 'cls', 'args', 'kwargs', 'dargs')
def ShouldSkipDocstring(node):
    """Returns whether docstring checks should be skipped for this node.

    Args:
        node: The function node under examination.
    """
    # Only the standard autotest entry points may be exempt.
    if node.name not in ('run_once', 'initialize', 'cleanup'):
        return False
    # Even plain functions have a parent (their module) and a frame (the
    # context of said module); they need not, however, have ancestors.
    enclosing = node.parent.frame()
    if not hasattr(enclosing, 'ancestors'):
        return False
    return any(ancestor.name == 'base_test' for ancestor in enclosing.ancestors())
def CustomizeDocStringChecker():
    """Modifies stock docstring checker to suit Autotest doxygen style."""
    cls = base.DocStringChecker

    @patch_cls(cls)
    def visit_module(_self, _node):  # pylint: disable=unused-variable
        """Don't visit imported modules when checking for docstrings.

        Intentionally empty: replaces the stock visit_module with a no-op.

        Args:
            node: the node we're visiting.
        """

    old_visit_function = cls.visit_function

    @patch_cls(cls)
    def visit_function(self, node):  # pylint: disable=unused-variable
        """Don't request docstrings for commonly overridden autotest functions.

        Args:
            node: node of the ast we're currently checking.
        """
        # Skip base_test entry points; otherwise defer to the stock check.
        if ShouldSkipDocstring(node):
            return
        old_visit_function(self, node)
class ParamChecker(BaseChecker):
    """Checks that each argument has a @param entry in the docstring."""

    __implements__ = IAstroidChecker

    # The numbering for this message matches that of the doc string checker
    # class in chromite.cli.cros.lint.
    class _MessageCP010(object):
        """Message for missing @param statements."""
        pass

    name = 'doc_string_param_checker'
    priority = -1
    MSG_ARGS = 'offset:%(offset)i: {%(line)s}'
    msgs = {
        'C9010': ('Docstring for %(func)s needs "@param %(arg)s:"',
                  ('docstring-missing-args'), _MessageCP010),
    }

    def visit_function(self, node):
        """Verify the function's docstrings."""
        # Only check documented functions that are not exempt entry points.
        if node.doc and not ShouldSkipDocstring(node):
            self._check_all_args_in_doc(node)

    # Captures the argument name out of a doxygen-style "@param name:" line.
    ARG_DOCSTRING_RGX = re.compile(r'@param ([^:]+)')

    def _check_all_args_in_doc(self, node):
        """Teaches pylint to look for @param with each argument.

        Args:
            node: node of the ast we're currently checking.
        """
        # Arguments that must be documented, minus the whitelisted names.
        present_args = set(arg for arg in node.argnames()
                           if not _ShouldSkipArg(arg))
        documented_args = set(re.findall(self.ARG_DOCSTRING_RGX, node.doc))
        for undocumented in present_args - documented_args:
            self.add_message('C9010', node=node,
                             line=node.fromlineno,
                             args={'arg': undocumented,
                                   'func': node.name},)
def register(linter):
    """Pylint will call this func when we use the 'load-plugins' invocation.

    Args:
        linter: The pylint linter instance for this run.
    """
    # Walk all the classes in this module and register ours.
    linter.register_checker(ParamChecker(linter))
    CustomizeDocStringChecker()
    CustomizeImportsChecker()
    CustomizeVariablesChecker()
    # NOTE(review): indentation was lost in this copy; this assignment is
    # assumed to belong inside register() so logilab is only patched once
    # the plugin is loaded -- confirm against the upstream source.
    logilab.common.modutils.file_from_modpath = file_from_modpath
| 31.864151 | 79 | 0.71613 | 1,159 | 8,444 | 5.08887 | 0.281277 | 0.01492 | 0.01119 | 0.021363 | 0.137165 | 0.114615 | 0.096982 | 0.096982 | 0.089691 | 0.083418 | 0 | 0.003823 | 0.194576 | 8,444 | 264 | 80 | 31.984848 | 0.863402 | 0.493842 | 0 | 0.160377 | 0 | 0 | 0.09368 | 0.038471 | 0 | 0 | 0 | 0 | 0 | 1 | 0.169811 | false | 0.009434 | 0.103774 | 0 | 0.443396 | 0.009434 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21a0c8636280ad53c71eff8b3260ed75a7d6bbee | 8,298 | py | Python | MindLink-Eumpy/data_collection_framework/util/EEG_record.py | Breeze1in1drizzle/MindLink-Exploring | 24e7d60112754c9fe5faf7b7f9ae255fa1bc4c59 | [
"MIT"
] | 7 | 2020-11-19T14:34:50.000Z | 2022-02-26T14:16:50.000Z | MindLink-Eumpy/data_collection_framework/util/EEG_record.py | Breeze1in1drizzle/MindLink-Exploring | 24e7d60112754c9fe5faf7b7f9ae255fa1bc4c59 | [
"MIT"
] | 1 | 2021-08-20T07:30:32.000Z | 2021-09-01T07:20:14.000Z | MindLink-Eumpy/data_collection_framework/util/EEG_record.py | Breeze1in1drizzle/MindLink-Exploring | 24e7d60112754c9fe5faf7b7f9ae255fa1bc4c59 | [
"MIT"
] | 2 | 2021-07-20T08:59:14.000Z | 2021-08-10T08:03:56.000Z | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 18 11:42:40 2018 by Yongrui Huang
Modified on Tue Oct 29th 16:16:50 2019 by Ruixin Lee
@author: Yongrui Huang
"""
import sys
sys.path.append('../')
from data_collection_framework.util import record
import configuration
import os
import platform
import time
import ctypes
from array import *
from ctypes import *
import multiprocessing
if sys.platform.startswith('win32'):
import msvcrt
elif sys.platform.startswith('linux'):
import atexit
from select import select
from ctypes import *
import numpy as np
class EEGRecorder(record.AbstractRecorder):
'''
This class gives an example of how to use AbstractRecorder to record EEG data.
Specifically, the device we used is the Emotiv Insight.
Note that this code may not work in your environment, since the device
(Emotiv Insight) depends on the platform and on the version of the
programming language; it was developed on 64-bit Windows with Python 3.5.
You may notice that the code here is very complex; however, it is largely
the code that Emotiv released, with only minor modifications.
Oct 29 2019
Now we use the Emotiv Epoc+, which has 14 channels, to record EEG signals.
'''
def __init__(self, name):
    """Initialize the EEG recorder.

    Args:
        name: recorder name forwarded to record.AbstractRecorder.
    """
    record.AbstractRecorder.__init__(self, name)
def record_one_sample(self):
'''
This method is supposed to read one data sample from information
source. For EEG using emotiv insight, it means recording signal from
5 different channel (i.e, IED_AF3, IED_AF4, IED_T7, IED_T8, IED_Pz) and
each channel's PSD features (i.e. theta, alpha, low_beta, high_beta, gamma).
25 features totally to be treated as a sample.
Oct 29 2019
Now we use Emotiv Epoc+ with 14 channels to record EEG signals.
My mission is to complete the connection method and make it more convenient
for others and suitable for all kinds of platforms.
'''
#------------------------------------
print("configuration.ROOT_PATH:")
print(configuration.ROOT_PATH)
print("configuration.ROOT_PATH.")
# load_str = configuration.ROOT_PATH + "data_collection_frame/util/win64/edk/1.edk.dll"
print("load_str:")
# print(load_str)
print("load_str.")
#--------------------------------------
# self.libEDK = cdll.LoadLibrary(configuration.ROOT_PATH + "data_collection_frame/util/win64/edk.dll")
# load sdk
self.libEDK = cdll.loadLibrary("win64/edk.dll")
# print("self.libEDK")
# print(self.libEDK)
# print("self.libEDK")
# cdll.LoadLibrary()???
# self.libEDK = cdll.LoadLibrary(load_str)
# print("self.libEDK")
# print(self.libEDK)
# print("self.libEDK")
# Create an Emotiv Engine Event
IEE_EmoEngineEventCreate = self.libEDK.IEE_EmoEngineEventCreate
IEE_EmoEngineEventCreate.restype = c_void_p
self.eEvent = IEE_EmoEngineEventCreate()
IEE_EmoEngineEventGetEmoState = self.libEDK.IEE_EmoEngineEventGetEmoState
IEE_EmoEngineEventGetEmoState.argtypes = [c_void_p, c_void_p]
IEE_EmoEngineEventGetEmoState.restype = c_int
IEE_EmoStateCreate = self.libEDK.IEE_EmoStateCreate
IEE_EmoStateCreate.restype = c_void_p
eState = IEE_EmoStateCreate()
userID = c_uint(0)
user = pointer(userID)
ready = 0
state = c_int(0)
IEE_EngineConnect = self.libEDK.IEE_EngineConnect
#add code here
IEE_EngineConnect.restype = c_int
IEE_EngineConnect.argtypes = [c_void_p]
IEE_EngineGetNextEvent = self.libEDK.IEE_EngineGetNextEvent
IEE_EngineGetNextEvent.restype = c_int
IEE_EngineGetNextEvent.argtypes = [c_void_p]
IEE_EmoEngineEventGetUserId = self.libEDK.IEE_EmoEngineEventGetUserId
IEE_EmoEngineEventGetUserId.restype = c_int
IEE_EmoEngineEventGetUserId.argtypes = [c_void_p , c_void_p]
IEE_EmoEngineEventGetType = self.libEDK.IEE_EmoEngineEventGetType
IEE_EmoEngineEventGetType.restype = c_int
IEE_EmoEngineEventGetType.argtypes = [c_void_p]
IEE_EmoEngineEventCreate = self.libEDK.IEE_EmoEngineEventCreate
IEE_EmoEngineEventCreate.restype = c_void_p
IEE_EmoEngineEventGetEmoState = self.libEDK.IEE_EmoEngineEventGetEmoState
IEE_EmoEngineEventGetEmoState.argtypes = [c_void_p, c_void_p]
IEE_EmoEngineEventGetEmoState.restype = c_int
IEE_EmoStateCreate = self.libEDK.IEE_EmoStateCreate
IEE_EmoStateCreate.argtype = c_void_p
IEE_EmoStateCreate.restype = c_void_p
IEE_FFTSetWindowingType = self.libEDK.IEE_FFTSetWindowingType
IEE_FFTSetWindowingType.restype = c_int
IEE_FFTSetWindowingType.argtypes = [c_uint, c_void_p]
IEE_GetAverageBandPowers = self.libEDK.IEE_GetAverageBandPowers
IEE_GetAverageBandPowers.restype = c_int
IEE_GetAverageBandPowers.argtypes = [c_uint, c_int, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p]
IEE_EngineDisconnect = self.libEDK.IEE_EngineDisconnect
IEE_EngineDisconnect.restype = c_int
IEE_EngineDisconnect.argtype = c_void_p
IEE_EmoStateFree = self.libEDK.IEE_EmoStateFree
IEE_EmoStateFree.restype = c_int
IEE_EmoStateFree.argtypes = [c_void_p]
IEE_EmoEngineEventFree = self.libEDK.IEE_EmoEngineEventFree
IEE_EmoEngineEventFree.restype = c_int
IEE_EmoEngineEventFree.argtypes = [c_void_p]
# finish adding code
# init frequency dataf
alphaValue = c_double(0)
low_betaValue = c_double(0)
high_betaValue = c_double(0)
gammaValue = c_double(0)
thetaValue = c_double(0)
alpha = pointer(alphaValue)
low_beta = pointer(low_betaValue)
high_beta = pointer(high_betaValue)
gamma = pointer(gammaValue)
theta = pointer(thetaValue)
channelList = array('I',[3, 7, 9, 12, 16]) # IED_AF3, IED_AF4, IED_T7, IED_T8, IED_Pz
if self.libEDK.IEE_EngineConnect(create_string_buffer(b"Emotiv Systems-5")) != 0:
print(self.libEDK.IEE_EngineConnect("Emotiv Systems-5"))
print("Emotiv Engine start up failed.")
return
state = IEE_EngineGetNextEvent(self.eEvent)
if state == 0:
eventType = IEE_EmoEngineEventGetType(self.eEvent)
IEE_EmoEngineEventGetUserId(self.eEvent, user)
if eventType == 64: # self.libEDK.IEE_Event_enum.IEE_UserAdded
ready = 1
self.libEDK.IEE_FFTSetWindowingType(userID, 1);
# 1: self.libEDK.IEE_WindowingTypes_enum.IEE_HAMMING
if ready == 1:
EEG_row = np.zeros(25)
j = 0
for i in channelList:
result = c_int(0)
result = self.libEDK.IEE_GetAverageBandPowers(
userID, i, theta, alpha, low_beta, high_beta, gamma
)
if result == 0: # EDK_OK
EEG_row[j*5+0], EEG_row[j*5+1], EEG_row[j*5+2], EEG_row[j*5+3], EEG_row[j*5+4] = (
thetaValue.value, alphaValue.value, low_betaValue.value,
high_betaValue.value, gammaValue.value
)
j += 1
return EEG_row
elif state != 0x0600:
print("Internal error in Emotiv Engine ! ")
else:
print('Noe event for EEG device!')
    def release_resourse_in_one_trial(self):
        """Release per-trial resources; this recorder has nothing to free."""
        pass
if __name__ == '__main__':
    # Smoke test: record and print 100 samples from a connected headset.
    print("main.start")
    eeg_recorder = EEGRecorder('EEG')
    for i in range(100):
        print("i: %d" % i)
        print(eeg_recorder.record_one_sample())
    print("main.end")
| 38.416667 | 110 | 0.626657 | 954 | 8,298 | 5.226415 | 0.283019 | 0.060168 | 0.027678 | 0.023466 | 0.257521 | 0.238067 | 0.222222 | 0.209587 | 0.1785 | 0.139591 | 0 | 0.019408 | 0.292119 | 8,298 | 215 | 111 | 38.595349 | 0.829418 | 0.241624 | 0 | 0.126984 | 0 | 0 | 0.040884 | 0.007913 | 0 | 0 | 0.000989 | 0 | 0 | 1 | 0.02381 | false | 0.007937 | 0.119048 | 0 | 0.166667 | 0.103175 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21a131fadbf256dc2fd90d0c9f6e8868b23e49f2 | 3,198 | py | Python | autophrase/pos_tag.py | QianyangPeng/AutophrasePy | 1d34a62fdf96a649f5e06fe69dc74fa8d69fc8a7 | [
"Apache-2.0"
] | 7 | 2018-08-11T07:03:07.000Z | 2022-03-18T06:33:30.000Z | autophrase/pos_tag.py | QianyangPeng/AutophrasePy | 1d34a62fdf96a649f5e06fe69dc74fa8d69fc8a7 | [
"Apache-2.0"
] | 1 | 2018-11-01T08:18:00.000Z | 2018-11-01T08:18:00.000Z | autophrase/pos_tag.py | QianyangPeng/AutophrasePy | 1d34a62fdf96a649f5e06fe69dc74fa8d69fc8a7 | [
"Apache-2.0"
] | 1 | 2020-05-05T12:54:56.000Z | 2020-05-05T12:54:56.000Z | import os
from glob import glob
from shutil import copyfile
from math import floor
import sys
import threading
# from download_parameter_files import download_parameter_files
def split_file(num_lines, num_files, raw, tmp):
    """Split *raw* into at most *num_files* chunks of *num_lines* lines each.

    Chunks are written to ``tmp/split_files.<n>``; any surplus lines beyond
    the last chunk boundary stay in the final chunk.
    """
    out_handle = None
    chunk_index = 0
    with open(raw) as source:
        for line_number, line in enumerate(source):
            # Open the next chunk at each boundary until the quota is used up.
            if line_number % num_lines == 0 and chunk_index != num_files:
                if out_handle:
                    out_handle.close()
                out_handle = open(tmp + "/split_files.{}".format(chunk_index), "w")
                chunk_index += 1
            out_handle.write(line)
    if out_handle:
        out_handle.close()
def one_line_per_word(file):
    """Write *file* to *file*.token with every space turned into a newline."""
    with open(file) as src, open(file + ".token", "w") as dst:
        for text_line in src:
            dst.write(text_line.replace(' ', '\n'))
def execute_tagger(file, tagger, parfile):
    """Run the tree-tagger binary over *file*, writing *file*.tagged.

    The command is executed through the shell so the redirections work;
    *tagger* and *parfile* are trusted local paths.
    """
    command = "{} -quiet {} < {} > {}.tagged".format(tagger, parfile, file, file)
    os.system(command)
def pos_tag(language, num_thread, raw, tmp):
    """POS-tag *raw* with TreeTagger using up to *num_thread* parallel runs.

    The input is split into chunks, each chunk is tokenised (one word per
    line) and tagged in its own thread, and the tagged chunks are merged
    into ``tmp/pos_tags.txt`` in original order.

    Raises:
        SystemExit: when *language* is not one of EN/FR/IT/RU/ES.
    """
    root_path = os.path.dirname(os.path.abspath(__file__))

    # Remove leftovers from a previous (possibly aborted) run.
    for file in glob(tmp + "/split_files*"):
        os.remove(file)

    print("Current step: Splitting files...")
    with open(raw) as f:
        num_lines = floor(sum(1 for _ in f) / num_thread)
    if num_lines <= 0 or num_thread == 1:
        # Too small to split (or single-threaded): one chunk only.
        copyfile(raw, tmp + "/split_files.0")
    else:
        split_file(num_lines, num_thread, raw, tmp)
    for file in glob(tmp + "/split_files.*"):
        one_line_per_word(file)

    print("Current step: Tagging...")
    # TreeTagger parameter file per supported language (replaces the
    # previous repetitive if/elif chain).
    parfiles = {
        "EN": "english-utf8.par",
        "FR": "french-utf8.par",
        "IT": "italian-utf8.par",
        "RU": "russian-utf8.par",
        "ES": "spanish-utf8.par",
    }
    if language not in parfiles:
        sys.exit("[ERROR]: Tree tagger does not support the language.")
    tagger = root_path + "/tools/treetagger/bin/tree-tagger"
    parfile = root_path + "/tools/treetagger/lib/" + parfiles[language]

    thread_list = []
    for file in glob(tmp + "/split_files.*.token"):
        t = threading.Thread(target=execute_tagger, args=(file, tagger, parfile))
        thread_list.append(t)
    for thread in thread_list:
        thread.start()
    for thread in thread_list:
        thread.join()

    print("Current step: Merging...")
    # BUG FIX: iterate the tagged files that actually exist instead of
    # assuming one per thread — when num_lines <= 0 only split_files.0 was
    # created and the old range(num_thread) loop raised FileNotFoundError.
    # Sort numerically so chunks are merged in original order.
    tagged_files = sorted(
        glob(tmp + "/split_files.*.token.tagged"),
        key=lambda p: int(p.rsplit("split_files.", 1)[1].split(".")[0]),
    )
    with open(tmp + "/pos_tags.txt", "w") as outfile:
        for file_name in tagged_files:
            with open(file_name, "r") as infile:
                outfile.write(infile.read())

    for file in glob(tmp + "/split_files*"):
        os.remove(file)
21a59aacb4bd81cb3e7cdf393821079b2b85a81e | 926 | py | Python | app/utils/try.py | yudhik11/StackOverflow_UserQuery | b3c88ed18304078901497a9a0dc99c232e1ffac4 | [
"MIT"
] | 3 | 2019-07-21T16:30:27.000Z | 2020-03-04T15:03:32.000Z | app/utils/try.py | yudhik11/StackOverflow_UserQuery | b3c88ed18304078901497a9a0dc99c232e1ffac4 | [
"MIT"
] | 11 | 2020-01-28T22:51:49.000Z | 2022-02-10T09:16:19.000Z | app/utils/try.py | yudhik11/StackOverflow_UserQuery | b3c88ed18304078901497a9a0dc99c232e1ffac4 | [
"MIT"
] | 2 | 2019-07-21T12:30:13.000Z | 2019-07-31T16:34:07.000Z | import requests, re
import numpy as np
u='https://api.stackexchange.com/2.2/similar'
tag_arr=[]
def clean_text(text):
text = str(text)
text = re.sub(r"[^\w]", " ", text.lower())
return text
# for i in range(1,28):
for i in range(1,2):
print(i)
p={'page':str(i), 'pagesize':'100','fromdate':'1388534400','tagged':'javascript;node.js;npm','title':'node how to run node app js', 'order':'desc','sort':'votes','min':'40','site':'stackoverflow','key':'hWdB8OaWM0hGZP3sRV18iA(('}
r = requests.get(url = u, params = p)
data = r.json()
temp = data['items']
for k in temp:
# tag_arr.append(k['title'].encode('latin1').decode('utf-8'))
tag_arr.append(k['title'])
print(tag_arr)
#np.savetxt('ques-so.txt', tag_arr, delimiter=',', newline='\n', fmt='%s')
with open('ques-tmp.txt', 'w') as f:
for item in tag_arr:
f.write("%s\n" % clean_text( item))
f.close()
| 33.071429 | 233 | 0.600432 | 147 | 926 | 3.727891 | 0.591837 | 0.065693 | 0.021898 | 0.040146 | 0.109489 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037909 | 0.173866 | 926 | 27 | 234 | 34.296296 | 0.678431 | 0.166307 | 0 | 0 | 0 | 0 | 0.304688 | 0.059896 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.095238 | 0 | 0.190476 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21a73b6961e324b2bf36ea8c248c456445c83c7d | 832 | py | Python | infrastructure/codigos/solver/extraer_datos_yahoo.py | izmfc/MNO_finalproject | 5e25ba84708f75e98768e75a681992986efd87fc | [
"RSA-MD"
] | null | null | null | infrastructure/codigos/solver/extraer_datos_yahoo.py | izmfc/MNO_finalproject | 5e25ba84708f75e98768e75a681992986efd87fc | [
"RSA-MD"
] | 61 | 2020-04-25T01:09:22.000Z | 2020-05-29T00:18:46.000Z | infrastructure/codigos/solver/extraer_datos_yahoo.py | izmfc/MNO_finalproject | 5e25ba84708f75e98768e75a681992986efd87fc | [
"RSA-MD"
] | 4 | 2020-05-01T19:24:45.000Z | 2021-01-23T01:28:44.000Z | import yfinance as yf
def extraer_datos_yahoo(stocks, start='2015-01-01', end='2020-04-30'):
    '''
    Fetch closing prices for the given tickers via Yahoo Finance.

    params: stocks  list of tickers whose closing prices are wanted
            start   first date (inclusive), default 2015-01-01
            end     end date, default 2020-04-30
    return: base    DataFrame of prices per ticker (columns) and day (rows)
    '''
    df_c = yf.download(stocks, start=start, end=end).Close
    # Seed the frame with AAPL so every other column is aligned to its
    # trading-day index.
    base = df_c['AAPL'].dropna().to_frame()
    # NOTE(review): hard-codes the first 50 downloaded columns — assumes at
    # least 50 tickers were requested; TODO confirm against callers.
    for i in range(0,50):
        base = base.join(df_c.iloc[:,i].to_frame(), lsuffix='_caller', rsuffix='_other')
    # AAPL ends up joined twice; drop the seed copy and restore the name.
    base = base.drop(columns=['AAPL_caller'])
    base = base.rename(columns={"AAPL_other": "AAPL"})
    # Forward-fill then back-fill gaps (holidays / missing quotes).
    base = base.fillna(method='ffill')
    base = base.fillna(method='bfill')
    return base
21a81e454b5018f5bfc22e5ccbd3228cb3ba62b6 | 2,786 | py | Python | tests/unit/test_bio_metadata.py | haoyuanli/candig-server | 5f58a4ef5f58d9e91afb98d385e9213e0899e51d | [
"Apache-2.0"
] | 4 | 2019-05-08T20:42:10.000Z | 2021-08-13T16:39:38.000Z | tests/unit/test_bio_metadata.py | haoyuanli/candig-server | 5f58a4ef5f58d9e91afb98d385e9213e0899e51d | [
"Apache-2.0"
] | 102 | 2019-04-03T23:08:46.000Z | 2021-11-28T19:41:38.000Z | tests/unit/test_bio_metadata.py | haoyuanli/candig-server | 5f58a4ef5f58d9e91afb98d385e9213e0899e51d | [
"Apache-2.0"
] | 6 | 2019-05-08T20:42:16.000Z | 2021-08-21T03:15:44.000Z | """
Tests the biodata module
"""
import unittest
import candig.server.datamodel.datasets as datasets
import candig.server.exceptions as exceptions
import candig.server.datamodel.bio_metadata as bioMetadata
import candig.schemas.protocol as protocol
class TestIndividuals(unittest.TestCase):
"""
Tests the Individuals class
"""
def testToProtocolElement(self):
dataset = datasets.Dataset('dataset1')
term = protocol.OntologyTerm()
term.term = "male genotypic sex"
term.term_id = "PATO:0020001"
# Write out a valid input
print(protocol.toJsonDict(term))
validIndividual = protocol.Individual(
name="test",
created="2016-05-19T21:00:19Z",
updated="2016-05-19T21:00:19Z",
sex=term)
validIndividual.attributes.attr['test']. \
values.add().string_value = 'test-info'
# pass through protocol creation
individual = bioMetadata.Individual(
dataset, "test")
individual.populateFromJson(protocol.toJson(validIndividual))
gaIndividual = individual.toProtocolElement()
# Verify elements exist
self.assertEqual(gaIndividual.created, validIndividual.created)
self.assertEqual(gaIndividual.updated, validIndividual.updated)
# Invalid input
invalidIndividual = '{"bad:", "json"}'
individual = bioMetadata.Individual(dataset, "test")
# Should fail
self.assertRaises(
exceptions.InvalidJsonException,
individual.populateFromJson,
invalidIndividual)
class TestBiosamples(unittest.TestCase):
    """
    Tests the Biosamples class
    """

    def testToProtocolElement(self):
        """Round-trips a Biosample through JSON and checks error handling."""
        dataset = datasets.Dataset('dataset1')
        # Write out a valid input
        validBiosample = protocol.Biosample(
            name="test",
            created="2016-05-19T21:00:19Z",
            updated="2016-05-19T21:00:19Z")
        validBiosample.attributes.attr['test']. \
            values.add().string_value = 'test-info'
        # pass through protocol creation
        biosample = bioMetadata.Biosample(
            dataset, "test")
        biosample.populateFromJson(protocol.toJson(validBiosample))
        gaBiosample = biosample.toProtocolElement()
        # Verify elements exist
        self.assertEqual(gaBiosample.created, validBiosample.created)
        self.assertEqual(gaBiosample.updated, validBiosample.updated)
        # Invalid input
        invalidBiosample = '{"bad:", "json"}'
        # BUG FIX: this path previously instantiated bioMetadata.Individual,
        # so the invalid-JSON behavior of Biosample was never exercised.
        biosample = bioMetadata.Biosample(dataset, "test")
        # Should fail
        self.assertRaises(
            exceptions.InvalidJsonException,
            biosample.populateFromJson,
            invalidBiosample)
| 35.265823 | 71 | 0.648959 | 248 | 2,786 | 7.274194 | 0.33871 | 0.026608 | 0.02439 | 0.028825 | 0.414634 | 0.364745 | 0.308204 | 0.308204 | 0.238359 | 0.238359 | 0 | 0.03128 | 0.254128 | 2,786 | 78 | 72 | 35.717949 | 0.836862 | 0.102656 | 0 | 0.307692 | 0 | 0 | 0.084794 | 0 | 0 | 0 | 0 | 0 | 0.115385 | 1 | 0.038462 | false | 0 | 0.096154 | 0 | 0.173077 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21a84ccfa838eca956c2bc2c4621cd9756bd544c | 4,580 | py | Python | eval/fig4.py | tk2lab/logbesselk | 6ffbc344c0b472d307a556e50de943a380616fb2 | [
"Apache-2.0"
] | null | null | null | eval/fig4.py | tk2lab/logbesselk | 6ffbc344c0b472d307a556e50de943a380616fb2 | [
"Apache-2.0"
] | null | null | null | eval/fig4.py | tk2lab/logbesselk | 6ffbc344c0b472d307a556e50de943a380616fb2 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.colors import ListedColormap
from . import common
def v_loc(x):
    """Map a v value onto the heat-map axis coordinate: 40 * log10(x + 1)."""
    return np.log10(x + 1) * 40
def x_loc(x):
    """Map an x value onto the heat-map axis coordinate: 40 * (log10(x) + 1)."""
    return (np.log10(x) + 1) * 40
def main(debug=False):
    """Build the 2x2 method-comparison heat-map figure (figs/fig4.pdf).

    Left column: which variant (S/C/A) wins per grid point, by precision
    (top) and by time (bottom). Right column: precision and time of the
    combined SCA selection rule.
    """
    # SCA selection rule: use variant A above thr_v; below it, S under the
    # curve thr_x(v) and C above it.
    thr_v = 25.0
    thr_x = lambda v: 1.6 + 0.5 * np.log(v + 1)
    name = ['I', 'A', 'S', 'C', 'SCA']
    suffix = ['', '', '', '', '']
    df = []
    for n, s in zip(name, suffix):
        # Mean log-error per (v, x) grid point for this variant.
        prec = pd.read_csv(f'results/logk_prec_{n}{s}.csv')
        prec = prec.groupby(['v', 'x'])['log_err'].mean()
        prec.name = f'prec_{n}'
        # Mean runtime per grid point, converted to milliseconds.
        time = pd.read_csv(f'results/logk_time_{n}{s}.csv')
        time = time.groupby(['v', 'x'])['time'].mean()
        time = 1000 * time
        time.name = f'time_{n}'
        df += [prec, time]
    df = pd.concat(df, axis=1)
    # Unpack the (v, x) MultiIndex into coordinate arrays for masking.
    v, x = [np.array(z) for z in zip(*df.index)]
    # Compose the SCA columns by patching S/C/A values region-wise.
    for t in ['prec', 'time']:
        df[f'{t}_SCA'] = df[f'{t}_S']
        df.loc[x < thr_x(v), f'{t}_SCA'] = df[f'{t}_S']
        df.loc[x >= thr_x(v), f'{t}_SCA'] = df[f'{t}_C']
        df.loc[v >= thr_v, f'{t}_SCA'] = df[f'{t}_A']
    # Per grid point: index of the most precise variant (0=S, 1=C, 2=A);
    # -1 marks points no variant claimed.
    df['type_prec'] = -1
    df['min_prec'] = np.inf
    df['min_time'] = np.inf
    for i, name in enumerate(['S', 'C', 'A']):
        cond = df[f'prec_{name}'] < df['min_prec']
        df.loc[cond, 'type_prec'] = i
        df.loc[cond, 'min_prec'] = df.loc[cond, f'prec_{name}']
        df.loc[cond, 'min_time'] = df.loc[cond, f'time_{name}']
    # Per grid point: fastest variant among those with acceptable
    # precision (log_err < 1).
    df['type_time'] = -1
    df['min_prec'] = np.inf
    df['min_time'] = np.inf
    for i, name in enumerate(['S', 'C', 'A']):
        cond = (df[f'prec_{name}'] < 1.) & (df[f'time_{name}'] < df['min_time'])
        df.loc[cond, 'type_time'] = i
        df.loc[cond, 'min_prec'] = df.loc[cond, f'prec_{name}']
        df.loc[cond, 'min_time'] = df.loc[cond, f'time_{name}']
    #type_cmap = ListedColormap(['magenta', 'blue', 'green', 'cyan'])
    #type_cmap.set_under('white')
    # Panel layout: rows = {precision, time}, cols = {winner map, SCA value}.
    name = [['type_prec', 'prec_SCA'], ['type_time', 'time_SCA']]
    #pos = [[[0.1, 0.85], [0.85, 0.1]], [[0.1, 0.1], [0.1, 0.85]]]
    vmin = [[-1.5, 0], [-1.5, 0]]
    vmax = [[3.5, 2.8], [3.5, 28]]
    #cmap = [[type_cmap, 'Reds'], [type_cmap, 'Blues']]
    cmap = [['Greys', 'Greys'], ['Greys', 'Greys']]
    fig = common.figure(figsize=(5.5, 4), box=debug)
    ax = fig.subplots(
        2, 2, sharex=True, sharey=True,
        #gridspec_kw=dict(width_ratios=(1,1,0.15)),
    )
    #ax[0, 2].set_visible(False)
    #ax[1, 2].set_visible(False)
    #ax[0, 2] = fig.add_axes([0.93, 0.1, 0.02, 0.4])
    #ax[1, 2] = fig.add_axes([0.93, 0.57, 0.02, 0.4])
    vticks = [0, 1, 5, 10, 50]
    xticks = [0.1, 0.5, 1, 5, 10, 50]
    # Subplot labels (a-d) and their axes-relative positions.
    label = [['a', 'c'], ['b', 'd']]
    pos = [[[-0.15, 0.8], [-0.1, 0.8]],
           [[-0.15, 0.8], [-0.1, 0.8]]]
    for i in range(2):
        for j in range(2):
            # Pivot the series into a (x rows) x (v columns) grid.
            hm = df[name[i][j]].unstack(0)
            if j == 0:
                args = dict(cbar=False)
            else:
                args = dict(cbar=True)
            sns.heatmap(hm, vmin=vmin[i][j], vmax=vmax[i][j], cmap=cmap[i][j], ax=ax[i, j], **args)
            #sns.heatmap(hm, vmin=vmin[i][j], vmax=vmax[i][j], cmap=cmap[i][j], ax=ax[i, j])
            # Overlay the region boundaries of the SCA selection rule,
            # mapped into heat-map cell coordinates.
            v = np.linspace(0, thr_v, 100)
            x = thr_x(v)
            v = v_loc(v)
            x = x_loc(x)
            ax[i, j].plot(v, x, c='k')
            ax[i, j].plot([v_loc(thr_v), v_loc(thr_v)], [x_loc(0.1), x_loc(10**2.1)], c='k')
            ax[i, j].invert_yaxis()
            ax[i, j].text(*pos[i][j], label[i][j], transform=ax[i, j].transAxes)
            # Annotate the three regions with their variant letters.
            ax[i, j].text(v_loc(5), x_loc(0.5), 'S')
            ax[i, j].text(v_loc(2), x_loc(6), 'C')
            ax[i, j].text(v_loc(50), x_loc(1), 'A')
            ax[i, j].set_xticks([v_loc(v) for v in vticks])
            ax[i, j].set_xticklabels([f"${k}$" for k in vticks], rotation=0)
            ax[i, j].xaxis.set_ticks_position('both')
            ax[i, j].set_yticks([x_loc(x) for x in xticks])
            ax[i, j].set_yticklabels([f"${k}$" for k in xticks])
            ax[i, j].yaxis.set_ticks_position('both')
            # Only outer edges carry axis labels.
            if i == 1:
                ax[i, j].set_xlabel('$v$')
            else:
                ax[i, j].set_xlabel('')
            if j == 0:
                ax[i, j].set_ylabel('$x$')
            else:
                ax[i, j].set_ylabel('')
    #cbar = ax[0, 0].collections[0].colorbar
    #cbar.set_ticks([0, 10, 20])
    #cbar.set_ticklabels([f'${{{l}}}$' for l in [0, 10, 20]])
    fig.savefig('figs/fig4.pdf')
if __name__ == '__main__':
    # Render figs/fig4.pdf without interactive debug boxes.
    main(debug=False)
| 35.503876 | 99 | 0.4869 | 792 | 4,580 | 2.688131 | 0.191919 | 0.027243 | 0.037576 | 0.026303 | 0.376703 | 0.286519 | 0.249883 | 0.226867 | 0.218412 | 0.195867 | 0 | 0.051964 | 0.277293 | 4,580 | 128 | 100 | 35.78125 | 0.591239 | 0.129913 | 0 | 0.154639 | 0 | 0 | 0.115055 | 0.014099 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030928 | false | 0 | 0.061856 | 0.020619 | 0.113402 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21a89ee632dc825af897834036510c30f557a71f | 1,660 | py | Python | yt_idv/scene_components/particles.py | sochowski/yt_idv | d840d3f80e6f5afb743f2eb9afce1f576645c731 | [
"BSD-3-Clause"
] | 3 | 2021-02-08T21:27:21.000Z | 2021-07-09T08:35:37.000Z | yt_idv/scene_components/particles.py | sochowski/yt_idv | d840d3f80e6f5afb743f2eb9afce1f576645c731 | [
"BSD-3-Clause"
] | 17 | 2020-12-22T18:45:04.000Z | 2022-02-25T20:05:50.000Z | yt_idv/scene_components/particles.py | sochowski/yt_idv | d840d3f80e6f5afb743f2eb9afce1f576645c731 | [
"BSD-3-Clause"
] | 4 | 2021-04-02T19:56:56.000Z | 2021-09-24T01:22:55.000Z | import math
import numpy as np
import traitlets
from OpenGL import GL
from yt_idv.scene_components.base_component import SceneComponent
from yt_idv.scene_data.particle_positions import ParticlePositions
class ParticleRendering(SceneComponent):
    """Scene component that renders particle positions as instanced quads."""

    name = "particle_rendering"
    data = traitlets.Instance(ParticlePositions)
    # Scale factor passed to the shader as the "scale" uniform.
    scale = traitlets.CFloat(1e-3)
    # Size cap passed to the shader as the "max_particle_size" uniform.
    max_particle_size = traitlets.CFloat(1e-3)

    def render_gui(self, imgui, renderer, scene):
        """Draw the imgui controls; return True when any setting changed."""
        changed = super(ParticleRendering, self).render_gui(imgui, renderer, scene)
        # Edit the scale on a log10 slider so very small values stay usable.
        _, new_value = imgui.slider_float(
            "Log Scale", math.log10(self.scale), -8.0, 2.0
        )
        if _:
            self.scale = 10 ** new_value
            changed = True
        imgui.text("Filter Particle Max Size")
        # The slider operates on the reciprocal: a larger slider value
        # means a smaller maximum particle size.
        _, new_value = imgui.slider_float("", 1.0 / self.max_particle_size, 1.0, 100.0)
        if _:
            self.max_particle_size = 1.0 / new_value
            changed = True
        return changed

    def draw(self, scene, program):
        """Issue one instanced draw call: a 4-vertex strip per particle."""
        GL.glEnable(GL.GL_CULL_FACE)
        GL.glCullFace(GL.GL_BACK)
        GL.glDrawArraysInstanced(GL.GL_TRIANGLE_STRIP, 0, 4, self.data.size)

    def _set_uniforms(self, scene, shader_program):
        """Upload camera matrices and particle-size uniforms to the shader."""
        cam = scene.camera
        shader_program._set_uniform("scale", self.scale)
        shader_program._set_uniform("projection", cam.projection_matrix)
        shader_program._set_uniform("modelview", cam.view_matrix)
        shader_program._set_uniform("max_particle_size", self.max_particle_size)
        # Inverse of the combined projection*view matrix, used by the shader
        # to unproject screen coordinates.
        shader_program._set_uniform(
            "inv_pmvm", np.linalg.inv(cam.projection_matrix @ cam.view_matrix)
        )
21a941828c011cb36b4651f7351fe451889403ba | 3,332 | py | Python | uniplot/cli.py | Titas-In-Cloud/uniplot | 0d31f1c4d4d5ac24543d96ec4bfdc2cbf43406c2 | [
"MIT"
] | null | null | null | uniplot/cli.py | Titas-In-Cloud/uniplot | 0d31f1c4d4d5ac24543d96ec4bfdc2cbf43406c2 | [
"MIT"
] | null | null | null | uniplot/cli.py | Titas-In-Cloud/uniplot | 0d31f1c4d4d5ac24543d96ec4bfdc2cbf43406c2 | [
"MIT"
] | null | null | null | import argparse
from os import path
from uniplot import plot
from . import parse
from . import analysis
# Read the configured data-file location once at import time.
# NOTE(review): this raises at import when location.txt is missing — run the
# "file_location" sub-command first to create it.
with open("location.txt", "r") as file:
    LOC = file.read()
def file_location_configuration(args):
    """Prompt for a data-file path and store it in location.txt.

    The stored location is only overwritten once the new path is known to
    exist. (BUG FIX: the old code truncated location.txt *before* validating
    the input, so an invalid path destroyed the previous setting.)
    """
    location = input("What file would you like to use? Please write the location: ")
    if path.exists(location):
        with open("location.txt", "w") as file:
            file.write(location)
        print("Success! File location was scanned.")
    else:
        print("Error! File does not exist.")
def dump(args):
    """Print every protein record in full (sequence, ID, name, length, ...)."""
    for seq_record in parse.uniprot_seqrecords(LOC):
        print(seq_record)
def name_list(args):
    """Print the name of each protein record, one per line."""
    for seq_record in parse.uniprot_seqrecords(LOC):
        print(seq_record.name)
def proteins_average_lenght(args):
    """Print the average length across all protein records."""
    average = analysis.average_len(parse.uniprot_seqrecords(LOC))
    print("Average Length is {}".format(average))
def bar_plot_average_by_taxa(args):
    """Show a bar chart of average protein length per top-level taxon."""
    averages = analysis.average_len_taxa(parse.uniprot_seqrecords(LOC), depth=())
    plot.plot_bar_show(averages)
def pie_plot_average_by_taxa(args):
    """Show a pie chart of average protein length per top-level taxon."""
    averages = analysis.average_len_taxa(parse.uniprot_seqrecords(LOC), depth=())
    plot.plot_pie_show(averages)
def cli():
    """Configure command-line parsing and dispatch the chosen sub-command."""
    parser = argparse.ArgumentParser(prog = "uniplot", usage = '%(prog)s [options]')
    subparsers = parser.add_subparsers(help = "Sub Command Help")
    subparsers.add_parser("file_location").set_defaults(func = file_location_configuration)
    subparsers.add_parser("dump").set_defaults(func = dump)
    subparsers.add_parser("list").set_defaults(func = name_list)
    subparsers.add_parser("average").set_defaults(func = proteins_average_lenght)
    subparsers.add_parser("bar_average-by-taxa").set_defaults(func = bar_plot_average_by_taxa)
    subparsers.add_parser("pie_average-by-taxa").set_defaults(func = pie_plot_average_by_taxa)
    # Help-text fixes: "lenght" -> "length" and a missing space at a
    # string-concatenation boundary ("filethat" -> "file that").
    parser.add_argument('--file_location', help = 'allows the user to set the location of the file '
                                                  'that he wants to use')
    parser.add_argument('--dump', help = 'gives a list with all the information about proteins '
                                         '- protein sequence, ID, name, length, description and other '
                                         'related data')
    parser.add_argument('--list', help = 'gives a list with only the lengths of proteins')
    parser.add_argument('--average', help = 'gives average length of all proteins')
    parser.add_argument('--bar_average-by-taxa', help = 'gives average length of proteins categorized '
                                                        'by type in a form of a bar chart')
    parser.add_argument('--pie_average-by-taxa', help = 'gives average length of proteins categorized '
                                                        'by type in a form of a pie chart')
    args = parser.parse_args()
    # BUG FIX: argparse on Python 3 does not require a sub-command, so
    # ``args`` may have no ``func`` attribute; show the help text instead of
    # crashing with AttributeError.
    if hasattr(args, 'func'):
        args.func(args)
    else:
        parser.print_help()
| 42.717949 | 103 | 0.664466 | 442 | 3,332 | 4.859729 | 0.262443 | 0.03352 | 0.048417 | 0.058194 | 0.379888 | 0.302607 | 0.252328 | 0.252328 | 0.219739 | 0.219739 | 0 | 0 | 0.226891 | 3,332 | 77 | 104 | 43.272727 | 0.833851 | 0.119148 | 0 | 0.111111 | 0 | 0 | 0.274232 | 0.014488 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12963 | false | 0 | 0.092593 | 0 | 0.222222 | 0.092593 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21aac5ef63f55e45d1b0efa2a2ae4e72550f661b | 2,709 | py | Python | tests/utils/adapters/test_source_adapter.py | damare01/novelsave | 7896e8393c944e169e3cb52a33ab81ae396dff9f | [
"Apache-2.0"
] | null | null | null | tests/utils/adapters/test_source_adapter.py | damare01/novelsave | 7896e8393c944e169e3cb52a33ab81ae396dff9f | [
"Apache-2.0"
] | null | null | null | tests/utils/adapters/test_source_adapter.py | damare01/novelsave | 7896e8393c944e169e3cb52a33ab81ae396dff9f | [
"Apache-2.0"
] | null | null | null | import pytest
from novelsave_sources import models as sm
from novelsave.core import dtos
from novelsave.utils.adapters import SourceAdapter
@pytest.fixture
def source_adapter() -> SourceAdapter:
    """Provide a fresh SourceAdapter instance for each test."""
    return SourceAdapter()
def test_novel_to_internal(source_adapter):
    """novel_to_internal maps every source field onto the internal DTO."""
    external = sm.Novel(
        title="title",
        author="author",
        synopsis=["a nice description"],
        thumbnail_url="thumbnail",
        lang="language",
        url="link",
    )
    internal = dtos.NovelDTO(
        id=None,
        title="title",
        author="author",
        synopsis="a nice description",
        thumbnail_url="thumbnail",
        thumbnail_path=None,
        lang="language",
        url="link",
        last_updated=None,
    )
    assert internal == source_adapter.novel_to_internal(external)
def test_chapter_to_internal(source_adapter):
    """chapter_to_internal maps source paragraphs onto DTO content."""
    external = sm.Chapter(
        index=1,
        title="title",
        paragraphs="paragraphs this is",
        url="https://",
    )
    internal = dtos.ChapterDTO(
        index=1,
        title="title",
        content="paragraphs this is",
        url="https://",
    )
    assert internal == source_adapter.chapter_to_internal(external)
def test_chapter_from_internal(source_adapter):
    """chapter_to_external maps DTO content back onto source paragraphs."""
    internal = dtos.ChapterDTO(
        index=1,
        title="title",
        content="paragraphs this is",
        url="https://",
    )
    external = sm.Chapter(
        index=1,
        title="title",
        paragraphs="paragraphs this is",
        url="https://",
    )
    assert external == source_adapter.chapter_to_external(internal)
def test_chapter_content_to_internal(source_adapter):
    """chapter_content_to_internal copies paragraphs into the DTO content."""
    external = sm.Chapter(
        index=1,
        title="title",
        paragraphs="paragraphs this is",
        url="https://",
    )
    internal = dtos.ChapterDTO(
        index=-1,
        title="",
        url="",
    )
    # The target DTO starts without content.
    assert internal.content is None
    source_adapter.chapter_content_to_internal(external, internal)
    assert internal.content == external.paragraphs
def test_metadata_to_internal(source_adapter):
    """metadata_to_internal copies fields and applies the OPF namespace."""
    external = sm.Metadata(
        name="name",
        value="value",
        others={"role": "something"},
    )
    internal = dtos.MetaDataDTO(
        name="name",
        value="value",
        others={"role": "something"},
        namespace="OPF",
    )
    assert internal == source_adapter.metadata_to_internal(external)
| 24.1875 | 78 | 0.640827 | 291 | 2,709 | 5.71134 | 0.209622 | 0.086041 | 0.039711 | 0.075211 | 0.552948 | 0.505415 | 0.505415 | 0.456077 | 0.456077 | 0.456077 | 0 | 0.002962 | 0.252123 | 2,709 | 111 | 79 | 24.405405 | 0.817374 | 0 | 0 | 0.477273 | 0 | 0 | 0.11148 | 0 | 0 | 0 | 0 | 0 | 0.068182 | 1 | 0.068182 | false | 0 | 0.045455 | 0.011364 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21af623d6e191cda3a4a98ff6894dd3bac159239 | 570 | py | Python | Python/coins_demo.py | Kodiologist/Citematic | 03710d08d2a928f0b9bf4a37da056b5946a06c47 | [
"Unlicense"
] | 1 | 2016-12-02T20:32:34.000Z | 2016-12-02T20:32:34.000Z | Python/coins_demo.py | Kodiologist/Citematic | 03710d08d2a928f0b9bf4a37da056b5946a06c47 | [
"Unlicense"
] | null | null | null | Python/coins_demo.py | Kodiologist/Citematic | 03710d08d2a928f0b9bf4a37da056b5946a06c47 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
from sys import stderr
from os import environ
import html
import yaml, cgi
from citematic_coins import coins
bib_path = environ['DAYLIGHT_BIB_PATH']

# Load the YAML bibliography database.
# NOTE(review): yaml.load without an explicit Loader is deprecated and unsafe
# on untrusted input; the bibliography is assumed to be trusted local data.
with open(bib_path) as o:
    database = yaml.load(o)

print('''<!DOCTYPE html>
<html lang="en-US">
<head>
<meta charset="UTF-8">
<title>Bibliography in COinS</title>
</head>
<body>''')

for n, x in enumerate(database):
    # Progress goes to stderr so stdout stays valid HTML.
    print('{} of {} ({})…'.format(n + 1, len(database), x['KEY']), file = stderr)
    # BUG FIX: cgi.escape was removed in Python 3.8; html.escape with
    # quote=False is the behavior-equivalent replacement.
    print('<p>{}: {}\n'.format(html.escape(x['KEY'], quote=False), coins(x['csl'])))

print('</body></html>')
| 21.923077 | 81 | 0.633333 | 87 | 570 | 4.126437 | 0.597701 | 0.058496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006198 | 0.150877 | 570 | 25 | 82 | 22.8 | 0.729339 | 0.036842 | 0 | 0 | 0 | 0 | 0.35219 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21b2b134eb876a2908c4696de72ff6c0e1d441eb | 4,694 | py | Python | wsgi/scripts/subinfo_aggrgate.py | AlexeyProskuryakov/rr | dcf598405159a48826f4214fa33abcf57418b680 | [
"MIT"
] | null | null | null | wsgi/scripts/subinfo_aggrgate.py | AlexeyProskuryakov/rr | dcf598405159a48826f4214fa33abcf57418b680 | [
"MIT"
] | null | null | null | wsgi/scripts/subinfo_aggrgate.py | AlexeyProskuryakov/rr | dcf598405159a48826f4214fa33abcf57418b680 | [
"MIT"
] | null | null | null | import logging
from Queue import Empty
from multiprocessing import Process, Event
from multiprocessing import Queue as mQ
from threading import Thread
from Queue import Queue as Q
from time import sleep
import praw
from wsgi.engine import get_reposts_count
from wsgi.scripts import GET_USER_AGENT_R
from wsgi.scripts.subinfo_elements import Users, RelationalElements, all_elements
from wsgi.scripts.utils import comments_sequence
from wsgi.sub_connections import SCStorage
# Module-level logger for the aggregation pipeline.
log = logging.getLogger("sub_info_agg")
# NOTE(review): neither constant is referenced in this file — the code below
# uses the string literals 'author'/'comment' directly; confirm these are
# used elsewhere before removing.
CMNT = "comment"
ATHR = "author"
# Shared Reddit client and sub-connection storage, created at import time
# (module import therefore has network-facing side effects).
r = praw.Reddit(user_agent=GET_USER_AGENT_R())
sc_store = SCStorage()
def load_recommended(sub_name):
    """Store recommendation edges for *sub_name*, one level deep plus one.

    Every subreddit Reddit recommends for *sub_name* is linked to it, and
    each of those subreddits is in turn linked to its own recommendations.
    """
    for first_level in r.get_subreddit_recommendations(sub_name):
        first_name = first_level.display_name
        sc_store.add_connection(sub_name, first_name, ct="recommendation")
        for second_level in r.get_subreddit_recommendations(first_name):
            sc_store.add_connection(first_name, second_level.display_name, ct="recommendation")
def get_sub_users(sub_name, uq):
    """Collect author and commenter names from the hot posts of *sub_name*.

    For every hot submission not yet recorded in sc_store, a Users element
    holding the post author and all comment authors is pushed onto queue
    *uq*.  Afterwards the subreddit's posting speed (processed posts per
    second of the observed time window) is stored in sc_store.
    """
    log.info("Start getting users from: %s" % sub_name)
    sub = r.get_subreddit(sub_name)
    s_c, c_c = 0, 0  # counters: processed submissions / comments
    fsbm, esbm = None, None  # first and last processed submissions
    hot = list(sub.get_hot(limit=500))
    log.info("Load %s hot posts in %s" % (len(hot), sub_name))
    for subm in hot:
        if sc_store.is_contains(subm.fullname):
            log.info("%s is contains" % subm.fullname)
            continue
        if fsbm is None:
            fsbm = subm
        esbm = subm
        get_reposts_count(subm.url, {"subreddit": subm.subreddit.display_name, "created_utc": subm.created_utc})
        if not subm.author:
            continue
        su = Users()
        su.add('author', subm.author.name)
        for comment in comments_sequence(subm.comments):
            if comment.author:
                su.add("comment", comment.author.name)
                c_c += 1
        s_c += 1
        log.info("\t%s processed; posts: %s, comments: %s uniques: %s", subm.fullname, s_c, c_c, len(su.all))
        sc_store.u_add(subm.fullname)
        uq.put(su)
    # Fix: previously this crashed with AttributeError when every post was
    # already known (fsbm stayed None) and with ZeroDivisionError when the
    # first and last processed posts shared a timestamp; in those cases the
    # speed is now simply left un-updated.
    if fsbm is not None and esbm.created_utc != fsbm.created_utc:
        sub_speed = float(s_c) / abs(esbm.created_utc - fsbm.created_utc)
        sc_store.set_sub_info(sub_name, {"speed": sub_speed})
def get_subs_from_users(users_queue, sub_queue, event):
    """Worker: drain user names from *users_queue*, push their subreddits.

    For each user, the set of subreddits they commented or posted in is
    wrapped in a RelationalElements and put on *sub_queue*.  When the input
    queue is exhausted (qget returns a falsy value) the worker clears
    *event* to signal completion.
    """
    reddit = praw.Reddit(user_agent=GET_USER_AGENT_R())
    while 1:
        user_name = qget(users_queue)
        if not user_name:
            break
        if sc_store.is_contains(user_name):
            # Fix: the original logged the literal "%s is contains" — the
            # format placeholder was never given an argument.
            log.info("%s is contains" % user_name)
            continue
        log.info("Start load subs from comments and posts of %s" % user_name)
        user = reddit.get_redditor(user_name)
        us = RelationalElements()
        c_subs = set(map(lambda x: x.subreddit.display_name, user.get_comments()))
        p_subs = set(map(lambda x: x.subreddit.display_name, user.get_submitted()))
        u_subs = c_subs.union(p_subs)
        us.add_groups(u_subs, user_name)
        sub_queue.put(dict(us))
        log.info("\tloaded %s subs of %s" % (len(u_subs), user_name))
        sc_store.u_add(user_name)
    event.clear()
def generate_subs(users):
    """Fan *users* out to 8 worker processes and yield RelationalElements
    results as they arrive; stops when the output queue yields a falsy value.

    NOTE(review): the event-checking loop below is a no-op (`continue` on a
    set event and nothing otherwise) and `te` holds Events, not the Process
    objects — presumably it was meant to keep waiting while workers are
    still running; confirm the intended termination condition.
    """
    q_in, q_out = mQ(len(users)), mQ(len(users))
    for u in users:
        q_in.put(u)
    te = []  # one liveness Event per worker; cleared by the worker on exit
    for _ in range(8):
        e = Event()
        e.set()
        t = Process(target=get_subs_from_users, args=(q_in, q_out, e))
        t.daemon = True
        t.start()
        te.append(e)
    while 1:
        result = qget(q_out)
        if not result:
            for e in te:
                if e.is_set():
                    continue
            break
        result = RelationalElements.create(result)
        yield result
def qget(q):
    """Get the next item from queue *q*, or None when it stays empty.

    Retries up to 5 one-second waits before giving up, so consumers can
    terminate once producers are done.  Any unexpected error is logged and
    also reported as None.

    Fix: the original called the blocking ``q.get()``, which never raises
    ``Empty`` — the retry branch was unreachable and an exhausted queue
    deadlocked the whole pipeline; it also busy-looped once the retry
    counter reached zero instead of returning None.
    """
    max_tryings = 5
    while 1:
        try:
            return q.get(timeout=1)
        except Empty:
            if max_tryings > 0:
                max_tryings -= 1
                continue
            return None
        except Exception as e:
            log.exception(e)
            return None
def load_sub_users_and_reposts_connections(sub):
    """Harvest users of subreddit *sub* and record sub-to-sub connections.

    A background thread streams Users elements for *sub*'s hot posts; each
    element's related subreddits are linked back to *sub* in sc_store.
    Finally the count of unique users observed is stored.
    """
    users_queue = Q(500)
    p = Thread(target=get_sub_users, args=(sub, users_queue))
    p.daemon = True
    p.start()
    all_users = set()
    while 1:
        su = qget(users_queue)
        if not su:
            break
        su.compile_subs(generate_subs)
        for r_sub, users in su.subs.iteritems():
            if r_sub != all_elements:
                sc_store.add_connection(r_sub, sub, ons=users, ct="users")
        # Fix: the original used `all_users.union(su.all)` and discarded the
        # returned set, so unique_users_count was always 0.
        all_users.update(su.all)
    sc_store.set_sub_info(sub, {"unique_users_count": len(all_users)})
if __name__ == '__main__':
    # Manual entry point: harvest user/repost connections for one subreddit.
    # load_recommended("cringe")
    load_sub_users_and_reposts_connections("cringe")
    pass
| 28.448485 | 112 | 0.626545 | 673 | 4,694 | 4.123328 | 0.213967 | 0.025225 | 0.02018 | 0.014054 | 0.206126 | 0.110991 | 0.055496 | 0.055496 | 0.032432 | 0.032432 | 0 | 0.005557 | 0.271623 | 4,694 | 164 | 113 | 28.621951 | 0.806084 | 0.005539 | 0 | 0.094488 | 0 | 0 | 0.069653 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047244 | false | 0.007874 | 0.102362 | 0 | 0.165354 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21b594ae9bbaf94ccb0e9c28f5fa4f7f82326ce0 | 2,955 | py | Python | Wrapping/Generators/Python/itk_generate_pyi.py | rflanz/ITK | 7863da6db1fd541487c9e543308dcfcea26d3261 | [
"Apache-2.0"
] | 1 | 2021-03-17T14:09:24.000Z | 2021-03-17T14:09:24.000Z | Wrapping/Generators/Python/itk_generate_pyi.py | rflanz/ITK | 7863da6db1fd541487c9e543308dcfcea26d3261 | [
"Apache-2.0"
] | null | null | null | Wrapping/Generators/Python/itk_generate_pyi.py | rflanz/ITK | 7863da6db1fd541487c9e543308dcfcea26d3261 | [
"Apache-2.0"
] | null | null | null | from typing import List, Any
import inspect
import importlib
import sys
# Locate the ITK Python packages.  First try the build-tree convention: a
# WrapITK.pth file next to this script lists the module directories to put
# on sys.path.  If anything in that path fails (missing file, unreadable,
# itkConfig not importable), fall back to whatever is already installed.
# NOTE(review): the bare `except:` also swallows KeyboardInterrupt/SystemExit.
try:
    # First attempt using convention of build directory
    from pathlib import Path

    wrap_itk_pth: Path = Path(__file__).parent / "WrapITK.pth"
    if not wrap_itk_pth.is_file():
        print(
            "ERROR: itk_generate_pyi.py must be run in the same directory as the WrapITK.pth file"
        )
    with open(wrap_itk_pth, "r") as fid:
        itk_module_paths = [
            itk_module_path.strip() for itk_module_path in fid.readlines()
        ]
    for pp in itk_module_paths:
        # Lines starting with '#' are comments in the .pth file.
        if not pp.startswith("#"):
            sys.path.append(pp)
    import itkConfig
except:
    # Second attempt on the standard path
    import itkConfig

# Force eager loading so every symbol is materialized before introspection;
# a lazily-loaded module cannot be dumped completely.
itkConfig.LazyLoading = False
itkConfig.DumpInterfaces = True
requested_module_name = "itk"
requested_module = importlib.import_module(requested_module_name)
# Can not dump complete .pyi interface file if LazyLoading is ued
class ITKSignaturesList:
    """
    Static accumulator of `.pyi` stub lines for the itk module.

    Call :meth:`parse_object` for each module member, then :meth:`dumps`
    to write the collected stub file.
    """

    _itk_namespace_list: List[str] = []
    # Names whose introspection signatures are known to be unusable.
    _broken_introspection_signatures: List[str] = ["echo", "image", "string", "str"]

    @staticmethod
    def parse_object(obj_name: str, obj: Any):
        """Append stub line(s) describing *obj* under the name *obj_name*.

        Builtins (which lack introspection signatures), known-broken names,
        and private names are silently skipped.
        """
        skip = (
            inspect.isbuiltin(obj)
            or obj_name in ITKSignaturesList._broken_introspection_signatures
            or obj_name.startswith("_")
        )
        if skip:
            return
        lines = ITKSignaturesList._itk_namespace_list
        if inspect.isclass(obj):
            lines.append(f"class {obj_name}:")
            has_methods: bool = False
            for member_name, member in obj.__dict__.items():
                if inspect.ismethod(member) or inspect.isfunction(member):
                    has_methods = True
                    lines.append(
                        f"    def {member_name}{inspect.signature(member)}: ..."
                    )
            if not has_methods:
                # Empty class body: turn "class X:" into "class X: ..."
                lines[-1] += " ..."
        elif inspect.isfunction(obj):
            lines.append(f"def {obj_name}{inspect.signature(obj)}: ...")

    @staticmethod
    def dumps(dump_file: str) -> None:
        """Write every collected stub line to *dump_file*, one per line."""
        with open(dump_file, "w") as fid:
            fid.writelines(f"{ln}\n" for ln in ITKSignaturesList._itk_namespace_list)
# Iterate over every member of the loaded itk module and collect a stub
# signature for each.  The resulting .pyi file lets IDEs and other tools do
# better introspection of the wrapped ITK API.
all_items = list(requested_module.__dict__.items())
for k, v in all_items:
    ITKSignaturesList.parse_object(k, v)
# Write next to the module itself: e.g. .../itk/__init__.py -> __init__.pyi
ITKSignaturesList.dumps(requested_module.__file__ + "i")
| 34.360465 | 98 | 0.643655 | 362 | 2,955 | 5.01105 | 0.367403 | 0.039691 | 0.052922 | 0.090959 | 0.072767 | 0.072767 | 0.072767 | 0 | 0 | 0 | 0 | 0.000463 | 0.268359 | 2,955 | 85 | 99 | 34.764706 | 0.838575 | 0.166497 | 0 | 0.15 | 0 | 0 | 0.100205 | 0.031212 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.133333 | 0 | 0.266667 | 0.016667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21b789e1f1b1d8c14c64211b16ff443f97bbdcbe | 33,445 | py | Python | challenge_submissions/submit_767200/supplements/bot_agent_1.py | opendilab/GoBigger-Challenge-2021 | 794897e94caf15b69635dcb36eb64e1341131db9 | [
"Apache-2.0"
] | 121 | 2021-11-01T01:24:31.000Z | 2022-03-31T10:53:47.000Z | challenge_submissions/submit_767200/supplements/bot_agent_1.py | opendilab/GoBigger-Challenge-2021 | 794897e94caf15b69635dcb36eb64e1341131db9 | [
"Apache-2.0"
] | 2 | 2021-11-06T14:13:00.000Z | 2022-01-25T09:19:41.000Z | challenge_submissions/submit_767200/supplements/bot_agent_1.py | opendilab/GoBigger-Challenge-2021 | 794897e94caf15b69635dcb36eb64e1341131db9 | [
"Apache-2.0"
] | 39 | 2021-11-01T12:48:19.000Z | 2022-03-01T11:23:38.000Z | import math
import os
import random
import logging
import copy
import queue
from pygame.math import Vector2
from gobigger.agents.base_agent import BaseAgent
# from .base_agent import BaseAgent
# Module-level tuning constants.
# NOTE(review): neither name is referenced anywhere in this file — confirm
# they are used elsewhere before removing.
tabu_size=1
position_size=11
class BotAgent(BaseAgent):
'''
Overview:
A simple script bot
'''
def __init__(self, name=None, level=3):
    """Create a scripted bot.

    :param name: player-id string this bot controls (also used to tell our
        clone balls apart from allies/enemies in the observation).
    :param level: difficulty level kept for interface compatibility; not
        read elsewhere in this file.
    """
    self.name = name
    self.actions_queue = queue.Queue()  # pre-planned actions, drained one per step
    self.last_clone_num = 1
    self.last_total_size = 0  # radius of our biggest ball on the previous step
    self.level = level
    self.my_score = 0  # refreshed by process_ally_balls() every step
    self.position_times = []  # (position, countdown) pairs tracking recent splits
    # Fix: step() executes `self.stay_same_times += 1` before this attribute
    # was ever assigned, raising AttributeError on the first call.
    self.stay_same_times = 0
def step(self, obs):
    """Choose one action for this frame from the (global_obs, per-player obs) pair.

    Returns an action triple ``[dx, dy, action_type]``.  The type codes
    follow GoBigger's action space — presumably -1 = move only and 1/2 are
    split/eject variants; confirm against the gobigger documentation.

    Decision order: anti-stuck recovery, attack opportunities, escape via
    artificial potential field (APF), thorns eating, defensive merging,
    then default APF-guided foraging.
    """
    global_obs,obs=obs
    ally_info,others_clone_balls=self.process_ally_balls(obs)
    obs=obs[self.name]
    overlap = obs['overlap']
    overlap = self.preprocess(overlap)
    food_balls = overlap['food']
    thorns_balls = overlap['thorns']
    spore_balls = overlap['spore']
    clone_balls = overlap['clone']
    # Spores are treated exactly like food for foraging purposes.
    food_balls.extend(spore_balls)
    my_clone_balls, ally_clone_balls, near_other_balls = self.process_clone_balls_1(clone_balls)
    my_total_score=sum(my_ball['radius']**2 for my_ball in my_clone_balls)
    # Carry over the per-ball split-cooldown counter: match each of our balls
    # to a remembered position within 6.5 units, else start a fresh countdown.
    for my_ball in my_clone_balls:
        for position, time in self.position_times:
            if (my_ball['position'] - position).length() < 6.5:
                my_ball['time'] = time
                break
        else:
            my_ball['time'] = 101
        my_ball['time'] = max(0, my_ball['time'] - 1)
    self.position_times = [(my_ball['position'], my_ball['time']) for my_ball in my_clone_balls]
    # Anti-stuck: if our single small ball has not grown for several frames,
    # fire a burst of type-2 actions to break out.
    if abs(self.last_total_size-my_clone_balls[0]['radius'])<0.01:
        self.stay_same_times+=1
    else:
        self.stay_same_times=0
    if self.stay_same_times>5 and self.last_total_size<10 and len(my_clone_balls)==1:
        self.stay_same_times = 0
        self.actions_queue.put([None, None, 2])
        self.actions_queue.put([None, None, 2])
        self.actions_queue.put([None, None, 2])
        self.actions_queue.put([None, None, 2])
        self.actions_queue.put([None, None, 2])
        action_ret = self.actions_queue.get()
        return action_ret
    self.last_total_size=my_clone_balls[0]['radius']
    # Attack opportunity 1: a profitable split-eat of a nearby smaller enemy.
    direction_attact=self.attact(my_clone_balls,others_clone_balls,thorns_balls)
    if direction_attact and len(my_clone_balls)<16:
        direction=direction_attact.normalize()
        action_type = 1
        self.actions_queue.queue.clear()
        self.actions_queue.put([direction.x, direction.y, action_type])
        action_ret = self.actions_queue.get()
        return action_ret
    # Attack opportunity 2: split-attack with our biggest ball.
    direction_attact=self.attact2(my_clone_balls,others_clone_balls,thorns_balls)
    if direction_attact and len(my_clone_balls)<16:
        direction=direction_attact.normalize()
        action_type = 1
        self.actions_queue.queue.clear()
        self.actions_queue.put([direction.x, direction.y, action_type])
        action_ret = self.actions_queue.get()
        return action_ret
    # Escape: follow the net repulsive force when it is strong enough.
    direction,danger = self.APF(my_clone_balls, others_clone_balls)
    if direction.length()>=1:
        direction=direction.normalize()
        action_type = -1
        self.actions_queue.queue.clear()
        self.actions_queue.put([direction.x, direction.y, action_type])
        action_ret = self.actions_queue.get()
        return action_ret
    # Thorns: head for a safely edible thorns ball if one is close.
    direction0=self.near_thorns_balls(thorns_balls, my_clone_balls, others_clone_balls)
    if direction0:
        action_type = -1
        self.actions_queue.queue.clear()
        self.actions_queue.put([direction0.x, direction0.y, action_type])
        action_ret = self.actions_queue.get()
        return action_ret
    elif self.actions_queue.qsize() > 0:
        return self.actions_queue.get()
    elif direction.length()<0.5 and danger and len(my_clone_balls)>3:
        # Danger but no clear escape direction: consolidate (type 2 then coast).
        self.actions_queue.queue.clear()
        self.actions_queue.put([None, None, 2])
        self.actions_queue.put([None, None, -1])
        self.actions_queue.put([None, None, -1])
        self.actions_queue.put([None, None, -1])
        self.actions_queue.put([None, None, -1])
        self.actions_queue.put([None, None, -1])
        self.actions_queue.put([None, None, -1])
        self.actions_queue.put([None, None, -1])
        self.actions_queue.put([None, None, -1])
        self.actions_queue.put([None, None, -1])
        action_ret = self.actions_queue.get()
        return action_ret
    # A mid-sized enemy near our mass: queue a consolidation sequence.
    for other_ball in others_clone_balls:
        if 800<my_total_score/6<other_ball['radius']**2<0.47*my_total_score and (other_ball['position']-my_clone_balls[0]['position']).length()<2*math.sqrt(0.9*my_total_score):
            self.actions_queue.put([None, None, 2])
            self.actions_queue.put([None, None, -1])
            self.actions_queue.put([None, None, -1])
            self.actions_queue.put([None, None, -1])
            self.actions_queue.put([None, None, -1])
            self.actions_queue.put([None, None, -1])
            self.actions_queue.put([None, None, -1])
            self.actions_queue.put([None, None, 0])
            self.actions_queue.put([None, None, 0])
            self.actions_queue.put([None, None, 0])
            self.actions_queue.put([None, None, 0])
            self.actions_queue.put([None, None, 0])
            self.actions_queue.put([None, None, 0])
            self.actions_queue.put([None, None, 0])
            self.actions_queue.put([None, None, 0])
            action_ret = self.actions_queue.get()
            return action_ret
    # Too fragmented (or two of our balls overlap): consolidate; otherwise
    # forage along the APF2 direction.
    if (len(my_clone_balls) >= 9 and my_clone_balls[4]['radius'] > 20) or (my_clone_balls[0]['radius']**2<my_total_score/4 and self.my_score>5000) or \
            (len(my_clone_balls)>1 and (my_clone_balls[0]['position']-my_clone_balls[1]['position']).length()<1.2*my_clone_balls[0]['radius']):
        self.actions_queue.put([None, None, 2])
        self.actions_queue.put([None, None, -1])
        self.actions_queue.put([None, None, -1])
        self.actions_queue.put([None, None, -1])
        self.actions_queue.put([None, None, -1])
        self.actions_queue.put([None, None, -1])
        self.actions_queue.put([None, None, -1])
        self.actions_queue.put([None, None, 0])
        self.actions_queue.put([None, None, 0])
        self.actions_queue.put([None, None, 0])
        self.actions_queue.put([None, None, 0])
        self.actions_queue.put([None, None, 0])
        self.actions_queue.put([None, None, 0])
        self.actions_queue.put([None, None, 0])
        self.actions_queue.put([None, None, 0])
        action_ret = self.actions_queue.get()
        return action_ret
    else:
        direction = self.APF2(direction,my_clone_balls, others_clone_balls, ally_info, food_balls, thorns_balls)
        action_type=-1
        self.actions_queue.put([direction.x, direction.y, action_type])
        action_ret = self.actions_queue.get()
        return action_ret
def process_clone_balls_1(self, clone_balls):
    """Partition the visible clone balls into (mine, allies, enemies).

    Each group is a deep copy of the matching observation dicts, sorted by
    radius, largest first.
    """
    mine, allies, enemies = [], [], []
    my_team = str(int(self.name) // 3)
    for ball in clone_balls:
        ball_copy = copy.deepcopy(ball)
        if ball['player'] == self.name:
            mine.append(ball_copy)
        elif ball['team'] == my_team:
            allies.append(ball_copy)
        else:
            enemies.append(ball_copy)
    for group in (mine, allies, enemies):
        group.sort(key=lambda b: b['radius'], reverse=True)
    return mine, allies, enemies
def preprocess(self, overlap):
    """Convert raw overlap arrays into dicts with Vector2 positions.

    Every ball becomes ``{'position': Vector2, 'radius': float}``; clone
    balls additionally carry string ``'player'`` and ``'team'`` ids taken
    from the last two array slots.
    """
    converted = {}
    for key, raw_balls in overlap.items():
        bucket = []
        for raw in raw_balls:
            entry = {}
            entry['position'] = Vector2(raw[0], raw[1])
            entry['radius'] = raw[2]
            if key == 'clone':
                entry['player'] = str(int(raw[-2]))
                entry['team'] = str(int(raw[-1]))
            bucket.append(entry)
        converted[key] = bucket
    return converted
def preprocess_tuple2vector(self, overlap):
    """Replace each ball's position tuple with a Vector2, in place.

    NOTE: the returned dict shares (and mutates) the ball dicts of the
    input *overlap*.
    """
    result = {}
    for key, balls in overlap.items():
        bucket = []
        for ball in balls:
            ball['position'] = Vector2(*ball['position'])
            bucket.append(ball)
        result[key] = bucket
    return result
def covered(self, my_ball, other_ball, my_clone_balls):
    """Estimate whether a teammate ball of ours could re-eat *other_ball*
    if it ate *my_ball* — i.e. whether *my_ball* is "covered".

    The combined radius after the hypothetical merge (including our nearby
    smaller balls) is compared against each of our big balls' split reach.

    NOTE(review): the early ``return False`` fires on the first big-enough
    ball that is out of reach, without checking the remaining balls —
    confirm that is intended rather than continuing the loop.
    """
    neighbor_balls=[]
    # Radius the enemy would have after absorbing half of other_ball's mass.
    other_ball_newradius=math.sqrt(my_ball['radius']**2+other_ball['radius']**2/2)
    for my_ball0 in my_clone_balls:
        if my_ball0['radius']<other_ball_newradius and (my_ball0['position']-my_ball['position']).length()<other_ball_newradius:
            neighbor_balls.append(my_ball0)
    tmp=other_ball['radius']**2/2
    for neighbor_ball in neighbor_balls:
        tmp+=neighbor_ball['radius']**2
    new_radius=math.sqrt(tmp)
    for my_big_ball in my_clone_balls:
        # 0.71 ~ 1/sqrt(2): the big ball must still dominate after splitting.
        if 0.71*my_big_ball['radius']>new_radius:
            # 2.12 ~ 1.5*sqrt(2): split reach vs. distance plus target size.
            if my_big_ball['radius']*2.12>(my_big_ball['position']-my_ball['position']).length()+0.71*other_ball['radius']:
                return True
            else:
                return False
    return False
def APF(self, my_clone_balls, other_clone_balls):
    """Artificial-potential-field repulsion away from threatening enemies.

    Returns ``(rep, danger)`` where *rep* is the summed repulsive Vector2
    over all (my ball, enemy ball) pairs and *danger* is True when any
    single contribution was strong (length >= 5).
    """
    danger=False
    rep = Vector2(0, 0)  # total repulsive force from all obstacles
    for my_ball in my_clone_balls:
        for other_ball in other_clone_balls:
            t_vec = my_ball['position'] - other_ball['position']
            # Skip enemies we out-size or that are far away.
            if my_ball['radius']>other_ball['radius'] or t_vec.length()>20+2.5*other_ball['radius']:
                pass
            # Skip when a teammate ball of ours "covers" this ball.
            elif t_vec.length()>20+other_ball['radius'] and self.covered(my_ball,other_ball,my_clone_balls):
                pass
            else:
                direction=t_vec.normalize()
                # Repulsion grows as the gap shrinks, scaled by relative sizes.
                rep_tmp=direction * 10000 * (2.5 / (t_vec.length()-20)- 1.0 / other_ball['radius']) / (t_vec.length())*(other_ball['radius']/my_clone_balls[0]['radius'])**2
                rep_tmp=rep_tmp * my_ball['radius'] ** 2 / self.my_score * 10
                # Within enemy split reach (2.12 ~ 1.5*sqrt(2)): triple the
                # push, plus extra weight for our balls the enemy could chain-eat.
                if my_ball['radius']<0.71*other_ball['radius'] and \
                        (other_ball['position']-my_ball['position']).length()<10+2.12*other_ball['radius']:
                    rep_tmp=rep_tmp*3
                    new_radius = math.sqrt(my_ball['radius'] ** 2 + 0.5 * other_ball['radius'] ** 2)
                    for my_ball_1 in my_clone_balls:
                        if my_ball_1 !=my_ball and 0.71*new_radius >my_ball_1['radius'] and (my_ball['position']-my_ball_1['position']).length()<10+2.12*new_radius:
                            rep+=rep_tmp*(my_ball_1['radius']/my_ball['radius'])
                rep +=rep_tmp
                if rep_tmp.length()>=5:
                    danger=True
    return rep,danger
def APF2(self, rep, my_clone_balls, other_clone_balls, ally_info, food_balls, thorns_balls):
    """Combine the repulsive field *rep* with attraction toward teammates
    and food/spores/thorns, and return a unit foraging direction.

    Preference order: head to a nearby big teammate (to merge), otherwise
    follow the summed field, then snap to the cheapest nearby food target
    whose approach the APF deems safe.
    """
    # Thorns are treated as (large) food here.
    food_balls.extend(thorns_balls)
    neighbor_food_balls=[]
    att=Vector2(0.01, 0.01)  # attraction from food balls and teammates
    # Teammate merging: move toward the quickest-to-reach big ally.
    min_time=10
    direct=None
    for my_ball in my_clone_balls:
        for ally_ball in ally_info:
            # ally_info rows are [radius, position, player_name, total_score].
            if ally_ball[2] !=self.name and len(my_clone_balls)>4 and ally_ball[3]>2000 and self.my_score>2000:
                dis=(ally_ball[1]-my_ball['position']).length()
                # Closing time assuming both move at speed ~500/(10+radius).
                time=(dis-max(my_ball['radius'],ally_ball[0]))/((500/(10+my_ball['radius']))+(500/(10+ally_ball[0])))
                if dis<1.5*(my_ball['radius']+ally_ball[0]) and time<min_time:
                    min_time=time
                    direct=(ally_ball[1]-my_ball['position']).normalize()
    if direct:
        return direct
    # Food attraction, weighted by food mass over distance.
    for my_ball in my_clone_balls:
        for food_ball in food_balls:
            x,y=food_ball['position'][0],food_ball['position'][1]
            # Distance to the nearest map corner region (map presumably
            # 1000x1000 — corners are avoided when too tight for this ball).
            dis=min(x+y,1000-x+y,1000-y+x,2000-x-y)
            if my_ball['radius']>=food_ball['radius'] and dis>my_ball['radius']:
                t_vec = food_ball['position'] - my_ball['position']
                # Large food = thorns ball: if eating it would be unsafe,
                # repel instead of attract once we get close.
                if food_ball['radius']>5 and len(my_clone_balls)<16 and t_vec.length()<50+my_ball['radius']/10 and \
                        (not self.is_thorns_ball_safe(my_ball, food_ball, other_clone_balls,16-len(my_clone_balls),my_clone_balls)):
                    if t_vec.length()>10+my_ball['radius']:
                        continue
                    else:
                        t_vec=-t_vec
                        att += t_vec.normalize() * (food_ball['radius'] ** 2) / t_vec.length()*10
                else:
                    att+=t_vec.normalize()*(food_ball['radius']**2)/t_vec.length()
                    if t_vec.length()<100+my_ball['radius']/10 and food_ball not in neighbor_food_balls:
                        neighbor_food_balls.append(food_ball)
    try:
        direction_goal=(rep+att).normalize()
    except:
        # normalize() fails on a zero-length vector; fall back to attraction only.
        direction_goal =att.normalize()
        print(f'att:{att}')
        print(f'rep:{rep}')
    best_direction=copy.deepcopy(direction_goal)
    min_div=100000
    # Snap to the cheapest nearby food target whose approach stays safe:
    # simulate moving every ball toward it and re-run the APF check.
    for my_ball in my_clone_balls:
        for food_ball in neighbor_food_balls:
            if my_ball['radius']>food_ball['radius']:
                copy_balls=copy.deepcopy(my_clone_balls)
                for copy_ball in copy_balls:
                    copy_ball['position']=copy_ball['position']+(food_ball['position']-my_ball['position'])*(10+my_ball['radius'])/(10+copy_ball['radius'])
                direction,danger=self.APF(copy_balls,other_clone_balls)
                if direction.length()>2 or danger:
                    continue
                t_vec=food_ball['position']-my_ball['position']
                # Cost: angular deviation from the field direction times
                # travel time, discounted by the food's size.
                diverse=((direction_goal-t_vec.normalize()).length()+0.5)*(t_vec.length()-my_ball['radius'])/(250/(10+my_ball['radius']))/food_ball['radius']
                if diverse<min_div:
                    min_div=diverse
                    best_direction=t_vec.normalize()
    # No safe candidate found: retry without the safety simulation.
    if best_direction==direction_goal:
        for my_ball in my_clone_balls:
            for food_ball in neighbor_food_balls:
                if my_ball['radius'] > food_ball['radius']:
                    t_vec = food_ball['position'] - my_ball['position']
                    diverse = ((direction_goal - t_vec.normalize()).length() + 0.5) * (
                            t_vec.length() - my_ball['radius']) / (250 / (10 + my_ball['radius'])) / food_ball[
                                  'radius']
                    if diverse < min_div:
                        min_div = diverse
                        best_direction = t_vec.normalize()
    return best_direction
def near_thorns_balls(self, thorns_balls, my_clone_balls, others_clone_balls):
    """Return a unit direction toward the quickest safely-edible thorns
    ball, or None when no candidate is reachable cheaply enough.

    The cost of a candidate is its estimated travel time for the eating
    ball; only candidates with cost below 3 are considered.
    """
    best_cost = 3
    chosen = None
    split_budget = 16 - len(my_clone_balls)
    for mine in my_clone_balls:
        for thorn in thorns_balls:
            if mine['radius'] <= thorn['radius']:
                continue
            if not self.is_thorns_ball_safe(mine, thorn, others_clone_balls, split_budget, my_clone_balls):
                continue
            offset = thorn['position'] - mine['position']
            cost = (offset.length() - mine['radius']) / (250 / (10 + mine['radius']))
            if cost < best_cost:
                best_cost = cost
                chosen = offset.normalize()
    return chosen
def is_thorns_ball_safe(self, my_ball, thorns_ball, other_balls, len, my_clone_balls):
    """Return False if eating *thorns_ball* with *my_ball* would leave the
    resulting (burst) fragments vulnerable to a nearby enemy, else True.

    *len* is the remaining split budget (16 - current ball count).
    NOTE(review): the parameter name shadows the builtin ``len``; harmless
    here since the builtin is not used inside, but worth renaming.
    """
    for other_ball in other_balls:
        # If a teammate covers the post-eat ball, this enemy is no threat.
        my_ball_tmp=copy.deepcopy(my_ball)
        my_ball_tmp['radius']=math.sqrt(my_ball_tmp['radius']**2+thorns_ball['radius']**2)
        if self.covered(my_ball_tmp,other_ball,my_clone_balls):
            continue
        # Estimated radius of the largest fragment after the thorns burst.
        new_radius_sqr=(my_ball['radius']**2+thorns_ball['radius']**2)/min(10,(len+1))
        new_radius1_sqr=my_ball['radius']**2+thorns_ball['radius']**2-400*min(9,len)
        new_radius=math.sqrt(max(new_radius1_sqr,new_radius_sqr))
        # Effective enemy size if it could chain-eat some of the fragments.
        if other_ball['radius']*0.71>new_radius:
            enemy_radius=max(math.sqrt(other_ball['radius']**2/2+min(400,new_radius_sqr)*min(9,len)/3),other_ball['radius'])
        else:
            enemy_radius=other_ball['radius']
        # Unsafe when the enemy reaches the thorns site first (split reach
        # 2.12 ~ 1.5*sqrt(2)) and would out-size our biggest fragment.
        if (thorns_ball['position'] - my_ball['position']).length()-my_ball['radius']>(thorns_ball['position'] - other_ball['position']).length()-10-2.12*other_ball['radius'] and \
                new_radius<enemy_radius and other_ball['radius']>10:
            return False
    return True
def process_ally_balls(self, obs):
    """Summarize teammates and collect de-duplicated enemy balls from the
    whole-team observation *obs* (player name -> player obs).

    Returns ``(ally_info, others_clone_balls)`` where each ally_info row is
    ``[biggest radius, its position, player name, total score]`` sorted by
    score descending.  As a side effect ``self.my_score`` is refreshed.
    """
    ally_info=[]
    others_clone_balls = []
    other_ball_position = []  # positions already recorded, to de-duplicate
    for name,obs_player in obs.items():
        overlap = obs_player['overlap']
        overlap = self.preprocess(overlap)
        ally_clone_balls_obs = overlap['clone']
        ally_clone_balls=[]
        totol_score=0
        for clone_ball in ally_clone_balls_obs:
            if clone_ball['player'] == name:
                ally_clone_balls.append(copy.deepcopy(clone_ball))
                totol_score+=clone_ball['radius']**2
            elif clone_ball['team'] != str(int(self.name) // 3) and clone_ball['position'] not in other_ball_position:
                others_clone_balls.append(copy.deepcopy(clone_ball))
                other_ball_position.append(clone_ball['position'])
        ally_clone_balls.sort(key=lambda a: a['radius'], reverse=True)
        ally_info.append([ally_clone_balls[0]['radius'],ally_clone_balls[0]['position'],name,totol_score])
        if name==self.name:
            self.my_score=totol_score
    ally_info.sort(key=lambda a:a[3],reverse=True)
    others_clone_balls.sort(key=lambda a: a['radius'], reverse=True)
    # Merge near-overlapping enemy balls into the bigger one's radius so the
    # threat estimate reflects their combined mass.
    # NOTE(review): radii grow cumulatively across j-iterations; confirm the
    # intended pairwise (not cascading) merge semantics.
    for i in range(len(others_clone_balls)-1):
        for j in range(i+1,len(others_clone_balls)):
            other_ball=others_clone_balls[i]
            other_ball1=others_clone_balls[j]
            if (other_ball['position']-other_ball1['position']).length()<1.1*other_ball['radius']:
                other_ball['radius']=math.sqrt(other_ball['radius']**2+other_ball1['radius']**2)
    return ally_info,others_clone_balls
def is_safe(self, my_ball, other_balls, direction, culed_reward_pos):
    """Estimate (loss, reward) in mass for splitting *my_ball* toward
    *direction* against the enemy balls.

    *culed_reward_pos* is mutated: enemy positions already counted as
    reward by a previous call are skipped, so callers can accumulate over
    several of their balls without double counting.
    """
    loss=0
    reward=0
    # Landing point of the split half (1.41 ~ sqrt(2) offset plus margin).
    new_position = my_ball['position'] + direction * (10 + 1.41 * my_ball['radius'])
    for other_ball in other_balls:
        # Threatened already at the current position.
        if my_ball['radius']<1.01*other_ball['radius'] and (other_ball['position']-my_ball['position']).length()<15+2.2*other_ball['radius']:
            loss += my_ball['radius'] ** 2 / 2
        # Landing inside a bigger enemy, near one, or on a smaller one (reward).
        if 0.7*my_ball['radius']<other_ball['radius'] and \
                (new_position-other_ball['position']).length()<other_ball['radius']:
            loss+=my_ball['radius']**2/2
        elif my_ball['radius']<1.01*other_ball['radius'] and (other_ball['position']-new_position).length()<15+2.2*other_ball['radius']:
            loss += my_ball['radius'] ** 2 / 2
        elif 0.7*my_ball['radius']>other_ball['radius'] and (new_position-other_ball['position']).length()<0.7*my_ball['radius'] and other_ball['position'] not in culed_reward_pos:
            reward+=other_ball['radius']**2
            culed_reward_pos.append(other_ball['position'])
    # We can lose at most this ball's own mass.
    loss=min(loss,my_ball['radius'] ** 2)
    return loss,reward
def attact(self, my_clone_balls, other_clone_balls, thorns_balls):
    """Look for a profitable split-eat of a nearby smaller enemy ball.

    Returns the (un-normalized) direction Vector2 from one of our balls to
    the target when the estimated reward minus loss across the balls that
    would split clears 5% of our total score, else False.
    """
    for other_ball in other_clone_balls:
        # Only the balls that would actually split (bounded by the 16-ball cap).
        for i in range(max(min(len(my_clone_balls),16-len(my_clone_balls)),0)):
            # Target must be meaningfully big, clearly smaller, and within
            # split reach (2.12 ~ 1.5*sqrt(2)).
            if 0.7*my_clone_balls[i]['radius']>other_ball['radius'] and 0.05*self.my_score<other_ball['radius']**2 and \
                    (my_clone_balls[i]['position'] - other_ball['position']).length() <= max(0,15-(500 / (10 + other_ball['radius'])))+ 2.12*my_clone_balls[i]['radius']:
                # Abort if the landing point sits on a thorns ball.
                flag=False
                if len(my_clone_balls)<8:
                    new_position = my_clone_balls[i]['position'] + (other_ball['position'] - my_clone_balls[i]['position']).normalize() * (1.41 * my_clone_balls[i]['radius'] + 5)
                    for throns_ball in thorns_balls:
                        if (throns_ball['position']-new_position).length()<0.71*my_clone_balls[i]['radius']+5:
                            flag=True
                            break
                if flag:
                    continue
                direction=(other_ball['position'] - my_clone_balls[i]['position']).normalize()
                reward=other_ball['radius']**2
                loss=0
                culed_reward_pos=[other_ball['position']]
                for j in range(min(len(my_clone_balls),16-len(my_clone_balls))):
                    loss1,reward1=self.is_safe(my_clone_balls[j],other_clone_balls,direction,culed_reward_pos)
                    loss+=loss1
                    reward+=reward1
                if reward-loss<0.05*self.my_score:
                    break
                else:
                    return direction
    return False
def attact1(self, my_clone_balls, other_clone_balls, thorns_balls):
    """Split-attack variant targeting a medium-sized enemy with our biggest
    ball; returns the attack direction or False.

    NOTE(review): this method is never called in this file (step() uses
    attact() and attact2(), and attact2 is a near-duplicate of this one
    without the cooldown check) — likely dead code; confirm before removal.
    """
    if len(my_clone_balls)<=15 and my_clone_balls[0]['radius']>50:
        for other_ball in other_clone_balls:
            # Target between 30% and 49% of our biggest radius, within reach.
            if 0.49*my_clone_balls[0]['radius']>other_ball['radius'] and 0.3*my_clone_balls[0]['radius']<other_ball['radius'] and\
                    (my_clone_balls[0]['position'] - other_ball['position']).length() <= (1.41+1.5) *my_clone_balls[0]['radius']:
                direction=(other_ball['position'] - my_clone_balls[0]['position']).normalize()
                position1=my_clone_balls[0]['position']+direction*1.41 *my_clone_balls[0]['radius']
                # Require a split-ready (cooldown zero) teammate ball near
                # the landing point when we are fragmented.
                if len(my_clone_balls)>7:
                    for j in range(1,len(my_clone_balls)):
                        if (my_clone_balls[j]['position']-position1).length()<0.71*my_clone_balls[0]['radius'] and my_clone_balls[j]['time']==0:
                            break
                    else:
                        continue
                # Check whether a thorns ball blocks the landing spot.
                if self.attact_thorns(position1, my_clone_balls[0]['radius'],len(my_clone_balls),thorns_balls):
                    continue
                # Simulate the post-split ball set to evaluate safety.
                my_new_clone_balls=copy.deepcopy(my_clone_balls)
                for i in range(min(len(my_clone_balls), 16 - len(my_clone_balls))):
                    my_new_clone_balls[i]['radius']=0.71*my_new_clone_balls[i]['radius']
                    new_clone_tmp=copy.deepcopy(my_new_clone_balls[i])
                    new_clone_tmp['position']=new_clone_tmp['position']+direction*1.41 *new_clone_tmp['radius']
                    new_clone_tmp['radius']=new_clone_tmp['radius']+0.1  # ensure the newly split ball sorts first
                    my_new_clone_balls.append(new_clone_tmp)
                my_new_clone_balls.sort(key=lambda a: a['radius'], reverse=True)
                reward=other_ball['radius']**2
                loss=0
                culed_reward_pos=[other_ball['position']]
                for j in range(min(len(my_clone_balls),16-len(my_clone_balls))):
                    loss1,reward1=self.is_safe(my_clone_balls[j],other_clone_balls,direction,culed_reward_pos)
                    loss+=loss1
                    reward+=reward1
                for j in range(max(1,min(len(my_new_clone_balls), 16 - len(my_new_clone_balls)))):
                    loss1,reward1=self.is_safe(my_new_clone_balls[j], other_clone_balls, direction,culed_reward_pos)
                    loss += loss1
                    reward += reward1
                if reward-loss<0.05*self.my_score:
                    continue
                else:
                    return direction
    return False
def attact2(self, my_clone_balls, other_clone_balls, thorns_balls):
    """Split-attack with our biggest ball at a medium-sized enemy; returns
    the attack direction or False.

    Near-duplicate of attact1 with a looser entry condition (few balls, or
    the overflow balls are tiny) and without the split-cooldown check.
    """
    if (len(my_clone_balls)<=7 or (7<len(my_clone_balls)<16 and my_clone_balls[15-len(my_clone_balls)]['radius']<10)) and my_clone_balls[0]['radius']>50:
        for other_ball in other_clone_balls:
            # Target between 30% and 49% of our biggest radius, within reach.
            if 0.49*my_clone_balls[0]['radius']>other_ball['radius'] and 0.3*my_clone_balls[0]['radius']<other_ball['radius'] and\
                    (my_clone_balls[0]['position'] - other_ball['position']).length() <= (1.41+1.5) *my_clone_balls[0]['radius']:
                direction=(other_ball['position'] - my_clone_balls[0]['position']).normalize()
                position1=my_clone_balls[0]['position']+direction*1.41 *my_clone_balls[0]['radius']
                # Check whether a thorns ball blocks the landing spot.
                if self.attact_thorns(position1, my_clone_balls[0]['radius'],len(my_clone_balls),thorns_balls):
                    continue
                # Simulate the post-split ball set to evaluate safety.
                my_new_clone_balls=copy.deepcopy(my_clone_balls)
                for i in range(min(len(my_clone_balls), 16 - len(my_clone_balls))):
                    my_new_clone_balls[i]['radius']=0.71*my_new_clone_balls[i]['radius']
                    new_clone_tmp=copy.deepcopy(my_new_clone_balls[i])
                    new_clone_tmp['position']=new_clone_tmp['position']+direction*1.41 *new_clone_tmp['radius']
                    new_clone_tmp['radius']=new_clone_tmp['radius']+0.1  # ensure the newly split ball sorts first
                    my_new_clone_balls.append(new_clone_tmp)
                my_new_clone_balls.sort(key=lambda a: a['radius'], reverse=True)
                reward=other_ball['radius']**2
                loss=0
                culed_reward_pos=[other_ball['position']]
                for j in range(min(len(my_clone_balls),16-len(my_clone_balls))):
                    loss1,reward1=self.is_safe(my_clone_balls[j],other_clone_balls,direction,culed_reward_pos)
                    loss+=loss1
                    reward+=reward1
                for j in range(max(1,min(len(my_new_clone_balls), 16 - len(my_new_clone_balls)))):
                    loss1,reward1=self.is_safe(my_new_clone_balls[j], other_clone_balls, direction,culed_reward_pos)
                    loss += loss1
                    reward += reward1
                if reward-loss<0.05*self.my_score:
                    continue
                else:
                    return direction
    return False
def attact_thorns(self,new_position,radius,len,thorns_balls):
if len<8:
for thorns_ball in thorns_balls:
if (thorns_ball['position']-new_position).length()<radius:
return True
return False
| 54.205835 | 186 | 0.554642 | 4,232 | 33,445 | 4.106569 | 0.050567 | 0.122562 | 0.094597 | 0.054664 | 0.709765 | 0.64716 | 0.590195 | 0.54796 | 0.509581 | 0.463375 | 0 | 0.031896 | 0.312872 | 33,445 | 616 | 187 | 54.293831 | 0.724338 | 0.121932 | 0 | 0.530271 | 0 | 0 | 0.056539 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033403 | false | 0.004175 | 0.016701 | 0 | 0.118998 | 0.004175 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21c1c1fd0d436d4aa0d902e64ebd89bf247f49da | 948 | py | Python | chapter1/problem_1_20.py | JBannwarth/OrbitalMechanics | fe3c36eba7cadf977804fb2ad866e0e28a96aab5 | [
"MIT"
] | 1 | 2020-08-29T13:34:48.000Z | 2020-08-29T13:34:48.000Z | chapter1/problem_1_20.py | JBannwarth/OrbitalMechanics | fe3c36eba7cadf977804fb2ad866e0e28a96aab5 | [
"MIT"
] | 1 | 2020-09-06T21:17:51.000Z | 2020-09-07T00:52:39.000Z | chapter1/problem_1_20.py | JBannwarth/OrbitalMechanics | fe3c36eba7cadf977804fb2ad866e0e28a96aab5 | [
"MIT"
] | null | null | null | """ Orbital Mechanics for Engineering Students Problem 1.20
Question:
Numerically solve the second order-differential equation
t*yDDot + t^2*yDot - 2*y = 0
for y at t = 4, if the initial conditions at t=1 are:
- y = 0
- yDot = 1
Written by: J.X.J. Bannwarth
"""
import numpy as np
import matplotlib.pyplot as plt
from orbitutils.solvers import rkf45
# Differential equations
def Rates(t, Y):
    """Right-hand side of the first-order system equivalent to
    t*yDDot + t^2*yDot - 2*y = 0, with state Y = [y, yDot].
    """
    y, y_dot = Y[0], Y[1]
    derivs = np.zeros(Y.shape)
    derivs[0] = y_dot
    derivs[1] = 2. * y / t - t * y_dot
    return derivs
# Announce which problem is being solved.
print("Orbital Mechanics for Engineering Students Problem 1.20")

# Integration interval and initial state [y, yDot] at t = 1.
t_span = np.array([1., 4.])
y_init = np.array([0., 1.])

# Integrate the system with the RKF4(5) adaptive solver.
y, t = rkf45(Rates, y_init, t_span)

# Report the terminal value y(t_final).
print(f"y({t[-1]:.3f}) = {y[-1,0]:.3f}")

# Plot both state components against time.
plt.figure()
for column, label in ((0, "y"), (1, "yDot")):
    plt.plot(t, y[:, column], label=label)
plt.xlabel("Time (-)")
plt.ylabel("Value (-)")
plt.legend()
plt.show()
| 21.545455 | 65 | 0.607595 | 156 | 948 | 3.692308 | 0.435897 | 0.013889 | 0.065972 | 0.104167 | 0.166667 | 0.166667 | 0.166667 | 0.166667 | 0 | 0 | 0 | 0.04878 | 0.221519 | 948 | 43 | 66 | 22.046512 | 0.731707 | 0.367089 | 0 | 0 | 0 | 0 | 0.196691 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.25 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21c44a1f70cf3b10e10c40da9f79c822ef4ae113 | 8,798 | py | Python | boilerplate_app/tests.py | rishabhst/graph_test_task | 5d342b077401903cf642bfdc8c49a1edb83ec9d7 | [
"MIT"
] | null | null | null | boilerplate_app/tests.py | rishabhst/graph_test_task | 5d342b077401903cf642bfdc8c49a1edb83ec9d7 | [
"MIT"
] | 2 | 2020-06-05T20:42:56.000Z | 2021-06-10T21:25:31.000Z | boilerplate_app/tests.py | rishabhst/graph_test_task | 5d342b077401903cf642bfdc8c49a1edb83ec9d7 | [
"MIT"
] | null | null | null | # from model_mommy import mommy
# from django.test import TestCase
# from rest_framework_jwt.serializers import JSONWebTokenSerializer
# from boilerplate_app.models import User
# from boilerplate_app.serializers import UserListSerializer, UserCreateSerializer
# class APITests(TestCase):
# def test_list_user(self):
# user = mommy.make(User)
# self.assertTrue(isinstance(user, User))
# user_serializer = UserListSerializer(user)
# assert user.id == user_serializer.data.get('id')
# assert user.first_name == user_serializer.data.get('first_name')
# assert user.last_name == user_serializer.data.get('last_name')
# assert user.email == user_serializer.data.get('email')
# assert user.role == user_serializer.data.get('role')
# def test_register(self):
# request_data = {
# "username": "username",
# "first_name" : "first_name",
# "last_name" : "last_name",
# "email" : "email@gmail.com",
# "password" : "qwerty1234",
# "role" : "role"
# }
# user_serializer = UserCreateSerializer(data=request_data)
# if user_serializer.is_valid():
# pass
# else:
# message = ''
# for error in user_serializer.errors.values():
# message += " "
# message += error[0]
# print(message)
# user = User(username=request_data.get('username'), first_name=request_data.get('first_name'), last_name=request_data.get('last_name'), email=request_data.get('email'), password=request_data.get('password'), role=request_data.get('role'))
# assert request_data.get('username') == user_serializer.data.get('username')
# assert request_data.get('first_name') == user_serializer.data.get('first_name')
# assert request_data.get('last_name') == user_serializer.data.get('last_name')
# assert request_data.get('email') == user_serializer.data.get('email')
# assert request_data.get('role') == user_serializer.data.get('role')
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Django imports
from django.test import TestCase
from django.urls import reverse
# rest framework imports
from rest_framework.test import APIClient
from rest_framework import status
# third party imports
from django.test import mock
from unittest.mock import patch
# local imports
from boilerplate_app.models import Graph, Node, Edge
class GraphAPITestCases(TestCase):
    """API tests for the graph CRUD endpoints plus the weakly-connected-node query."""

    def setUp(self):
        """Define the fixture constants and resolve every endpoint URL under test."""
        self.title = "TestGraph"
        self.nod_id1, self.nod_id2 = "X1", "X2"
        self.nod_id3 = "X3"
        self.nod_id4 = "X4"
        self.nod_id5 = "X5"
        self.nod_id6 = "X6"
        self.nod_title = "XYZ1"
        self.position = {"top": 10, "left": 15, "bottom": 30, "right": 50}
        self.weight = 0.5
        self.weight1 = 0.7
        self.URLS = {
            'graph-list-create-api': reverse('boilerplate_app-api:graph-list-create-api'),
            'graph-detail-update-delete-api': reverse('boilerplate_app-api:graph-detail-update-delete-api', kwargs={'title': self.title}),
            'graph-weekly-connected-node-api': reverse('boilerplate_app-api:graph-weekly-connected-node-api', kwargs={'title': self.title}),
            'parse-node-csv-api': reverse('boilerplate_app-api:parse-node-csv-api', kwargs={'title': self.title}),
            'graph-list-islands': reverse('boilerplate_app-api:graph-list-islands', kwargs={'title': self.title}),
        }

    def create_graph(self):
        """Create the fixture graph directly in the DB: six nodes X1..X6 with
        edges X1->X2, X1->X3 and X4->X5 (X6 stays isolated)."""
        graph = Graph.objects.create(title=self.title)
        Node.objects.bulk_create([
            Node(nod_id=self.nod_id1, title=self.nod_title, position=self.position, graph=graph),
            Node(nod_id=self.nod_id2, title=self.nod_title, position=self.position, graph=graph),
            Node(nod_id=self.nod_id3, title=self.nod_title, position=self.position, graph=graph),
            Node(nod_id=self.nod_id4, title=self.nod_title, position=self.position, graph=graph),
            Node(nod_id=self.nod_id5, title=self.nod_title, position=self.position, graph=graph),
            Node(nod_id=self.nod_id6, title=self.nod_title, position=self.position, graph=graph)
        ])
        # Fix: the return value of bulk_create was previously bound to an unused local.
        Edge.objects.bulk_create([
            Edge(source=Node.objects.get(nod_id=self.nod_id1, graph=graph), target=Node.objects.get(nod_id=self.nod_id2, graph=graph), weight=self.weight, graph=graph),
            Edge(source=Node.objects.get(nod_id=self.nod_id1, graph=graph), target=Node.objects.get(nod_id=self.nod_id3, graph=graph), weight=self.weight1, graph=graph),
            Edge(source=Node.objects.get(nod_id=self.nod_id4, graph=graph), target=Node.objects.get(nod_id=self.nod_id5, graph=graph), weight=self.weight, graph=graph)
        ])

    def test_create_graph(self):
        """POSTing a full graph payload creates it and reports status 'created'."""
        client = APIClient()
        resp = client.post(self.URLS['graph-list-create-api'], {
            "title": "TestGraph1",
            "nodes": [
                {
                    "id": "x1",
                    "title": "ABC",
                    "position": {"top": 10, "left": 15, "bottom": 30, "right": 50}
                },
                {
                    "id": "x2",
                    "title": "DEF",
                    "position": {"top": 10, "left": 60, "bottom": 30, "right": 95}
                },
                {
                    "id": "x3",
                    "title": "GHI",
                    "position": {"top": 10, "left": 100, "bottom": 30, "right": 125}
                }
            ],
            "edges": [
                {"source": "x1", "target": "x2", "weight": 0.5},
                {"source": "x1", "target": "x3", "weight": 0.8}
            ]
        }, format='json')
        assert resp.status_code == status.HTTP_201_CREATED
        assert resp.data['status'] == 'created'

    def test_list_graph(self):
        """GET on the list endpoint returns the fixture graph with its nodes and edges."""
        client = APIClient()
        self.create_graph()
        resp = client.get(self.URLS['graph-list-create-api'])
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['Response'][0]['title'] == 'TestGraph'
        assert len(resp.data['Response'][0]['nodes']) != 0
        assert len(resp.data['Response'][0]['edges']) != 0

    def test_detail_graph(self):
        """GET on the detail endpoint returns the fixture graph by title."""
        client = APIClient()
        self.create_graph()
        resp = client.get(self.URLS['graph-detail-update-delete-api'])
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['Response']['title'] == 'TestGraph'
        assert len(resp.data['Response']['nodes']) != 0
        assert len(resp.data['Response']['edges']) != 0

    def test_update_graph(self):
        """PUT replaces the fixture graph's nodes/edges and reports status 'updated'."""
        client = APIClient()
        self.create_graph()
        resp = client.put(self.URLS['graph-detail-update-delete-api'], {
            "title": "TestGraph",
            "nodes": [
                {
                    "id": "v1",
                    "title": "ABC",
                    "position": {"top": 10, "left": 15, "bottom": 30, "right": 50}
                },
                {
                    "id": "v2",
                    "title": "DEF",
                    "position": {"top": 10, "left": 60, "bottom": 30, "right": 95}
                },
                {
                    "id": "v3",
                    "title": "GHI",
                    "position": {"top": 10, "left": 100, "bottom": 30, "right": 125}
                }
            ],
            "edges": [
                {"source": "v1", "target": "v2", "weight": 0.5},
                {"source": "v1", "target": "v3", "weight": 0.8}
            ]
        }, format='json')
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['status'] == 'updated'

    def test_delete_graph(self):
        """DELETE removes the fixture graph and reports status 'deleted'."""
        client = APIClient()
        self.create_graph()
        resp = client.delete(self.URLS['graph-detail-update-delete-api'])
        assert resp.status_code == status.HTTP_204_NO_CONTENT
        assert resp.data['status'] == 'deleted'

    def test_weekly_connected_node(self):
        """The weakly-connected-node endpoint returns the expected node ids."""
        client = APIClient()
        self.create_graph()
        resp = client.get(self.URLS['graph-weekly-connected-node-api'])
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['Response'] == ['X2', 'X5']

    def tearDown(self):
        """Delete the fixture rows, children first.

        Fix: the original deleted the Graph rows *before* using the (lazy)
        graph queryset to filter Node/Edge, so those child filters matched
        nothing by the time they ran.
        """
        graphs = Graph.objects.filter(title=self.title)
        Edge.objects.filter(graph__in=graphs).delete()
        Node.objects.filter(graph__in=graphs).delete()
        graphs.delete()
| 39.630631 | 247 | 0.5599 | 998 | 8,798 | 4.793587 | 0.155311 | 0.03658 | 0.022575 | 0.0301 | 0.619147 | 0.537834 | 0.4375 | 0.372701 | 0.372701 | 0.298495 | 0 | 0.023569 | 0.291089 | 8,798 | 221 | 248 | 39.809955 | 0.743466 | 0.240396 | 0 | 0.280303 | 0 | 0 | 0.163001 | 0.06975 | 0 | 0 | 0 | 0 | 0.121212 | 1 | 0.068182 | false | 0 | 0.060606 | 0 | 0.136364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21c45eed900d2bb1c867cbce3f567b86b198b8b5 | 984 | py | Python | utils.py | NeuralBending/stylegan2-ada-pytorch | c1b33b7e6d67a8de3d483d64f9ca1a45a7c6d217 | [
"BSD-Source-Code"
] | null | null | null | utils.py | NeuralBending/stylegan2-ada-pytorch | c1b33b7e6d67a8de3d483d64f9ca1a45a7c6d217 | [
"BSD-Source-Code"
] | null | null | null | utils.py | NeuralBending/stylegan2-ada-pytorch | c1b33b7e6d67a8de3d483d64f9ca1a45a7c6d217 | [
"BSD-Source-Code"
] | null | null | null | import torch
def postprocess(img_out):
    """Convert an NCHW float image batch to an NHWC uint8 numpy array.

    NOTE(review): the 127.5/128 affine maps values roughly in [-1, 1] onto
    [0, 255]; anything outside that range is clamped.
    """
    channels_last = img_out.permute(0, 2, 3, 1)
    scaled = channels_last * 127.5 + 128
    return scaled.clamp(0, 255).to(torch.uint8).cpu().numpy()
def flicker(frames, f = 30):
    """Add a strobe effect in place: every ``f``-th frame (and the frame two
    after it) is blacked out, and the frame in between is inverted.

    Returns the same ``frames`` tensor, mutated in place.
    """
    for start in (0, 2):
        frames[start::f] *= 0
    frames[1::f] = 255 - frames[1::f]
    return frames
def latent_walk(w, num=20, width=1):
    """Linearly interpolate between consecutive latent codes in ``w``.

    For every adjacent pair (w[i], w[i+1]) this emits ``num`` blended codes
    and concatenates all segments along dim 0.

    NOTE(review): the blend weights are (width - t) and t with t in
    [0, width], so they sum to ``width`` rather than 1 when width != 1.
    """
    steps = torch.linspace(0, width, num)[:, None, None]
    segments = [
        w[i].repeat((num, 1, 1)) * (width - steps) + w[i + 1].repeat((num, 1, 1)) * steps
        for i in range(w.shape[0] - 1)
    ]
    return torch.cat(segments)
def noiseIt(x, scale=1):
    """Return ``x`` plus standard Gaussian noise scaled by ``scale``.

    ``x`` itself is not modified; a new tensor is returned.
    """
    gaussian = torch.zeros_like(x).normal_()
    return x + scale * gaussian
def noiseItT(x,scale=1,n=2):
    """Add temporally ramped Gaussian noise to a batch of frames, in place.

    ``x`` is treated as a 4-D tensor whose first dimension is the frame axis,
    and must live on (or be movable to) CUDA. For each of ``n - 1`` noise
    patterns the noise is faded in (weight 0 -> 1) and then faded out
    (1 -> 0) over ``x.shape[0] // n`` frames, and the concatenated ramp is
    added to ``x``.

    NOTE(review): the concatenated noise has 2*(n-1)*(x.shape[0]//n) frames,
    which only equals x.shape[0] when n == 2 (and x.shape[0] is even) --
    confirm other values of n are ever used.
    """
    app=[]
    # One noise pattern per extra segment; the [1:n] slice keeps n-1 frames.
    random_noise = torch.zeros_like(x).normal_()[1:n]
    # Linear fade weights, broadcast over the channel/height/width dims.
    lin = torch.linspace(0,1,x.shape[0]//n)[:,None,None,None].cuda()
    for noise in random_noise:
        noise = noise.repeat((x.shape[0]//n,1,1,1))
        app+= [noise*lin]  # fade in
        app+= [noise*(1-lin)]  # fade out
    noise = torch.cat(app)
    # print (noise.shape, x.shape)
    # In-place add: mutates (and returns) the caller's tensor.
    x+=scale*noise.cuda()
    return x
| 25.230769 | 92 | 0.627033 | 180 | 984 | 3.377778 | 0.316667 | 0.013158 | 0.026316 | 0.055921 | 0.069079 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060024 | 0.153455 | 984 | 38 | 93 | 25.894737 | 0.669868 | 0.033537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.033333 | 0.033333 | 0.366667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21c9f21c6cd6f96ad31cddc81cac13f24697fdd7 | 16,697 | py | Python | tool/merganser/data_retrieval2.py | ualberta-smr/conflict-prediction | 6cd2c54a8af991256d0a2f85c3ab5d828ea2c3f8 | [
"MIT"
] | 4 | 2019-08-04T03:36:51.000Z | 2020-09-17T18:13:45.000Z | tool/merganser/data_retrieval2.py | ualberta-smr/conflict-prediction | 6cd2c54a8af991256d0a2f85c3ab5d828ea2c3f8 | [
"MIT"
] | null | null | null | tool/merganser/data_retrieval2.py | ualberta-smr/conflict-prediction | 6cd2c54a8af991256d0a2f85c3ab5d828ea2c3f8 | [
"MIT"
] | 1 | 2019-06-11T21:15:34.000Z | 2019-06-11T21:15:34.000Z |
import os
import pandas as pd
import numpy as np
from io import StringIO
import config
import logging
import pandas as pd
import pymysql
from sqlalchemy import create_engine
class Data_Retreival:
    """Reads merge-scenario features from the ``Merge_Data`` MySQL database
    and assembles them into pandas DataFrames / CSV files for merge-conflict
    prediction.

    Note: the class keeps its historical misspelling ("Retreival") because
    callers refer to it by this name.
    """

    def __init__(self):
        # Each attribute below is a raw SQL query (or a template whose ``{}``
        # placeholder takes the merge-parent index 1 or 2) over the
        # Merge_Data schema.
        self.code_complexity_query = 'SELECT Merge_Scenario_merge_commit_hash, measure1_diff, measure2_diff, ' \
                                     'measure3_diff, measure4_diff, measure5_diff, measure6_diff, measure7_diff, ' \
                                     'measure8_diff ' \
                                     'FROM Merge_Data.Code_Complexity'
        self.code_violation_query = 'SELECT Code_Violation.Merge_Scenario_merge_commit_hash, ' \
                                    'Code_Violation.parent1_style_violation_num - ' \
                                    ' Code_Violation.parent2_style_violation_num ' \
                                    'FROM Merge_Data.Code_Style_Violation Code_Violation '
        self.parallel_changes_query = 'SELECT merge_commit_hash, parallel_changed_file_num ' \
                                      'FROM Merge_Data.Merge_Scenario'
        self.commit_num_query = 'SELECT Merge_Scenario.merge_commit_hash, count(Commits.commit_hash) ' \
                                'FROM Merge_Data.Merge_Scenario Merge_Scenario LEFT JOIN ' \
                                'Merge_Data.Merge_Related_Commit Commits ' \
                                'on Merge_Scenario.merge_commit_hash = Commits.Merge_Scenario_merge_commit_hash' \
                                ' AND Commits.merge_commit_parent = {} ' \
                                'GROUP BY Merge_Scenario.merge_commit_hash'
        self.commit_density_two_weeks = 'SELECT Merge_Scenario.merge_commit_hash, count(Commits.commit_hash) FROM ' \
                                        'Merge_Data.Merge_Scenario Merge_Scenario' \
                                        ' LEFT JOIN Merge_Data.Merge_Related_Commit Commits ' \
                                        'on Merge_Scenario.merge_commit_hash = Commits.Merge_Scenario_merge_commit_hash ' \
                                        'AND Commits.merge_commit_parent = {} AND ' \
                                        'TIMESTAMPDIFF(WEEK, Merge_Scenario.merge_commit_date, Commits.date) < 3 ' \
                                        'GROUP BY Merge_Scenario.merge_commit_hash'
        self.file_change_query = 'SELECT Merge_Scenario.merge_commit_hash, COALESCE(SUM(file_added_num), 0), ' \
                                 'COALESCE(SUM(file_removed_num), 0), COALESCE(SUM(file_renamed_num), 0), ' \
                                 'COALESCE(SUM(file_copied_num), 0), COALESCE(SUM(file_modified_num), 0) ' \
                                 ' FROM Merge_Data.Merge_Scenario Merge_Scenario ' \
                                 'LEFT JOIN Merge_Data.Merge_Related_Commit Commits ' \
                                 'on Merge_Scenario.merge_commit_hash = Commits.Merge_Scenario_merge_commit_hash' \
                                 ' AND Commits.merge_commit_parent = {} ' \
                                 'GROUP BY Merge_Scenario.merge_commit_hash'
        self.line_change_query = 'SELECT Merge_Scenario.merge_commit_hash, COALESCE(SUM(line_added_num), 0), COALESCE(SUM(line_removed_num), 0) ' \
                                 'FROM Merge_Data.Merge_Scenario Merge_Scenario ' \
                                 'LEFT JOIN Merge_Data.Merge_Related_Commit Commits ' \
                                 'on Merge_Scenario.merge_commit_hash = Commits.Merge_Scenario_merge_commit_hash ' \
                                 'AND Commits.merge_commit_parent = {} ' \
                                 'GROUP BY Merge_Scenario.merge_commit_hash'
        self.developer_num_query = 'SELECT merge_commit_hash, parent{}_developer_num ' \
                                   'FROM Merge_Data.Merge_Scenario'
        self.commit_message_quey = 'SELECT GROUP_CONCAT(Commits.message SEPARATOR \' ||| \') ' \
                                   'FROM Merge_Data.Merge_Scenario Merge_Scenario ' \
                                   'LEFT JOIN Merge_Data.Merge_Related_Commit Commits ' \
                                   'on Merge_Scenario.merge_commit_hash = Commits.Merge_Scenario_merge_commit_hash ' \
                                   'AND Commits.merge_commit_parent = {} ' \
                                   'GROUP BY Merge_Scenario.merge_commit_hash'
        self.branch_duration = 'SELECT Merge_Scenario.merge_commit_hash, TIMESTAMPDIFF(HOUR, ' \
                               'Merge_Scenario.ancestor_date, Merge_Scenario.parent{}_date) ' \
                               'FROM Merge_Data.Merge_Scenario Merge_Scenario'
        self.is_conflict_query = 'SELECT Merge_Replay.Merge_Scenario_merge_commit_hash, Merge_Replay.is_conflict ' \
                                 'FROM Merge_Data.Merge_Replay Merge_Replay'
        self.conflict_rate_query = """SELECT scenarios.name AS 'Repository Name'
                                , scenarios.scenarios AS '# Merge Scenarios',
                                conflicts.conflicts AS '# Merge Scenarios with Conflicts',
                                100 * conflicts.conflicts/scenarios.scenarios 'Conflict Rate (%)'
                                FROM
                                (SELECT Repository.name,
                                COUNT(Merge_Replay.Merge_Scenario_merge_commit_hash) AS 'scenarios'
                                FROM Merge_Data.Repository
                                JOIN Merge_Data.Merge_Replay
                                ON id = Merge_Scenario_Repository_id
                                GROUP BY name
                                ORDER BY name) scenarios
                                INNER JOIN
                                (SELECT name, COUNT(Merge_Replay.Merge_Scenario_merge_commit_hash) AS 'conflicts'
                                FROM Merge_Data.Repository
                                JOIN Merge_Data.Merge_Replay
                                ON id = Merge_Scenario_Repository_id
                                WHERE is_conflict = 1
                                GROUP BY name
                                ORDER BY name) conflicts
                                ON scenarios. name = conflicts.name"""
        self.repository_stat = """(SELECT MIN(star_num) as Min, AVG(star_num) as AVG,MAX(star_num) as Max
                                FROM Merge_Data.Repository)
                                UNION ALL
                                (SELECT MIN(watch_num), AVG(watch_num),MAX(watch_num)
                                FROM Merge_Data.Repository)
                                UNION ALL
                                (SELECT MIN(fork_num), AVG(fork_num),MAX(fork_num)
                                FROM Merge_Data.Repository)
                                UNION ALL
                                (SELECT MIN(issue_num), AVG(issue_num),MAX(issue_num)
                                FROM Merge_Data.Repository)
                                UNION ALL
                                (SELECT MIN(size) / 1024, AVG(size) / 1024,MAX(size) / 1024
                                FROM Merge_Data.Repository)"""
        self.parallel_changed_commits_query = """select merge_commit_hash from Merge_Data.Merge_Scenario sc Where parallel_changed_file_num > 0"""
        self.merge_commits_langs_query = """SELECT merge_commit_hash, language
                                FROM Merge_Data.Repository
                                JOIN Merge_Data.Merge_Scenario
                                ON id = Repository_id
                                WHERE language IN ({})"""

    def get_parallel_changed_commits(self):
        """Return merge commits whose two branches changed at least one file in parallel."""
        logging.info('Extracting parallel changes...')
        return self.get_data_frame_of_query_result(self.get_query_result(self.parallel_changed_commits_query))

    def get_query_result(self, query):
        """Run ``query`` against the local MySQL database and return a DataFrame."""
        engine = create_engine('mysql+pymysql://{}:{}@localhost/{}'.format(config.DB_USER_NAME, config.DB_PASSWORD, config.DB_NAME))
        df = pd.read_sql_query(query, engine)
        return df
        # return os.popen('mysql -u {} -e "{}"'.format(config.DB_USER_NAME, query)).read()

    def get_data_frame_of_query_result(self, query_result):
        """Return the query result unchanged.

        Kept for backward compatibility: earlier versions parsed raw ``mysql``
        CLI output into a DataFrame here, but ``get_query_result`` now returns
        a DataFrame directly. Fix: the original left the old StringIO parsing
        as unreachable dead code after the ``return``.
        """
        return query_result

    def get_complexity(self):
        """Return per-scenario code-complexity measure differences."""
        logging.info('Extracting code complexity...')
        return self.get_data_frame_of_query_result(self.get_query_result(self.code_complexity_query))

    def get_code_violation(self):
        """Return per-scenario style-violation count differences between parents."""
        logging.info('Extracting code style violation...')
        return self.get_data_frame_of_query_result(self.get_query_result(self.code_violation_query))

    def get_parallel_changes(self):
        """Return the number of files changed in parallel per merge scenario."""
        logging.info('Extracting code parallel changes...')
        return self.get_data_frame_of_query_result(self.get_query_result(self.parallel_changes_query))

    def get_commit_num(self, parent):  # TODO: The number of data in two branches is not the same.
        """Return the commit count on the given merge parent (1 or 2) per scenario."""
        logging.info('Extracting the number of commits...')
        return self.get_data_frame_of_query_result(self.get_query_result(self.commit_num_query.format(parent)))

    def get_commit_density(self, parent):
        """Return the number of commits within the last two weeks before the merge."""
        logging.info('Extracting commit density...')
        return self.get_data_frame_of_query_result(self.get_query_result(self.commit_density_two_weeks.format(parent)))

    def get_file_changes(self, parent):
        """Return added/removed/renamed/copied/modified file counts per scenario."""
        logging.info('Extracting file changes...')
        return self.get_data_frame_of_query_result(self.get_query_result(self.file_change_query.format(parent)))

    def get_line_changes(self, parent):
        """Return added/removed line counts per scenario for the given parent."""
        logging.info('Extracting line changes...')
        return self.get_data_frame_of_query_result(self.get_query_result(self.line_change_query.format(parent)))

    def get_developer_num(self, parent):
        """Return a single-column DataFrame with the developer count per scenario."""
        logging.info('Extracting developer num...')
        res = self.get_data_frame_of_query_result(self.get_query_result(self.developer_num_query.format(parent))).drop('merge_commit_hash', axis=1).values
        return pd.DataFrame([item for sublist in res for item in sublist], columns=['# Developers'])

    def get_merge_scenarios_in_lang(self, langs):
        """Return merge commits whose repository language is in ``langs``."""
        logging.info('Extracting merges by language...')
        langs = ','.join(['\'{}\''.format(lang) for lang in langs])
        return self.get_data_frame_of_query_result(self.get_query_result(self.merge_commits_langs_query.format(langs)))

    def get_commit_messege_characteristics(self, parent):
        """Return (keyword-frequency DataFrame, message-length-stats DataFrame)
        for the commit messages on the given merge parent.
        """
        logging.info('Extracting message characteristics...')
        commit_messages = self.get_query_result(self.commit_message_quey.format(parent))
        # NOTE(review): [1:] drops the first row; with a DataFrame from
        # read_sql_query this skips a data row, not a header -- confirm intent.
        commit_messages_list = commit_messages.values[1:]
        commit_messages_list = [item for sublist in commit_messages_list for item in sublist if item is not None]
        keywords = sorted(['fix', 'bug', 'feature', 'improve', 'document', 'refactor', 'update', 'add', 'remove', 'use',
                           'delete', 'change'])
        keywords_frequency = []
        commit_messege_length_stats = []
        for merge_scenrio_commits in commit_messages_list:
            keywords_frequency.append([merge_scenrio_commits.lower().count(word) for word in keywords])
            if merge_scenrio_commits != 'NULL':
                # Messages are concatenated with ' ||| '; split back into
                # individual messages before computing length statistics.
                seperated_commit_message = merge_scenrio_commits.replace(' ||| ', '\n').split('\n')
                commit_messege_length = [len(msg.split()) for msg in seperated_commit_message]
                commit_messege_length_stats.append([np.min(commit_messege_length), np.mean(commit_messege_length),
                                                    np.median(commit_messege_length), np.max(commit_messege_length)])
            else:
                commit_messege_length_stats.append([0.0, 0.0, 0.0, 0.0])
        column_names_frequency = ['# fix', '# bug', '# feature', '# improve', '# document', '# refactor', '# update',
                                  '# add', '# remove', '# use', '# delete', '# change']
        column_names_stats = ['Min Msg Length', 'Mean Msg Length', 'Median Msg Length', 'Max Msg Length']
        return pd.DataFrame(keywords_frequency, columns=column_names_frequency), \
            pd.DataFrame(commit_messege_length_stats, columns=column_names_stats)

    def get_branch_duration(self, parent):
        """Return a single-column DataFrame with the branch duration (hours
        between the common ancestor and the given parent) per scenario.
        """
        logging.info('Extracting branch duration...')
        res = self.get_data_frame_of_query_result(self.get_query_result(self.branch_duration.format(parent))).drop(
            'merge_commit_hash', axis=1).values
        return pd.DataFrame([item for sublist in res for item in sublist], columns=['Branch Duration'])

    def get_is_conflict(self):
        """Return a single-column DataFrame with the conflict label per scenario."""
        res = self.get_data_frame_of_query_result(self.get_query_result(self.is_conflict_query.format())).drop(
            'Merge_Scenario_merge_commit_hash', axis=1).values
        return pd.DataFrame([item for sublist in res for item in sublist], columns=['Is Conflict'])

    def get_merge_scenario_prediction_data(self, langs):
        """Assemble the full feature matrix (parent-1 minus parent-2 diffs)
        restricted to repositories whose language is in ``langs``.
        """
        keywords_frequency1, commit_messege_length_stats1 = self.get_commit_messege_characteristics(1)
        keywords_frequency2, commit_messege_length_stats2 = self.get_commit_messege_characteristics(2)
        git_features_scenario = self.get_parallel_changes()
        features = [git_features_scenario,
                    self.get_commit_num(1).drop('merge_commit_hash', axis=1) - self.get_commit_num(2).drop('merge_commit_hash',
                                                                                                           axis=1),
                    self.get_commit_density(1).drop('merge_commit_hash', axis=1) - self.get_commit_density(2).drop(
                        'merge_commit_hash', axis=1),
                    self.get_file_changes(1).drop('merge_commit_hash', axis=1) - self.get_file_changes(2).drop(
                        'merge_commit_hash', axis=1),
                    self.get_line_changes(1).drop('merge_commit_hash', axis=1) - self.get_line_changes(2).drop(
                        'merge_commit_hash', axis=1),
                    self.get_developer_num(1) - self.get_developer_num(2),
                    keywords_frequency1 - keywords_frequency2,
                    commit_messege_length_stats1 - commit_messege_length_stats2,
                    self.get_branch_duration(1) - self.get_branch_duration(2)]
        res = pd.concat([pd.concat(features, axis=1).sort_values(by=['merge_commit_hash']),
                         self.get_merge_scenarios_in_lang(langs).sort_values(by=['merge_commit_hash'])], axis=1)
        res = res[res['language'].isin(langs)].drop('merge_commit_hash', axis=1).drop('language', axis=1)
        return res

    def save_prediction_data_to_csv(self, langs, post_name):
        """Write the feature matrix and conflict labels for ``langs`` to CSV
        files whose names end with ``post_name``.
        """
        self.get_merge_scenario_prediction_data(langs).drop('Merge_Scenario_merge_commit_hash', axis=1)\
            .to_csv(path_or_buf=config.PREDICTION_CSV_PATH + config.PREDICTION_CSV_DATA_NAME + post_name)
        self.get_is_conflict().to_csv(path_or_buf=config.PREDICTION_CSV_PATH + config.PREDICTION_CSV_LABEL_NAME + post_name)

    def get_conflict_ratio(self):
        """Return per-repository merge-scenario counts and conflict rates."""
        return self.get_data_frame_of_query_result(self.get_query_result(self.conflict_rate_query))

    def get_repository_stats(self):
        """Return min/avg/max of stars, watches, forks, issues and size (MB)."""
        return self.get_data_frame_of_query_result(self.get_query_result(self.repository_stat)).rename(index={0: 'star', 1: 'watch', 2: 'fork', 3: 'issue', 4: 'size'})

    def print_df_stats(self, df):
        """Print basic shape/index/column information about ``df``."""
        print('DataFrame Stats:')
        print(' - # Data Points: {}'.format(df.shape[0]))
        print(' - # Features: {}'.format(df.shape[1]))
        print(' - Index: {}'.format(df.index))
        print(' - Columns: {}'.format(df.columns))
# Logging
logging.basicConfig(level=logging.INFO,
                    format='%(levelname)s in %(threadName)s - %(asctime)s by %(name)-12s : %(message)s',
                    datefmt='%y-%m-%d %H:%M:%S')

# Export the per-language prediction CSVs (features + labels) when this
# module is executed.
obj = Data_Retreival()
print('Start data saving')
print(' - Java')
obj.save_prediction_data_to_csv(['Java'], '_java')
print(' - Python')
obj.save_prediction_data_to_csv(['Python'], '_Python')
print(' - PHP')
obj.save_prediction_data_to_csv(['PHP'], '_PHP')
print(' - Ruby')
obj.save_prediction_data_to_csv(['Ruby'], '_Ruby')
print(' - C++')
obj.save_prediction_data_to_csv(['C++'], '_CPP')
# Fix: this combined run was previously mislabeled ' - Java'.
print(' - All')
obj.save_prediction_data_to_csv(['Java', 'Python', 'Ruby', 'PHP', 'C++'], '_ALL')
print('Finish data saving')
#print(obj.get_repository_stats())
| 61.386029 | 164 | 0.605199 | 1,901 | 16,697 | 4.960021 | 0.126249 | 0.068936 | 0.069997 | 0.071269 | 0.578004 | 0.465055 | 0.413512 | 0.392831 | 0.366105 | 0.343939 | 0 | 0.007945 | 0.298976 | 16,697 | 271 | 165 | 61.612546 | 0.797608 | 0.01072 | 0 | 0.20524 | 0 | 0.017467 | 0.409218 | 0.15728 | 0 | 0 | 0 | 0.00369 | 0 | 1 | 0.091703 | false | 0.004367 | 0.039301 | 0.008734 | 0.222707 | 0.065502 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21cd10ec0c43a2a5245ac89b17664c6b6824fc20 | 12,948 | py | Python | gmusicapi_wrapper/utils.py | dmoebius/gmusicapi-wrapper | 8708683cd33955def1378fc28319ef37805b851d | [
"MIT"
] | 20 | 2015-08-29T15:35:22.000Z | 2018-09-29T06:16:15.000Z | gmusicapi_wrapper/utils.py | dmoebius/gmusicapi-wrapper | 8708683cd33955def1378fc28319ef37805b851d | [
"MIT"
] | 17 | 2015-02-28T17:19:30.000Z | 2018-06-08T10:33:29.000Z | gmusicapi_wrapper/utils.py | dmoebius/gmusicapi-wrapper | 8708683cd33955def1378fc28319ef37805b851d | [
"MIT"
] | 11 | 2015-12-11T15:20:24.000Z | 2019-02-13T18:54:11.000Z | # coding=utf-8
"""Utility functions for gmusicapi_wrapper.
>>> import gmusicapi_wrapper.utils as gmw_utils
>>> from gmusicapi_wrapper.utils import ...
"""
import logging
import os
import re
import subprocess
import mutagen
from .constants import CHARACTER_REPLACEMENTS, CYGPATH_RE, TEMPLATE_PATTERNS
from .decorators import cast_to_list
logger = logging.getLogger(__name__)
def convert_cygwin_path(path):
    """Convert a Cygwin Unix-style path to its absolute Windows form.

    Logs and re-raises the underlying error when the ``cygpath`` binary is
    missing or exits non-zero.
    """
    command = ["cygpath", "-aw", path]
    try:
        output = subprocess.check_output(command, universal_newlines=True)
    except (FileNotFoundError, subprocess.CalledProcessError):
        logger.exception("Call to cygpath failed.")
        raise
    return output.strip()
def _get_mutagen_metadata(filepath):
    """Load ``filepath`` with mutagen in "easy" tag mode and return the metadata.

    Logs a warning and re-raises ``mutagen.MutagenError`` when the file is
    not a supported audio file.
    """
    try:
        return mutagen.File(filepath, easy=True)
    except mutagen.MutagenError:
        logger.warning("Can't load {} as music file.".format(filepath))
        raise
def _mutagen_fields_to_single_value(metadata):
"""Replace mutagen metadata field list values in mutagen tags with the first list value."""
return dict((k, v[0]) for k, v in metadata.items() if v)
def _split_field_to_single_value(field):
"""Convert number field values split by a '/' to a single number value."""
split_field = re.match(r'(\d+)/\d+', field)
return split_field.group(1) or field
def _filter_comparison_fields(song):
"""Filter missing artist, album, title, or track fields to improve match accuracy."""
# Need both tracknumber (mutagen) and track_number (Google Music) here.
return [field for field in ['artist', 'album', 'title', 'tracknumber', 'track_number'] if field in song and song[field]]
def _normalize_metadata(metadata):
"""Normalize metadata to improve match accuracy."""
metadata = str(metadata)
metadata = metadata.lower()
metadata = re.sub(r'\/\s*\d+', '', metadata) # Remove "/<totaltracks>" from track number.
metadata = re.sub(r'^0+([0-9]+)', r'\1', metadata) # Remove leading zero(s) from track number.
metadata = re.sub(r'^\d+\.+', '', metadata) # Remove dots from track number.
metadata = re.sub(r'[^\w\s]', '', metadata) # Remove any non-words.
metadata = re.sub(r'\s+', ' ', metadata) # Reduce multiple spaces to a single space.
metadata = re.sub(r'^\s+', '', metadata) # Remove leading space.
metadata = re.sub(r'\s+$', '', metadata) # Remove trailing space.
metadata = re.sub(r'^the\s+', '', metadata, re.I) # Remove leading "the".
return metadata
def _normalize_song(song):
"""Convert filepath to song dict while leaving song dicts untouched."""
return song if isinstance(song, dict) else _mutagen_fields_to_single_value(_get_mutagen_metadata(song))
def compare_song_collections(src_songs, dst_songs):
    """Compare two song collections to find missing songs.

    Parameters:
        src_songs (list): Google Music song dicts or filepaths of local songs.
        dst_songs (list): Google Music song dicts or filepaths of local songs.

    Returns:
        A list of entries from ``src_songs`` whose normalized comparison
        fields do not appear anywhere in ``dst_songs``.
    """

    def song_key(song):
        normalized = _normalize_song(song)
        return tuple(
            _normalize_metadata(normalized[field])
            for field in _filter_comparison_fields(normalized)
        )

    dst_keys = {song_key(dst_song) for dst_song in dst_songs}

    return [src_song for src_song in src_songs if song_key(src_song) not in dst_keys]
@cast_to_list(0)
def get_supported_filepaths(filepaths, supported_extensions, max_depth=float('inf')):
    """Get filepaths with supported extensions from given filepaths.

    Parameters:
        filepaths (list or str): Filepath(s) to check.
        supported_extensions (tuple or str): Supported file extensions or a single file extension.
        max_depth (int): The depth in the directory tree to walk.
            A depth of '0' limits the walk to the top directory.
            Default: No limit.

    Returns:
        A list of supported filepaths.
    """
    matched = []

    for path in filepaths:
        # On Windows, translate Cygwin-style paths before touching the filesystem.
        if os.name == 'nt' and CYGPATH_RE.match(path):
            path = convert_cygwin_path(path)

        if os.path.isdir(path):
            for root, __, files in walk_depth(path, max_depth):
                matched.extend(
                    os.path.join(root, filename)
                    for filename in files
                    if filename.lower().endswith(supported_extensions)
                )
        elif os.path.isfile(path) and path.lower().endswith(supported_extensions):
            matched.append(path)

    return matched
@cast_to_list(0)
def exclude_filepaths(filepaths, exclude_patterns=None):
    """Exclude file paths based on regex patterns.

    Parameters:
        filepaths (list or str): Filepath(s) to check.
        exclude_patterns (list): Python regex patterns to check filepaths against.

    Returns:
        A list of filepaths to include and a list of filepaths to exclude.
    """
    if not exclude_patterns:
        return filepaths, []

    # Any one of the patterns matching anywhere in the filepath is enough
    # to exclude it. (The previous generator expression inside join and the
    # re-check of exclude_patterns inside the loop were redundant.)
    exclude_re = re.compile("|".join(exclude_patterns))

    included_songs = []
    excluded_songs = []

    for filepath in filepaths:
        if exclude_re.search(filepath):
            excluded_songs.append(filepath)
        else:
            included_songs.append(filepath)

    return included_songs, excluded_songs
def _check_field_value(field_value, pattern):
"""Check a song metadata field value for a pattern."""
if isinstance(field_value, list):
return any(re.search(pattern, str(value), re.I) for value in field_value)
else:
return re.search(pattern, str(field_value), re.I)
def _check_filters(song, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):
    """Check a song metadata dict against a set of metadata filters.

    A song is included unless it fails the include filters or is caught by
    the exclude filters. The ``all_*`` flags select between requiring every
    filter to match (``all``) and requiring any single match (``any``).
    """
    def matches(field, pattern):
        return field in song and _check_field_value(song[field], pattern)

    include = True

    if include_filters:
        check = all if all_includes else any
        if not check(matches(field, pattern) for field, pattern in include_filters):
            include = False

    if exclude_filters:
        check = all if all_excludes else any
        if check(matches(field, pattern) for field, pattern in exclude_filters):
            include = False

    return include
def filter_google_songs(songs, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):
    """Match a Google Music song dict against a set of metadata filters.

    Parameters:
        songs (list): Google Music song dicts to filter.
        include_filters (list): A list of ``(field, pattern)`` tuples.
            Fields are any valid Google Music metadata field available to the
            Musicmanager client. Patterns are Python regex patterns. Songs are
            filtered out if the given field values don't match any pattern.
        exclude_filters (list): A list of ``(field, pattern)`` tuples.
            Fields/patterns as above. Songs are filtered out if the given
            field values match any of the given patterns.
        all_includes (bool): If ``True``, all include_filters criteria must
            match to include a song.
        all_excludes (bool): If ``True``, all exclude_filters criteria must
            match to exclude a song.

    Returns:
        ``(matched, filtered)``: a list of Google Music song dicts matching
        criteria and a list of those filtered out.
    """
    # Without filters, everything matches and nothing is filtered.
    if not (include_filters or exclude_filters):
        return list(songs), []

    matched_songs = []
    filtered_songs = []

    for song in songs:
        passes = _check_filters(
            song,
            include_filters=include_filters,
            exclude_filters=exclude_filters,
            all_includes=all_includes,
            all_excludes=all_excludes,
        )
        (matched_songs if passes else filtered_songs).append(song)

    return matched_songs, filtered_songs
def filter_local_songs(filepaths, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):
    """Match a local file against a set of metadata filters.

    Parameters:
        filepaths (list): Filepaths to filter.
        include_filters (list): A list of ``(field, pattern)`` tuples.
            Fields are any valid mutagen metadata fields. Patterns are Python
            regex patterns. Local songs are filtered out if the given field
            values don't match any of the given patterns.
        exclude_filters (list): A list of ``(field, pattern)`` tuples.
            Fields/patterns as above. Local songs are filtered out if the
            given field values match any of the given patterns.
        all_includes (bool): If ``True``, all include_filters criteria must
            match to include a song.
        all_excludes (bool): If ``True``, all exclude_filters criteria must
            match to exclude a song.

    Returns:
        ``(matched, filtered)``: local song filepaths matching criteria and
        those filtered out. Invalid music files are also filtered out.
    """
    matched_songs = []
    filtered_songs = []

    for filepath in filepaths:
        try:
            song = _get_mutagen_metadata(filepath)
        except mutagen.MutagenError:
            # Unreadable/invalid music files count as filtered out.
            filtered_songs.append(filepath)
            continue

        if not (include_filters or exclude_filters):
            matched_songs.append(filepath)
        elif _check_filters(
                song,
                include_filters=include_filters,
                exclude_filters=exclude_filters,
                all_includes=all_includes,
                all_excludes=all_excludes):
            matched_songs.append(filepath)
        else:
            filtered_songs.append(filepath)

    return matched_songs, filtered_songs
def get_suggested_filename(metadata):
    """Generate a filename for a song based on metadata.

    Parameters:
        metadata (dict): A metadata dict.

    Returns:
        A filename of the form ``'NN title'`` where ``NN`` is the track
        number zero-padded to two characters, or ``'00 title'`` when no
        track number (or no title) is present.
    """
    title = metadata.get('title')
    if title:
        # The track number may appear under different keys depending on the
        # metadata source; check them in the same priority order as before.
        for field in ('track_number', 'trackNumber', 'tracknumber'):
            track = metadata.get(field)
            if track:
                return '{track:0>2} {title}'.format(track=track, title=title)
    return '00 {}'.format(metadata.get('title', ''))
def _replace_template_patterns(template, metadata, template_patterns):
    """Fill template patterns in *template* with metadata field values.

    Parameters:
        template (str): A filepath containing patterns from *template_patterns*.
        metadata (dict): A metadata dict. No longer modified by this function.
        template_patterns (dict): ``pattern: field`` pairs.

    Returns:
        The filepath with patterns replaced and characters substituted per
        CHARACTER_REPLACEMENTS.
    """
    drive, path = os.path.splitdrive(template)

    # Split the path into its individual components.
    parts = []
    while True:
        newpath, tail = os.path.split(path)
        if newpath == path:
            break
        parts.append(tail)
        path = newpath
    parts.reverse()

    for i, part in enumerate(parts):
        for key, field in template_patterns.items():
            if key in part and field in metadata:
                value = metadata[field]
                # Force track number to be zero-padded to 2 digits.
                # Fix: pad a local copy instead of writing the padded value
                # back into the caller's metadata dict (a surprising side
                # effect of the previous implementation).
                if field in ('tracknumber', 'track_number'):
                    value = _split_field_to_single_value(value).zfill(2)
                parts[i] = parts[i].replace(key, value)

        # Substitute characters that are unsafe in filenames.
        for char in CHARACTER_REPLACEMENTS:
            if char in parts[i]:
                parts[i] = parts[i].replace(char, CHARACTER_REPLACEMENTS[char])

    if drive:
        filepath = os.path.join(drive, os.sep, *parts)
    elif os.path.isabs(template):
        filepath = os.path.join(os.sep, *parts)
    else:
        filepath = os.path.join(*parts)

    return filepath
def template_to_filepath(template, metadata, template_patterns=None):
    """Create directory structure and file name based on metadata template.

    Parameters:
        template (str): A filepath which can include template patterns as
            defined by :param template_patterns:.
        metadata (dict): A metadata dict.
        template_patterns (dict): A dict of ``pattern: field`` pairs used to
            replace patterns with metadata field values.
            Default: :const TEMPLATE_PATTERNS:

    Returns:
        A filepath.
    """
    if template_patterns is None:
        template_patterns = TEMPLATE_PATTERNS

    # Accept mutagen metadata objects as well as plain dicts.
    if not isinstance(metadata, dict):
        metadata = _mutagen_fields_to_single_value(metadata)
    assert isinstance(metadata, dict)

    suggested_filename = get_suggested_filename(metadata).replace('.mp3', '')

    if template in (os.getcwd(), '%suggested%'):
        return suggested_filename

    expanded = template.replace('%suggested%', suggested_filename)
    return _replace_template_patterns(expanded, metadata, template_patterns)
def walk_depth(path, max_depth=float('inf')):
    """Walk a directory tree with configurable depth.

    Parameters:
        path (str): A directory path to walk.
        max_depth (int): The depth in the directory tree to walk.
            A depth of '0' limits the walk to the top directory.
            Default: No limit.

    Yields:
        The same ``(root, dirs, files)`` tuples as :func:`os.walk`.
    """
    for dir_entry in os.walk(path):
        root, dirs, _ = dir_entry

        # Depth of this root relative to the starting path. The previous
        # implementation compared separator counts of os.walk's root (which
        # keeps the caller's relative form) against the *absolute* starting
        # path, so for relative paths the level was strongly negative and
        # max_depth was never enforced.
        relative_root = os.path.relpath(root, path)
        if relative_root == os.curdir:
            level = 0
        else:
            level = relative_root.count(os.path.sep) + 1

        yield dir_entry

        if level >= max_depth:
            # Prune in place so os.walk does not descend further.
            dirs[:] = []
| 30.902148 | 121 | 0.742431 | 1,836 | 12,948 | 5.081699 | 0.142702 | 0.032583 | 0.009003 | 0.012004 | 0.445338 | 0.381565 | 0.33687 | 0.293355 | 0.280279 | 0.270847 | 0 | 0.002017 | 0.157708 | 12,948 | 418 | 122 | 30.976077 | 0.853475 | 0.415972 | 0 | 0.265957 | 0 | 0 | 0.045155 | 0 | 0 | 0 | 0 | 0 | 0.005319 | 1 | 0.101064 | false | 0 | 0.037234 | 0.005319 | 0.244681 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21cf17efec6eb019292dc869b1f00c24149cffb4 | 4,402 | py | Python | restructuredBootstrap/custom_pygments_style.py | lakhman/restructuredBootstrap | 57002c6db2d3b5b97dc23820b91711db4f00c07e | [
"MIT"
] | 1 | 2020-08-30T08:58:46.000Z | 2020-08-30T08:58:46.000Z | restructuredBootstrap/custom_pygments_style.py | lakhman/restructuredBootstrap | 57002c6db2d3b5b97dc23820b91711db4f00c07e | [
"MIT"
] | null | null | null | restructuredBootstrap/custom_pygments_style.py | lakhman/restructuredBootstrap | 57002c6db2d3b5b97dc23820b91711db4f00c07e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2017 - Anil Lakhman - MIT License
# -----------------------------------------------------------------------------
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal, Text
# https://github.com/jhermann/pygments-markdown-lexer/blob/master/src/pygments_markdown_lexer/__init__.py
class StackOverflowStyle(Style):  # pragma: no cover
    """Pygments style mimicking Stack Overflow's code highlighting.

    Colors are assigned per token type below. Entries marked
    'to be revised' were carried over from a base theme and may not match
    Stack Overflow exactly.
    """

    #: highlight background color
    background_color = "#eff0f1"
    highlight_color = 'rgba(174, 247, 174, 0.5)'

    default_style = ""

    # Mapping of Pygments token types to style strings
    # ("color", "italic color", "bold color", "underline color", ...).
    styles = {
        # No corresponding class for the following:
        #Text:                     "",  # class: ''
        Whitespace:                "underline #F8F8F8",      # class: 'w'
        Error:                     "#A40000 border:#EF2929", # class: 'err'
        Other:                     "#303336",                # class 'x'

        Comment:                   "#858C93", # class: 'c'
        Comment.Single:            "#858C93", # class: 'c1'
        Comment.Multiline:         "#858C93", # class: 'cm'
        Comment.Preproc:           "italic #AAA", # class: 'cp'

        Keyword:                   "#101094", # class: 'k'
        Keyword.Constant:          "#303336", # class: 'kc'
        Keyword.Declaration:       "#101094", # class: 'kd'
        Keyword.Namespace:         "#101094", # class: 'kn'
        Keyword.Pseudo:            "#101094", # class: 'kp'
        Keyword.Reserved:          "#101094", # class: 'kr'
        Keyword.Type:              "#7D2727", # class: 'kt'

        Operator:                  "#303336", # class: 'o'
        Operator.Word:             "#1010B7", # class: 'ow' - like keywords

        Punctuation:               "#3c3d3e", # class: 'p'
        Punctuation.Indicator:     "#000",    # class: 'p-Indicator'

        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name:                      "#303336", # class: 'n'
        Name.Attribute:            "#e64320", # class: 'na' - to be revised
        Name.Builtin:              "#303336", # class: 'nb'
        Name.Builtin.Pseudo:       "#3465A4", # class: 'bp'
        Name.Class:                "#1010B7", # class: 'nc' - to be revised
        Name.Constant:             "#303336", # class: 'no' - to be revised
        Name.Decorator:            "#888",    # class: 'nd' - to be revised
        Name.Entity:               "#ce5c00", # class: 'ni'
        Name.Exception:            "#cc0000", # class: 'ne'
        Name.Function:             "#008000", # class: 'nf'
        Name.Property:             "#303336", # class: 'py'
        Name.Label:                "#f57900", # class: 'nl'
        Name.Namespace:            "#303336", # class: 'nn' - to be revised
        Name.Other:                "#303336", # class: 'nx'
        Name.Tag:                  "#7d2727", # class: 'nt' - like a keyword
        Name.Variable:             "#303336", # class: 'nv' - to be revised
        Name.Variable.Class:       "#303336", # class: 'vc' - to be revised
        Name.Variable.Global:      "#303336", # class: 'vg' - to be revised
        Name.Variable.Instance:    "#303336", # class: 'vi' - to be revised

        Number:                    "#7D2727", # class: 'm'

        Literal:                   "#303336", # class: 'l'
        Literal.Date:              "#303336", # class: 'ld'
        Literal.Scalar.Plain:      '#090977', # class: 'l-Scalar-Plain'

        String:                    "#1010B7", # class: 's'
        String.Backtick:           "#008000", # class: 'sb'
        String.Char:               "#008000", # class: 'sc'
        String.Doc:                "italic #B729D9", # class: 'sd' - like a comment
        String.Double:             "#008000", # class: 's2'
        String.Escape:             "#008000", # class: 'se'
        String.Heredoc:            "#008000", # class: 'sh'
        String.Interpol:           "#008000", # class: 'si'
        String.Other:              "#008000", # class: 'sx'
        String.Regex:              "#008000", # class: 'sr'
        String.Single:             "#7D2727", # class: 's1'
        String.Symbol:             "#008000", # class: 'ss'

        Generic:                   "#000",    # class: 'g'
        Generic.Deleted:           "#A40000", # class: 'gd'
        Generic.Emph:              "italic #444", # class: 'ge'
        Generic.Error:             "#EF2929", # class: 'gr'
        Generic.Heading:           "#1010b7", # class: 'gh'
        Generic.Inserted:          "#00A000", # class: 'gi'
        Generic.Output:            "#888",    # class: 'go'
        Generic.Prompt:            "#745334", # class: 'gp'
        Generic.Strong:            "bold #444", # class: 'gs'
        Generic.Subheading:        "#800080", # class: 'gu'
        Generic.Traceback:         "#A40000", # class: 'gt'
    }
| 48.911111 | 105 | 0.537483 | 469 | 4,402 | 5.025586 | 0.466951 | 0.070004 | 0.042003 | 0.050912 | 0.029275 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107744 | 0.257837 | 4,402 | 89 | 106 | 49.460674 | 0.613713 | 0.362335 | 0 | 0 | 0 | 0 | 0.186515 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.027397 | 0 | 0.09589 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21d2b7b6aeb4285360c3d0b80011bc93dc5f393f | 1,225 | py | Python | pyleecan/Methods/Geometry/Segment/discretize.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 95 | 2019-01-23T04:19:45.000Z | 2022-03-17T18:22:10.000Z | pyleecan/Methods/Geometry/Segment/discretize.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 366 | 2019-02-20T07:15:08.000Z | 2022-03-31T13:37:23.000Z | pyleecan/Methods/Geometry/Segment/discretize.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 74 | 2019-01-24T01:47:31.000Z | 2022-02-25T05:44:42.000Z | # -*- coding: utf-8 -*-
from numpy import linspace
from ....Methods.Machine import LINE_NPOINT_D
from ....Methods.Geometry.Segment import NbPointSegmentDError
def discretize(self, nb_point=LINE_NPOINT_D):
    """Return the discretized version of the Segment.

    Begin and end are always returned.

    Parameters
    ----------
    self : Segment
        A Segment object
    nb_point : int
        Number of points to add to discretize the line
        (Default value = LINE_NPOINT_D)

    Returns
    -------
    list_point : ndarray
        Array of the complex coordinates of the points

    Raises
    ------
    NbPointSegmentDError
        nb_point must be an integer >= 0
    """
    self.check()
    if not isinstance(nb_point, int):
        raise NbPointSegmentDError("discretize : nb_point must be an integer")
    if nb_point < 0:
        raise NbPointSegmentDError("nb_point must be >=0")
    # t starts at 0 (begin) and ends at 1 (end);
    # len(t) = nb_point + 2 : begin + end + nb_point in between
    t = linspace(0, 1, nb_point + 2)
    # We use the complex representation of the points
    z1 = self.begin
    z2 = self.end
    # Generate the points with the parametric representation of the line:
    # z(t) = z1 - (z1 - z2) * t, so z(0) == begin and z(1) == end.
    return z1 - (z1 - z2) * t
| 26.630435 | 86 | 0.648163 | 167 | 1,225 | 4.652695 | 0.419162 | 0.09009 | 0.042471 | 0.050193 | 0.124839 | 0.056628 | 0 | 0 | 0 | 0 | 0 | 0.015436 | 0.259592 | 1,225 | 45 | 87 | 27.222222 | 0.841235 | 0.503673 | 0 | 0 | 0 | 0 | 0.11257 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21d6f524d8375ab9d0bc8eed28866f2443f50ef1 | 2,340 | py | Python | Grid/grid_multi_yaxis.py | pyecharts/pyecharts_gallery | 8430c37df923860b36c9d1d86f2adc9d94b9d72c | [
"MIT"
] | 759 | 2019-04-28T22:42:10.000Z | 2022-03-31T12:32:10.000Z | Grid/grid_multi_yaxis.py | pyecharts/pyecharts_gallery | 8430c37df923860b36c9d1d86f2adc9d94b9d72c | [
"MIT"
] | 65 | 2019-06-10T07:38:25.000Z | 2022-03-24T10:10:03.000Z | Grid/grid_multi_yaxis.py | pyecharts/pyecharts_gallery | 8430c37df923860b36c9d1d86f2adc9d94b9d72c | [
"MIT"
] | 505 | 2019-04-28T08:45:33.000Z | 2022-03-29T07:08:33.000Z | from pyecharts import options as opts
from pyecharts.charts import Bar, Grid, Line
# X axis: months 1月..12月.
x_data = ["{}月".format(i) for i in range(1, 13)]

# Bar chart with two series (evaporation and precipitation), each bound to
# its own y-axis, plus a third extended axis used by the line overlay below.
bar = (
    Bar()
    .add_xaxis(x_data)
    .add_yaxis(
        "蒸发量",  # evaporation series -> y-axis 0
        [2.0, 4.9, 7.0, 23.2, 25.6, 76.7, 135.6, 162.2, 32.6, 20.0, 6.4, 3.3],
        yaxis_index=0,
        color="#d14a61",
    )
    .add_yaxis(
        "降水量",  # precipitation series -> y-axis 1
        [2.6, 5.9, 9.0, 26.4, 28.7, 70.7, 175.6, 182.2, 48.7, 18.8, 6.0, 2.3],
        yaxis_index=1,
        color="#5793f3",
    )
    # Extra right-hand axis for evaporation, labeled in ml.
    .extend_axis(
        yaxis=opts.AxisOpts(
            name="蒸发量",
            type_="value",
            min_=0,
            max_=250,
            position="right",
            axisline_opts=opts.AxisLineOpts(
                linestyle_opts=opts.LineStyleOpts(color="#d14a61")
            ),
            axislabel_opts=opts.LabelOpts(formatter="{value} ml"),
        )
    )
    # Extra left-hand axis for temperature (used by the line chart),
    # labeled in degrees Celsius, with split lines shown.
    .extend_axis(
        yaxis=opts.AxisOpts(
            type_="value",
            name="温度",
            min_=0,
            max_=25,
            position="left",
            axisline_opts=opts.AxisLineOpts(
                linestyle_opts=opts.LineStyleOpts(color="#675bba")
            ),
            axislabel_opts=opts.LabelOpts(formatter="{value} °C"),
            splitline_opts=opts.SplitLineOpts(
                is_show=True, linestyle_opts=opts.LineStyleOpts(opacity=1)
            ),
        )
    )
    # Primary y-axis (precipitation) offset 80px to the right so the three
    # axes do not overlap; crosshair tooltip across all series.
    .set_global_opts(
        yaxis_opts=opts.AxisOpts(
            name="降水量",
            min_=0,
            max_=250,
            position="right",
            offset=80,
            axisline_opts=opts.AxisLineOpts(
                linestyle_opts=opts.LineStyleOpts(color="#5793f3")
            ),
            axislabel_opts=opts.LabelOpts(formatter="{value} ml"),
        ),
        title_opts=opts.TitleOpts(title="Grid-多 Y 轴示例"),
        tooltip_opts=opts.TooltipOpts(trigger="axis", axis_pointer_type="cross"),
    )
)

# Line chart of average temperature, bound to the third y-axis (index 2).
line = (
    Line()
    .add_xaxis(x_data)
    .add_yaxis(
        "平均温度",
        [2.0, 2.2, 3.3, 4.5, 6.3, 10.2, 20.3, 23.4, 23.0, 16.5, 12.0, 6.2],
        yaxis_index=2,
        color="#675bba",
        label_opts=opts.LabelOpts(is_show=False),
    )
)

# Overlay the line onto the bar chart, then place the combined chart in a
# grid that leaves room on the right for the extra y-axes.
bar.overlap(line)
grid = Grid()
grid.add(bar, opts.GridOpts(pos_left="5%", pos_right="20%"), is_control_axis_index=True)
grid.render("grid_multi_yaxis.html")
| 28.536585 | 88 | 0.525214 | 297 | 2,340 | 3.976431 | 0.367003 | 0.101609 | 0.057578 | 0.101609 | 0.385267 | 0.339543 | 0.23116 | 0.160034 | 0.160034 | 0 | 0 | 0.090221 | 0.32265 | 2,340 | 81 | 89 | 28.888889 | 0.654259 | 0 | 0 | 0.358974 | 0 | 0 | 0.070085 | 0.008974 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.025641 | 0 | 0.025641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21d9cfb9234f93c241128ce45315616d74f27dd4 | 1,128 | py | Python | ketchuporo/main.py | v0y/ketchuporo | 1e4f7e487ad2cbc36acbee5cb3271b2c20411a98 | [
"MIT"
] | null | null | null | ketchuporo/main.py | v0y/ketchuporo | 1e4f7e487ad2cbc36acbee5cb3271b2c20411a98 | [
"MIT"
] | null | null | null | ketchuporo/main.py | v0y/ketchuporo | 1e4f7e487ad2cbc36acbee5cb3271b2c20411a98 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from kivy.app import App
from kivy.core.text import LabelBase
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager
from ketchuporo.const import Files
from ketchuporo.controllers import (
BreakScreen,
BreaksOverScreen,
PomodorosOverScreen,
SettingsScreen,
WelcomeScreen,
)
from ketchuporo.controllers import TimerScreen
# Load the kv layout file and register the custom font used by the UI.
Builder.load_file(Files.KV)
LabelBase.register(name='RobotoLight', fn_regular='lib/fonts/roboto_light.ttf')

# Create the screen manager
screen_manager = ScreenManager()
screen_manager.add_widget(WelcomeScreen(name='welcome'))
# TimerScreen and BreakScreen additionally receive the manager itself.
screen_manager.add_widget(
    TimerScreen(screen_manager=screen_manager, name='timer')
)
screen_manager.add_widget(PomodorosOverScreen(name='pomodoros_over'))
screen_manager.add_widget(
    BreakScreen(screen_manager=screen_manager, name='break')
)
screen_manager.add_widget(BreaksOverScreen(name='breaks_over'))
screen_manager.add_widget(SettingsScreen(name='settings'))
class KetchuporoApp(App):
    """Kivy application entry point."""

    def build(self):
        # Kivy calls build() once at startup; the returned widget (the
        # module-level screen manager configured above) becomes the root.
        return screen_manager
# Start the app only when executed as a script, not when imported.
if __name__ == '__main__':
    KetchuporoApp().run()
| 25.636364 | 79 | 0.786348 | 133 | 1,128 | 6.43609 | 0.443609 | 0.19743 | 0.11215 | 0.154206 | 0.130841 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000999 | 0.112589 | 1,128 | 43 | 80 | 26.232558 | 0.854146 | 0.041667 | 0 | 0.064516 | 0 | 0 | 0.088126 | 0.024119 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.225806 | 0.032258 | 0.322581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21dc9a19d80f7a66fcdb3bf1134d323402812861 | 379 | py | Python | reading_plan/reading_plan.py | hidenori-t/snippet | d850bd429931f9268162419d3fb8d7803e58cff3 | [
"MIT"
] | 1 | 2020-06-19T10:06:37.000Z | 2020-06-19T10:06:37.000Z | reading_plan/reading_plan.py | hidenori-t/snippet | d850bd429931f9268162419d3fb8d7803e58cff3 | [
"MIT"
] | null | null | null | reading_plan/reading_plan.py | hidenori-t/snippet | d850bd429931f9268162419d3fb8d7803e58cff3 | [
"MIT"
] | null | null | null | # 読書計画用スニペット
from datetime import date
def reading_plan(title, total_number_of_pages, period):
current_page = int(input("Current page?: "))
deadline = (date(int(period[0]), int(period[1]),
int(period[2])) - date.today()).days
print(title, period, "まで残り", deadline, "days",
(total_number_of_pages - current_page) // deadline, "p/day")
| 34.454545 | 70 | 0.638522 | 49 | 379 | 4.755102 | 0.571429 | 0.141631 | 0.111588 | 0.154506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009967 | 0.205805 | 379 | 10 | 71 | 37.9 | 0.76412 | 0.026385 | 0 | 0 | 0 | 0 | 0.076294 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.285714 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21e208b57e398b0bb168739f3f960a4bcb46ffb1 | 603 | py | Python | FRMinutesDiscoutRate-BS.py | henchc/web-scrapers | e9f852524fce8ba8c3948f6de242185220a436cb | [
"MIT"
] | 15 | 2017-11-03T20:44:26.000Z | 2022-01-02T22:07:15.000Z | FRMinutesDiscoutRate-BS.py | henchc/web-scrapers | e9f852524fce8ba8c3948f6de242185220a436cb | [
"MIT"
] | null | null | null | FRMinutesDiscoutRate-BS.py | henchc/web-scrapers | e9f852524fce8ba8c3948f6de242185220a436cb | [
"MIT"
] | 1 | 2020-09-26T22:52:51.000Z | 2020-09-26T22:52:51.000Z | from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
# Fetch the Federal Reserve discount-rate page and parse its <option> list.
html = urlopen("http://www.federalreserve.gov/monetarypolicy/discountrate.htm")
bsObj = BeautifulSoup(html.read(), "lxml")
d1 = bsObj.findAll("option")

# Collect (absolute URL, option text) pairs for every option whose text
# mentions PDF; the option's value attribute holds the relative link.
urls = []
for item in d1:
    if "PDF" in str(item.get_text()):
        prefix = "http://www.federalreserve.gov"
        url = prefix + str(item['value'])
        urls.append((url, str(item.get_text())))

# Keep only the first three entries (presumably the most recent minutes —
# TODO confirm the page lists them newest-first).
urls = urls[:3]
print(len(urls))

# Download each PDF, naming the file after the option's text.
for url in urls:
    res = urlopen(Request(url[0]))
    pdf = open((url[1] + ".pdf"), 'wb')
    pdf.write(res.read())
    pdf.close()
| 24.12 | 79 | 0.640133 | 84 | 603 | 4.571429 | 0.52381 | 0.054688 | 0.109375 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012146 | 0.180763 | 603 | 24 | 80 | 25.125 | 0.765182 | 0 | 0 | 0 | 0 | 0 | 0.189055 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21e2ac869244887a79d078c7ec21be9ea889f1fc | 644 | py | Python | py_analysis_quick_start/temp_line.py | jack-zheng/olm-analysis | 16bdc06ece9c2b42983722b37d1c1688eaa90e0f | [
"MIT"
] | null | null | null | py_analysis_quick_start/temp_line.py | jack-zheng/olm-analysis | 16bdc06ece9c2b42983722b37d1c1688eaa90e0f | [
"MIT"
] | null | null | null | py_analysis_quick_start/temp_line.py | jack-zheng/olm-analysis | 16bdc06ece9c2b42983722b37d1c1688eaa90e0f | [
"MIT"
] | null | null | null | import random
import matplotlib
from matplotlib import pyplot as plt
# 显示中文
# Windows/Linus
font = {'family' : 'MicroSoft Yahei', 'weight': 'bold', 'size': '9'}
matplotlib.rc("font", **font)
# 设置图片 大小 20x8
fig = plt.figure(figsize=(20, 8), dpi=80)
y = [random.randint(20, 35) for i in range(120)]
x = range(120)
plt.plot(x, y)
# 调整 x 轴刻度
_xticks_labels = ["10点{}分".format(i) for i in range(60)]
_xticks_labels += ["11点{}分".format(i) for i in range(60)]
# 将 x 和 labels 一一绑定; rotation 旋转 label
plt.xticks(list(x)[::3], _xticks_labels[::3], rotation=45)
# 添加描述信息
plt.xlabel("时间")
plt.ylabel("温度 'C")
plt.title("10点到12点温度变化")
# 展示
plt.show() | 21.466667 | 68 | 0.664596 | 110 | 644 | 3.836364 | 0.609091 | 0.028436 | 0.042654 | 0.078199 | 0.099526 | 0.099526 | 0.099526 | 0.099526 | 0 | 0 | 0 | 0.063521 | 0.14441 | 644 | 30 | 69 | 21.466667 | 0.702359 | 0.135093 | 0 | 0 | 0 | 0 | 0.127273 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1875 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21e4e94462869b5e4859a8999659718ac1c5f326 | 935 | py | Python | main.py | gaurav-karna/ReminderBot | e9c37f1c62a6b0f3e9f69ec5cc983ce2334ae6db | [
"MIT"
] | null | null | null | main.py | gaurav-karna/ReminderBot | e9c37f1c62a6b0f3e9f69ec5cc983ce2334ae6db | [
"MIT"
] | null | null | null | main.py | gaurav-karna/ReminderBot | e9c37f1c62a6b0f3e9f69ec5cc983ce2334ae6db | [
"MIT"
] | null | null | null | from sys import exit
import argparse
ALL_ARGS = None
# found a free server to execute blindly, will update with CRON if need be
def sanity():
if ALL_ARGS.hour > 23 or ALL_ARGS.hour < 0:
print('Error in hour provided, must be >= 0 and <= 23')
exit(0)
if ALL_ARGS.min > 59 or ALL_ARGS.min < 0:
print('Error in minute provided, must be >= 0 and <= 59')
exit(0)
def set_cron():
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='CRON setup to send reminder texts')
parser.add_argument('--cron', action='store_true')
parser.add_argument('--hour', help='hour in EST', type=int)
parser.add_argument('--min', help='min of hour, default = 0', type=int, default=0)
parser.add_argument('--msg', help='message to send', type=str, default='None provided')
ALL_ARGS = parser.parse_args()
print(ALL_ARGS.cron)
# sanity()
# set_cron()
| 28.333333 | 91 | 0.652406 | 142 | 935 | 4.133803 | 0.443662 | 0.083475 | 0.115843 | 0.044293 | 0.061329 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021798 | 0.214973 | 935 | 32 | 92 | 29.21875 | 0.777929 | 0.098396 | 0 | 0.1 | 0 | 0 | 0.274136 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0.05 | 0.1 | 0 | 0.2 | 0.15 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21e56d9e00f8e568b7ebb5d7d2e6c10697c117cb | 1,846 | py | Python | tests/test_model.py | JanCVanB/netflix | e3ff4feab832846640dbb3daa5877ef84a00adaf | [
"MIT"
] | 22 | 2016-12-14T03:58:28.000Z | 2021-03-07T00:10:05.000Z | tests/test_model.py | JanCVanB/netflix | e3ff4feab832846640dbb3daa5877ef84a00adaf | [
"MIT"
] | 1 | 2015-06-16T07:30:35.000Z | 2015-06-16T07:39:18.000Z | tests/test_model.py | jvanbrug/netflix | e3ff4feab832846640dbb3daa5877ef84a00adaf | [
"MIT"
] | 5 | 2016-12-05T08:55:15.000Z | 2017-11-01T23:33:05.000Z | import numpy as np
import os
import pickle
import random
from algorithms import model as model_algorithm
from utils import data_paths
def test_model_can_create_instance_with_no_arguments():
    """Model() must be constructible without any arguments."""
    model_algorithm.Model()
def test_model_load_creates_the_expected_instance():
    """Model.load must reconstruct a pickled model's attributes."""
    original = model_algorithm.Model()
    original.x = random.random()
    original.y = np.array([random.random()])

    file_name = 'test.p'
    file_path = os.path.join(data_paths.MODELS_DIR_PATH, file_name)
    message = '{} is for test use only'.format(file_path)
    assert not os.path.isfile(file_path), message

    try:
        with open(file_path, 'wb+') as handle:
            pickle.dump(original, handle)
        restored = model_algorithm.Model.load(file_name)
        assert restored.x == original.x
        np.testing.assert_array_equal(restored.y, original.y)
    finally:
        # Always clean up the fixture file, even if assertions fail.
        try:
            os.remove(file_path)
        except FileNotFoundError:
            pass
def test_model_save_writes_the_expected_file():
    """Model.save must write a pickle that round-trips the attributes."""
    original = model_algorithm.Model()
    original.x = random.random()
    original.y = np.array([random.random()])

    file_name = 'test.p'
    file_path = os.path.join(model_algorithm.MODELS_DIR_PATH, file_name)
    message = '{} is for test use only'.format(file_path)
    assert not os.path.isfile(file_path), message

    try:
        original.save(file_name)
        with open(file_path, 'rb') as handle:
            saved = pickle.load(handle)
        assert saved.x == original.x
        np.testing.assert_array_equal(saved.y, original.y)
    finally:
        # Always clean up the fixture file, even if assertions fail.
        try:
            os.remove(file_path)
        except FileNotFoundError:
            pass
| 33.563636 | 77 | 0.641387 | 247 | 1,846 | 4.481781 | 0.242915 | 0.072267 | 0.054201 | 0.065041 | 0.444444 | 0.34869 | 0.34869 | 0.296296 | 0.187895 | 0.121048 | 0 | 0 | 0.269231 | 1,846 | 54 | 78 | 34.185185 | 0.820608 | 0 | 0 | 0.340426 | 0 | 0 | 0.034128 | 0 | 0 | 0 | 0 | 0 | 0.12766 | 1 | 0.06383 | false | 0.042553 | 0.12766 | 0 | 0.191489 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21e6f2feb888d1690e95e165f59b5155700dbb10 | 533 | py | Python | serenote/utils/alert.py | LeptoFlare/todo-bot | 5c619abe421e4731c0dbc73561fbbd4a20839422 | [
"MIT"
] | 3 | 2020-10-07T04:41:25.000Z | 2021-04-23T19:52:53.000Z | serenote/utils/alert.py | LeptoFlare/serenote | 5c619abe421e4731c0dbc73561fbbd4a20839422 | [
"MIT"
] | 8 | 2020-10-07T00:13:00.000Z | 2020-11-23T09:01:08.000Z | serenote/utils/alert.py | LeptoFlare/serenote | 5c619abe421e4731c0dbc73561fbbd4a20839422 | [
"MIT"
] | null | null | null | import discord
class Alert(discord.Embed):
    """Discord embed preformatted as an alert with an emoji-tagged title."""

    # Maps an alert type name to its custom emoji markup.
    alert_types = {
        "error": "<:error:807799799721230347>",
    }

    def __init__(self, alert_type, title: str, description: str = discord.Embed.Empty):
        formatted_title = self.process_title(alert_type, title)
        super().__init__(
            color=discord.Color.blurple(),
            title=formatted_title,
            description=description,
        )

    @classmethod
    def process_title(cls, alert_type, title):
        """Prefix *title* with the alert emoji and the capitalized type."""
        emoji = cls.alert_types[alert_type]
        label = alert_type.capitalize()
        return f"{emoji} {label}: **{title}**"
| 26.65 | 87 | 0.628518 | 57 | 533 | 5.578947 | 0.438596 | 0.141509 | 0.132075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.044335 | 0.238274 | 533 | 19 | 88 | 28.052632 | 0.738916 | 0 | 0 | 0 | 0 | 0 | 0.187617 | 0.153846 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.071429 | 0.071429 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21e79dcdcdee9f92045ac51d4e85b03f25e5e3c8 | 2,616 | py | Python | fixture/project.py | AleksandrSmoliak/mantis | be0f48a6a63a2b955755031a721db7fb8761fe97 | [
"Apache-2.0"
] | null | null | null | fixture/project.py | AleksandrSmoliak/mantis | be0f48a6a63a2b955755031a721db7fb8761fe97 | [
"Apache-2.0"
] | null | null | null | fixture/project.py | AleksandrSmoliak/mantis | be0f48a6a63a2b955755031a721db7fb8761fe97 | [
"Apache-2.0"
] | null | null | null | from model.project import Project
class ProjectHelper:
    """Selenium page-object helper for Mantis project administration."""

    def __init__(self, app):
        # app is the application fixture exposing the shared webdriver (app.wd).
        self.app = app

    def open_project_page(self):
        """Navigate to the project management page unless already there."""
        wd = self.app.wd
        if not (wd.current_url.endswith("/manage_proj_create_page.php")):
            wd.find_element_by_xpath("//div[@id='main-container']/div[@id='sidebar']/ul[@class='nav nav-list']/li[7]/a").click()
            wd.find_element_by_xpath("//div[@class='row']/ul/li[3]/a").click()

    def create_project(self, project):
        """Create a new project from the given Project model object."""
        wd = self.app.wd
        # Open the project creation page
        self.open_project_page()
        # Go to the project creation form
        wd.find_element_by_xpath("//button[@class='btn btn-primary btn-white btn-round']").click()
        # Fill in the form fields
        self.fill_project_form(project)
        # Click the add-project button
        wd.find_element_by_xpath("//input[@class='btn btn-primary btn-white btn-round']").click()

    def fill_project_form(self, project):
        """Populate the creation form from *project*'s name and description."""
        self.change_field_value("name", project.name)
        self.change_field_value("description", project.description)

    def change_field_value(self, field_name, text):
        """Clear the named input and type *text* into it (skips None)."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)

    def project_count(self):
        """Return the number of project rows on the management page."""
        wd = self.app.wd
        return len(wd.find_elements_by_xpath("//div[@class='widget-box widget-color-blue2']/div[2]/div[1]/div[2]/table/tbody/tr"))

    def get_project_list(self):
        """Scrape the project table into a list of Project objects."""
        wd = self.app.wd
        self.open_project_page()
        self.project_list = []
        for element in wd.find_elements_by_xpath("//div[@class='table-responsive']/table/tr"):
            cells = element.find_elements_by_tag_name("td")
            # NOTE(review): cells[1]/cells[5] are assumed to be the name and
            # description columns — confirm against the rendered table.
            name = cells[1]
            description = cells[5]
            self.project_list.append(Project(name=name, description=description))
        return list(self.project_list)

    def open_project_by_index(self, index):
        """Open the project at row *index* (0-based) via its link."""
        wd = self.app.wd
        self.open_project_page()
        row = wd.find_elements_by_xpath("//div[@class='table-responsive']/table/tbody/tr")[index]
        cell = row.find_elements_by_tag_name("td")[0]
        cell.find_element_by_tag_name("a").click()

    def del_project_by_index(self, index):
        """Delete the project at row *index*; the second click confirms."""
        wd = self.app.wd
        self.open_project_by_index(index)
        # The button value is Russian for "Delete project".
        wd.find_element_by_xpath("//input[@value='Удалить проект']").click()
        wd.find_element_by_xpath("//input[@value='Удалить проект']").click()
21e821b8f988e5658b9c301ffb5c85c84dc141ff | 773 | py | Python | main.py | icarus747/DCS_MGRS_Converter | 3ce687a697f1313658726a8881c134d321d67c22 | [
"MIT"
] | null | null | null | main.py | icarus747/DCS_MGRS_Converter | 3ce687a697f1313658726a8881c134d321d67c22 | [
"MIT"
] | 1 | 2020-10-12T20:11:27.000Z | 2020-10-12T22:33:23.000Z | main.py | icarus747/DCS_MGRS_Converter | 3ce687a697f1313658726a8881c134d321d67c22 | [
"MIT"
] | 1 | 2020-10-12T16:43:15.000Z | 2020-10-12T16:43:15.000Z | #!/usr/bin/env python3
# By Icarus747
# Created 10/11/2020
# Used for converting DCS MGRS grid coordinates to Lat/Long coordinates.
import mgrs
import re
def main():
    """Interactively convert a DCS MGRS grid string to Lat/Long output."""
    converter = mgrs.MGRS()
    # Example input: '38TLN046623'
    raw = input("Enter MGRS cord.\n\r")
    raw = validate_mgrs(raw)
    lat_lon = converter.toLatLon(raw)
    # ddtodms apparently yields (degrees, minutes, seconds) -- confirm against
    # the mgrs package; seconds are folded into decimal minutes (one place).
    lat_dms = converter.ddtodms(lat_lon[0])
    lat_minutes = round(lat_dms[1] + lat_dms[2] / 60, 1)
    lon_dms = converter.ddtodms(lat_lon[1])
    lon_minutes = round(lon_dms[1] + lon_dms[2] / 60, 1)
    print(f"N {int(lat_dms[0])} {lat_minutes}")
    print(f"E 0{int(lon_dms[0])} {lon_minutes}")
def validate_mgrs(dcs):
    """Validate a DCS-style MGRS string: 2-digit zone, 3 letters, then an
    even number of location digits (6, 8 or 10).

    Returns the input unchanged when it looks valid; otherwise prints a
    message and restarts the interactive session.
    """
    # Anchored at both ends so trailing garbage is rejected (the old pattern
    # ended on \b and accepted e.g. '38TLN046623 junk'); the digit part is
    # grouped in pairs because MGRS always uses equal easting/northing digits.
    pattern = r'^\d{2}[a-z]{3}(?:\d{2}){3,5}$'
    if re.match(pattern, dcs.lower()):
        return dcs
    else:
        print("Verify MGRS grid and try again.\n\r")
        # NOTE(review): recursing into main() means this call returns None to
        # the original caller once the nested session ends -- pre-existing flow.
        main()
# Run the interactive converter only when executed as a script.
if __name__ == '__main__':
    main()
| 20.342105 | 72 | 0.575679 | 127 | 773 | 3.425197 | 0.480315 | 0.048276 | 0.068966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.068376 | 0.243208 | 773 | 37 | 73 | 20.891892 | 0.675214 | 0.186287 | 0 | 0.090909 | 0 | 0 | 0.214744 | 0.041667 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.227273 | 0.136364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21ea48feb3e6f28dc69bc7b3d03c6225f2e7de16 | 637 | py | Python | backend/management/commands/setentryfolder.py | Software-Engineering-Bachelor-Project/mycroft | 8ca3b6bfaa7b573f67def06c637f3c57838440a4 | [
"MIT"
] | 5 | 2020-03-01T11:17:09.000Z | 2021-07-08T20:45:47.000Z | backend/management/commands/setentryfolder.py | Software-Engineering-Bachelor-Project/mycroft | 8ca3b6bfaa7b573f67def06c637f3c57838440a4 | [
"MIT"
] | 245 | 2020-03-28T11:59:12.000Z | 2020-05-26T10:05:22.000Z | backend/management/commands/setentryfolder.py | Software-Engineering-Bachelor-Project/mycroft | 8ca3b6bfaa7b573f67def06c637f3c57838440a4 | [
"MIT"
] | 5 | 2020-02-03T08:15:13.000Z | 2020-04-15T07:22:47.000Z | from django.core.management.base import BaseCommand
from backend.file_manager import build_file_structure
class Command(BaseCommand):
    """Management command that builds the file structure for a new entry folder."""

    help = 'Sets entry folder to given file path.'

    def add_arguments(self, parser):
        # One required positional argument: the path of the entry folder.
        parser.add_argument('folder', type=str, help='Path to entry folder')

    def handle(self, *args, **kwargs):
        entry_folder = kwargs['folder']
        try:
            build_file_structure(file_path=entry_folder)
        except ValueError as err:
            self.stdout.write(str(err))
            self.stderr.write("Failed to add entry folder.")
        else:
            self.stdout.write("Successfully added entry folder.")
| 33.526316 | 76 | 0.66876 | 81 | 637 | 5.160494 | 0.530864 | 0.105263 | 0.086124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.230769 | 637 | 18 | 77 | 35.388889 | 0.853061 | 0 | 0 | 0 | 0 | 0 | 0.200942 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21eb2ec30e5683c312af0b66057c5639110b2683 | 2,149 | py | Python | work/adder.py | MorrisMA/py65 | bda1553ff88fc577944bde3d7cb3e75a3b83ccfa | [
"BSD-3-Clause"
] | null | null | null | work/adder.py | MorrisMA/py65 | bda1553ff88fc577944bde3d7cb3e75a3b83ccfa | [
"BSD-3-Clause"
] | null | null | null | work/adder.py | MorrisMA/py65 | bda1553ff88fc577944bde3d7cb3e75a3b83ccfa | [
"BSD-3-Clause"
] | null | null | null | from math import *
# Processor-status flag bit masks for the packed nvzc nibble
# (matching the 6502-style N/V/Z/C flags this simulator models).
N = 8  # negative: sign bit of the result is set
V = 4  # overflow: signed overflow occurred
Z = 2  # zero: masked result is zero
C = 1  # carry out of the most significant bit
# Sign bit and value mask for 12-bit "word" operations ...
wordSign = 1 << 11
wordMask = (1 << 12) - 1
# ... and for 8-bit "byte" operations.
byteSign = 1 << 7
byteMask = (1 << 8) - 1


def adder(op, a, b, cin, siz=False):
    """Model a binary adder/subtractor and compute its NVZC status flags.

    op  -- 0 for addition (a + b + cin), 1 for subtraction (a + ~b + cin,
           i.e. two's-complement subtract when cin == 1)
    a   -- left operand
    b   -- right operand
    cin -- carry in (0 or 1)
    siz -- False for 8-bit byte mode, True for 12-bit word mode
    Returns (nvzc, sum, auL, auR): the packed flag nibble, the masked
    result, and the two masked ALU inputs.
    """
    if siz:
        sign = wordSign
        mask = wordMask
    else:
        sign = byteSign
        mask = byteMask
    auL = mask & a
    # Subtraction feeds the one's complement of b into the adder; together
    # with cin == 1 this performs two's-complement subtraction.
    # (The original also computed an unused local `din = mask & b`; removed.)
    if op == 1:
        auR = mask & ~b
    else:
        auR = mask & b
    total = auL + auR + cin  # renamed from `sum` to avoid shadowing the builtin
    nvzc = 0
    if sign & total:
        nvzc |= N
    # Signed overflow: the inputs share a sign that differs from the result's.
    if (~(auL ^ auR) & (auL ^ total)) & sign:
        nvzc |= V
    if (mask & total) == 0:
        nvzc |= Z
    if total > mask:
        nvzc |= C
    return nvzc, total & mask, auL, auR
# Sanity check: show the flag values and the word/byte sign and mask constants.
print(N, V, Z, C, '%04X %04X %02X %02X' % (wordSign, wordMask, byteSign, byteMask))
# Map each NVZC flag nibble (0-15) to its 4-character display string,
# e.g. 0 -> '----', 9 -> 'N--C', 15 -> 'NVZC'.
stat = {nibble: ('N' if nibble & 8 else '-') +
                ('V' if nibble & 4 else '-') +
                ('Z' if nibble & 2 else '-') +
                ('C' if nibble & 1 else '-')
        for nibble in range(16)}
# Generate an exhaustive 8-bit test-vector file: for every operand pair
# (i, j) emit one line per (operation, carry-in) combination, in the fixed
# order ADD/0, ADD/1, SUB/1, SUB/0 (the original repeated the call and the
# print four times verbatim; collapsed into one inner loop).
with open("adder08b.txt", "wt") as fout:
    progress = 0
    for i in range(256):
        for j in range(256):
            for op, cin in ((0, 0), (0, 1), (1, 1), (1, 0)):
                nvzc, total, auL, auR = adder(op, i, j, cin)
                print('%s, %1d, %02X, %02X, %02X, %02X, %02X, %1d' %
                      (stat[nvzc], op, total, i, j, auL, auR, cin), file=fout)
        # Console progress indicator: the i values, 16 per row.
        if progress == 0:
            print('%02X' % (i), end='')
        else:
            print(', %02X' % (i), end='')
        progress += 1
        if progress == 16:
            print()
            progress = 0
| 24.988372 | 69 | 0.378781 | 302 | 2,149 | 2.695364 | 0.241722 | 0.125307 | 0.132678 | 0.117936 | 0.373464 | 0.373464 | 0.373464 | 0.373464 | 0.304668 | 0.304668 | 0 | 0.105221 | 0.420661 | 2,149 | 85 | 70 | 25.282353 | 0.548594 | 0 | 0 | 0.118421 | 0 | 0.052632 | 0.127967 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013158 | false | 0 | 0.013158 | 0 | 0.039474 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21ed527c3321a9993fe603a88785851b9688cff9 | 2,289 | py | Python | optimism/contact/PenaltyContact.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | [
"MIT"
] | null | null | null | optimism/contact/PenaltyContact.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | [
"MIT"
] | 1 | 2022-03-12T00:01:12.000Z | 2022-03-12T00:01:12.000Z | optimism/contact/PenaltyContact.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | [
"MIT"
] | 3 | 2021-12-23T19:53:31.000Z | 2022-03-27T23:12:03.000Z | from optimism.JaxConfig import *
from optimism import Mesh
from optimism import QuadratureRule
from optimism import Surface
import numpy as onp
def get_current_coordinates_at_quadrature_points(mesh, dispField, quadRule, edge):
    """Return the deformed (current) edge coordinates evaluated at the
    quadrature points of quadRule."""
    idx = Surface.get_field_index(edge, mesh.conns)
    referenceCoords = Surface.eval_field(mesh.coords, idx)
    displacements = Surface.eval_field(dispField, idx)
    return QuadratureRule.eval_at_iso_points(quadRule.xigauss,
                                             referenceCoords + displacements)
def evaluate_levelset_on_edge(levelset, mesh, dispField, quadRule, edge):
    """Evaluate the level set at the current (deformed) quadrature-point
    coordinates of the given edge."""
    # Reuse the shared helper instead of duplicating its body line-for-line.
    quadratureCurCoords = get_current_coordinates_at_quadrature_points(
        mesh, dispField, quadRule, edge)
    return levelset(quadratureCurCoords)
def compute_edge_penalty_contact_energy(levelset, mesh, dispField, quadRule, edge, stiffness):
    """Quadratic penalty energy for one edge: stiffness times the integral of
    min(0, levelset)^2 over the edge (integrated on the reference coordinates)."""
    idx = Surface.get_field_index(edge, mesh.conns)
    edgeCoords = Surface.eval_field(mesh.coords, idx)
    edgeDisps = Surface.eval_field(dispField, idx)
    curCoords = QuadratureRule.eval_at_iso_points(quadRule.xigauss,
                                                  edgeCoords + edgeDisps)
    # Only negative level-set values (penetration) contribute to the energy.
    penetration = np.minimum(0.0, levelset(curCoords))
    return stiffness * Surface.integrate_values(quadRule, edgeCoords,
                                                np.square(penetration))
def evaluate_contact_constraints(levelset, dispField, mesh, quadRule, edges):
    """Evaluate the level set at the quadrature points of every edge.

    Vectorized (vmap) over the leading axis of `edges`; the remaining
    arguments are broadcast unchanged.
    """
    return vmap(evaluate_levelset_on_edge, (None,None,None,None,0))(levelset, mesh, dispField, quadRule, edges)
def compute_total_penalty_contact_energy(levelset, dispField, mesh, quadRule, edges, stiffness):
    """Sum the per-edge penalty contact energies over all edges.

    Vectorized (vmap) over the leading axis of `edges`; all other arguments,
    including the penalty stiffness, are broadcast unchanged.
    """
    return np.sum(vmap(compute_edge_penalty_contact_energy, (None,None,None,None,0,None))(levelset, mesh, dispField, quadRule, edges, stiffness))
def compute_fisher_burmeister_linearization(levelset, disp, mesh, quadRule, edges, lmbda):
    """Per-edge Fisher-Burmeister-style quantities for the contact constraints.

    NOTE(review): this function looks unfinished -- the computed values are
    discarded and nothing is returned; confirm the intended use before relying
    on it.
    """
    for edge in edges:
        # Bug fix: the original referenced the undefined name `dispField`;
        # the parameter is called `disp`.
        quadratureCurCoords = get_current_coordinates_at_quadrature_points(
            mesh, disp, quadRule, edge)
        phi = levelset(quadratureCurCoords)
        t = np.sqrt(phi**2 + lmbda**2)
        dlambda = ( t**2 - t*(phi+lmbda) ) / (t-lmbda)
| 40.875 | 145 | 0.7737 | 270 | 2,289 | 6.355556 | 0.244444 | 0.045455 | 0.073427 | 0.058275 | 0.613054 | 0.453963 | 0.453963 | 0.453963 | 0.453963 | 0.417249 | 0 | 0.003543 | 0.136741 | 2,289 | 55 | 146 | 41.618182 | 0.864879 | 0 | 0 | 0.323529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0.147059 | 0.058824 | 0.470588 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21ee589b914a86f78fa5eda580da880b00125bd4 | 1,970 | py | Python | nlplingo/oregon/event_models/uoregon/layers/elmo.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | [
"Apache-2.0"
] | 3 | 2020-10-22T13:28:00.000Z | 2022-03-24T19:57:22.000Z | nlplingo/oregon/event_models/uoregon/layers/elmo.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | [
"Apache-2.0"
] | null | null | null | nlplingo/oregon/event_models/uoregon/layers/elmo.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | [
"Apache-2.0"
] | 1 | 2020-10-22T13:29:51.000Z | 2020-10-22T13:29:51.000Z | import torch
from torch.nn import ParameterList, Parameter
class Elmo(torch.nn.Module):
    """
    Computes a parameterised scalar mixture of N tensors,
    `mixture = gamma * sum(s_k * tensor_k)` where `s = softmax(w)`,
    with `w` and `gamma` scalar parameters.

    NOTE(review): `do_layer_norm` is stored but was never applied in
    `forward` (the original contained a dead `_do_layer_norm` helper that
    was never called; it has been removed). Confirm whether layer
    normalization is actually required before relying on this flag.
    """

    def __init__(
        self,
        mixture_size,
        do_layer_norm=False,
        trainable=True,
    ):
        """
        mixture_size  -- number of input tensors to mix
        do_layer_norm -- accepted for interface compatibility (not applied)
        trainable     -- whether the mixing weights and gamma require grad
        """
        super().__init__()
        self.mixture_size = mixture_size
        self.do_layer_norm = do_layer_norm

        # One scalar weight per input tensor, initialised to zero so the
        # initial softmax mixture is uniform.
        self.scalar_parameters = ParameterList(
            [
                Parameter(torch.FloatTensor([0.0]), requires_grad=trainable)
                for _ in range(mixture_size)
            ]
        )
        # Overall scale of the mixture, initialised to one.
        self.gamma = Parameter(torch.FloatTensor([1.0]), requires_grad=trainable)

    def forward(self, tensors):
        """
        tensors.shape = num feature layers * [batch size, num tokens, rep dim]
        Returns gamma * sum_k softmax(w)_k * tensors[k].
        """
        normed_weights = torch.nn.functional.softmax(
            torch.cat([parameter for parameter in self.scalar_parameters]), dim=0
        )
        normed_weights = torch.split(normed_weights, split_size_or_sections=1)
        pieces = [weight * tensor for weight, tensor in zip(normed_weights, tensors)]
        return self.gamma * sum(pieces)
| 34.561404 | 103 | 0.607614 | 223 | 1,970 | 5.116592 | 0.403587 | 0.070114 | 0.048203 | 0.052585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007262 | 0.301015 | 1,970 | 56 | 104 | 35.178571 | 0.821351 | 0.167005 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.052632 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21f19e371c588730cf753a1e2c9dc29cf4e55481 | 5,082 | py | Python | walter/common/walterWidgets/walterBaseVariantsMenu.py | all-in-one-of/OpenWalter | c2034f7fac20b36ffe3e500c01d40b87e84e2b97 | [
"libtiff"
] | 187 | 2018-08-14T19:06:20.000Z | 2022-03-04T06:03:25.000Z | walter/common/walterWidgets/walterBaseVariantsMenu.py | all-in-one-of/OpenWalter | c2034f7fac20b36ffe3e500c01d40b87e84e2b97 | [
"libtiff"
] | 9 | 2018-08-22T15:34:48.000Z | 2019-11-27T13:45:21.000Z | walter/common/walterWidgets/walterBaseVariantsMenu.py | all-in-one-of/OpenWalter | c2034f7fac20b36ffe3e500c01d40b87e84e2b97 | [
"libtiff"
] | 41 | 2018-08-14T19:06:09.000Z | 2021-09-04T20:01:10.000Z | """
Base classes for USD variants. It's written on Python to be
able to use complex UI and PySide.
"""
# Copyright 2017 Rodeo FX. All rights reserved.
import json
from .Qt import QtWidgets
from walterComplexMenu import ComplexMenu
class VariantSet():
"""Per variantset variants list."""
def __init__(self, name):
self.name = name
self.variants = []
self.selectedVariant = ''
def setVariants(self, variants, selection):
self.variants = variants
self.selectedVariant = selection
class BaseVariantAction(QtWidgets.QAction):
def __init__(self, primPath, index, variantName,
variantSetName, isSelected, menu):
super(BaseVariantAction, self).__init__(variantName, menu)
self.menu = menu
self.index = index
self.primPath = primPath
self.variantName = variantName
self.variantSetName = variantSetName
self.setCheckable(isSelected)
self.setChecked(isSelected)
self.triggered.connect(self.__trigger)
def _setVariantValue(self):
pass
def __trigger(self):
self.menu.uncheckedActions()
self._setVariantValue()
self.setCheckable(True)
self.setChecked(True)
class BaseVariantSetMenu(ComplexMenu):
def __init__(self, primPath, index, variantSet, parent):
super(BaseVariantSetMenu, self).__init__(parent)
self.parent = parent
self.setTitle(variantSet.name)
self.actionList = []
for variant in variantSet.variants:
isSelected = variant == variantSet.selectedVariant
action = self._constructVariantAction(
primPath, index, variant,
variantSet.name, isSelected)
self.addAction(action)
self.actionList.append(action)
def _constructVariantAction(self, primPath, index, variant,
variantSet, isSelected):
pass
def uncheckedActions(self):
for action in self.actionList:
action.setCheckable(False)
action.setChecked(False)
def mouseReleaseEvent(self, event):
"""Implementation of ComplexMenu."""
action = self.activeAction()
ctrlKey = self.ctrlKeyEvent()
if action and action.isEnabled() and ctrlKey:
# QMenu will not disapear if the user clicked by disabled action.
action.setEnabled(False)
action.trigger()
super(ComplexMenu, self).mouseReleaseEvent(event)
action.setEnabled(True)
else:
super(ComplexMenu, self).mouseReleaseEvent(event)
class BaseVariantsMenu(QtWidgets.QMenu):
"""Menu for editing walter variants."""
class ObjectVariantSet():
"""Per object variantsets list."""
def __init__(self, name):
self.name = name
self.variantSets = []
def addVariantSet(self, variantSet):
self.variantSets.append(variantSet)
def __init__(self, parent=None):
super(BaseVariantsMenu, self).__init__(parent)
self.parent = parent
self.nodePath = ''
self.primPath = ''
def reset(self, nodePath, primPath='', title=None,
addSeparators=True, tearOff=True, recursively=True):
self.clear()
self.nodePath = nodePath
self.primPath = primPath
title_ = title
if not title_:
title_ = nodePath + '-' + primPath
self.setTitle(title_)
self.setWindowTitle(title_)
self.setTearOffEnabled(tearOff)
self.setStyleSheet("menu-scrollable: 1;")
if addSeparators:
self.setSeparatorsCollapsible(False)
self.__constructMenu(
self._getVariantList(recursively), addSeparators)
return not self.isEmpty()
def _getVariantList(self, recursively):
pass
def _createMenu(self, primPath, index, variantSet):
pass
def __constructMenu(self, variantSetsStr, addSeparators):
objectVariantSets = []
for variantStr in variantSetsStr:
js = json.loads(variantStr)
objectVariantSet = BaseVariantsMenu.ObjectVariantSet(
js['prim'])
for jsVariant in js['variants']:
variantSet = VariantSet(jsVariant['set'])
variantSet.setVariants(jsVariant['names'], jsVariant['selection'])
objectVariantSet.addVariantSet(variantSet)
objectVariantSets.append(objectVariantSet)
# Creates a variantset menu per object
for idx, objectVariantSet in enumerate(objectVariantSets):
if addSeparators:
action = self.addAction(objectVariantSet.name)
# Cause a diplay bug. The titles are cropped
# action.setSeparator(True)
action.setEnabled(False)
for variantSet in objectVariantSet.variantSets:
self.addMenu(self._createMenu(
objectVariantSet.name,
idx, variantSet))
| 30.987805 | 82 | 0.62436 | 454 | 5,082 | 6.872247 | 0.306167 | 0.026923 | 0.017628 | 0.009615 | 0.086538 | 0.044231 | 0.044231 | 0.022436 | 0.022436 | 0 | 0 | 0.001386 | 0.29024 | 5,082 | 163 | 83 | 31.177914 | 0.863599 | 0.085596 | 0 | 0.160714 | 0 | 0 | 0.010618 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0.035714 | 0.026786 | 0 | 0.223214 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21f244a8537b9d091ee8fe82b19616dc52ef2d25 | 4,983 | py | Python | src/speechless/readers/audio.py | Exepp/SpeechLess | 6e7424e979f39132650db0d7426c1e9449dc43b8 | [
"MIT"
] | 1 | 2022-03-17T14:51:41.000Z | 2022-03-17T14:51:41.000Z | src/speechless/readers/audio.py | Exepp/SpeechLess | 6e7424e979f39132650db0d7426c1e9449dc43b8 | [
"MIT"
] | 14 | 2021-06-23T02:27:22.000Z | 2021-11-27T15:43:39.000Z | src/speechless/readers/audio.py | Exepp/SpeechLess | 6e7424e979f39132650db0d7426c1e9449dc43b8 | [
"MIT"
] | null | null | null | import subprocess
import av
import numpy as np
from av.audio.frame import format_dtypes
from typing import Dict, Generator, Tuple
from logging import Logger
from pathlib import Path
from enum import Enum, auto
from speechless.utils.logging import NULL_LOGGER
class StreamInfo(Enum):
SAMPLE_RATE = auto()
FRAME_SIZE = auto()
class AudioReader:
def __init__(self, file_path: str, logger: Logger = NULL_LOGGER):
"""Audio frame reader
Args:
file_path (str): Path to the recording
logger (Logger, optional): Logger for messages. Defaults to NULL_LOGGER
"""
self.file_path = str(Path(file_path).resolve())
self.logger = logger
self._container = None
def __del__(self):
if self._container is not None:
self._container.close()
def read_stream(self, aud_stream_idx: int = None) \
-> Tuple[Generator[np.ndarray, None, None], Dict[StreamInfo, object]]:
"""Creates a generator of audio frames of a given audio stream in the recording
Args:
aud_stream_idx (int, optional): Index of the audio stream (0 -> first, 1 -> second). \
Defaults to None (the first audio stream)
Returns:
Tuple[Generator[np.ndarray, None, None], Dict[StreamInfo, object]]: A generator of the \
audio frames and a dictionary with info about the stream
"""
if self._container is not None:
self._container.close()
self._container = av.open(self.file_path)
if aud_stream_idx is None:
aud_stream_idx = 0
if len(self._container.streams.audio) > 1:
self.logger.warning(
'Unspecified audio stream for a file with multiple audio streams. Reading '
f'the first one (stream #{self._container.streams.audio[self._stream_idx].index})')
def audio_iter():
for frame in self._container.decode(audio=aud_stream_idx):
yield frame.to_ndarray()
return (audio_iter(), AudioReader.prepare_stream_info(self._container, aud_stream_idx))
@staticmethod
def prepare_stream_info(container: av.container.InputContainer, aud_stream_idx: int) \
-> Dict[StreamInfo, object]:
"""Creates a dictionary with info about a specific audio stream in the container
Args:
container (av.container.InputContainer): A container with the audio stream
aud_stream_idx (int): The index of the audio stream (0 -> first, 1 -> second)
Returns:
Dict[StreamInfo, object]: A dictionary with info about the stream
"""
astream = container.streams.audio[aud_stream_idx]
return {
StreamInfo.SAMPLE_RATE: astream.sample_rate,
StreamInfo.FRAME_SIZE: astream.codec_context.frame_size
}
def read_entire_audio(file_path: str,
aud_stream_idx: int = None,
aud_format: str = 'f32le',
sample_rate: int = None,
logger: Logger = NULL_LOGGER) -> Tuple[np.ndarray, Dict[StreamInfo, object]]:
"""Reads an entire audio stream from a recording
Args:
file_path (str): Path to the recording
aud_stream_idx (int, optional): Index of the audio stream (0 -> first, 1 -> second). \
Defaults to None (the first audio stream)
aud_format (str, optional): A desired audio format. Defaults to 'f32le'
sample_rate (int, optional): A desired sample rate. This will be the sample rate of the \
returned signal.
logger (Logger, optional): Logger for messages. Defaults to NULL_LOGGER
Returns:
Tuple[np.ndarray, Dict[StreamInfo, object]]: The entire audio stream in the specified format \
and a dictionary with info about the (original) stream - sample rate information will be \
that of the original stream, not the one specified here
"""
file_path = str(Path(file_path).resolve())
with av.open(file_path) as container:
if aud_stream_idx is None:
aud_stream_idx = 0
if len(container.streams.audio) > 1:
logger.warning('Unspecified audio stream for a file with multiple audio streams. Reading '
f'the first one (stream #{container.streams.audio[aud_stream_idx].index})')
acodec = f'pcm_{aud_format}'
command = [
'ffmpeg', '-i', f'{file_path}', '-map', f'0:a:{aud_stream_idx}', '-f', f'{aud_format}',
'-acodec', f'{acodec}'
]
command += ['-ar', f'{sample_rate}'] if sample_rate is not None else []
command += ['pipe:1']
process = subprocess.Popen(stdout=subprocess.PIPE, args=command)
buffer, _ = process.communicate()
astream = container.streams.audio[aud_stream_idx]
acodec = av.Codec(acodec, 'r')
dtype = np.dtype(format_dtypes[acodec.audio_formats[0].name])
if acodec.audio_formats[0].is_planar:
data = np.frombuffer(buffer, dtype).reshape((astream.channels, -1))
else:
data = np.frombuffer(buffer, dtype).reshape((-1, astream.channels)).T
return (data, AudioReader.prepare_stream_info(container, aud_stream_idx))
| 37.75 | 100 | 0.675095 | 673 | 4,983 | 4.843982 | 0.200594 | 0.049693 | 0.062577 | 0.027607 | 0.412577 | 0.382822 | 0.330982 | 0.257669 | 0.237423 | 0.169939 | 0 | 0.005167 | 0.223159 | 4,983 | 131 | 101 | 38.038168 | 0.836993 | 0.328116 | 0 | 0.138889 | 0 | 0 | 0.127436 | 0.032787 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.125 | 0 | 0.305556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21f39b184cd5ca7c84021b18a3faeb579b594682 | 413 | py | Python | scripts/multi_sound.py | deeredman1991/Simple-Mod-Loader | 00fd342b43132d4ee1bd8ef9ac0910d02fece737 | [
"MIT"
] | null | null | null | scripts/multi_sound.py | deeredman1991/Simple-Mod-Loader | 00fd342b43132d4ee1bd8ef9ac0910d02fece737 | [
"MIT"
] | null | null | null | scripts/multi_sound.py | deeredman1991/Simple-Mod-Loader | 00fd342b43132d4ee1bd8ef9ac0910d02fece737 | [
"MIT"
] | null | null | null | from kivy.core.audio import SoundLoader
class MultiSound(object): # for playing the same sound multiple times.
def __init__(self, file, num):
self.num = num
self.sounds = [SoundLoader.load(file) for n in range(num)]
self.index = 0
def play(self):
self.sounds[self.index].play()
self.index += 1
if self.index == self.num:
self.index = 0 | 31.769231 | 70 | 0.598063 | 56 | 413 | 4.339286 | 0.553571 | 0.185185 | 0.098765 | 0.106996 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010274 | 0.292978 | 413 | 13 | 71 | 31.769231 | 0.821918 | 0.101695 | 0 | 0.181818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21f3edcb7d50ea036f5a264fb5f9a7a4e6b5cf1a | 810 | py | Python | datasets/orange.py | tkemps/mklaren | d9e7890aaa26cb3877e1a82114ab1e52df595d96 | [
"BSD-2-Clause"
] | 3 | 2019-10-28T17:20:37.000Z | 2020-08-20T22:59:18.000Z | datasets/orange.py | tkemps/mklaren | d9e7890aaa26cb3877e1a82114ab1e52df595d96 | [
"BSD-2-Clause"
] | null | null | null | datasets/orange.py | tkemps/mklaren | d9e7890aaa26cb3877e1a82114ab1e52df595d96 | [
"BSD-2-Clause"
] | 1 | 2019-10-28T17:20:35.000Z | 2019-10-28T17:20:35.000Z | from numpy import genfromtxt
from os.path import join, realpath, dirname
ORANGE_PATH = join(dirname(realpath(__file__)), "orange")
def load_ionosphere(n=None):
"""
Load the ionosphere dataset.
:param n: Maximum number of examples.
:return: Dataset in standard form.
"""
header = genfromtxt(join(ORANGE_PATH, "ionosphere.csv"),
delimiter=",", skip_header=0, dtype=str, max_rows=1)
data = genfromtxt(join(ORANGE_PATH, "ionosphere.csv"),
delimiter=",", skip_header=1, dtype=float)
X = data[:, :-1]
y = data[:, -1].ravel()
if n is not None and n < X.shape[0]:
X = X[:n, :]
y = y[:n]
labels = header[:-1]
assert len(labels) == X.shape[1]
return {
"data": X, "target": y, "labels": labels
} | 32.4 | 74 | 0.583951 | 106 | 810 | 4.358491 | 0.481132 | 0.064935 | 0.08658 | 0.103896 | 0.242424 | 0.242424 | 0.242424 | 0.242424 | 0.242424 | 0 | 0 | 0.0134 | 0.262963 | 810 | 25 | 75 | 32.4 | 0.760469 | 0.124691 | 0 | 0 | 0 | 0 | 0.075802 | 0 | 0 | 0 | 0 | 0 | 0.055556 | 1 | 0.055556 | false | 0 | 0.111111 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21f493f41dab43945de7c3a7977465eb24defd66 | 2,524 | py | Python | makahiki/scripts/initialize_instance.py | justinslee/Wai-Not-Makahiki | 4b7dd685012ec64758affe0ecee3103596d16aa7 | [
"MIT"
] | 1 | 2015-07-22T11:31:20.000Z | 2015-07-22T11:31:20.000Z | makahiki/scripts/initialize_instance.py | justinslee/Wai-Not-Makahiki | 4b7dd685012ec64758affe0ecee3103596d16aa7 | [
"MIT"
] | null | null | null | makahiki/scripts/initialize_instance.py | justinslee/Wai-Not-Makahiki | 4b7dd685012ec64758affe0ecee3103596d16aa7 | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""
Invocation: scripts/initialize_instance .py -t|--type[=] default|demo|test
-r|--heroku[=] <heroku_app>
Use this script to create an instance with different types of configuration:
[default]: includes the basic configuration. The admin needs to create
the settings for rounds, resources, resource goals, teams and
users, prizes, etc. Uses internal authentication.
[test] : includes all of "default" configuration, with more test users
and data. Uses CAS authentication.
if -r or --heroku is specified, it will initialize the instance in the specified
heroku app.
Performs the following:
* installation of any modules in requirements.txt
* re-create the database and database user
* Synchronize and migrates the database schemas.
* Collects and copies the static and media files to the specific location.
* Loads the default or test configuration of data.
"""
import getopt
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + os.sep + os.pardir + os.sep)
from apps.utils import script_utils
def main(argv):
"""main function."""
instance_type = None
heroku_app = None
manage_py = script_utils.manage_py_command()
manage_command = "python " + manage_py
fixture_path = "fixtures"
try:
opts, args = getopt.getopt(argv, "t:r:h", ["type=", "heroku=", "help"])
except getopt.GetoptError:
script_utils.exit_with_help(__doc__)
if not opts:
script_utils.exit_with_help(__doc__)
for opt in opts:
if opt[0] == "-h" or opt[0] == "--help":
script_utils.exit_with_help(__doc__)
if opt[0] == "-t" or opt[0] == "--type":
instance_type = opt[1]
if opt[0] == "-r" or opt[0] == "--heroku":
heroku_app = opt[1]
manage_command = "heroku run --app %s python makahiki/manage.py" % heroku_app
if not instance_type in ("default", "demo", "test", "uh12", "water"):
script_utils.exit_with_help(__doc__)
_ = args
if not heroku_app:
script_utils.install_requirements()
else:
script_utils.create_heroku_app(heroku_app)
script_utils.push_to_heroku(heroku_app)
script_utils.reset_db(heroku_app)
script_utils.syncdb(manage_command)
script_utils.copy_static_media(heroku_app)
script_utils.load_data(manage_command, instance_type, fixture_path)
if __name__ == '__main__':
main(sys.argv[1:])
| 30.409639 | 90 | 0.666799 | 341 | 2,524 | 4.692082 | 0.375367 | 0.089375 | 0.046875 | 0.0625 | 0.0675 | 0.0675 | 0.035 | 0 | 0 | 0 | 0 | 0.005641 | 0.227417 | 2,524 | 82 | 91 | 30.780488 | 0.814872 | 0.388669 | 0 | 0.102564 | 0 | 0 | 0.09085 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0 | 0.102564 | 0 | 0.128205 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21f70e00fd9069d8e81d9648fe78c6c73dc2b773 | 2,608 | py | Python | python/xml_parsers.py | warlicks/Real_Estate_Data | 137dc53a24a878cb96ec5132be6e81196902aec1 | [
"MIT"
] | 1 | 2021-01-24T23:59:18.000Z | 2021-01-24T23:59:18.000Z | python/xml_parsers.py | warlicks/Real_Estate_Data | 137dc53a24a878cb96ec5132be6e81196902aec1 | [
"MIT"
] | 3 | 2016-08-13T22:39:14.000Z | 2016-08-21T01:00:51.000Z | python/xml_parsers.py | warlicks/Real_Estate_Data | 137dc53a24a878cb96ec5132be6e81196902aec1 | [
"MIT"
] | null | null | null | # Sean Warlick
# Seattle Housing Project
# XML Parsing Functions
# Date: July 12, 2016
###############################################################################
# Import packages
import xml.etree.ElementTree as et
def location_parse(xml_string):
    """Parse a location API response into parallel lists of values.

    The response carries ``Parameter`` nodes, one of which names the API
    function that produced it; each function returns a different node type
    (``state``, ``county``, ``city``, ``zipCode`` or ``neighborhood``).

    Parameters
    ----------
    xml_string : str
        Raw XML text returned by the API.

    Returns
    -------
    dict
        ``{"name": [...], "code": [...], "lon": [...], "lat": [...]}`` with
        only the keys relevant to the detected function populated.  An
        unrecognized function yields all-empty lists (same as the original
        behaviour).

    Raises
    ------
    ValueError
        If no ``Parameter`` node names the API function.  (The original
        code fell through to a ``NameError`` in this case; its
        ``else: next`` branch was a no-op that merely referenced the
        builtin.)
    """
    xml_element = et.fromstring(xml_string)  # Element from raw string
    xml_tree = et.ElementTree(xml_element)   # wrap for .findall support

    # Identify which API function produced this response.
    function = None
    for p in xml_tree.findall(".//Parameter"):
        if p.find("name").text == 'function':
            function = p.find('value').text
            break
    if function is None:
        raise ValueError("no 'function' Parameter found in API response")

    # Per-function schema: (xpath of the data nodes,
    #                       [(output key, child tag), ...] in append order).
    schemas = {
        'getStates': ('.//state', [('name', 'name'),
                                   ('code', 'stateCode'),
                                   ('lon', 'longitude'),
                                   ('lat', 'latitude')]),
        'getCountiesInState': ('.//county', [('name', 'name'),
                                             ('lon', 'longitude'),
                                             ('lat', 'latitude')]),
        'getCitiesInState': ('.//city', [('name', 'name'),
                                         ('lon', 'longitude'),
                                         ('lat', 'latitude')]),
        'getZipCodesInState': ('.//zipCode', [('code', 'name')]),
        'getNeighborhoodsInCity': ('.//neighborhood', [('name', 'name'),
                                                       ('code', 'id')]),
    }

    output = {"name": [], "code": [], "lon": [], "lat": []}
    if function in schemas:
        xpath, fields = schemas[function]
        for node in xml_tree.findall(xpath):
            for out_key, tag in fields:
                output[out_key].append(node.find(tag).text)
    return output  # Return output for use outside the function.
21f9033f249e427aede9e4e1c1b360170f737e4c | 11,009 | py | Python | pygame_gui/elements/ui_horizontal_slider.py | jtiai/pygame_gui | 3da0e1f2c4c60a2780c798d5592f2603ba786b34 | [
"MIT"
] | null | null | null | pygame_gui/elements/ui_horizontal_slider.py | jtiai/pygame_gui | 3da0e1f2c4c60a2780c798d5592f2603ba786b34 | [
"MIT"
] | null | null | null | pygame_gui/elements/ui_horizontal_slider.py | jtiai/pygame_gui | 3da0e1f2c4c60a2780c798d5592f2603ba786b34 | [
"MIT"
] | null | null | null | import pygame
import warnings
from typing import Union, List, Tuple
from .. import ui_manager
from ..core import ui_container
from ..core.ui_element import UIElement
from ..elements.ui_button import UIButton
class UIHorizontalSlider(UIElement):
    """
    A horizontal slider is intended to help users adjust values within a range, for example a volume control.

    :param relative_rect: A rectangle describing the position and dimensions of the element.
    :param start_value: The value to start the slider at.
    :param value_range: The full range of values.
    :param manager: The UIManager that manages this element.
    :param container: The container that this element is within. If set to None will be the root window's container.
    :param parent_element: The element this element 'belongs to' in the theming hierarchy.
    :param object_id: A custom defined ID for fine tuning of theming.
    """
    def __init__(self, relative_rect: pygame.Rect,
                 start_value: Union[float, int],
                 value_range: Tuple[Union[float, int], Union[float, int]],
                 manager: ui_manager.UIManager,
                 container: ui_container.UIContainer = None,
                 parent_element: UIElement = None,
                 object_id: Union[str, None] = None):
        new_element_ids, new_object_ids = self.create_valid_ids(parent_element=parent_element,
                                                                object_id=object_id,
                                                                element_id='horizontal_slider')
        super().__init__(relative_rect, manager, container,
                         object_ids=new_object_ids,
                         element_ids=new_element_ids,
                         starting_height=1,
                         layer_thickness=1)
        self.button_width = 20          # pixel width of each of the three buttons
        self.current_percentage = 0.5   # initial handle position (midpoint)
        self.value_range = value_range
        # Theme overrides for border/shadow widths; both default to 1 px
        # when the theme does not supply a value.
        self.border_width = 1
        border_width_string = self.ui_theme.get_misc_data(self.object_ids, self.element_ids, 'border_width')
        if border_width_string is not None:
            self.border_width = int(border_width_string)
        self.shadow_width = 1
        shadow_width_string = self.ui_theme.get_misc_data(self.object_ids, self.element_ids, 'shadow_width')
        if shadow_width_string is not None:
            self.shadow_width = int(shadow_width_string)
        self.background_colour = self.ui_theme.get_colour(self.object_ids, self.element_ids, 'dark_bg')
        self.border_colour = self.ui_theme.get_colour(self.object_ids, self.element_ids, 'normal_border')
        # Base surface: a pre-rendered shadow, or a plain transparent surface.
        if self.shadow_width > 0:
            self.image = self.ui_manager.get_shadow(self.rect.size)
        else:
            self.image = pygame.Surface(self.rect.size, flags=pygame.SRCALPHA)
        # Draw border then background, each inset by the widths above.
        border_rect = pygame.Rect((self.shadow_width, self.shadow_width),
                                  (self.rect.width - (2 * self.shadow_width),
                                   self.rect.height - (2 * self.shadow_width)))
        if self.border_width > 0:
            self.image.fill(self.border_colour,
                            border_rect)
        relative_background_rect = pygame.Rect((self.border_width + self.shadow_width,
                                                self.border_width + self.shadow_width),
                                               (border_rect.width - (2 * self.border_width),
                                                border_rect.height - (2 * self.border_width)))
        background_rect = pygame.Rect((relative_background_rect.x + relative_rect.x,
                                       relative_background_rect.y + relative_rect.y),
                                      relative_background_rect.size)
        self.image.fill(self.background_colour,
                        relative_background_rect)
        value_range_length = self.value_range[1] - self.value_range[0]
        # NOTE(review): truncated to int here even for float ranges; the
        # set_current_value(start_value) call below replaces it anyway.
        self.current_value = int(self.value_range[0] + (self.current_percentage * value_range_length))
        # Track width available to the sliding handle: total background
        # width minus the two arrow buttons and the handle itself.
        self.scrollable_width = background_rect.width - (3 * self.button_width)
        self.left_limit_position = 0.0
        self.right_limit_position = self.scrollable_width
        self.scroll_position = self.scrollable_width/2
        self.left_button = UIButton(pygame.Rect(background_rect.topleft,
                                                (self.button_width, background_rect.height)),
                                    '◀',
                                    self.ui_manager, self.ui_container, starting_height=2,
                                    parent_element=self,
                                    object_id='#left_button')
        self.right_button = UIButton(pygame.Rect((background_rect.x + background_rect.width - self.button_width,
                                                  background_rect.y),
                                                 (self.button_width, background_rect.height)),
                                     '▶',
                                     self.ui_manager, self.ui_container, starting_height=2,
                                     parent_element=self,
                                     object_id='#right_button')
        sliding_x_pos = background_rect.x + background_rect.width/2 - self.button_width/2
        self.sliding_button = UIButton(pygame.Rect((sliding_x_pos,
                                                    background_rect.y),
                                                   (self.button_width, background_rect.height)),
                                       '', self.ui_manager, self.ui_container, starting_height=2,
                                       parent_element=self,
                                       object_id='#sliding_button')
        # Handle keeps responding to the mouse this far outside its rect.
        self.sliding_button.set_hold_range((background_rect.width, 100))
        self.grabbed_slider = False
        self.starting_grab_x_difference = 0
        self.has_moved_recently = False
        self.set_current_value(start_value)

    def kill(self):
        """
        Overrides the normal sprite kill() method to also kill the button elements that help make up the slider.
        """
        self.left_button.kill()
        self.right_button.kill()
        self.sliding_button.kill()
        super().kill()

    def update(self, time_delta: float):
        """
        Takes care of actually moving the slider based on interactions reported by the buttons or based on movement of
        the mouse if we are gripping the slider itself.

        :param time_delta: the time in seconds between calls to update.
        """
        if self.alive():
            moved_this_frame = False
            # Held arrow buttons nudge the handle at 250 pixels per second,
            # clamped to the track limits.
            if self.left_button.held and self.scroll_position > self.left_limit_position:
                self.scroll_position -= (250.0 * time_delta)
                self.scroll_position = max(self.scroll_position, self.left_limit_position)
                x_pos = self.scroll_position + self.rect.x + self.shadow_width + self.border_width + self.button_width
                y_pos = self.rect.y + self.shadow_width + self.border_width
                self.sliding_button.set_position(pygame.Vector2(x_pos, y_pos))
                moved_this_frame = True
            elif self.right_button.held and self.scroll_position < self.right_limit_position:
                self.scroll_position += (250.0 * time_delta)
                self.scroll_position = min(self.scroll_position, self.right_limit_position)
                x_pos = self.scroll_position + self.rect.x + self.shadow_width + self.border_width + self.button_width
                y_pos = self.rect.y + self.shadow_width + self.border_width
                self.sliding_button.set_position(pygame.Vector2(x_pos, y_pos))
                moved_this_frame = True
            mouse_x, mouse_y = pygame.mouse.get_pos()
            if self.sliding_button.held and self.sliding_button.in_hold_range((mouse_x, mouse_y)):
                # On the first frame of a drag, record the cursor's offset
                # from the handle so later motion is applied relative to
                # the grab point instead of snapping to the cursor.
                if not self.grabbed_slider:
                    self.grabbed_slider = True
                    real_scroll_pos = (self.scroll_position + self.rect.x +
                                       self.shadow_width + self.border_width + self.button_width)
                    self.starting_grab_x_difference = mouse_x - real_scroll_pos
                real_scroll_pos = (self.scroll_position + self.rect.x +
                                   self.shadow_width + self.border_width + self.button_width)
                current_grab_difference = mouse_x - real_scroll_pos
                adjustment_required = current_grab_difference - self.starting_grab_x_difference
                self.scroll_position = self.scroll_position + adjustment_required
                self.scroll_position = min(max(self.scroll_position, self.left_limit_position),
                                           self.right_limit_position)
                x_pos = self.scroll_position + self.rect.x + self.shadow_width + self.border_width + self.button_width
                y_pos = self.rect.y + self.shadow_width + self.border_width
                self.sliding_button.set_position(pygame.Vector2(x_pos, y_pos))
                moved_this_frame = True
            elif not self.sliding_button.held:
                self.grabbed_slider = False
            if moved_this_frame:
                # Map the pixel position back onto the user's value range.
                self.current_value = self.value_range[0] + (
                    (self.scroll_position / self.scrollable_width) * (self.value_range[1] - self.value_range[0]))
                if not self.has_moved_recently:
                    self.has_moved_recently = True

    def get_current_value(self) -> Union[float, int]:
        """
        Gets the current value the slider is set to.

        Reading the value also clears the 'has moved recently' flag.

        :return: The current value recorded by the slider.
        """
        self.has_moved_recently = False
        return self.current_value

    def set_current_value(self, value: Union[float, int]):
        """
        Sets the value of the slider, which will move the position of the slider to match. Will issue a warning if the
        value set is not in the value range.

        :param value: The value to set.
        """
        # min/max comparison covers value ranges given in either order.
        if min(self.value_range[0], self.value_range[1]) <= value <= max(self.value_range[0], self.value_range[1]):
            self.current_value = float(value)
            value_range_size = (self.value_range[1] - self.value_range[0])
            if value_range_size != 0:
                percentage = (self.current_value - self.value_range[0])/value_range_size
                self.scroll_position = self.scrollable_width * percentage
                x_pos = self.scroll_position + self.rect.x + self.shadow_width + self.border_width + self.button_width
                y_pos = self.rect.y + self.shadow_width + self.border_width
                self.sliding_button.set_position(pygame.Vector2(x_pos, y_pos))
                self.has_moved_recently = True
        else:
            warnings.warn('value not in range', UserWarning)
| 51.929245 | 118 | 0.597239 | 1,303 | 11,009 | 4.768227 | 0.147352 | 0.044906 | 0.06084 | 0.053114 | 0.447449 | 0.398841 | 0.327217 | 0.301626 | 0.272815 | 0.246419 | 0 | 0.007133 | 0.325098 | 11,009 | 211 | 119 | 52.175355 | 0.828802 | 0.111273 | 0 | 0.248322 | 0 | 0 | 0.012595 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033557 | false | 0 | 0.04698 | 0 | 0.09396 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21fb519669ab124f568a6a6b5a2310b85412e2fa | 2,614 | py | Python | src/django_datatables/mixins.py | ryanbowen/django-datatables | 995b82ecf666d4fcae3322861312d927cba07c82 | [
"Apache-2.0"
] | null | null | null | src/django_datatables/mixins.py | ryanbowen/django-datatables | 995b82ecf666d4fcae3322861312d927cba07c82 | [
"Apache-2.0"
] | null | null | null | src/django_datatables/mixins.py | ryanbowen/django-datatables | 995b82ecf666d4fcae3322861312d927cba07c82 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
import logging
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse, JsonResponse
from django.utils.encoding import force_text
from django.utils.functional import Promise
from django.utils.translation import ugettext as _
from django.utils.cache import add_never_cache_headers
from django.utils.html import strip_tags
try:
from openpyxl import Workbook
import openpyxl.writer.excel as ExcelWriter
except ImportError:
ExcelWriter = None
LOG = logging.getLogger(__name__)
class LazyEncoder(DjangoJSONEncoder):
    """JSON encoder that can serialise Django's lazy i18n strings.

    ``Promise`` instances (the base of Django's lazy translation
    proxies) are forced to text; every other type is delegated to
    ``DjangoJSONEncoder``.
    """

    def default(self, obj):
        if not isinstance(obj, Promise):
            return super().default(obj)
        return force_text(obj)
class DataResponse(object):
    """Mixin serving a view's tabular data as JSON or as an Excel download.

    Subclasses are expected to provide ``get_column_titles()``,
    ``get_data(request)`` and ``get_context_data(request)``; ``_meta.title``
    (when present) names the worksheet and download file.
    """

    def create_excel_response(self, request):
        """
        Return the table as an ``HttpResponse`` carrying an .xlsx workbook.
        """
        headers = self.get_column_titles()
        rows = self.get_data(request)
        title = getattr(self._meta, "title", "Sheet")
        wb = Workbook(write_only=True)
        ws = wb.create_sheet(title)
        ws.append(headers)
        for row in rows:
            # Cells may contain rendered HTML; export plain text only.
            ws.append([strip_tags(c) for c in row])
        response = HttpResponse(
            ExcelWriter.save_virtual_workbook(wb),
            content_type='application/vnd.ms-excel'
        )
        # %H%M is hour+minute; the previous %H%m repeated the month here.
        timestamp = datetime.now().strftime("%Y-%m-%d %H%M")
        response['Content-Disposition'] = \
            'attachment; filename="{0}"'.format(f'{title}-{timestamp}.xlsx')
        return response

    def create_data_response(self, func_val, request):
        """Wrap *func_val* (must be a dict) in a JsonResponse.

        Any failure (other than a KeyboardInterrupt) is logged and turned
        into an error payload rather than propagating to the client.
        """
        try:
            assert isinstance(func_val, dict)
            response = dict(func_val)
            if 'result' not in response:
                response['result'] = 'ok'
        except KeyboardInterrupt:
            # Allow keyboard interrupts through for debugging.
            raise
        except Exception as e:
            LOG.exception('JSON view error: %s', request.path)
            msg = getattr(e, 'message', _('Internal error') + ': ') + str(e)
            response = {'result': 'error', 'sError': msg, 'text': msg}
        return JsonResponse(response)

    def dispatch(self, request, *args, **kwargs):
        """Route ``?export=excel`` to the spreadsheet path, else emit JSON."""
        self.request = request
        if request.GET.get("export") == "excel":
            return self.create_excel_response(request)
        func_val = self.get_context_data(request)
        response = self.create_data_response(func_val, request)
        # Responses are dynamic; make sure intermediaries never cache them.
        add_never_cache_headers(response)
        return response
| 30.752941 | 107 | 0.643458 | 302 | 2,614 | 5.437086 | 0.430464 | 0.042631 | 0.045676 | 0.024361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001542 | 0.25593 | 2,614 | 84 | 108 | 31.119048 | 0.842674 | 0.048585 | 0 | 0.067797 | 0 | 0 | 0.090317 | 0.026851 | 0 | 0 | 0 | 0 | 0.016949 | 1 | 0.067797 | false | 0 | 0.20339 | 0 | 0.40678 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21fbcb929911a995b3f6095877a796a92d8d46dc | 2,193 | py | Python | src/dsa/misc/utils/class_generator.py | tvatter/dsa | e5ae217e38441d90914a55103e23d86f5821dc2f | [
"MIT"
] | null | null | null | src/dsa/misc/utils/class_generator.py | tvatter/dsa | e5ae217e38441d90914a55103e23d86f5821dc2f | [
"MIT"
] | null | null | null | src/dsa/misc/utils/class_generator.py | tvatter/dsa | e5ae217e38441d90914a55103e23d86f5821dc2f | [
"MIT"
] | null | null | null | import re
from os import path
def preprocess_file(class_name='MyClass', file_name=None, property_name=None):
    """Inspect *file_name* for an existing class and (optionally) property.

    Parameters
    ----------
    class_name : str
        Name of the class to look for; default ``'MyClass'``.
    file_name : str, optional
        Target file. Defaults to ``<class_name_lowercase>.py``.
    property_name : str, optional
        When given and the class exists, also check whether the backing
        attribute ``self._<property_name>`` is already assigned.

    Returns
    -------
    tuple
        ``(file_name, found, class_info)`` where ``found`` is a list of
        three booleans: [file exists, class defined, property present],
        and ``class_info`` holds the class name and its (possibly
        prefixed) definition line.
    """
    if file_name is None:
        file_name = class_name.lower() + '.py'
    found = [False] * 3
    class_info = {
        'name': class_name,
        'definition': 'class {}:'.format(class_name)
    }
    file_str = ''
    if path.exists(file_name):
        found[0] = True
        # Context manager guarantees the handle is closed even on errors
        # (the original opened and closed the file by hand).
        with open(file_name, 'r') as source:
            file_str = source.read()
    if found[0] and class_info['definition'] in file_str:
        found[1] = True
    if found[0] and not found[1] and file_str != '':
        # A new class appended to existing content gets separated by a
        # blank line.
        class_info['definition'] = '\n\n' + class_info['definition']
    if found[0] and found[1] and property_name is not None:
        class_info = extend_class_info(class_info, file_str)
        if 'self._{} ='.format(property_name) in class_info['class_str']:
            found[2] = True
    return file_name, found, class_info
def get_property_defaults(property_name='my_property'):
    """Return source-code templates for a managed attribute.

    Produces four snippets as strings, all referencing the private
    attribute ``self._<property_name>``: an ``__init__`` assignment and
    ``@property`` getter, setter and deleter blocks.

    Note: the leading whitespace inside the template literals is
    significant -- it must match the indentation of the class body these
    snippets are pasted into, so do not re-indent it.
    """
    self_property = 'self._{}'.format(property_name)
    # Assignment line for the class's __init__ body (8-space indent).
    init = """
        self._{0} = {0}\n""".format(property_name)
    # Accessor blocks at method indentation (4 spaces).
    getter = """
    @property
    def {}(self):
        return {}\n""".format(property_name, self_property)
    setter = """
    @{0}.setter
    def {0}(self, new_{0}):
        {1} = new_{0}\n""".format(property_name, self_property)
    deleter = """
    @{0}.deleter
    def {0}(self):
        del {1}\n""".format(property_name, self_property)
    return init, getter, setter, deleter
def extend_class_info(class_info, file_str):
    """Add the class's location and source text to *class_info*.

    Locates ``class_info['definition']`` inside *file_str*, determines
    where that class ends (two characters before the next ``class ...:``
    line, or the end of the file), and records ``start``, ``end``,
    ``file_str`` and the extracted ``class_str`` in the dict, which is
    returned.
    """
    definition_at = file_str.find(class_info['definition'])
    class_info['start'] = definition_at
    # All class-definition start offsets in the file, in order.
    starts = [match.start() for match in re.finditer(r'class .*:', file_str)]
    position = starts.index(definition_at)
    if position + 1 < len(starts):
        # Stop two characters before the next class (skips the blank line).
        end_at = starts[position + 1] - 2
    else:
        end_at = len(file_str)
    class_info['end'] = end_at
    # Keep the raw file text around to simplify adding the property later.
    class_info['file_str'] = file_str
    class_info['class_str'] = file_str[definition_at:end_at]
    return class_info
| 27.074074 | 78 | 0.679891 | 321 | 2,193 | 4.35514 | 0.214953 | 0.128755 | 0.077253 | 0.054363 | 0.164521 | 0.110873 | 0.044349 | 0 | 0 | 0 | 0 | 0.012168 | 0.175559 | 2,193 | 80 | 79 | 27.4125 | 0.761062 | 0.040128 | 0 | 0 | 0 | 0 | 0.159372 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.035088 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
21ff78f085a5d50f157b5c45478181711a1254eb | 2,086 | py | Python | kilojoule/templates/humidair_USCS_F.py | johnfmaddox/kilojoule | b4c146ded82e3ef51a0252ff48b1066a076e9aeb | [
"MIT"
] | null | null | null | kilojoule/templates/humidair_USCS_F.py | johnfmaddox/kilojoule | b4c146ded82e3ef51a0252ff48b1066a076e9aeb | [
"MIT"
] | null | null | null | kilojoule/templates/humidair_USCS_F.py | johnfmaddox/kilojoule | b4c146ded82e3ef51a0252ff48b1066a076e9aeb | [
"MIT"
] | null | null | null | import kilojoule.humidair
import kilojoule.realfluid
import kilojoule.idealgas as idealgas
from kilojoule.organization import QuantityTable
from kilojoule.display import Calculations, Summary
from kilojoule.units import units, Quantity
import kilojoule.magics
# Moist-air and liquid-water property evaluators in US customary
# (Fahrenheit-based) units.
humidair = kilojoule.humidair.Properties(unit_system='English_F')
water = kilojoule.realfluid.Properties('Water', unit_system='English_F')

# Default unit for every property symbol tracked in the state table.
properties_dict = {
    'T': 'degF',                    # Temperature
    'p': 'psi',                     # pressure
    'v': 'ft^3/lb_dry_air',         # specific volume
    'h': 'Btu/lb_dry_air',          # specific enthalpy
    'h_w': 'Btu/lb_water',          # specific enthalpy of water
    's': 'Btu/lb_dry_air/degR',     # specific entropy
    # Fixed typo: was 'But/lb_water'. NOTE(review): entropy units likely
    # need '/degR' as well (compare 's' above) -- confirm before relying
    # on unit conversions of s_w.
    's_w': 'Btu/lb_water',          # entropy of water
    'x': '',                        # vapor quality
    'm_a': 'lb_dry_air',            # mass of dry air
    'm_w': 'lb_water',              # mass of water
    'mdot_a': 'lb_dry_air/s',       # mass flow rate of dry air
    'mdot_w': 'lb_water/s',         # mass flow rate of water
    'Vol': 'ft^3',                  # volume
    'Vdot': 'ft^3/s',               # volumetric flow rate
    'Vel': 'ft/s',                  # velocity
    'X': 'Btu',                     # exergy
    'Xdot': 'hp',                   # exergy flow rate
    # 'phi':'Btu/lb_dry_air',       # specific exergy (disabled; 'phi' is
    #                               # used for relative humidity below)
    'psi': 'Btu/lb_dry_air',        # specific flow exergy
    'y': '',                        # water mole fraction
    'c_v': 'Btu/lb_dry_air/degR',   # constant volume specific heat
    'c_p': 'Btu/lb_dry_air/degR',   # constant pressure specific heat
    'k': 'Btu/ft/degR',             # conductivity
    'T_wb': 'degF',                 # Wet-bulb Temperature
    'T_dp': 'degF',                 # Dew-point Temperature
    'p_w': 'psi',                   # partial pressure of water vapor
    'rel_hum': '',                  # relative humidity
    'phi': '',                      # relative humidity (duplicate of rel_hum)
    'omega': 'lb_water/lb_dry_air'  # humidity ratio
}

# Shared state table; add_to_namespace=True injects the state variables
# into the calling namespace for convenience.
states = QuantityTable(properties_dict, unit_system='USCS_F', add_to_namespace=True)
| 47.409091 | 85 | 0.553691 | 243 | 2,086 | 4.559671 | 0.37037 | 0.045126 | 0.072202 | 0.059567 | 0.106498 | 0.041516 | 0 | 0 | 0 | 0 | 0 | 0.00211 | 0.318313 | 2,086 | 43 | 86 | 48.511628 | 0.777075 | 0.247843 | 0 | 0 | 0 | 0 | 0.228916 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.175 | 0 | 0.175 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d0018b86cdd965ac8f6b7d70eb9745a2cec6bbc | 7,640 | py | Python | CRF/scan_text_rev.py | henchc/MHG-Scansion-BLSTM | 551abd3da9a2f989fb770ee97dbd73445edd0d3c | [
"MIT"
] | 4 | 2018-04-21T08:52:25.000Z | 2020-01-08T13:57:05.000Z | CRF/scan_text_rev.py | henchc/MHG-Scansion-BLSTM | 551abd3da9a2f989fb770ee97dbd73445edd0d3c | [
"MIT"
] | null | null | null | CRF/scan_text_rev.py | henchc/MHG-Scansion-BLSTM | 551abd3da9a2f989fb770ee97dbd73445edd0d3c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals # for python2 compatibility
from __future__ import division
from __future__ import absolute_import
# created at UC Berkeley 2015
# Authors: Christopher Hench
# This program scans MHG epic poetry, returning data to analyze statistically
import codecs
import pycrfsuite
import numpy as np
import itertools
import codecs
import pycrfsuite
import numpy as np
from get_features import get_features
import itertools
from get_features import syllableweight
def only_four_stresses(lines_w_features, tagger, sylls):
    """Constrain CRF taggings of MHG verse lines to exactly four stresses.

    Each line is first tagged by the CRF; if the result violates metrical
    rules (wrong stress count, a light "DOPPEL", a heavy "HALB_HAUPT"/"EL",
    adjacent stresses, etc.) the line is re-tagged by searching label
    combinations ranked by the tagger's marginal probabilities. A series
    of pattern fixes is then applied to the accepted taggings.

    :param lines_w_features: per-line CRF feature sequences (as produced
        by ``get_features``).
    :param tagger: a trained ``pycrfsuite.Tagger``; ``tag()`` and
        ``marginal()`` are used.
    :param sylls: syllable strings for each line, parallel to
        ``lines_w_features``.
    :return: one label sequence (list of str) per input line.
    """
    labs = ["MORA_HAUPT", "MORA", "DOPPEL", "HALB_HAUPT", "HALB", "EL"]
    stressed = ["MORA_HAUPT", "DOPPEL", "HALB_HAUPT"]
    four_stress = []
    for i, line in enumerate(lines_w_features):
        t_line = tagger.tag(line)
        line_sylls = sylls[i]
        # 'stress' counts stressed labels; rule violations add 5, which
        # guarantees stress != 4 and forces the re-tagging branch below.
        stress = 0
        for i2, l in enumerate(t_line):
            # increment stress
            if t_line[i2] in stressed:
                stress += 1
            # no doppel can be light
            if l == "DOPPEL" and syllableweight(line_sylls[i2]) == "L":
                stress += 5  # auto sends to reweight probs
            # no halb-haupt can be heavy
            if l == "HALB_HAUPT" and syllableweight(line_sylls[i2]) == "H":
                stress += 5  # auto sends to reweight probs
            # no EL can be heavy
            if l == "EL" and syllableweight(line_sylls[i2]) == "H":
                stress += 5  # auto sends to reweight probs
            # error alternation, recount
            accs = ["MORA_HAUPT", "HALB_HAUPT"]
            if i2 > 0 and l in accs:  # rule out if two
                if t_line[i2 - 1] in accs:
                    stress += 5
            # rule out if no stress following doppel
            if i2 < len(t_line) - 1 and l == "DOPPEL":
                if t_line[i2 + 1] not in stressed:
                    stress += 5
            if i2 > 0 and l == "DOPPEL":  # rule out stress before double
                if t_line[i2 - 1] in accs:
                    stress += 5
            if 0 < i2 < len(t_line) - 1 and l == "EL":
                if t_line[i2 - 1] in accs and t_line[i2 + 1] in accs:
                    stress += 5
        if "DOPPELDOPPEL" in ''.join(t_line):
            stress += 5
        # if > 4 stresses, look at probs
        if stress != 4:
            line_probs = []
            for i3, l in enumerate(t_line):
                # marginal probabilities for every label at this position
                probs = [(lb, tagger.marginal(lb, i3)) for lb in labs]
                # no doppel can be light
                if syllableweight(line_sylls[i3]) == "L":
                    probs = [x for x in probs if x[0] != "DOPPEL"]
                # no halbhaupt or EL can be heavy
                if syllableweight(line_sylls[i3]) == "H":
                    probs = [x for x in probs if x[0] != "HALB_HAUPT"]
                    probs = [x for x in probs if x[0] != "EL"]
                probs = sorted(probs, key=lambda tup: tup[1], reverse=True)
                # if very certain, only take top 2, otherwise top 4
                # (keeps the cartesian product below tractable)
                if probs[0][1] > .9:
                    line_probs.append(probs[:2])
                else:
                    line_probs.append(probs[:4])
            # get combinations of syll values with probs
            combos = itertools.product(*line_probs)
            # verify each combo and rank; keep the highest-probability one
            # that passes every rule with exactly four stresses
            final_line = (0, 0)
            for c in combos:
                stress = 0
                tot_prob = 0
                for i4, l in enumerate(c):
                    tot_prob += l[1]
                    if l[0] in stressed:
                        stress += 1
                    if i4 < (
                            len(c) - 1) and l[0] == "DOPPEL":  # rule out if no stress after double
                        if c[i4 + 1][0] not in stressed:
                            stress += 5
                    # error alternation recount
                    accs = ["MORA_HAUPT", "HALB_HAUPT"]
                    if i4 > 0 and l[0] in accs:  # rule out if two
                        if c[i4 - 1][0] in accs:
                            stress += 5
                    if i4 > 0 and l[0] == "DOPPEL":  # no stress b4 double
                        if c[i4 - 1][0] in accs:
                            stress += 5
                    if 0 < i4 < len(c) - 1 and l[0] == "EL":
                        if c[i4 - 1][0] in accs and c[i4 + 1][0] in accs:
                            stress += 5
                if "DOPPELDOPPEL" in ''.join([l[0] for l in c]):
                    stress += 5
                if stress == 4 and tot_prob > final_line[1]:
                    final_line = (c, tot_prob)
            # If no combination passed, final_line[0] is still the int 0
            # and iterating it raises TypeError: keep the original tagging.
            try:
                t_line = [x[0] for x in final_line[0]]
            except TypeError:
                pass  # not continue, pass will do nothing
        four_stress.append(t_line)  # will take orig if no errors
    # additional fixes
    final_labels = []
    for line in four_stress:
        count = 0  # NOTE(review): 'count' is never used
        new_line = []
        for i, l in enumerate(line):
            # fix / X' ◡ /
            if (0 < i < (len(line) - 1) and
                    line[i - 1] == "MORA_HAUPT" and
                    l == "HALB" and
                    line[i + 1] in stressed):
                new_line.append("MORA")
            else:
                new_line.append(l)
        final_labels.append(new_line)
    final_labels2 = []
    for line in final_labels:
        new_line = []
        for i, l in enumerate(line):
            # fix / X' ◡ X / X'
            if (1 < i < (len(line) - 1) and
                    line[i - 1] == "HALB" and
                    line[i - 2] == "MORA_HAUPT" and
                    l == "MORA" and
                    line[i + 1] in stressed):
                new_line.append("HALB")
            else:
                new_line.append(l)
        final_labels2.append(new_line)
    final_labels = final_labels2
    final_labels2 = []
    for line in final_labels:
        new_line = []
        for i, l in enumerate(line):
            # fix / X' X ◡ / X'
            if (0 < i < (len(line) - 2) and
                    line[i - 1] == "MORA_HAUPT" and
                    l == "MORA" and
                    line[i + 1] == "HALB" and
                    line[i + 2] in stressed):
                new_line.append("HALB")
            else:
                new_line.append(l)
        final_labels2.append(new_line)
    final_labels = final_labels2
    final_labels2 = []
    for line in final_labels:
        # This pass edits the line in place (new_line aliases line).
        new_line = line
        for i, l in enumerate(line):
            # fix / X' X X / X'
            if (0 < i < (len(line) - 2) and
                    line[i - 1] == "MORA_HAUPT" and
                    l == "MORA" and
                    line[i + 1] == "MORA" and
                    line[i + 2] in stressed):
                new_line[i] = "HALB"
                new_line[i + 1] = "HALB"
        final_labels2.append(new_line)
    final_labels = final_labels2
    # # zweisilbig maennlich kadenz
    # final_labels2 = []
    # prefixes = ["ge", "be", "en"]
    # for i, line in enumerate(final_labels):
    #     new_line = line
    #     if line[-4:] == ["MORA_HAUPT", "HALB", "HALB", "MORA_HAUPT"]:
    #         if len(sylls[i][-1]) > 1 and sylls[
    #                 i][-1][-2] not in prefixes and syllableweight(sylls[i][-1][-2]) == "L":
    #             new_line[-4:] = ["MORA_HAUPT", "MORA", "HALB_HAUPT", "HALB"]
    #     final_labels2.append(new_line)
    # final_labels = final_labels2
    return(final_labels)
| 32.7897 | 99 | 0.47055 | 951 | 7,640 | 3.659306 | 0.170347 | 0.038218 | 0.025287 | 0.02069 | 0.540805 | 0.448563 | 0.419828 | 0.385632 | 0.321552 | 0.223563 | 0 | 0.03297 | 0.424346 | 7,640 | 232 | 100 | 32.931034 | 0.757617 | 0.185471 | 0 | 0.486301 | 0 | 0 | 0.045823 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006849 | false | 0.006849 | 0.089041 | 0 | 0.09589 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d01a7ba705971e5adcf5ebca0f865bd17b58593 | 12,084 | py | Python | aps_32id/run/energy_scan.py | tomography/scanscripts | f7486fe1285da4684f3709661f112ccc15c2e4b8 | [
"BSD-3-Clause"
] | null | null | null | aps_32id/run/energy_scan.py | tomography/scanscripts | f7486fe1285da4684f3709661f112ccc15c2e4b8 | [
"BSD-3-Clause"
] | 18 | 2017-03-27T01:35:35.000Z | 2018-04-03T20:30:49.000Z | aps_32id/run/energy_scan.py | tomography/scanscripts | f7486fe1285da4684f3709661f112ccc15c2e4b8 | [
"BSD-3-Clause"
] | 8 | 2017-02-22T16:31:43.000Z | 2017-10-15T18:22:30.000Z | # -*- coding: utf-8 -*-
#######################
'''For each energy step, a projection and then a flat field are
acquired. The script calls the move_energy method from the TXM class.
'''
import time
import os
import logging
import warnings
import numpy as np
import h5py
import tqdm
from scanlib.scan_variables import update_variable_dict, parse_list_variable
from scanlib.tools import energy_range_from_points, loggingConfig
from aps_32id.txm import new_txm
__author__ = 'Mark Wolfman'
__copyright__ = 'Copyright (c) 2017, UChicago Argonne, LLC.'
__docformat__ = 'restructuredtext en'
__platform__ = 'Unix'
__all__ = ['run_energy_scan', 'getVariableDict']
# Default parameters for the energy scan. Values may be replaced at run
# time before the scan starts (see ``update_variable_dict`` imported above).
variableDict = {
    'PreDarkImages': 0,
    'SampleXOut': 0.2,
    'SampleYOut': 0.0,
    'SampleZOut': 0.0,
    'SampleRotOut': 90.0,  # In degrees
    'SampleXIn': 0.0,
    'SampleYIn': 0.0,
    'SampleZIn': 0.0,
    'SampleRotIn': 0.0,  # In degrees
    'StartSleep_min': 0.0,
    'StabilizeSleep_ms': 3000,  # beam-settling delay after each energy move
    'ExposureTime': 1,  # in seconds (see run_energy_scan's ``exposure``)
    # keV break-points and per-segment step sizes -- presumably combined
    # by energy_range_from_points (imported above); confirm the pairing.
    'Energy_limits': '8.33, 8.34, 8.37, 8.43',
    'Energy_Step': '0.002, 0.001, 0.002',
    'ZP_X_drift': 0.,
    'constant_mag': True,  # will CCD move to maintain constant magnification?
    # 'BSC_diameter': 1320,
    # 'BSC_drn': 60
    'Repetitions': 1,
    'Pause': 0,  # in minutes
    'Use_Fast_Shutter': 1,
    # Logging: 0=UNSET, 10=DEBUG, 20=INFO, 30=WARNING, 40=ERROR, 50=CRITICAL
    'Log_Level': logging.INFO,
}

# Whether the user may open shutters / change source energy by default.
SHUTTER_PERMIT = False

# Module-level logger.
log = logging.getLogger(__name__)
def getVariableDict():
    """Return the module-level ``variableDict`` of scan parameters.

    The dictionary object itself is returned (not a copy), so changes
    made through the returned mapping are visible module-wide.
    """
    return variableDict
def _capture_energy_frames(txm, energies, constant_mag,
                           stabilize_sleep_ms, sample_pos, out_pos, ZP_X_drift_array):
    """A helper for collecting a set of energy frames.

    For each energy: move the zone plate and the sample, change the
    beam energy, wait for the beam to stabilize, then capture a sample
    projection and a white-field projection (order alternates each step).

    The TXM should already be set up before calling this function.

    Parameters
    ----------
    txm :
      A NanoTXM or MicroCT object that this script will control.
    energies : np.ndarray
      An array with the energies (in keV) for capturing frames.
    constant_mag : bool
      Whether to move the detector at each energy to keep the
      magnification constant.
    stabilize_sleep_ms : int
      How long to wait after moving energy to allow the beamline
      (monochromator, etc.) to stabilize.
    sample_pos : 4-tuple
      (x, y, z, θ°) with the position for the sample.
    out_pos : 4-tuple
      (x, y, z, θ°) with the position for the flat field.
    ZP_X_drift_array : np.ndarray
      Each entry is the change in x position of the zoneplate needed
      to keep the sample centered at that energy.
    """
    correct_backlash = True  # First energy only
    for idx, energy in enumerate(tqdm.tqdm(energies, "Energy scan")):
        log.debug('Preparing to capture energy: %f keV', energy)
        # Check whether we should collect the sample or white field first.
        # The order alternates every energy step, presumably to halve the
        # stage travel -- confirm with beamline procedure.
        sample_first = not bool(idx % 2)
        log.info("Collecting %s first.", "sample" if sample_first else "white-field")
        # Move sample, zone plate and energy
        txm.zone_plate_x = ZP_X_drift_array[idx]
        if sample_first:
            txm.move_sample(*sample_pos)
        else:
            txm.move_sample(*out_pos)
        txm.move_energy(energy, constant_mag=constant_mag,
                        correct_backlash=correct_backlash)
        correct_backlash = False  # Needed on first energy only
        # Pause for a moment to allow the beam to stabilize
        log.debug('Stabilize Sleep %f ms', stabilize_sleep_ms)
        time.sleep(stabilize_sleep_ms / 1000.0)
        # Sample projection acquisition (or white-field on odd passes)
        if sample_first:
            log.info("Acquiring sample position %s at %.4f eV", sample_pos, energy)
            txm.capture_projections()
        else:
            log.info("Acquiring white-field position %s at %.4f eV", out_pos, energy)
            txm.capture_white_field()
        # Flat-field projection acquisition (or sample on odd passes)
        if sample_first:
            txm.move_sample(*out_pos)
            log.info("Acquiring white-field position %s at %.4f eV", out_pos, energy)
            # time.sleep(3)
            txm.capture_white_field()
        else:
            txm.move_sample(*sample_pos)
            log.info("Acquiring sample position %s at %.4f eV", sample_pos, energy)
            txm.capture_projections()
def run_energy_scan(energies, exposure=0.5, n_pre_dark=5,
                    has_permit=True, sample_pos=(0.,), out_pos=(0.2,),
                    ZP_X_drift_array=None,
                    constant_mag=True, stabilize_sleep_ms=1000,
                    repetitions=1,
                    pause=0,
                    use_fast_shutter=True,
                    log_level=logging.INFO,
                    txm=None):
    """Collect a series of 2-dimensional projections across a range of energies.

    At each position, a sample projection and white-field projection
    will be collected by moving the sample along the X direction.

    Parameters
    ----------
    energies : np.ndarray
      An array with the list of energies to scan, in keV.
    exposure : float, optional
      How long to collect each frame for, in seconds.
    n_pre_dark : int, optional
      How many dark-field projections to collect before starting the
      energy scan.
    has_permit : bool, optional
      Does the user have permission to open the shutters and change
      source energy.
    sample_pos : 4-tuple, optional
      (x, y, z, θ) tuple for positioning the sample in the beam.
    out_pos : 4-tuple, optional
      (x, y, z, θ) tuple for removing the sample from the beam.
    ZP_X_drift_array : np.ndarray, optional
      Each entry is the change in x position of the zoneplate needed
      to keep the sample centered at that energy.
    constant_mag : bool, optional
      Whether to adjust the camera position to maintain a constant
      focus.
    stabilize_sleep_ms : int, optional
      How long, in milliseconds, to wait for the beam to stabilize
      before collecting projections.
    repetitions : int, optional
      How many times to run this energy scan, including the first one.
    pause : int, optional
      How long, in minutes, the scan pauses in between each energy scan
      repetition.
    use_fast_shutter : bool, optional
      Whether to open and shut the fast shutter before triggering
      projections.
    log_level : int, optional
      Temporary log level to use. ``None`` does not change the logging.
    txm : optional
      An instance of the NanoTXM class. If not given, a new one will
      be created. Mostly used for testing.

    Returns
    -------
    txm
      The TXM object used for this scan.
    """
    log.debug("Starting run_energy_scan()")
    start_time = time.time()
    total_projections = n_pre_dark + 2 * len(energies)
    # Fix up default parameters
    if ZP_X_drift_array is None:
        ZP_X_drift_array = np.zeros_like(energies)
    elif ZP_X_drift_array.shape != energies.shape:
        raise ValueError("ZP_X_drift_array shape does not match energies: "
                         "{} vs {}".format(ZP_X_drift_array.shape, energies.shape))
    log.debug('ZP x-drift corrections: {}'.format(ZP_X_drift_array))
    # Create the TXM object for this scan
    if txm is None:
        txm = new_txm()
    # Guard for repetitions == 0: the loop below would never run and the
    # final log call would otherwise reference an unbound name.
    hdf_filename = None
    # Execute the actual scan script
    with txm.run_scan():
        if use_fast_shutter:
            txm.enable_fast_shutter()
        # Collect repetitions of the energy scan
        for rep in range(repetitions):
            # Prepare TXM for capturing data
            txm.setup_detector(exposure=exposure,
                               num_projections=total_projections)
            if use_fast_shutter:
                txm.enable_fast_shutter()
            txm.setup_hdf_writer(num_projections=total_projections)
            # Give the HDF writer a moment to finish initializing
            # NOTE(review): the 5 s delay is empirical — confirm against beamline
            time.sleep(5)
            txm.start_logging(log_level)
            # Capture pre dark field images
            if n_pre_dark > 0:
                txm.close_shutters()
                log.info('Capturing %d Pre Dark Field images', n_pre_dark)
                txm.capture_dark_field(num_projections=n_pre_dark)
            log.info('Capturing %d energies', len(energies))
            # Collect frames at each energy
            txm.open_shutters()
            _capture_energy_frames(txm=txm, energies=energies,
                                   constant_mag=constant_mag,
                                   stabilize_sleep_ms=stabilize_sleep_ms,
                                   sample_pos=sample_pos, out_pos=out_pos,
                                   ZP_X_drift_array=ZP_X_drift_array)
            txm.close_shutters()
            # Remember the active HDF file (debug print statements removed)
            hdf_filename = txm.hdf_filename
            log.debug('Active HDF file: %s', hdf_filename)
            if pause:
                log.info("Pausing between scans for %f min", pause)
                time.sleep(pause * 60.0)  # convert min to sec
            # Add the energy array to the active HDF file
            try:
                with txm.hdf_file(hdf_filename, mode="r+") as hdf_f:
                    log.debug('Saving energies to file: %s', hdf_filename)
                    hdf_f.create_dataset('/exchange/energy',
                                         data=energies)
            except (OSError, IOError):
                # Could not load HDF file, so raise a warning
                msg = "Could not save energies to file %s" % hdf_filename
                warnings.warn(msg, RuntimeWarning)
                log.warning(msg)
    # Log the duration and output file
    duration = time.time() - start_time
    log.info('Energy scan took %d sec and saved in file %s',
             duration, hdf_filename)
    return txm
def main():
    """Script entry point: read ``variableDict`` and launch the energy scan."""
    # Merge any command-line overrides into the variable dictionary
    update_variable_dict(variableDict)
    # Configure logging; choices are DEBUG, INFO, WARNING, ERROR, CRITICAL
    log_level = variableDict['Log_Level']
    loggingConfig(level=log_level)
    # In-beam and out-of-beam sample positions as (x, y, z, θ) tuples
    in_keys = ('SampleXIn', 'SampleYIn', 'SampleZIn', 'SampleRotIn')
    out_keys = ('SampleXOut', 'SampleYOut', 'SampleZOut', 'SampleRotOut')
    sample_pos = tuple(variableDict.get(key, None) for key in in_keys)
    out_pos = tuple(variableDict.get(key, None) for key in out_keys)
    # Build the requested array of energies
    energy_limits = parse_list_variable(variableDict['Energy_limits'], dtype=float)
    energy_steps = parse_list_variable(variableDict['Energy_Step'], dtype=float)
    energies = energy_range_from_points(energy_points=energy_limits,
                                        energy_steps=energy_steps)
    # Linearly interpolate the zone-plate x drift across the energy range
    ZP_X_drift = float(variableDict['ZP_X_drift'])
    ZP_X_drift_array = (energies - energies[0]) * ZP_X_drift / (energies[-1] - energies[0])
    # Remaining scan options
    sleep_min = float(variableDict.get('StartSleep_min', 0))  # minutes
    stabilize_sleep_ms = float(variableDict.get("StabilizeSleep_ms"))
    repetitions = int(variableDict['Repetitions'])
    pause = float(variableDict['Pause'])
    constant_mag = bool(variableDict['constant_mag'])
    use_fast_shutter = bool(int(variableDict['Use_Fast_Shutter']))
    # Optional delayed start (min * 60 = sec)
    if sleep_min > 0:
        log.debug("Sleeping for %f min", sleep_min)
        time.sleep(sleep_min * 60.0)
    # Start the energy scan
    run_energy_scan(
        energies=energies, has_permit=SHUTTER_PERMIT,
        exposure=float(variableDict['ExposureTime']),
        n_pre_dark=int(variableDict['PreDarkImages']),
        sample_pos=sample_pos,
        out_pos=out_pos,
        stabilize_sleep_ms=stabilize_sleep_ms,
        ZP_X_drift_array=ZP_X_drift_array,
        constant_mag=constant_mag,
        repetitions=repetitions,
        pause=pause,
        log_level=log_level,
        use_fast_shutter=use_fast_shutter,
    )
# Run the energy scan when executed as a script.
if __name__ == '__main__':
    main()
| 40.28 | 87 | 0.642337 | 1,578 | 12,084 | 4.723701 | 0.220532 | 0.008452 | 0.022538 | 0.027904 | 0.187416 | 0.151462 | 0.118326 | 0.098739 | 0.074054 | 0.074054 | 0 | 0.014075 | 0.270937 | 12,084 | 299 | 88 | 40.414716 | 0.831782 | 0.328037 | 0 | 0.158192 | 0 | 0 | 0.155816 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022599 | false | 0 | 0.056497 | 0.00565 | 0.090395 | 0.011299 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d02c486ba87d6a7adf60c5da4f6c32ac5c73634 | 308 | py | Python | Section 1: Linux, Virtual Machines, Docker/home_exercise_4_-_flask/IMDB/data_test.py | MarkVoitov/DS_ABB | 130ba32fe7187d41fee5651800b965211d1ccaa9 | [
"MIT"
] | null | null | null | Section 1: Linux, Virtual Machines, Docker/home_exercise_4_-_flask/IMDB/data_test.py | MarkVoitov/DS_ABB | 130ba32fe7187d41fee5651800b965211d1ccaa9 | [
"MIT"
] | null | null | null | Section 1: Linux, Virtual Machines, Docker/home_exercise_4_-_flask/IMDB/data_test.py | MarkVoitov/DS_ABB | 130ba32fe7187d41fee5651800b965211d1ccaa9 | [
"MIT"
] | null | null | null | from contextlib import closing
import sqlite3
def query(db_name, sql):
    """Execute *sql* against the SQLite database *db_name*, inside a
    transaction, and print all resulting rows."""
    with closing(sqlite3.connect(db_name)) as con:
        # ``with con`` commits on success and rolls back on error.
        with con:
            with closing(con.cursor()) as cur:
                cur.execute(sql)
                rows = cur.fetchall()
                print(rows)
# Smoke test: dump the first 200 rows of title_basics when run directly.
if __name__ == '__main__':
    query("IMDB.db","""SELECT * FROM title_basics LIMIT 200""")
| 23.692308 | 61 | 0.672078 | 43 | 308 | 4.55814 | 0.627907 | 0.061224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01992 | 0.185065 | 308 | 12 | 62 | 25.666667 | 0.760956 | 0 | 0 | 0 | 0 | 0 | 0.165584 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.333333 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d0439f0057cae94a572c82de4fa0e43b8aad68d | 4,551 | py | Python | scripts/frontend/page_components/InformationBlock.py | MichaelLapshin/Virtual-Hand-Application | 7c27317feae3d54fd10e616858ab0ab79bda6338 | [
"MIT"
] | 1 | 2021-08-31T05:22:04.000Z | 2021-08-31T05:22:04.000Z | scripts/frontend/page_components/InformationBlock.py | MichaelLapshin/Virtual-Hand-Application | 7c27317feae3d54fd10e616858ab0ab79bda6338 | [
"MIT"
] | null | null | null | scripts/frontend/page_components/InformationBlock.py | MichaelLapshin/Virtual-Hand-Application | 7c27317feae3d54fd10e616858ab0ab79bda6338 | [
"MIT"
] | null | null | null | import tkinter
import tkinter.font
from scripts import General, Parameters, Constants
from scripts.frontend.custom_widgets.CustomLabels import InformationLabel
from scripts.frontend.custom_widgets.WidgetInterface import WidgetInterface
TITLE_FONT_SIZE = 8
class Frame(tkinter.Frame, WidgetInterface):
    """A bordered frame holding a ``num_columns`` x ``num_rows`` grid of
    InformationLabel cells, with an optional title bar above the grid.

    Cell contents and formatting are changed through the ``set_*`` and
    ``add_info`` helpers, addressed by (column, row).
    """

    def __init__(self, root, num_columns, num_rows,
                 title=None, column=0, row=0, columnspan=1, rowspan=1):
        """Create the frame inside *root*.

        :param root: parent tkinter widget.
        :param num_columns: number of label columns (must be > 0).
        :param num_rows: number of label rows (must be > 0).
        :param title: optional title-bar text; no title bar when None.
        :param column/row/columnspan/rowspan: placement of this frame in
            the parent's grid.
        """
        # Asserts dimensions
        assert num_columns > 0 and num_rows > 0
        self.num_columns = num_columns
        self.num_rows = num_rows
        # Saves the colour (set later via set_frame_colour/set_label_colour)
        self.frame_colour = None
        self.label_colour = None
        # Creates self frame
        tkinter.Frame.__init__(self, root, relief=tkinter.RIDGE, bd=1)
        self.grid(column=column, row=row,
                  columnspan=columnspan, rowspan=rowspan,
                  padx=Constants.STANDARD_SPACING, pady=Constants.STANDARD_SPACING,
                  sticky=tkinter.NSEW)
        # Configure weights: row 1 (the info grid) absorbs resize space
        self.columnconfigure(0, weight=1)
        self.rowconfigure(1, weight=1)
        # Creates title bar (row 0) only when a title was supplied
        self.titlebar = None
        if title is not None:
            self.titlebar = InformationLabel(self, text=title, column=0, row=0)
            self.titlebar.config(padx=Constants.STANDARD_SPACING, pady=Constants.STANDARD_SPACING)
            self.titlebar.config(font=TITLE_FONT_SIZE)
            self.titlebar.grid(padx=Constants.STANDARD_SPACING, pady=Constants.STANDARD_SPACING)
        # Inner frame that hosts the dynamic number of columns
        self.info_frame = tkinter.Frame(self, relief=tkinter.RIDGE)
        self.info_frame.grid(column=0, row=1)
        self.info_frame.grid(padx=Constants.STANDARD_SPACING, pady=Constants.STANDARD_SPACING)
        self.info_frame.grid(sticky=tkinter.NSEW)
        # Configure info frame weights so columns share width equally
        for x in range(0, num_columns):
            self.info_frame.columnconfigure(x, weight=1)
        # Creates the info spaces; indexed as info_spaces[row][column]
        self.info_spaces = []  # spaces[x, y] = position
        for y in range(0, num_rows):
            self.info_spaces.append([])
            for x in range(0, num_columns):
                widget = InformationLabel(self.info_frame, column=x, row=y)
                # widget.config(bg=General.washed_colour_hex(label_colour, Parameters.ColourGrad_C))
                self.info_spaces[y].append(widget)

    def update_colour(self):
        """Re-apply the current frame/label colours to this frame, the
        title bar, and every cell."""
        super().update_colour()
        if self.titlebar is not None:
            self.titlebar.update_colour()
        for y in range(0, len(self.info_spaces)):
            for x in range(0, len(self.info_spaces[y])):
                self.info_spaces[y][x].update_colour()
        self.config(bg=General.washed_colour_hex(self.frame_colour, Parameters.ColourGrad_B))
        self.info_frame.config(bg=General.washed_colour_hex(self.frame_colour, Parameters.ColourGrad_B))
        if self.titlebar is not None:
            self.titlebar.config(bg=General.washed_colour_hex(self.label_colour, Parameters.ColourGrad_D))

    def set_frame_colour(self, colour):
        # Stored only; takes effect on the next update_colour() call.
        self.frame_colour = colour

    def set_label_colour(self, colour):
        # Stored only; takes effect on the next update_colour() call.
        self.label_colour = colour

    # Functionality Methods
    def assert_within_grid(self, column, row):
        """Assert that (column, row) addresses a cell inside the grid."""
        assert column >= 0 and row >= 0
        assert column < self.num_columns and row < self.num_rows

    def set_info(self, column, row, text):
        """Replace the text of the cell at (column, row)."""
        self.assert_within_grid(column, row)
        self.info_spaces[row][column].config(text=text)

    def set_font(self, column, row, font_size):
        """Set the font size of the cell at (column, row)."""
        self.assert_within_grid(column, row)
        font = tkinter.font.Font(size=font_size)
        self.info_spaces[row][column].config(font=font)

    def set_justify(self, column, row, justify):
        """Set multi-line text justification for the cell at (column, row)."""
        self.assert_within_grid(column, row)
        self.info_spaces[row][column].config(justify=justify)

    def set_column_weight(self, column, weight):
        """Set the grid resize weight of an info-frame column."""
        self.info_frame.columnconfigure(column, weight=weight)

    def set_row_weight(self, row, weight):
        """Set the grid resize weight of an info-frame row."""
        self.info_frame.rowconfigure(row, weight=weight)

    def set_anchor(self, column, row, anchor):
        """Set the text anchor of the cell at (column, row)."""
        # NOTE(review): unlike the other cell setters, this one does not
        # call assert_within_grid — confirm whether that is intentional.
        self.info_spaces[row][column].config(anchor=anchor)

    def add_info(self, column, row, text):
        """Append *text* to the existing text of the cell at (column, row)."""
        self.assert_within_grid(column, row)
        if self.info_spaces[row][column]["text"] is not None:
            self.info_spaces[row][column].config(text=str(self.info_spaces[row][column]["text"]) + str(text))
        else:
            self.info_spaces[row][column].config(text=str(text))
| 37.925 | 109 | 0.667985 | 594 | 4,551 | 4.93771 | 0.161616 | 0.062734 | 0.066826 | 0.046369 | 0.378111 | 0.342312 | 0.28401 | 0.239004 | 0.171497 | 0.108762 | 0 | 0.006849 | 0.230059 | 4,551 | 119 | 110 | 38.243697 | 0.830194 | 0.06548 | 0 | 0.101266 | 0 | 0 | 0.001886 | 0 | 0 | 0 | 0 | 0 | 0.101266 | 1 | 0.151899 | false | 0 | 0.063291 | 0 | 0.227848 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d04634957ca46a9855555b61ec6eb3e8a8ca0fd | 2,320 | py | Python | tests/st/ops/gpu/test_softmax_cross_entropy_with_logits_op.py | unseenme/mindspore | 4ba052f0cd9146ac0ccc4880a778706f1b2d0af8 | [
"Apache-2.0"
] | 2 | 2020-04-28T03:49:10.000Z | 2020-04-28T03:49:13.000Z | tests/st/ops/gpu/test_softmax_cross_entropy_with_logits_op.py | liyong126/mindspore | 930a1fb0a8fa9432025442c4f4732058bb7af592 | [
"Apache-2.0"
] | 7 | 2020-03-30T08:31:56.000Z | 2020-04-01T09:54:39.000Z | tests/st/ops/gpu/test_softmax_cross_entropy_with_logits_op.py | liyong126/mindspore | 930a1fb0a8fa9432025442c4f4732058bb7af592 | [
"Apache-2.0"
] | 1 | 2020-03-30T17:07:43.000Z | 2020-03-30T17:07:43.000Z | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
from mindspore import Tensor
import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
class NetSoftmaxCrossEntropyWithLogits(nn.Cell):
    """Thin Cell wrapper around ``nn.SoftmaxCrossEntropyWithLogits``."""

    def __init__(self):
        super(NetSoftmaxCrossEntropyWithLogits, self).__init__()
        # sparse=False: labels are dense one-hot vectors, not class indices
        self.loss = nn.SoftmaxCrossEntropyWithLogits(sparse=False)

    def construct(self, logits, labels):
        """Return the per-sample cross-entropy loss."""
        return self.loss(logits, labels)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_softmax_cross_entropy_with_logits():
    """Check SoftmaxCrossEntropyWithLogits on GPU against a precomputed
    reference loss, in both graph and pynative execution modes.

    The two modes previously duplicated the identical body verbatim;
    they now share one loop.
    """
    logits = Tensor(np.array([[1, 1, 10],
                              [1, 10, 1],
                              [10, 1, 1]]).astype(np.float32))
    # One-hot labels matching the dominant logit in each row
    labels = Tensor(np.array([[0, 0, 1],
                              [0, 1, 0],
                              [1, 0, 0]]).astype(np.float32))
    expect_loss = [0.00024673, 0.00024673, 0.00024673]
    error0 = 1.0e-6
    for mode in (context.GRAPH_MODE, context.PYNATIVE_MODE):
        context.set_context(mode=mode, device_target='GPU')
        softmax_cross_entropy_with_logits = NetSoftmaxCrossEntropyWithLogits()
        output = softmax_cross_entropy_with_logits(logits, labels)
        diff0 = output.asnumpy() - expect_loss
        assert np.all(abs(diff0) < error0)
| 39.322034 | 78 | 0.693534 | 296 | 2,320 | 5.290541 | 0.435811 | 0.038314 | 0.060664 | 0.073436 | 0.311622 | 0.265006 | 0.242656 | 0.242656 | 0.242656 | 0.242656 | 0 | 0.040676 | 0.184052 | 2,320 | 58 | 79 | 40 | 0.786582 | 0.275 | 0 | 0.27027 | 0 | 0 | 0.003599 | 0 | 0 | 0 | 0 | 0 | 0.054054 | 1 | 0.081081 | false | 0 | 0.216216 | 0.027027 | 0.351351 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d058146fd52903447fe36684e99aa705f6fd6f5 | 2,747 | py | Python | charts.py | glichfalls/covid-19-charts | 52e9b78632388d3459bdc04f91513c00eaee000c | [
"MIT"
] | null | null | null | charts.py | glichfalls/covid-19-charts | 52e9b78632388d3459bdc04f91513c00eaee000c | [
"MIT"
] | null | null | null | charts.py | glichfalls/covid-19-charts | 52e9b78632388d3459bdc04f91513c00eaee000c | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
from db import DatabaseConnection
from datetime import datetime, timedelta
class Charts:
    """Render COVID-19 charts with matplotlib from a DatabaseConnection.

    NOTE(review): the DB accessors are assumed to return rows shaped as
    (date_str, cases, deaths) / (name_or_id, value) based on the indexing
    below — confirm against the DatabaseConnection implementation.
    """

    def __init__(self, db: DatabaseConnection):
        # Database handle used by all chart methods.
        self._db = db

    # get pie chart of the total cases by continent and of the given date
    def show_pie_of_total_cases_by_continent(self, date: str = None):
        """Show a pie chart of total cases per continent for *date*
        ('%Y-%m-%d' string); defaults to yesterday when None."""
        if date is None:
            date = datetime.strftime(datetime.now() - timedelta(1), '%Y-%m-%d')
        # load continent labels
        labels = [country[1] for country in self._db.get_continents()]
        # load cases by continent
        sizes = [case[1] for case in self._db.get_continent_cases(date)]
        fig1, ax1 = plt.subplots()
        ax1.pie(sizes, labels=labels, autopct='%1.1f%%')
        ax1.axis('equal')  # keep the pie circular
        plt.show()

    def show_line_total_cases(self):
        """Show a line chart of worldwide total cases, deaths, and
        vaccinations over time."""
        cases = self._db.get_total_cases()
        vaccines = self._db.get_total_vaccinations()
        # x axis: dates parsed from the cases rows
        # NOTE(review): assumes vaccines has the same length/dates as cases
        labels = [datetime.strptime(x[0], '%Y-%m-%d') for x in cases]
        plt.plot(labels, [x[1] for x in cases])
        plt.plot(labels, [x[2] for x in cases])
        plt.plot(labels, [x[1] for x in vaccines])
        plt.title('total worldwide cases')
        plt.gcf().autofmt_xdate()
        # Engineering notation (k/M) on the y axis, one tick per 2 months
        plt.gca().yaxis.set_major_formatter(ticker.EngFormatter())
        plt.gca().xaxis.set_major_locator(mdates.MonthLocator(interval=2))
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
        plt.grid(True)
        plt.show()

    def show_case_death_chart(self):
        """Show cases (left axis, red) and deaths (right axis, blue)
        over time on a twin-axis line chart. Axis labels are German."""
        data = self._db.get_total_cases()
        date = [datetime.strptime(x[0], '%Y-%m-%d') for x in data]
        cases = [x[1] for x in data]
        deaths = [x[2] for x in data]
        fig, ax1 = plt.subplots()
        color = 'tab:red'
        ax1.set_xlabel('Datum')
        ax1.set_ylabel('Fälle', color=color)
        ax1.plot(date, cases, color=color)
        ax1.tick_params(axis='y', labelcolor=color)
        plt.gca().yaxis.set_major_formatter(ticker.EngFormatter())
        ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
        color = 'tab:blue'
        ax2.set_ylabel('Gestorbene', color=color)  # we already handled the x-label with ax1
        ax2.plot(date, deaths, color=color)
        ax2.tick_params(axis='y', labelcolor=color)
        plt.gcf().autofmt_xdate()
        plt.gca().yaxis.set_major_formatter(ticker.EngFormatter())
        plt.gca().xaxis.set_major_locator(mdates.MonthLocator(interval=2))
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
        plt.grid(True)
        fig.tight_layout()  # otherwise the right y-label is slightly clipped
        plt.show()
| 37.121622 | 92 | 0.636695 | 386 | 2,747 | 4.393782 | 0.305699 | 0.024764 | 0.024764 | 0.033019 | 0.380896 | 0.348467 | 0.348467 | 0.31309 | 0.274764 | 0.274764 | 0 | 0.013783 | 0.234074 | 2,747 | 73 | 93 | 37.630137 | 0.7923 | 0.092829 | 0 | 0.254545 | 0 | 0 | 0.041868 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072727 | false | 0 | 0.090909 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d06db36083ac9ceb764551230b0138b99515c1f | 4,045 | py | Python | soccer3d/soccerdepth/models/encoder_decoder.py | ngerstle/soccerontable | 25426ff0f8fe0ce008b99c5c0fdbb35091d8d92c | [
"BSD-2-Clause"
] | 465 | 2018-05-18T04:43:40.000Z | 2022-02-27T03:09:25.000Z | soccer3d/soccerdepth/models/encoder_decoder.py | ngerstle/soccerontable | 25426ff0f8fe0ce008b99c5c0fdbb35091d8d92c | [
"BSD-2-Clause"
] | 18 | 2018-06-20T15:03:10.000Z | 2021-05-05T04:33:38.000Z | soccer3d/soccerdepth/models/encoder_decoder.py | ngerstle/soccerontable | 25426ff0f8fe0ce008b99c5c0fdbb35091d8d92c | [
"BSD-2-Clause"
] | 97 | 2018-05-03T09:12:30.000Z | 2022-01-25T12:49:33.000Z | import torch
import torch.nn as nn
class G(nn.Module):
    """Encoder-decoder generator with skip connections (U-Net style).

    Seven stride-2 4x4 convolutions halve a 128x128 input down to 1x1;
    seven transposed convolutions mirror the path back up, with each
    decoder stage concatenated (``torch.cat``) to the matching encoder
    feature map, which is why dconv2..dconv6 take ``... * 2`` channels.
    Output is passed through LogSoftmax over the channel dimension.
    """

    def __init__(self, input_nc, output_nc, ngf=64):
        """
        :param input_nc: number of input channels.
        :param output_nc: number of output channels (classes).
        :param ngf: base number of generator filters.
        """
        super(G, self).__init__()
        # Encoder: each conv is kernel 4, stride 2, padding 1 (halves H and W)
        # 128 x 128
        self.conv1 = nn.Conv2d(input_nc, ngf, 4, 2, 1)
        # 64 x 64 x1
        self.conv2 = nn.Conv2d(ngf, ngf*2, 4, 2, 1)
        # 32 x 32 x 2
        self.conv3 = nn.Conv2d(ngf*2, ngf*4, 4, 2, 1)
        # 16 x 16 x 4
        self.conv4 = nn.Conv2d(ngf*4, ngf*8, 4, 2, 1)
        # 8 x 8 x 8
        self.conv5 = nn.Conv2d(ngf*8, ngf*8, 4, 2, 1)
        # 4 x 4 x 8 (skip to dconv2 input)
        self.conv6 = nn.Conv2d(ngf*8, ngf*8, 4, 2, 1)
        # 2 x 2 x 8 (skip to dconv1 output)
        self.conv7 = nn.Conv2d(ngf*8, ngf*8, 4, 2, 1)
        # 1 x 1 bottleneck
        # Decoder: transposed convs double H and W; inputs marked * 2 take
        # the concatenation of the previous decoder stage and its encoder skip
        self.dconv1 = nn.ConvTranspose2d(ngf*8, ngf*8, 4, 2, 1)
        # 2 x 2 x 8
        self.dconv2 = nn.ConvTranspose2d(ngf*8 * 2, ngf*8, 4, 2, 1)
        # 4 x 4 x 8
        self.dconv3 = nn.ConvTranspose2d(ngf*8 * 2, ngf*8, 4, 2, 1)
        # 8 x 8 x 8
        self.dconv4 = nn.ConvTranspose2d(ngf*8 * 2, ngf*4, 4, 2, 1)
        # 16 x 16
        self.dconv5 = nn.ConvTranspose2d(ngf*4 * 2, ngf*2, 4, 2, 1)
        # 32 x 32
        self.dconv6 = nn.ConvTranspose2d(ngf*2 * 2, ngf, 4, 2, 1)
        # 64 x 64
        self.dconv7 = nn.ConvTranspose2d(ngf, output_nc, 4, 2, 1)
        # 128 x 128
        self.batch_norm = nn.BatchNorm2d(ngf)
        self.batch_norm2 = nn.BatchNorm2d(ngf*2)
        self.batch_norm4 = nn.BatchNorm2d(ngf*4)
        self.batch_norm8 = nn.BatchNorm2d(ngf*8)
        self.leaky_relu = nn.LeakyReLU(0.2, True)
        self.relu = nn.ReLU(True)
        self.dropout = nn.Dropout(0.5)
        # tanh/sigmoid are unused alternatives kept for the commented-out
        # output heads at the end of forward()
        self.tanh = nn.Tanh()
        # NOTE(review): nn.LogSoftmax() with no dim relies on the legacy
        # implicit-dim behavior (warns on modern torch) — confirm dim=1
        self.log_softmax = nn.LogSoftmax()
        self.sigmoid = nn.Sigmoid()

    def forward(self, input):
        """Run the encoder-decoder; returns log-probabilities with
        ``output_nc`` channels at the input's spatial resolution."""
        # Encoder
        # Convolution layers:
        # input is (nc) x 128 x 128
        e1 = self.conv1(input)
        # state size is (ngf) x 64 x 64
        e2 = self.batch_norm2(self.conv2(self.leaky_relu(e1)))
        # state size is (ngf x 2) x 32 x 32
        e3 = self.batch_norm4(self.conv3(self.leaky_relu(e2)))
        # state size is (ngf x 4) x 16 x 16
        e4 = self.batch_norm8(self.conv4(self.leaky_relu(e3)))
        # state size is (ngf x 8) x 8 x 8
        e5 = self.batch_norm8(self.conv5(self.leaky_relu(e4)))
        # state size is (ngf x 8) x 4 x 4
        e6 = self.batch_norm8(self.conv6(self.leaky_relu(e5)))
        # state size is (ngf x 8) x 2 x 2
        # No batch norm on output of Encoder
        e7 = self.conv7(self.leaky_relu(e6))
        # Decoder
        # Deconvolution layers (first three carry dropout):
        # state size is (ngf x 8) x 1 x 1
        d1_ = self.dropout(self.batch_norm8(self.dconv1(self.relu(e7))))
        # state size is (ngf x 8) x 2 x 2; concat encoder skip e6 on channels
        d1 = torch.cat((d1_, e6), 1)
        d2_ = self.dropout(self.batch_norm8(self.dconv2(self.relu(d1))))
        # state size is (ngf x 8) x 4 x 4
        d2 = torch.cat((d2_, e5), 1)
        d3_ = self.dropout(self.batch_norm8(self.dconv3(self.relu(d2))))
        # state size is (ngf x 8) x 8 x 8
        d3 = torch.cat((d3_, e4), 1)
        d4_ = self.batch_norm4(self.dconv4(self.relu(d3)))
        # state size is (ngf x 8) x 16 x 16
        d4 = torch.cat((d4_, e3), 1)
        d5_ = self.batch_norm2(self.dconv5(self.relu(d4)))
        # state size is (ngf x 4) x 32 x 32
        d5 = torch.cat((d5_, e2), 1)
        d6 = self.batch_norm(self.dconv6(self.relu(d5)))
        # state size is (ngf x 2) x 64 x 64
        # d6 = torch.cat((d6_, e1), 1)
        d7 = self.dconv7(self.relu(d6))
        # state size is (ngf) x 128 x 128
        # output = self.tanh(d7)
        output = self.log_softmax(d7)
        # output = self.sigmoid(d7)
        # output = d7
        return output
1d07976c7f74517929e92feef1305db889f52a18 | 264 | py | Python | slotter/__init__.py | saurabh-hirani/slotter | a6d9dcacb61b5e1111e383855d181ef782ea494e | [
"MIT"
] | 5 | 2016-10-21T06:58:30.000Z | 2016-11-22T16:12:07.000Z | slotter/__init__.py | saurabh-hirani/slotter | a6d9dcacb61b5e1111e383855d181ef782ea494e | [
"MIT"
] | null | null | null | slotter/__init__.py | saurabh-hirani/slotter | a6d9dcacb61b5e1111e383855d181ef782ea494e | [
"MIT"
] | 1 | 2020-02-27T03:36:01.000Z | 2020-02-27T03:36:01.000Z | """
Slotter
Slotter is used to slot elements in buckets
"""
from .version import __version__
__title__ = 'slotter'
__author__ = 'Saurabh Hirani'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Saurabh Hirani'
from .slotter import Slotter
create = Slotter
| 15.529412 | 47 | 0.757576 | 31 | 264 | 5.806452 | 0.645161 | 0.144444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0181 | 0.162879 | 264 | 16 | 48 | 16.5 | 0.79638 | 0.19697 | 0 | 0 | 0 | 0 | 0.259804 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d0814786340bf7cae3bfaf13859f75091f59cb9 | 941 | py | Python | pytglib/api/types/chat_type_supergroup.py | iTeam-co/pytglib | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 6 | 2019-10-30T08:57:27.000Z | 2021-02-08T14:17:43.000Z | pytglib/api/types/chat_type_supergroup.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 1 | 2021-08-19T05:44:10.000Z | 2021-08-19T07:14:56.000Z | pytglib/api/types/chat_type_supergroup.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 5 | 2019-12-04T05:30:39.000Z | 2021-05-21T18:23:32.000Z |
from ..utils import Object
class ChatTypeSupergroup(Object):
    """
    A supergroup (a chat with up to GetOption("supergroup_max_size") other
    users) or a channel (with an unlimited number of members).

    Attributes:
        ID (:obj:`str`): ``ChatTypeSupergroup``

    Args:
        supergroup_id (:obj:`int`):
            Supergroup or channel identifier
        is_channel (:obj:`bool`):
            True, if the supergroup is a channel

    Returns:
        ChatType

    Raises:
        :class:`telegram.Error`
    """
    ID = "chatTypeSupergroup"

    def __init__(self, supergroup_id, is_channel, **kwargs):
        self.supergroup_id = supergroup_id  # int
        self.is_channel = is_channel  # bool

    @staticmethod
    def read(q: dict, *args) -> "ChatTypeSupergroup":
        """Build an instance from a raw TDLib result dict."""
        return ChatTypeSupergroup(q.get('supergroup_id'), q.get('is_channel'))
| 25.432432 | 124 | 0.624867 | 105 | 941 | 5.409524 | 0.47619 | 0.147887 | 0.073944 | 0.110915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.268863 | 941 | 36 | 125 | 26.138889 | 0.825581 | 0.428268 | 0 | 0 | 0 | 0 | 0.124211 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.545455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d09e8d254b0e97154810e8229ddfdd861fe0345 | 12,370 | py | Python | utils/logger_utils.py | sunhailin-Leo/TeamLeoX_BlogsCrawler | 389ff31e02bdff415c8bc470a3a48da1acb14c4c | [
"MIT"
] | null | null | null | utils/logger_utils.py | sunhailin-Leo/TeamLeoX_BlogsCrawler | 389ff31e02bdff415c8bc470a3a48da1acb14c4c | [
"MIT"
] | null | null | null | utils/logger_utils.py | sunhailin-Leo/TeamLeoX_BlogsCrawler | 389ff31e02bdff415c8bc470a3a48da1acb14c4c | [
"MIT"
] | null | null | null | import os
import sys
import logging
from typing import Dict
from concurrent_log_handler import ConcurrentRotatingFileHandler
LOG_NAME: str = "service.log"
LOG_LEVEL_INT: int = 20
formatter_dict = {
1: logging.Formatter(
"日志时间【%(asctime)s】 - 日志名称【%(name)s】 - 文件【%(filename)s】 - "
"第【%(lineno)d】行 - 日志等级【%(levelname)s】 - 日志信息【%(message)s】",
"%Y-%m-%d %H:%M:%S",
),
2: logging.Formatter(
"%(asctime)s - %(name)s - %(filename)s - %(funcName)s - "
"%(lineno)d - %(levelname)s - %(message)s",
"%Y-%m-%d %H:%M:%S",
),
3: logging.Formatter(
'%(asctime)s - %(name)s - 【File "%(pathname)s", '
"line %(lineno)d, in %(funcName)s】 - %(levelname)s - %(message)s",
"%Y-%m-%d %H:%M:%S",
),
# 一个模仿 traceback 异常的可跳转到打印日志地方的模板
4: logging.Formatter(
'%(asctime)s - %(name)s - "%(filename)s" - %(funcName)s - %(lineno)d - '
'%(levelname)s - %(message)s - File "%(pathname)s", line %(lineno)d ',
"%Y-%m-%d %H:%M:%S",
),
# 支持日志跳转
5: logging.Formatter(
'%(asctime)s - %(name)s - "%(pathname)s:%(lineno)d" - '
"%(funcName)s - %(levelname)s - %(message)s",
"%Y-%m-%d %H:%M:%S",
),
# 推荐模板
6: logging.Formatter(
"%(name)s - %(asctime)-15s - %(filename)s - %(lineno)d - "
"%(levelname)s: %(message)s",
"%Y-%m-%d %H:%M:%S",
),
# 一个只显示简短文件名和所处行数的日志模板
7: logging.Formatter("%(levelname)s - %(filename)s - %(lineno)d - %(message)s"),
# uvicorn default 的 formatters -- without logger.Formatter
8: '%(asctime)s - %(name)s - "%(pathname)s:%(lineno)d" - %(funcName)s - '
"%(levelname)s - %(message)s",
# uvicorn access 的 foramtters -- without logger.Formatter
9: '%(asctime)s - %(name)s - "%(pathname)s:%(lineno)d" - %(funcName)s - '
'%(levelname)s - %(client_addr)s - "%(request_line)s" - %(status_code)s',
}
class LogLevelException(Exception):
    """Raised when an unsupported logging level value is configured."""

    def __init__(self, log_level):
        # Message text (Chinese) kept identical to the original wording.
        message = "设置的日志级别是 {0},设置错误,请设置为 1 2 3 4 5 范围的数字".format(log_level)
        super().__init__(message)
class ColorHandler(logging.Handler):
    """Stream handler that wraps each record in an ANSI color escape
    sequence chosen by level: DEBUG green, INFO blue/cyan, WARNING
    yellow, ERROR red, CRITICAL blue."""

    # Windows consoles get the bright (9x) variants of the ANSI codes.
    blue = 96 if os.name == "nt" else 36
    yellow = 93 if os.name == "nt" else 33

    def __init__(self, stream=None):
        """Initialize the handler; defaults to sys.stdout (stderr would
        render without color)."""
        logging.Handler.__init__(self)
        self.stream = sys.stdout if stream is None else stream

    def flush(self):
        """Flush the underlying stream while holding the handler lock."""
        self.acquire()
        try:
            target = self.stream
            if target and hasattr(target, "flush"):
                target.flush()
        finally:
            self.release()

    def emit(self, record: logging.LogRecord):
        """Format *record*, colorize it by level, and write it.

        ANSI foreground codes: 30 black, 31 red, 32 green, 33 yellow,
        34 blue, 35 magenta, 36 cyan, 37 white (4x are backgrounds).
        """
        try:
            text = self.format(record)
            palette = {
                10: "\033[0;32m%s\033[0m" % text,
                20: "\033[0;%sm%s\033[0m" % (self.blue, text),
                30: "\033[0;%sm%s\033[0m" % (self.yellow, text),
                40: "\033[0;31m%s\033[0m" % text,
                50: "\033[0;34m%s\033[0m" % text,
            }
            # Unknown levels fall back to the uncolored message.
            colored = palette.get(record.levelno, text)
            out = self.stream
            out.write(colored)
            out.write("\n")
            self.flush()
        except Exception as err:
            print(err)
            self.handleError(record)

    def __repr__(self):
        level_name = logging.getLevelName(self.level)
        stream_name = getattr(self.stream, "name", "")
        if stream_name:
            stream_name += " "
        return "<%s %s(%s)>" % (self.__class__.__name__, stream_name, level_name)
class LogManager(object):
"""一个日志管理类,用于创建 logger 和添加 handler,支持将日志打印到控制台打印和写入日志文件和邮件。"""
logger_name_list: list = []
logger_list: list = []
    def __init__(self, logger_name=None):
        """
        :param logger_name: logger name; when None, the logger of the root
                            namespace is created. In general do not pass
                            None unless you are sure that is what you need.
        """
        self._logger_name = logger_name
        self.logger = logging.getLogger(logger_name)
        # The remaining fields are populated by get_logger_and_add_handlers().
        self._logger_level = None
        self._is_add_stream_handler = None
        self._do_not_use_color_handler = None
        self._log_path = None
        self._log_filename = None
        self._log_file_size = None
        self._formatter = None

    # The bare * below forces keyword-only arguments: more parameters may be
    # added here over time, and positional calls would then silently mis-bind,
    # so positional use is made an error up front.
    def get_logger_and_add_handlers(
        self,
        log_level_int: int = LOG_LEVEL_INT,
        *,
        is_add_stream_handler=True,
        do_not_use_color_handler=False,
        log_path=os.getcwd() + "/log",
        log_filename=LOG_NAME,
        log_file_size=100,
        formatter_template=5,
    ):
        """Configure this manager's logger and attach handlers; returns it.

        :param log_level_int: log level; shorthand 1 2 3 4 5 maps to
               logging.DEBUG(10), logging.INFO(20), logging.WARNING(30),
               logging.ERROR(40), logging.CRITICAL(50). Raw values
               10 20 30 40 50 may also be passed directly.
        :param is_add_stream_handler: whether to also print logs to the
               console, True / False.
        :param do_not_use_color_handler: whether to disable colored (ANSI)
               console output, True / False.
        :param log_path: folder in which to store log files.
               NOTE(review): the default is evaluated once at import time
               via os.getcwd() — confirm that is intended.
        :param log_file_size: maximum log file size in MB (default 100).
        :param log_filename: log file name; the file handler is attached
               only when both log_path and log_filename are not None.
        :param formatter_template: key into formatter_dict; 1 is the
               verbose template, 2 the brief one, 5 the recommended one.
        """
        # Values below 10 are treated as 1..5 shorthand and scaled x10.
        self._logger_level = log_level_int * 10 if log_level_int < 10 else log_level_int
        self._is_add_stream_handler = is_add_stream_handler
        self._do_not_use_color_handler = do_not_use_color_handler
        self._log_path = log_path
        self._log_filename = log_filename
        self._log_file_size = log_file_size
        self._formatter = formatter_dict[formatter_template]
        self.__set_logger_level()
        self.__add_handlers()
        # Track every logger this manager family has produced (class-level).
        self.logger_name_list.append(self._logger_name)
        self.logger_list.append(self.logger)
        return self.logger
def get_logger_without_handlers(self):
"""返回一个不带 handlers 的 logger, 就是一个带红色字体的 print 输出"""
return self.logger
def look_over_all_handlers(self):
print(f"{self._logger_name}名字的日志的所有 handlers 是--> {self.logger.handlers}")
def remove_all_handlers(self):
for hd in self.logger.handlers:
self.logger.removeHandler(hd)
def remove_handler_by_handler_class(self, handler_class: type):
"""
去掉指定类型的 handler
:param handler_class: logging.StreamHandler,ColorHandler,
ConcurrentRotatingFileHandler,CompatibleSMTPSSLHandler的一种
"""
if handler_class not in (
ColorHandler,
logging.StreamHandler,
ConcurrentRotatingFileHandler,
):
raise TypeError("设置的 handler 类型不正确")
for handler in self.logger.handlers:
if isinstance(handler, handler_class):
self.logger.removeHandler(handler)
def __set_logger_level(self):
self.logger.setLevel(self._logger_level)
def __remove_handlers_from_other_logger_when_logger_name_is_none(
self, handler_class
):
"""
当 logger name 为 None 时候需要移出其他 logger 的 handler,否则重复记录日志
:param handler_class: handler 类型
"""
if self._logger_name is None:
for logger in self.logger_list:
for handler in logger.handlers:
if isinstance(handler, handler_class):
logger.removeHandler(handler)
@staticmethod
def __judge_logger_contain_handler_class(logger: logging.Logger, handler_class):
for h in logger.handlers + logging.getLogger().handlers:
if isinstance(h, (handler_class,)):
return True
def __add_handlers(self):
if self._is_add_stream_handler:
if not self.__judge_logger_contain_handler_class(self.logger, ColorHandler):
# 主要是阻止给 logger 反复添加同种类型的 handler 造成重复记录
self.__remove_handlers_from_other_logger_when_logger_name_is_none(
ColorHandler
)
self.__add_stream_handler()
if all([self._log_path, self._log_filename]):
if not self.__judge_logger_contain_handler_class(
self.logger, ConcurrentRotatingFileHandler
):
self.__remove_handlers_from_other_logger_when_logger_name_is_none(
ConcurrentRotatingFileHandler
)
self.__add_file_handler()
def __add_stream_handler(self):
"""
日志显示到控制台
"""
# stream_handler = logging.StreamHandler()
# 不使用 streamhandler,使用自定义的彩色日志
stream_handler = (
ColorHandler()
if not self._do_not_use_color_handler
else logging.StreamHandler()
)
stream_handler.setLevel(self._logger_level)
stream_handler.setFormatter(self._formatter)
self.logger.addHandler(stream_handler)
def __add_file_handler(self):
"""日志写入日志文件"""
if not os.path.exists(self._log_path):
os.mkdir(self._log_path)
log_file = os.path.join(self._log_path, self._log_filename)
rotate_file_handler = None
if os.name == "nt":
# windows 下用这个,非进程安全
rotate_file_handler = ConcurrentRotatingFileHandler(
log_file,
maxBytes=self._log_file_size * 1024 * 1024,
backupCount=3,
encoding="utf-8",
)
if os.name == "posix":
# linux 下可以使用 ConcurrentRotatingFileHandler,进程安全的日志方式
rotate_file_handler = ConcurrentRotatingFileHandler(
log_file,
maxBytes=self._log_file_size * 1024 * 1024,
backupCount=3,
encoding="utf-8",
)
rotate_file_handler.setLevel(self._logger_level)
rotate_file_handler.setFormatter(self._formatter)
self.logger.addHandler(rotate_file_handler)
class LoggerMixin(object):
    """Mixin exposing cached, per-subclass ``logger`` properties."""

    # Cache of already-built loggers, keyed by "<ClassName>1" (console only)
    # and "<ClassName>2" (console + file).  Shared across all subclasses.
    subclass_logger_dict: dict = {}

    @property
    def logger(self):
        """Console-only logger named after the concrete subclass (cached)."""
        cache_key = type(self).__name__ + "1"
        if cache_key not in self.subclass_logger_dict:
            self.subclass_logger_dict[cache_key] = LogManager(
                type(self).__name__
            ).get_logger_and_add_handlers()
        return self.subclass_logger_dict[cache_key]

    @property
    def logger_with_file(self):
        """Logger that also writes to ``<ClassName>.log`` (cached)."""
        cache_key = type(self).__name__ + "2"
        if cache_key not in self.subclass_logger_dict:
            self.subclass_logger_dict[cache_key] = LogManager(
                type(self).__name__
            ).get_logger_and_add_handlers(
                log_filename=type(self).__name__ + ".log", log_file_size=50
            )
        return self.subclass_logger_dict[cache_key]
# dictConfig-style logging configuration for uvicorn: stream handlers only,
# access records go to stdout and everything else to stderr.
UVICORN_LOGGING_CONFIG: Dict = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        # "()" selects the factory class used to build each formatter.
        "default": {
            "()": "uvicorn.logging.DefaultFormatter",
            "fmt": formatter_dict[8],
            # None -> leave the colorization decision to uvicorn -- see uvicorn docs.
            "use_colors": None,
            "datefmt": "%Y-%m-%d %H:%M:%S",
        },
        "access": {
            "()": "uvicorn.logging.AccessFormatter",
            "fmt": formatter_dict[9],
            "datefmt": "%Y-%m-%d %H:%M:%S",
        },
    },
    "handlers": {
        "default": {
            "formatter": "default",
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stderr",
        },
        "access": {
            "formatter": "access",
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stdout",
        },
    },
    "loggers": {
        "": {"handlers": ["default"], "level": "INFO"},
        "uvicorn.error": {"level": "INFO"},
        # Access logs do not propagate to the root logger, avoiding duplicates.
        "uvicorn.access": {"handlers": ["access"], "level": "INFO", "propagate": False},
    },
}
| 35.342857 | 89 | 0.576475 | 1,398 | 12,370 | 4.796853 | 0.211016 | 0.040262 | 0.003579 | 0.004772 | 0.314942 | 0.268118 | 0.219058 | 0.174172 | 0.16105 | 0.147331 | 0 | 0.021383 | 0.304365 | 12,370 | 349 | 90 | 35.444126 | 0.757931 | 0.136621 | 0 | 0.188462 | 0 | 0.026923 | 0.167584 | 0.024423 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073077 | false | 0 | 0.019231 | 0 | 0.157692 | 0.007692 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d0a33a6f3a05c3c57bcd109bd1331137ac931d0 | 1,945 | py | Python | Blogs/Automate table mappings JSON creation/Create_JSON.py | subepie/DMS_tool | de4d3b3295366bb2118b31eb5ec268a278d4a557 | [
"Apache-2.0"
] | 43 | 2017-11-09T23:01:35.000Z | 2020-09-08T14:51:41.000Z | Blogs/Automate table mappings JSON creation/Create_JSON.py | subepie/DMS_tool | de4d3b3295366bb2118b31eb5ec268a278d4a557 | [
"Apache-2.0"
] | 1 | 2019-09-11T09:54:55.000Z | 2020-05-12T11:37:44.000Z | Blogs/Automate table mappings JSON creation/Create_JSON.py | subepie/DMS_tool | de4d3b3295366bb2118b31eb5ec268a278d4a557 | [
"Apache-2.0"
] | 34 | 2017-07-19T14:37:31.000Z | 2020-08-30T00:54:01.000Z | import json
import csv
import os
# Module-level state shared by createJSON()/writeJSON():
data = {}  # the DMS table-mapping document that is serialized at the end
line_count = 0  # running rule counter across all processed csv files
data['rules'] = []  # one selection rule is appended per csv row
def writeJSON():
    """Serialize the accumulated mapping rules to 'automated_json.json'."""
    with open('automated_json.json', 'w') as outfile:
        outfile.write(json.dumps(data))
def createJSON(csvfile, action):
    """Append one DMS selection rule per csv row (schema, table).

    :param csvfile: path to a comma-separated file whose first two columns
        are the schema name and the table name.
    :param action: rule action recorded for every row ("include"/"exclude").
    """
    global line_count
    with open(csvfile) as handle:
        for row in csv.reader(handle, delimiter=','):
            # Incrementing first and then using the counter is equivalent to
            # the old "counter = line_count + 1 ... line_count += 1" dance.
            line_count += 1
            rule_id = str(line_count)
            data['rules'].append({
                "rule-type": "selection",
                "rule-id": rule_id,
                "rule-name": rule_id,
                "object-locator": {
                    "schema-name": row[0],
                    "table-name": row[1]
                },
                "rule-action": action
            })
if __name__ == "__main__":
    print("This program expects a folder location from the user. ")
    print("The folder can have 2 different types of files in csv format.")
    print("The file types are include table list and exclude table list.")
    print(" ")
    print("The file name should start with include or exclude to indicate "
          "whether the content of a particular file has to included or excluded.")
    print(" ")
    print("Both include and exclude files should contain schema name and the table name "
          "to be included or excluded separated by comma.")
    print("It is not necessary to have both include and exclude files.")
    print(" ")
    # BUG FIX: raw_input() exists only on Python 2; input() is the Python 3
    # equivalent (the rest of the script already uses Python 3 print calls).
    File_Location = input("Enter the Folder location: ")
    # os.path.join handles the platform separator, replacing the fragile
    # "does the path contain '/'" heuristic.
    listOfFiles = os.listdir(File_Location)
    for entry in listOfFiles:
        # The file name prefix decides whether its tables are included or excluded.
        if entry.startswith("include"):
            createJSON(os.path.join(File_Location, entry), "include")
        elif entry.startswith("exclude"):
            createJSON(os.path.join(File_Location, entry), "exclude")
    writeJSON()
| 32.416667 | 90 | 0.579949 | 226 | 1,945 | 4.89823 | 0.429204 | 0.054201 | 0.056911 | 0.03794 | 0.112014 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004484 | 0.312082 | 1,945 | 59 | 91 | 32.966102 | 0.82287 | 0 | 0 | 0.06 | 0 | 0 | 0.355779 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.06 | 0 | 0.1 | 0.18 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d0ece57935690eb57b92110e051ae26ccac522f | 502 | py | Python | website/events/helpers/views.py | eamanu/asoc_members | bf2e99e9c63c60a59bdfd10ca1812d78851cbde6 | [
"MIT"
] | 9 | 2019-03-24T21:56:52.000Z | 2022-03-14T04:21:48.000Z | website/events/helpers/views.py | eamanu/asoc_members | bf2e99e9c63c60a59bdfd10ca1812d78851cbde6 | [
"MIT"
] | 111 | 2018-04-30T03:26:58.000Z | 2021-12-01T13:06:24.000Z | website/events/helpers/views.py | eamanu/asoc_members | bf2e99e9c63c60a59bdfd10ca1812d78851cbde6 | [
"MIT"
] | 23 | 2018-09-06T21:39:56.000Z | 2022-03-25T15:08:52.000Z | from django.db.models import Q
import functools
import operator
def search_filtered_queryset(base_queryset, search_fields, search_value):
    """Filter ``base_queryset`` by OR-ing one lookup per entry of ``search_fields``.

    :param base_queryset: Django queryset to filter.
    :param search_fields: mapping of field name -> lookup type; the special
        value ``'equal'`` means an exact match, anything else is appended as
        a Django lookup suffix (e.g. ``'icontains'`` -> ``field__icontains``).
    :param search_value: value compared against every listed field.
    :return: the filtered queryset, or ``base_queryset`` unchanged when
        ``search_fields`` is empty (previously ``functools.reduce`` raised
        ``TypeError`` on the empty filter list).
    """
    filters = []
    for field_name, lookup in search_fields.items():
        field_filter = field_name
        if lookup != 'equal':
            field_filter = field_filter + '__' + lookup
        filters.append(Q(**{field_filter: search_value}))
    if not filters:
        return base_queryset
    return base_queryset.filter(functools.reduce(operator.or_, filters))
| 31.375 | 76 | 0.695219 | 61 | 502 | 5.442623 | 0.47541 | 0.13253 | 0.120482 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.213147 | 502 | 15 | 77 | 33.466667 | 0.840506 | 0 | 0 | 0 | 0 | 0 | 0.013944 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dfb51cb719fc893ece63b1717e0432e40ccdc7b | 1,103 | py | Python | machine_vision/yolov2_20class.py | huningxin/MaixPy_scripts | a54015f97989c46cc6b3d58f985156e619666a51 | [
"MIT"
] | 1 | 2020-05-04T15:22:53.000Z | 2020-05-04T15:22:53.000Z | machine_vision/yolov2_20class.py | huningxin/MaixPy_scripts | a54015f97989c46cc6b3d58f985156e619666a51 | [
"MIT"
] | null | null | null | machine_vision/yolov2_20class.py | huningxin/MaixPy_scripts | a54015f97989c46cc6b3d58f985156e619666a51 | [
"MIT"
] | null | null | null | #refer to http://blog.sipeed.com/p/677.html
import sensor,image,lcd,time
import KPU as kpu
# Initialise the LCD and the camera (QVGA, RGB565, vertically flipped).
lcd.init(freq=15000000)
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_vflip(1)
sensor.run(1)
clock = time.clock()
# The 20 object classes the model predicts (Pascal-VOC label set).
classes = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
# Load the YOLOv2 model stored at flash address 0x500000.
task = kpu.load(0x500000)
# 5 anchor-box (w, h) pairs passed to init_yolo2 -- values from the model's
# training configuration; presumably the standard YOLOv2-VOC anchors.
anchor = (1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52)
a = kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
while(True):
    clock.tick()
    img = sensor.snapshot()
    code = kpu.run_yolo2(task, img)
    print(clock.fps())
    if code:
        # Draw a box for each detection, display the frame, then overlay the
        # class label and confidence on the LCD.
        for i in code:
            a=img.draw_rectangle(i.rect())
        a = lcd.display(img)
        for i in code:
            lcd.draw_string(i.x(), i.y(), classes[i.classid()], lcd.RED, lcd.WHITE)
            # BUG FIX: '%f1.3' printed the full float followed by a literal
            # "1.3"; '%1.3f' formats the confidence with 3 decimals.
            lcd.draw_string(i.x(), i.y()+12, '%1.3f'%i.value(), lcd.RED, lcd.WHITE)
    else:
        a = lcd.display(img)
# Unreachable: the loop above never breaks.
a = kpu.deinit(task)
| 35.580645 | 205 | 0.602901 | 176 | 1,103 | 3.732955 | 0.568182 | 0.041096 | 0.018265 | 0.030441 | 0.05175 | 0.05175 | 0.05175 | 0 | 0 | 0 | 0 | 0.075536 | 0.19583 | 1,103 | 30 | 206 | 36.766667 | 0.665163 | 0.038078 | 0 | 0.142857 | 0 | 0 | 0.113208 | 0 | 0 | 0 | 0.007547 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dfcdef8833ed5833ba57eeec1abdbc79c20e892 | 14,393 | py | Python | DupLess.py | FadyMohareb/dupless | 60fef3e36dab619bc3c543d05dcb6d2fa359b62f | [
"MIT"
] | 1 | 2020-02-03T14:35:11.000Z | 2020-02-03T14:35:11.000Z | DupLess.py | FadyMohareb/dupless | 60fef3e36dab619bc3c543d05dcb6d2fa359b62f | [
"MIT"
] | null | null | null | DupLess.py | FadyMohareb/dupless | 60fef3e36dab619bc3c543d05dcb6d2fa359b62f | [
"MIT"
] | null | null | null | #!/usr/bin/python
# DupLess is a python script to detect and remove artifact duplications in assemblies.
# Assemblies from heterozygous genomes tend to create two contigs instead of
# one in areas of high heterozygosity. DupLess detects these regions based on
# read coverage and sequence similarity.
# Dependencies:
# python v2.7 or higher
# samtools v1.9 or higher (important for the "-o" parameter)
# bedtools v2.27 (lower version should now work)
# blastn v2.6.0+
# pandas, numpy, matplotlib, multiprocessing, getopt, biopython, sys, os, subprocess
# sed and awk
import getopt
import subprocess
import sys
import os
import detect_het_regions_from_coverage as dh
import detect_duplicates_from_het_regions as dd
import processing as proc
import utils_dupless as ud
# Package version string.  Note: a ``global`` statement at module scope is a
# no-op, so the one that used to precede this assignment was dropped.
VERSION = "0.1.0"
# Default values for the CLI parameters (overridden by the getopt block below).
window_size = 1000  # Window coverage is the median of the per-base coverages inside the window.
coverage_bed = None  # Bed file with the coverage value for each position ("bedtools coverage" output).
assembly_name = None  # Assembly in fasta format; used to extract het regions and check scaffold lengths.
expected_coverage = None  # Windows with 0 < coverage < expected_cov/1.5 are considered heterozygous.
gaps_bed = None  # Optional. Used to draw gaps as grey bars on the coverage graphs.
output_folder = "./DupLess_out/"
nbThreads = 10
blast_identity_threshold = 90  # Two regions are considered duplicated if both...
blast_length_threshold = 300  # ...blast thresholds are met (min identity and min alignment length).
blast_overlap_threshold = 90.0  # % of a contig a duplication must cover to flag it (default 90%).
skip_het_dect = False  # Skip the het-detection step when a het-regions bed is supplied (-s).
het_bed = None  # Bed of heterozygous regions (created by DupLess, or user-supplied with -s).
skip_blast = False  # Skip het detection AND pairwise blasting; only filter blast results (-f).
blast_output = None  # Blast results in outfmt 6 (user-supplied when skip_blast is True).
skip_plot = False  # Skip the generation of the coverage plots (-n).
def print_version():
    """Print the DupLess version string."""
    # Reading a module-level name needs no ``global`` declaration; the old
    # ``global VERSION`` statement here was a no-op.
    print("DupLess v" + VERSION)
def usage():
    """Print the command-line usage text (synopsis, required options,
    optional options, pipeline-skipping shortcuts, misc flags)."""
    # Synopsis.
    print("\npython DupLess.py -t [nb_threads] -b [coverage.bed] -a [assembly.fasta] -w [window_size] -c [expected_coverage] -i [min_blast_identity] -l [min_blast_length] -o [output_folder]")
    # Required options.
    print("\nRequired:")
    print("    -a/--assembly               The assembly corresponding to the bed coverage in fasta format.")
    print("")
    print("    -b/--bed_cov                The bed file containing the coverage at each base (can be generated with 'bedtools genomecov').")
    print("                                /!\ If using paired end reads: make sure that you set the -w or -l option higher than the insert size,")
    print("                                to avoid false positives due to coverage drop at the ends of contigs (because of unaligned mates).")
    # Optional options.
    print("\nOptional:")
    print("    -t/--nThreads               The number of threads (default ",nbThreads,")")
    print("    -o/--out_folder             The output folder (default '",output_folder,"')")
    print("")
    print("    -c/--expected_cov           The expected read coverage for the homozygous regions. The homozygosity / heterozygosity will be determined based on this value.")
    print("                                You can determine the value by plotting the coverage distribution. It should correspond to the homozygous peak")
    print("                                If no value is given, it will be based on the mode of the coverage distribution (not reliable if high heterozygosity).")
    print("")
    print("    -w/--window_size            The size of the windows in basepairs (default: ",window_size,")")
    print("                                The value of the coverage for each window will be the median of the coverage at each base.")
    print("                                All the windows classified as 'heterozygous' will be considered for the detection of duplication.")
    print("")
    print("    -g/--bed_gaps               A bed file containing the gaps along the genome. If given, the graphs will contain a grey background where the gaps are.")
    print("")
    print("    -i/--blast_identity         The minimum percentage of identity between the het regions to consider them duplicates (default: ",blast_identity_threshold,", range 0 to 100).")
    print("    -l/--blast_length           The blast alignments with a length lower than this threshold will be filtered (default=",blast_length_threshold,").")
    print("    -p/--blast_overlap          The contigs overlapped at least by this threshold are written to a filter list (default=",blast_overlap_threshold,").")
    print("")
    print("    -n/--no_plot                Skip the creation of all the plots")
    # Shortcuts that skip part of the pipeline.
    print("\nSkipping part of pipeline:")
    print("    -s/--skip_het_detection     Skip the detection of the heterozygous regions. If so, you must provide a bed identifying the heterozygous regions:")
    print("                                python DupLess.py -s [het_regions_bed] -t [nb_threads] -a [assembly.fasta] -i [min_blast_identity] -l [min_blast_length] -o [new_output_folder]")
    print("")
    print("    -f/--filter_blast_only      Skip the detection of the heterozygous regions AND the pairwise alignments. If so, you must provide a blast ouput with -oufmt 6:")
    print("                                python DupLess.py -f [blast_output] -t [nb_threads] -a [assembly.fasta] -i [min_blast_identity] -l [min_blast_length] -o [new_output_folder]")
    # Misc flags.
    print("\nOther:")
    print("    -h/--help                   Print the usage and help and exit.")
    print("    -v/--version                Print the version and exit.")
#=================================================================
#                            GetOpt                              =
#=================================================================
# Parse the command line and rebind the module-level defaults above.
try:
    opts, args = getopt.getopt(sys.argv[1:], "t:w:b:a:c:g:o:s:f:i:l:p:nhv", ["nThreads=", "window_size=", "bed_cov=", "assembly=", "expected_cov=", "bed_gaps=",
                                                                             "out_folder=", "skip_het_detection=", "filter_blast_only=",
                                                                             "blast_identity=", "blast_length=", "blast_overlap=", "no_plot", "help", "version"])
except getopt.GetoptError as err:
    print(str(err))
    usage()
    sys.exit(2)

for o, a in opts:
    if o in ("-t", "--nThreads"):
        nbThreads = int(a)
    elif o in ("-w", "--window_size"):
        window_size = int(a)
    elif o in ("-b", "--bed_cov"):
        coverage_bed = str(a)
    elif o in ("-a", "--assembly"):
        assembly_name = str(a)
    elif o in ("-c", "--expected_cov"):
        expected_coverage = int(a)
    elif o in ("-g", "--bed_gaps"):
        gaps_bed = str(a)
    elif o in ("-o", "--out_folder"):
        output_folder = str(a)
    # BUG FIX: the long option is registered as "skip_het_detection=" above,
    # but was matched here against "--skip_het_dect", so using the long form
    # fell through to the "unhandled option" branch and aborted.
    elif o in ("-s", "--skip_het_detection"):
        het_bed = str(a)
        skip_het_dect = True
    elif o in ("-f", "--filter_blast_only"):
        blast_output = str(a)
        skip_het_dect = True
        skip_blast = True
    elif o in ("-i", "--blast_identity"):
        blast_identity_threshold = int(a)
    elif o in ("-l", "--blast_length"):
        blast_length_threshold = float(a)
    elif o in ("-p", "--blast_overlap"):
        blast_overlap_threshold = float(a)
    elif o in ("-n", "--no_plot"):
        skip_plot = True
    elif o in ("-h", "--help"):
        usage()
        sys.exit(1)
    elif o in ("-v", "--version"):
        print_version()
        sys.exit(1)
    else:
        # getopt only yields registered options, so this should be unreachable;
        # raise instead of ``assert`` so it also fires under ``python -O``.
        raise AssertionError("Unhandled option !")
# ---- CLI sanity checks: fail fast with an explanatory message. ----
def _abort(message, show_usage=True):
    """Print *message*, optionally the usage text, then exit with status 2."""
    print(message)
    if show_usage:
        usage()
    sys.exit(2)

# The coverage bed is only required when the het-detection step runs.
if not skip_het_dect:
    file_ok, error_mssg = ud.check_file(coverage_bed)
    if not file_ok:
        _abort("Error with option -b/--bed_cov: "+error_mssg)

# Refuse to overwrite an already existing project folder (no usage text here).
if os.path.isdir(output_folder):
    _abort("\nFolder '"+output_folder+"' already exists, stopping now...\n", show_usage=False)

file_ok, error_mssg = ud.check_file(assembly_name)
if not file_ok:
    _abort("Error with option -a/--assembly: "+error_mssg)

if window_size <= 0:
    _abort("The window size can not be lower than 0 (-w/--window_size option).\n")

if nbThreads <= 0:
    _abort("The number of threads can not be lower than 0 (-t/--nThreads option).\n")

if blast_identity_threshold < 0 or blast_identity_threshold > 100:
    _abort("The blast identity treshold (-i/--blast_identity) must be between 0 and 100. Current value: "+str(blast_identity_threshold)+"\n")

if blast_length_threshold < 0:
    _abort("The blast coverage treshold (-l/--blast_length) can not be lower than 0. Current value: "+str(blast_length_threshold)+"\n")
#=================================================================
#                            Main                                =
#=================================================================
# Output folder architecture:
#   individual_beds/    bed files describing the het. regions per sequence
#   individual_blasts/  blast results for each het. region
#   graphs/             coverage graphs per sequence
#   temp/               temporary blast files
#   deduplicated/       DupLess results: deduplicated.fasta and discarded.fasta
for folder in [output_folder, output_folder+"/individual_beds", output_folder+"/graphs",
               output_folder+"/individual_blasts", output_folder+"/temp", output_folder+"/deduplicated"]:
    try:
        # os.makedirs is portable (the former subprocess call to the external
        # 'mkdir' binary was posix-only) and creates missing parents as well;
        # exist_ok is safe because the folder-already-exists case was
        # rejected during CLI validation above.
        os.makedirs(folder, exist_ok=True)
    except Exception as e:
        print("Error during mkdir "+folder)
        print("Exception:"+str(e))
        sys.exit()

# Index the assembly, needed later for the extraction of het regions.
# Also an early check that samtools is available before any heavy work starts.
ud.index_fasta_file(assembly_name)
#==================================================
#      Detection of the heterozygous regions
#==================================================
if not skip_het_dect:
    # The het-detection step needs the coverage bed; re-check it here in case
    # this block is reached through a code path that skipped CLI validation.
    file_ok, error_mssg = ud.check_file(coverage_bed)
    if file_ok:
        # Create the per-sequence beds and graphs of heterozygous regions,
        # detected from the coverage values.
        het_bed = dh.detect_het_regions(coverage_bed, gaps_bed, expected_coverage, window_size, output_folder,
                                        nbThreads, skip_plot)
    else:
        print("Error with the coverage bed file: "+error_mssg)
        usage()
        sys.exit(2)

#==================================================
#      Pairwise alignment of heterozygous regions
#==================================================
if not skip_blast:
    # Check that the het bed exists (whether created by the step above or
    # supplied by the user via -s).
    file_ok, error_mssg = ud.check_file(het_bed)
    if file_ok:
        # Pairwise blast comparisons between the detected heterozygous
        # regions to identify duplications.
        blast_output = dd.detect_dupl_regions(assembly_name, het_bed, output_folder, nbThreads)
    else:
        print("Error with the heterozygous bed file: "+error_mssg)
        usage()
        sys.exit(2)

#==================================================
#      Filtering the blast results
#==================================================
# Check that the blast output exists (whether created by the step above or
# supplied by the user via -f).
file_ok, error_mssg = ud.check_file(blast_output)
if file_ok:
    # Filter the blast hits by identity, alignment length and contig overlap.
    toRemoveBed, discardedBed, toFilterList = dd.filter_blast_results(blast_output, blast_identity_threshold,
                                                                      blast_length_threshold, blast_overlap_threshold,
                                                                      assembly_name, output_folder)
else:
    print("Error with the blast output file: "+error_mssg)
    usage()
    sys.exit(2)

#==================================================
#      Generating the output files
#==================================================
deduplicated_assembly = output_folder+"/deduplicated/deduplicated_assembly.fasta"
discarded_assembly = output_folder+"/deduplicated/discarded.fasta"
print("Generating the deduplicated fasta files from the blast results...")
proc.remove_duplications_assembly(deduplicated_assembly, assembly_name, toRemoveBed, output_folder)
proc.generate_discarded_fasta(assembly_name, discardedBed, discarded_assembly)
# When blast was skipped no intermediate files were created, so no cleanup.
if not skip_blast:
    # Clean up the intermediate files:
    ud.remove_file(output_folder+"/All_Blasts_region_coord.tab")
    ud.remove_file(output_folder+"/assembly_HET_ONLY.fa")
    ud.remove_file(output_folder+"/assembly_HET_ONLY.fa.fai")
print("Done !\n")
print("Deduplicated assembly generated in: " + deduplicated_assembly)
print("Discarded sequences in: " + discarded_assembly)
print("Contigs with a blast hit covering more than ", blast_overlap_threshold, "% of their length in: toFilter.list")
| 51.039007 | 195 | 0.606406 | 1,810 | 14,393 | 4.682873 | 0.20442 | 0.03681 | 0.011562 | 0.015337 | 0.192426 | 0.150071 | 0.108424 | 0.084592 | 0.07067 | 0.049316 | 0 | 0.006072 | 0.256236 | 14,393 | 281 | 196 | 51.220641 | 0.785708 | 0.273258 | 0 | 0.273684 | 0 | 0.063158 | 0.445423 | 0.023454 | 0 | 0 | 0 | 0 | 0.005263 | 1 | 0.010526 | false | 0 | 0.042105 | 0 | 0.052632 | 0.294737 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0dfe6c5821cbd16700bb97be17dc7d969044d263 | 6,889 | py | Python | arduino_utils.py | diegocepedaw/lasergo | 584a1b7f2c2bd96f5b8f1d4ede09f5992c774b1e | [
"MIT"
] | 12 | 2020-12-30T23:50:56.000Z | 2022-02-14T03:27:02.000Z | arduino_utils.py | diegocepedaw/lasergo | 584a1b7f2c2bd96f5b8f1d4ede09f5992c774b1e | [
"MIT"
] | null | null | null | arduino_utils.py | diegocepedaw/lasergo | 584a1b7f2c2bd96f5b8f1d4ede09f5992c774b1e | [
"MIT"
] | 1 | 2022-01-26T02:00:40.000Z | 2022-01-26T02:00:40.000Z | import cv2
import numpy as np
import time
import math
import serial
import pickle
# Servo position used as the calibration reference point.
LASER_START = (100,100)
# NOTE: opening the serial port at import time is a side effect; COM6 must be
# present or importing this module raises a serial exception.
arduino = serial.Serial('COM6', 9600, timeout=5)
def clear_leds():
    """Switch every LED on the board off via the 'C' serial command."""
    command = bytes("C0,0\r\n", "utf8")
    arduino.write(command)
    print("wrote: " + str(command))
    # The arduino echoes the command back once it has been handled.
    echo = str(arduino.readline())
    print("read: " + str(echo))
def set_led_coordinates(x_coord, y_coord):
    """Light the LEDs marking a board coordinate via the 'O' serial command."""
    # Fixed row offset of 19 applied before sending -- presumably the first
    # 19 LEDs of the strip are not part of the grid; TODO confirm.
    row = y_coord + 19
    command = bytes("O" + str(x_coord) + "," + str(row) + '\r\n', 'utf8')
    arduino.write(command)
    print("wrote: " + str(command))
    # The arduino echoes the command back once it has been handled.
    echo = str(arduino.readline())
    print("read: " + str(echo))
def control_laser(x_travel, y_travel):
    """Nudge the laser by a relative (x, y) servo increment ('I' command)."""
    command = bytes("I" + str(x_travel) + "," + str(y_travel) + '\r\n', 'utf8')
    arduino.write(command)
    # The arduino echoes the command once the move is done.
    echo = str(arduino.readline())
    time.sleep(0.5)  # give the servos time to settle
    print(echo)
def set_laser_pos(x_travel, y_travel):
    """Move the laser to an absolute (x, y) servo position ('S' command)."""
    command = bytes("S" + str(x_travel) + "," + str(y_travel) + '\r\n', 'utf8')
    arduino.write(command)
    # The arduino echoes the command once the move is done.
    echo = str(arduino.readline())
    time.sleep(1)  # longer settle time for an absolute move
    print(echo)
def calibrate_laser(cap):
    """Estimate the pixel displacement of the laser dot per servo move.

    Moves the laser by a known servo increment and measures the pixel delta
    between two frames taken at the same resolution.
    :param cap: an opened cv2.VideoCapture.
    :return: (x_ratio, y_ratio) pixel displacement for a (3, 3) servo move.
    """
    set_laser_pos(LASER_START[0], LASER_START[1])
    ret, frame = cap.read()
    frame = image_resize(frame, maxLength=720, inter=cv2.INTER_AREA)
    laser_coords = get_laser_coords(frame)
    print(laser_coords)
    # Move the laser by a small known increment and measure the pixel change.
    control_laser(3, 3)
    ret, frame = cap.read()
    # BUG FIX: the second frame must be resized exactly like the first one,
    # otherwise the two measured positions live in different pixel spaces and
    # the computed ratio is meaningless.
    frame = image_resize(frame, maxLength=720, inter=cv2.INTER_AREA)
    new_laser_coords = get_laser_coords(frame)
    x_ratio = (new_laser_coords[0] - laser_coords[0])
    y_ratio = (new_laser_coords[1] - laser_coords[1])
    print(new_laser_coords)
    print("ratio:")
    print(x_ratio, y_ratio)
    return (x_ratio, y_ratio)
def image_resize(image, maxLength = 720, inter = cv2.INTER_AREA):
    """Resize *image* so its longest side equals *maxLength*, keeping aspect ratio.

    :param image: input image (numpy array, height x width x channels).
    :param maxLength: target length in pixels of the longest side.
    :param inter: OpenCV interpolation flag.
    :return: the resized image.
    """
    h, w = image.shape[:2]
    if h >= w:
        # Height is the longest side: pin it to maxLength, scale the width.
        scale = maxLength / float(h)
        dim = (int(w * scale), maxLength)
    else:
        # Width is the longest side: pin it to maxLength, scale the height.
        scale = maxLength / float(w)
        dim = (maxLength, int(h * scale))
    return cv2.resize(image, dim, interpolation=inter)
def get_laser_coords(frame):
    """Locate the red laser dot in *frame*.

    Returns the average (x, y) of the red-masked pixels, or (0, 0) when no
    red pixel is found.
    """
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Red wraps around the HSV hue axis, so it needs two ranges; only the
    # upper range (mask2) is actually used at the moment -- the selection is
    # manual and should eventually be automated.
    lower_red = np.array([0, 70, 50])
    upper_red = np.array([10, 255, 255])
    mask1 = cv2.inRange(hsv, lower_red, upper_red)
    lower_red = np.array([170, 70, 50])
    upper_red = np.array([180, 255, 255])
    mask2 = cv2.inRange(hsv, lower_red, upper_red)
    mask = mask2
    thresh = cv2.threshold(mask, 25, 255, cv2.THRESH_BINARY)[1]
    cv2.imshow("mask", thresh)
    pixels = cv2.findNonZero(thresh)
    # Cap the number of points considered: keep only the last 450 when more
    # than 500 were detected.
    if pixels is not None and len(pixels) > 500:
        pixels = pixels[-450:]
    x_acc = 0
    y_acc = 0
    if pixels is not None:
        for point in pixels:
            x_acc += point[0][0]
            y_acc += point[0][1]
        x_acc = x_acc / len(pixels)
        y_acc = y_acc / len(pixels)
    return (int(x_acc), int(y_acc))
def target_laser(target, cap, mask = True):
    """Closed-loop steer the laser dot to *target* (pixel coordinates).

    :param target: (x, y) pixel coordinate to reach.
    :param cap: an opened cv2.VideoCapture.
    :param mask: unused parameter -- the board mask below is always applied.
    Press 'q' or Tab in the display window to stop; the laser then returns
    to position (0, 0).
    """
    x_ratio, y_ratio = calibrate_laser(cap)
    # NOTE(review): the calibration result is immediately discarded in favour
    # of hardcoded pixel-per-step ratios -- confirm whether this is intended.
    x_ratio, y_ratio = -35, -14
    ret, frame = cap.read()
    frame = image_resize(frame, maxLength = 720, inter = cv2.INTER_AREA)
    with open('corner_original_coords.data', 'rb') as filehandle:
        # read the pickled board-corner coordinates as a binary data stream
        pts1 = pickle.load(filehandle)
    # arrange the four corner points so they can be drawn as a polygon
    poly = [pts1[0], pts1[2], pts1[3], pts1[1]]
    # MASK NON BOARD AREA
    fill_color = [0, 0, 0] # any BGR color value to fill with
    mask_value = 255 # 1 channel white (can be any non-zero uint8 value)
    # our stencil - some `mask_value` contours on black (zeros) background,
    # the image has same height and width as `img`, but only 1 color channel
    stencil = np.zeros(frame.shape[:-1])
    cv2.fillPoly(stencil, np.array([poly], dtype=np.int32), mask_value)
    sel = stencil != mask_value # select everything that is not mask_value
    arrived = False
    while (1):
        ret, frame = cap.read()
        frame = image_resize(frame, maxLength = 720, inter = cv2.INTER_AREA)
        frame[sel] = fill_color # black out everything outside the board
        laser_coords = get_laser_coords(frame)
        print(laser_coords)
        cv2.circle(frame, target, 10, (0, 0, 0), 2)
        x_dist = abs(target[0] - laser_coords[0])
        y_dist = abs(target[1] - laser_coords[1])
        # Within 8 px on both axes counts as arrived; once arrived, stay
        # arrived and just draw the green confirmation circle.
        if x_dist < 8 and y_dist < 8 or arrived:
            arrived = True
            cv2.circle(frame, target, 17, (0, 255, 0), 2)
        else:
            # Convert the remaining pixel error into whole servo steps.
            x_travel = ((target[0] - laser_coords[0] ) / x_ratio)
            y_travel = ((target[1] - laser_coords[1] ) / y_ratio)
            # Dead-band of 5 px per axis to avoid oscillating around target.
            if x_dist < 5:
                x_travel = 0
            elif x_travel < 0:
                x_travel = math.floor(x_travel)
            else:
                x_travel = math.ceil(x_travel)
            if y_dist < 5:
                y_travel = 0
            elif y_travel < 0:
                y_travel = math.floor(y_travel)
            else:
                # NOTE(review): floor here while the x axis uses ceil for the
                # positive branch -- confirm whether ceil was intended.
                y_travel = math.floor(y_travel)
            print(laser_coords, target)
            print(x_travel,y_travel)
            time.sleep(3)
            control_laser(x_travel,y_travel)
        #cv2.imshow('mask', mask)
        cv2.imshow('Track Laser', frame)
        waitkey = cv2.waitKey(1)
        # Exit on 'q' or Tab (keycode 9).  NOTE(review): the 'q' test is
        # duplicated -- the second comparison was probably meant for another key.
        if waitkey & 0xFF == ord('q') or waitkey & 0xFF == ord('q') or waitkey == 9:
            break
    set_laser_pos(0,0)
    cv2.destroyAllWindows()
if __name__ == "__main__":
    # Demo: steer the laser to a fixed pixel target using camera 0, then
    # release the camera and close the serial port.
    cap = cv2.VideoCapture(0)
    #calibrate_laser(cap)
    target_laser((340, 200), cap)
    cap.release()
    if arduino.isOpen() == True:
        arduino.close()
0dfea64ad5882b1a0e160dc452c8c73b059a7bdc | 22,984 | py | Python | sktime_dl/deeplearning/mcnn/_classifier.py | talhaanwarch/sktime-dl | da4db17b78b645667b9be18e80283d0af4768c40 | [
"BSD-3-Clause"
] | 1 | 2021-06-13T05:29:16.000Z | 2021-06-13T05:29:16.000Z | sktime_dl/deeplearning/mcnn/_classifier.py | oustella/sktime-dl | f454893012567519f12d04050991cbec53ab1ef0 | [
"BSD-3-Clause"
] | null | null | null | sktime_dl/deeplearning/mcnn/_classifier.py | oustella/sktime-dl | f454893012567519f12d04050991cbec53ab1ef0 | [
"BSD-3-Clause"
] | null | null | null | # todo keras/tesnorflow memory problem when search over network parameters
# currently just deleting EVERY model and retraining the best parameters
# at the end, see **1
__author__ = "Aaron Bostrom, James Large"
import gc
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow import keras
from sktime_dl.deeplearning.base.estimators import BaseDeepClassifier
from sktime_dl.utils import check_and_clean_data
from sktime_dl.utils import check_is_fitted
from sklearn.utils import check_random_state
class MCNNClassifier(BaseDeepClassifier):
"""Multi-scale Convolutional Neural Network (MCNN) classifier.

Slices each series into overlapping windows, augments them with
moving-average and down-sampled branches, feeds each branch through its
own Conv1D/MaxPool stage, and grid-searches pooling factor and filter
size using an internal validation split.

Adapted from the implementation by Fawaz et. al:
https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/mcnn.py

Network originally defined in:

@article{cui2016multi,
  title={Multi-scale convolutional neural networks for time series
    classification},
  author={Cui, Zhicheng and Chen, Wenlin and Chen, Yixin},
  journal={arXiv preprint arXiv:1603.06995},
  year={2016}
}
"""
def __init__(
self,
pool_factors=[2, 3, 5],
filter_sizes=[0.05, 0.1, 0.2],
window_size=0.2,
nb_train_batch=10,
nb_epochs=200,
max_train_batch_size=256,
slice_ratio=0.9,
random_state=0,
verbose=False,
model_name="mcnn",
model_save_directory=None,
):
"""
:param pool_factors: list of int, candidate pooling factors for the
    hyper-parameter grid search
:param filter_sizes: list of float, candidate filter sizes (as a
    fraction of the series length) for the grid search
:param window_size: float or int, window size as a fraction of the
    series length if < 1, otherwise an absolute length
:param nb_train_batch: int, number of training batches per epoch
:param nb_epochs: int, the number of epochs to train the model
:param max_train_batch_size: int, upper bound on the batch size
:param slice_ratio: float, fraction of the series kept per slice
    during window-slicing augmentation
:param random_state: int, seed to any needed random actions
:param verbose: boolean, whether to output extra information
:param model_name: string, the name of this model for printing and
    file writing purposes
:param model_save_directory: string, if not None; location to save
    the trained keras model in hdf5 format

NOTE(review): pool_factors/filter_sizes use mutable list defaults;
harmless here because they are only read, and kept for sklearn
get_params compatibility.
"""
super(MCNNClassifier, self).__init__(
model_save_directory=model_save_directory,
model_name=model_name
)
self.random_state = random_state
self.verbose = verbose
self.pool_factors = (
pool_factors # used for hyperparameters grid search
)
self.filter_sizes = (
filter_sizes # used for hyperparameters grid search
)
self.window_size = window_size
self.nb_train_batch = nb_train_batch
self.nb_epochs = nb_epochs
self.max_train_batch_size = max_train_batch_size
self.slice_ratio = slice_ratio
# set to True by fit(); checked by check_is_fitted before predicting
self._is_fitted = False
def set_hyperparameters(self):
    """Initialise the fixed moving-average (ma) and down-sampling (ds)
    branch settings used by the augmentation transforms."""
    # Moving-average branch: one window, base width 5, step 6.
    self.ma_base, self.ma_step, self.ma_num = 5, 6, 1
    # Down-sampling branch: up to four rates, base 2, increasing by 1.
    self.ds_base, self.ds_step, self.ds_num = 2, 1, 4
def slice_data(self, data_x, data_y, slice_ratio):
    """Window-slice every series into overlapping sub-series.

    Each instance of length L yields ``L - int(L * slice_ratio) + 1``
    shifted windows of length ``int(L * slice_ratio)``. When one-hot
    labels are supplied, each window inherits the label of its source
    instance; otherwise the returned label array is None.
    """
    n_inst, series_len, n_chan = data_x.shape  # n_chan > 1 for MTS
    win_len = int(series_len * slice_ratio)
    n_windows = series_len - win_len + 1  # windows per instance
    sliced_x = np.zeros((n_inst * n_windows, win_len, n_chan))
    sliced_y = None
    if data_y is not None:
        sliced_y = np.zeros((n_inst * n_windows, data_y.shape[1]))
    for inst in range(n_inst):
        for shift in range(n_windows):
            row = inst * n_windows + shift
            sliced_x[row, :, :] = data_x[inst, shift: shift + win_len, :]
            if data_y is not None:
                # Replicate the (one-hot) label for this window.
                sliced_y[row] = np.int_(data_y[inst].astype(np.float32))
    return sliced_x, sliced_y
def _downsample(self, data_x, sample_rate, offset=0):
    """Keep every ``sample_rate``-th time step, starting at ``offset``."""
    n_inst, series_len, n_chan = data_x.shape  # n_chan > 1 for MTS
    # One extra sample fits when the tail remainder reaches past offset.
    extra = 1 if series_len % sample_rate > offset else 0
    out_len = int(np.floor(series_len / sample_rate)) + extra
    result = np.zeros((n_inst, out_len, n_chan))
    for step in range(out_len):
        result[:, step] = np.array(data_x[:, offset + sample_rate * step])
    return result
def _movingavrg(self, data_x, window_size):
    """Smooth each series with a sliding mean of width ``window_size``."""
    n_inst, series_len, n_chan = data_x.shape  # n_chan > 1 for MTS
    out_len = series_len - window_size + 1
    smoothed = np.zeros((n_inst, out_len, n_chan))
    for start in range(out_len):
        smoothed[:, start] = np.mean(
            data_x[:, start: start + window_size], axis=1
        )
    return smoothed
def movingavrg(self, data_x, window_base, step_size, num):
    """Concatenate ``num`` moving-average transforms of increasing width.

    Returns the transforms joined along the time axis together with the
    list of their individual lengths; ``(None, [])`` when num == 0.
    """
    if num == 0:
        return (None, [])
    combined = self._movingavrg(data_x, window_base)
    branch_lengths = [combined.shape[1]]
    for idx in range(1, num):
        width = window_base + step_size * idx
        if width > data_x.shape[1]:
            continue  # window wider than the series: skip this branch
        smoothed = self._movingavrg(data_x, width)
        branch_lengths.append(smoothed.shape[1])
        combined = np.concatenate([combined, smoothed], axis=1)
    return (combined, branch_lengths)
def downsample(self, data_x, base, step_size, num):
    """Concatenate ``num`` down-sampled transforms of increasing rate.

    Returns the transforms joined along the time axis together with the
    list of their individual lengths; ``(None, [])`` when down-sampling
    is skipped.
    """
    # Special case inherited from the reference implementation: the
    # JapaneseVowels MTS dataset (length 26) is too short to down-sample.
    if data_x.shape[1] == 26:
        return (None, [])
    if num == 0:
        return (None, [])
    combined = self._downsample(data_x, base, 0)
    branch_lengths = [combined.shape[1]]
    for idx in range(1, num):
        rate = base + step_size * idx
        if rate > data_x.shape[1]:
            continue  # rate exceeds the series length: skip this branch
        for offset in range(0, 1):  # only offset 0, as in the original
            shrunk = self._downsample(data_x, rate, offset)
            branch_lengths.append(shrunk.shape[1])
            combined = np.concatenate([combined, shrunk], axis=1)
    return (combined, branch_lengths)
def train(self, x_train, y_train, pool_factor, filter_size):
"""Train one candidate network for the given hyper-parameter pair.

Splits off a 20% validation set, applies window-slicing plus
moving-average/down-sampling augmentation, builds the multi-branch
sub-model and trains it batch-by-batch with patience-based early
stopping. Returns ``(best_validation_loss, model)``.
"""
# split train into validation set with validation_size = 0.2 train_size
x_train, x_val, y_train, y_val = train_test_split(
x_train, y_train, test_size=0.2
)
ori_len = x_train.shape[1] # original_length of time series
kernel_size = int(ori_len * filter_size)
# restrict slice ratio when data length is too large
current_slice_ratio = self.slice_ratio
if ori_len > 500:
current_slice_ratio = (
self.slice_ratio if self.slice_ratio > 0.98 else 0.98
)
# windows per instance produced by slicing; also the batch size used
# when evaluating the validation set (one vote per window)
increase_num = (
ori_len - int(ori_len * current_slice_ratio) + 1
) # this can be used as the batch size
# print(increase_num)
train_batch_size = int(
x_train.shape[0] * increase_num / self.nb_train_batch
)
current_n_train_batch = self.nb_train_batch
if train_batch_size > self.max_train_batch_size:
# limit the train_batch_size by raising the number of batches
current_n_train_batch = int(x_train.shape[0] * increase_num /
self.max_train_batch_size)
# data augmentation by slicing the length of the series
x_train, y_train = self.slice_data(
x_train, y_train, current_slice_ratio
)
x_val, y_val = self.slice_data(x_val, y_val, current_slice_ratio)
train_set_x, train_set_y = x_train, y_train
valid_set_x, valid_set_y = x_val, y_val
valid_num = valid_set_x.shape[0]
# print("increase factor is ", increase_num, ', ori len', ori_len)
valid_num_batch = int(valid_num / increase_num)
length_train = train_set_x.shape[1] # length after slicing.
# window_size < 1 is interpreted as a fraction of the sliced length
current_window_size = (
int(length_train * self.window_size)
if self.window_size < 1
else int(self.window_size)
)
# cap the number of down-sampling branches so pooled outputs stay >= 1
ds_num_max = length_train / (pool_factor * current_window_size)
current_ds_num = int(min(self.ds_num, ds_num_max))
ma_train, ma_lengths = self.movingavrg(
train_set_x, self.ma_base, self.ma_step, self.ma_num
)
ma_valid, ma_lengths = self.movingavrg(
valid_set_x, self.ma_base, self.ma_step, self.ma_num
)
ds_train, ds_lengths = self.downsample(
train_set_x, self.ds_base, self.ds_step, current_ds_num
)
ds_valid, ds_lengths = self.downsample(
valid_set_x, self.ds_base, self.ds_step, current_ds_num
)
# concatenate the branches along the time axis; split_input_for_model
# later cuts them apart again using data_lengths
data_lengths = [length_train]
# downsample part:
if ds_lengths != []:
data_lengths += ds_lengths
train_set_x = np.concatenate([train_set_x, ds_train], axis=1)
valid_set_x = np.concatenate([valid_set_x, ds_valid], axis=1)
# moving average part
if ma_lengths != []:
data_lengths += ma_lengths
train_set_x = np.concatenate([train_set_x, ma_train], axis=1)
valid_set_x = np.concatenate([valid_set_x, ma_valid], axis=1)
# print("Data length:", data_lengths)
n_train_size = train_set_x.shape[0]
# n_valid_size = valid_set_x.shape[0]
batch_size = int(n_train_size / current_n_train_batch)
n_train_batches = int(n_train_size / batch_size)
# data_dim = train_set_x.shape[1]
num_dim = train_set_x.shape[2] # For MTS
nb_classes = train_set_y.shape[1]
self.input_shapes, max_length = self.get_list_of_input_shapes(
data_lengths, num_dim
)
model = self.build_sub_model(
self.input_shapes, nb_classes, pool_factor, kernel_size
)
# print('submodel built', model)
if self.verbose:
model.summary()
# early-stopping parameters
patience = 10000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
max_before_stopping = 500
best_validation_loss = np.inf
# best_iter = 0
valid_loss = 0.0
epoch = 0
done_looping = False
num_no_update_epoch = 0
epoch_avg_cost = float("inf")
# epoch_avg_err = float("inf")
while (epoch < self.nb_epochs) and (not done_looping):
epoch = epoch + 1
epoch_train_err = 0.0
epoch_cost = 0.0
num_no_update_epoch += 1
# stop after max_before_stopping epochs without any improvement
if num_no_update_epoch == max_before_stopping:
break
for minibatch_index in range(n_train_batches):
iteration = (epoch - 1) * n_train_batches + minibatch_index
x = train_set_x[
minibatch_index
* batch_size: (minibatch_index + 1) * batch_size]
y = train_set_y[
minibatch_index
* batch_size: (minibatch_index + 1) * batch_size]
x = self.split_input_for_model(x, self.input_shapes)
# print('\t pre train batch')
cost_ij, accuracy = model.train_on_batch(x, y)
# print('\t post train batch')
train_err = 1 - accuracy
epoch_train_err = epoch_train_err + train_err
epoch_cost = epoch_cost + cost_ij
if (iteration + 1) % validation_frequency == 0:
valid_losses = []
# validate one original instance at a time: each batch is the
# increase_num windows sliced from that instance, classified
# by majority vote over the windows
for i in range(valid_num_batch):
x = valid_set_x[
i * (increase_num): (i + 1) * (increase_num)
]
y_pred = model.predict_on_batch(
self.split_input_for_model(x, self.input_shapes)
)
# convert the predicted from binary to integer
y_pred = np.argmax(y_pred, axis=1)
label = np.argmax(valid_set_y[i * increase_num])
(
unique_value,
sub_ind,
correspond_ind,
count,
) = np.unique(y_pred, True, True, True)
unique_value = unique_value.tolist()
curr_err = 1.0
if label in unique_value:
target_ind = unique_value.index(label)
count = count.tolist()
sorted_count = sorted(count)
if count[target_ind] == sorted_count[-1]:
if (len(sorted_count) > 1 and
sorted_count[-1] == sorted_count[-2]):
curr_err = 0.5 # tie
else:
curr_err = 0
valid_losses.append(curr_err)
valid_loss = sum(valid_losses) / float(len(valid_losses))
# print('...epoch%i,valid err: %.5f |' %
# (epoch, valid_loss))
# if we got the best validation score until now
if valid_loss <= best_validation_loss:
num_no_update_epoch = 0
# improve patience if loss improvement is good enough
if (
valid_loss
< best_validation_loss * improvement_threshold
):
patience = max(
patience, iteration * patience_increase
)
# save best validation score and iteration number
best_validation_loss = valid_loss
# best_iter = iteration
# save model in h5 format
# self.model.save(self.output_directory+'best_model.hdf5')
if patience <= iteration:
done_looping = True
break
epoch_avg_cost = epoch_cost / n_train_batches
# epoch_avg_err = epoch_train_err / n_train_batches
# print('train err %.5f, cost %.4f' % (epoch_avg_err,
# epoch_avg_cost))
# a zero-cost epoch means the model has converged (or collapsed)
if epoch_avg_cost == 0:
break
return best_validation_loss, model
def split_input_for_model(self, x, input_shapes):
    """Cut the concatenated input back into one array per network branch,
    using the branch lengths recorded in ``input_shapes``."""
    branches = []
    offset = 0
    for shape in input_shapes:
        branch_len = shape[0]
        branches.append(x[:, offset: offset + branch_len, :])
        offset += branch_len
    return branches
def get_list_of_input_shapes(self, data_lengths, num_dim):
    """Pair each branch length with the channel count and also return
    the longest branch length."""
    input_shapes = [(length, num_dim) for length in data_lengths]
    max_length = max((0, *data_lengths))
    return input_shapes, max_length
def build_sub_model(
self, input_shapes, nb_classes, pool_factor, kernel_size
):
"""Build and compile the multi-branch Keras model.

One Input/Conv1D/MaxPooling1D stage per branch in ``input_shapes``;
the pooled branches are concatenated, convolved once more, flattened
and classified through two Dense layers with a softmax output.
"""
input_layers = []
stage_1_layers = []
for input_shape in input_shapes:
input_layer = keras.layers.Input(input_shape)
input_layers.append(input_layer)
conv_layer = keras.layers.Conv1D(
filters=256,
kernel_size=kernel_size,
padding="same",
activation="sigmoid",
kernel_initializer="glorot_uniform",
)(input_layer)
# pool each branch down by pool_factor so the concatenated stages
# should all have the same length
pool_size = int(int(conv_layer.shape[1]) / pool_factor)
max_layer = keras.layers.MaxPooling1D(pool_size=pool_size)(
conv_layer
)
# max_layer = keras.layers.GlobalMaxPooling1D()(conv_layer)
stage_1_layers.append(max_layer)
concat_layer = keras.layers.Concatenate(axis=-1)(stage_1_layers)
kernel_size = int(
min(kernel_size, int(concat_layer.shape[1]))
) # kernel shouldn't exceed the length
full_conv = keras.layers.Conv1D(
filters=256,
kernel_size=kernel_size,
padding="same",
activation="sigmoid",
kernel_initializer="glorot_uniform",
)(concat_layer)
pool_size = int(int(full_conv.shape[1]) / pool_factor)
full_max = keras.layers.MaxPooling1D(pool_size=pool_size)(full_conv)
full_max = keras.layers.Flatten()(full_max)
fully_connected = keras.layers.Dense(
units=256,
activation="sigmoid",
kernel_initializer="glorot_uniform",
)(full_max)
output_layer = keras.layers.Dense(
units=nb_classes,
activation="softmax",
kernel_initializer="glorot_uniform",
)(fully_connected)
model = keras.models.Model(inputs=input_layers, outputs=output_layer)
# NOTE(review): `lr` is the legacy Keras spelling (now `learning_rate`),
# and 0.1 is unusually high for Adam -- both taken from the reference
# implementation; confirm before upgrading TensorFlow.
model.compile(
loss="categorical_crossentropy",
optimizer=keras.optimizers.Adam(lr=0.1),
metrics=["accuracy"],
)
return model
def fit(self, X, y, input_checks=True, **kwargs):
    """
    Fit the classifier on the training set (X, y).

    Grid-searches pool_factors x filter_sizes (each candidate trained
    via ``self.train`` and immediately discarded to limit keras/tf
    memory growth, see the module header), then retrains once with the
    best hyper-parameters found.
    ----------
    X : a nested pd.Dataframe, or (if input_checks=False) array-like of
    shape = (n_instances, series_length, n_dimensions)
        The training input samples. If a 2D array-like is passed,
        n_dimensions is assumed to be 1.
    y : array-like, shape = [n_instances]
        The class labels.
    input_checks: boolean
        whether to check the X and y parameters
    Returns
    -------
    self : object
    """
    # NOTE(review): overwrites the int seed with a RandomState object
    # (sklearn discourages mutating constructor params); kept as-is.
    self.random_state = check_random_state(self.random_state)
    self.set_hyperparameters()
    X = check_and_clean_data(X, y, input_checks=input_checks)
    y_onehot = self.convert_y(y)
    best_valid_loss = np.inf
    # grid search over pooling factor and filter size
    for pool_factor in self.pool_factors:
        for filter_size in self.filter_sizes:
            valid_loss, model = self.train(
                X, y_onehot, pool_factor, filter_size
            )
            if valid_loss < best_valid_loss:
                best_valid_loss = valid_loss
                self.best_pool_factor = pool_factor
                self.best_filter_size = filter_size
            # clear memory in all the ways... **1
            del model
            gc.collect()
            keras.backend.clear_session()
    # BUG FIX: retrain with the *best* hyper-parameters found above;
    # previously this reused whatever values the grid-search loops
    # happened to end on.
    _, self.model = self.train(
        X, y_onehot, self.best_pool_factor, self.best_filter_size
    )
    self.save_trained_model()
    self._is_fitted = True
    return self
def predict_proba(self, X, input_checks=True, **kwargs):
"""
Find probability estimates for each class for all cases in X.

Each test instance is sliced into the same overlapping windows used
during training, augmented with the moving-average/down-sampling
branches, and its class probabilities are the average of the model's
per-window predictions.

Parameters
----------
X : a nested pd.Dataframe, or (if input_checks=False) array-like of
shape = (n_instances, series_length, n_dimensions)
The training input samples. If a 2D array-like is passed,
n_dimensions is assumed to be 1.
input_checks: boolean
whether to check the X parameter
Returns
-------
output : array of shape = [n_instances, n_classes] of probabilities
"""
check_is_fitted(self)
X = check_and_clean_data(X, input_checks=input_checks)
ori_len = X.shape[1] # original_length of time series
# restrict slice ratio when data length is too large
current_slice_ratio = self.slice_ratio
if ori_len > 500:
current_slice_ratio = (
self.slice_ratio if self.slice_ratio > 0.98 else 0.98
)
# windows per instance; also the prediction batch size per instance
increase_num = (
ori_len - int(ori_len * current_slice_ratio) + 1
) # this can be used as the batch size
# slice into the same overlapping windows as at training time
x_test, _ = self.slice_data(X, None, current_slice_ratio)
length_train = x_test.shape[1] # length after slicing.
current_window_size = (
int(length_train * self.window_size)
if self.window_size < 1
else int(self.window_size)
)
ds_num_max = length_train / (
self.best_pool_factor * current_window_size
)
current_ds_num = int(min(self.ds_num, ds_num_max))
# need to batch and downsample the test data.
ma_test, ma_lengths = self.movingavrg(
x_test, self.ma_base, self.ma_step, self.ma_num
)
ds_test, ds_lengths = self.downsample(
x_test, self.ds_base, self.ds_step, current_ds_num
)
test_set_x = x_test
# concatenate the branches along the time axis, as in train()
data_lengths = [length_train]
# downsample part:
if ds_lengths != []:
data_lengths += ds_lengths
test_set_x = np.concatenate([test_set_x, ds_test], axis=1)
# moving average part
if ma_lengths != []:
data_lengths += ma_lengths
test_set_x = np.concatenate([test_set_x, ma_test], axis=1)
test_num = x_test.shape[0]
test_num_batch = int(test_num / increase_num)
# get the true predictions of the test set: average the per-window
# probabilities for each original instance
y_predicted = []
for i in range(test_num_batch):
x = test_set_x[i * (increase_num): (i + 1) * (increase_num)]
preds = self.model.predict_on_batch(
self.split_input_for_model(x, self.input_shapes)
)
y_predicted.append(np.average(preds, axis=0))
y_pred = np.array(y_predicted)
return y_pred
| 35.578947 | 82 | 0.561956 | 2,824 | 22,984 | 4.279745 | 0.146955 | 0.012411 | 0.009929 | 0.006371 | 0.355039 | 0.310607 | 0.274946 | 0.255171 | 0.23763 | 0.209333 | 0 | 0.014818 | 0.356987 | 22,984 | 645 | 83 | 35.634109 | 0.802964 | 0.197833 | 0 | 0.218137 | 0 | 0 | 0.008743 | 0.001337 | 0 | 0 | 0 | 0.00155 | 0 | 1 | 0.031863 | false | 0 | 0.019608 | 0 | 0.088235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df011455a0e6333338175cf0393900bcda2fcb02 | 3,280 | py | Python | bgm_block/models.py | MCG-NJU/BCN | c637dcdc436717104d4c14a6fbb21e2d2299b087 | [
"MIT"
] | 69 | 2020-08-21T01:19:24.000Z | 2022-03-28T02:39:44.000Z | bgm_block/models.py | redwang/BCN | e5c494d8ca396d5a535309575a7a652db54f14b7 | [
"MIT"
] | 7 | 2020-11-12T08:21:08.000Z | 2021-12-29T04:50:11.000Z | bgm_block/models.py | redwang/BCN | e5c494d8ca396d5a535309575a7a652db54f14b7 | [
"MIT"
] | 9 | 2020-08-28T06:46:17.000Z | 2022-01-29T11:27:01.000Z | # -*- coding: utf-8 -*-
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import init
import copy
class fullBGM(torch.nn.Module):
"""Boundary-generation module over full-length feature sequences.

1x1 conv into a 256-d hidden space, a stack of dilated residual
layers, then a 1x1 conv to a single per-frame boundary score squashed
through sigmoid(0.01 * x).
"""
def __init__(self):
super(fullBGM, self).__init__()
self.feat_dim = 2048
self.batch_size = 1
self.c_hidden = 256
# best-so-far bookkeeping used by the training loop elsewhere
self.bgm_best_loss = 10000000
self.bgm_best_f1 = -10000000
self.bgm_best_precision = -10000000
self.output_dim = 1
self.num_layers=3
self.conv_in = nn.Conv1d(self.feat_dim, self.c_hidden, 1)
# dilations 4, 8, 16 (2 ** (i + 2) for i in 0..2)
self.layers = nn.ModuleList([copy.deepcopy(DilatedResidualLayer(2 ** (i+2), self.c_hidden, self.c_hidden)) for i in range(self.num_layers)])
self.conv_out = nn.Conv1d(self.c_hidden, self.output_dim, 1)
self.reset_params()
@staticmethod
def weight_init(m):
# NOTE(review): this checks Conv2d, but the model only contains
# Conv1d layers, so the xavier initialisation never fires and the
# default Conv1d init is used -- confirm whether Conv1d was intended.
# Also, init.xavier_normal/init.constant are the deprecated
# (non-underscore) torch.nn.init spellings.
if isinstance(m, nn.Conv2d):
init.xavier_normal(m.weight)
init.constant(m.bias, 0)
def reset_params(self):
for i, m in enumerate(self.modules()):
self.weight_init(m)
def forward(self, x):
# x: (N, 2048, T) -> (N, 1, T) boundary probabilities in (0, 1)
out = self.conv_in(x)
for layer in self.layers:
out = layer(out)
out = self.conv_out(out)
# the 0.01 scale flattens the sigmoid, keeping outputs near 0.5
out = torch.sigmoid(0.01*out)
return out
class DilatedResidualLayer(nn.Module):
    """1-D residual block: dilated conv (kernel 3) -> ReLU -> 1x1 conv ->
    dropout, added back onto the input. Sequence length is preserved
    because padding equals the dilation."""

    def __init__(self, dilation, in_channels, out_channels):
        super(DilatedResidualLayer, self).__init__()
        self.conv_dilated = nn.Conv1d(
            in_channels, out_channels, 3, padding=dilation, dilation=dilation
        )
        self.conv_1x1 = nn.Conv1d(out_channels, out_channels, 1)
        self.dropout = nn.Dropout()  # default p = 0.5

    def forward(self, x):
        residual = self.dropout(self.conv_1x1(F.relu(self.conv_dilated(x))))
        return (x + residual)
class resizedBGM(torch.nn.Module):
    """Boundary-generation module for sequences resized to a fixed,
    dataset-dependent temporal length.

    Three temporal convolutions (3/3/1 kernels, length-preserving) map
    (N, 2048, T) features to (N, 1, T) boundary probabilities.

    :param dataset: one of 'breakfast' or 'gtea' (300 frames) or
        '50salads' (400 frames).
    :raises ValueError: for an unrecognised dataset name. Previously an
        unknown name silently left ``temporal_dim`` unset, deferring the
        failure to whichever caller read it first.
    """

    def __init__(self, dataset):
        super(resizedBGM, self).__init__()
        self.feat_dim = 2048
        if dataset == 'breakfast' or dataset == 'gtea':
            self.temporal_dim = 300
        elif dataset == '50salads':
            self.temporal_dim = 400
        else:
            # Fail fast instead of leaving temporal_dim undefined.
            raise ValueError("unknown dataset: %r" % (dataset,))
        self.batch_size = 40
        self.batch_size_test = 10
        self.c_hidden = 512
        # best-so-far bookkeeping used by the training loop elsewhere
        self.bgm_best_loss = 10000000
        self.bgm_best_f1 = -10000000
        self.output_dim = 1
        self.conv1 = torch.nn.Conv1d(in_channels=self.feat_dim, out_channels=self.c_hidden, kernel_size=3, stride=1,
                                     padding=1, groups=1)
        self.conv2 = torch.nn.Conv1d(in_channels=self.c_hidden, out_channels=self.c_hidden, kernel_size=3, stride=1,
                                     padding=1, groups=1)
        self.conv3 = torch.nn.Conv1d(in_channels=self.c_hidden, out_channels=self.output_dim, kernel_size=1, stride=1,
                                     padding=0)
        self.reset_params()

    @staticmethod
    def weight_init(m):
        # NOTE(review): checks Conv2d but the model only contains Conv1d
        # layers, so this initialisation never fires (kept identical to
        # fullBGM for consistency) -- confirm intent upstream.
        if isinstance(m, nn.Conv2d):
            init.xavier_normal(m.weight)
            init.constant(m.bias, 0)

    def reset_params(self):
        for i, m in enumerate(self.modules()):
            self.weight_init(m)

    def forward(self, x):
        # (N, 2048, T) -> (N, 1, T) per-frame boundary probability
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = torch.sigmoid(self.conv3(x))
        return x
| 34.893617 | 148 | 0.607622 | 450 | 3,280 | 4.222222 | 0.222222 | 0.026316 | 0.057895 | 0.037895 | 0.467895 | 0.428421 | 0.343684 | 0.343684 | 0.343684 | 0.343684 | 0 | 0.049111 | 0.279878 | 3,280 | 93 | 149 | 35.268817 | 0.755292 | 0.012805 | 0 | 0.3875 | 0 | 0 | 0.006496 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.0625 | 0 | 0.2625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df012a7576e9516f4a23ace4d324b5a6e610a767 | 508 | py | Python | Random Problems/Merge the Tools/solutions.py | matheuscordeiro/HackerRank | 003ab87fdfa9e7c0535972abd06caebb1165423f | [
"MIT"
] | null | null | null | Random Problems/Merge the Tools/solutions.py | matheuscordeiro/HackerRank | 003ab87fdfa9e7c0535972abd06caebb1165423f | [
"MIT"
] | null | null | null | Random Problems/Merge the Tools/solutions.py | matheuscordeiro/HackerRank | 003ab87fdfa9e7c0535972abd06caebb1165423f | [
"MIT"
] | null | null | null | def merge_the_tools(string, k):
substrings = int(len(string)/k)
size_substring = int(len(string)/substrings)
count = 0
frequency = {}
word = ""
for value in string:
count += 1
if not value in frequency:
word += value
frequency[value] = True
if count == size_substring:
print(word)
word = ""
count = 0
frequency = {}
# Demo invocation exercising the HackerRank sample input.
if __name__ == "__main__":
merge_the_tools("AABCAAADA", 3) | 25.4 | 48 | 0.521654 | 55 | 508 | 4.563636 | 0.490909 | 0.063745 | 0.103586 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012539 | 0.372047 | 508 | 20 | 49 | 25.4 | 0.774295 | 0 | 0 | 0.333333 | 0 | 0 | 0.033399 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0 | 0 | 0.055556 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df019cd58e39253b69a98f540baa40894015701a | 1,915 | py | Python | utils/cmd_parser.py | SecureThemAll/CquenceR | b2d4d578a8a055df8370798769cdc6e6e1039721 | [
"MIT"
] | null | null | null | utils/cmd_parser.py | SecureThemAll/CquenceR | b2d4d578a8a055df8370798769cdc6e6e1039721 | [
"MIT"
] | null | null | null | utils/cmd_parser.py | SecureThemAll/CquenceR | b2d4d578a8a055df8370798769cdc6e6e1039721 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
from collections import Callable
from utils.command import Command
from utils.commands.preprocess import Preprocess
from utils.commands.test import Test
from utils.commands.train import Train
from utils.commands.repair import Repair
from utils.commands.stats import Stats
from utils.commands.clean import Clean
# Module-level CLI wiring: a top-level parser plus a hidden parent parser
# whose flags (-v, -seed, -l) are shared by every sub-command.
# NOTE(review): COMMANDS is never written to in this module -- presumably a
# public registry kept for importers; confirm before removing.
# NOTE(review): the `from collections import Callable` import above breaks
# on Python >= 3.10 (moved to collections.abc).
COMMANDS = {}
parser = argparse.ArgumentParser(prog="CquenceR",
description='Program Repair Tool based on Sequence-to-Sequence Learning.')
main_parser = argparse.ArgumentParser(add_help=False)
main_parser.add_argument('-v', '--verbose', help='Verbose output.', action='store_true')
main_parser.add_argument('-seed', default=0, type=int,
help='Seed used for better reproducibility between experiments')
main_parser.add_argument('-l', '--log_file', type=str, default=None, help='Log file to write the results to.')
# container for the positional sub-commands registered below
subparsers = parser.add_subparsers()
def add_command(name: str, command: Command, description: str):
    """Create a sub-parser for a positional sub-command and return it.

    The sub-parser inherits the shared flags from ``main_parser``; the
    command class and its name are attached as parser defaults so the
    dispatcher can recover them from the parsed namespace.
    """
    sub = subparsers.add_parser(name=name, help=description,
                                parents=[main_parser])
    sub.set_defaults(command=command, name=name)
    return sub
def register(definition: Callable, arguments: Callable):
    """Register a command as a positional argument.

    ``definition`` supplies the keyword arguments for :func:`add_command`;
    ``arguments`` then attaches the command-specific CLI options to the
    freshly created sub-parser.
    """
    arguments(add_command(**definition()))
# Wire up every available sub-command (preprocess/train/test/repair/stats/clean).
register(definition=Preprocess.definition, arguments=Preprocess.add_arguments)
register(definition=Train.definition, arguments=Train.add_arguments)
register(definition=Test.definition, arguments=Test.add_arguments)
register(definition=Repair.definition, arguments=Repair.add_arguments)
register(definition=Stats.definition, arguments=Stats.add_arguments)
register(definition=Clean.definition, arguments=Clean.add_arguments)
def run(command: Command, **kwargs):
    """Instantiate ``command`` with ``kwargs`` and execute the instance."""
    instance = command(**kwargs)
    instance()
| 36.132075 | 110 | 0.768146 | 240 | 1,915 | 6.008333 | 0.316667 | 0.043689 | 0.070735 | 0.104022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001195 | 0.126371 | 1,915 | 52 | 111 | 36.826923 | 0.860729 | 0.033943 | 0 | 0 | 0 | 0 | 0.113341 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.257143 | 0 | 0.371429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df035418a124dfc6d3a28d6cc0fd7c45993e05d4 | 3,884 | py | Python | lanefinder/CamModel.py | sheayun-kmu/CarND-Advanced-Lane-Lines | f8f38767acb9a65bc942f53706c6d223dfb666a2 | [
"MIT"
] | null | null | null | lanefinder/CamModel.py | sheayun-kmu/CarND-Advanced-Lane-Lines | f8f38767acb9a65bc942f53706c6d223dfb666a2 | [
"MIT"
] | null | null | null | lanefinder/CamModel.py | sheayun-kmu/CarND-Advanced-Lane-Lines | f8f38767acb9a65bc942f53706c6d223dfb666a2 | [
"MIT"
] | 1 | 2020-09-23T12:43:22.000Z | 2020-09-23T12:43:22.000Z | import logging
import numpy as np
import cv2
from lanefinder.params import camera_params
from lanefinder.params import perspective_params
# Capture camera model
# 1. calibrate using a set of images of chessboards
# 2. undistort image based on calibration
# 3. warp image to get a bird's eye view of it
class CamModel:
    """Camera model: distortion calibration and bird's-eye perspective warp.

    Workflow:
      1. calibrate() on a set of chessboard images,
      2. undistort() frames using the recovered intrinsics,
      3. init_perspective() then warp()/inverse_warp() for a top-down view.
    """

    # Configure logger, initialize distortion parameters
    # and perspective transform parameters.
    def __init__(self):
        self.log = logging.getLogger(__name__)
        self.log.setLevel(logging.WARN)
        self.mtx = None    # camera matrix (intrinsics)
        self.dist = None   # distortion coefficients
        self.M = None      # perspective transform matrix
        self.Minv = None   # inverse perspective transform matrix

    # Given a set of chessboard images (and # of corners),
    # calibrate the camera and derive the conversion matrix.
    def calibrate(self, image_files, nx=0, ny=0):
        """Calibrate the camera from chessboard calibration images.

        :param image_files: iterable of image file paths
        :param nx: inner corners per row (0 -> read from camera_params)
        :param ny: inner corners per column (0 -> read from camera_params)
        """
        # Read from configuration if nx & ny are not specified.
        if nx == 0:
            nx = camera_params['nx']
        if ny == 0:
            ny = camera_params['ny']
        # Initialize empty imgpoints and objpoints
        imgpoints = []
        objpoints = []
        # Prepare uniform object points (z == 0 chessboard plane)
        objp = np.zeros((ny * nx, 3), np.float32)
        objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
        self.log.info("Beginning camera calibration with"
                      " %d images" % len(image_files))
        for fname in image_files:
            img = cv2.imread(fname)
            # Convert to grayscale
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # Find chessboard corners
            ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
            # If found, add object points and image points
            # (ret is already a bool; no need to compare with True)
            if ret:
                imgpoints.append(corners)
                objpoints.append(objp)
            else:
                # logging.Logger.warn is a deprecated alias of warning()
                self.log.warning("Failed to find %d * %d corners from"
                                 " image %s" % (nx, ny, fname))
        # Calibrate
        self.log.info("Gathered %d sets of corners" % len(imgpoints))
        # NOTE(review): `img` holds the last image read; an empty
        # image_files list would raise NameError on the next line.
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
            objpoints, imgpoints, img.shape[1::-1], None, None
        )
        self.log.info("Finished calibrating camera")
        self.mtx = mtx
        self.dist = dist

    # Given an image, return its undistorted version, based on
    # calibration parameters determined in calibrate()
    def undistort(self, img):
        undist = cv2.undistort(img, self.mtx, self.dist, None, self.mtx)
        return undist

    # Given two sets of four points (corners) - src & dst -
    # compute perspective transformation (matrix M) and its inverse.
    def init_perspective(self, src=None, dst=None):
        if not src:
            src = np.float32([
                perspective_params['src']['ul'],
                perspective_params['src']['ur'],
                perspective_params['src']['ll'],
                perspective_params['src']['lr'],
            ])
        if not dst:
            dst = np.float32([
                perspective_params['dst']['ul'],
                perspective_params['dst']['ur'],
                perspective_params['dst']['ll'],
                perspective_params['dst']['lr'],
            ])
        self.M = cv2.getPerspectiveTransform(src, dst)
        self.Minv = cv2.getPerspectiveTransform(dst, src)

    # Warp image using previously initialized transform.
    def warp(self, img):
        r, c = img.shape[:2]
        warped = cv2.warpPerspective(
            img, self.M, (c, r),
            flags=perspective_params['flags']
        )
        return warped

    # Inverse-warp image using previously initialized transform.
    def inverse_warp(self, img):
        r, c = img.shape[:2]
        inverse = cv2.warpPerspective(
            img, self.Minv, (c, r),
            flags=perspective_params['flags']
        )
        return inverse
| 36.299065 | 74 | 0.58033 | 454 | 3,884 | 4.903084 | 0.328194 | 0.084007 | 0.035939 | 0.02336 | 0.093441 | 0.093441 | 0.093441 | 0.019766 | 0 | 0 | 0 | 0.013178 | 0.316169 | 3,884 | 106 | 75 | 36.641509 | 0.824925 | 0.240216 | 0 | 0.08 | 0 | 0 | 0.066598 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.066667 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df04f18f1abbee7ba636186d9cf666a08ecb1a2d | 4,446 | py | Python | poc/hex_grid/hex.py | CoryOwens/hex | 83253b52f9ee86d13402fc7475a0cdd772921728 | [
"MIT"
] | null | null | null | poc/hex_grid/hex.py | CoryOwens/hex | 83253b52f9ee86d13402fc7475a0cdd772921728 | [
"MIT"
] | null | null | null | poc/hex_grid/hex.py | CoryOwens/hex | 83253b52f9ee86d13402fc7475a0cdd772921728 | [
"MIT"
] | null | null | null | import math
import kivy
from kivy.graphics import Line
from kivy.graphics import Color
from kivy.properties import ListProperty, NumericProperty
from kivy.uix.widget import Widget
from kivy.logger import Logger
kivy.require('1.10.0')
class HexTile(Widget):
corners = ListProperty()
sides = ListProperty()
wedge_size = NumericProperty()
rotation_offset = NumericProperty()
def __init__(self, **kwargs):
super(HexTile, self).__init__()
self.debug = kwargs.get('debug', False)
size = kwargs.get('size', None)
if size:
self.size = size
else:
self.wedge_size = kwargs.get('wedge_size', 10)
rotation_offset = kwargs.get('rotation_offset', 0)
self.rotation_offset = rotation_offset
center = kwargs.get('center', None)
if not center:
center = (self.center_x, self.center_y)
self.center = center
@staticmethod
def calc_tile_width(size, deg):
deg = deg % 60
h_rad = math.radians(deg)
off = math.radians(60)
max_width = min(size)
angles = [
math.fabs(math.cos(h_rad)), math.fabs(math.cos(off - h_rad))
]
return max_width * max(angles)
@staticmethod
def calc_tile_height(size, deg):
deg = deg % 60
v_rad = math.radians(deg + 60)
off = math.radians(60)
max_height = min(size)
angles = [
math.fabs(math.sin(v_rad)), math.fabs(math.sin(off - v_rad))
]
return max_height * max(angles)
@classmethod
def calc_wedge_size(cls, size, deg):
return max(cls.calc_tile_width(size, deg),
cls.calc_tile_height(size, deg)) / 2
def update_wedge_size(self):
self.wedge_size = self.calc_wedge_size(self.size, self.rotation_offset)
def on_rotation_offset(self, instance, value):
self.update_wedge_size()
if not self.center:
Logger.info('HexTile: on_rotation_offset -- center not set')
return
self.populate_corners()
def on_size(self, instance, value):
self.update_wedge_size()
if not self.center:
Logger.info('HexTile: on_size -- center not set')
return
self.populate_corners()
def on_center(self, instance, value):
if not self.wedge_size:
Logger.info('HexTile: on_center -- wedge_size not set')
return
self.populate_corners()
def populate_corners(self):
corners = []
for i in range(6):
angle_deg = 60 * i - self.rotation_offset
angle_rad = math.pi / 180 * angle_deg
corner = (self.center_x + self.wedge_size * math.cos(angle_rad),
self.center_y + self.wedge_size * math.sin(angle_rad))
corners.append(corner)
Logger.info('HexTile: populate_corners -- corners: {}'.format(corners))
self.corners = corners
def on_corners(self, instance, value):
self.populate_sides()
def populate_sides(self):
sides = []
for i in range(6):
corner_a = self.corners[i]
corner_b = self.corners[(i + 1) % 6]
side = (corner_a, corner_b)
sides.append(side)
Logger.info('HexTile: populate_sides -- sides: {}'.format(sides))
self.sides = sides
def on_sides(self, instance, value):
    # Geometry is fully derived once sides exist -> repaint the canvas.
    self.draw()
def draw(self):
    """Redraw the hexagon outline (and debug guides) on the widget canvas."""
    with self.canvas:
        # Wipe the previous instructions before adding the new ones.
        self.canvas.clear()
        Color(1, 1, 1)
        # Close the polygon by appending the first corner again.
        # NOTE(review): assumes kivy's Line accepts a list of (x, y)
        # tuples for ``points`` -- confirm against the kivy version used.
        Line(points=self.corners+[self.corners[0]], width=1)
        if self.debug:
            # Draw center line
            Line(points=[self.center, self.corners[0]], width=1)
            # Draw bounding box
            box_size = min(self.size) / 2 # Dist from center to orth edge
            box_corners = [
                (self.center_x - box_size, self.center_y - box_size),
                (self.center_x + box_size, self.center_y - box_size),
                (self.center_x + box_size, self.center_y + box_size),
                (self.center_x - box_size, self.center_y + box_size),
            ]
            Line(points=box_corners + [box_corners[0]], width=1)
if __name__ == '__main__':
    # Manual smoke test: show a single debug-mode tile in a bare Kivy app.
    from kivy.app import App
    class HexApp(App):
        def build(self):
            # Root widget: one HexTile with debug guides enabled.
            return HexTile(debug=True)
    HexApp().run()
| 32.217391 | 79 | 0.580072 | 555 | 4,446 | 4.448649 | 0.181982 | 0.064804 | 0.031187 | 0.048198 | 0.269745 | 0.18307 | 0.162819 | 0.149048 | 0.149048 | 0.115026 | 0 | 0.012076 | 0.310841 | 4,446 | 137 | 80 | 32.452555 | 0.793734 | 0.014395 | 0 | 0.175439 | 0 | 0 | 0.056875 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.122807 | false | 0 | 0.070175 | 0.017544 | 0.307018 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df07eeff5a871ae595d36ebd4e1dcb711889d0e5 | 816 | py | Python | run/gold-key-splitter.py | osmanbaskaya/mapping-impact | 8024dd3b916ac2dfc336221dd32faba4c0a98442 | [
"MIT"
] | 1 | 2016-03-14T15:28:22.000Z | 2016-03-14T15:28:22.000Z | run/gold-key-splitter.py | osmanbaskaya/mapping-impact | 8024dd3b916ac2dfc336221dd32faba4c0a98442 | [
"MIT"
] | null | null | null | run/gold-key-splitter.py | osmanbaskaya/mapping-impact | 8024dd3b916ac2dfc336221dd32faba4c0a98442 | [
"MIT"
] | null | null | null | #! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Osman Baskaya"
"""
Split the keys according to target words. Our gold file is merged.
All keys are in the same file. In order to create a development set
in run/gold/twitter|gigaword, we need to split keys in dev set
into separate files.
input_file should be in the Semeval 2013 format.
Please check the files in keys/gold
Not: Gigaword icin calismiyor, formatta sorun var.
"""
import sys
import os
from collections import defaultdict as dd
input_file = open(sys.argv[1])
output_dir = sys.argv[2]
devset = set(sys.argv[3:])
d = dd(list)
for line in input_file:
tw = line.split()[0]
if tw in devset:
d[tw].append(line)
for tw in devset:
with open(os.path.join(output_dir, tw + ".key"), 'w') as f:
f.write("".join(d[tw]))
| 24 | 69 | 0.688725 | 141 | 816 | 3.921986 | 0.595745 | 0.048825 | 0.036166 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013699 | 0.194853 | 816 | 33 | 70 | 24.727273 | 0.828006 | 0.047794 | 0 | 0 | 0 | 0 | 0.044226 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df08f0d2fd7a1589d78358cb6d71d6ae0bb1ece0 | 857 | py | Python | resource/engine/add_worker.py | CybersecurityLuxembourg/cyberlux-cron | dc1a8e4fad067b6d00c73a434d2a278576bbafab | [
"BSD-2-Clause"
] | null | null | null | resource/engine/add_worker.py | CybersecurityLuxembourg/cyberlux-cron | dc1a8e4fad067b6d00c73a434d2a278576bbafab | [
"BSD-2-Clause"
] | null | null | null | resource/engine/add_worker.py | CybersecurityLuxembourg/cyberlux-cron | dc1a8e4fad067b6d00c73a434d2a278576bbafab | [
"BSD-2-Clause"
] | null | null | null | from flask_apispec import MethodResource
from flask_apispec import doc
from flask_restful import Resource
from queue import Queue
from flask_jwt_extended import jwt_required
from decorator.catch_exception import catch_exception
from decorator.verify_admin_access import verify_admin_access
class AddWorker(MethodResource, Resource):
    """Admin-only endpoint asking the engine to spawn one more worker."""

    def __init__(self, db, engine):
        self.db = db
        self.engine = engine

    @doc(tags=['engine'])
    @jwt_required
    @verify_admin_access
    @catch_exception
    def post(self):
        """Post an ADD_WORKER command and wait (1s) for the engine's reply."""
        try:
            # Single-slot queue used as a one-shot reply channel.
            reply_channel = Queue(1)
            self.engine.queue.put((reply_channel, "ADD_WORKER"))
            outcome = reply_channel.get(timeout=1)
            del reply_channel
        except Exception as error:
            del reply_channel  # noqa: F821
            return str(error), 200
        return outcome, 200
| 26.78125 | 64 | 0.683781 | 108 | 857 | 5.185185 | 0.416667 | 0.107143 | 0.091071 | 0.078571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017161 | 0.252042 | 857 | 31 | 65 | 27.645161 | 0.856474 | 0.011669 | 0 | 0.08 | 0 | 0 | 0.018935 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.28 | 0 | 0.48 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df09ac413fdfd3e722c4aa22f5197075d17783b1 | 1,463 | py | Python | react-flask-app/api/TripDB.py | GabCas28/Smart-Mazda-On-Board | 2c3aa53c34bd90868a55985a01dcb5a5954d6a8b | [
"MIT"
] | null | null | null | react-flask-app/api/TripDB.py | GabCas28/Smart-Mazda-On-Board | 2c3aa53c34bd90868a55985a01dcb5a5954d6a8b | [
"MIT"
] | 1 | 2022-02-13T13:14:46.000Z | 2022-02-13T13:14:46.000Z | react-flask-app/api/TripDB.py | GabCas28/Smart-Mazda-On-Board | 2c3aa53c34bd90868a55985a01dcb5a5954d6a8b | [
"MIT"
] | null | null | null | from tinydb import TinyDB, Query, where
from tinydb.operations import delete, add, set
# import json
from pymongo import MongoClient, ReplaceOne
from bson import json_util, objectid
from pymongo.errors import BulkWriteError
import time
import pprint
class TripDB:
    """Local TinyDB store of trips, with upload to a remote MongoDB."""

    database = None

    def __init__(self):
        self.database = self.getDataBase()

    def getDataBase(self):
        """ Retrieves the content of the database, it creates a new one if it doesn't exist """
        return TinyDB("Trips.json")

    def updateTrip(self, DBentry):
        """ Insert or Update new entry into the table, keyed on 'startTime' """
        self.database.upsert(DBentry, Query().startTime == DBentry['startTime'])

    def upload(self):
        """ Upload the content of the database to the remote server.

        Returns the bulk-write result (or the error details on partial
        failure) serialized as a JSON string.
        """
        # SECURITY: credentials are hard-coded in the connection URI;
        # they should be moved to configuration/environment variables.
        client = MongoClient("mongodb+srv://mazda:V2KMvmtixGkOxq2h@cluster0-lpt2w.gcp.mongodb.net/test?retryWrites=true&w=majority")
        remoteDB = client.SmartMazda.trips
        operations = [ReplaceOne(
            filter={"startTime": doc["startTime"]},
            replacement=doc,
            upsert=True
        ) for doc in self.database.all()]
        if not operations:
            # bulk_write() rejects an empty operation list.
            return json_util.dumps({})
        try:
            result = remoteDB.bulk_write(operations)
        except BulkWriteError as bwe:
            # Previously `result` was left unbound here and the trailing
            # return crashed with NameError; report the details instead.
            print(bwe.details)
            return json_util.dumps(bwe.details)
        return json_util.dumps(result.bulk_api_result)

    def clear(self):
        """ Delete the content of the local database """
        self.database.truncate()
df0ae9e699494b11d2f763f0b863647a022b4718 | 940 | py | Python | shebang/account/tasks.py | KNU-shebang/Shebang | 117d6d55344442d76def5c7682ab826ea2521d15 | [
"MIT"
] | 2 | 2017-03-18T07:43:58.000Z | 2017-05-02T16:21:42.000Z | shebang/account/tasks.py | KNU-shebang/Shebang | 117d6d55344442d76def5c7682ab826ea2521d15 | [
"MIT"
] | 2 | 2017-03-24T09:24:35.000Z | 2017-05-02T14:48:21.000Z | shebang/account/tasks.py | KNU-shebang/Shebang | 117d6d55344442d76def5c7682ab826ea2521d15 | [
"MIT"
] | 1 | 2020-10-18T18:38:14.000Z | 2020-10-18T18:38:14.000Z | from django.core.mail import EmailMessage
from celery.decorators import task
from django.conf import settings
import base64
@task(name='send_email_task')
def send_email_task(email, name):
    """Send an HTML signup-confirmation mail with a base64 verify link.

    Args (assumed -- confirm at call sites): ``email`` and ``name`` are
    plain strings from the signup form.
    """
    # The local part of the address (before '@') is base64-encoded and
    # used as the verification token in the confirmation URL.
    email_value = email.split('@')[0].encode('utf-8')
    # b64encode returns bytes; decode directly instead of the previous
    # str(...)[1:].strip("'") hack (which relied on the repr format).
    refined_email = base64.b64encode(email_value).decode('utf-8')
    from_email = settings.EMAIL_HOST_USER  # sender address from settings
    subject = '{} 님 회원가입 알림'.format(name)  # mail subject ("signup notice")
    html_content = """<h1>{0}님 가입을 환영합니다.</h1>
    <p>가입 인증을 위해서 아래 링크를 클릭해주세요</p>
    <a href='http://127.0.0.1:8000/account/{1}/'>http://127.0.0.1:8000/account/{2}/</a>
    """.format(name, refined_email, refined_email)
    msg = EmailMessage(subject, html_content, from_email, [email])
    msg.content_subtype = "html"
    msg.send()
df0cb59e9d0a265d0b3224ad16dd4f909e4faa37 | 5,703 | py | Python | murasame/maker.py | amaotone/caruta-contest-manager | 33bbbc8a8ff2903a2763a1270715f224c329e7a2 | [
"MIT"
] | null | null | null | murasame/maker.py | amaotone/caruta-contest-manager | 33bbbc8a8ff2903a2763a1270715f224c329e7a2 | [
"MIT"
] | 20 | 2016-07-21T16:01:36.000Z | 2016-10-20T07:42:57.000Z | murasame/maker.py | amaotone/murasame | 33bbbc8a8ff2903a2763a1270715f224c329e7a2 | [
"MIT"
] | null | null | null | import os
import warnings
import numpy as np
import pandas as pd
from .utils import classname_sorted, match_count
class Board(object):
    """Pairing board for one tournament class.

    Players (pandas Series rows) are appended one at a time and greedily
    paired into ``match_count`` upper/lower pairs such that no pair
    shares a value for any of ``keys`` (e.g. two players from one club).
    """

    def __init__(self, match_count, keys=None):
        """Initialize match display board.

        Args:
            match_count (int): Number of matches.
            keys (list[str]): Keys for deciding whether the match is valid.
                Default value is `["club"]`.
        """
        self._upper = list()
        self._lower = list()
        self.match_count = match_count
        self.keys = keys if keys else ["club"]

    def append(self, player):
        """Append player to board.

        This match-making algorithm acts upon a Guidelines for Caruta
        Competition proposed by All Japan Caruta Association.

        Args:
            player (pd.Series): A player to be added.

        See Also:
            http://www.karuta.or.jp/kitei/kyougikai.pdf
        """
        assert len(self._upper) >= len(self._lower)
        # Rows are balanced: the player opens a new match on the upper row.
        if self._on_upper():
            self._upper.append(player)
            return
        # Someone is waiting on the upper row; pair them when valid.
        if self._is_valid(self._single_player, player):
            self._lower.append(player)
            return
        # Invalid opponent: park the player on whichever row has room.
        if not self._is_last(self._upper):
            self._upper.append(player)
            return
        if not self._is_last(self._lower):
            # Upper row is full: try swapping into an existing pair.
            self._change_players(player)
            return
        warnings.warn("Match-making is already completed.")

    def validate(self):
        """Check all organized matches are valid."""
        for a, b in zip(self._upper, self._lower):
            if not self._is_valid(a, b):
                return False
        return True

    def as_dataframe(self):
        """Return the pairings as a DataFrame, rows in board order."""
        return pd.DataFrame(self.all)

    @property
    def all(self):
        # Interleave the rows: upper0, lower0, upper1, lower1, ...
        return list(sum(zip(self._upper, self._lower), ()))

    @property
    def completed(self):
        """True when all matches are made and every pair is valid."""
        if not self.validate():
            return False
        return len(self._upper) == len(self._lower) == self.match_count

    @property
    def _single_player(self):
        # The upper-row player currently waiting for an opponent.
        assert len(self._upper) > len(self._lower)
        return self._upper[len(self._lower)]

    def _change_players(self, player):
        """Swap ``player`` into an existing pair so both matches stay valid."""
        opponent = self._single_player
        for i, (a, b) in enumerate(zip(self._upper, self._lower)):
            # player replaces a; a moves down to face the waiting opponent.
            if self._is_valid(player, b) and self._is_valid(opponent, a):
                self._upper[i] = player
                self._lower.append(a)
                return
            # player replaces b; b moves down to face the waiting opponent.
            if self._is_valid(a, player) and self._is_valid(opponent, b):
                self._lower[i] = player
                self._lower.append(b)
                return
        warnings.warn("No player is changeable.")
        self._lower.append(player)

    def _is_valid(self, a, b):
        # A match is valid when the players differ on every key.
        # ``Series.ix`` was removed from pandas 1.0; plain label indexing
        # is the supported equivalent.
        for key in self.keys:
            if a[key] == b[key]:
                return False
        return True

    def _is_last(self, lst):
        # True when the row already holds its full quota of players.
        return len(lst) >= self.match_count

    def _on_upper(self):
        # True when the next player should start a new (upper-row) match.
        return len(self._upper) == len(self._lower)

    def index(self, player):
        return self.all.index(player)

    def __contains__(self, item):
        return item in self.all

    def __getitem__(self, i):
        return self.all[i]

    def __len__(self):
        return len(self.all)
class Maker(object):
    """Builds pairing boards and seat sheets for every class in a workbook."""
    def __init__(self, file):
        # One DataFrame per sheet/class (sheetname=None loads them all).
        # NOTE(review): ``sheetname`` is the pre-0.21 pandas spelling of
        # ``sheet_name`` -- this pins an old pandas version.
        self.dfs = pd.read_excel(file, sheetname=None)
        self.results = dict()
    def make_board(self, keys):
        """Shuffle each class and build its pairing board; ``keys`` decide
        which pairings are valid (see Board)."""
        def make(df, keys):
            # Append shuffled players until the board reports completion.
            player_count = df.shape[0]
            board = Board(match_count(player_count), keys=keys)
            shuffled = df.reindex(np.random.permutation(df.index))
            for i, row in shuffled.iterrows():
                board.append(row)
                if board.completed:
                    break
            return board.as_dataframe()
        def trim(df, start_index=1):
            # Renumber seats consecutively starting at ``start_index``.
            df.reset_index(drop=True, inplace=True)
            df.index += start_index
            return df
        start = 1
        for classname, df in classname_sorted(self.dfs.items()):
            # Seat numbering continues across classes; each class must
            # start on an odd seat (pairs occupy odd/even slots).
            assert start % 2 == 1
            board = make(df, keys=keys)
            board = trim(board, start_index=start)
            self.results[classname] = dict()
            self.results[classname]['board'] = board
            start += board.shape[0]
    def make_sheet(self, id_label, seat_label, fill):
        """Join each class roster with its board to attach seat numbers."""
        def make(df, board, id_label, seat_label, fill):
            ref = pd.DataFrame({id_label: board[id_label], seat_label:
                board.index})
            sheet = df.merge(right=ref, on=id_label, how='left')
            # Players who did not receive a seat get the ``fill`` marker.
            sheet[seat_label] = sheet[seat_label].fillna(fill)
            return sheet
        # make_board() must have been run first.
        assert len(self.results) != 0
        for classname, df in classname_sorted(self.dfs.items()):
            board = self.results[classname]['board']
            sheet = make(df, board, id_label, seat_label, fill)
            self.results[classname]['sheet'] = sheet
    def save_board(self, path):
        """Write every class board to one Excel file, one sheet per class."""
        w = self.writer(path)
        for classname, res in classname_sorted(self.results.items()):
            res['board'].to_excel(w, classname, index=False)
        w.save()
    def save_sheet(self, path, sort_by=None):
        """Write every class sheet to one Excel file, optionally sorted."""
        w = self.writer(path)
        for classname, res in classname_sorted(self.results.items()):
            sheet = res['sheet'].copy()
            if sort_by is not None:
                sheet.sort_values(by=sort_by, inplace=True)
            sheet.to_excel(w, classname, index=False)
        w.save()
    @staticmethod
    def writer(path):
        # Ensure the destination directory exists, then build the writer.
        root, _ = os.path.split(path)
        os.makedirs(root, exist_ok=True)
        return pd.ExcelWriter(path, engine='xlsxwriter')
| 29.396907 | 75 | 0.575662 | 717 | 5,703 | 4.400279 | 0.23152 | 0.042789 | 0.020919 | 0.025357 | 0.279239 | 0.206973 | 0.183835 | 0.130586 | 0.09065 | 0.039937 | 0 | 0.001792 | 0.315097 | 5,703 | 193 | 76 | 29.549223 | 0.80594 | 0.08364 | 0 | 0.198473 | 0 | 0 | 0.019765 | 0 | 0 | 0 | 0 | 0 | 0.030534 | 1 | 0.183206 | false | 0 | 0.038168 | 0.061069 | 0.427481 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df155ccc7955d0006b4f5d0a5362f1ace1aaba5e | 1,367 | py | Python | sweep.py | binshengliu/bm25f | 6d6d8578494a2ee091639dc9fa2f40fbb9254c39 | [
"MIT"
] | 6 | 2020-12-11T03:54:10.000Z | 2022-03-22T04:04:13.000Z | sweep.py | binshengliu/bm25f | 6d6d8578494a2ee091639dc9fa2f40fbb9254c39 | [
"MIT"
] | null | null | null | sweep.py | binshengliu/bm25f | 6d6d8578494a2ee091639dc9fa2f40fbb9254c39 | [
"MIT"
] | null | null | null | import logging
import subprocess
from pathlib import Path
import hydra # type: ignore
from omegaconf import DictConfig
logger = logging.getLogger(__name__)
initialized = False
@hydra.main(config_path="conf/", config_name="ax")  # type: ignore
def main(cfg: DictConfig) -> float:
    """Run one BM25F sweep trial and return the metric value (for Ax).

    ``cfg`` carries ``k1``, per-field weights (``*_wt``) and length
    normalizations (``*_b``), plus the script/index/query/qrels paths.
    """
    # "<field>_wt: v" config entries -> "field:v" pairs for the CLI flag.
    field_wt_list = [f"{k[:-3]}:{v}" for k, v in cfg.items() if k.endswith("_wt")]
    field_wt = ",".join(field_wt_list)
    # Same for the per-field b parameters ("<field>_b").
    field_b_list = [f"{k[:-2]}:{v}" for k, v in cfg.items() if k.endswith("_b")]
    field_b = ",".join(field_b_list)
    output_path = f"k1={cfg.k1}-fieldWt={field_wt}-fieldB={field_b}.run"
    cmd = (
        f"{cfg.script_path} -index={cfg.index_path} -k1={cfg.k1} "
        f"-fieldWt={field_wt} -fieldB={field_b} "
        f"-threads={cfg.threads} {cfg.query_path}"
    )
    # Run the retrieval script and capture the TREC run it prints.
    out = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, encoding="utf-8")
    with open(output_path, "w") as f:
        f.write(out.stdout)
    trec_cmd = f"trec_eval -m {cfg.metric} {cfg.qrels_path} {output_path}"
    proc = subprocess.run(trec_cmd, shell=True, stdout=subprocess.PIPE, text=True)
    # trec_eval output is "<metric> <qid> <value>"; take the value column.
    value = proc.stdout.split()[2]
    # Rename the run file so the achieved score becomes part of its name.
    new_path = (
        f"k1={cfg.k1}-fieldWt={field_wt}-fieldB={field_b}-{cfg.metric}={value}.run"
    )
    Path(output_path).rename(new_path)
    logger.info(new_path)
    return float(value)
if __name__ == "__main__":
    # Hydra parses CLI overrides and invokes main() with the composed config.
    main()
| 30.377778 | 83 | 0.649598 | 208 | 1,367 | 4.048077 | 0.355769 | 0.049881 | 0.024941 | 0.071259 | 0.261283 | 0.261283 | 0.154394 | 0.154394 | 0.154394 | 0.154394 | 0 | 0.008889 | 0.17703 | 1,367 | 44 | 84 | 31.068182 | 0.739556 | 0.018288 | 0 | 0 | 0 | 0.030303 | 0.271098 | 0.125467 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.151515 | 0 | 0.212121 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df17a532cbd4448e549e2c95877c8b22f5178d2e | 2,127 | py | Python | utils/compute_contrast.py | Mhaiyang/iccv | 04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb | [
"MIT"
] | 2 | 2019-01-10T03:44:03.000Z | 2019-05-24T08:50:14.000Z | utils/compute_contrast.py | Mhaiyang/iccv | 04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb | [
"MIT"
] | null | null | null | utils/compute_contrast.py | Mhaiyang/iccv | 04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb | [
"MIT"
] | null | null | null | """
@Time : 202/20/19 09:41
@Author : TaylorMei
@Email : mhy845879017@gmail.com
@Project : iccv
@File : compute_contrast.py
@Function:
"""
import os
import numpy as np
import cv2
import skimage.io
from misc import data_write
# Dataset locations; earlier experiments are kept commented for reference.
# image_path = '/home/iccd/data/2019/msd9_all/all_images/'
# mask_path = '/home/iccd/data/2019/msd9_all/all_masks/'
# image_path = '/home/iccd/data/MSRA10K/DUT-OMRON/image/'
# mask_path = '/home/iccd/data/MSRA10K/DUT-OMRON/mask/'
image_path = '/home/iccd/data/SBU-all/image/'
mask_path = '/home/iccd/data/SBU-all/mask/'
imglist = os.listdir(image_path)
# One chi-square color-contrast score per image (foreground vs background).
chi_sq_color = []
def chi2(arr1, arr2):
    """Return the chi-square distance between two histograms.

    A machine-epsilon term in the denominator guards against division
    by zero on empty bins.
    """
    # ``np.float`` was a deprecated alias of the builtin float and was
    # removed in NumPy 1.24; ``np.finfo(float)`` is the supported form.
    return np.sum((arr1 - arr2)**2 / (arr1 + arr2 + np.finfo(float).eps))
for i, imgname in enumerate(imglist):
    print(i, imgname)
    image = skimage.io.imread(image_path + imgname)
    # name = imgname.split('.')[0]
    name = imgname[:-4]  # strip the 4-char extension (e.g. ".jpg")
    mask = skimage.io.imread(mask_path + name + '.png')
    # Binary region masks: foreground = nonzero mask pixels, background = rest.
    mask_f = np.where(mask != 0, 1, 0).astype(np.uint8)
    mask_b = np.where(mask == 0, 1, 0).astype(np.uint8)
    if np.sum(mask_f) == 0:
        # Empty foreground: nothing to compare, skip this image.
        print('llllllllllllllllllllllllllllllllllllllllllllllll')
        continue
    # Per-channel 256-bin histograms restricted to each region.
    hist_f_r = cv2.calcHist([image], [0], mask_f, [256], [0,256])
    hist_f_g = cv2.calcHist([image], [1], mask_f, [256], [0,256])
    hist_f_b = cv2.calcHist([image], [2], mask_f, [256], [0,256])
    hist_b_r = cv2.calcHist([image], [0], mask_b, [256], [0,256])
    hist_b_g = cv2.calcHist([image], [1], mask_b, [256], [0,256])
    hist_b_b = cv2.calcHist([image], [2], mask_b, [256], [0,256])
    # Area-normalize each histogram, then chi-square distance per channel.
    chi_sq_r = chi2(hist_f_r.flatten()/np.sum(mask_f), hist_b_r.flatten()/np.sum(mask_b))
    chi_sq_g = chi2(hist_f_g.flatten()/np.sum(mask_f), hist_b_g.flatten()/np.sum(mask_b))
    chi_sq_b = chi2(hist_f_b.flatten()/np.sum(mask_f), hist_b_b.flatten()/np.sum(mask_b))
    # Average the three channel distances into one score per image.
    chi_sq_color.append(((chi_sq_r + chi_sq_g + chi_sq_b) / 3).item())
chi_sq_color = np.array(chi_sq_color)
# Min-max normalize to [0, 1] (note: max(x - min) equals max - min).
chi_sq_color = (chi_sq_color - np.min(chi_sq_color)) / (np.max(chi_sq_color - np.min(chi_sq_color)))
print(chi_sq_color)
data_write('./shadow_chi_sq.xlsx', [chi_sq_color])
df199fd0ad58d48f73efca3982f9ee10e7470114 | 3,108 | py | Python | sherry/inherit/bar.py | py-mu/sherry | af1e95f1eada663ba3b3fb607ca88f099f894f36 | [
"MIT"
] | 1 | 2021-06-27T05:54:23.000Z | 2021-06-27T05:54:23.000Z | sherry/inherit/bar.py | py-mu/sherry | af1e95f1eada663ba3b3fb607ca88f099f894f36 | [
"MIT"
] | null | null | null | sherry/inherit/bar.py | py-mu/sherry | af1e95f1eada663ba3b3fb607ca88f099f894f36 | [
"MIT"
] | 1 | 2021-07-28T09:00:43.000Z | 2021-07-28T09:00:43.000Z | # encoding=utf-8
"""
create by pymu
on 2021/6/6
at 2:09
"""
from sherry.inherit.component import Component
class BaseBar(Component):
    """Custom window title bar with maximize/restore, minimize and close buttons."""
    # Custom title-bar buttons: maximize/minimize and close.
    bar_normal = None  # maximize <-> restore toggle button
    bar_close = None
    bar_mini = None
    def __init__(self, master, *args, **kwargs):
        super().__init__(master, *args, **kwargs)
        if not master:
            # The bar is meaningless without an owning window.
            raise ValueError(self.resource.translate('Bar', '父类窗体不能为空'))
    # noinspection PyUnresolvedReferences
    def set_signal(self):
        """Connect the title-bar button signals."""
        super(BaseBar, self).set_signal()
        if not self.master:
            return
        if self.bar_normal:
            self.bar_normal.clicked.connect(self.change_normal)
        if self.bar_close:
            self.bar_close.clicked.connect(self.master.accept)
        if self.bar_mini:
            self.bar_mini.clicked.connect(self.master.showMinimized)
    def configure(self):
        """Apply default icons and localized tooltips to the buttons."""
        super(BaseBar, self).configure()
        self.set_default_btn_icon()
        translate = self.resource.translate
        self.bar_normal.setToolTip(translate('Bar', "最大化"))
        self.bar_mini.setToolTip(translate('Bar', "最小化"))
        self.bar_close.setToolTip(translate('Bar', "关闭"))
    def set_default_btn_icon(self):
        """Set the default (font-awesome) icon on each present button."""
        if self.bar_normal:
            self.bar_normal.setIcon(self.resource.font_icon("fa.window-maximize", color="black"))
        if self.bar_mini:
            self.bar_mini.setIcon(self.resource.font_icon("fa.window-minimize", color="black"))
        if self.bar_close:
            self.bar_close.setIcon(self.resource.font_icon("fa.close", color="black"))
    # noinspection PyUnresolvedReferences
    def change_normal(self):
        """
        Switch the button to restore-window mode.
        """
        if not self.bar_normal:
            return
        self.master.layout().setContentsMargins(*[0] * 4)
        self.master.showMaximized()  # maximize the window first
        self.bar_normal.setIcon(self.resource.font_icon("fa.window-restore", color="black"))
        self.bar_normal.setToolTip(self.resource.translate('Bar', "恢复"))  # update the button tooltip
        self.bar_normal.disconnect()  # drop the previous signal/slot connection
        self.bar_normal.clicked.connect(self.change_max)  # rewire to the restore handler
    # noinspection PyUnresolvedReferences
    def change_max(self):
        """
        Switch the button back to maximize mode.
        """
        if not hasattr(self, "bar_normal"):
            return
        if not hasattr(self.master, "border_width"):
            self.master.border_width = 0
        self.master.layout().setContentsMargins(*[self.master.border_width] * 4)
        self.master.showNormal()
        self.bar_normal.setIcon(self.resource.font_icon("fa.window-maximize", color="black"))
        self.bar_normal.setToolTip(self.resource.translate('Bar', "最大化"))
        self.bar_normal.disconnect()  # drop the old signal/slot connection
        self.bar_normal.clicked.connect(self.change_normal)
    def mouseDoubleClickEvent(self, event):
        """Double-click: any double-click above the title-bar height toggles maximize/restore."""
        if not self.bar_normal:
            return
        if event.pos().y() < self.y() + self.height():
            self.bar_normal.click()
| 35.724138 | 97 | 0.634813 | 358 | 3,108 | 5.346369 | 0.268156 | 0.098746 | 0.115465 | 0.060084 | 0.373041 | 0.33908 | 0.298851 | 0.193312 | 0.14838 | 0.14838 | 0 | 0.005915 | 0.238417 | 3,108 | 86 | 98 | 36.139535 | 0.802704 | 0.099421 | 0 | 0.310345 | 0 | 0 | 0.060908 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12069 | false | 0 | 0.017241 | 0 | 0.275862 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df1ab6b4b0ece312421f86b8f892a47e6416b0f6 | 3,970 | py | Python | build/env/lib/python2.7/site-packages/windmill-1.6-py2.7.egg/windmill/dep/_functest/collector.py | bopopescu/myhue | 5f566970a5a1fa5af9f01832c9e9808c47634bc7 | [
"Apache-2.0"
] | 61 | 2015-03-16T18:36:06.000Z | 2021-12-02T10:08:17.000Z | windmill/dep/_functest/collector.py | admc/windmill | 4304ee7258eb0c2814f215d8ce90abf02b1f737f | [
"Apache-2.0"
] | 8 | 2015-03-10T10:01:26.000Z | 2020-05-18T10:51:24.000Z | windmill/dep/_functest/collector.py | admc/windmill | 4304ee7258eb0c2814f215d8ce90abf02b1f737f | [
"Apache-2.0"
] | 14 | 2015-01-29T16:28:33.000Z | 2021-09-04T11:19:48.000Z | # Copyright (c) 2007 Mikeal Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import inspect
import new
import imp
import copy
from time import sleep
class Collector(object):
    """Imports functest test modules/packages from filesystem paths.

    NOTE(review): legacy Python 2 code -- it relies on the removed
    ``new`` module and the deprecated ``imp`` import machinery.
    """
    # Class attribute: a registry shared by ALL Collector instances.
    post_collection_functions = []
    def import_module(self, path):
        # Import a file or package directory by path, tagging the module
        # with the path it was collected from (functest_module_path).
        if os.path.isfile(path):
            # Single-file module: make its directory importable, load, restore.
            sys.path.insert(0, os.path.dirname(path))
            name = os.path.split(path)[-1].split('.')[0]
            filename, pathname, description = imp.find_module(name, [os.path.dirname(path)])
            module = imp.load_module(name, filename, pathname, description)
            module.functest_module_path = path
            module.__file__ = os.path.abspath(path)
            # NOTE(review): not exception-safe -- a failing load leaves
            # the inserted entry on sys.path.
            sys.path.pop(0)
        elif os.path.isdir(path):
            if os.path.isfile(os.path.join(path, '__init__.py')):
                # Real package: import it from its parent directory.
                sys.path.insert(0, os.path.abspath(os.path.join(path, os.path.pardir)))
                name = os.path.split(path)[-1]
                filename, pathname, description = imp.find_module(
                    name, [os.path.abspath(os.path.join(path, os.path.pardir))])
                module = imp.load_module(name, filename, pathname, description)
                module.functest_module_path = path
                module.__file__ = os.path.abspath(os.path.join(path, '__init__.py'))
                sys.path.pop(0)
            else:
                # Plain directory: synthesize an empty container module
                # (``new.module`` is Python 2 only).
                module = new.module(os.path.split(path)[-1])
                module.functest_module_path = path
        else:
            raise ImportError('path is not file or directory')
        return module
    def create_module_chain(self, path):
        # Import every enclosing package of ``path``, outermost first, so
        # package-level setup/teardown hooks get loaded.
        path = os.path.abspath(path)
        module_chain = []
        if not os.path.isdir(path):
            path = os.path.dirname(path)
        # For every valid python module the test is in we need to import it incase it contains setup/teardown
        while os.path.isfile(os.path.join(path, '__init__.py')):
            module_chain.append(self.import_module(path))
            path = os.path.join(*os.path.split(path)[:-1])
        module_chain.reverse()
        return module_chain
    def create_test_module(self, path):
        # Recursively import ``path`` and, for directories, every child
        # named test*; post-collection callbacks run on each module.
        path = os.path.abspath(path)
        if os.path.isfile(path):
            test_module = self.import_module(path)
            for func in self.post_collection_functions:
                func(test_module)
        elif os.path.isdir(path):
            test_module = self.import_module(path)
            for func in self.post_collection_functions:
                func(test_module)
            # Children considered: test*.py files and test* packages.
            for filename in [ f for f in os.listdir(path) if ( not f.startswith('.') ) and
                                                             ( f.startswith('test') ) and
                                                             ( ( f.endswith('.py') ) or
                                                               ( os.path.isdir(os.path.join(path, f)) and
                                                                 os.path.isfile(os.path.join(path, f, '__init__.py'))
                                                               )
                                                             )
                            ]:
                setattr(test_module, filename.split('.')[0], self.create_test_module(os.path.join(path, filename)))
        else:
            # Hard exit: report the bad path on the real stdout and bail.
            sys.__stdout__.write(path+' is not a valid python module path or filename\n')
            sys.__stdout__.flush()
            sleep(.5)
            sys.exit()
        return test_module
def register_post_collection(func):
    # Register a callback to run on every module after collection.
    # NOTE(review): ``test_collector`` is not defined in this chunk --
    # presumably a module-level Collector singleton created elsewhere; verify.
    test_collector.post_collection_functions.append(func)
| 42.234043 | 115 | 0.587406 | 500 | 3,970 | 4.518 | 0.276 | 0.087649 | 0.039841 | 0.049579 | 0.418327 | 0.385569 | 0.320496 | 0.283311 | 0.26826 | 0.198318 | 0 | 0.006917 | 0.30806 | 3,970 | 93 | 116 | 42.688172 | 0.815435 | 0.16927 | 0 | 0.309859 | 0 | 0 | 0.039939 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056338 | false | 0 | 0.169014 | 0 | 0.295775 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df1b51fe6d72128fafb48b80c98baaeec85bd5b2 | 2,643 | py | Python | Tools/BlenderPlugins/scripts/addons/zmey_properties/ui.py | nikoladimitroff/Zmey | 4bcea8d66cd3452c532fa68286aa03ad8528a3b4 | [
"MIT"
] | 22 | 2017-05-06T18:08:48.000Z | 2022-01-12T02:10:22.000Z | Tools/BlenderPlugins/scripts/addons/zmey_properties/ui.py | nikoladimitroff/Zmey | 4bcea8d66cd3452c532fa68286aa03ad8528a3b4 | [
"MIT"
] | 21 | 2017-12-11T18:42:49.000Z | 2018-08-27T23:13:47.000Z | Tools/BlenderPlugins/scripts/addons/zmey_properties/ui.py | nikoladimitroff/Zmey | 4bcea8d66cd3452c532fa68286aa03ad8528a3b4 | [
"MIT"
] | null | null | null | import bpy
# Lists
class ZmeySceneTypeList(bpy.types.UIList):
    """UIList row renderer for Zmey scene types."""
    def draw_item(self, context, layout, data, item, icon, active_data, active_propname):
        if item:
            # Inline-editable name; emboss=False keeps the row flat.
            layout.prop(item, "name", text="", emboss=False)
# Panels
class ZmeyWorldPropertyPanel(bpy.types.Panel):
    """Zmey Scene Type Properties"""
    bl_label = "Zmey World Types"
    bl_idname = "ZMEY_SCENE_TYPES"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "world"
    @classmethod
    def poll(cls, context):
        # Only show the panel when a scene exists.
        return context.scene
    def draw(self, context):
        layout = self.layout
        world = context.world
        zmey = world.zmey_scene_types
        layout.row().prop(zmey, "name")
        layout.row().prop(zmey, "types")
        row = layout.row()
        # Selectable list of types plus add/remove operator buttons.
        row.template_list("ZmeySceneTypeList", "", zmey, "types", zmey, "active_type")
        column = row.column()
        column.operator("zmey.add_scene_type", icon="ZOOMIN", text="")
        column.operator("zmey.remove_scene_type", icon="ZOOMOUT", text="")
        if len(zmey.types):
            # Detail editor for the currently selected type.
            self.draw_type(zmey.types[zmey.active_type], layout.box())
    def draw_type(self, item, layout):
        # Per-type editor: name, mesh reference, then the component UI.
        layout.row().prop(item, "name")
        mesh_box = layout.box()
        mesh_box.row().prop(item, "mesh_reference")
        item.components.draw_type(layout)
class ZmeyObjectPropertiesPanel(bpy.types.Panel):
    """Zmey Object Properties"""
    bl_label = "Zmey Object"
    bl_idname = "ZMEY_OBJECT"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "object"
    def draw(self, context):
        layout = self.layout
        obj = context.object
        # Master toggle: the rest applies only to Zmey-enabled objects.
        layout.row().prop(obj.zmey_props, "enabled", toggle=True)
        if obj.zmey_props.enabled:
            layout.row().prop(obj.zmey_props, "type")
            box = layout.box()
            # Look up the selected scene type on the world settings.
            zmey_type = bpy.context.scene.world.zmey_scene_types.types[int(obj.zmey_props.type)]
            # Label depends on whether the type already carries a mesh.
            # NOTE(review): `== None` should idiomatically be `is None`.
            box.box().prop(
                obj.zmey_props,
                "mesh_export",
                text="Export Mesh" if zmey_type.mesh_reference == None else "Override Type Mesh")
            obj.zmey_props.components.draw_type(box)
def register():
    """Register all Zmey UI classes with Blender."""
    for ui_class in (ZmeySceneTypeList, ZmeyWorldPropertyPanel,
                     ZmeyObjectPropertiesPanel):
        bpy.utils.register_class(ui_class)
def unregister():
    """Unregister all Zmey UI classes from Blender."""
    for ui_class in (ZmeySceneTypeList, ZmeyWorldPropertyPanel,
                     ZmeyObjectPropertiesPanel):
        bpy.utils.unregister_class(ui_class)