hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1deb3a29a0068a66f60f7a72e5a69387e26b00c4 | 2,777 | py | Python | planning-and-learning/dyna.py | Sirius79/reinforcement-learning | 96ea39260cd399c75278745365aad7e5babab0a0 | ["MIT"] | null | null | null | planning-and-learning/dyna.py | Sirius79/reinforcement-learning | 96ea39260cd399c75278745365aad7e5babab0a0 | ["MIT"] | null | null | null | planning-and-learning/dyna.py | Sirius79/reinforcement-learning | 96ea39260cd399c75278745365aad7e5babab0a0 | ["MIT"] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
import itertools
from collections import defaultdict
import environment
class Dyna():
def __init__(self, alpha, epsilon, gamma):
'''
Q(s,a) -> action value
model(s,a) -> R, s'
'''
self.alpha = alpha
self.epsilon = epsilon
self.gamma = gamma
self.Q = defaultdict(lambda: np.random.randint(5,size=4))
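# Editorial note on the model layout used below: model_reward[s] holds 4
# reward slots (one per action), and model_obs[s][a*100:(a+1)*100] holds
# action a's flattened next observation (100 cells for the 10x10
# Environment created at the bottom of this file).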
self.model_reward = defaultdict(lambda: np.random.randint(5,size=4))
self.model_obs = defaultdict(lambda : np.random.randint(5, size=400))
def greedy_policy(self, state):
'''
returns action from state using epsilon greedy policy
derived from Q
'''
prob = np.random.random()
if prob <= self.epsilon:
return np.random.randint(4)
else:
return np.argmax(self.Q[state])
def play(self):
score = np.zeros(episode_num)
for episode in range(episode_num):
env.reset()
env.render()
state = env.env.flatten()
action = self.greedy_policy(tuple(state))
observation, reward, done = env.step(action)
observation = observation.flatten()
next_action = np.argmax(self.Q[tuple(observation)])
self.Q[tuple(state)][action] += self.alpha * (reward + (self.gamma * self.Q[tuple(observation)][next_action]) - self.Q[tuple(state)][action])
self.model_reward[tuple(state)][action] = reward
self.model_obs[tuple(state)][action*100:(action*100)+100] = observation
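# Planning phase (Dyna-Q): replay 20 simulated transitions sampled from
# the learned model to update Q without touching the real environment.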
for step in range(20):
random_state = list(self.Q.keys())[np.random.choice(len(list(self.Q.keys())))]
random_action = np.random.randint(4)
R = self.model_reward[tuple(random_state)][random_action]
print("Reward and Action ",R, random_action)
next_S = self.model_obs[tuple(random_state)][random_action*100: (random_action*100)+100]
a = np.argmax(self.Q[tuple(next_S)])
self.Q[tuple(random_state)][random_action] += self.alpha * (R + (self.gamma * self.Q[tuple(next_S)][a]) - self.Q[tuple(random_state)][random_action])
env.render()
if done:
print("Finished episode", episode+1)
score[episode] = step
break
print("Finished time step "+str(step)+"of episode"+str(episode+1))
return score
env = environment.Environment(10, 10)
agent = Dyna(0.1,0.1,0.95)
episode_num = 100
score = agent.play()
print(np.amax(score))
| 37.527027 | 165 | 0.565718 | 338 | 2,777 | 4.553254 | 0.248521 | 0.038986 | 0.051982 | 0.05718 | 0.251462 | 0.160494 | 0.128005 | 0.061079 | 0.061079 | 0.061079 | 0 | 0.023983 | 0.309327 | 2,777 | 73 | 166 | 38.041096 | 0.778415 | 0.039971 | 0 | 0.037736 | 0 | 0 | 0.024353 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0 | 0.113208 | 0 | 0.245283 | 0.075472 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1deec6ae5dce012d14c4e9ed45f53515b88ecefc | 4,734 | py | Python | src/week4/partitioned_stream_producer.py | shamilbi/ru202 | ffeb78616bcffe3c887ffbcd3d875358f9a11cf6 | ["MIT"] | null | null | null | src/week4/partitioned_stream_producer.py | shamilbi/ru202 | ffeb78616bcffe3c887ffbcd3d875358f9a11cf6 | ["MIT"] | null | null | null | src/week4/partitioned_stream_producer.py | shamilbi/ru202 | ffeb78616bcffe3c887ffbcd3d875358f9a11cf6 | ["MIT"] | null | null | null | # Use Case: Partitioned Stream Example with Python
# Usage: Part of Redis University RU202 courseware
#
# Simulates a temperature logging device that
# continuously outputs new temperature readings, and
# pushes them into a date-partitioned set of streams
# in Redis. A new stream is created for each new
# day, and set to expire a few days after creation
# to ensure that memory usage is managed.
#
# The producer starts at a configurable date and
# generates readings for a configurable number of days
# at a configurable interval. By default it starts
# on January 1st 2025, generating 10 days worth of
# data at 1 second intervals.
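#
# For illustration: if STREAM_KEY_BASE (defined in util.constants, so an
# assumption here) were "temps", the default run would populate ten
# partitions named temps:20250101 through temps:20250110, each expiring
# two days after its last write.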
import random
from datetime import datetime
import util.constants as const
from util.connection import get_connection
ONE_DAY_SECONDS = 60 * 60 * 24
# Expire stream partitions 2 days after we finish
# writing to them.
PARTITION_EXPIRY_TIME = ONE_DAY_SECONDS * 2
# Record temperature readings every second.
TEMPERATURE_READING_INTERVAL_SECONDS = 1
# Date that we'll start recording temperatures for -
# using a future date so that all students get the
# same dataset rather than using dates relative to
# when the producer is run. So this timestamp
# represents the oldest temperature reading that
# will be generated.
TIMESTAMP_START = 1735689600 # 01/01/2025 00:00:00 UTC
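# (1735689600 is the value of datetime(2025, 1, 1, tzinfo=timezone.utc).timestamp().)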
# Number of days of data to generate.
DAYS_TO_GENERATE = 10
# Utility class to produce wandering temperature range.
class Measurement:
def __init__(self):
self.current_temp = 50
self.max_temp = 100
self.min_temp = 0
def get_next(self):
if random.random() >= 0.5:
if self.current_temp + 1 <= self.max_temp:
self.current_temp += 1
elif self.current_temp - 1 >= self.min_temp:
self.current_temp -= 1
return {'temp_f': self.current_temp}
# To make this demonstration repeatable, running
# the producer resets all the streams.
def reset_state():
redis = get_connection()
# Delete any old streams that have not yet expired.
keys_to_delete = []
stream_timestamp = TIMESTAMP_START
print("Deleting old streams:")
for _ in range(DAYS_TO_GENERATE):
s = datetime.utcfromtimestamp(stream_timestamp).strftime('%Y%m%d')
stream_key_name = f"{const.STREAM_KEY_BASE}:{s}"
print(stream_key_name)
keys_to_delete.append(stream_key_name)
stream_timestamp += ONE_DAY_SECONDS
redis.delete(*keys_to_delete)
# Return a string containing the UTC date for
# the supplied timestamp in the format specified by
# format_pattern. See http://strftime.org/
def format_timestamp_as_utc(timestamp, format_pattern):
return datetime.utcfromtimestamp(timestamp).strftime(format_pattern)
# Calculate key name for the stream partition that
# the supplied timestamp belongs to. Each day has
# its own stream key name, all times are in UTC.
def get_stream_key_for_timestamp(timestamp):
return f"{const.STREAM_KEY_BASE}:{format_timestamp_as_utc(timestamp, '%Y%m%d')}"
# Entry point: clean up any old state and run the producer.
def main():
reset_state()
measurement = Measurement()
previous_stream_key = ""
current_timestamp = TIMESTAMP_START
# End data production a configurable number of days after we began.
end_timestamp = TIMESTAMP_START + (ONE_DAY_SECONDS * DAYS_TO_GENERATE)
redis = get_connection()
stream_key = ""
while current_timestamp < end_timestamp:
# Get the stream partition key name that this timestamp should
# be written to.
stream_key = get_stream_key_for_timestamp(current_timestamp)
# Get a temperature reading.
entry = measurement.get_next()
# Publish to the current stream partition and set
# or update expiry time on the stream partition.
# This is done as a pipeline so that both commands are
# executed with a single round trip to the Redis Server
# for performance reasons. An alternative strategy might
# be to only update the expiry time every 100th message
# or similar.
# Pipeline: https://redis.io/topics/pipelining
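# A sketch of that alternative (hypothetical, not used here;
# messages_written would be a counter maintained by the loop):
#   if messages_written % 100 == 0:
#       pipe.expireat(stream_key, current_timestamp + PARTITION_EXPIRY_TIME)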
pipe = redis.pipeline()
pipe.xadd(stream_key, entry, current_timestamp)
pipe.expireat(stream_key, current_timestamp + PARTITION_EXPIRY_TIME)
pipe.execute()
# Have we started a new stream?
if stream_key != previous_stream_key:
# A new day's stream started.
print(f"Populating stream partition {stream_key}.")
previous_stream_key = stream_key
# Move on to the next timestamp value.
current_timestamp += TEMPERATURE_READING_INTERVAL_SECONDS
if __name__ == "__main__":
main()
| 34.808824 | 84 | 0.716308 | 660 | 4,734 | 4.962121 | 0.35303 | 0.049466 | 0.027481 | 0.019542 | 0.099542 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016495 | 0.218842 | 4,734 | 135 | 85 | 35.066667 | 0.869118 | 0.463456 | 0 | 0.034483 | 0 | 0 | 0.071974 | 0.03458 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.068966 | 0.034483 | 0.241379 | 0.051724 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1df2df202426ece1c28a8eca0173d137a249a259 | 461 | py | Python | api/urls.py | Somsubhra1/Django-rest-framework-todo | 1f3d527987a835ea2f901f3bb778ecd3dd57c646 | ["MIT"] | 2 | 2021-05-16T18:51:02.000Z | 2021-11-06T04:01:27.000Z | api/urls.py | Somsubhra1/Django-rest-framework-todo | 1f3d527987a835ea2f901f3bb778ecd3dd57c646 | ["MIT"] | null | null | null | api/urls.py | Somsubhra1/Django-rest-framework-todo | 1f3d527987a835ea2f901f3bb778ecd3dd57c646 | ["MIT"] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path("", views.apiOverView, name="api-overview"),
path("task-list/", views.taskList, name="task-list"),
path("task-detail/<int:pk>", views.taskDetail, name="task-detail"),
path("task-create/", views.taskCreate, name="task-create"),
path("task-update/<int:pk>", views.taskUpdate, name="task-update"),
path("task-delete/<int:pk>", views.taskDelete, name="task-delete"),
]
| 35.461538 | 71 | 0.670282 | 61 | 461 | 5.065574 | 0.393443 | 0.12945 | 0.097087 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121475 | 461 | 12 | 72 | 38.416667 | 0.762963 | 0 | 0 | 0 | 0 | 0 | 0.318872 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1df369bab97bebb47ebd600adec10c1f1a6a30eb | 1,895 | py | Python | link_start.py | povsister/yuanshen-trend | b03c21e720b1c196b5caf20f2f1b1b70ec3ad41a | ["MIT"] | 4 | 2019-07-24T09:45:32.000Z | 2020-10-11T07:18:40.000Z | link_start.py | kami-sama-dp/yuanshen-trend | b03c21e720b1c196b5caf20f2f1b1b70ec3ad41a | ["MIT"] | null | null | null | link_start.py | kami-sama-dp/yuanshen-trend | b03c21e720b1c196b5caf20f2f1b1b70ec3ad41a | ["MIT"] | 1 | 2020-03-05T11:31:30.000Z | 2020-03-05T11:31:30.000Z | from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
import traceback
from json import dumps
from urllib.parse import urlparse, unquote
from sourcelib.basiclib import get_query_as_dict
from sourcelib.TapTap import SourceTapTap
class YS_Factory:
@staticmethod
def factory(url, action):
CLASS_DICT = {
'www.taptap.com': SourceTapTap
}
parsed_url = urlparse(url)
selected_class = CLASS_DICT.get(parsed_url.netloc)
return selected_class(parsed_url, action)
class YS_HTTPHandler(BaseHTTPRequestHandler):
__software_version = 'ys-trend/0.1'
def __writeHeaders(self):
self.send_header('Content-type', 'application/json')
self.send_header('X-Powered-By', self.__software_version)
self.end_headers()
def __respond(self, code, js):
self.send_response(code)
self.send_header('Content-Length', str(len(js)))
self.__writeHeaders()
self.wfile.write(js)
self.wfile.flush()
def respond(self, js):
self.__respond(200, js)
def respondNotFond(self):
js = dumps({
'msg': 'not implemented'
}).encode('utf8')
self.__respond(404, js)
def do_GET(self):
# Incoming request like http://127.0.0.1:1571/?url=xxx&action=xxx
# self.path looks like /?url=xxx&action=xxx
print('[Query Path]:', unquote(self.path))
query = get_query_as_dict(self.path)
try:
if query.get('url') is not None:
lib = YS_Factory.factory(query['url'], query)
data = lib.getJSON()
self.respond(data)
else:
self.respondNotFond()
except Exception:
traceback.print_exc()
if __name__ == '__main__':
server = ThreadingHTTPServer(('', 1571), YS_HTTPHandler)
server.serve_forever()
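# Example request (a sketch: the TapTap app URL and "action" value are
# illustrative, since valid actions depend on SourceTapTap and the exact
# decoding depends on get_query_as_dict):
#   curl "http://127.0.0.1:1571/?url=https%3A%2F%2Fwww.taptap.com%2Fapp%2F12345&action=trend"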
| 28.712121 | 73 | 0.629024 | 221 | 1,895 | 5.171946 | 0.434389 | 0.027997 | 0.036745 | 0.024497 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016393 | 0.259631 | 1,895 | 65 | 74 | 29.153846 | 0.798289 | 0.055409 | 0 | 0 | 0 | 0 | 0.072188 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.122449 | false | 0 | 0.122449 | 0 | 0.326531 | 0.040816 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1df400ab1fff54ddad11224dfcfcbbb9ca856386 | 6,729 | py | Python | graphics/right_menu_options.py | VMS19/Inhalator | 77ff3f063efa48e825d1c5ef648203b2d70b753e | ["MIT"] | 9 | 2020-03-30T08:27:57.000Z | 2020-04-11T12:37:28.000Z | graphics/right_menu_options.py | VMS19/Inhalator | 77ff3f063efa48e825d1c5ef648203b2d70b753e | ["MIT"] | 145 | 2020-03-25T20:41:24.000Z | 2020-04-15T17:39:10.000Z | graphics/right_menu_options.py | VMS19/Inhalator | 77ff3f063efa48e825d1c5ef648203b2d70b753e | ["MIT"] | 4 | 2020-03-22T09:57:27.000Z | 2020-04-15T18:10:48.000Z | import os
import time
from data.alerts import AlertCodes
from graphics.alerts_history_screen import AlertsHistoryScreen
from graphics.configure_alerts_screen import ConfigureAlarmsScreen
from graphics.imagebutton import ImageButton
from tkinter import *
from graphics.themes import Theme
THIS_DIRECTORY = os.path.dirname(__file__)
RESOURCES_DIRECTORY = os.path.join(os.path.dirname(THIS_DIRECTORY), "resources")
class BaseButton(object):
def enable_button(self):
self.button.configure(
state="normal",
)
def disable_button(self):
self.button.configure(
state="disabled",
)
def update(self):
pass
class ClearAlertsButton(BaseButton):
IMAGE_PATH = os.path.join(RESOURCES_DIRECTORY,
"baseline_history_white_48dp.png")
def __init__(self, parent, events):
self.parent = parent
self.root = parent.element
self.events = events
self.button = ImageButton(
master=self.root,
image_path=self.IMAGE_PATH,
command=self.on_click,
font=("Roboto", 9),
text="Clear",
pady=10,
compound="top",
state="normal",
relief="flat",
bg=Theme.active().RIGHT_SIDE_BUTTON_BG,
fg=Theme.active().RIGHT_SIDE_BUTTON_FG,
)
def on_click(self):
self.events.alerts_queue.clear_alerts()
def render(self):
self.button.place(relx=0, rely=0.01, relwidth=1, relheight=0.2)
class MuteAlertsButton(BaseButton):
PATH_TO_MUTED = os.path.join(RESOURCES_DIRECTORY,
"round_notifications_off_white_48dp.png")
PATH_TO_UNMUTED = os.path.join(RESOURCES_DIRECTORY,
"baseline_notifications_active_white_48dp.png")
def __init__(self, parent, events):
self.parent = parent
self.root = parent.element
self.events = events
self.muted = False
self.button = ImageButton(
master=self.root,
image_path=self.PATH_TO_UNMUTED,
command=self.on_click,
font=("Roboto", 9),
relief="flat",
text="Mute",
pady=10,
compound="top",
bg=Theme.active().RIGHT_SIDE_BUTTON_BG,
fg=Theme.active().RIGHT_SIDE_BUTTON_FG,
activebackground=Theme.active().RIGHT_SIDE_BUTTON_BG_ACTIVE,
activeforeground=Theme.active().RIGHT_SIDE_BUTTON_FG_ACTIVE,
)
def on_click(self):
self.events.mute_alerts.mute_alerts()
self.update()
def render(self):
self.button.place(relx=0, rely=0.27, relwidth=1, relheight=0.2)
def update(self):
if self.events.mute_alerts._alerts_muted:
self.button.set_image(self.PATH_TO_MUTED)
self.button.configure(text="Unmute")
else:
self.button.set_image(self.PATH_TO_UNMUTED)
self.button.configure(text="Mute")
class LockThresholdsButton(BaseButton):
UNLOCK_IMAGE_PATH = os.path.join(RESOURCES_DIRECTORY,
"baseline_lock_open_white_48dp.png")
LOCK_IMAGE_PATH = os.path.join(RESOURCES_DIRECTORY,
"outline_lock_white_24dp.png")
def __init__(self, parent):
self.parent = parent
self.root = parent.element
self.button = ImageButton(
master=self.root,
image_path=self.UNLOCK_IMAGE_PATH,
command=self.parent.lock_buttons,
text="Lock",
relief="flat",
font=("Roboto", 9),
pady=10,
compound="top",
bg=Theme.active().RIGHT_SIDE_BUTTON_BG,
fg=Theme.active().RIGHT_SIDE_BUTTON_FG,
activebackground=Theme.active().RIGHT_SIDE_BUTTON_BG_ACTIVE,
activeforeground=Theme.active().RIGHT_SIDE_BUTTON_FG_ACTIVE,
state="normal",
)
def lock_button(self):
self.button.configure(
text="Lock"
)
self.button.set_image(
self.LOCK_IMAGE_PATH
)
def unlock_button(self):
self.button.configure(
text = "Unlock"
)
self.button.set_image(self.UNLOCK_IMAGE_PATH)
def render(self):
self.button.place(relx=0, rely=0.53, relwidth=1, relheight=0.2)
class OpenConfigureAlertsScreenButton(BaseButton):
IMAGE_PATH = os.path.join(RESOURCES_DIRECTORY,
"baseline_settings_white_48dp.png")
def __init__(self, parent, drivers, observer):
self.parent = parent
self.root = parent.element
self.drivers = drivers
self.observer = observer
self.button = ImageButton(
master=self.root,
image_path=self.IMAGE_PATH,
command=self.on_click,
font=("Roboto", 9),
relief="flat",
text="Settings",
pady=11,
compound="top",
bg=Theme.active().RIGHT_SIDE_BUTTON_BG,
fg=Theme.active().RIGHT_SIDE_BUTTON_FG,
activebackground=Theme.active().RIGHT_SIDE_BUTTON_BG_ACTIVE,
activeforeground=Theme.active().RIGHT_SIDE_BUTTON_FG_ACTIVE,
)
def on_click(self):
master_frame = self.parent.parent.element
screen = ConfigureAlarmsScreen(master_frame,
drivers=self.drivers,
observer=self.observer)
screen.show()
def render(self):
self.button.place(relx=0, rely=0.79, relwidth=1, relheight=0.2)
class OpenAlertsHistoryScreenButton(BaseButton):
PATH_TO_HISTORY = os.path.join(RESOURCES_DIRECTORY,
"baseline_history_white_24dp.png")
def __init__(self, parent, events):
self.parent = parent
self.root = parent.element
self.events = events
self.button = ImageButton(
master=self.root,
image_path=self.PATH_TO_HISTORY,
command=self.on_click,
font=("Roboto", 9),
relief="flat",
bg=Theme.active().RIGHT_SIDE_BUTTON_BG,
fg=Theme.active().RIGHT_SIDE_BUTTON_FG,
activebackground=Theme.active().RIGHT_SIDE_BUTTON_BG_ACTIVE,
activeforeground=Theme.active().RIGHT_SIDE_BUTTON_FG_ACTIVE,
)
def on_click(self):
master_frame = self.parent.parent.element
screen = AlertsHistoryScreen(master_frame, events=self.events)
screen.show()
def render(self):
self.button.place(relx=0, rely=0.79, relwidth=1, relheight=0.2)
| 31.009217 | 82 | 0.602616 | 743 | 6,729 | 5.208614 | 0.15074 | 0.05168 | 0.074419 | 0.093023 | 0.673902 | 0.650129 | 0.583204 | 0.543152 | 0.497158 | 0.448837 | 0 | 0.013138 | 0.298707 | 6,729 | 216 | 83 | 31.152778 | 0.806951 | 0 | 0 | 0.540698 | 0 | 0 | 0.055589 | 0.035077 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116279 | false | 0.005814 | 0.046512 | 0 | 0.238372 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1df59c4741b3466781cd371a9af543304321f5c9 | 5,353 | py | Python | 2020/python/day-03-cleaned.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | ["CC0-1.0"] | 1 | 2021-12-04T18:09:44.000Z | 2021-12-04T18:09:44.000Z | 2020/python/day-03-cleaned.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | ["CC0-1.0"] | null | null | null | 2020/python/day-03-cleaned.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | ["CC0-1.0"] | null | null | null | from math import prod
from typing import Any, List
from tutils import (
lmap,
splitstriplines,
load_and_process_input,
run_tests,
)
DAY = "03"
INPUT = f"input-{DAY}.txt"
TEST = f"test-input-{DAY}.txt"
TA1 = 7
TA2 = 336
ANSWER1 = 145
ANSWER2 = 3424528800
def process_one(data: List[str]) -> int:
return count_trees(data, 3, 1)
def process_two(data: List[str]) -> Any:
ct = lambda x: count_trees(data, *x)
vals = [
(1, 1),
(3, 1),
(5, 1),
(7, 1),
(1, 2, 2),
]
return prod(lmap(ct, vals))
def count_trees(
lines: List[str], rightstep: int, downstep: int, start: int = 1
) -> int:
lnlen = len(lines[0])
right, trees = 0, 0
for ln in lines[start::downstep]:
right = right + rightstep
if right > (lnlen - 1):
right = abs(right - lnlen)
if ln[right] == "#":
trees = trees + 1
return trees
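# Sanity check for count_trees against the worked example reproduced in the
# puzzle text at the bottom of this file; the grid literal is copied from
# that text. (This helper is an editorial addition, not part of the original
# solution, and is not wired into cli_main -- call it manually if desired.)
def check_example() -> None:
    grid = [
        "..##.......",
        "#...#...#..",
        ".#....#..#.",
        "..#.#...#.#",
        ".#...##..#.",
        "..#.##.....",
        ".#.#.#....#",
        ".#........#",
        "#.##...#...",
        "#...##....#",
        ".#..#...#.#",
    ]
    # Slope right 3, down 1 hits 7 trees; the five slopes multiply to 336.
    assert count_trees(grid, 3, 1) == 7
    assert prod(count_trees(grid, *v) for v in [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2, 2)]) == 336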
def cli_main() -> None:
input_funcs = [splitstriplines]
data = load_and_process_input(INPUT, input_funcs)
run_tests(TEST, TA1, TA2, ANSWER1, input_funcs, process_one, process_two)
answer_one = process_one(data)
assert answer_one == ANSWER1
print("Answer one:", answer_one)
answer_two = process_two(data)
assert answer_two == ANSWER2
print("Answer two:", answer_two)
if __name__ == "__main__":
cli_main()
"""
--- Day 3: Toboggan Trajectory ---
With the toboggan login problems resolved, you set off toward the airport.
While travel by toboggan might be easy, it's certainly not safe: there's very
minimal steering and the area is covered in trees. You'll need to see which
angles will take you near the fewest trees.
Due to the local geology, trees in this area only grow on exact integer
coordinates in a grid. You make a map (your puzzle input) of the open squares
(.) and trees (#) you can see. For example:
..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#
These aren't the only trees, though; due to something you read about once
involving arboreal genetics and biome stability, the same pattern repeats to
the right many times:
..##.........##.........##.........##.........##.........##....... --->
#...#...#..#...#...#..#...#...#..#...#...#..#...#...#..#...#...#..
.#....#..#..#....#..#..#....#..#..#....#..#..#....#..#..#....#..#.
..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#
.#...##..#..#...##..#..#...##..#..#...##..#..#...##..#..#...##..#.
..#.##.......#.##.......#.##.......#.##.......#.##.......#.##..... --->
.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#
.#........#.#........#.#........#.#........#.#........#.#........#
#.##...#...#.##...#...#.##...#...#.##...#...#.##...#...#.##...#...
#...##....##...##....##...##....##...##....##...##....##...##....#
.#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.# --->
You start on the open square (.) in the top-left corner and need to reach the
bottom (below the bottom-most row on your map).
The toboggan can only follow a few specific slopes (you opted for a cheaper
model that prefers rational numbers); start by counting all the trees you would
encounter for the slope right 3, down 1:
From your starting position at the top-left, check the position that is right 3
and down 1. Then, check the position that is right 3 and down 1 from there, and
so on until you go past the bottom of the map.
The locations you'd check in the above example are marked here with O where
there was an open square and X where there was a tree:
..##.........##.........##.........##.........##.........##....... --->
#..O#...#..#...#...#..#...#...#..#...#...#..#...#...#..#...#...#..
.#....X..#..#....#..#..#....#..#..#....#..#..#....#..#..#....#..#.
..#.#...#O#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#
.#...##..#..X...##..#..#...##..#..#...##..#..#...##..#..#...##..#.
..#.##.......#.X#.......#.##.......#.##.......#.##.......#.##..... --->
.#.#.#....#.#.#.#.O..#.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#
.#........#.#........X.#........#.#........#.#........#.#........#
#.##...#...#.##...#...#.X#...#...#.##...#...#.##...#...#.##...#...
#...##....##...##....##...#X....##...##....##...##....##...##....#
.#..#...#.#.#..#...#.#.#..#...X.#.#..#...#.#.#..#...#.#.#..#...#.# --->
In this example, traversing the map using this slope would cause you to
encounter 7 trees.
Starting at the top-left corner of your map and following a slope of right 3
and down 1, how many trees would you encounter?
Your puzzle answer was 145.
--- Part Two ---
Time to check the rest of the slopes - you need to minimize the probability of
a sudden arboreal stop, after all.
Determine the number of trees you would encounter if, for each of the following
slopes, you start at the top-left corner and traverse the map all the way to
the bottom:
Right 1, down 1.
Right 3, down 1. (This is the slope you already checked.)
Right 5, down 1.
Right 7, down 1.
Right 1, down 2.
In the above example, these slopes would find 2, 7, 3, 4, and 2 tree(s)
respectively; multiplied together, these produce the answer 336.
What do you get if you multiply together the number of trees encountered on
each of the listed slopes?
Your puzzle answer was 3424528800.
"""
| 33.248447 | 79 | 0.485896 | 627 | 5,353 | 4.090909 | 0.357257 | 0.015595 | 0.015595 | 0.018713 | 0.05614 | 0.02807 | 0.02807 | 0.02807 | 0.02807 | 0.02807 | 0 | 0.019669 | 0.164207 | 5,353 | 160 | 80 | 33.45625 | 0.553643 | 0 | 0 | 0 | 0 | 0 | 0.049168 | 0 | 0 | 0 | 0 | 0 | 0.039216 | 1 | 0.078431 | false | 0 | 0.058824 | 0.019608 | 0.196078 | 0.039216 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1df643918b3d787d3328fb2b2e1aa044930642a4 | 2,362 | py | Python | Python3/Books/Douson/chapter10/movie_chooser.py | neon1ks/Study | 5d40171cf3bf5e8d3a95539e91f5afec54d1daf3 | ["MIT"] | null | null | null | Python3/Books/Douson/chapter10/movie_chooser.py | neon1ks/Study | 5d40171cf3bf5e8d3a95539e91f5afec54d1daf3 | ["MIT"] | null | null | null | Python3/Books/Douson/chapter10/movie_chooser.py | neon1ks/Study | 5d40171cf3bf5e8d3a95539e91f5afec54d1daf3 | ["MIT"] | null | null | null | # Movie Chooser
# Demonstrates check buttons
from tkinter import *
class Application(Frame):
""" GUI Application for favorite movie types. """
def __init__(self, master):
super(Application, self).__init__(master)
self.grid()
self.create_widgets()
def create_widgets(self):
""" Create widgets for movie type choices. """
# create description label
Label(self,
text = "Choose your favorite movie types"
).grid(row = 0, column = 0, sticky = W)
# create instruction label
Label(self,
text = "Select all that apply:"
).grid(row = 1, column = 0, sticky = W)
# create Comedy check button
self.likes_comedy = BooleanVar()
Checkbutton(self,
text = "Comedy",
variable = self.likes_comedy,
command = self.update_text
).grid(row = 2, column = 0, sticky = W)
# create Drama check button
self.likes_drama = BooleanVar()
Checkbutton(self,
text = "Drama",
variable = self.likes_drama,
command = self.update_text
).grid(row = 3, column = 0, sticky = W)
# create Romance check button
self.likes_romance = BooleanVar()
Checkbutton(self,
text = "Romance",
variable = self.likes_romance,
command = self.update_text
).grid(row = 4, column = 0, sticky = W)
# create text field to display results
self.results_txt = Text(self, width = 40, height = 5, wrap = WORD)
self.results_txt.grid(row = 5, column = 0, columnspan = 3)
def update_text(self):
""" Update text widget and display user's favorite movie types. """
likes = ""
if self.likes_comedy.get():
likes += "You like comedic movies.\n"
if self.likes_drama.get():
likes += "You like dramatic movies.\n"
if self.likes_romance.get():
likes += "You like romantic movies."
self.results_txt.delete(0.0, END)
self.results_txt.insert(0.0, likes)
# main
root = Tk()
root.title("Movie Chooser")
app = Application(root)
root.mainloop()
| 31.918919 | 75 | 0.539797 | 257 | 2,362 | 4.856031 | 0.342412 | 0.064904 | 0.052083 | 0.05609 | 0.176282 | 0.067308 | 0 | 0 | 0 | 0 | 0 | 0.013219 | 0.359441 | 2,362 | 73 | 76 | 32.356164 | 0.811633 | 0.151566 | 0 | 0.170213 | 0 | 0 | 0.082448 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0 | 0.021277 | 0 | 0.106383 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1dff4abd36bf5302e229243451841450c38b4467 | 6,102 | py | Python | src/metrics/metric_computer.py | bnewm0609/refining-tse | db423f80246ba2d7c2fc602fc514943949cfaf71 | ["MIT"] | null | null | null | src/metrics/metric_computer.py | bnewm0609/refining-tse | db423f80246ba2d7c2fc602fc514943949cfaf71 | ["MIT"] | null | null | null | src/metrics/metric_computer.py | bnewm0609/refining-tse | db423f80246ba2d7c2fc602fc514943949cfaf71 | ["MIT"] | null | null | null | """Defines general structure of a metric computer, as well as some general utility functions.
Attributes:
example_aggregator_name_to_function: Maps name of an example aggregator to the corresponding function.
"""
import numpy as np
from ..constants import INVALID_SCORE_VALUE, BATCH_SIZE
def mean_example_aggregator(score_per_example, extra_info_per_example):
"""Aggregates example scores via mean. Returns invalid score if 0 scores are given."""
if len(score_per_example) == 0:
return INVALID_SCORE_VALUE, {}
else:
return np.mean(score_per_example), {}
example_aggregator_name_to_function = {
"mean": mean_example_aggregator,
}
class MetricComputer(object):
"""Abstract class that computes a metric for each example, records some information, and later produces a summary.
Functions to override:
get_example_aggregator_from_name
_compute
Attributes:
extra_info_per_example (list): List of per-example info needed for later aggregation, e.g. example weight.
example_aggregator: Function that aggregates information.
Args:
score_per_example (List[float]): List of scores for each example.
extra_info_per_example (list): List of extra information for each example.
Returns:
Tuple([float, dict]): Overall score and any supplementary information.
"""
def __init__(self, config):
"""Initializes metric computer.
Args:
config: Metric-level config dict.
"""
self._score_per_example = []
self.extra_info_per_example = [] # for more complicated aggregation functions, e.g. weights per example
self.example_aggregator = self.get_example_aggregator_from_name(config["example_aggregator"])
self._summary_functions = self.initialize_summary_functions()
self.use_custom_dataset = config.get("use_custom_dataset", False)
@property
def summary_functions(self):
"""list: List of functions to run during summarization.
Returns:
list: Update output `summary_dict` with more information to give to the `Logger`.
Children should include `self.summary_functions.extend([func1, func2])` in their `__init__`.
"""
return self._summary_functions
@property
def score_per_example(self):
"""List[float]: List of scores for each example."""
return self._score_per_example
def initialize_summary_functions(self):
"""Returns list of functions to run during summarization.
Returns:
list: List of functions to run.
Returns:
dict: Information to give to `Logger`.
"""
return []
def get_example_aggregator_from_name(self, aggregator_name):
"""Returns the example aggregator function given the aggregator name. Override me.
Args:
aggregator_name (str): Name of example aggregator.
Returns:
Function that aggregates information, as described in `MetricComputer` docstring.
"""
return example_aggregator_name_to_function[aggregator_name]
def _compute(self, logits, label, template_id, word_to_index):
"""Computes score for a single example. Override me.
Same Args as `compute`.
Returns:
Tuple[float, Any, dict]: The example score, any extra info for later example aggregation (e.g., weight),
and a dict to update this example's `metrics_dict` with.
"""
raise NotImplementedError
def compute(self, logits, label, template_id, word_to_index):
"""Computes metrics for a single example, and keeps some internal notes.
Records whether the score was invalid. If not, tracks them.
Args:
logits (torch.Tensor): Predicted logits with shape (1, vocab_size) or just (vocab_size).
label (..constants.Number): Correct singular/plural label of this example.
template_id: Template ID of this example.
word_to_index: Dict-like indexer object mapping a word to an index.
Returns:
dict: Information for the `Logger` to record for this example. Includes "score" key.
"""
score, extra_info, metrics_dict = self._compute(logits, label, template_id, word_to_index)
if score == INVALID_SCORE_VALUE:
metrics_dict["score"] = "INVALID_SCORE_VALUE"
else:
metrics_dict["score"] = score
if isinstance(score, list):
# used only by the ML metric
valid_idxs, valid_scores, valid_extra_infos = zip(*[(i, s, ei) for i, (s, ei) in enumerate(zip(score, extra_info)) if s != INVALID_SCORE_VALUE])
metrics_dict["valid_idxs"] = valid_idxs
self.score_per_example.extend(valid_scores)
self.extra_info_per_example.extend(valid_extra_infos)
else:
# used by main metric, but main metric tracks its own state so
# `score` is not meaningful
self.score_per_example.append(score)
self.extra_info_per_example.append(score)
return metrics_dict
def summarize(self):
"""Summarizes the model's score on the dataset by producing information for the `Logger`.
Returns:
dict: Summary information for the `Logger`, including an "Overall model score" key.
"""
overall_score, summary_dict = self.example_aggregator(self.score_per_example, self.extra_info_per_example)
if overall_score == INVALID_SCORE_VALUE:
summary_dict["Overall model score"] = "INVALID_SCORE_VALUE"
else:
summary_dict["Overall model score"] = overall_score
summary_dict["Number of examples"] = len(self.score_per_example)
# Iterate through other summary functions.
for summary_function in self.summary_functions:
summary_dict.update(summary_function())
return summary_dict
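# Minimal illustrative subclass (a sketch for documentation purposes:
# "ArgmaxAccuracyComputer" and its scoring rule are hypothetical, not part
# of this codebase). It shows the one required override, `_compute`,
# returning (score, extra_info, metrics_dict) as described above; it would
# be constructed with a config such as {"example_aggregator": "mean"}.
class ArgmaxAccuracyComputer(MetricComputer):
    def _compute(self, logits, label, template_id, word_to_index):
        # Score 1.0 when the highest-logit vocabulary index equals the label.
        predicted = int(logits.argmax())
        score = float(predicted == label)
        return score, score, {"predicted_index": predicted}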
| 40.144737 | 160 | 0.660603 | 738 | 6,102 | 5.239837 | 0.246612 | 0.05172 | 0.042669 | 0.034394 | 0.228601 | 0.123093 | 0.116886 | 0.093613 | 0.075511 | 0.027929 | 0 | 0.001114 | 0.26434 | 6,102 | 151 | 161 | 40.410596 | 0.860325 | 0.476893 | 0 | 0.111111 | 0 | 0 | 0.054941 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.037037 | 0 | 0.37037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1dff7b5e8e48ffd1098d3accc04a72edc1fd41bc | 3,776 | py | Python | pm4pymdl/algo/mvp/utils/build_graph.py | dorian1000/pm4py-mdl | 71e0c2425abb183da293a58d31e25e50137c774f | ["MIT"] | 5 | 2021-01-31T22:45:29.000Z | 2022-02-22T14:26:06.000Z | pm4pymdl/algo/mvp/utils/build_graph.py | Javert899/pm4py-mdl | 4cc875999100f3f1ad60b925a20e40cf52337757 | ["MIT"] | 3 | 2021-07-07T15:32:55.000Z | 2021-07-07T16:15:36.000Z | pm4pymdl/algo/mvp/utils/build_graph.py | dorian1000/pm4py-mdl | 71e0c2425abb183da293a58d31e25e50137c774f | ["MIT"] | 9 | 2020-09-23T15:34:11.000Z | 2022-03-17T09:15:40.000Z | import networkx
from pm4py.objects.log.log import EventLog, Trace, Event
from datetime import datetime
from pm4py.objects.log.util import sorting
from collections import Counter
def apply(df, source_attr, target_attr, type_attr, timestamp_key, reverse=False):
df[source_attr] = df[source_attr].astype(str)
df[target_attr] = df[target_attr].astype(str)
df[type_attr] = df[type_attr].astype(str)
first_df = df.groupby(source_attr).first().reset_index()
timestamps = first_df[[timestamp_key, source_attr]].to_dict('records')
timestamps = {x[source_attr]: x[timestamp_key] for x in timestamps}
map_types = df[[source_attr, type_attr]].to_dict('records')
map_types = {x[source_attr]: x[type_attr] for x in map_types if x[type_attr] != 'None'}
map_source_target = dict(df.groupby([source_attr, target_attr]).size())
map_source_target = list(map_source_target.keys())
map_source_target = [(map_types[x] + "=" + x, map_types[y] + "=" + y) for (x, y) in map_source_target if
x in map_types and y in map_types]
map_types = {y + "=" + x: y for x, y in map_types.items()}
G = networkx.DiGraph()
for k in map_types:
G.add_node(k)
for el in map_source_target:
if reverse:
G.add_edge(el[1], el[0])
else:
G.add_edge(el[0], el[1])
conn_comp = sorted(list(networkx.connected_components(networkx.Graph(G))), key=lambda x: len(x), reverse=True)
return G, conn_comp, timestamps
def describe_graph(G, comp):
SG = G.subgraph(comp)
nodes = list(SG.nodes)
edges = list(SG.edges)
nodes = [(x, y) for x, y in Counter(n.split("=")[0] for n in nodes).items()]
for i in range(len(nodes)):
if nodes[i][1] > 1:
nodes[i] = (nodes[i][0], "N")
else:
nodes[i] = (nodes[i][0], "1")
source0 = {n[0]: [e for e in edges if e[0].split("=")[0] == n[0]] for n in nodes}
target0 = {n[0]: [e for e in edges if e[1].split("=")[0] == n[0]] for n in nodes}
source1 = {x: {} for x in source0}
target1 = {x: {} for x in target0}
for x in source0:
ik = set(y[0] for y in source0[x])
for k in ik:
source1[x][k.split("=")[1]] = Counter(y[1].split("=")[0] for y in source0[x] if y[0] == k)
for x in target0:
ik = set(y[1] for y in target0[x])
for k in ik:
target1[x][k.split("=")[1]] = Counter(y[0].split("=")[0] for y in source0[x] if y[1] == k)
print(source1)
print(target1)
# print(edges)
def create_log(G, conn_comp, timestamps, max_comp_len=50, include_loops=False):
log = EventLog()
for i in range(len(conn_comp)):
if len(conn_comp[i]) <= max_comp_len:
trace = Trace()
trace.attributes["concept:name"] = str(i)
SG = G.subgraph(conn_comp[i])
SGG = networkx.DiGraph(SG)
edges = list(SGG.edges)
for e in edges:
if e[0] == e[1]:
SGG.remove_edge(e[0], e[1])
sorted_nodes = list(networkx.topological_sort(SGG))
for n in sorted_nodes:
selfloop = 1 if (n, n) in SG.edges else 0
trace.append(Event({'time:timestamp': timestamps[n.split("=")[1]], 'concept:name': n.split("=")[0],
'value': n.split("=")[1], 'typevalue': n, 'selfloop': selfloop}))
if include_loops and selfloop:
trace.append(Event({'time:timestamp': timestamps[n.split("=")[1]], 'concept:name': n.split("=")[0],
'value': n.split("=")[1], 'typevalue': n, 'selfloop': selfloop}))
log.append(trace)
log = sorting.sort_timestamp_log(log, "time:timestamp")
return log
| 41.494505 | 119 | 0.574947 | 575 | 3,776 | 3.648696 | 0.177391 | 0.038132 | 0.017159 | 0.00858 | 0.254051 | 0.198761 | 0.161106 | 0.15348 | 0.135367 | 0.097235 | 0 | 0.021583 | 0.263771 | 3,776 | 90 | 120 | 41.955556 | 0.733094 | 0.003178 | 0 | 0.105263 | 0 | 0 | 0.038809 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039474 | false | 0 | 0.065789 | 0 | 0.131579 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
380174a9e1c618e5d205ff702047439c04fbce30 | 50,176 | py | Python | main.py | hiromin0627/discord-mlg | 7d178d20983eba94dd1a1a209f5a1aa17987af0f | ["MIT"] | 1 | 2020-03-05T03:56:15.000Z | 2020-03-05T03:56:15.000Z | main.py | hiromin0627/discord-mlg | 7d178d20983eba94dd1a1a209f5a1aa17987af0f | ["MIT"] | 4 | 2020-03-18T10:55:33.000Z | 2020-03-18T12:25:13.000Z | main.py | hiromin0627/discord-mlg | 7d178d20983eba94dd1a1a209f5a1aa17987af0f | ["MIT"] | 3 | 2020-03-06T13:07:50.000Z | 2020-03-08T03:51:58.000Z | #coding: utf-8
#created by @hiromin0627
#MilliShita Gacha v5
mlgbotver = '5.1.2'
import glob
import gettext
import os
import discord
import asyncio
import re,random
import datetime
from threading import (Event, Thread)
from urllib import request
import configparser
import json
ini = configparser.ConfigParser()
ini.read('./config.ini', 'UTF-8')
lang = ini['Language']['lang']
path_to_locale_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'./locale'))
if lang == 'cn': translang = 'zh_TW'
elif lang == 'kr': translang = 'ko_KR'
else: translang = 'ja_JP'
translater = gettext.translation('messages',localedir=path_to_locale_dir,languages=[translang],fallback=True,codeset="utf8")
translater.install()
token = ini['tokens']['token']
bgm_id = int(ini['ids']['bgm-room'])
log_id = int(ini['ids']['log-room'])
version = ini['Data']['Version']
prefix = ini['Prefix']['commandprefix']
timeout = float(ini['Reaction']['timeout'])
aftermsgdel = ini['Reaction']['aftermsgdel']
client = discord.Client()
mlg_all = [[],[],[]]
mlg_data = [[],[],[]]
pickup_id = [[],[],[]]
gacha_mode = ['','','']
current_ver = ['','','']
pickup_name = ['','','']
pickup_img = ['','','']
rarity_str = ['R','SR','SSR','FES']
langnamelist = ['ja','kr','cn']
timer = 0
@client.event
async def on_ready():
print(strtimestamp() + '---Millishita Gacha ' + mlgbotver + '---')
print(strtimestamp() + 'discord.py ver:' + discord.__version__)
print(strtimestamp() + 'Logged in as ' + client.user.name + '(ID:' + str(client.user.id) + ')')
print(strtimestamp() + 'Bot created by @hiromin0627')
await gacha_reload(0,None,version)
@client.event
async def on_message(message):
if message.author.bot:
return
if message.content.startswith("MLhelp"):
if not aftermsgdel == 'false': await message.delete()
print(strtimestamp() + 'Start MLhelp')
await message.channel.send('ミリシタガシャシミュレーターDiscordボット ' + mlgbotver + '\n' +\
'MLhelp:ヘルプコマンドです。ミリシタガシャの説明を見ることができます。\n' +\
prefix + 'update:ミリシタガシャデータベースをダウンロードして更新します。\n' +\
prefix + 'reset:全ユーザーのMLガシャを引いた回数をリセットします。\n' +\
prefix + 'cards:MLガシャで引いたカード名を確認することができます。\n' +\
prefix + 'pickup:現在のガシャ名とピックアップカードを確認できます。\n' +\
prefix + 'call:MLガシャで引いたカード画像を検索できます。スペースを挟んでカード名を入力してください。(制服シリーズはアイドル名も記入)\n' +\
prefix + 'ガシャ or ' + prefix + '轉蛋 or ' + prefix + '촬영 or ' + prefix + 'gacha:ミリシタガシャシミュレーターができます。' +\
'10を後ろに付け加えると、10連ガシャになります。jp(日本語版)、cn(中国語繁体字版)、kr(韓国語版)を後ろに付け加えると、その言語のガシャが引くことができます。')
if message.content.startswith(prefix):
global version
if not aftermsgdel == 'false':
if "change" in message.content or "update" in message.content or "uselatest" in message.content or "retention" in message.content or "cards" in message.content or "reset" in message.content or "pickup" in message.content or "call" in message.content or "ガシャ" in message.content or "gacha" in message.content or "轉蛋" in message.content or "촬영" in message.content:
await message.delete()
langint = 0
if not message.content == '':
langint = langstrtoint(message.content[6:])
else:
langint = langtoint()
if message.content.startswith(prefix + "change"):
try:
mlgver = message.content.split()[1]
except IndexError:
await message.channel.send('コマンドが間違っています。バージョン名はスペースを空けて入力してください。(例:MLchange 20200101)')
return
if await gacha_check_available(mlgver):
version = mlgver
else:
await message.channel.send('該当バージョンが見つかりませんでした。バージョン名を確認してください。(検索バージョン名:' + mlgver + ')')
return
current = await current_version_loader()
if ini['Data']['Version'] == 'Latest':
msgupdate = await message.channel.send('**現在のガシャデータベース**\n日本語版:' + current["version"][0] + ' アジア版:' + current["version"][1] + '\n**見つかったガシャデータベース**\n' + mlgver + '\n**ローカルバージョンを維持する設定に変更**し、バージョンを入れ替えますか?')
await msgupdate.add_reaction('⭕')
await msgupdate.add_reaction('❌')
else:
msgupdate = await message.channel.send('**現在のガシャデータベース**\n日本語版:' + current["version"][0] + ' アジア版:' + current["version"][1] + '\n**見つかったガシャデータベース**\n' + mlgver + '\nバージョンを入れ替えますか?')
await msgupdate.add_reaction('⭕')
await msgupdate.add_reaction('❌')
while True:
try:
target_reaction, user = await client.wait_for('reaction_add', timeout=timeout)
if target_reaction.emoji == '⭕' and user != msgupdate.author:
await msgupdate.edit(content='入れ替えを開始します。')
await msgupdate.clear_reactions()
ini.set("Data","Version","Retention")
ini.write(open('./config.ini', 'w'), 'UTF-8')
await gacha_reload(1, message, mlgver)
await msgupdate.edit(content='入れ替えが完了しました。')
return
if target_reaction.emoji == '❌' and user != msgupdate.author:
await msgupdate.edit(content='入れ替えを中止します。')
return
except:
await msgupdate.edit(content='コマンドに失敗しました。もう一度やり直してください。')
return
elif message.content.startswith(prefix + "uselatest"):
ini.set("Data","Version","Latest")
ini.write(open('./config.ini', 'w'), 'UTF-8')
await message.channel.send('起動時に最新版をロードするように設定されました。')
elif message.content.startswith(prefix + "retention"):
ini.set("Data","Version","Retention")
ini.write(open('./config.ini', 'w'), 'UTF-8')
await message.channel.send('起動時に保存されているバージョンでロードするように設定されました。')
elif message.content.startswith(prefix + "update"):
latest = gacha_check_update()
current = await current_version_loader()
if latest["version"] == current["version"]:
msgl = await message.channel.send('現在のガシャデータベースは最新のものが使われています。')
return
else:
msgl = await message.channel.send('**最新のガシャデータベース**\n日本語版:' + latest["version"][0] + ' アジア版:' + latest["version"][1] + '\n**現在のガシャデータベース**\n日本語版:' + current["version"][0] + ' アジア版:' + current["version"][1] + '\nアップデートしますか?')
await msgl.add_reaction('⭕')
await msgl.add_reaction('❌')
while True:
try:
target_reaction, user = await client.wait_for('reaction_add', timeout=timeout)
if target_reaction.emoji == '⭕' and user != msgl.author:
await msgl.edit(content='アップデートを開始します。')
await msgl.clear_reactions()
version = latest["version"]
await gacha_reload(1, message)
await msgl.edit(content='アップデートが完了しました。')
return
if target_reaction.emoji == '❌' and user != msgl.author:
await msgl.edit(content='アップデートを中止します。')
return
except:
await msgl.edit(content='コマンドに失敗しました。もう一度やり直してください。')
return
elif message.content.startswith(prefix + 'cards'):
print(strtimestamp() + 'Start MLGacha[cards].')
await gacha_note(message,langint)
elif message.content.startswith(prefix + 'reset'):
print(strtimestamp() + 'Start MLGacha[reset].')
file_list = glob.glob("./gacha_count/*.txt")
for file in file_list:
os.remove(file)
await message.channel.send(_('すべてのユーザーのガチャカウントをリセットしました。'))
elif message.content.startswith(prefix + 'pickup'):
print(strtimestamp() + 'Start MLGacha[pickup].')
name = pickupcheck(langint)
emb = discord.Embed(title=_('ピックアップカード一覧'), description=name)
emb.set_image(url=pickup_img[langint])
emb.set_author(name=pickup_name[langint])
await message.channel.send('', embed=emb)
elif message.content.startswith(prefix + 'call'):
print(strtimestamp() + 'Start MLGacha[call].')
await gacha_call(message,langint)
elif message.content.startswith(prefix + "ガシャ") or message.content.startswith(prefix + "gacha") or message.content.startswith(prefix + "轉蛋") or message.content.startswith(prefix + "촬영"):
if voicecheck():
await message.channel.send(_('他のユーザーがプレイ中です。終了までお待ちください。'))
return
elif gacha_mode[langint] == "skip":
await message.channel.send('ガシャデータがありません。現在使われているバージョンにてこの言語のガシャ情報がありません。')
return
gacha_count = int()
try:
with open('./gacha_count/' + current_ver[langint] + '_' + str(message.author.id) + '.txt', 'r') as f:
gacha_count = int(f.read())
except:
with open('./gacha_count/' + current_ver[langint] + '_' + str(message.author.id) + '.txt', 'w') as f:
f.write('0')
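# At 300 pulls on the current banner, a "normal"/"fes" gacha switches to
# the dream-star card-exchange (spark) flow instead of rolling again;
# gacha_prepare_select resets the pull counter to 0 after the exchange.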
if gacha_count >= 300 and (gacha_mode[langint] == "normal" or gacha_mode[langint] == "fes"):
await gacha_prepare_select(message,langint)
else:
await gacha_prepare(message,langint,gacha_count)
async def gacha_prepare_select(message,langint):
try:
vc_id = message.author.voice.channel.id
channel = client.get_channel(vc_id)
except:
vc_id = None
kind = ''
result = []
count_emoji = ['1⃣','2⃣','3⃣','4⃣','5⃣','6⃣','7⃣','8⃣','9⃣','🔟']
pickup_counter = 0
pickup_alllist = list()
name = pickupcheck(langint)
for row in mlg_data[langint]:
if row["id"] in pickup_id[langint]:
pickup_alllist.append(row)
pickup_counter += 1
mlgpickupemb = discord.Embed(title=_('交換カード一覧'), description=name)
mlgpickupemb.set_author(name=message.author.name, icon_url=message.author.avatar_url)
mlgpickupemb.set_footer(text=pickup_name[langint])
msgs = await message.channel.send(_('ドリームスターがカード交換数に達しているため、ガシャをご利用いただけません。カードを交換してください。\n該当番号のリアクションを返すと交換できます。'), embed=mlgpickupemb)
for r in range(pickup_counter):
await msgs.add_reaction(count_emoji[r])
kind = _('ドリームスター交換') + '「' + pickup_name[langint] + '」'
pickup_num = int()
numemoji_to_int = {'1⃣':0, '2⃣':1, '3⃣':2, '4⃣':3, '5⃣':4, '6⃣':5, '7⃣':6, '8⃣':7, '9⃣':8, '🔟':9}
while True:
target_reaction, user = await client.wait_for('reaction_add')
if not user == msgs.author:
if target_reaction.emoji in numemoji_to_int:
pickup_num = numemoji_to_int[target_reaction.emoji]
break
result = [pickup_alllist[pickup_num]]
await msgs.delete()
print(strtimestamp() + 'Start MLChange[' + kind + '] by ' + str(message.author.id) + '.')
try:
with open('./gacha_count/' + current_ver[langint] + '_' + str(message.author.id) + '.txt', 'w') as f:
f.write(str(0))
except:
print(strtimestamp() + '[ERROR]Gacha count FAILED.')
char_list = list()
try:
with open('./gacha/' + langnamelist[langint] + str(message.author.id) + '.txt', 'r') as f:
listline = f.read()
char_list = list(listline)
except:
pass
with open('./gacha/' + langnamelist[langint] + str(message.author.id) + '.txt', 'w+') as f:
try:
char_list[result[0]["id"]] = '1'
except:
for n in range(500):
char_list.append('0')
char_list[result[0]["id"]] = '1'
newlistline = ''.join(char_list)
f.write(newlistline)
if vc_id == None:
vc = None
botmsg = None
else:
if not bgm_id == 0:
toBot = client.get_channel(bgm_id)
botmsg = await toBot.send('ML' + str(vc_id))
vc = await channel.connect()
await mlg_touch(message,result,kind,vc,botmsg,langint)
return
async def gacha_prepare(message,langint,gacha_count):
try:
vc_id = message.author.voice.channel.id
channel = client.get_channel(vc_id)
except:
vc_id = None
role = 0
if gacha_mode[langint] == "normal" or gacha_mode[langint] == "fes" or gacha_mode[langint] == "type":
if '10' in message.content or '10' in message.content:
role = 10
else:
role = 1
elif gacha_mode[langint] == "party":
role = 10
else:
role = 1
if gacha_mode[langint] == "normal" or gacha_mode[langint] == "fes":
try:
gacha_count += role
with open('./gacha_count/' + current_ver[langint] + '_' + str(message.author.id) + '.txt', 'w') as f:
f.write(str(gacha_count))
except:
print(strtimestamp() + '[ERROR]Failed to count.')
result = await gacha_emission(langint,role)
print(strtimestamp() + 'Start MLGacha[' + pickup_name[langint] + '] by ' + message.author.name + '.')
char_list = list()
try:
with open('./gacha/' + langnamelist[langint] + str(message.author.id) + '.txt', 'r') as f:
listline = f.read()
char_list = list(listline)
except:
pass
for box in result:
with open('./gacha/' + langnamelist[langint] + str(message.author.id) + '.txt', 'w+') as f:
try:
char_list[box["id"]] = '1'
except:
for n in range(500):
char_list.append('0')
char_list[box["id"]] = '1'
newlistline = ''.join(char_list)
f.write(newlistline)
mess = random.randint(1,10)
fes_flag = 0
ssr_flag = 0
sr_flag = 0
for val in result:
if val["rarity"] == 3:
fes_flag = 1
elif val["rarity"] == 2:
ssr_flag = 1
elif val["rarity"] == 1:
sr_flag = 1
phrase = [_('最高の一枚ができましたのでぜひご確認ください!'),_('みんなのいい表情が撮れました!'),_('楽しそうなところが撮れましたよ')]
cameratxt = ''
if mess >= 2 :
if ssr_flag == 1 or fes_flag == 1: cameratxt = phrase[0]
elif sr_flag == 1 or ssr_flag == 1 or fes_flag == 1: cameratxt = phrase[1]
elif sr_flag == 1 or ssr_flag == 1 or fes_flag == 1: cameratxt = phrase[2]
if vc_id == None:
vc = None
botmsg = None
if not cameratxt == '':
camera = await message.channel.send(cameratxt)
await asyncio.sleep(3)
await camera.delete()
else:
vc = await channel.connect()
if not cameratxt == '':
vc.play(discord.FFmpegPCMAudio('./resources/message.mp3'))
camera = await message.channel.send(cameratxt)
while vc.is_playing():
await asyncio.sleep(1)
await camera.delete()
if not bgm_id == 0:
toBot = client.get_channel(bgm_id)
botmsg = await toBot.send('ML' + str(vc_id))
await mlg_touch(message,result,pickup_name[langint],vc,botmsg,langint)
async def gacha_emission(langint,role):
#Note to self, since I'm still getting used to this
#gacha_mode = [normal,fes,party,final,special,type,skip]
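#Rates per 10000 rolls, as implemented below: a normal banner yields an SSR
#when rand >= 9700 (3%), of which rand in [9700, 9799] is a pickup SSR (~1%);
#a "fes" banner uses ssr_rate = 9400 and pick_rate = 198, i.e. about 6% SSR
#with ~2% pickup. The 10th roll of a 10-pull is always SR or better.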
result = []
ssr_rate = 9700
pick_rate = 99
if gacha_mode[langint] == "fes":
ssr_rate = 9400
pick_rate = 198
if gacha_mode[langint] == "normal" or gacha_mode[langint] == "fes" or gacha_mode[langint] == "final":
if gacha_mode[langint] == "final":
role = 10
rpick = list()
rcard = list()
srpick = list()
srcard = list()
ssrpick = list()
ssrcard = list()
for row in mlg_data[langint]:
if row["rarity"] == 0 and row["id"] in pickup_id[langint]:
rpick.append(row)
elif row["rarity"] == 0 and not row["id"] in pickup_id[langint]:
rcard.append(row)
elif row["rarity"] == 1 and row["id"] in pickup_id[langint]:
srpick.append(row)
elif row["rarity"] == 1 and not row["id"] in pickup_id[langint]:
srcard.append(row)
elif row["rarity"] >= 2 and row["id"] in pickup_id[langint]:
ssrpick.append(row)
elif row["rarity"] >= 2 and not row["id"] in pickup_id[langint]:
ssrcard.append(row)
if len(rpick) == 0: rpick = rcard
if len(srpick) == 0: srpick = srcard
for n in range(role):
rand = random.randint(0, 9999)
if n < 9:
if rand >= 0 and rand < 850:
if len(rpick) > 1:
result.append(rpick[random.randrange(len(rpick) - 1)])
else:
result.append(rpick[0])
elif rand >= 850 and rand < 8500:
result.append(rcard[random.randrange(len(rcard) - 1)])
elif rand >= 8500 and rand <= 8740:
if len(srpick) > 1:
result.append(srpick[random.randrange(len(srpick) - 1)])
else:
result.append(srpick[0])
elif rand >= 8740 and rand < ssr_rate:
result.append(srcard[random.randrange(len(srcard) - 1)])
elif rand >= ssr_rate and rand <= ssr_rate + pick_rate:
if len(ssrpick) > 1:
result.append(ssrpick[random.randrange(len(ssrpick) - 1)])
else:
result.append(ssrpick[0])
elif rand >= ssr_rate + pick_rate:
result.append(ssrcard[random.randrange(len(ssrcard) - 1)])
elif n == 9:
if gacha_mode[langint] == "normal" or gacha_mode[langint] == "fes":
if rand >= 0 and rand <= 240:
if len(srpick) > 1:
result.append(srpick[random.randrange(len(srpick) - 1)])
else:
result.append(srpick[0])
elif rand >= 240 and rand <= ssr_rate:
result.append(srcard[random.randrange(len(srcard) - 1)])
elif rand >= ssr_rate and rand <= ssr_rate + pick_rate:
if len(ssrpick) > 1:
result.append(ssrpick[random.randrange(len(ssrpick) - 1)])
else:
result.append(ssrpick[0])
elif rand >= ssr_rate + pick_rate:
result.append(ssrcard[random.randrange(len(ssrcard) - 1)])
elif gacha_mode[langint] == "final":
result.append(ssrpick[random.randrange(len(ssrpick) - 1)])
elif gacha_mode[langint] == "party":
rcard = list()
srcard = list()
ssrcard = list()
limcard = list()
for row in mlg_data[langint]:
if row["rarity"] == 0:
rcard.append(row)
elif row["rarity"] == 1:
srcard.append(row)
elif row["rarity"] >= 2 and not row["limited"]:
ssrcard.append(row)
elif row["rarity"] >= 2 and row["limited"]:
ssrcard.append(row)
limcard.append(row)
for n in range(10):
if n < 9:
rand = random.randint(0, 9999)
if rand >= 0 and rand < 8500:
if len(rcard) > 1:
result.append(rcard[random.randrange(len(rcard) - 1)])
else:
result.append(rcard[0])
elif rand >= 8500 and rand <= 9700:
if len(srcard) > 1:
result.append(srcard[random.randrange(len(srcard) - 1)])
else:
result.append(srcard[0])
elif rand >= 9700 and rand <= 9999:
if len(ssrcard) > 1:
result.append(ssrcard[random.randrange(len(ssrcard) - 1)])
else:
result.append(ssrcard[0])
elif n == 9:
result.append(limcard[random.randrange(len(limcard) - 1)])
elif gacha_mode[langint] == "special":
rcard = list()
srcard = list()
ssrcard = list()
limcard = list()
for row in mlg_data[langint]:
if row["rarity"] == 0:
rcard.append(row)
elif row["rarity"] == 1:
srcard.append(row)
elif row["rarity"] >= 2:
ssrcard.append(row)
if len(rcard) == 0: rcard = srcard
if len(srcard) == 0: srcard = ssrcard
rand = random.randint(0, 9999)
if rand >= 0 and rand < 8500:
if len(rcard) > 1:
result.append(rcard[random.randrange(len(rcard) - 1)])
else:
result.append(rcard[0])
elif rand >= 8500 and rand <= 9700:
if len(srcard) > 1:
result.append(srcard[random.randrange(len(srcard) - 1)])
else:
result.append(srcard[0])
elif rand >= 9700 and rand <= 9999:
if len(ssrcard) > 1:
result.append(ssrcard[random.randrange(len(ssrcard) - 1)])
else:
result.append(ssrcard[0])
return result
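
# Illustrative note (not part of the bot): with ssr_rate = 9700 and
# pick_rate = 99, a single pull draws rand in 0..9999 and lands on
# SSR-or-better for the 300 values 9700..9999 (3.0%), of which
# 9700..9799 hit pickup cards and 9800..9999 the general SSR pool.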
async def gacha_call(message,langint):
char_list = list()
carddata = {}
try:
with open('./gacha/' + langnamelist[langint] + str(message.author.id) + '.txt', 'r') as f:
listline = f.read()
char_list = list(listline)
except:
print(strtimestamp() + '[ERROR]Failed to load gacha result file.')
return
if '制服シリーズ' in message.content[6:]:
for data in mlg_all[langint]:
if message.content[6:] in data["idol"] and data["name"] == '制服シリーズ':
carddata = data
break
elif 'シアターデイズ' in message.content[6:] or '劇場時光' in message.content[6:] or '시어터 데이즈' in message.content[6:]:
for data in mlg_all[langint]:
if data["idol"] in message.content[6:] and (data["name"] == 'シアターデイズ' or data["name"] == '劇場時光' or data["name"] == '시어터 데이즈'):
carddata = data
break
elif 'MILLION LIVE CLOSET!' in message.content[6:]:
for data in mlg_all[langint]:
if message.content[6:] in data["idol"] and data["name"] == 'MILLION LIVE CLOSET!':
carddata = data
break
elif 'フォーチュンガール' in message.content[6:]:
for data in mlg_all[langint]:
if message.content[6:] in data["idol"] and data["name"] == 'フォーチュンガール':
carddata = data
break
else:
for data in mlg_all[langint]:
if data["name"] in message.content[6:] and char_list[data["id"]] == '1':
carddata = data
break
if len(carddata) == 0:
msgn = await message.channel.send(_('カードが見つかりませんでした。\n「MLcheck」で自分が所持しているカード名を確認してください。'))
await asyncio.sleep(10)
await msgn.delete()
return
cardname = '[' + rarity_str[int(carddata["rarity"])] + ']' + carddata["name"] + ' ' + carddata["idol"]
embmsg1 = discord.Embed(title=cardname, description='(CV.' + carddata["cv"] + ')', colour=int(carddata["color"], 0))
embmsg1.set_author(name=message.author.name + _('所持カード'), icon_url=message.author.avatar_url)
embmsg1.set_image(url=carddata["image"])
msg = await message.channel.send('', embed=embmsg1)
await msg.add_reaction('👆')
while True:
target_reaction, user = await client.wait_for('reaction_add')
if target_reaction.emoji == '👆' and user != msg.author:
if carddata["rarity"] >= 2:
await msg.remove_reaction(target_reaction.emoji, user)
cardname = '[' + rarity_str[int(carddata["rarity"])] + '+]' + carddata["name"] + ' ' + carddata["idol"]
embmsg1 = discord.Embed(title=cardname, description='(CV.' + carddata["cv"] + ')', colour=int(carddata["color"], 0))
embmsg1.set_author(name=message.author.name + _('所持カード'), icon_url=message.author.avatar_url)
embmsg1.set_image(url=carddata["imageAwake"])
await msg.edit(embed=embmsg1)
while True:
target_reaction, user = await client.wait_for('reaction_add')
if target_reaction.emoji == '👆' and user != msg.author:
await msg.delete()
return
else:
target_reaction, user = await client.wait_for('reaction_add')
if target_reaction.emoji == '👆' and user != msg.author:
await msg.delete()
return
return
def gacha_check_update():
url = "https://data.hiromin.xyz/latest"
readObj = request.urlopen(url)
response = readObj.read()
data = json.loads(response)
return data
async def gacha_check_available(mlgver):
try:
response = request.urlopen('https://data.hiromin.xyz/gachadata/'+mlgver)
if response.getcode() == 200:
return True
else:
return False
    except OSError:
        # urlopen signals failure via URLError/HTTPError (OSError subclasses)
        return False
async def current_version_loader():
current = dict()
try:
with open('./gacha_data/version.json', 'r') as f:
current = json.load(f)
if "version" not in current:
with open('./gacha_data/version.json', 'w') as f:
pre = {"version": ["Nodata","Nodata"]}
json.dump(pre, f)
current = pre
except:
with open('./gacha_data/version.json', 'w') as f:
pre = {"version": ["Nodata","Nodata"]}
json.dump(pre, f)
current = pre
return current
async def gacha_reload(flag,message,version="Latest"):
global mlg_all, mlg_data, pickup_id, gacha_mode, current_ver
print(strtimestamp() + '----------[MLG ' + mlgbotver + ' MLreload]----------')
if flag == 1: msg = await message.channel.send('MLreload Start.')
mlg_all = [[],[],[]]
mlg_data = [[],[],[]]
pickup_id = [[],[],[]]
gacha_mode = ['','','']
name = ['','','']
print(strtimestamp() + 'MLG temporary data cleaned.')
if flag == 1: await msg.edit(content='MLG temporary data cleaned.')
url = "https://data.hiromin.xyz/"
current = await current_version_loader()
if version == "Retention":
mlgver = current["version"]
elif version == "Latest":
readObj_latest = request.urlopen(url+"latest")
response = readObj_latest.read()
data = json.loads(response)
mlgver = data["version"]
with open('./gacha_data/version.json', 'w') as f:
json.dump(data, f)
else:
try:
if "ja" in version:
mlgver = [version,current["version"][1]]
with open('./gacha_data/version.json', 'w') as f:
json.dump({"version":[version,current["version"][1]]}, f)
else:
mlgver = [current["version"][0],version]
with open('./gacha_data/version.json', 'w') as f:
                    json.dump({"version": [current["version"][0], version]}, f)
except:
import traceback
traceback.print_exc()
current_ver = [mlgver[0],mlgver[1],mlgver[1]]
    print(strtimestamp() + 'Using version JP:"' + mlgver[0] + '", ASIA:"' + mlgver[1] + '". Starting to load card data.')
info = list()
for row in current_ver:
readObj_gachadata = request.urlopen(url+"gachadata/"+row)
response_gachadata = readObj_gachadata.read()
info.append(json.loads(response_gachadata))
for langint,langname in enumerate(langnamelist):
pickup_id[langint] = info[langint]["pickupIDs"]
readObj_cards = request.urlopen(url+"cards")
response_cards = readObj_cards.read()
reader = json.loads(response_cards)
for langint,langname in enumerate(langnamelist):
count = [0,0,0,0]
gacha_mode[langint] = info[langint]["gachaMode"]
print('[Step ' + str(langint + 1) + '/3 (Lang:' + langname + ', Gacha mode is "' + gacha_mode[langint] + '")]')
if flag == 1: await msg.edit(content='MLG Database Loading... \nStep ' + str(langint + 1) + '/3 (Lang:' + langname + ', Gacha mode is "' + gacha_mode[langint] + '")')
if langint < 2:
pickup_img[langint] = info[langint]["gachaImageUrl"]
pickup_name[langint] = info[langint]["gachaName"]
else:
pickup_img[langint] = info[langint]["gachaImageUrlCN"]
pickup_name[langint] = info[langint]["gachaNameCN"]
mlg_all[langint] = reader[langname]
        # Memo (I'm still getting used to this):
        # gachaMode = [normal, fes, party, final, special, skip]
if gacha_mode[langint] == 'skip':
            # skip: skip this language entirely
name[langint] = ""
continue
elif gacha_mode[langint] == 'special':
            # special: special gacha (only cards whose ids are listed in pickupIDs can drop)
pickup_id[langint] = info[langint]["pickupIDs"]
for row in reader[langname]:
if row["id"] in info[langint]["pickupIDs"]:
mlg_data[langint].append(row)
count[row["rarity"]] += 1
if info[langint]["lastIDs"] == row["id"]:
break
elif gacha_mode[langint] == 'party' or gacha_mode[langint] == 'type':
            # party: after-party gacha (only idols whose ids are in pickupIDs drop; rules of the third gacha round)
            # type: type gacha (only idols whose ids are in pickupIDs drop)
pickup_id[langint] = info[langint]["pickupIDs"]
for row in reader[langname]:
if row["idolNum"] in info[langint]["pickupIDs"]:
mlg_data[langint].append(row)
count[row["rarity"]] += 1
else:
            # final: guaranteed-SSR gacha (the 10th pull only yields SSR cards whose ids are in pickupIDs)
            # normal: regular gacha
            # fes: Million Fes (double SSR rate)
pickup_id[langint] = info[langint]["pickupIDs"]
for row in reader[langname]:
if not row["limited"]:
mlg_data[langint].append(row)
count[row["rarity"]] += 1
elif row["limited"] and row["id"] in pickup_id[langint]:
mlg_data[langint].append(row)
count[row["rarity"]] += 1
elif row["rarity"] == 3 and gacha_mode[langint] == "fes":
mlg_data[langint].append(row)
count[row["rarity"]] += 1
if info[langint]["lastIDs"] == row["id"]:
break
print('Gacha name is 「' + pickup_name[langint] + '」')
if gacha_mode[langint] == 'party': name[langint] = '**```打ち上げガチャ3回目の仕様です。10枚目は期間限定SSRが確定で排出されます。以下のアイドルのみ排出されます。```**\n'
elif gacha_mode[langint] == 'special': name[langint] = '**```以下のカードのみ排出されます。```**\n'
elif gacha_mode[langint] == 'final': name[langint] = '**```10連目は以下のカードのみ排出されます。```**\n'
elif gacha_mode[langint] == 'fes': name[langint] = '**```ミリオンフェス開催中!!SSR排出率が通常の2倍!```**\n'
if gacha_mode[langint] == 'party' or gacha_mode[langint] == 'type':
print('Pickup idols')
idollist = []
for row in mlg_data[langint]:
idollist.append(row["idol"])
idollist_set = set(idollist)
for row in idollist_set:
print(row)
name[langint] += row + '・'
else:
print('Pickup cards')
for row in mlg_data[langint]:
if row["id"] in pickup_id[langint]:
                    lim = _('限定') if row["limited"] else ''
                    print('[' + lim + rarity_str[row["rarity"]] + ']' + row["name"] + ' ' + row["idol"] + ' (CV.' + row["cv"] + ')')
                    name[langint] += '[' + lim + rarity_str[row["rarity"]] + ']' + row["name"] + ' ' + row["idol"] + ' (CV.' + row["cv"] + ')\n'
        print('Activated ' + str(len(mlg_data[langint])) + ' cards. ([FES]' + str(count[3]) + ', [SSR]' + str(count[2]) + ', [SR]' + str(count[1]) + ', [R]' + str(count[0]) + ')')
    print('Loaded cards. (Japanese:' + str(len(mlg_all[0])) + ', Korean:' + str(len(mlg_all[1])) + ', Chinese:' + str(len(mlg_all[2])) + ')')
emb = discord.Embed(title='Pickup Cards')
if not gacha_mode[0] == 'skip': emb.add_field(name='Japanese:' + pickup_name[0], value=name[0])
if not gacha_mode[1] == 'skip': emb.add_field(name='Korean:' + pickup_name[1], value=name[1])
if not gacha_mode[2] == 'skip': emb.add_field(name='Chinese:' + pickup_name[2], value=name[2])
emb.set_footer(text='Version JA:' + mlgver[0] + ', ASIA:' + mlgver[1])
if flag == 1: await msg.edit(content='All MLreload process completed successfully.', embed=emb)
print(strtimestamp() + 'All MLreload process completed successfully.')
print(strtimestamp() + '-----------------------------------------')
return
async def gacha_note(message,langint):
char_list = list()
try:
with open('./gacha/' + langnamelist[langint] + str(message.author.id) + '.txt', 'r') as f:
listline = f.read()
char_list = list(listline)
except:
import traceback
traceback.print_exc()
await message.channel.send(message.author.mention + _('所持SSRの記録がないか、エラーが発生しました。'))
return
text = ['']
cards = []
page = 0
count = 0
for n in range(4):
for val in mlg_all[langint]:
            try:
                if char_list[val["id"]] == '1' and val["rarity"] == n:
                    cards.insert(0, val)
            except IndexError:
                # Card id beyond this user's recorded bitmap
                pass
for val in cards:
if count == 10:
text.append('')
page += 1
count = 0
text[page] += '\n[' + rarity_str[val["rarity"]] + ']' + val["name"] + ' ' + val["idol"]
count += 1
gacha_count = str()
try:
with open('./gacha_count/' + current_ver[langint] + '_' + str(message.author.id) + '.txt', 'r') as f:
gacha_count = f.read()
except:
gacha_count = '0'
    footer_text = _('ドリームスター所持数:') + gacha_count
now = 1
emb = discord.Embed(title=_('所持SSR一覧') + ' Page ' + str(now) + '/' + str(len(text)), description=text[now - 1])
emb.set_author(name=message.author.name, icon_url=message.author.avatar_url)
    emb.set_footer(text=footer_text)
msg = await message.channel.send(_('見終わったら×で消してね!'), embed=emb)
await msg.add_reaction('◀')
await msg.add_reaction('▶')
await msg.add_reaction('❌')
while True:
try:
target_reaction, user = await client.wait_for('reaction_add', timeout=timeout)
if target_reaction.emoji == '◀' and user != msg.author:
if not now == 1:
now -= 1
else:
now = len(text)
emb = discord.Embed(title=_('所持SSR一覧') + ' Page ' + str(now) + '/' + str(len(text)), description=text[now - 1])
emb.set_author(name=message.author.name, icon_url=message.author.avatar_url)
                emb.set_footer(text=footer_text)
await msg.edit(embed=emb)
await msg.remove_reaction(target_reaction.emoji, user)
elif target_reaction.emoji == '▶' and user != msg.author:
if not now == len(text):
now += 1
else:
now = 1
emb = discord.Embed(title=_('所持SSR一覧') + ' Page ' + str(now) + '/' + str(len(text)), description=text[now - 1])
emb.set_author(name=message.author.name, icon_url=message.author.avatar_url)
                emb.set_footer(text=footer_text)
await msg.edit(embed=emb)
await msg.remove_reaction(target_reaction, user)
elif target_reaction.emoji == '❌' and user != msg.author:
await msg.delete()
break
else:
pass
except asyncio.TimeoutError:
await msg.edit(content=_('しばらく操作がなかったため、タイムアウトしました。'),embed=None)
await asyncio.sleep(10)
await msg.delete()
break
async def mlg_touch(message,result,kind,vc,botmsg,langint):
fes_flag = 0
ssr_flag = 0
sr_flag = 0
author = message.author
if kind == 'ミリオンフェス' or kind == '百萬祭典' or kind == '밀리언 페스티벌':
for val in result:
if val["rarity"] == 3:
fes_flag = 1
pink_flag = random.randint(1, 20)
if pink_flag == 10:
img = 'https://i.imgur.com/fGpfCgB.gif'
elif pink_flag == 20:
img = 'https://i.imgur.com/jWTTZ0d.gif'
else:
img = 'https://i.imgur.com/0DxyVhm.gif'
break
elif val["rarity"] == 2:
ssr_flag = 1
elif val["rarity"] == 1:
sr_flag = 1
if not fes_flag == 1:
if ssr_flag == 1:
img = 'https://i.imgur.com/jWTTZ0d.gif'
elif sr_flag == 1 and not ssr_flag == 1:
img = 'https://i.imgur.com/vF7fDn3.gif'
else:
img = 'https://i.imgur.com/hEHa49X.gif'
else:
for val in result:
if val["rarity"] == 2:
ssr_flag = 1
break
if val["rarity"] == 1:
sr_flag = 1
if ssr_flag == 1:
img = 'https://i.imgur.com/jWTTZ0d.gif'
elif sr_flag == 1 and not ssr_flag == 1:
img = 'https://i.imgur.com/vF7fDn3.gif'
else:
img = 'https://i.imgur.com/hEHa49X.gif'
await asyncio.sleep(0.7)
waitemb = discord.Embed()
if fes_flag == 1 and pink_flag == 10: waitemb.set_image(url='https://i.imgur.com/ZC8JK9i.gif')
else: waitemb.set_image(url='https://i.imgur.com/da2w9YS.gif')
waitemb.set_footer(text=pickup_name[langint])
msg = await message.channel.send(message.author.mention, embed=waitemb)
await msg.add_reaction('👆')
try:
log = ''
count = 0
ssr_skip = []
ssr_count = []
while True:
target_reaction, user = await client.wait_for('reaction_add', timeout=timeout)
if user == author and target_reaction.emoji == '👆':
await msg.clear_reactions()
openemb = discord.Embed()
openemb.set_footer(text=kind)
openemb.set_image(url=img)
await msg.edit(embed=openemb)
if not vc == None:
await asyncio.sleep(0.4)
if fes_flag == 1 and not pink_flag == 20:
vc.play(discord.FFmpegPCMAudio('./resources/open_fes.mp3'))
else:
vc.play(discord.FFmpegPCMAudio('./resources/open.mp3'))
while vc.is_playing():
await asyncio.sleep(1)
else:
await asyncio.sleep(6)
break
while count < len(result):
result_10 = result[count]
if result_10["rarity"] == 3:
player_show = discord.FFmpegPCMAudio('./resources/fes.mp3')
await msg.clear_reactions()
elif result_10["rarity"] == 2:
player_show = discord.FFmpegPCMAudio('./resources/ssr.mp3')
await msg.clear_reactions()
elif result_10["rarity"] <= 1:
player_show = discord.FFmpegPCMAudio('./resources/normal.mp3')
desc = rarity_str[result_10["rarity"]] + ' ' + result_10["name"] + ' ' + result_10["idol"]
mlgnormalemb = discord.Embed(title=desc, description='(CV.' + result_10["cv"] + ')', colour=int(result_10["color"], 0))
footer_text = kind + ' ' + str((count + 1)) + '/' + str(len(result))
mlgnormalemb.set_author(name=author.name, icon_url=author.avatar_url)
mlgnormalemb.set_footer(text=footer_text)
mlgnormalemb.set_image(url=result_10["image"])
if not vc == None: vc.play(player_show)
            # Show the card (for SSRs, the pre-awakening art first)
await msg.edit(content=author.mention, embed=mlgnormalemb)
if result_10["rarity"] >= 2:
if not vc == None:
while vc.is_playing():
await asyncio.sleep(1)
vc.play(discord.FFmpegPCMAudio('./resources/ssr_talk.mp3'))
line = result_10["ssrText"].replace("ProP", author.name + "P")
mlgssremb = discord.Embed(title=desc, description='(CV.' + result_10["cv"] + ')', colour=int(result_10["color"], 0))
mlgssremb.set_footer(text=footer_text, icon_url=author.avatar_url)
mlgssremb.set_image(url=result_10["imageAwake"])
await asyncio.sleep(4.2)
await msg.edit(content=author.mention, embed=mlgssremb)
await asyncio.sleep(3)
await msg.edit(content=author.mention + ' ' + result_10["idol"] + '「' + line + '」', embed=mlgssremb)
await msg.add_reaction('👆')
await msg.add_reaction('⏭')
while True:
target_reaction2, user = await client.wait_for('reaction_add', timeout=timeout)
if target_reaction2.emoji == '👆' and user == author:
if not vc == None and vc.is_playing(): vc.stop()
count += 1
log += '[' + rarity_str[result_10["rarity"]] + ']' + result_10["name"] + ' ' + result_10["idol"] + '\n'
if count == len(result):
if not vc == None:
if not bgm_id == 0:
await botmsg.add_reaction('⏹')
await vc.disconnect()
await msg.clear_reactions()
await msg.delete()
gacha_count = str()
try:
with open('./gacha_count/' + current_ver[langint] + '_' + str(message.author.id) + '.txt', 'r') as f:
gacha_count = f.read()
except:
print(strtimestamp() + '[ERROR]Gacha count read FAILED.')
toLog = client.get_channel(log_id)
footer_text = kind
mlglogemb = discord.Embed(title=_('ガシャ結果'), description=log + '\n' + _('ドリームスター所持数:') + gacha_count)
mlglogemb.set_author(name=author.name, icon_url=author.avatar_url)
mlglogemb.set_footer(text=footer_text)
await toLog.send(embed=mlglogemb)
break
else:
await msg.remove_reaction(target_reaction2.emoji, user)
break
elif target_reaction2.emoji == '⏭' and user == author and len(result) == 10:
for n,box in enumerate(result):
if count > n:
continue
log += '[' + rarity_str[box["rarity"]] + ']' + box["name"] + ' ' + box["idol"] + '\n'
if box["rarity"] >= 2:
ssr_skip.append(box)
ssr_count.append(str(n+1))
if len(ssr_skip) > 0:
for n,result_ssr in enumerate(ssr_skip):
if result_ssr["rarity"] == 3:
player_show = discord.FFmpegPCMAudio('./resources/fes.mp3')
await msg.clear_reactions()
elif result_ssr["rarity"] == 2:
player_show = discord.FFmpegPCMAudio('./resources/ssr.mp3')
await msg.clear_reactions()
desc = rarity_str[result_ssr["rarity"]] + ' ' + result_ssr["name"] + ' ' + result_ssr["idol"]
mlgnormalemb = discord.Embed(title=desc, description='(CV.' + result_ssr["cv"] + ')', colour=int(result_ssr["color"], 0))
footer_text = kind + ' ' + str(ssr_count[n]) + '/' + str(len(result))
mlgnormalemb.set_author(name=author.name, icon_url=author.avatar_url)
mlgnormalemb.set_footer(text=footer_text)
mlgnormalemb.set_image(url=result_ssr["image"])
if not vc == None and vc.is_playing():
vc.stop()
vc.play(player_show)
await msg.edit(content=author.mention, embed=mlgnormalemb)
if not vc == None:
while vc.is_playing():
await asyncio.sleep(1)
vc.play(discord.FFmpegPCMAudio('./resources/ssr_talk.mp3'))
line = result_ssr["ssrText"].replace('ProP', author.name + 'P')
mlgssremb = discord.Embed(title=desc, description='(CV.' + result_ssr["cv"] + ')', colour=int(result_ssr["color"], 0))
mlgssremb.set_footer(text=footer_text, icon_url=author.avatar_url)
mlgssremb.set_image(url=result_ssr["imageAwake"])
await asyncio.sleep(4.2)
await msg.edit(content=author.mention, embed=mlgssremb)
await asyncio.sleep(3)
await msg.edit(content=author.mention + ' ' + result_ssr["idol"] + '「' + line + '」', embed=mlgssremb)
await msg.add_reaction('👆')
while True:
target_reaction2, user = await client.wait_for('reaction_add')
if target_reaction2.emoji == '👆' and user == author:
if not vc == None and vc.is_playing(): vc.stop()
count += 1
await msg.remove_reaction(target_reaction2.emoji, user)
break
if not vc == None:
if not bgm_id == 0:
await botmsg.add_reaction('⏹')
await vc.disconnect()
gacha_count = str()
try:
with open('./gacha_count/' + current_ver[langint] + '_' + str(message.author.id) + '.txt', 'r') as f:
gacha_count = f.read()
except:
print(strtimestamp() + '[ERROR]Gacha count read FAILED.')
count += 10
await msg.delete()
toLog = client.get_channel(log_id)
footer_text = kind
mlglogemb = discord.Embed(title=_('ガシャ結果'), description=log + '\n' + _('ドリームスター所持数:') + gacha_count)
mlglogemb.set_author(name=author.name, icon_url=author.avatar_url)
mlglogemb.set_footer(text=footer_text)
await toLog.send(embed=mlglogemb)
break
        print(strtimestamp() + 'MLGacha complete. ' + author.name + "'s result\n" + log)
    except asyncio.TimeoutError:
await msg.delete()
if not vc == None:
await vc.disconnect()
if not bgm_id == 0:
await botmsg.add_reaction('⏹')
await message.channel.send(_('しばらく操作がなかったため、タイムアウトしました。'))
def voicecheck():
    """Return True if the bot currently holds at least one voice connection."""
    try:
        return client.voice_clients[0] is not None
    except IndexError:
        return False
def pickupcheck(langint):
global pickup_id
name = ''
if gacha_mode[langint] == 'party': name = '**```打ち上げガチャ3回目の仕様です。10枚目は期間限定SSRが確定で排出されます。```**\n'
elif gacha_mode[langint] == 'special' or gacha_mode[langint] == 'final': name = '**```以下のカードのみ排出されます。```**\n'
elif gacha_mode[langint] == 'fes': name = '**```ミリオンフェス開催中!!SSR排出率が通常の2倍!```**\n'
if gacha_mode[langint] == 'party' or gacha_mode[langint] == 'type':
print('Pickup idols')
idollist = []
for row in mlg_data[langint]:
idollist.append(row["idol"])
idollist_set = set(idollist)
for row in idollist_set:
name += row + '・'
else:
for row in mlg_data[langint]:
if row["id"] in pickup_id[langint]:
                lim = _('限定') if row["limited"] else ''
name += '[' + lim + rarity_str[row["rarity"]] + ']' + row["name"] + ' ' + row["idol"] + ' (CV.' + row["cv"] + ')\n'
print(name)
return name
def langtoint():
if lang == 'ja':
return 0
elif lang == 'kr':
return 1
elif lang == 'cn':
return 2
else:
return 0
def langtostr(langint):
if langint == 0:
return 'ja'
elif langint == 1:
return 'kr'
elif langint == 2:
return 'cn'
    else:
        # Default to Japanese like langtoint(); the original returned int 0 here
        return 'ja'
def langstrtoint(langstr):
if 'ja' in langstr:
return 0
elif 'kr' in langstr:
return 1
elif 'cn' in langstr:
return 2
else:
return 0
def strtimestamp():
    # strftime gives the same layout with zero-padded fields, which the
    # original field-by-field concatenation lacked
    return datetime.datetime.now().strftime('[%Y-%m-%d %H:%M:%S]')
client.run(token)
# === File: LeetCode/2018-12-25-88-Merge-Sorted-Array.py | repo: HeRuivio/-Algorithm | license: MIT ===
# -*- coding: utf-8 -*-
# @Author: 何睿
# @Create Date: 2018-12-25 17:10:16
# @Last Modified by: 何睿
# @Last Modified time: 2018-12-25 17:10:16
class Solution:
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: void Do not return anything, modify nums1 in-place instead.
"""
        # Two pointers from the back: fill nums1's spare tail slots first,
        # so no surviving element of nums1 is ever overwritten
        p, q = m - 1, n - 1
        while p >= 0 and q >= 0:
            if nums1[p] > nums2[q]:
                nums1[p + q + 1] = nums1[p]
                p -= 1
            else:
                nums1[p + q + 1] = nums2[q]
                q -= 1
        # Whatever remains of nums2 belongs at the very front
        nums1[:q + 1] = nums2[:q + 1]
if __name__ == "__main__":
so = Solution()
nums1 = [1, 2, 3, 0, 0, 0]
nums2 = [2, 5, 6]
so.merge(nums1, 3, nums2, 3)
print(nums1)
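    # Expected output of the demo above: [1, 2, 2, 3, 5, 6]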
# === File: stanCode projects/movie_review/interactive.py | repo: dianapei/SC-projects | license: MIT ===
"""
File: interactive.py
Name: Pei-Rung Yu
------------------------
This file uses the function interactivePrompt
from util.py to predict the sentiment of reviews
entered by users through the console. Remember to
read the weights and build a Dict[str, float].
"""
import util
import submission
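
# The 'weights' file is expected to hold one "<token> <weight>" pair per
# line; illustrative values only, e.g.:
#   good 1.73
#   boring -0.95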
def main():
weights = {}
    # Read the trained weights from the 'weights' file
with open('weights', 'r', encoding='utf-8') as w:
for line in w:
line = line.split()
weights[line[0]] = float(line[1])
util.interactivePrompt(featureExtractor=submission.extractWordFeatures, weights=weights)
if __name__ == '__main__':
    main()
# === File: UMAP_analysis/visualization.py | repo: menchelab/UMAPanalysis | license: MIT ===
import networkx as nx

# Imports required by draw() below; they were missing from the original
import matplotlib.pyplot as plt
from matplotlib.collections import PolyCollection
def kamada_kawaii(G):
    """Kamada-Kawai layout for a (possibly disconnected) directed graph.

    One node from each weakly connected component is chained to the
    previous component on a copy of G, so the layout algorithm sees a
    single connected graph.
    """
    j = None
    G_h = G.copy()
    for s in nx.weakly_connected_components(G):
        i = s.pop()
        if j is not None:
            G_h.add_edge(i, j)
        j = i
    return nx.kamada_kawai_layout(G_h, weight=None)
def draw(G, layout, node_color):
    """Draw G with every 3-clique shaded as a translucent triangle.

    `layout` maps nodes to 2-D positions (e.g. from kamada_kawaii above)
    and `node_color` is a per-node colour sequence. The original version
    referenced the undefined globals `layout`, `df` and `d`; passing the
    layout and colours in explicitly is a reconstruction of the intent.
    """
    fig = plt.figure(figsize=[10, 10])
    ax = fig.add_axes([0, 0, 1, 1])
    ax.set_xticks([])
    ax.set_yticks([])
    simplices = [c for c in nx.find_cliques(G.to_undirected()) if len(c) == 3]
    collec = PolyCollection(
        [[layout[i], layout[j], layout[k]] for i, j, k in simplices],
        facecolors=["#00000044"] * len(simplices),
    )
    ax.add_collection(collec)
    nx.draw_networkx(G, ax=ax, pos=layout, labels={}, node_color=node_color)
# === File: eval/eval.py | repo: grammatek/GreynirCorrect | license: MIT ===
#!/usr/bin/env python
"""
Greynir: Natural language processing for Icelandic
Evaluation of spelling and grammar correction
Copyright (C) 2021 Miðeind ehf.
This software is licensed under the MIT License:
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This program uses an Icelandic spelling & grammar error corpus
(https://github.com/antonkarl/iceErrorCorpus) to evaluate the
performance of the GreynirCorrect package.
The program reads a development set of hand-annotated texts in
TEI XML format and automatically annotates errors using GreynirCorrect.
The machine-generated annotations are then compared with the hand-annotated
gold reference.
This program uses Python's multiprocessing.Pool() to perform
the evaluation using all available CPU cores, simultaneously.
A normal way to configure this program is to clone the iceErrorCorpus
repository (from the above path) into a separate directory, and
    then place a symlink to it in the /eval directory. For example:
$ cd github
$ git clone https://github.com/antonkarl/iceErrorCorpus
$ cd GreynirCorrect/eval
$ ln -s ../../iceErrorCorpus/ .
$ python eval.py
An alternate method is to specify a glob path to the error corpus as an
argument to eval.py:
$ python eval.py ~/github/iceErrorCorpus/data/**/*.xml
To measure GreynirCorrect's performance on the test set
(by default located in ./iceErrorCorpus/testCorpus/):
$ python eval.py -m
To measure GreynirCorrect's performance on the test set
excluding malformed sentences:
$ python eval.py -m -x
To run GreynirCorrect on the entire development corpus
(by default located in ./iceErrorCorpus/data):
$ python eval.py
To run GreynirCorrect on 10 files in the development corpus:
$ python eval.py -n 10
To run GreynirCorrect on a randomly chosen subset of 10 files
in the development corpus:
$ python eval.py -n 10 -r
To get an analysis report of token comparisons:
$ python eval.py -a
"""
from typing import (
TYPE_CHECKING,
Dict,
List,
Optional,
Set,
Union,
Tuple,
Iterable,
cast,
Any,
DefaultDict,
Counter,
)
import os
from collections import defaultdict
from datetime import datetime
import glob
import random
import argparse
import xml.etree.ElementTree as ET
if TYPE_CHECKING:
# For some reason, types seem to be missing from the multiprocessing module
# but not from multiprocessing.dummy
import multiprocessing.dummy as multiprocessing
else:
import multiprocessing
from reynir import _Sentence
from tokenizer import detokenize, Tok, TOK
from reynir_correct.annotation import Annotation
from reynir_correct.checker import AnnotatedSentence, check as gc_check
# Disable Pylint warnings arising from Pylint not understanding the typing module
# pylint: disable=no-member
# pylint: disable=unsubscriptable-object
# The type of a single error descriptor, extracted from a TEI XML file
ErrorDict = Dict[str, Union[str, int, bool]]
# The type of the dict that holds statistical information about sentences
# within a particular content category
SentenceStatsDict = DefaultDict[str, Union[float, int]]
# The type of the dict that holds statistical information about
# content categories
CategoryStatsDict = DefaultDict[str, SentenceStatsDict]
# This tuple should agree with the parameters of the add_sentence() function
StatsTuple = Tuple[
str, int, bool, bool, int, int, int, int, int, int, int, int, int, int, int, int
]
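
# For illustration, one StatsTuple produced per sentence might look like
# ("essays", 18, True, True, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0):
# category, token count, the iEC and GC error flags, then the counters
# in the order taken by add_sentence() below.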
# Counter of tp, tn, right_corr, wrong_corr, right_span, wrong_span
TypeFreqs = Counter[str]
# Stats for each error type for each content category
# tp, fn, right_corr, wrong_corr, right_span, wrong_span
ErrTypeStatsDict = DefaultDict[str, TypeFreqs]
CatResultDict = Dict[str, Union[int, float, str]]
# Create a lock to ensure that only one process outputs at a time
OUTPUT_LOCK = multiprocessing.Lock()
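
# A minimal sketch of the intended pattern (illustrative only; the real
# call sites appear further down, once a worker has buffered its output):
#   with OUTPUT_LOCK:
#       print("\n".join(buffer), flush=True)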
# Content categories in iceErrorCorpus, embedded within the file paths
GENRES = (
"essays",
"onlineNews",
"wikipedia",
)
# Error codes in iceErrorCorpus that are considered out of scope
# for GreynirCorrect, at this stage at least
OUT_OF_SCOPE = {
    "act4mid",
    "act4pass",
    "adj4noun",
    "adjective-inflection",
    "agreement-pro",  # pronoun agreement with its antecedent  grammar  ...vöðvahólf sem sé um dælinguna. Hann dælir blóðinu > Það dælir blóðinu
    "aux",  # handling of vera and verða, auxiliary verbs  wording  mun verða eftirminnilegt > mun vera eftirminnilegt
    "bad-contraction",
    "bracket4square",  # parenthesis instead of square bracket  punctuation  (Portúgal) > [Portúgal]
    "caps4low",
    "case-verb",
    "case-prep",
    "case-adj",
    "case-collocation",
    # "collocation-idiom",  # fixed expression with an opaque meaning  collocation  hélt hvorki vindi né vatni > hélt hvorki vatni né vindi
    # "collocation",  # fixed expression  collocation  fram á þennan dag > fram til þessa dags
    "comma4conjunction",  # comma instead of conjunction  punctuation  ...fara með vald Guðs, öll löggjöf byggir... > ...fara með vald Guðs og öll löggjöf byggir...
    "comma4dash",  # comma instead of dash  punctuation  , > -
    "comma4ex",  # comma instead of exclamation mark  punctuation  Viti menn, almúginn... > Viti menn! Almúginn...
    "comma4period",  # comma instead of period  punctuation  ...kynnast nýju fólki, er á þrítugsaldri > ...kynnast nýju fólki. Hann er á þrítugsaldri
    "comma4qm",  # comma instead of question mark  punctuation  Höfum við réttinn, eins og að... > Höfum við réttinn? Eins og að...
    "conjunction",
    "conjunction4comma",  # conjunction instead of comma  punctuation  ...geta orðið þröngvandi og erfitt getur verið... > ...geta orðið þröngvandi, erfitt getur verið...
    "conjunction4period",  # conjunction instead of period  punctuation  ...tónlist ár hvert og tónlistarstefnurnar eru orðnar... > ...tónlist ár hvert. Tónlistarstefnurnar eru orðnar...
    "context",  # wrong word in context  other
    "dash4semicolon",  # dash instead of semicolon  punctuation  núna - þetta > núna; þetta
    "def4ind",  # definite instead of indefinite  grammar  skákinni > skák
    "dem-pro",  # hinn instead of sá; sá missing or overused  grammar  hinn > sá
    "dem4noun",  # demonstrative pronoun instead of noun  grammar  hinn > maðurinn
    "dem4pers",  # demonstrative instead of personal pronoun  grammar  þessi > hún
    "extra-comma",  # extra comma  punctuation  stríð, við náttúruna > stríð við náttúruna
    "extra-dem-pro",
    "extra-number",  # redundant digits  other  139,0 > 139
    "extra-period",  # extra period  punctuation  á morgun. Og ... > á morgun og...
    "extra-punctuation",  # extra punctuation  punctuation  ... að > að
    "extra-space",  # redundant space  spacing  4 . > 4.
    "extra-sub",
    "extra-symbol",  # redundant symbol  other  Dalvík + gaf... > Dalvík gaf...
    "extra-word",  # redundant word  insertion  augun á mótherja > augu mótherja
    "extra-words",  # redundant words  insertion  ...ég fer að hugsa... > ...ég hugsa...
    "foreign-error",  # error in a foreign word  foreign  Supurbowl > Super Bowl
    "foreign-name",  # error in a foreign name  foreign  Warwixk > Warwick
    "fw4ice",  # foreign word translated into Icelandic  style  Elba > Saxelfur
    "gendered",  # gendered language, menn used for fólk  exclusion  menn hugsa oft > fólk hugsar oft
    "genitive",
    "geta",
    "have",
    "ice4fw",  # Icelandic word used instead of a foreign one  Demókrata öldungarþings herferðarnefndina > Democratic Senatorial Campaign Committee
    "ind4def",  # indefinite instead of definite  grammar  gítartakta > gítartaktana
    "ind4sub",  # indicative instead of subjunctive  grammar  Þrátt fyrir að konfúsíanismi er upprunninn > Þrátt fyrir að konfúsíanismi sé upprunninn
    "indef-pro",  # indefinite pronoun  grammar  enginn > ekki neinn
    "interr-pro",
    "it4nonit",  # italic instead of non-italic  Studdi Isma'il > Studdi Isma'il
    "loan-syntax",  # borrowed syntax  style  ég vaknaði upp > ég vaknaði
    "low4caps",
    "marked4unmarked",
    "mid4act",
    "mid4pass",
    "missing-commas",  # commas missing around a parenthetical  punctuation  Hún er jafn verðmæt ef ekki verðmætari en háskólapróf > Hún er verðmæt, ef ekki verðmætari, en háskólapróf
    "missing-conjunction",  # missing conjunction  punctuation  í Noregi suður að Gíbraltarsundi > í Noregi og suður að Gíbraltarsundi
    "missing-dem-pro",
    "missing-ex",  # missing exclamation mark  punctuation  Viti menn ég komst af > Viti menn! Ég komst af
    "missing-fin-verb",
    "missing-obj",
    "missing-quot",  # missing quotation mark  punctuation  „I'm winning > „I'm winning“
    "missing-quots",  # missing quotation marks  punctuation  I'm winning > „I'm winning“
    "missing-semicolon",  # missing semicolon  punctuation  Haukar Björgvin Páll > Haukar; Björgvin Páll
    "missing-square",  # missing square bracket  punctuation  þeir > [þeir]
    "missing-sub",
    "missing-symbol",  # missing symbol  punctuation  0 > 0%
    "missing-word",  # missing word  omission  í Donalda > í þorpinu Donalda
    "missing-words",  # more than one word missing  omission  því betri laun > því betri laun hlýtur maður
    "nominal-inflection",
    "nonit4it",  # non-italic instead of italic  orðið qibt > orðið qibt
    "noun4adj",
    "noun4dem",  # noun instead of demonstrative pronoun  grammar  stærsta klukkan > sú stærsta
    "noun4pro",  # noun instead of pronoun  grammar  menntun má nálgast > hana má nálgast
    "number4word",
    "numeral-inflection",
    "pass4act",
    "pass4mid",
    "passive",
    "past4pres",  # past-tense verb instead of present  grammar  þegar hún leigði spólur > þegar hún leigir spólur
    "perfect4tense",
    "period4comma",  # period instead of comma  punctuation  meira en áður. Hella meira í sig > meira en áður, hella meira í sig
    "period4conjunction",  # period instead of conjunction  punctuation  ...maður vill gera. Vissulega > ...maður vill gera en vissulega
    "period4ex",  # period instead of exclamation mark  punctuation  Viti menn. > Viti menn!
    "pers4dem",  # personal pronoun instead of demonstrative  grammar  það > þetta
    "pres4past",  # present-tense verb instead of past  grammar  Þeir fara út > Þeir fóru út
    "pro4noun",  # pronoun instead of noun  grammar  þau voru spurð > parið var spurt
    "pro4reflexive",  # personal pronoun instead of reflexive  grammar  Fólk heldur að það geri það hamingjusamt > Fólk heldur að það geri sig hamingjusamt
    "pro-inflection",
    "punctuation",  # punctuation  punctuation  hún mætti og hann var ekki tilbúinn > hún mætti en hann var ekki tilbúinn
    "qm4ex",  # question mark instead of exclamation mark  punctuation  Algjört hrak sjálf? > Algjört hrak sjálf!
    "reflexive4noun",  # reflexive pronoun instead of noun  grammar  félagið hélt aðalfund þess > félagið hélt aðalfund sinn
    "reflexive4pro",  # reflexive instead of personal pronoun  grammar  gegnum líkama sinn > gegnum líkama hans
    "simple4cont",  # simple present instead of vera að + infinitive  grammar  ók > var að aka
    "square4bracket",  # square bracket instead of parenthesis  punctuation  [börnin] > (börnin)
    "sub4ind",
    "style",  # style  style  urðu ekkert frægir > urðu ekki frægir
    "syntax-other",
    "tense4perfect",
    "unicelandic",  # un-Icelandic usage  style  ...fer eftir persónunni... > ...fer eftir manneskjunni...
    "upper4lower-proper",  # uppercase in a proper noun where it does not belong  capitalization  Mál og Menning > Mál og menning
    "upper4lower-noninitial",
    "v3-subordinate",
    "wording",  # wording  wording  ...gerðum allt í raun... > ...gerðum í raun allt...
"word4number",
"wrong-prep",
"xxx", # unclassified unclassified
"zzz", # to revisit unannotated
}
# Default glob path of the development corpus TEI XML files to be processed
# Using a symlink (ln -s /my/location/of/iceErrorCorpus .) can be a good idea
_DEV_PATH = "iceErrorCorpus/data/**/*.xml"
# Default glob path of the test corpus TEI XML files to be processed
_TEST_PATH = "iceErrorCorpus/testCorpus/**/*.xml"
NAMES = {
"tp": "True positives",
"tn": "True negatives",
"fp": "False positives",
"fn": "False negatives",
"true_positives": "True positives",
"true_negatives": "True negatives",
"false_positives": "False positives",
"false_negatives": "False negatives",
"right_corr": "Right correction",
"wrong_corr": "Wrong correction",
"ctp": "True positives - error correction",
"ctn": "True negatives - error correction",
"cfp": "False positives - error correction",
"cfn": "False negatives - error correction",
"right_span": "Right span",
"wrong_span": "Wrong span",
}
# Three levels: Supercategories, subcategories and error codes
# supercategory: {subcategory : [error code]}
SUPERCATEGORIES: DefaultDict[str, DefaultDict[str, List[str]]] = defaultdict(
lambda: defaultdict(list)
)
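
# Illustrative population of the three-level structure above (the
# supercategory/subcategory names here are hypothetical; the real ones are
# read from the --catfile TSV at runtime):
#   SUPERCATEGORIES["spelling"]["nonword"].append("ASLVITLSTAF")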
GCtoIEC = {
"A001": ["abbreviation-period"],
"A002": ["abbreviation-period"],
"Z001": ["upper4lower-common", "upper4lower-proper", "upper4lower-noninitial"],
"Z002": ["lower4upper-initial", "lower4upper-proper", "lower4upper-acro"],
"Z003": ["upper4lower-common"],
"Z004": ["upper4lower-common"],
"Z005": ["upper4lower-common"],
"Z005/w": ["upper4lower-common"],
"Z006": ["lower4upper-acro"],
"E001": ["No responding iEC category"],
"E002": ["No responding iEC category"],
"E003": ["No responding iEC category"],
"E004": ["fw"],
"C001": ["repeat-word"],
"C002": ["merged-words"],
"C003": ["split-compound", "split-word", "split-words"],
"C004": ["repeat-word"],
"C004/w": ["repeat-word"],
"C005": ["split-compound", "split-word", "split-words"],
"C005/w": ["split-compound", "split-word", "split-words"],
"Y001/w": ["style", "wording", "context"],
"C006": ["compound-nonword"],
"P_NT_Að/w": ["extra-conjunction"],
"P_NT_AnnaðHvort": ["conjunction"],
"P_NT_Annaðhvort": ["conjunction"],
"P_NT_Annara": ["pro-inflection"],
"P_NT_Annarar": ["pro-inflection"],
"P_NT_Annari": ["pro-inflection"],
"P_NT_Einkunn": ["agreement-concord"],
"P_NT_EinnAf": ["agreement"],
"P_NT_EndingANA": ["n4nn"],
"P_NT_EndingIR": ["nominal-inflection"],
"P_NT_FjöldiHluti": ["agreement"],
"P_NT_FráÞvíAð": ["missing-conjunction"],
"P_NT_FsMeðFallstjórn": ["case-prep"],
"P_NT_Heldur/w": ["conjunction"],
"P_NT_ÍTölu": ["plural4singular", "singular4plural"],
"P_NT_Komma/w": ["extra-comma"],
"P_NT_Né": ["conjunction"],
"P_NT_Sem/w": ["extra-conjunction"],
"P_NT_Síðan/w": ["extra-word"],
"P_NT_SíðastLiðinn": ["split-compound"],
"P_NT_SvigaInnihaldNl": ["case-verb", "case-prep", "case-adj"],
"P_NT_TvípunkturFs": ["extra-colon"],
"P_NT_VantarKommu": ["missing-comma"],
"P_NT_VístAð": ["conjunction"],
"P_VeraAð": ["cont4simple"],
"P_NT_ÞóAð": ["conjunction"],
"P_redundant_word": ["extra-word"],
"P_wrong_person": ["verb-inflection"],
"P_wrong_phrase": ["wording"],
"P_wrong_word": ["wording"],
"P_wrong_case": ["case-noun"],
"P_wrong_gender": ["agreement-concord"],
"P_wrong_number": ["agreement-concord"],
"P_wrong_form": ["agreement-concord"],
"P_transposition": ["swapped-letters"],
"P_WRONG_CASE_nf_þf": ["case-verb"],
"P_WRONG_CASE_nf_þgf": ["case-verb"],
"P_WRONG_CASE_nf_ef": ["case-verb"],
"P_WRONG_CASE_þf_nf": ["case-verb"],
"P_WRONG_CASE_þf_þgf": ["case-verb"],
"P_WRONG_CASE_þf_ef": ["case-verb"],
"P_WRONG_CASE_þgf_nf": ["case-verb"],
"P_WRONG_CASE_þgf_þf": ["case-verb"],
"P_WRONG_CASE_þgf_ef": ["case-verb"],
"P_WRONG_CASE_ef_nf": ["case-verb"],
"P_WRONG_CASE_ef_þf": ["case-verb"],
"P_WRONG_CASE_ef_þgf": ["case-verb"],
"P_WRONG_NOUN_WITH_VERB": ["collocation"],
"P_WRONG_OP_FORM": ["verb-inflection"],
"P_WRONG_PLACE_PP": ["wrong-prep"],
"P_aðaf": ["að4af"],
"P_afað": ["af4að"],
"P_kvhv": ["kv4hv"],
"P_hvkv": ["hv4kv"],
"P_nn": ["n4nn"],
"P_n": ["nn4n"],
"P_yi": ["y4i"],
"P_iy": ["i4y"],
"P_yyii": ["ý4í"],
"P_iiyy": ["í4ý"],
"P_WRONG_PREP_AÐ": ["að4af"],
"P_WRONG_PREP_AF": ["af4að"],
"P_WRONG_VERB_USE": ["collocation"],
"P_DIR_LOC": ["dir4loc"],
"P_MOOD_ACK": ["ind4sub-conj"],
"P_MOOD_REL": ["ind4sub-conj"],
"P_MOOD_TEMP": ["sub4ind-conj"],
"P_MOOD_TEMP/w": ["sub4ind-conj"],
"P_MOOD_COND": ["sub4ind-conj"],
"P_MOOD_PURP": ["sub4ind-conj"],
"P_DOUBLE_DEFINITE": ["extra-dem-pro"],
"X_number4word": ["number4word"],
"N001": ["wrong-quot"],
"N002": ["extra-punctuation"],
"N003": ["extra-punctuation"],
"S001": ["nonword"],
"S002": ["nonword"],
"S003": ["nonword"],
"S004": ["nonword"],
"S005": ["nonword"], # No better information available, most likely this
"S006": ["nonword"],
"T001": ["taboo-word"],
"T001/w": ["taboo-word"],
"U001": ["fw"],
"U001/w": ["fw"],
"W001/w": ["nonword"],
"1ORD42": ["nonword", "merged-words"],
"2ORD41": ["nonword", "split-word", "split-words"],
"ANDA4ENDA": ["nonword", "nominal-inflection"],
"ARI4NI": ["nonword", "adjective-inflection"],
"ASLAUKABRODD": ["nonword", "extra-accent", "wrong-accent"],
"ASLAUKASTAF": ["nonword", "extra-letter", "extra-letters"],
"ASLBRODDVANTAR": ["nonword", "missing-accent", "wrong-accent"],
"ASLSTAFVANTAR": ["nonword", "missing-letter", "missing-letters"],
"ASLVITLSTAF": ["nonword", "letter-rep"],
"ASLVIXL": ["nonword", "swapped-letters"],
"ASLVIXLBRODD": ["nonword", "wrong-accent"],
"AUKAG": ["nonword", "extra-letter"],
"AUKAJ": ["nonword", "adjective-inflection"],
"AUKARÞFFT": ["nonword", "nominal-inflection", "extra-letter"],
"B4P": ["nonword", "letter-rep"],
"BAND-OF": ["nonword", "extra-hyphen"],
"BAND-VANT": ["nonword", "missing-hyphen"],
"BEYGSJALD": [
"nonword",
"nominal-inflection",
"verb-inflection",
"adjective-inflection",
"numeral-inflection",
],
"BEYGVILLA": [
"nonword",
"nominal-inflection",
"verb-inflection",
"adjective-inflection",
"numeral-inflection",
],
"CE4CJE": ["nonword", "nominal-inflection"],
"CJE4CE": ["nonword", "extra-letter"],
"CÉ4CE": ["nonword", "extra-accent"],
"DN4NN": ["nonword", "letter-rep"],
"E4EI": ["nonword", "missing-letter"],
"EBEYG": ["nonword", "nominal-inflection"],
"EFINGU": ["nonword", "nominal-inflection"],
"EI4E": ["nonword", "extra-letter"],
"EI4EY": ["nonword", "i4y"],
"EKKIORD": ["nonword"],
"ETVILLA": ["nonword", "nominal-inflection"],
"EY4EI": ["nonword", "y4i"],
"F4FF": ["nonword", "missing-letter"],
"F4V": ["nonword", "letter-rep"],
"FS4PS": ["nonword", "letter-rep"],
"FT4PT": ["nonword", "letter-rep"],
"FTVILLA": ["nonword", "nominal-inflection"],
"G4J": ["nonword", "letter-rep"],
"G4K": ["nonword", "letter-rep"],
"GGÐ4GÐ": ["nonword", "extra-letter"],
"GL4GGL": ["nonword", "missing-letter"],
"GMVILLA": ["nonword", "verb-inflection"],
"GN4GGN": ["nonword", "missing-letter"],
"GN4NG": ["nonword", "swapped-letters"],
"GST4GGST": ["nonword", "missing-letter"],
"GV4GGV": ["nonword", "missing-letter"],
"GVANTAR": ["nonword", "missing-letter"],
"GÐ4GGÐ": ["nonword", "missing-letter"],
"HA4LAG": ["nonword", "upper4lower-common"],
"HK4KVK": ["nonword", "nominal-inflection"],
"HV4KV": ["nonword", "hv4kv"],
"I4Y": ["nonword", "i4y"],
"I4Í": ["nonword", "i4í"],
"J4G": ["nonword", "letter-rep"],
"J4GJ": ["nonword", "extra-letter"],
"JE4É": ["nonword", "extra-letter"],
"JVANTAR": ["nonword", "missing-letter"],
"JÉ4JE": ["nonword", "extra-accent"],
"KKN4KN": ["nonword", "extra-letter"],
"KKT4KT": ["nonword", "extra-letter"],
"KN4KKN": ["nonword", "missing-letter"],
"KT4GT": ["nonword", "letter-rep"],
"KT4KKT": ["nonword", "missing-letter"],
"KV4HV": ["nonword", "kv4hv"],
"KV4HV-FNSP": ["nonword", "kv4hv"],
"KVK4HK": ["nonword", "nominal-inflection"],
"KVK4KK": ["nonword", "nominal-inflection"],
"LAG4HA": ["nonword", "lower4upper-proper"],
"LG4GL": ["nonword", "swapped-letters"],
"LLJ4LJ": ["nonword", "extra-letter"],
"LLST4LST": ["nonword", "extra-letter"],
"LLT4LT": ["nonword", "extra-letter"],
"LS4LLS": ["nonword", "missing-letter"],
"LST4LLST": ["nonword", "missing-letter"],
"LT4LLT": ["nonword", "missing-letter"],
"M4FN": ["nonword", "pronun-writing"],
"M4MM": ["nonword", "missing-letter"],
"MIÐSTIGV": ["nonword", "adjective-inflection"],
"MM4M": ["nonword", "extra-letter"],
"N4NN-END": ["nonword", "nominal-inflection"],
"N4NN-ORD": ["nonword", "nominal-inflection"],
"N4NN-SAM": ["nonword", "nominal-inflection"],
"NG4GN": ["nonword", "swapped-letters"],
"NGNK": ["nonword", "swapped-letters"],
"NN4N-END": ["nonword", "nominal-inflection", "adjective-inflection"],
"NN4N-ORD": ["nonword", "nominal-inflection"],
"NN4N-SAM": ["nonword", "nominal-inflection"],
"O4Ó": ["nonword", "missing-accent"],
"O4Ó-NGNK": ["nonword", "missing-accent"],
"OF-U": ["nonword", "letter-rep"],
"P4B": ["nonword", "letter-rep"],
"P4Þ": ["nonword", "letter-rep"],
"PL4FL": ["nonword", "letter-rep"],
"PPL4PL": ["nonword", "extra-letter"],
"PPN4PN": ["nonword", "extra-letter"],
"PS4PPS": ["nonword", "missing-letter"],
"PT4PPT": ["nonword", "missing-letter"],
"R4RR": ["nonword", "missing-letter"],
"RFTGR": ["nonword", "missing-letter"],
"RN4RFN": ["nonword", "pronun-writing"],
"RN4RÐN": ["nonword", "pronun-writing"],
"RR4R": ["nonword", "extra-letter"],
"RS4RFS": ["nonword", "pronun-writing"],
"SAMS-V": ["nonword", "compound-collocation"],
"SK4STK": ["nonword", "pronun-writing"],
"SKSTV": ["nonword", "missing-hyphen"],
"SL4RSL": ["nonword", "pronun-writing"],
"SN-TALA-GR": ["nonword", "compound-collocation"],
# "SO-ÞGF4ÞF": [""],
"SPMYNDV": ["nonword", "bad-contraction"],
"SST4ST": ["nonword", "pronun-writing"],
"ST4RST": ["nonword", "pronun-writing"],
"ST4SKT": ["nonword", "pronun-writing"],
"S4AR-EF": ["nonword", "nominal-inflection"],
"S-EFGR": ["nonword", "nominal-inflection"],
"STAFAGERD": ["nonword"], # No corresponding category in iEC?
"STAFS-ERL": ["nonword", "fw4ice"],
"STAFSVVIXL": ["nonword", "swapped-letters"],
"STK4SK": ["nonword", "pronun-writing"],
"STN4SN": ["nonword", "pronun-writing"],
"T4TT": ["nonword", "missing-letter"],
"TOKV": ["nonword", "fw4ice"], # No corresponding category in iEC?
"TTN4TN": ["nonword", "pronun-writing", "extra-letter"],
"U4Y": ["nonword", "u4y"],
"U4Ú": ["nonword", "missing-accent"],
"V4F": ["nonword", "letter-rep"],
"VANTAR-J-FT": ["nonword", "nominal-inflection"],
"Y4I": ["nonword", "y4i"],
"Z4S": ["nonword", "letter-rep"],
"É4JE": ["nonword", "pronun-writing"],
"Í4I": ["nonword", "í4i"],
"Í4Ý": ["nonword", "í4ý"],
"Ý4Y": ["nonword", "ý4y"],
"Ý4Í": ["nonword", "ý4í"],
# Malsnid errors
"URE": ["style", "wording", "context"],
"VILLA": ["style", "wording", "context"],
# Einkunn errors
"R000": ["style"],
"R002": ["style"],
"R003": ["style"],
"R004": ["style"],
"R005": ["style"],
}
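
# Example use of the mapping above: a GreynirCorrect annotation coded "P_yi"
# is counted as matching a reference error annotated "y4i" in iceErrorCorpus:
#   assert "y4i" in GCtoIEC["P_yi"]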
# Value given to float metrics when there is none available
# to avoid magic numbers
NO_RESULTS = -1.0
GCSKIPCODES = frozenset(("E001", "C005", "Z002", "W001"))
# Define the command line arguments
parser = argparse.ArgumentParser(
description=(
"This program evaluates the spelling and grammar checking performance "
"of GreynirCorrect on iceErrorCorpus"
)
)
parser.add_argument(
"path",
nargs="?",
type=str,
help=f"glob path of XML files to process (default: {_DEV_PATH})",
)
parser.add_argument(
"-n",
"--number",
type=int,
default=0,
help="number of files to process (default=all)",
)
parser.add_argument(
"-c",
"--cores",
type=int,
help=f"number of CPU cores to use (default=all, i.e. {os.cpu_count() or 1})",
)
parser.add_argument(
"-m",
"--measure",
action="store_true",
help="run measurements on test corpus and output results only",
)
parser.add_argument(
"-r",
"--randomize",
action="store_true",
help="process a random subset of files",
)
parser.add_argument(
"-q",
"--quiet",
default=None,
action="store_true",
help="output results only, not individual sentences",
)
parser.add_argument(
"-v",
"--verbose",
default=None,
action="store_true",
help="output individual sentences as well as results, even for the test corpus",
)
parser.add_argument(
"-x",
"--exclude",
action="store_true",
help="Exclude sentences marked for exclusion",
)
parser.add_argument(
"-s",
"--single",
type=str,
default="",
help="Get results for a single error category",
)
parser.add_argument(
"-a",
"--analysis",
action="store_true",
help="Create an analysis report for token results",
)
parser.add_argument(
"-f", "--catfile", type=str, default="iceErrorCorpus/errorCodes.tsv"
)
# This boolean global is set to True for quiet output,
# which is the default when processing the test corpus
QUIET = False
# This boolean global is set to True if only a single
# error category should be analyzed
SINGLE = False
# This boolean global is set to True if sentences marked
# with an exclusion flag should be excluded from processing
EXCLUDE = False
# This boolean global is set to True for token-level analysis
ANALYSIS = False
def element_text(element: ET.Element) -> str:
"""Return the text of the given element,
including all its subelements, if any"""
return "".join(element.itertext())
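
# A quick self-contained illustration of element_text() (the markup here is
# made up for the example, not actual iceErrorCorpus TEI):
#   >>> element_text(ET.fromstring("<s>Hann <w>fór</w> út</s>"))
#   'Hann fór út'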
class Stats:
"""A container for key statistics on processed files and sentences"""
def __init__(self) -> None:
"""Initialize empty defaults for the stats collection"""
self._starttime = datetime.utcnow()
self._files: Dict[str, int] = defaultdict(int)
# We employ a trick to make the defaultdicts picklable between processes:
# instead of the usual lambda: defaultdict(int), use defaultdict(int).copy
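        # (For example, pickle.dumps(SentenceStatsDict(int).copy) succeeds,
        # while a defaultdict built with a lambda factory fails to pickle,
        # since the lambda itself cannot be serialized.)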
self._sentences: CategoryStatsDict = CategoryStatsDict(
SentenceStatsDict(int).copy
)
self._errtypes: ErrTypeStatsDict = ErrTypeStatsDict(Counter)
self._true_positives: DefaultDict[str, int] = defaultdict(int)
self._false_negatives: DefaultDict[str, int] = defaultdict(int)
self._tp: DefaultDict[str, int] = defaultdict(int)
self._tn: DefaultDict[str, int] = defaultdict(int)
self._fp: DefaultDict[str, int] = defaultdict(int)
self._fn: DefaultDict[str, int] = defaultdict(int)
self._right_corr: DefaultDict[str, int] = defaultdict(int)
self._wrong_corr: DefaultDict[str, int] = defaultdict(int)
self._ctp: DefaultDict[str, int] = defaultdict(int)
self._ctn: DefaultDict[str, int] = defaultdict(int)
self._cfp: DefaultDict[str, int] = defaultdict(int)
self._cfn: DefaultDict[str, int] = defaultdict(int)
self._right_span: DefaultDict[str, int] = defaultdict(int)
self._wrong_span: DefaultDict[str, int] = defaultdict(int)
# reference error code : freq - for hypotheses with the unparsable error code
self._tp_unparsables: DefaultDict[str, int] = defaultdict(int)
def add_file(self, category: str) -> None:
"""Add a processed file in a given content category"""
self._files[category] += 1
def add_result(
self,
*,
stats: List[StatsTuple],
true_positives: Dict[str, int],
false_negatives: Dict[str, int],
ups: Dict[str, int],
errtypefreqs: ErrTypeStatsDict,
) -> None:
"""Add the result of a process() call to the statistics collection"""
for sent_result in stats:
self.add_sentence(*sent_result)
for k, v in true_positives.items():
self._true_positives[k] += v
for k, v in false_negatives.items():
self._false_negatives[k] += v
for k, v in ups.items():
self._tp_unparsables[k] += v
for okey, d in errtypefreqs.items(): # okey = xtype; d = DefaultDict[str, int]
for ikey, vv in d.items(): # ikey = tp, fn, ...
self._errtypes[okey][ikey] += vv # v = freq for each metric
def add_sentence(
self,
category: str,
num_tokens: int,
ice_error: bool,
gc_error: bool,
tp: int,
tn: int,
fp: int,
fn: int,
right_corr: int,
wrong_corr: int,
ctp: int,
ctn: int,
cfp: int,
cfn: int,
right_span: int,
wrong_span: int,
) -> None:
"""Add a processed sentence in a given content category"""
d = self._sentences[category]
d["count"] += 1
d["num_tokens"] += num_tokens
d["ice_errors"] += 1 if ice_error else 0
d["gc_errors"] += 1 if gc_error else 0
# True negative: neither iceErrorCorpus nor GC report an error
true_negative = not ice_error and not gc_error
d["true_negatives"] += 1 if true_negative else 0
# True positive: both iceErrorCorpus and GC report an error
true_positive = ice_error and gc_error
d["true_positives"] += 1 if true_positive else 0
# False negative: iceErrorCorpus reports an error where GC doesn't
false_negative = ice_error and not gc_error
d["false_negatives"] += 1 if false_negative else 0
# False positive: GC reports an error where iceErrorCorpus doesn't
false_positive = gc_error and not ice_error
d["false_positives"] += 1 if false_positive else 0
# Stats for error detection for sentence
d["tp"] += tp
d["tn"] += tn
d["fp"] += fp
d["fn"] += fn
# Stats for error correction ratio
d["right_corr"] += right_corr
d["wrong_corr"] += wrong_corr
# Stats for error correction for sentence
d["ctp"] += ctp
d["ctn"] += ctn
d["cfp"] += cfp
d["cfn"] += cfn
# Stats for error span
d["right_span"] += right_span
d["wrong_span"] += wrong_span
def output(self, cores: int) -> None:
"""Write the statistics to stdout"""
# Accumulate standard output in a buffer, for writing in one fell
# swoop at the end (after acquiring the output lock)
if SINGLE:
bprint(f"")
num_sentences: int = sum(
cast(int, d["count"]) for d in self._sentences.values()
)
def output_duration() -> None: # type: ignore
"""Calculate the duration of the processing"""
dur = int((datetime.utcnow() - self._starttime).total_seconds())
h = dur // 3600
m = (dur % 3600) // 60
s = dur % 60
# Output a summary banner
bprint(f"\n" + "=" * 7)
bprint(f"Summary")
bprint(f"=" * 7 + "\n")
# Total number of files processed, and timing stats
bprint(f"Processing started at {str(self._starttime)[0:19]}")
bprint(f"Total processing time {h}h {m:02}m {s:02}s, using {cores} cores")
bprint(f"\nFiles processed: {sum(self._files.values()):6}")
for c in GENRES:
bprint(f" {c:<13}: {self._files[c]:6}")
# Total number of tokens processed
num_tokens = sum(d["num_tokens"] for d in self._sentences.values())
bprint(f"\nTokens processed: {num_tokens:6}")
for c in GENRES:
bprint(f" {c:<13}: {self._sentences[c]['num_tokens']:6}")
# Total number of sentences processed
bprint(f"\nSentences processed: {num_sentences:6}")
for c in GENRES:
bprint(f" {c:<13}: {self._sentences[c]['count']:6}")
def perc(n: int, whole: int) -> str:
"""Return a percentage of total sentences, formatted as 3.2f"""
if whole == 0:
return "N/A"
return f"{100.0*n/whole:3.2f}"
def write_basic_value(
val: int, bv: str, whole: int, errwhole: Optional[int] = None
) -> None:
"""Write basic values for sentences and their freqs to stdout"""
if errwhole:
bprint(
f"\n{NAMES[bv]+':':<20} {val:6} {perc(val, whole):>6}% / {perc(val, errwhole):>6}%"
)
else:
bprint(f"\n{NAMES[bv]+':':<20} {val:6} {perc(val, whole):>6}%")
for c in GENRES:
bprint(f" {c:<13}: {self._sentences[c][bv]:6}")
def calc_PRF(
tp: int,
tn: int,
fp: int,
fn: int,
tps: str,
tns: str,
fps: str,
fns: str,
recs: str,
precs: str,
) -> None:
"""Calculate precision, recall and F0.5-score"""
# Recall
if tp + fn == 0:
result = "N/A"
recall = 0.0
else:
recall = tp / (tp + fn)
result = f"{recall:1.4f}"
bprint(f"\nRecall: {result}")
for c in GENRES:
d = self._sentences[c]
denominator = d[tps] + d[fns]
if denominator == 0:
bprint(f" {c:<13}: N/A")
else:
rc = d[recs] = d[tps] / denominator
bprint(f" {c:<13}: {rc:1.4f}")
# Precision
if tp + fp == 0:
result = "N/A"
precision = 0.0
else:
precision = tp / (tp + fp)
result = f"{precision:1.4f}"
bprint(f"\nPrecision: {result}")
for c in GENRES:
d = self._sentences[c]
denominator = d[tps] + d[fps]
if denominator == 0:
bprint(f" {c:<13}: N/A")
else:
p = d[precs] = d[tps] / denominator
bprint(f" {c:<13}: {p:1.4f}")
# F0.5 score
if precision + recall > 0.0:
f05 = 1.25 * (precision * recall) / (0.25 * precision + recall)
result = f"{f05:1.4f}"
else:
f05 = 0.0
result = "N/A"
bprint(f"\nF0.5 score: {result}")
for c in GENRES:
d = self._sentences[c]
if recs not in d or precs not in d:
bprint(f" {c:<13}: N/A")
continue
rc = d[recs]
p = d[precs]
if p + rc > 0.0:
f05 = 1.25 * (p * rc) / (0.25 * p + rc)
bprint(f" {c:<13}: {f05:1.4f}")
else:
bprint(f" {c:<13}: N/A")
def calc_recall(
right: int, wrong: int, rights: str, wrongs: str, recs: str
) -> None:
"""Calculate precision for binary classification"""
# Recall
if right + wrong == 0:
result = "N/A"
recall = 0.0
else:
recall = right / (right + wrong)
result = f"{recall:1.4f}"
bprint(f"\nRecall: {result}")
for c in GENRES:
d = self._sentences[c]
denominator = d[rights] + d[wrongs]
if denominator == 0:
bprint(f" {c:<13}: N/A")
else:
rc = d[recs] = d[rights] / denominator
bprint(f" {c:<13}: {rc:1.4f}")
def calc_error_category_metrics(cat: str) -> CatResultDict:
"""Calculates precision, recall and f0.5-score for a single error code
N = Number of errors in category z in reference corpus,
Nall = number of tokens
TP = Errors correctly classified as category z
FP = Errors (or non-errors) incorrectly classified as category z
FN = Errors in category z in reference but not hypothesis
Recall = TPz/(TPz+FPz)
Precision = TPz/(TPz+FNz)
F0.5-score = 1.25*(P*R)/(0.25*P+R)
"""
catdict: CatResultDict = {k: v for k, v in self._errtypes[cat].items()}
tp = cast(int, catdict.get("tp", 0))
fn = cast(int, catdict.get("fn", 0))
fp = cast(int, catdict.get("fp", 0))
recall: float = NO_RESULTS
precision: float = NO_RESULTS
ctp = cast(int, catdict.get("ctp", 0))
cfn = cast(int, catdict.get("cfn", 0))
cfp = cast(int, catdict.get("cfp", 0))
crecall: float = NO_RESULTS
cprecision: float = NO_RESULTS
catdict["freq"] = tp + fn
if tp + fn + fp == 0: # No values in category
catdict["recall"] = NO_RESULTS
catdict["precision"] = NO_RESULTS
catdict["f05score"] = NO_RESULTS
catdict["crecall"] = NO_RESULTS
catdict["cprecision"] = NO_RESULTS
catdict["cf05score"] = NO_RESULTS
else:
# Error detection metrics
# Recall
if tp + fn != 0:
recall = catdict["recall"] = tp / (tp + fn)
# Precision
if tp + fp != 0:
precision = catdict["precision"] = tp / (tp + fp)
# F0.5 score
if recall + precision > 0.0:
catdict["f05score"] = (
1.25 * (precision * recall) / (0.25 * precision + recall)
)
else:
catdict["f05score"] = NO_RESULTS
# Error correction metrics
# Recall
if ctp + cfn != 0:
crecall = catdict["crecall"] = ctp / (ctp + cfn)
# Precision
if ctp + cfp != 0:
cprecision = catdict["cprecision"] = ctp / (ctp + cfp)
# F0.5 score
if crecall + cprecision > 0.0:
catdict["cf05score"] = (
1.25 * (cprecision * crecall) / (0.25 * cprecision + crecall)
)
else:
catdict["cf05score"] = NO_RESULTS
# Correction recall (not used)
right_corr = cast(int, catdict.get("right_corr", 0))
if right_corr > 0:
catdict["corr_rec"] = right_corr / (
right_corr + cast(int, catdict.get("wrong_corr", 0))
)
else:
catdict["corr_rec"] = -1.0
# Span recall
right_span = cast(int, catdict.get("right_span", 0))
if right_span > 0:
catdict["span_rec"] = right_span / (
right_span + cast(int, catdict.get("wrong_span", 0))
)
else:
catdict["span_rec"] = NO_RESULTS
return catdict
def output_sentence_scores() -> None: # type: ignore
"""Calculate and write sentence scores to stdout"""
# Total number of true negatives found
bprint(f"\nResults for error detection for whole sentences")
true_positives: int = sum(
cast(int, d["true_positives"]) for d in self._sentences.values()
)
true_negatives: int = sum(
cast(int, d["true_negatives"]) for d in self._sentences.values()
)
false_positives: int = sum(
cast(int, d["false_positives"]) for d in self._sentences.values()
)
false_negatives: int = sum(
cast(int, d["false_negatives"]) for d in self._sentences.values()
)
write_basic_value(true_positives, "true_positives", num_sentences)
write_basic_value(true_negatives, "true_negatives", num_sentences)
write_basic_value(false_positives, "false_positives", num_sentences)
write_basic_value(false_negatives, "false_negatives", num_sentences)
# Percentage of true vs. false
true_results = true_positives + true_negatives
false_results = false_positives + false_negatives
if num_sentences == 0:
result = "N/A"
else:
result = (
perc(true_results, num_sentences)
+ "%/"
+ perc(false_results, num_sentences)
+ "%"
)
bprint(f"\nTrue/false split: {result:>16}")
for c in GENRES:
d = self._sentences[c]
num_sents = d["count"]
true_results = cast(int, d["true_positives"] + d["true_negatives"])
false_results = cast(int, d["false_positives"] + d["false_negatives"])
if num_sents == 0:
result = "N/A"
else:
result = f"{100.0*true_results/num_sents:3.2f}%/{100.0*false_results/num_sents:3.2f}%"
bprint(f" {c:<13}: {result:>16}")
# Precision, recall, F0.5-score
calc_PRF(
true_positives,
true_negatives,
false_positives,
false_negatives,
"true_positives",
"true_negatives",
"false_positives",
"false_negatives",
"sentrecall",
"sentprecision",
)
# Most common false negative error types
# total = sum(self._false_negatives.values())
# if total > 0:
# bprint(f"\nMost common false negative error types")
# bprint(f"--------------------------------------\n")
# for index, (xtype, cnt) in enumerate(
# heapq.nlargest(
# 20, self._false_negatives.items(), key=lambda x: x[1]
# )
# ):
# bprint(f"{index+1:3}. {xtype} ({cnt}, {100.0*cnt/total:3.2f}%)")
# Most common error types in unparsable sentences
# tot = sum(self._tp_unparsables.values())
# if tot > 0:
# bprint(f"\nMost common error types for unparsable sentences")
# bprint(f"------------------------------------------------\n")
# for index, (xtype, cnt) in enumerate(
# heapq.nlargest(20, self._tp_unparsables.items(), key=lambda x: x[1])
# ):
# bprint(f"{index+1:3}. {xtype} ({cnt}, {100.0*cnt/tot:3.2f}%)")
def output_token_scores() -> None: # type: ignore
"""Calculate and write token scores to stdout"""
bprint(f"\n\nResults for error detection within sentences")
num_tokens = sum(
cast(int, d["num_tokens"]) for d in self._sentences.values()
)
bprint(f"\nTokens processed: {num_tokens:6}")
for c in GENRES:
bprint(f" {c:<13}: {self._sentences[c]['num_tokens']:6}")
tp = sum(cast(int, d["tp"]) for d in self._sentences.values())
tn = sum(cast(int, d["tn"]) for d in self._sentences.values())
fp = sum(cast(int, d["fp"]) for d in self._sentences.values())
fn = sum(cast(int, d["fn"]) for d in self._sentences.values())
all_ice_errs = tp + fn
write_basic_value(tp, "tp", num_tokens, all_ice_errs)
write_basic_value(tn, "tn", num_tokens)
write_basic_value(fp, "fp", num_tokens, all_ice_errs)
write_basic_value(fn, "fn", num_tokens, all_ice_errs)
calc_PRF(
tp,
tn,
fp,
fn,
"tp",
"tn",
"fp",
"fn",
"detectrecall",
"detectprecision",
)
# Strict: Of all errors in the error corpus, how many get the right correction?
# Loose: Of all errors the tool correctly finds, how many get the right correction?
# Only recall can be calculated here.
bprint(f"\nResults for error correction")
right_corr = sum(
cast(int, d["right_corr"]) for d in self._sentences.values()
)
wrong_corr = sum(
cast(int, d["wrong_corr"]) for d in self._sentences.values()
)
write_basic_value(right_corr, "right_corr", num_tokens, tp)
write_basic_value(wrong_corr, "wrong_corr", num_tokens, tp)
calc_recall(
right_corr, wrong_corr, "right_corr", "wrong_corr", "correctrecall"
)
# Strict: Of all errors in the error corpus, how many get the right span?
# Loose: Of all errors the tool correctly finds, how many get the right span?
# Only recall can be calculated here.
bprint(f"\nResults for error span")
right_span = sum(
cast(int, d["right_span"]) for d in self._sentences.values()
)
wrong_span = sum(
cast(int, d["wrong_span"]) for d in self._sentences.values()
)
write_basic_value(right_span, "right_span", num_tokens, tp)
write_basic_value(wrong_span, "wrong_span", num_tokens, tp)
calc_recall(
right_span, wrong_span, "right_span", "wrong_span", "spanrecall"
)
def output_error_cat_scores() -> None:
"""Calculate and write scores for each error category to stdout"""
bprint(f"\n\nResults for each error category in order by frequency")
freqdict: Dict[str, int] = dict()
microf05: float = 0.0
nfreqs: int = 0
resultdict: Dict[str, CatResultDict] = dict()
# Iterate over category counts
for cat in self._errtypes.keys():
# Get recall, precision and F0.5; recall for correction and span
catdict = resultdict[cat] = calc_error_category_metrics(cat)
# Collect micro scores, both overall and for in-scope categories
freq = cast(int, catdict["freq"])
assert isinstance(freq, int)
f05score = cast(float, catdict["f05score"])
assert isinstance(f05score, float)
if cat not in OUT_OF_SCOPE:
microf05 += f05score * freq
nfreqs += freq
# Create freqdict for sorting error categories by frequency
freqdict[cat] = freq
# Print results for each category by frequency
for k in sorted(freqdict, key=freqdict.__getitem__, reverse=True):
rk = resultdict[k]
bprint("{} (in_scope={})".format(k, k not in OUT_OF_SCOPE))
bprint(
"\tTP, FP, FN: {}, {}, {}".format(
rk.get("tp", 0),
rk.get("fp", 0),
rk.get("fn", 0),
)
)
bprint(
"\tRe, Pr, F0.5: {:3.2f}, {:3.2f}, {:3.2f}".format(
cast(float, rk.get("recall", 0.0)) * 100.0,
cast(float, rk.get("precision", 0.0)) * 100.0,
cast(float, rk.get("f05score", 0.0)) * 100.0,
)
)
if (
rk.get("corr_rec", "N/A") == "N/A"
or rk.get("span_rec", "N/A") == "N/A"
):
bprint("\tCorr, span: N/A, N/A")
else:
bprint(
"\tCorr, span: {:3.2f}, {:3.2f}".format(
cast(float, rk.get("corr_rec", 0.0)) * 100.0,
cast(float, rk.get("span_rec", 0.0)) * 100.0,
)
)
# Micro F0.5-score
# Results for in-scope categories and all categories
if nfreqs != 0:
bprint(
"F0.5-score: {:3.2f}".format(
microf05 / nfreqs * 100.0,
)
)
else:
bprint(f"F0.5-score: N/A")
def output_supercategory_scores():
"""Error detection results for each supercategory in iEC given
in SUPERCATEGORIES, each subcategory, and error code"""
bprint("Supercategory: frequency, F-score")
bprint("\tSubcategory: frequency, F-score")
bprint(
"\t\tError code: frequency, (recall, precision, F-score), (tp, fn, fp)| correct recall"
)
totalfreq = 0
totalf = 0.0
for supercat in SUPERCATEGORIES:
# supercategory: {subcategory : error code}
# entry = supercategory, catlist = {subcategory : error code}
superblob = ""
superfreq = 0
superf = 0.0
for subcat in SUPERCATEGORIES[supercat]:
subblob = ""
subfreq = 0
subf = 0.0
for code in SUPERCATEGORIES[supercat][subcat]:
if code not in OUT_OF_SCOPE:
et = calc_error_category_metrics(code)
if et["f05score"] == "N/A":
continue
freq = cast(int, et["freq"])
fscore = cast(float, et["f05score"])
# codework
subblob = subblob + "\t\t{} {} ({:3.2f}, {:3.2f}, {:3.2f}) ({},{},{})| {}\n".format(
code,
freq,
cast(float, et["recall"]) * 100.0
if "recall" in et
else 0.0,
cast(float, et["precision"]) * 100.0
if "precision" in et
else 0.0,
fscore * 100.0,
cast(int, et["tp"]) if "tp" in et else 0,
cast(int, et["fn"]) if "fn" in et else 0,
cast(int, et["fp"]) if "fp" in et else 0,
cast(float, et["corr_rec"])
if "corr_rec" in et
else 0.0,
)
# subwork
subfreq += freq
subf += fscore * freq * 100.0
if subfreq != 0:
subblob = (
"\t{} {} {}\n".format(
subcat.capitalize(), subfreq, subf / subfreq
)
+ subblob
)
else:
subblob = (
"\t{} 0 N/A\n".format(subcat.capitalize()) + subblob
)
# superwork
# freq, f05
superblob += subblob
superfreq += subfreq
superf += subf # TODO is this correct?
if superfreq != 0:
superblob = (
"\n{} {} {}\n".format(
supercat.capitalize(), superfreq, superf / superfreq
)
+ superblob
)
else:
superblob = (
"\n{} 0 N/A\n".format(supercat.capitalize()) + superblob
)
totalfreq += superfreq
totalf += superf # TODO is this correct?
bprint("".join(superblob))
bprint("Total frequency: {}".format(totalfreq))
bprint("Total F-score: {}".format(totalf / totalfreq))
def output_all_scores():
"""Results for each supercategory in iEC given in SUPERCATEGORIES, each subcategory, and error code, in tsv format."""
bprint(
"Category\tfrequency\ttp\tfn\tfp\trecall\tprecision\tF-score\tctp\tcfn\tcfp\tcrecall\tcprecision\tCF-score"
)
totalfreq = 0
totaltp = 0
totalfn = 0
totalfp = 0
totalrecall = 0.0
totalprecision = 0.0
totalf = 0.0
totalctp = 0
totalcfn = 0
totalcfp = 0
totalcrecall = 0.0
totalcprecision = 0.0
totalcf = 0.0
for supercat in SUPERCATEGORIES:
# supercategory: {subcategory : error code}
# entry = supercategory, catlist = {subcategory : error code}
superfreq = 0
supertp = 0
superfn = 0
superfp = 0
superrecall = 0.0
superprecision = 0.0
superf = 0.0
superctp = 0
supercfn = 0
supercfp = 0
supercrecall = 0.0
supercprecision = 0.0
supercf = 0.0
superblob = ""
for subcat in SUPERCATEGORIES[supercat]:
subfreq = 0
subtp = 0
subfn = 0
subfp = 0
subrecall = 0.0
subprecision = 0.0
subf = 0.0
subctp = 0
subcfn = 0
subcfp = 0
subcrecall = 0.0
subcprecision = 0.0
subcf = 0.0
subblob = ""
for code in SUPERCATEGORIES[supercat][subcat]:
if code not in OUT_OF_SCOPE:
et = calc_error_category_metrics(code)
freq = cast(int, et["freq"])
fscore = cast(float, et["f05score"])
cfscore = cast(float, et["cf05score"])
# codework
subblob = subblob + "{}\t{}\t{}\t{}\t{}\t{:3.2f}\t{:3.2f}\t{:3.2f}\t{}\t{}\t{}\t{:3.2f}\t{:3.2f}\t{:3.2f}\n".format(
code,
freq,
cast(int, et["tp"]) if "tp" in et else 0,
cast(int, et["fn"]) if "fn" in et else 0,
cast(int, et["fp"]) if "fp" in et else 0,
cast(float, et["recall"]) * 100.0
if ("recall" in et and et["recall"] > 0.0)
else NO_RESULTS, # Or "N/A", but that messes with the f-string formatting
cast(float, et["precision"]) * 100.0
if ("precision" in et and et["precision"] > 0.0)
else NO_RESULTS,
fscore * 100.0 if fscore > 0.0 else NO_RESULTS,
cast(int, et["ctp"]) if "ctp" in et else 0,
cast(int, et["cfn"]) if "cfn" in et else 0,
cast(int, et["cfp"]) if "cfp" in et else 0,
cast(float, et["crecall"]) * 100.0
if ("crecall" in et and et["crecall"] > 0.0)
else NO_RESULTS,
cast(float, et["cprecision"]) * 100.0
if ("cprecision" in et and et["cprecision"] > 0.0)
else NO_RESULTS,
cfscore * 100.0 if cfscore > 0.0 else NO_RESULTS,
)
# subwork
subfreq += freq
subtp += cast(int, et["tp"]) if "tp" in et else 0
subfn += cast(int, et["fn"]) if "fn" in et else 0
subfp += cast(int, et["fp"]) if "fp" in et else 0
subrecall += (
cast(float, et["recall"]) * freq * 100.0
if ("recall" in et and et["recall"] > 0.0)
else 0.0
)
subprecision += (
cast(float, et["precision"]) * freq * 100.0
if ("precision" in et and et["precision"] > 0.0)
else 0.0
)
subf += fscore * freq * 100.0 if fscore > 0.0 else 0.0
subctp += cast(int, et["ctp"]) if "ctp" in et else 0
subcfn += cast(int, et["cfn"]) if "cfn" in et else 0
subcfp += cast(int, et["cfp"]) if "cfp" in et else 0
subcrecall += (
cast(float, et["crecall"]) * freq * 100.0
if ("crecall" in et and et["crecall"] > 0.0)
else 0.0
)
subcprecision += (
cast(float, et["cprecision"]) * freq * 100.0
if ("cprecision" in et and et["cprecision"] > 0.0)
else 0.0
)
subcf += cfscore * freq * 100.0 if cfscore > 0.0 else 0.0
if subfreq != 0:
subblob = (
"\n{}\t{}\t{}\t{}\t{}\t{:3.2f}\t{:3.2f}\t{:3.2f}\t{}\t{}\t{}\t{:3.2f}\t{:3.2f}\t{:3.2f}\n".format(
subcat.capitalize(),
subfreq,
subtp,
subfn,
subfp,
subrecall / subfreq,
subprecision / subfreq,
subf / subfreq,
subctp,
subcfn,
subcfp,
subcrecall / subfreq,
subcprecision / subfreq,
subcf / subfreq,
)
+ subblob
)
else:
subblob = (
"\n{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
subcat.capitalize(),
subfreq,
subtp,
subfn,
subfp,
NO_RESULTS,
NO_RESULTS,
NO_RESULTS,
subctp,
subcfn,
subcfp,
NO_RESULTS,
NO_RESULTS,
NO_RESULTS,
)
+ subblob
)
# superwork
superblob += subblob
superfreq += subfreq
supertp += subtp
superfn += subfn
superfp += subfp
superrecall += subrecall
superprecision += subprecision
superf += subf
superctp += subctp
supercfn += subcfn
supercfp += subcfp
supercrecall += subcrecall
supercprecision += subcprecision
supercf += subcf
if superfreq != 0:
superblob = (
"\n{}\t{}\t{}\t{}\t{}\t{:3.2f}\t{:3.2f}\t{:3.2f}\t{}\t{}\t{}\t{:3.2f}\t{:3.2f}\t{:3.2f}\n".format(
supercat.capitalize(),
superfreq,
supertp,
superfn,
superfp,
superrecall / superfreq,
superprecision / superfreq,
superf / superfreq,
superctp,
supercfn,
supercfp,
supercrecall / superfreq,
supercprecision / superfreq,
supercf / superfreq,
)
+ superblob
)
else:
superblob = (
"\n{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
supercat.capitalize(),
superfreq,
supertp,
superfn,
superfp,
NO_RESULTS,
NO_RESULTS,
NO_RESULTS,
superctp,
supercfn,
supercfp,
NO_RESULTS,
NO_RESULTS,
NO_RESULTS,
)
+ superblob
)
totalfreq += superfreq
totaltp += supertp
totalfn += superfn
totalfp += superfp
totalrecall += superrecall
totalprecision += superprecision
totalf += superf
totalctp += superctp
totalcfn += supercfn
totalcfp += supercfp
totalcrecall += supercrecall
totalcprecision += supercprecision
totalcf += supercf
bprint("".join(superblob))
bprint(
"\n{}\t{}\t{}\t{}\t{}\t{:3.2f}\t{:3.2f}\t{:3.2f}\t{}\t{}\t{}\t{:3.2f}\t{:3.2f}\t{:3.2f}\n".format(
"Total",
totalfreq,
totaltp,
totalfn,
totalfp,
totalrecall / totalfreq,
totalprecision / totalfreq,
totalf / totalfreq,
totalctp,
totalcfn,
totalcfp,
totalcrecall / totalfreq,
totalcprecision / totalfreq,
totalcf / totalfreq,
)
)
# output_duration()
# output_sentence_scores()
# output_token_scores()
# output_error_cat_scores()
bprint(f"\n\nResults for iEC-categories:")
# output_supercategory_scores()
output_all_scores()
# Print the accumulated output before exiting
for s in buffer:
print(s)
def correct_spaces(tokens: List[Tuple[str, str]]) -> str:
"""Returns a string with a reasonably correct concatenation
of the tokens, where each token is a (tag, text) tuple."""
return detokenize(
Tok(TOK.PUNCTUATION if tag == "c" else TOK.WORD, txt, None)
for tag, txt in tokens
)
# Accumulate standard output in a buffer, for writing in one fell
# swoop at the end (after acquiring the output lock)
buffer: List[str] = []
def bprint(s: str):
"""Buffered print: accumulate output for printing at the end"""
buffer.append(s)
def process(fpath_and_category: Tuple[str, str]) -> Dict[str, Any]:
"""Process a single error corpus file in TEI XML format.
This function is called within a multiprocessing pool
and therefore usually executes in a child process, separate
from the parent process. It should thus not modify any
global state, and arguments and return values should be
picklable."""
# Unpack arguments
fpath, category = fpath_and_category
# Set up XML namespace stuff
NS = "http://www.tei-c.org/ns/1.0"
# Length of namespace prefix to cut from tag names, including { }
nl = len(NS) + 2
# Namespace dictionary to be passed to ET functions
ns = dict(ns=NS)
# Statistics about processed sentences. These data will
# be returned back to the parent process.
stats: List[StatsTuple] = []
# Counter of iceErrorCorpus error codes (xtypes) encountered
true_positives: Dict[str, int] = defaultdict(int)
false_negatives: Dict[str, int] = defaultdict(int)
# Counter of iceErrorCorpus error codes in unparsable sentences
ups: Dict[str, int] = defaultdict(int)
# Stats for each error code (xtypes)
errtypefreqs: ErrTypeStatsDict = ErrTypeStatsDict(TypeFreqs().copy)
try:
if not QUIET:
# Output a file header
bprint("-" * 64)
bprint(f"File: {fpath}")
bprint("-" * 64)
# Parse the XML file into a tree
try:
tree = ET.parse(fpath)
except ET.ParseError as e:
if QUIET:
bprint(f"000: *** Unable to parse XML file {fpath} ***")
else:
bprint(f"000: *** Unable to parse XML file ***")
raise e
# Obtain the root of the XML tree
root = tree.getroot()
# Iterate through the sentences in the file
for sent in root.findall("ns:text/ns:body/ns:p/ns:s", ns):
# Skip the sentence if it is marked for exclusion
if EXCLUDE:
exc = sent.attrib.get("exclude", "")
if exc:
continue
check = False # When --single is given, tracks whether that error code occurs in this sentence
# Sentence identifier (index)
index = sent.attrib.get("n", "")
tokens: List[Tuple[str, str]] = []
errors: List[ErrorDict] = []
# A dictionary of errors by their index (idx field)
error_indexes: Dict[str, ErrorDict] = {}
dependencies: List[Tuple[str, ErrorDict]] = []
analysisblob: List[str] = []
# Error corpora annotations for sentences marked as unparsable
# Enumerate through the tokens in the sentence
for el in sent:
tag = el.tag[nl:]
if tag == "revision":
# An error annotation starts here, eventually
# spanning multiple tokens
original = ""
corrected = ""
# Note the index of the starting token within the span
start = len(tokens)
# Revision id
rev_id = el.attrib["id"]
# Look at the original text
el_orig = el.find("ns:original", ns)
if el_orig is not None:
# We have 0 or more original tokens embedded
# within the revision tag
orig_tokens = [
(subel.tag[nl:], element_text(subel)) for subel in el_orig
]
tokens.extend(orig_tokens)
original = " ".join(t[1] for t in orig_tokens).strip()
# Calculate the index of the ending token within the span
end = max(start, len(tokens) - 1)
# Look at the corrected text
el_corr = el.find("ns:corrected", ns)
if el_corr is not None:
corr_tokens = [element_text(subel) for subel in el_corr]
corrected = " ".join(corr_tokens).strip()
# Accumulate the annotations (errors)
for el_err in el.findall("ns:errors/ns:error", ns):
attr = el_err.attrib
# Collect relevant information into a dict
xtype: str = attr["xtype"].lower()
error: ErrorDict = dict(
start=start,
end=end,
rev_id=rev_id,
xtype=xtype,
in_scope=xtype not in OUT_OF_SCOPE,
eid=attr.get("eid", ""),
original=original,
corrected=corrected,
)
errors.append(error)
# Temporarily index errors by the idx field
idx = attr.get("idx")
if idx:
error_indexes[idx] = error
# Accumulate dependencies that need to be "fixed up",
# i.e. errors that depend on and refer to other errors
# within the sentence
if xtype == "dep":
dep_id = attr.get("depId")
if dep_id:
# Note the fact that this error depends on the
# error with idx=dep_id
dependencies.append((dep_id, error))
else:
if QUIET:
bprint(f"In file {fpath}:")
bprint(
f"\n{index}: *** 'depId' attribute missing for dependency ***"
)
if SINGLE and xtype == SINGLE:
check = True
else:
tokens.append((tag, element_text(el)))
# Fix up the dependencies, if any
for dep_id, error in dependencies:
if dep_id not in error_indexes:
if QUIET:
bprint(f"In file {fpath}:")
bprint(f"\n{index}: *** No error has idx='{dep_id}' ***")
else:
# Copy the in_scope attribute from the original error
error["in_scope"] = error_indexes[dep_id]["in_scope"]
if SINGLE and not check:
continue
# Reconstruct the original sentence
# TODO switch for sentence from original text file
text = correct_spaces(tokens)
if not text:
# Nothing to do: drop this and go to the next sentence
continue
# print(text)
# Pass it to GreynirCorrect
pg = [list(p) for p in gc_check(text)]
s: Optional[_Sentence] = None
if len(pg) >= 1 and len(pg[0]) >= 1:
s = pg[0][0]
if len(pg) > 1 or (len(pg) == 1 and len(pg[0]) > 1):
# if QUIET:
# bprint(f"In file {fpath}:")
# bprint(
# f"\n{index}: *** Input contains more than one sentence *** {text}"
# )
pass
if s is None:
# if QUIET:
# bprint(f"In file {fpath}:")
# bprint(f"\n{index}: *** No parse for sentence *** {text}")
pass
if not QUIET:
# Output the original sentence
bprint(f"\n{index}: {text}")
if not index:
if QUIET:
bprint(f"In file {fpath}:")
bprint("000: *** Sentence identifier is missing ('n' attribute) ***")
def sentence_results(
hyp_annotations: List[Annotation], ref_annotations: List[ErrorDict]
) -> Tuple[bool, bool]:
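"""Compare hypothesis (GreynirCorrect) and reference (iceErrorCorpus)
annotations at the whole-sentence level; returns (gc_error, ice_error)"""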
gc_error = False
ice_error = False
unparsable = False
# Output GreynirCorrect annotations
for ann in hyp_annotations:
if ann.is_error:
gc_error = True
if ann.code == "E001":
unparsable = True
if not QUIET:
bprint(f">>> {ann}")
# Output iceErrorCorpus annotations
xtypes: Dict[str, int] = defaultdict(int)
for err in ref_annotations:
asterisk = "*"
xtype = cast(str, err["xtype"])
if err["in_scope"]:
# This is an in-scope error
asterisk = ""
ice_error = True
# Count the errors of each xtype
if xtype != "dep":
xtypes[xtype] += 1
if unparsable:
ups[xtype] += 1
if not QUIET:
bprint(
f"<<< {err['start']:03}-{err['end']:03}: {asterisk}{xtype}"
)
if not QUIET:
# Output true/false positive/negative result
if ice_error and gc_error:
bprint("=++ True positive")
for xtype in xtypes:
true_positives[xtype] += 1
elif not ice_error and not gc_error:
bprint("=-- True negative")
elif ice_error and not gc_error:
bprint("!-- False negative")
for xtype in xtypes:
false_negatives[xtype] += 1
else:
assert gc_error and not ice_error
bprint("!++ False positive")
return gc_error, ice_error
assert s is not None
assert isinstance(s, AnnotatedSentence)
gc_error, ice_error = sentence_results(s.annotations, errors)
def token_results(
hyp_annotations: Iterable[Annotation],
ref_annotations: Iterable[ErrorDict],
) -> Tuple[int, int, int, int, int, int, int, int, int, int]:
"""Calculate statistics on annotations at the token span level"""
tp, fp, fn = 0, 0, 0 # tn comes from len(tokens)-(tp+fp+fn) later on
right_corr, wrong_corr = 0, 0
ctp, cfp, cfn = (
0,
0,
0,
) # ctn comes from len(tokens)-(ctp+cfp+cfn) later on
right_span, wrong_span = 0, 0
if not hyp_annotations and not ref_annotations:
# No need to go any further
return (
tp,
fp,
fn,
right_corr,
wrong_corr,
ctp,
cfp,
cfn,
right_span,
wrong_span,
)
y = iter(hyp_annotations) # GreynirCorrect annotations
x = iter(ref_annotations) # iEC annotations
ytok: Optional[Annotation] = None
xtok: Optional[ErrorDict] = None
if ANALYSIS:
analysisblob.append("\n{}".format(text))
analysisblob.append("\tiEC:")
for iec_ann in ref_annotations:
analysisblob.append("\t\t{}".format(iec_ann))
analysisblob.append("\tGC:")
for gc_ann in hyp_annotations:
analysisblob.append("\t\t{}".format(gc_ann))
xspanlast = set([-1])
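# xspanlast remembers the span of the previous reference error so that
# multiple tags covering the same span are only counted once; the
# "set to None before calling next()" pattern below lets the
# StopIteration handler tell which iterator ran out mid-loop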
try:
ytok = next(y)
xtok = next(x)
while True:
ystart, yend = ytok.start, ytok.end
xstart, xend = cast(int, xtok["start"]), cast(int, xtok["end"])
samespan = False
# 1. Error detection
# Token span in GreynirCorrect annotation
# TODO Usually ystart, yend+1, reset when secondary comparison works
yspan = set(range(ystart, yend + 1))
# Token span in iEC annotation
xspan = set(range(xstart, xend + 1))
yorig: Set[str]
ysugg: Set[str]
if ytok.original:
yorig = set(ytok.original.split())
else:
yorig = set()
xorig = set(cast(str, xtok["original"]).split())
if ytok.suggest:
ysugg = set(ytok.suggest.split())
else:
ysugg = set()
xsugg = set(cast(str, xtok["corrected"]).split())
if xspan & yspan:
samespan = True
# Secondary comparison:
# Check if any common tokens
# and relatively same span
if abs(ystart - xstart) <= 5 or abs(yend - xend) <= 5:
if yorig and xorig and yorig.intersection(xorig):
samespan = True
if ysugg and xsugg and ysugg.intersection(xsugg):
samespan = True
# iEC error code
xtype = cast(str, xtok["xtype"])
# By default, use iEC error code
# on the GreynirCorrect side as well
ytype = xtype
if ytok.code in GCtoIEC:
# We have a mapping of the GC code
if xtype not in GCtoIEC[ytok.code]:
# The iEC code is not one that could
# correspond to a GC code.
# We select the iEC code that most commonly
# corresponds to the GC code;
# we're going to get an error for
# a wrong annotation type anyway, as ytype != xtype.
ytype = GCtoIEC[ytok.code][0]
else:
print("Error tag {} is not supported".format(ytok.code))
if ANALYSIS:
analysisblob.append(
"\tComparing:\n\t {}\n\t {} - {} ({})".format(
xtok, ytok, ytok.text, ytype
)
)
# analysisblob.append("\tXspans: {} | {}".format(xspanlast, xspan))
# Multiple tags for same error: Skip rest
if xspan == xspanlast:
if ANALYSIS:
analysisblob.append(
"\t Same span, skip: {}".format(
cast(str, xtok["xtype"])
)
)
xtok = None
xtok = next(x)
continue
if ytok.code in GCSKIPCODES or ytok.code.endswith("/w"):
# Skip these errors, shouldn't be compared.
if ANALYSIS:
analysisblob.append(
"\t Skip: {}".format(ytok.code)
)
ytok = None
ytok = next(y)
continue
if samespan:
# The annotation spans overlap
# or almost overlap and contain the same original value or correction
tp += 1
errtypefreqs[xtype]["tp"] += 1
if ANALYSIS:
analysisblob.append("\t TP: {}".format(xtype))
# 2. Span detection
if xspan == yspan:
right_span += 1
errtypefreqs[xtype]["right_span"] += 1
else:
wrong_span += 1
errtypefreqs[xtype]["wrong_span"] += 1
# 3. Error correction
ycorr = getattr(ytok, "suggest", "")
if ycorr == xtok["corrected"]:
right_corr += 1
errtypefreqs[xtype]["right_corr"] += 1
ctp += 1
errtypefreqs[xtype]["ctp"] += 1
else:
wrong_corr += 1
errtypefreqs[xtype]["wrong_corr"] += 1
cfn += 1
errtypefreqs[xtype]["cfn"] += 1
xspanlast = xspan
xtok, ytok = None, None
xtok = next(x)
ytok = next(y)
continue
# The annotation spans do not overlap
if yend < xstart:
# Extraneous GC annotation before next iEC annotation
fp += 1
errtypefreqs[ytype]["fp"] += 1
cfp += 1
errtypefreqs[ytype]["cfp"] += 1
if ANALYSIS:
analysisblob.append("\t FP: {}".format(ytype))
ytok = None
ytok = next(y)
continue
if ystart > xend:
# iEC annotation with no corresponding GC annotation
fn += 1
errtypefreqs[xtype]["fn"] += 1
cfn += 1
errtypefreqs[xtype]["cfn"] += 1
if ANALYSIS:
analysisblob.append("\t FN: {}".format(xtype))
xspanlast = xspan
xtok = None
xtok = next(x)
continue
# Should never get here
assert False
except StopIteration:
pass
# At least one of the iterators has been exhausted
# Process the remainder
if ANALYSIS and ytok:
analysisblob.append("\tDumping rest of GC errors:")
while ytok is not None:
# This is a remaining GC annotation: false positive
if ytok.code in GCSKIPCODES or ytok.code.endswith("/w"):
# Skip these errors, shouldn't be a part of the results.
if ANALYSIS:
analysisblob.append(
"\t Skip: {}".format(ytok.code)
)
ytok = next(y, None)
continue
fp += 1
ytype = GCtoIEC[ytok.code][0] if ytok.code in GCtoIEC else ytok.code
errtypefreqs[ytype]["fp"] += 1
cfp += 1
errtypefreqs[ytype]["cfp"] += 1
if ANALYSIS:
analysisblob.append("\t FP: {}".format(ytype))
ytok = next(y, None)
if ANALYSIS and xtok:
analysisblob.append("\tDumping rest of iEC errors:")
if not xtok:
# In case try fails on ytok = next(y)
xtok = next(x, None)
while xtok is not None:
# This is a remaining iEC annotation: false negative
xstart, xend = cast(int, xtok["start"]), cast(int, xtok["end"])
xspan = set(range(xstart, xend + 1))
xtype = cast(str, xtok["xtype"])
if xspan == xspanlast:
# Multiple tags for same error: Skip rest
if ANALYSIS:
analysisblob.append(
"\t Same span, skip: {}".format(xtype)
)
xtok = None
xtok = next(x, None)
else:
if ANALYSIS:
analysisblob.append("\t FN: {}".format(xtype))
fn += 1
errtypefreqs[xtype]["fn"] += 1
cfn += 1
errtypefreqs[xtype]["cfn"] += 1
xspanlast = xspan
xtok = next(x, None)
return (
tp,
fp,
fn,
right_corr,
wrong_corr,
ctp,
cfp,
cfn,
right_span,
wrong_span,
)
assert isinstance(s, AnnotatedSentence)
(
tp,
fp,
fn,
right_corr,
wrong_corr,
ctp,
cfp,
cfn,
right_span,
wrong_span,
) = token_results(s.annotations, errors)
tn = len(tokens) - tp - fp - fn
ctn = len(tokens) - ctp - cfp - cfn
# Collect statistics into the stats list, to be returned
# to the parent process
if stats is not None:
stats.append(
(
category,
len(tokens),
ice_error,
gc_error,
tp,
tn,
fp,
fn,
right_corr,
wrong_corr,
ctp,
ctn,
cfp,
cfn,
right_span,
wrong_span,
)
)
if ANALYSIS:
with open("analysis.txt", "a+") as analysis:
analysis.write("\n".join(analysisblob))
analysisblob = []
except ET.ParseError:
# Already handled the exception: exit as gracefully as possible
pass
finally:
# Print the accumulated output before exiting
with OUTPUT_LOCK:
for txt in buffer:
print(txt)
print("", flush=True)
# This return value will be pickled and sent back to the parent process
return dict(
stats=stats,
true_positives=true_positives,
false_negatives=false_negatives,
ups=ups,
errtypefreqs=errtypefreqs,
)
def initialize_cats(catfile: str) -> None:
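"""Read the error category file (a TSV with a header line and
supercategory/subcategory/error-code columns) into SUPERCATEGORIES"""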
first = True
with open(catfile, "r") as cfile:
for row in cfile:
split = row.split("\t")
if first:
first = False
else:
s0, s1, s2 = [s.strip() for s in split[0:3]]
SUPERCATEGORIES[s0][s1].append(s2)
def main() -> None:
"""Main program"""
# Parse the command line arguments
args = parser.parse_args()
# For a measurement run on the test corpus, the default is
# quiet operation. We store the flag in a global variable
# that is accessible to child processes.
global QUIET
QUIET = args.measure
# Overriding flags
if args.verbose is not None:
QUIET = False
# --quiet has precedence over --verbose
if args.quiet is not None:
QUIET = True
global EXCLUDE
EXCLUDE = args.exclude
global SINGLE
SINGLE = args.single
global ANALYSIS
ANALYSIS = args.analysis
# Maximum number of files to process (0=all files)
max_count = args.number
# Initialize the statistics collector
stats = Stats()
# The glob path of the XML files to process
path: str = args.path
initialize_cats(args.catfile)
# When running measurements only, we use _TEST_PATH as the default,
# otherwise _DEV_PATH
if path is None:
path = _TEST_PATH if args.measure else _DEV_PATH
def gen_files() -> Iterable[Tuple[str, str]]:
"""Generate tuples with the file paths and categories
to be processed by the multiprocessing pool"""
count = 0
it: Iterable[str]
if args.randomize and max_count > 0:
# Randomizing only makes sense if there is a max count as well
it = glob.glob(path, recursive=True)
it = random.sample(it, max_count)
else:
it = glob.iglob(path, recursive=True)
for fpath in it:
# Find out which genre the file belongs to by
# inference from the file name
for genre in GENRES:
if genre in fpath:
break
else:
assert False, f"File path does not contain a recognized genre: {fpath}"
# Add the file to the statistics under its genre
stats.add_file(genre)
# Yield the file information to the multiprocessing pool
yield fpath, genre
count += 1
# If there is a limit on the number of processed files,
# and we're done, stop the generator
if max_count > 0 and count >= max_count:
break
# Use a multiprocessing pool to process the articles
with multiprocessing.Pool(processes=args.cores) as pool:
# Iterate through the TEI XML files in turn and call the process()
# function on each file, in a child process within the pool
for result in pool.imap_unordered(process, gen_files()):
# Results come back as a dict of arguments that
# we pass to Stats.add_result()
stats.add_result(**result)
# Done: close the pool in an orderly manner
pool.close()
pool.join()
# Finally, acquire the output lock and write the final statistics
with OUTPUT_LOCK:
stats.output(cores=args.cores or os.cpu_count() or 1)
print("", flush=True)
if __name__ == "__main__":
main()
| 41.664453 | 177 | 0.492 | 9,641 | 94,120 | 4.727518 | 0.163157 | 0.002808 | 0.002764 | 0.002808 | 0.260542 | 0.208105 | 0.159485 | 0.131269 | 0.107947 | 0.093137 | 0 | 0.018815 | 0.396898 | 94,120 | 2,258 | 178 | 41.682905 | 0.784071 | 0.218923 | 0 | 0.266898 | 0 | 0.008666 | 0.190147 | 0.014986 | 0 | 0 | 0 | 0.000886 | 0.004622 | 1 | 0.014443 | false | 0.005199 | 0.008088 | 0 | 0.028307 | 0.045061 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
380ed18d43976269469c3c2614242e47914c12c3 | 615 | py | Python | software-updatable/script-agent/src/device_info.py | vigier/iot-suite-examples | ea3995188c7dc8c2c461ba2b1989fc72fda093a4 | [
"BSD-Source-Code"
] | 1 | 2021-02-19T10:40:48.000Z | 2021-02-19T10:40:48.000Z | software-updatable/script-agent/src/device_info.py | vigier/iot-suite-examples | ea3995188c7dc8c2c461ba2b1989fc72fda093a4 | [
"BSD-Source-Code"
] | null | null | null | software-updatable/script-agent/src/device_info.py | vigier/iot-suite-examples | ea3995188c7dc8c2c461ba2b1989fc72fda093a4 | [
"BSD-Source-Code"
] | 2 | 2021-03-18T10:41:19.000Z | 2021-03-29T09:43:41.000Z | import json
class DeviceInfo:
"""An entity that represents the information provided by edge agent over edge/thing/response topic."""
def toJson(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def compute(self, payload):
# See https://docs.bosch-iot-suite.com/edge/index.html#109655.htm
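# payload["deviceId"] has the form "<namespace>:<deviceId>"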
arr = payload["deviceId"].split(":")
self.namespace = arr[0]
self.deviceId = arr[1]
self.hubTenantId = payload["tenantId"]
self.policyId = payload["policyId"]
print("Device information is \n" + self.toJson())
| 38.4375 | 106 | 0.650407 | 79 | 615 | 5 | 0.734177 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018711 | 0.217886 | 615 | 15 | 107 | 41 | 0.802495 | 0.260163 | 0 | 0 | 0 | 0 | 0.109375 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0.090909 | 0.454545 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38103fdb2d64dfd907732198ccb8056945c45042 | 867 | py | Python | test/test_intermediate_image.py | beesandbombs/coldtype | d02c7dd36bf1576fa37dc8c50d5c1a6e47b1c5ea | [
"Apache-2.0"
] | 1 | 2021-04-04T15:25:06.000Z | 2021-04-04T15:25:06.000Z | test/test_intermediate_image.py | beesandbombs/coldtype | d02c7dd36bf1576fa37dc8c50d5c1a6e47b1c5ea | [
"Apache-2.0"
] | null | null | null | test/test_intermediate_image.py | beesandbombs/coldtype | d02c7dd36bf1576fa37dc8c50d5c1a6e47b1c5ea | [
"Apache-2.0"
] | null | null | null | from coldtype import *
import coldtype.filtering as fl
import skia
co = Font.Cacheable("assets/ColdtypeObviously-VF.ttf")
@animation(bg=hsl(0.65, l=0.83), rstate=1, storyboard=[15], timeline=Timeline(30))
def render(f, rstate):
p = f.a.progress(f.i, loops=1, easefn="qeio").e
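# p eases from 0 to 1 across the 30-frame loop ("qeio" = quadratic ease in/out)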
return (StyledString("COLDTYPE",
Style(co, 700, wdth=(p)*0.1, tu=-85+(p*50), r=1, ro=1, rotate=10*p))
.pens()
.align(f.a.r)
.f(1)
.understroke(s=0, sw=30)
.precompose(f.a.r)
.attr(skp=dict(
ImageFilter=skia.BlurImageFilter.Make(10, 10),
ColorFilter=skia.LumaColorFilter.Make()
))
.precompose(f.a.r)
.attr(skp=dict(
ColorFilter=fl.compose(
fl.as_filter(fl.contrast_cut(200+p*30, 5)),
fl.fill(hsl(0.9, l=0.5, s=0.7)),
)))) | 32.111111 | 82 | 0.560554 | 128 | 867 | 3.78125 | 0.570313 | 0.016529 | 0.018595 | 0.053719 | 0.099174 | 0.099174 | 0.099174 | 0 | 0 | 0 | 0 | 0.070093 | 0.259516 | 867 | 27 | 83 | 32.111111 | 0.683801 | 0 | 0 | 0.166667 | 0 | 0 | 0.049539 | 0.035714 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.125 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3810de583777d6043b6fbc1b957266870a434a71 | 8,722 | py | Python | cogs/stream.py | 948guppy/Morrigan-Rewrite | e0ad2823b09945ee88f7978361903b84e92d8bd1 | [
"MIT"
] | null | null | null | cogs/stream.py | 948guppy/Morrigan-Rewrite | e0ad2823b09945ee88f7978361903b84e92d8bd1 | [
"MIT"
] | 13 | 2020-10-07T09:43:45.000Z | 2020-12-29T02:10:15.000Z | cogs/stream.py | 948guppy/Morrigan-Rewrite | e0ad2823b09945ee88f7978361903b84e92d8bd1 | [
"MIT"
] | null | null | null | import textwrap
import asyncio
import discord
from discord.ext import commands
class StreamStatusIsNone(Exception):
pass
class Stream(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_voice_state_update(self, member, before, after):
channel_id = 758219068007776256
channel = member.guild.get_channel(channel_id)
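# This channel receives the stream-information embeds posted below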
# Helper functions for when a stream is created
def overwrites(streamer):
overwrite = {
streamer.guild.default_role: discord.PermissionOverwrite(create_instant_invite=False,
manage_channels=False,
manage_permissions=False, manage_roles=False,
manage_webhooks=False, read_messages=False,
send_messages=False, send_tts_messages=False,
manage_messages=False, embed_links=False,
attach_files=False, read_message_history=False,
mention_everyone=False, external_emojis=False,
use_external_emojis=False, add_reactions=False,
view_channel=True, connect=True, speak=True,
stream=True, mute_members=False,
deafen_members=False, move_members=False,
use_voice_activation=True,
priority_speaker=False),
streamer: discord.PermissionOverwrite(create_instant_invite=False, manage_channels=False,
manage_permissions=False, manage_roles=False,
manage_webhooks=False, read_messages=False, send_messages=False,
send_tts_messages=False, manage_messages=False, embed_links=False,
attach_files=False, read_message_history=False,
mention_everyone=False, external_emojis=False,
use_external_emojis=False, add_reactions=False, view_channel=True,
connect=True, speak=True, stream=True, mute_members=True,
deafen_members=True, move_members=False,
use_voice_activation=True, priority_speaker=True)
}
return overwrite
async def create_stream_channel(streamer):
category_id = 733625569178157076
category = streamer.guild.get_channel(category_id)
stream = await category.create_voice_channel(name=f"{streamer.display_name}",
overwrites=overwrites(streamer))
await streamer.move_to(stream)
def get_streaming_game(streamer):
try:
game = streamer.activities[0]
except IndexError:
game = None
return game
async def send_stream_started(streamer):
e = discord.Embed()
e.title = "配信が開始されました!"
e.description = textwrap.dedent(
f"""
Streamer: {streamer.mention}
Game being streamed: {get_streaming_game(streamer).name if get_streaming_game(streamer) else 'could not be detected'}
"""
)
e.colour = 0x99FFFF
await channel.send(embed=e)
async def send_error_message(streamer):
e = discord.Embed()
e.title = "エラーが発生しました!"
e.description = textwrap.dedent(
f"""
The stream-information panel for streamer {streamer.mention} could not be retrieved.
The panel may already have been deleted, or there may be too many panels in the channel.
This error message will be deleted in 10 seconds.
"""
)
e.colour = 0xFF0000
await channel.send(embed=e, delete_after=10)
async def delete_stream_information(streamer):
stream_information = None
async for message in channel.history(limit=200):
try:
if message.embeds[0].title == "配信が開始されました!":
if f"配信者 : {streamer.mention}さん" in message.embeds[0].description:
stream_information = message
break
except IndexError:
continue
try:
await stream_information.delete()
except AttributeError:
await send_error_message(streamer)
# Helper functions for when a stream ends
async def close_stream(listener, stream):
try:
if stream.channel.overwrites_for(listener).deafen_members:
await stream.channel.delete()
await delete_stream_information(member)
except AttributeError:
pass
# Run the handlers
try:
if after.channel.id == 733626787992567868:
await send_stream_started(member)
await create_stream_channel(member)
except AttributeError:
if not before.channel.id == 733626787992567868 and before.channel.category_id == 733625569178157076:
await close_stream(member, before)
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
information_channel_id = 758219068007776256
information_channel = self.bot.get_channel(information_channel_id)
channel = self.bot.get_channel(payload.channel_id)
guild = self.bot.get_guild(payload.guild_id)
message = await channel.fetch_message(payload.message_id)
member = guild.get_member(payload.user_id)
delete = []
def check(m):
return m.author == member
async def change_streaming_channel_name(listener, stream_name):
state = listener.voice
if state is None:
delete.append(await channel.send("VCにいません"))
else:
if state.channel.category_id == 733625569178157076 and not state.channel.id == 733626787992567868:
if state.channel.overwrites_for(listener).deafen_members:
stream_channel_id = state.channel.id
stream_channel = listener.guild.get_channel(stream_channel_id)
await stream_channel.edit(name=stream_name)
return True
return False
raise StreamStatusIsNone
if member.bot:
return
else:
try:
if message.embeds[0].title == "配信編集パネル":
if str(payload.emoji) == "1⃣":
try:
delete.append(await channel.send("配信の名前を入力してください"))
msg = await self.bot.wait_for('message', timeout=60.0, check=check)
delete.append(msg)
except asyncio.TimeoutError:
delete.append(await channel.send('Timed out'))
else:
try:
if await change_streaming_channel_name(member, msg.content):
delete.append(await channel.send(f"配信の名前を{msg.content}に変更しました"))
else:
delete.append(await channel.send("あなたの配信ではありません"))
await (await self.bot.get_channel(payload.channel_id).fetch_message(payload.message_id)).remove_reaction(
payload.emoji, self.bot.get_guild(payload.guild_id).get_member(payload.user_id))
except StreamStatusIsNone:
pass
except IndexError:
pass
await asyncio.sleep(5)
await channel.delete_messages(delete)
def setup(bot):
bot.add_cog(Stream(bot))
| 47.145946 | 137 | 0.497822 | 729 | 8,722 | 5.751715 | 0.223594 | 0.025757 | 0.026711 | 0.028619 | 0.399237 | 0.318626 | 0.260911 | 0.231338 | 0.209874 | 0.184593 | 0 | 0.034265 | 0.437858 | 8,722 | 184 | 138 | 47.402174 | 0.820722 | 0.002637 | 0 | 0.24359 | 0 | 0 | 0.058884 | 0.021967 | 0 | 0 | 0.00184 | 0 | 0 | 1 | 0.032051 | false | 0.025641 | 0.025641 | 0.00641 | 0.108974 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3812f35ba24d9bbcbcb368fba353b59808d5476c | 7,477 | py | Python | secapr/phase_alleles.py | mftorres/seqcap_processor | ce8ff01b3918bd29105db7b3d91b1d12572f014e | [
"MIT"
] | null | null | null | secapr/phase_alleles.py | mftorres/seqcap_processor | ce8ff01b3918bd29105db7b3d91b1d12572f014e | [
"MIT"
] | null | null | null | secapr/phase_alleles.py | mftorres/seqcap_processor | ce8ff01b3918bd29105db7b3d91b1d12572f014e | [
"MIT"
] | null | null | null |
"""
Phase remapped reads form reference-based assembly into two separate alleles. Then produce consensus sequence for each allele.
"""
#author: Tobias Andermann, tobias.andermann@bioenv.gu.se
import os
import sys
import re
import glob
import shutil
import argparse
import configparser
import subprocess
import pickle
from Bio import SeqIO
from secapr.utils import CompletePath
from secapr.reference_assembly import bam_consensus, join_fastas
from secapr.helpers import CreateDir
# Get arguments
def add_arguments(parser):
parser.add_argument(
'--input',
required=True,
action=CompletePath,
default=None,
help='Call the folder that contains the results of the reference based assembly (output of reference_assembly function, containing the bam-files).'
)
parser.add_argument(
'--output',
required=True,
action=CreateDir,
default=None,
help='The output directory where results will be safed.'
)
parser.add_argument(
'--min_coverage',
type=int,
default=4,
help='Set the minimum read coverage. Only positions that are covered by this number of reads will be called in the consensus sequence, otherwise the program will add an ambiguity at this position.'
)
parser.add_argument(
'--reference',
required=True,
action=CompletePath,
default=None,
help='Provide the reference that was used for read-mapping. If you used the alignment-consensus method, provide the joined_fasta_library.fasta which is found in the reference_seqs folder within the reference-assembly output.'
)
def phase_bam(sorted_bam_file,sample_output_folder,min_cov,reference):
# Phasing:
bam_basename = re.sub('.bam$', '', sorted_bam_file)
split_sample_path = re.split("/",sorted_bam_file)
split_file_name = split_sample_path[-1]
phasing_file_base_pre = re.sub('.sorted.bam$', '', split_file_name)
if 'unphased' in phasing_file_base_pre:
phasing_file_base_pre = re.sub('_unphased','',phasing_file_base_pre)
phasing_out_dir = "%s/phased_bam_files" %(sample_output_folder)
if not os.path.exists(phasing_out_dir):
os.makedirs(phasing_out_dir)
phasing_basename = "%s/%s_allele" %(phasing_out_dir,phasing_file_base_pre)
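# samtools phase flags (per the samtools manual): -A drops reads with
# ambiguous phasing, -F disables chimeric-read fixing, -Q 20 sets the
# minimum base quality used in het calling, -b sets the output BAM prefix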
phasing_cmd = [
"samtools",
"phase",
"-A",
"-F",
"-Q",
"20",
"-b",
phasing_basename,
sorted_bam_file
]
try:
print ("Phasing bam file..........")
with open(os.path.join(phasing_out_dir, "phasing_screen_out.txt"), 'w') as phasing_screen:
ph = subprocess.Popen(phasing_cmd, stdout=phasing_screen)
ph.communicate()
print ("Phasing completed.")
except:
print ("Phasing unsuccessful. Script terminated.")
sys.exit()
allele_0_file = "%s.0.bam" %phasing_basename
allele_1_file = "%s.1.bam" %phasing_basename
allele_0_sorted_base = "%s/%s_sorted_allele_0" %(phasing_out_dir,phasing_file_base_pre)
allele_1_sorted_base = "%s/%s_sorted_allele_1" %(phasing_out_dir,phasing_file_base_pre)
allele_0_sorted_file = "%s.bam" %allele_0_sorted_base
allele_1_sorted_file = "%s.bam" %allele_1_sorted_base
# Sorting phased bam files:
sort_phased_0 = "samtools sort -o %s %s" %(allele_0_sorted_file,allele_0_file)
sort_phased_1 = "samtools sort -o %s %s" %(allele_1_sorted_file,allele_1_file)
os.system(sort_phased_0)
os.system(sort_phased_1)
# Creating index file for phased bam-files:
index_allele0 = "samtools index %s" %(allele_0_sorted_file)
index_allele1 = "samtools index %s" %(allele_1_sorted_file)
os.system(index_allele0)
os.system(index_allele1)
print ("Creating consensus sequences from bam-files..........")
allele0_stem = re.split("/", allele_0_sorted_base)[-1]
allele0_stem = re.sub('_sorted', '', allele0_stem)
allele1_stem = re.split("/", allele_1_sorted_base)[-1]
allele1_stem = re.sub('_sorted', '', allele1_stem)
fasta_allele0 = bam_consensus(reference,allele_0_sorted_file,allele0_stem,sample_output_folder,min_cov)
fasta_allele1 = bam_consensus(reference,allele_1_sorted_file,allele1_stem,sample_output_folder,min_cov)
# Cleaning up output directory
output_files = [val for sublist in [[os.path.join(i[0], j) for j in i[2]] for i in os.walk(sample_output_folder)] for val in sublist]
intermediate_files = "%s/intermediate_files" %sample_output_folder
if not os.path.exists(intermediate_files):
os.makedirs(intermediate_files)
# check the names to make sure we're not deleting something improperly
try:
assert fasta_allele0 in output_files
except:
raise IOError("Output-files were not created properly.")
for file in output_files:
if file.endswith('.mpileup') or file.endswith('.vcf'):
shutil.move(file,intermediate_files)
return fasta_allele0,fasta_allele1
def manage_homzygous_samples(fasta_dir, sample_id):
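"""Duplicate each consensus sequence in <fasta_dir>/<sample_id>.sorted.fasta
into two identical allele records (suffixes _0 and _1), for samples where
phasing produced no heterozygous sites"""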
fasta_sequences = SeqIO.parse(open("%s/%s.sorted.fasta" %(fasta_dir,sample_id)),'fasta')
with open('%s/%s_joined_homozygous_alleles.fasta'%(fasta_dir,sample_id), 'w') as outfile:
for fasta in fasta_sequences:
name = re.split(" ", fasta.description)
name[0] += "_0"
fasta.description = " ".join(name)
fasta.id += "_0"
SeqIO.write(fasta, outfile, "fasta")
name = re.split(" ", fasta.description)
allele_1_name = re.sub("_0$", "_1", name[0])
name[0] = allele_1_name
fasta.description = " ".join(name)
allele_1_id = re.sub("_0$", "_1", str(fasta.id))
fasta.id = allele_1_id
SeqIO.write(fasta, outfile, "fasta")
def main(args):
print('\n')
min_cov = args.min_coverage
reference = args.reference
# Set working directory
out_dir = args.output
input_folder = args.input
sample_out_list = []
# Iterate through all sample specific subfolders
for subfolder in os.listdir(input_folder):
path = os.path.join(input_folder,subfolder)
if os.path.isdir(path):
subfolder_path = os.path.join(input_folder,subfolder)
if subfolder_path.endswith('_remapped') or subfolder_path.endswith('_locus_selection'):
sample = '_'.join(subfolder.split('_')[:-1])
sample_output_folder = os.path.join(out_dir,'%s_phased' %sample)
if not os.path.exists(sample_output_folder):
os.makedirs(sample_output_folder)
sample_out_list.append(sample_output_folder)
tmp_folder = os.path.join(subfolder_path,'tmp')
reference_pickle = os.path.join(tmp_folder,'%s_reference.pickle' %sample)
#with open(reference_pickle, 'rb') as handle:
# reference = pickle.load(handle)
for file in os.listdir(subfolder_path):
if file.endswith("sorted.bam"):
sorted_bam = file
sorted_bam_path = os.path.join(subfolder_path,sorted_bam)
print(('\n'+"#" * 50))
print(('Processing sample %s' %sample))
allele_fastas = phase_bam(sorted_bam_path,sample_output_folder,min_cov,reference)
# The following is for the case that no phased bam files were created, i.e. the individual is homozygous for all loci (happens when only looking at one locus or a very few)
allele0 = ""
allele1 = ""
# testing if phasing files were created
for file in allele_fastas:
if file.endswith(".fasta"):
if "allele_0" in file:
allele0 = file
if "allele_1" in file:
allele1 = file
if not allele0:
# No phased allele fastas were produced, i.e. the individual is
# homozygous at every locus (can happen with only one or very few
# loci); duplicate the unphased consensus into two identical allele
# records instead (assumes {sample}.sorted.fasta exists in the
# sample output folder)
manage_homzygous_samples(sample_output_folder, sample)
join_fastas(out_dir,sample_out_list)
print('\n') | 37.199005 | 227 | 0.738933 | 1,108 | 7,477 | 4.724729 | 0.222924 | 0.022732 | 0.037822 | 0.024069 | 0.22999 | 0.160267 | 0.119007 | 0.084432 | 0.056543 | 0.041261 | 0 | 0.012965 | 0.143774 | 7,477 | 201 | 228 | 37.199005 | 0.804749 | 0.118095 | 0 | 0.159509 | 0 | 0.018405 | 0.202069 | 0.02252 | 0 | 0 | 0 | 0 | 0.006135 | 1 | 0.02454 | false | 0 | 0.08589 | 0 | 0.116564 | 0.04908 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38131c7ecad19e2b5a5ed7d86da453eb8db2ed3b | 1,201 | py | Python | src/anaphylaxis_nlp/algo/observation.py | kpwhri/anaphylaxis-runrex | ee52f11f54d2034314abd3dfc3caf4770dcd8acb | [
"MIT"
] | null | null | null | src/anaphylaxis_nlp/algo/observation.py | kpwhri/anaphylaxis-runrex | ee52f11f54d2034314abd3dfc3caf4770dcd8acb | [
"MIT"
] | null | null | null | src/anaphylaxis_nlp/algo/observation.py | kpwhri/anaphylaxis-runrex | ee52f11f54d2034314abd3dfc3caf4770dcd8acb | [
"MIT"
] | null | null | null | from runrex.algo.pattern import Pattern
from runrex.text import Document
from runrex.algo.result import Status, Result
from anaphylaxis_nlp.algo.epinephrine import hypothetical
class ObsStatus(Status):
NONE = -1
OBSERVATION = 1
MONITORING = 2
overnight = r'(observation|overnight|extended|hospital|unit|[i1]cm?u|neuro)'
admit = r'\b(add?mit|admission|transfer|xfer)\w*'
OBSERVATION = Pattern(
rf'{admit} (\w+ )?((for|to) )?(\w+ )?{overnight}',
negates=[hypothetical]
)
MONITORING = Pattern(
rf'(close|continu)\w* ((to|for) )?(\w+ )?(monitor|observ|check|follow|track)\w*',
negates=[hypothetical]
)
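# Illustrative (hypothetical) fragments each pattern above is intended to
# match -- made up for documentation, not drawn from any note corpus:
#   OBSERVATION: "admitted to the hospital for overnight observation"
#   MONITORING:  "will continue to closely monitor symptoms"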
def _search_observation(document: Document):
for sentence in document:
for text, start, end in sentence.get_patterns(OBSERVATION):
yield ObsStatus.OBSERVATION, text, start, end
for text, start, end in sentence.get_patterns(MONITORING):
yield ObsStatus.MONITORING, text, start, end
def get_observation(document: Document, expected=None):
for status, text, start, end in _search_observation(document):
yield Result(status, status.value, expected=expected,
text=text, start=start, end=end)
| 30.025 | 85 | 0.693589 | 150 | 1,201 | 5.5 | 0.393333 | 0.065455 | 0.072727 | 0.050909 | 0.087273 | 0.087273 | 0.087273 | 0.087273 | 0 | 0 | 0 | 0.004061 | 0.17985 | 1,201 | 39 | 86 | 30.794872 | 0.833503 | 0 | 0 | 0.071429 | 0 | 0.035714 | 0.183181 | 0.115737 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38171d2911bdd197c075b3659dc77a7fa491cb09 | 2,757 | py | Python | Otros/automatizacion-separador-de-direcciones -2.py | gustavoghp87/MiseriScraping | a9f19c1f45424107713a331d74e2a905404390c4 | [
"Apache-2.0"
] | null | null | null | Otros/automatizacion-separador-de-direcciones -2.py | gustavoghp87/MiseriScraping | a9f19c1f45424107713a331d74e2a905404390c4 | [
"Apache-2.0"
] | null | null | null | Otros/automatizacion-separador-de-direcciones -2.py | gustavoghp87/MiseriScraping | a9f19c1f45424107713a331d74e2a905404390c4 | [
"Apache-2.0"
] | null | null | null | import os
import xlsxwriter
import pandas as pd
################################################################################################################################
data = pd.read_excel("te.xlsx", encoding='UTF8')
wb = xlsxwriter.Workbook("nuevo2.xlsx")
ws = wb.add_worksheet()
ws.write(0, 0, "inner_id")
ws.write(0, 1, "calle_id")
ws.write(0, 2, "territorio")
ws.write(0, 3, "manzana")
ws.write(0, 4, "dirección")
ws.write(0, 5, "dire-alt")
ws.write(0, 6, "piso/depto")
ws.write(0, 13, "teléfono")
ws.write(0, 14, "estado")
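# Each input row is copied into columns A-E; the address in column E is then
# split into a street-plus-number part (column F, "dire-alt") and a
# floor/apartment part (column G, "piso/depto"); phone and status go to N and O.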
for i in data.index:
inner_id = data['inner_id'][i]
cuadra_id = data['cuadra_id'][i]
territorio = data['territorio'][i]
manzana = data['manzana'][i]
direccion = data['dirección'][i]
telefono = data['teléfono'][i]
estado = data['estado'][i]
excA = "A" + str(i+2)
ws.write(excA, inner_id)
excB = "B" + str(i+2)
ws.write(excB, cuadra_id)
excC = "C" + str(i+2)
ws.write(excC, territorio)
excD = "D" + str(i+2)
ws.write(excD, manzana)
excE = "E" + str(i+2)
ws.write(excE, direccion)
excF = "F" + str(i+2)
excG = "G" + str(i+2)
print("DIRECCION: " + direccion)
lista = direccion.split(' ')
# Find the first token (positions 1-4) that is the street number; everything
# up to and including it is the address (column F), and up to three trailing
# tokens form the floor/apartment field (column G). Addresses with no numeric
# token in those positions leave both cells blank.
indice_numero = None
for j in range(1, min(5, len(lista))):
    if lista[j].isdigit():
        indice_numero = j
        break
if indice_numero is not None:
    direccion2 = " ".join(lista[:indice_numero + 1])
    ws.write(excF, direccion2)
    pisodepto = " ".join(lista[indice_numero + 1:indice_numero + 4])
    ws.write(excG, pisodepto)
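# Example (hypothetical input): "Av. Rivadavia 1234 piso 2" is written as
# dire-alt "Av. Rivadavia 1234" (column F) and piso/depto "piso 2" (column G).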
excN = "N" + str(i+2)
ws.write(excN, telefono)
excO = "O" + str(i+2)
ws.write(excO, estado)
wb.close()
print("\n\n\n##################### Terminado con éxito! \n\n\n")
| 24.39823 | 128 | 0.540805 | 372 | 2,757 | 3.981183 | 0.22043 | 0.113437 | 0.137745 | 0.12424 | 0.551654 | 0.472654 | 0.405807 | 0.303174 | 0.055368 | 0.055368 | 0 | 0.037241 | 0.211099 | 2,757 | 112 | 129 | 24.616071 | 0.643678 | 0 | 0 | 0.363636 | 0 | 0 | 0.098136 | 0.01027 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.030303 | 0 | 0.030303 | 0.020202 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3817a39ebae3bec12eb2bb2bd3ccd37374c4d67e | 15,930 | py | Python | Scripts/logica_estado_smach.py | LiuSeeker/Robotica-projeto-1 | 425795d51232470ac840faf9dc7d97863d801554 | [
"CECILL-B"
] | null | null | null | Scripts/logica_estado_smach.py | LiuSeeker/Robotica-projeto-1 | 425795d51232470ac840faf9dc7d97863d801554 | [
"CECILL-B"
] | null | null | null | Scripts/logica_estado_smach.py | LiuSeeker/Robotica-projeto-1 | 425795d51232470ac840faf9dc7d97863d801554 | [
"CECILL-B"
] | null | null | null | #! /usr/bin/env python
# -*- coding:utf-8 -*-
import smach
import smach_ros
import rospy
import numpy as np
import tf
import math
import cv2
import time
from geometry_msgs.msg import Twist, Vector3, Pose
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Image, CompressedImage, LaserScan, Imu
from turtlebot3_msgs.msg import Sound
from cv_bridge import CvBridge, CvBridgeError
import cormodule
import transformations
# Haar cascade XML with the cat-face training data
face_cascade = cv2.CascadeClassifier('haarcascade_frontalcatface.xml')
bridge = CvBridge()
global cv_image
global dif_x
global media
global centro
global imu
global angulos
global toca1
global toca2
bateu = False
toca1 = True
toca2 = True
angulos = [0]
cv_image = None
dif_x = None
media = 0
centro = 0
media0 = (0,0)
imu = []
atraso = 1.5E9
delay_frame = 0.03
check_delay = True  # when True, camera frames that arrive too late are dropped
bateu = None
p = False
#--------------------------------------------------------------------------------------------------------------------------------------
# Converts a raw LaserScan reading to centimeters
def converte(valor):
return valor*44.4/0.501
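# Calibration note: the scale 44.4/0.501 presumably comes from a measurement
# where a raw LaserScan reading of 0.501 corresponded to 44.4 cm; treat both
# constants as empirical values specific to this sensor setup.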
# Callback that receives each camera image
def roda_todo_frame(imagem):
global cv_image
global media
global centro
global dif_x
global p
global area1, area2
global media0, centro0, area0
# Compute how much the frame lags behind real time
now = rospy.get_rostime()
imgtime = imagem.header.stamp
lag = now-imgtime
delay = lag.nsecs
if delay > atraso and check_delay:
print("delay: {}".format(delay/1.0E9))
return
cv_image = bridge.compressed_imgmsg_to_cv2(imagem, "bgr8")
# Convert the camera image to grayscale for the face detection
gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
# Get the bounding box of each detected face
for (x, y, z, w) in faces:
    cv2.rectangle(cv_image, (x,y), (x+z, y+w), (255,0,0), 2)  # Draw a rectangle around the face
    roi_gray = gray[y:y+w, x:x+z]
    roi_color = cv_image[y:y+w, x:x+z]
    media = x+z/2  # x position of the face center
    centro = cv_image.shape[0]//1.5  # Reference x position (note: uses image height / 1.5, not width / 2)
    # On the first iteration after recognizing a cat, record the face area as a reference
    if not p:
        area1 = z*w
        area2 = 0
        p = True
    # On later iterations, keep updating the area to scale the robot's speed
    else:
        area2 = z*w
# If a face was found, compute the offset between the face center and the reference position
if len(faces) > 0:
dif_x = media-centro
else:
dif_x = None
# Get the center of the color-detected object and the image center
media0, centro0, area0 = cormodule.identifica_cor(cv_image)
# Display the camera image
cv2.imshow("Camera", cv_image)
cv2.waitKey(1)
# Callback that stores the list of LaserScan distances
def scaneou(dado):
global distancias
distancias = np.array(dado.ranges)
# Callback that tracks orientation and acceleration from the IMU
def leu_imu(dado):
global angulos, imu, imu_acele, imu_media, bateu
quat = dado.orientation
lista = [quat.x, quat.y, quat.z, quat.w]
angulos = np.degrees(transformations.euler_from_quaternion(lista))
imu_acele = np.array(dado.linear_acceleration.x).round(decimals=2)
imu.append(imu_acele)
# Take the mean of the collected acceleration readings
if len(imu) >= 12:
imu = imu[6:]
imu_media = np.mean(imu)
# Check whether the robot has crashed
if abs(imu[-1] - imu_media) >= 3.0:
imu = []
bateu = True
else:
bateu = False
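# Crash-detection sketch: a short window of linear-acceleration samples is
# kept (trimmed to the newest six once twelve accumulate); if the latest
# sample deviates from the window mean by 3.0 m/s^2 or more, 'bateu' is set.
# Both the window size and the threshold are empirical tuning values.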
#--------------------------------------------------------------------------------------------------------------------------------------
## State machine classes
# State that searches for the objects of interest
class Procurar(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['objeto_1', 'objeto_2', 'nada'])
def execute(self, userdata):
print("Procurar")
global velocidade
velocidade = Twist(Vector3(0, 0, 0), Vector3(0, 0, 0.4))
# If a face was found:
if dif_x is not None:
    return 'objeto_1'
# If the object of interest was found by its color
if media0[0] != 0 and media0[1] != 0 and area0 >= 3000:
    return 'objeto_2'
# If nothing was found
else:
velocidade_saida.publish(velocidade)
rospy.sleep(0.01)
return 'nada'
# Once the object has been found, this state follows it
class Seguir(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['seguir', 'desviar', 'perto', 'perdido', 'bateu'])
def execute(self, userdata):
print("Seguir")
global velocidade_saida, bateu
tolerancia = 25
desviar = False
seguindo = False
perdido = False
perto = False
l_fd = 0
l_fe = 0
l_d = 0
l_e = 0
if bateu:
bateu = False
return "bateu"
# Using the distances received in scaneou().
# The first distance in the list is the one straight ahead of the robot,
# and the scan proceeds counterclockwise from there.
for i in range(len(distancias)):
    # Scan band on the robot's front-left
    if i <= 40:
        # Anything in this band closer than 30 cm counts toward the 'desviar' flag
        if converte(distancias[i]) < 30 and converte(distancias[i]) != 0:
            l_fe += 1
    # Scan band on the robot's front-right
    if i >= 320:
        # Anything in this band closer than 30 cm counts toward the 'desviar' flag
        if converte(distancias[i]) < 30 and converte(distancias[i]) != 0:
            l_fd += 1
    # Scan band on the robot's left side
    if i <= 70 and i > 40:
        # Anything in this band closer than 20 cm counts toward the 'desviar' flag
        if converte(distancias[i]) < 20 and converte(distancias[i]) != 0:
            l_e += 1
    # Scan band on the robot's right side
    if i < 320 and i >= 290:
        # Anything in this band closer than 20 cm counts toward the 'desviar' flag
        if converte(distancias[i]) < 20 and converte(distancias[i]) != 0:
            l_d += 1
if l_fe >= 5:
    desviar = True
if l_fd >= 5:
    desviar = True
if l_e >= 5:
    desviar = True
if l_d >= 5:
    desviar = True
# Center the (cat) face within the tolerance
if dif_x is not None and dif_x > tolerancia:
    velocidade = Twist(Vector3(0,0,0), Vector3(0,0,-0.2))
    rospy.sleep(delay_frame)
    seguindo = True
if dif_x is not None and dif_x < -tolerancia:
    velocidade = Twist(Vector3(0,0,0), Vector3(0,0,0.2))
    rospy.sleep(delay_frame)
    seguindo = True
# Once centered, compare the area the object now occupies in the frame with
# its initial area to decide how fast the robot may drive
if dif_x is not None and -tolerancia <= dif_x <= tolerancia:
    # If the area has grown a lot (the object is close), stop so the robot does not collide
    if area2 >= area1*1.1:
        velocidade = Twist(Vector3(0,0,0), Vector3(0,0,0))
        rospy.sleep(delay_frame)
        perto = True
    # If the object is far away, the robot can speed up
    if area2 <= area1*1.1 and area2 > area1*0.6:
        velocidade = Twist(Vector3(0.1,0,0), Vector3(0,0,0))
        rospy.sleep(delay_frame)
        seguindo = True
    # If it is very far away, go even faster
    if area2 <= area1*0.6:
        velocidade = Twist(Vector3(0.2,0,0), Vector3(0,0,0))
        rospy.sleep(delay_frame)
        seguindo = True
if bateu:
bateu = False
return "bateu"
# If the face was lost and no reference area was ever recorded, mark it as lost
if dif_x is None:
    if area1 == 0 and area2 == 0:
        perdido = True
if desviar:
return 'desviar'
if perdido:
return 'perdido'
if seguindo:
velocidade_saida.publish(velocidade)
rospy.sleep(0.01)
return 'seguir'
if perto:
velocidade_saida.publish(velocidade)
rospy.sleep(0.01)
return 'perto'
return 'perdido'
# State used to steer away from objects close to the robot
class Desviar(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['desviado', 'desviando','bateu'])
def execute(self, userdata):
global bateu
print("Desviar")
desviando = False
l_fd = 0
l_fdc = 0
l_fe = 0
l_fec = 0
l_d = 0
l_e = 0
l_t = 0
if bateu:
bateu = False
return "bateu"
for i in range(len(distancias)):
    # Scan band on the robot's front-left
    if i <= 40:
        # Closer than 30 cm in this band: steer away
        if converte(distancias[i]) < 30 and converte(distancias[i]) >= 15:
            l_fe += 1
        # Closer than 15 cm: stop and steer away
        elif converte(distancias[i]) < 15 and converte(distancias[i]) != 0:
            l_fec += 1
    # Scan band on the robot's front-right
    if i >= 320:
        # Closer than 30 cm in this band: steer away
        if converte(distancias[i]) < 30 and converte(distancias[i]) >= 15:
            l_fd += 1
        # Closer than 15 cm: stop and steer away
        elif converte(distancias[i]) < 15 and converte(distancias[i]) != 0:
            l_fdc += 1
    # Scan band on the robot's left side
    if i <= 70 and i > 40:
        # Closer than 20 cm in this band: steer away
        if converte(distancias[i]) < 20 and converte(distancias[i]) != 0:
            l_e += 1
    # Scan band on the robot's right side
    if i < 320 and i >= 290:
        # Closer than 20 cm in this band: steer away
        if converte(distancias[i]) < 20 and converte(distancias[i]) != 0:
            l_d += 1
    # Scan band behind the robot
    if i < 290 and i > 70:
        # Closer than 20 cm behind: stop and steer away
        if converte(distancias[i]) < 20 and converte(distancias[i]) != 0:
            l_t += 1
if l_t >= 10:
velocidade = Twist(Vector3(0, 0, 0), Vector3(0, 0, 0.5))
desviando = True
if l_d >= 5:
velocidade = Twist(Vector3(0.1, 0, 0), Vector3(0, 0, 0.5))
desviando = True
if l_e >= 5:
velocidade = Twist(Vector3(0.1, 0, 0), Vector3(0, 0, -0.5))
desviando = True
if l_fe > l_fd and l_fe >= 5:
velocidade = Twist(Vector3(0.2, 0, 0), Vector3(0, 0, -0.7))
desviando = True
elif l_fd > l_fe and l_fd >= 5:
velocidade = Twist(Vector3(0.2, 0, 0), Vector3(0, 0, 0.7))
desviando = True
if l_fec > l_fdc and l_fec >= 5:
velocidade = Twist(Vector3(-0.1, 0, 0), Vector3(0, 0, -0.9))
desviando = True
elif l_fdc > l_fec and l_fdc >= 5:
velocidade = Twist(Vector3(-0.1, 0, 0), Vector3(0, 0, 0.9))
desviando = True
# While still avoiding, publish the velocity and return 'desviando'
if desviando:
    velocidade_saida.publish(velocidade)
    rospy.sleep(0.01)
    return 'desviando'
# Once the avoidance is finished, return 'desviado'
else:
    return 'desviado'
# The two states below play the robot's sounds
class Som1(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['tocado'])
def execute(self, userdata):
global saida_som
global toca1
print("Som1")
if toca1:
saida_som.publish(3)
velocidade = Twist(Vector3(0,0,0), Vector3(0, 0,0))
velocidade_saida.publish(velocidade)
rospy.sleep(0.01)
toca1 = False
return 'tocado'
class Som2(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['tocado'])
def execute(self, userdata):
global saida_som
global toca2
print("Som2")
if toca2:
saida_som.publish(2)
velocidade = Twist(Vector3(0,0,0), Vector3(0,0,0))
velocidade_saida.publish(velocidade)
rospy.sleep(0.01)
toca2 = False
return 'tocado'
# State that, when run, records the current angle as the start and computes the target angle
class Pos_ini(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['pego'])
def execute(self, userdata):
print("Pos_ini")
global ang_inicial
global ang_final
global ang_varia
global angulos
ang_inicial = angulos[0]
ang_varia = 180
if ang_inicial < ang_varia and ang_inicial >= 0:
    ang_final = ang_inicial - ang_varia
elif ang_inicial > -ang_varia and ang_inicial < 0:
    ang_final = ang_inicial + ang_varia
else:
    # ang_inicial is exactly +/-180 degrees; the opposite heading wraps to 0
    ang_final = 0
return 'pego'
# This state turns until reaching the target angle computed by Pos_ini
class Virar(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['virando', 'virado', 'bateu'])
def execute(self, userdata):
print("Virar")
global velocidade_saida
global angulos
global bateu
ang_atual = angulos[0]
if bateu:
bateu = False
return "bateu"
velocidade = Twist(Vector3(0, 0, 0), Vector3(0, 0, -0.7))
# If the turn is complete, return 'virado'
if ang_atual <= ang_final+3 and ang_atual >= ang_final-3:
    velocidade = Twist(Vector3(0, 0, 0), Vector3(0, 0, 0))
    velocidade_saida.publish(velocidade)
    rospy.sleep(0.01)
    return 'virado'
# While still turning, publish the velocity and return 'virando'
else:
velocidade_saida.publish(velocidade)
rospy.sleep(0.01)
return 'virando'
class Bateu(smach.State):
def __init__(self):
smach.State.__init__(self,outcomes = ['perdido'])
def execute(self,userdata):
print("Bateu")
global velocidade_saida
velocidade = Twist(Vector3(0, 0, 0), Vector3(0, 0, 0))
velocidade_saida.publish(velocidade)
rospy.sleep(2)
return "perdido"
def main():
global velocidade_saida
global angulos
global ang_inicial
global ang_final
global saida_som
rospy.init_node('smach_example_state_machine')
# Subscribe 'leu_imu' to the IMU topic
recebe_imu = rospy.Subscriber("/imu", Imu, leu_imu)
# Publisher for the robot's sounds
saida_som = rospy.Publisher("/sound", Sound, queue_size = 2)
# Subscribe 'roda_todo_frame' to the compressed camera feed
recebedor = rospy.Subscriber("/raspicam_node/image/compressed", CompressedImage, roda_todo_frame, queue_size=10, buff_size = 2**24)
# Publisher for velocity commands
velocidade_saida = rospy.Publisher("/cmd_vel", Twist, queue_size = 2)
# Subscribe 'scaneou' to the laser scan topic
recebe_scan = rospy.Subscriber("/scan", LaserScan, scaneou)
# Create a SMACH state machine
sm = smach.StateMachine(outcomes=['terminei'])
# Open the container
with sm:
# Add states to the container
smach.StateMachine.add('PROCURAR', Procurar(),
                       transitions={'objeto_1': 'SEGUIR',    # found object 1: returns 'objeto_1' and runs 'SEGUIR'
                                    'objeto_2': 'POS_INI',   # found object 2: returns 'objeto_2' and runs 'POS_INI'
                                    'nada': 'PROCURAR'})     # found nothing: returns 'nada' and runs 'PROCURAR' again
smach.StateMachine.add('SEGUIR', Seguir(),
                       transitions={'seguir': 'SEGUIR',      # nothing close and still tracking: keeps running 'SEGUIR'
                                    'desviar': 'DESVIAR',    # anything scanned close by (any direction): runs 'DESVIAR'
                                    'perto': 'SOM_1',        # close to the object: returns 'perto' and runs 'SOM_1'
                                    'perdido': 'PROCURAR',   # lost the object: returns 'perdido' and runs 'PROCURAR'
                                    'bateu': 'BATEU'})
smach.StateMachine.add('DESVIAR', Desviar(),
                       transitions={'desviado': 'PROCURAR',  # finished avoiding: returns 'desviado' and runs 'PROCURAR'
                                    'desviando': 'DESVIAR',  # still scanning and steering away: keeps running 'DESVIAR'
                                    'bateu': 'BATEU'})
smach.StateMachine.add('SOM_1', Som1(),
                       transitions={'tocado': 'SEGUIR'})     # plays sound 1, returns 'tocado' and runs 'SEGUIR'
smach.StateMachine.add('SOM_2', Som2(),
                       transitions={'tocado': 'PROCURAR'})   # plays sound 2, returns 'tocado' and runs 'PROCURAR'
smach.StateMachine.add('POS_INI', Pos_ini(),
                       transitions={'pego': 'VIRAR'})        # records the current heading, returns 'pego' and runs 'VIRAR'
smach.StateMachine.add('VIRAR', Virar(),
                       transitions={'virado': 'SOM_2',       # turned 180 degrees: returns 'virado' and runs 'SOM_2'
                                    'virando': 'VIRAR',      # still turning: returns 'virando' and keeps running 'VIRAR'
                                    'bateu': 'BATEU'})
smach.StateMachine.add('BATEU', Bateu(),
transitions={'perdido': 'PROCURAR'
})
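# Rough state graph (summary of the transitions above):
#   PROCURAR --objeto_1--> SEGUIR --perto--> SOM_1 --tocado--> SEGUIR
#   PROCURAR --objeto_2--> POS_INI --pego--> VIRAR --virado--> SOM_2 --tocado--> PROCURAR
#   SEGUIR --desviar--> DESVIAR --desviado--> PROCURAR
#   SEGUIR / DESVIAR / VIRAR --bateu--> BATEU --perdido--> PROCURAR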
# Execute SMACH plan
outcome = sm.execute()
if __name__ == '__main__':
main()
| 29.720149 | 135 | 0.675267 | 2,430 | 15,930 | 4.322222 | 0.169136 | 0.012758 | 0.023993 | 0.026659 | 0.463963 | 0.421975 | 0.384462 | 0.353899 | 0.346282 | 0.321622 | 0 | 0.037017 | 0.197866 | 15,930 | 535 | 136 | 29.775701 | 0.784943 | 0.302888 | 0 | 0.433511 | 0 | 0 | 0.064004 | 0.007989 | 0 | 0 | 0 | 0.001869 | 0 | 1 | 0.055851 | false | 0 | 0.045213 | 0.00266 | 0.180851 | 0.023936 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
381af4f749698a944829d6fcd5c433427a798dde | 44,048 | py | Python | sfm/ui/forms.py | edsu/sfm-ui | d07d1e28262330f12b64f0ce68998c378ed76e43 | [
"MIT"
] | null | null | null | sfm/ui/forms.py | edsu/sfm-ui | d07d1e28262330f12b64f0ce68998c378ed76e43 | [
"MIT"
] | null | null | null | sfm/ui/forms.py | edsu/sfm-ui | d07d1e28262330f12b64f0ce68998c378ed76e43 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from django.contrib.auth.models import Group
from django.urls import reverse
from django.utils import timezone
from django.core.exceptions import ValidationError
from django.conf import settings
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, Button, Submit, Div, HTML
from crispy_forms.bootstrap import FormActions
from datetimewidget.widgets import DateTimeWidget
from .models import CollectionSet, Collection, Seed, Credential, Export, User
from .utils import clean_token, clean_blogname
import json
import logging
import re
log = logging.getLogger(__name__)
HISTORY_NOTE_LABEL = "Change Note"
HISTORY_NOTE_HELP = "Explain why you made these changes at this time."
HISTORY_NOTE_HELP_ADD = "Further information about this addition."
HISTORY_NOTE_WIDGET = forms.Textarea(attrs={'rows': 4})
DATETIME_WIDGET = DateTimeWidget(
usel10n=True,
bootstrap_version=3,
attrs={'data-readonly': 'false'},
options={
'showMeridian': True
}
)
SCHEDULE_HELP = "How frequently you want data to be retrieved."
INCREMENTAL_LABEL = "Incremental harvest"
INCREMENTAL_HELP = "Only collect new items since the last data retrieval."
GROUP_HELP = "Your default group is your username, unless the SFM team has added you to another group."
class CollectionSetForm(forms.ModelForm):
group = forms.ModelChoiceField(queryset=None)
class Meta:
model = CollectionSet
fields = ['name', 'description', 'group', 'history_note']
exclude = []
widgets = {
'history_note': HISTORY_NOTE_WIDGET
}
localized_fields = None
labels = {
'history_note': HISTORY_NOTE_LABEL
}
help_texts = {
'history_note': HISTORY_NOTE_HELP,
}
error_messages = {}
def __init__(self, *args, **kwargs):
request = kwargs.pop('request')
super(CollectionSetForm, self).__init__(*args, **kwargs)
# Limit the group dropdown to the user's own groups, and preselect when there is only one.
group_queryset = Group.objects.filter(pk__in=request.user.groups.all())
if len(group_queryset) == 1:
self.initial['group'] = group_queryset[0]
self.fields['group'].queryset = group_queryset
self.fields['group'].help_text = GROUP_HELP
# check whether it's a CreateView and offer different help text
if self.instance.pk is None:
self.fields['history_note'].help_text = HISTORY_NOTE_HELP_ADD
# set up crispy forms helper
self.helper = FormHelper(self)
self.helper.layout = Layout(
Fieldset(
'',
'name',
'description',
'group',
'history_note'
),
FormActions(
Submit('submit', 'Save'),
Button('cancel', 'Cancel', onclick="window.history.back()")
)
)
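# Usage sketch (hypothetical view code, not part of this module): the form
# pops 'request' from kwargs so it can restrict the group choices to the
# requesting user's own groups, e.g.
#   form = CollectionSetForm(request.POST or None, request=request)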
class NameModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return obj.name
class BaseCollectionForm(forms.ModelForm):
credential = NameModelChoiceField(None)
link = forms.URLField(required=False, label="Public link",
help_text="Link to a public version of this collection, e.g., in a data repository.")
class Meta:
model = Collection
fields = ['name', 'description', 'link', 'collection_set', 'visibility',
'schedule_minutes', 'credential', 'end_date',
'history_note']
exclude = []
widgets = {'collection_set': forms.HiddenInput,
'history_note': HISTORY_NOTE_WIDGET,
'end_date': DATETIME_WIDGET}
labels = {
'history_note': HISTORY_NOTE_LABEL,
'visibility': 'Sharing'
}
help_texts = {
'history_note': HISTORY_NOTE_HELP,
'schedule_minutes': SCHEDULE_HELP
}
error_messages = {}
def __init__(self, *args, **kwargs):
self.coll = kwargs.pop("coll", None)
self.credential_list = kwargs.pop('credential_list', None)
super(BaseCollectionForm, self).__init__(*args, **kwargs)
# Set default if only 1 value.
if self.credential_list and self.credential_list.count() == 1:
self.initial['credential'] = self.credential_list[0]
self.fields['credential'].queryset = self.credential_list
# check whether it's a create view and offer different help text
if self.instance.pk is None:
self.fields['history_note'].help_text = HISTORY_NOTE_HELP_ADD
cancel_url = reverse('collection_set_detail', args=[self.coll])
self.helper = FormHelper(self)
self.helper.layout = Layout(
Fieldset(
'',
'name',
'description',
'link',
'credential',
Div(css_id='credential_warning'),
Div(),
'schedule_minutes',
'end_date',
'collection_set',
'visibility',
'history_note'
),
FormActions(
Submit('submit', 'Save'),
Button('cancel', 'Cancel',
onclick="window.location.href='{0}'".format(cancel_url))
)
)
def clean_end_date(self):
data = self.cleaned_data.get('end_date', None)
if data:
if data < timezone.now():
raise forms.ValidationError(
'End date must be later than current date and time.')
return data
class CollectionTwitterUserTimelineForm(BaseCollectionForm):
incremental = forms.BooleanField(initial=True, required=False, label=INCREMENTAL_LABEL, help_text=INCREMENTAL_HELP)
deleted_accounts_option = forms.BooleanField(initial=False, required=False, label="Automatically delete seeds "
"for deleted / not found "
"accounts.")
suspended_accounts_option = forms.BooleanField(initial=False, required=False, label="Automatically delete seeds "
"for suspended accounts.")
protected_accounts_options = forms.BooleanField(initial=False, required=False, label="Automatically delete seeds "
"for protected accounts.")
def __init__(self, *args, **kwargs):
super(CollectionTwitterUserTimelineForm, self).__init__(*args, **kwargs)
self.helper.layout[0][5].extend(('incremental',
'deleted_accounts_option', 'suspended_accounts_option',
'protected_accounts_options'))
if self.instance and self.instance.harvest_options:
harvest_options = json.loads(self.instance.harvest_options)
if "incremental" in harvest_options:
self.fields['incremental'].initial = harvest_options["incremental"]
if "deactivate_not_found_seeds" in harvest_options:
self.fields['deleted_accounts_option'].initial = harvest_options["deactivate_not_found_seeds"]
if "deactivate_unauthorized_seeds" in harvest_options:
self.fields['protected_accounts_options'].initial = harvest_options["deactivate_unauthorized_seeds"]
if "deactivate_suspended_seeds" in harvest_options:
self.fields['suspended_accounts_option'].initial = harvest_options["deactivate_suspended_seeds"]
def save(self, commit=True):
m = super(CollectionTwitterUserTimelineForm, self).save(commit=False)
m.harvest_type = Collection.TWITTER_USER_TIMELINE
harvest_options = {
"incremental": self.cleaned_data["incremental"],
"deactivate_not_found_seeds": self.cleaned_data["deleted_accounts_option"],
"deactivate_unauthorized_seeds": self.cleaned_data["protected_accounts_options"],
"deactivate_suspended_seeds": self.cleaned_data["suspended_accounts_option"]
}
m.harvest_options = json.dumps(harvest_options, sort_keys=True)
m.save()
return m
class CollectionTwitterSearchForm(BaseCollectionForm):
incremental = forms.BooleanField(initial=True, required=False, label=INCREMENTAL_LABEL, help_text=INCREMENTAL_HELP)
def __init__(self, *args, **kwargs):
super(CollectionTwitterSearchForm, self).__init__(*args, **kwargs)
self.helper.layout[0][5].extend(('incremental',))
if self.instance and self.instance.harvest_options:
harvest_options = json.loads(self.instance.harvest_options)
if "incremental" in harvest_options:
self.fields['incremental'].initial = harvest_options["incremental"]
def save(self, commit=True):
m = super(CollectionTwitterSearchForm, self).save(commit=False)
m.harvest_type = Collection.TWITTER_SEARCH
harvest_options = {
"incremental": self.cleaned_data["incremental"],
}
m.harvest_options = json.dumps(harvest_options, sort_keys=True)
m.save()
return m
class CollectionTwitterSampleForm(BaseCollectionForm):
class Meta(BaseCollectionForm.Meta):
exclude = ('schedule_minutes',)
def __init__(self, *args, **kwargs):
super(CollectionTwitterSampleForm, self).__init__(*args, **kwargs)
def save(self, commit=True):
m = super(CollectionTwitterSampleForm, self).save(commit=False)
m.harvest_type = Collection.TWITTER_SAMPLE
m.schedule_minutes = None
m.save()
return m
class CollectionTwitterFilterForm(BaseCollectionForm):
class Meta(BaseCollectionForm.Meta):
exclude = ('schedule_minutes',)
def __init__(self, *args, **kwargs):
super(CollectionTwitterFilterForm, self).__init__(*args, **kwargs)
def save(self, commit=True):
m = super(CollectionTwitterFilterForm, self).save(commit=False)
m.harvest_type = Collection.TWITTER_FILTER
m.schedule_minutes = None
m.save()
return m
class CollectionFlickrUserForm(BaseCollectionForm):
incremental = forms.BooleanField(initial=True, required=False, label=INCREMENTAL_LABEL, help_text=INCREMENTAL_HELP)
def __init__(self, *args, **kwargs):
super(CollectionFlickrUserForm, self).__init__(*args, **kwargs)
self.helper.layout[0][5].extend(('incremental',))
if self.instance and self.instance.harvest_options:
harvest_options = json.loads(self.instance.harvest_options)
if "incremental" in harvest_options:
self.fields['incremental'].initial = harvest_options["incremental"]
def save(self, commit=True):
m = super(CollectionFlickrUserForm, self).save(commit=False)
m.harvest_type = Collection.FLICKR_USER
harvest_options = {
"incremental": self.cleaned_data["incremental"],
}
m.harvest_options = json.dumps(harvest_options)
m.save()
return m
class CollectionWeiboTimelineForm(BaseCollectionForm):
incremental = forms.BooleanField(initial=True, required=False, label=INCREMENTAL_LABEL, help_text=INCREMENTAL_HELP)
def __init__(self, *args, **kwargs):
super(CollectionWeiboTimelineForm, self).__init__(*args, **kwargs)
self.helper.layout[0][5].extend(('incremental',))
if self.instance and self.instance.harvest_options:
harvest_options = json.loads(self.instance.harvest_options)
if "incremental" in harvest_options:
self.fields['incremental'].initial = harvest_options["incremental"]
def save(self, commit=True):
m = super(CollectionWeiboTimelineForm, self).save(commit=False)
m.harvest_type = Collection.WEIBO_TIMELINE
harvest_options = {
"incremental": self.cleaned_data["incremental"],
}
m.harvest_options = json.dumps(harvest_options, sort_keys=True)
m.save()
return m
class CollectionWeiboSearchForm(BaseCollectionForm):
incremental = forms.BooleanField(initial=True, required=False, help_text=INCREMENTAL_HELP, label=INCREMENTAL_LABEL)
def __init__(self, *args, **kwargs):
super(CollectionWeiboSearchForm, self).__init__(*args, **kwargs)
self.helper.layout[0][5].extend(('incremental',))
if self.instance and self.instance.harvest_options:
harvest_options = json.loads(self.instance.harvest_options)
if "incremental" in harvest_options:
self.fields['incremental'].initial = harvest_options["incremental"]
def save(self, commit=True):
m = super(CollectionWeiboSearchForm, self).save(commit=False)
m.harvest_type = Collection.WEIBO_SEARCH
harvest_options = {
"incremental": self.cleaned_data["incremental"],
}
m.harvest_options = json.dumps(harvest_options, sort_keys=True)
m.save()
return m
class CollectionTumblrBlogPostsForm(BaseCollectionForm):
incremental = forms.BooleanField(initial=True, required=False, label=INCREMENTAL_LABEL, help_text=INCREMENTAL_HELP)
def __init__(self, *args, **kwargs):
super(CollectionTumblrBlogPostsForm, self).__init__(*args, **kwargs)
self.helper.layout[0][5].extend(('incremental',))
if self.instance and self.instance.harvest_options:
harvest_options = json.loads(self.instance.harvest_options)
if "incremental" in harvest_options:
self.fields['incremental'].initial = harvest_options["incremental"]
def save(self, commit=True):
m = super(CollectionTumblrBlogPostsForm, self).save(commit=False)
m.harvest_type = Collection.TUMBLR_BLOG_POSTS
harvest_options = {
"incremental": self.cleaned_data["incremental"],
}
m.harvest_options = json.dumps(harvest_options, sort_keys=True)
m.save()
return m
class BaseSeedForm(forms.ModelForm):
class Meta:
model = Seed
fields = ['collection',
'history_note']
exclude = []
widgets = {
'collection': forms.HiddenInput,
'history_note': HISTORY_NOTE_WIDGET
}
labels = {
'history_note': HISTORY_NOTE_LABEL
}
help_texts = {
'history_note': HISTORY_NOTE_HELP
}
def __init__(self, *args, **kwargs):
self.collection = kwargs.pop("collection", None)
# for createView and updateView
self.view_type = kwargs.pop("view_type", None)
# for updateView check the updates for the original token and uid
self.entry = kwargs.pop("entry", None)
super(BaseSeedForm, self).__init__(*args, **kwargs)
cancel_url = reverse('collection_detail', args=[self.collection])
# check whether it's a create view and offer different help text
if self.instance.pk is None:
self.fields['history_note'].help_text = HISTORY_NOTE_HELP_ADD
self.helper = FormHelper(self)
self.helper.layout = Layout(
Fieldset(
'',
Div(),
'history_note',
'collection'
),
FormActions(
Submit('submit', 'Save'),
Button('cancel', 'Cancel',
onclick="window.location.href='{0}'".format(cancel_url))
)
)
def clean_token(self):
token_val = self.cleaned_data.get("token")
return token_val.strip()
def clean_uid(self):
uid_val = self.cleaned_data.get("uid")
return uid_val.strip()
def clean(self):
fields = self._meta.fields
uid_val, token_val = '', ''
uid_label, token_label = '', ''
if "uid" in fields:
uid_val = self.cleaned_data.get("uid")
uid_label = self._meta.labels["uid"]
if "token" in fields:
token_val = self.cleaned_data.get("token")
token_label = self._meta.labels["token"]
# if field-level validation has already failed, skip the deeper checks
if self.errors:
    return
# token and uid must not both be empty when the form has those fields
# (the Twitter filter form handles this check separately)
if (uid_label or token_label) and (not uid_val and not token_val):
    or_text = 'or' if (uid_label and token_label) else ''
    raise ValidationError(
        u'One of the following fields is required: {} {} {}.'.format(token_label, or_text, uid_label))
# for the update view
if self.view_type == Seed.UPDATE_VIEW:
# check updated seeds exist in db if changes
# case insensitive match, and user can't add 'token:TeSt' or 'token:teSt', etc if 'token:test exist.',
# but can update to 'token:TeSt' or other.
if token_val.lower() != self.entry.token.lower() and \
token_val and Seed.objects.filter(collection=self.collection,
token__iexact=token_val).exists():
raise ValidationError(u'{}: {} already exist.'.format(token_label, token_val))
# check updated uid whether exist in db if changes
if uid_val.lower() != self.entry.uid.lower() and \
uid_val and Seed.objects.filter(collection=self.collection,
uid__iexact=uid_val).exists():
raise ValidationError(u'{}: {} already exist.'.format(uid_label, uid_val))
else:
if token_val and Seed.objects.filter(collection=self.collection, token__iexact=token_val).exists():
raise ValidationError(u'{}: {} already exist.'.format(token_label, token_val))
if uid_val and Seed.objects.filter(collection=self.collection, uid__iexact=uid_val).exists():
raise ValidationError(u'{}: {} already exist.'.format(uid_label, uid_val))
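# Example of the duplicate rule above (hypothetical data): with a seed token
# 'test' already in the collection, adding 'TeSt' is rejected as a duplicate,
# while updating the 'test' seed itself to 'TeSt' is allowed.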
class SeedTwitterUserTimelineForm(BaseSeedForm):
class Meta(BaseSeedForm.Meta):
fields = ['token', 'uid']
fields.extend(BaseSeedForm.Meta.fields)
labels = dict(BaseSeedForm.Meta.labels)
labels["token"] = "Screen name"
labels["uid"] = "User id"
widgets = dict(BaseSeedForm.Meta.widgets)
widgets["token"] = forms.TextInput(attrs={'size': '40'})
widgets["uid"] = forms.TextInput(attrs={'size': '40'})
def __init__(self, *args, **kwargs):
super(SeedTwitterUserTimelineForm, self).__init__(*args, **kwargs)
self.helper.layout[0][0].extend(('token', 'uid'))
def clean_uid(self):
uid_val = self.cleaned_data.get("uid")
# check the format
if uid_val and not uid_val.isdigit():
raise ValidationError('Uid should be numeric.', code='invalid')
return uid_val
def clean_token(self):
token_val = clean_token(self.cleaned_data.get("token"))
token_val = token_val.split(" ")[0]
# check the format
if token_val and token_val.isdigit():
raise ValidationError('Screen name may not be numeric.', code='invalid')
return token_val
class SeedTwitterSearchForm(BaseSeedForm):
query = forms.CharField(required=False, widget=forms.Textarea(attrs={'rows': 4}),
help_text='See <a href="https://developer.twitter.com/en/docs/tweets/search/guides/'
'standard-operators" target="_blank">'
'these instructions</a> for writing a query. '
'Example: firefly OR "lightning bug"')
geocode = forms.CharField(required=False,
help_text='Geocode in the format latitude,longitude,radius. '
'Example: 38.899434,-77.036449,50mi')
def __init__(self, *args, **kwargs):
super(SeedTwitterSearchForm, self).__init__(*args, **kwargs)
self.helper.layout[0][0].extend(('query', 'geocode'))
if self.instance and self.instance.token:
try:
token = json.loads(self.instance.token)
# This except handling is for converting over old query tokens
except ValueError:
token = {'query': self.instance.token}
if 'query' in token:
self.fields['query'].initial = token['query']
if 'geocode' in token:
self.fields['geocode'].initial = token['geocode']
def clean_query(self):
query_val = self.cleaned_data.get("query")
return query_val.strip()
def clean_geocode(self):
geocode_val = self.cleaned_data.get("geocode")
return geocode_val.strip()
def clean(self):
# Note: stripping the values here leaves a trailing space (reason unclear), so stripping is done in the clean_* methods instead
query_val = self.cleaned_data.get("query")
geocode_val = self.cleaned_data.get("geocode")
# at least one of the fields must be provided
if not query_val and not geocode_val:
    raise ValidationError(u'One of the following fields is required: query, geocode.')
def save(self, commit=True):
m = super(SeedTwitterSearchForm, self).save(commit=False)
token = dict()
if self.cleaned_data['query']:
token['query'] = self.cleaned_data['query']
if self.cleaned_data['geocode']:
token['geocode'] = self.cleaned_data['geocode']
m.token = json.dumps(token, ensure_ascii=False)
m.save()
return m
class SeedWeiboSearchForm(BaseSeedForm):
class Meta(BaseSeedForm.Meta):
fields = ['token']
fields.extend(BaseSeedForm.Meta.fields)
labels = dict(BaseSeedForm.Meta.labels)
labels["token"] = "Topic"
help_texts = dict(BaseSeedForm.Meta.help_texts)
help_texts["token"] = u'See <a href="http://open.weibo.com/wiki/2/search/topics" target="_blank">' \
u'API documents</a> for query Weibo related on a topic. ' \
u'Example: "科技".'
widgets = dict(BaseSeedForm.Meta.widgets)
widgets["token"] = forms.TextInput(attrs={'size': '40'})
def __init__(self, *args, **kwargs):
super(SeedWeiboSearchForm, self).__init__(*args, **kwargs)
self.helper.layout[0][0].append('token')
class SeedTwitterFilterForm(BaseSeedForm):
track = forms.CharField(required=False, widget=forms.Textarea(attrs={'rows': 4}),
help_text="""Separate keywords and phrases with commas. See Twitter <a
target="_blank" href="https://developer.twitter.com/en/docs/tweets/filter-realtime/guides/
basic-stream-parameters#track">
track</a> for more information.""")
follow = forms.CharField(required=False, widget=forms.Textarea(attrs={'rows': 4}),
help_text="""Use commas to separate user IDs (e.g. 1233718,6378678) of accounts whose
tweets, retweets, and replies will be collected. See Twitter <a
target="_blank"
href="https://developer.twitter.com/en/docs/tweets/filter-realtime/guides/
basic-stream-parameters#follow"> follow</a>
documentation for a full list of what is returned. User <a target="_blank"
href="https://tweeterid.com/">TweeterID</a> to get the user ID for a screen name.""")
locations = forms.CharField(required=False, widget=forms.Textarea(attrs={'rows': 4}),
help_text="""Provide a longitude and latitude (e.g. -74,40,-73,41) of a geographic
bounding box. See Twitter <a target="blank"
href="https://developer.twitter.com/en/docs/tweets/filter-realtime/guides/basic-stream-parameters#locations">
locations</a> for more information.""")
def __init__(self, *args, **kwargs):
super(SeedTwitterFilterForm, self).__init__(*args, **kwargs)
self.helper.layout[0][0].extend(('track', 'follow', 'locations'))
if self.instance and self.instance.token:
token = json.loads(self.instance.token)
if 'track' in token:
self.fields['track'].initial = token['track']
if 'follow' in token:
self.fields['follow'].initial = token['follow']
if 'locations' in token:
self.fields['locations'].initial = token['locations']
def clean_track(self):
track_val = self.cleaned_data.get("track").strip()
if len(track_val.split(",")) > 400:
raise ValidationError("Can only track 400 keywords.")
return track_val
def clean_locations(self):
return self.cleaned_data.get("locations").strip()
def clean_follow(self):
follow_val = self.cleaned_data.get("follow").strip()
if len(follow_val.split(",")) > 5000:
raise ValidationError("Can only follow 5000 users.")
return follow_val
def clean(self):
# Note: stripping the values here leaves a trailing space (reason unclear), so stripping is done in the clean_* methods instead
track_val = self.cleaned_data.get("track")
follow_val = self.cleaned_data.get("follow")
locations_val = self.cleaned_data.get("locations")
# at least one of the fields must be provided
if not track_val and not follow_val and not locations_val:
    raise ValidationError(u'One of the following fields is required: track, follow, locations.')
# follow must contain only numeric user ids
if re.compile(r'[^0-9, ]').search(follow_val):
    raise ValidationError('Follow must be user ids', code='invalid_follow')
token_val = {}
if track_val:
token_val['track'] = track_val
if follow_val:
token_val['follow'] = follow_val
if locations_val:
token_val['locations'] = locations_val
token_val = json.dumps(token_val, ensure_ascii=False)
# for the update view
if self.view_type == Seed.UPDATE_VIEW:
    # If the token changed, check whether it already exists in the db.
    # The match is case insensitive, but a user may still re-case an existing
    # seed, e.g. 'track:Test' to 'track:test'.
    if token_val.lower() != self.entry.token.lower() and \
            token_val and Seed.objects.filter(collection=self.collection,
                                              token__iexact=token_val).exists():
        raise ValidationError(u'Seed: {} already exists.'.format(token_val))
else:
    if token_val and Seed.objects.filter(collection=self.collection, token__iexact=token_val).exists():
        raise ValidationError(u'Seed: {} already exists.'.format(token_val))
def save(self, commit=True):
m = super(SeedTwitterFilterForm, self).save(commit=False)
token = dict()
if self.cleaned_data['track']:
token['track'] = self.cleaned_data['track']
if self.cleaned_data['follow']:
token['follow'] = self.cleaned_data['follow']
if self.cleaned_data['locations']:
token['locations'] = self.cleaned_data['locations']
m.token = json.dumps(token, ensure_ascii=False)
m.save()
return m
class SeedFlickrUserForm(BaseSeedForm):
class Meta(BaseSeedForm.Meta):
fields = ['token', 'uid']
fields.extend(BaseSeedForm.Meta.fields)
labels = dict(BaseSeedForm.Meta.labels)
labels["token"] = "Username"
labels["uid"] = "NSID"
help_texts = dict(BaseSeedForm.Meta.help_texts)
help_texts["token"] = 'A string name for the user account. Finding this on the Flickr website can be ' \
'confusing, so see NSID below.'
help_texts["uid"] = 'An unchanging identifier for a user account, e.g., 80136838@N05. To find the NSID for a ' \
'user account, use <a href="http://www.webpagefx.com/tools/idgettr/">idGettr</a>.'
widgets = dict(BaseSeedForm.Meta.widgets)
widgets["token"] = forms.TextInput(attrs={'size': '40'})
widgets["uid"] = forms.TextInput(attrs={'size': '40'})
def __init__(self, *args, **kwargs):
super(SeedFlickrUserForm, self).__init__(*args, **kwargs)
self.helper.layout[0][0].extend(('token', 'uid'))
class SeedTumblrBlogPostsForm(BaseSeedForm):
class Meta(BaseSeedForm.Meta):
fields = ['uid']
fields.extend(BaseSeedForm.Meta.fields)
labels = dict(BaseSeedForm.Meta.labels)
labels["uid"] = "Blog hostname"
help_texts = dict(BaseSeedForm.Meta.help_texts)
help_texts["uid"] = 'Please provide the standard blog hostname, eg. codingjester or codingjester.tumblr.com.' \
'If blog hostname is codingjester.tumblr.com, it would be considered as codingjester. ' \
'To better understand standard blog hostname, See ' \
'<a target="_blank" href="https://www.tumblr.com/docs/en/api/v2#hostname">' \
'these instructions</a>.'
widgets = dict(BaseSeedForm.Meta.widgets)
widgets["uid"] = forms.TextInput(attrs={'size': '40'})
def __init__(self, *args, **kwargs):
super(SeedTumblrBlogPostsForm, self).__init__(*args, **kwargs)
self.helper.layout[0][0].append('uid')
def clean_uid(self):
return clean_blogname(self.cleaned_data.get("uid"))
class BaseBulkSeedForm(forms.Form):
TYPES = (('token', 'Username'), ('uid', 'NSID'))
seeds_type = forms.ChoiceField(required=True, choices=TYPES, widget=forms.RadioSelect)
tokens = forms.CharField(required=True, widget=forms.Textarea(attrs={'rows': 20}),
help_text="Enter each seed on a separate line.", label="Bulk Seeds")
history_note = forms.CharField(label=HISTORY_NOTE_LABEL, widget=HISTORY_NOTE_WIDGET, help_text=HISTORY_NOTE_HELP,
required=False)
def __init__(self, *args, **kwargs):
self.collection = kwargs.pop("collection", None)
super(BaseBulkSeedForm, self).__init__(*args, **kwargs)
self.fields['history_note'].help_text = HISTORY_NOTE_HELP_ADD
cancel_url = reverse('collection_detail', args=[self.collection])
self.helper = FormHelper(self)
self.helper.layout = Layout(
Fieldset(
'',
'seeds_type',
'tokens',
'history_note'
),
FormActions(
Submit('submit', 'Save'),
Button('cancel', 'Cancel',
onclick="window.location.href='{0}'".format(cancel_url))
)
)
class BulkSeedTwitterUserTimelineForm(BaseBulkSeedForm):
def __init__(self, *args, **kwargs):
super(BulkSeedTwitterUserTimelineForm, self).__init__(*args, **kwargs)
self.fields['seeds_type'].choices = (('token', 'Screen Name'), ('uid', 'User id'))
def clean_tokens(self):
seed_type = self.cleaned_data.get("seeds_type")
tokens = self.cleaned_data.get("tokens")
splittoken = ''.join(tokens).splitlines()
numtoken, strtoken, finaltokens = [], [], []
for t in splittoken:
clean_t = clean_token(t)
clean_t = clean_t.split(" ")[0]
if clean_t and clean_t.isdigit():
numtoken.append(clean_t)
elif clean_t and not clean_t.isdigit():
strtoken.append(clean_t)
finaltokens.append(clean_t + "\n")
if seed_type == 'token' and numtoken:
raise ValidationError(
'Screen names may not be numeric. Please correct the following seeds: ' + ', '.join(numtoken) + '.')
elif seed_type == 'uid' and strtoken:
raise ValidationError(
'UIDs must be numeric. Please correct the following seeds: ' + ', '.join(strtoken) + '.')
return ''.join(finaltokens)
class BulkSeedFlickrUserForm(BaseBulkSeedForm):
def __init__(self, *args, **kwargs):
super(BulkSeedFlickrUserForm, self).__init__(*args, **kwargs)
class BulkSeedTumblrBlogPostsForm(BaseBulkSeedForm):
def __init__(self, *args, **kwargs):
super(BulkSeedTumblrBlogPostsForm, self).__init__(*args, **kwargs)
self.fields['seeds_type'].choices = (('uid', 'Blog hostnames'),)
self.fields['seeds_type'].initial = 'uid'
class BaseCredentialForm(forms.ModelForm):
class Meta:
model = Credential
fields = ['name', 'history_note']
exclude = []
widgets = {
'history_note': HISTORY_NOTE_WIDGET
}
localized_fields = None
labels = {
'history_note': HISTORY_NOTE_LABEL
}
help_texts = {
'history_note': HISTORY_NOTE_HELP
}
error_messages = {}
def __init__(self, *args, **kwargs):
# for createView and updateView
self.view_type = kwargs.pop("view_type", None)
# for updateView check the updates for the original token
self.entry = kwargs.pop("entry", None)
super(BaseCredentialForm, self).__init__(*args, **kwargs)
# check whether it's a create view and offer different help text
if self.instance.pk is None:
self.fields['history_note'].help_text = HISTORY_NOTE_HELP_ADD
# set up crispy forms helper
self.helper = FormHelper(self)
self.helper.layout = Layout(
Fieldset(
'',
'name',
Div(),
'history_note'
),
FormActions(
Submit('submit', 'Save'),
Button('cancel', 'Cancel', onclick="window.history.back()")
)
)
def clean(self):
cleaned_data = super(BaseCredentialForm, self).clean()
token = json.dumps(self.to_token())
# for the update view
if self.view_type == Credential.UPDATE_VIEW:
# if the token changed, check whether it already exists in the db
if token != self.entry.token and Credential.objects.filter(token=token).exists():
raise ValidationError(u'This is a duplicate of an existing credential!')
else:
if Credential.objects.filter(token=token).exists():
raise ValidationError(u'This is a duplicate of an existing credential!')
return cleaned_data
class CredentialFlickrForm(BaseCredentialForm):
key = forms.CharField(required=True)
secret = forms.CharField(required=True)
def __init__(self, *args, **kwargs):
super(CredentialFlickrForm, self).__init__(*args, **kwargs)
self.helper.layout[0][1].extend(['key', 'secret'])
if self.instance and self.instance.token:
token = json.loads(self.instance.token)
self.fields['key'].initial = token.get('key')
self.fields['secret'].initial = token.get('secret')
def to_token(self):
return {
"key": self.cleaned_data.get("key", "").strip(),
"secret": self.cleaned_data.get("secret", "").strip(),
}
def save(self, commit=True):
m = super(CredentialFlickrForm, self).save(commit=False)
m.platform = Credential.FLICKR
m.token = json.dumps(self.to_token())
m.save()
return m
class CredentialTwitterForm(BaseCredentialForm):
consumer_key = forms.CharField(required=True)
consumer_secret = forms.CharField(required=True)
access_token = forms.CharField(required=True)
access_token_secret = forms.CharField(required=True)
def __init__(self, *args, **kwargs):
super(CredentialTwitterForm, self).__init__(*args, **kwargs)
self.helper.layout[0][1].extend(['consumer_key', 'consumer_secret', 'access_token', 'access_token_secret'])
if self.instance and self.instance.token:
token = json.loads(self.instance.token)
self.fields['consumer_key'].initial = token.get('consumer_key')
self.fields['consumer_secret'].initial = token.get('consumer_secret')
self.fields['access_token'].initial = token.get('access_token')
self.fields['access_token_secret'].initial = token.get('access_token_secret')
def to_token(self):
return {
"consumer_key": self.cleaned_data.get("consumer_key", "").strip(),
"consumer_secret": self.cleaned_data.get("consumer_secret", "").strip(),
"access_token": self.cleaned_data.get("access_token", "").strip(),
"access_token_secret": self.cleaned_data.get("access_token_secret", "").strip(),
}
def save(self, commit=True):
m = super(CredentialTwitterForm, self).save(commit=False)
m.platform = Credential.TWITTER
m.token = json.dumps(self.to_token())
m.save()
return m
class CredentialTumblrForm(BaseCredentialForm):
api_key = forms.CharField(required=True)
def __init__(self, *args, **kwargs):
super(CredentialTumblrForm, self).__init__(*args, **kwargs)
self.helper.layout[0][1].extend(['api_key'])
if self.instance and self.instance.token:
token = json.loads(self.instance.token)
self.fields['api_key'].initial = token.get('api_key')
def to_token(self):
return {
"api_key": self.cleaned_data.get("api_key", "").strip(),
}
def save(self, commit=True):
m = super(CredentialTumblrForm, self).save(commit=False)
m.platform = Credential.TUMBLR
m.token = json.dumps(self.to_token())
m.save()
return m
class CredentialWeiboForm(BaseCredentialForm):
access_token = forms.CharField(required=True)
def __init__(self, *args, **kwargs):
super(CredentialWeiboForm, self).__init__(*args, **kwargs)
self.helper.layout[0][1].extend(['access_token'])
if self.instance and self.instance.token:
token = json.loads(self.instance.token)
self.fields['access_token'].initial = token.get('access_token')
def to_token(self):
return {
"access_token": self.cleaned_data.get("access_token", "").strip(),
}
def save(self, commit=True):
m = super(CredentialWeiboForm, self).save(commit=False)
m.platform = Credential.WEIBO
m.token = json.dumps(self.to_token())
m.save()
return m
class SeedChoiceField(forms.ModelMultipleChoiceField):
def label_from_instance(self, obj):
return obj.label()
class ExportForm(forms.ModelForm):
seeds = SeedChoiceField(None, required=False, widget=forms.SelectMultiple, label="")
seed_choice = forms.ChoiceField(choices=(('ALL', 'All seeds'), ('ACTIVE', 'Active seeds only'),
('SELECTED', 'Selected seeds only'),),
initial='ALL',
widget=forms.RadioSelect)
class Meta:
model = Export
fields = ['seeds', 'seed_choice', 'export_format', 'export_segment_size', 'dedupe',
'item_date_start', 'item_date_end',
'harvest_date_start', 'harvest_date_end']
localized_fields = None
error_messages = {}
widgets = {
'item_date_start': DATETIME_WIDGET,
'item_date_end': DATETIME_WIDGET,
'harvest_date_start': DATETIME_WIDGET,
'harvest_date_end': DATETIME_WIDGET
}
labels = {
'dedupe': "Deduplicate (remove duplicate posts)",
'export_segment_size': "Maximum number of items per file"
}
def __init__(self, *args, **kwargs):
self.collection = Collection.objects.get(pk=kwargs.pop("collection"))
super(ExportForm, self).__init__(*args, **kwargs)
self.fields["seeds"].queryset = self.collection.seeds.all()
cancel_url = reverse('collection_detail', args=[self.collection.pk])
self.helper = FormHelper(self)
self.helper.layout = Layout(
Fieldset(
'',
'seed_choice',
Div('seeds', css_class="longseed"),
'export_format',
'export_segment_size',
'dedupe',
Div(
HTML("""<h4>Limit by item date range</h4>"""),
'item_date_start',
'item_date_end',
HTML("""<p class="help-block">The timezone for dates entered here are {}. Adjustments will be
made to match the time zone of the items. For example, dates in
tweets are UTC.</p>""".format(settings.TIME_ZONE)),
css_class="panel panel-default panel-body"),
Div(
HTML("""<h4>Limit by harvest date range</h4>"""),
'harvest_date_start',
'harvest_date_end',
css_class="panel panel-default panel-body"),
),
FormActions(
Submit('submit', 'Export'),
Button('cancel', 'Cancel',
onclick="window.location.href='{0}'".format(cancel_url))
)
)
if len(self.fields["seeds"].queryset) < 2:
del self.fields["seeds"]
del self.fields["seed_choice"]
self.helper.layout[0].pop(0)
self.helper.layout[0].pop(0)
def clean_seeds(self):
seeds = self.cleaned_data["seeds"]
if self.data.get("seed_choice") == "SELECTED" and not seeds:
raise ValidationError("At least one seed must be selected")
if self.data.get("seed_choice", "ALL") == "ALL":
seeds = []
elif self.data["seed_choice"] == "ACTIVE":
seeds = list(self.collection.seeds.filter(is_active=True))
return seeds
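# Note: the 'ALL' choice is encoded as an empty seeds list here; save() then
# attaches the export to the whole collection rather than individual seeds.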
def save(self, commit=True):
m = super(ExportForm, self).save(commit=False)
m.export_type = self.collection.harvest_type
if self.cleaned_data.get("seed_choice", "ALL") == "ALL":
m.collection = self.collection
m.save()
self.save_m2m()
return m
class UserProfileForm(forms.ModelForm):
class Meta:
model = User
fields = ['username', 'email', 'email_frequency', 'harvest_notifications']
widgets = {
"username": forms.TextInput(attrs={'size': '40'}),
"email": forms.TextInput(attrs={'size': '40'})
}
help_texts = {
'harvest_notifications': "Receive an email when there is a problem with a harvest.",
}
def __init__(self, *args, **kwargs):
super(UserProfileForm, self).__init__(*args, **kwargs)
# set up crispy forms helper
self.helper = FormHelper(self)
self.helper.layout = Layout(
Fieldset(
'',
'username',
'email',
'email_frequency',
'harvest_notifications',
Div()
),
FormActions(
Submit('submit', 'Save'),
Button('cancel', 'Cancel', onclick="window.history.back()")
)
)
| 41.712121 | 141 | 0.603024 | 4,735 | 44,048 | 5.427244 | 0.10792 | 0.021792 | 0.029185 | 0.020313 | 0.586155 | 0.549109 | 0.505176 | 0.458129 | 0.429644 | 0.394428 | 0 | 0.004768 | 0.280966 | 44,048 | 1,055 | 142 | 41.751659 | 0.806612 | 0.037346 | 0 | 0.440141 | 0 | 0.011737 | 0.18758 | 0.021243 | 0 | 0 | 0 | 0 | 0 | 1 | 0.077465 | false | 0 | 0.017606 | 0.00939 | 0.223005 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
381b84b00335b790806debd10772e523f8081f3d | 10,583 | py | Python | gritsbot/firmware.py | robotarium/gritsbot_2 | 43d3d32ac3dbb960301b45f6addbcad6c2195a6f | [
"MIT"
] | 1 | 2020-11-24T19:32:22.000Z | 2020-11-24T19:32:22.000Z | gritsbot/firmware.py | robotarium/gritsbot_2 | 43d3d32ac3dbb960301b45f6addbcad6c2195a6f | [
"MIT"
] | 6 | 2018-10-02T22:33:04.000Z | 2018-10-09T20:20:41.000Z | gritsbot/firmware.py | robotarium/gritsbot_2 | 43d3d32ac3dbb960301b45f6addbcad6c2195a6f | [
"MIT"
] | null | null | null | import gritsbot.gritsbotserial as gritsbotserial
import json
import vizier.node as node
import time
import argparse
import queue
import netifaces
import vizier.log as log
global logger
logger = log.get_logger()
# Constants
MAX_QUEUE_SIZE = 100
def get_mac():
"""Gets the MAC address for the robot from the network config info.
Returns:
str: A MAC address for the robot.
Example:
>>> print(get_mac())
AA:BB:CC:DD:EE:FF
"""
interface = [x for x in netifaces.interfaces() if 'wlan' in x][0]
return netifaces.ifaddresses(interface)[netifaces.AF_LINK][0]['addr']
def create_node_descriptor(end_point):
"""Returns a node descriptor for the robot based on the end_point.
The server_alive link is for the robot to check the MQTT connection periodically.
Args:
end_point (str): The ID of the robot.
Returns:
dict: A node descriptor of the vizier format for the robot.
Example:
>>> node_descriptor(1)
"""
node_descriptor = \
{
'end_point': end_point,
'links':
{
'/status': {'type': 'DATA'},
},
'requests':
[
{
'link': 'matlab_api/'+end_point,
'type': 'STREAM',
'required': False
},
]
}
return node_descriptor
# Responses
# Battery voltage response
# response = {'status': 1, 'body': {'bat_volt': 4.3}}
class Request:
"""Represents serial requests to the microcontroller.
The serial communications operate on a request/response architecture. For example, the request is of a form (when JSON encoded)
.. code-block:: python
{'request': ['read', 'write', 'read'], 'iface': [iface1, iface2, iface3], body: [body1, body2, body3]}
Attributes:
request (list): A list of requests (or actions) to perform. Must be 'read' or 'write'.
iface (list): A list of interfaces on which to perform the request
body (list): A list of bodies for the requests. These are empty if the request is a read.
"""
def __init__(self):
"""Initializes a request with optional iface, request, and body parameters.
Returns:
The created request.
"""
self.iface = []
self.request = []
self.body = []
def add_write_request(self, iface, body):
"""Adds a write to the request.
Args:
iface (str): The interface to write.
body (dict): A JSON-encodable body to be written.
Returns:
The modified request containing the new interface and body.
Examples:
>>> r = Request().add_write_request('motor', {'v': 0.1, 'w': 0.0})
"""
self.iface.append(iface)
self.request.append('write')
self.body.append(body)
return self
def add_read_request(self, iface):
"""Adds a read to the request.
Args:
iface (str): Interface from which to read.
Returns:
The request with the added read.
"""
self.iface.append(iface)
self.request.append('read')
self.body.append({})
return self
def to_json_encodable(self):
"""Turns the request into a JSON-encodable dict.
Raises:
Exception: If an underlying body element is not JSON-encodable.
Returns:
dict: A JSON-encodable dict representing the request.
"""
req = {'request': self.request, 'iface': self.iface}
if(self.body):
req['body'] = self.body
return req
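# Illustrative sketch (not part of the original module): building a combined
# write+read request. The interface names follow the docstring examples above.
#
#   req = Request().add_write_request('motor', {'v': 0.1, 'w': 0.0}) \
#                  .add_read_request('batt_volt')
#   json.dumps(req.to_json_encodable())
#   # -> '{"request": ["write", "read"], "iface": ["motor", "batt_volt"],
#   #      "body": [{"v": 0.1, "w": 0.0}, {}]}'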
def handle_write_response(status, body):
return {}
def handle_read_response(iface, status, body):
if(iface in body):
return {iface: body[iface]}
else:
logger.critical('Request for ({0}) not in body ({1}) after request.'.format(iface, body))
return {}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("mac_list", help="JSON file containing MAC to id mapping")
parser.add_argument("-port", type=int, help="MQTT Port", default=8080)
parser.add_argument("-host", help="MQTT Host IP", default="localhost")
parser.add_argument('-update_rate', type=float, help='Update rate for robot main loop', default=0.016)
parser.add_argument('-status_update_rate', type=float, help='How often to check status info', default=1)
# Retrieve the MAC address for the robot
mac_address = get_mac()
# Parser and set CLI arguments
args = parser.parse_args()
update_rate = args.update_rate
status_update_rate = args.status_update_rate
# Retrieve the MAC list file, containing a mapping from MAC address to robot ID
try:
f = open(args.mac_list, 'r')
mac_list = json.load(f)
except Exception as e:
print(repr(e))
print('Could not open file ({})'.format(args.mac_list))
raise
if(mac_address in mac_list):
robot_id = mac_list[mac_address]
else:
print('MAC address {} not in supplied MAC list file'.format(mac_address))
raise ValueError()
logger.info('This is robot: ({0}) with MAC address: ({1})'.format(robot_id, mac_address))
# Create node descriptor for robot and set up links
node_descriptor = create_node_descriptor(mac_list[mac_address])
status_link = robot_id + '/status'
input_link = 'matlab_api/' + robot_id
started = False
robot_node = None
while (not started):
robot_node = node.Node(args.host, args.port, node_descriptor)
try:
robot_node.start()
started = True
except Exception as e:
logger.critical('Could not start robot node.')
logger.critical(repr(e))
robot_node.stop()
# Don't try to make nodes too quickly
time.sleep(1)
logger.info('Started robot node.')
started = False
serial = None
while (not started):
serial = gritsbotserial.GritsbotSerial(serial_dev='/dev/ttyACM0', baud_rate=500000)
try:
serial.start()
started = True
except Exception as e:
# This class stops itself if the device cannot be initially acquired, so we don't need to stop it.
logger.critical('Could not acquire serial device.')
logger.critical(repr(e))
# Don't try to acquire the serial device too quickly
time.sleep(1)
logger.info('Acquired serial device.')
# Queues for STREAM links
inputs = robot_node.subscribe(input_link)
# Initialize times for various activities
start_time = time.time()
print_time = time.time()
status_update_time = time.time()
# Initialize data
status_data = {'batt_volt': -1, 'charge_status': False}
last_input_msg = {}
# Main loop for the robot
while True:
start_time = time.time()
# Serial requests
request = Request()
handlers = []
# Retrieve status data: battery voltage and charging status
if((start_time - status_update_time) >= status_update_rate):
request.add_read_request('batt_volt').add_read_request('charge_status')
handlers.append(lambda status, body: handle_read_response('batt_volt', status, body))
handlers.append(lambda status, body: handle_read_response('charge_status', status, body))
status_update_time = start_time
# Process input commands
input_msg = None
# Make sure that the queue has few enough messages
if(inputs.qsize() > MAX_QUEUE_SIZE):
logger.critical('Queue of motor messages is too large.')
try:
# Clear out the queue
while True:
input_msg = inputs.get_nowait()
except queue.Empty:
pass
if(input_msg is not None):
try:
input_msg = json.loads(input_msg.decode(encoding='UTF-8'))
except Exception as e:
logger.warning('Got malformed JSON motor message ({})'.format(input_msg))
logger.warning(e)
# Set this to None for the next checks
input_msg = None
# If we got a valid JSON input msg, look for appropriate commands
if(input_msg is not None):
last_input_msg = input_msg
if('v' in input_msg and 'w' in input_msg):
# Handle response?
request.add_write_request('motor', {'v': input_msg['v'], 'w': input_msg['w']})
handlers.append(handle_write_response)
if('left_led' in input_msg):
request.add_write_request('left_led', {'rgb': input_msg['left_led']})
handlers.append(handle_write_response)
if('right_led' in input_msg):
request.add_write_request('right_led', {'rgb': input_msg['right_led']})
handlers.append(handle_write_response)
# Write to serial port
response = None
if(len(handlers) > 0):
try:
response = serial.serial_request(request.to_json_encodable())
except Exception as e:
logger.critical('Serial exception.')
logger.critical(e)
# Call handlers
# We'll have a status and body for each request
if(response is not None and 'status' in response and 'body' in response
and len(response['status']) == len(handlers) and len(response['body']) == len(handlers)):
status = response['status']
body = response['body']
# Ensure the appropriate handler gets each response
for i, handler in enumerate(handlers):
status_data.update(handler(status[i], body[i]))
else:
# If we should have responses, but we don't
if(len(handlers) > 0):
logger.critical('Malformed response ({})'.format(response))
robot_node.put(status_link, json.dumps(status_data))
# Print out status data
if((start_time - print_time) >= status_update_rate):
logger.info('Status data ({})'.format(status_data))
logger.info('Last input message received ({})'.format(last_input_msg))
print_time = time.time()
# Sleep for whatever time is left at the end of the loop
time.sleep(max(0, update_rate - (time.time() - start_time)))
if __name__ == '__main__':
main()
| 30.764535 | 132 | 0.60172 | 1,321 | 10,583 | 4.685087 | 0.222559 | 0.027145 | 0.012441 | 0.014542 | 0.129908 | 0.115204 | 0.05946 | 0.026822 | 0 | 0 | 0 | 0.006171 | 0.295663 | 10,583 | 343 | 133 | 30.854227 | 0.824121 | 0.273174 | 0 | 0.255682 | 0 | 0 | 0.124235 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051136 | false | 0.005682 | 0.045455 | 0.005682 | 0.147727 | 0.034091 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
381c68068132361d222183fa84b228cba0959f89 | 5,963 | py | Python | crypto/code_crypto_py_book/enigmaM3.py | gcjordi/tests | f917d0b77d0363ddb1abeea1eb172ea7c8a764d7 | [
"Unlicense"
] | null | null | null | crypto/code_crypto_py_book/enigmaM3.py | gcjordi/tests | f917d0b77d0363ddb1abeea1eb172ea7c8a764d7 | [
"Unlicense"
] | null | null | null | crypto/code_crypto_py_book/enigmaM3.py | gcjordi/tests | f917d0b77d0363ddb1abeea1eb172ea7c8a764d7 | [
"Unlicense"
] | null | null | null | # ENIGMA M3
# Octubre 2016
# David Arboledas
# Dominio Público
import re, pyperclip
LETRAS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
#################################################
###    Enigma M3 configuration parameters    ###
#################################################
# Grundstellung (initial rotor positions)
inicio = ('A','A','A')
# Walzenlage (rotor order)
rotores = (1,2,3)
# Umkehrwalze (reflector)
reflector = 'B'
# Ringstellung (ring settings)
posicion_interna = ('A','A','A')
# Steckerverbindungen (plugboard pairs)
clavijero = [('Z','N'),('Y','O'),('Q','B'),('E','R'),('D','K'),('X','U'),('G','P'),('T','V'),('S','J'),('L','M')]
cableado_rotor =('EKMFLGDQVZNTOWYHXUSPAIBRCJ',
'AJDKSIRUXBLHWTMCQGZNPYFVOE',
'BDFHJLCPRTXVZNYEIWGAKMUSQO',
'ESOVPZJAYQUIRHXLNFTGKDCMWB',
'VZBRGITYUPSDNHLXAWMJQOFECK',
'JPGVOUMFYQBENHZRDKASXLICTW',
'NZJHGRCXMYSWBOUFAIVLPEKQDT',
'FKQHTLXOCBJSPDZRAMEWNIUYGV')
cableado_reflector = ('EJMZALYXVBWFCRQUONTSPIKHGD',
'YRUHQSLDPXNGOKMIEBFZCWVJAT',
'FVPJIAOYEDRZXWGCTKUQSBNMHL')
def main():
print("""\n**** Enigma M3 ****\n
1. Cifrar
2. Descifrar""")
opcion = int(input('\nOpcion (1, 2)> '))
if opcion == 1: # Encrypt
mensaje = input('Mensaje > ')
texto = cifrar(mensaje)
print('\n',texto)
pyperclip.copy(texto)
elif opcion == 2: # Decrypt
mensaje = input('Mensaje > ')
texto = descifrar(mensaje)
print('\n', texto)
pyperclip.copy(texto)
def vuelta(cableado):
# Traces the return path of the signal
# from the reflector through one rotor
salida = ''
for i in LETRAS:
salida += LETRAS[cableado.find(i)]
return salida
def cableado_inverso():
# Returns a tuple with the inverse
# wiring of the 8 rotors
inverso=[]
for i in range(len(cableado_rotor)):
inverso.append(vuelta(cableado_rotor[i]))
inverso = tuple(inverso)
return inverso
inverso = cableado_inverso()
inicio = list(inicio) # inicio is updated with every character
rotores = tuple([q-1 for q in rotores])
# The notches mark where stepping occurs: the next rotor advances
# at the moment the previous one passes its notch.
# Rotors 6, 7 and 8 have two notches
muesca = (('Q',),('E',),('V',),('J',),('Z',),('Z','M'),('Z','M'),('Z','M'))
def numero(car):
# Returns the number corresponding to each character
car = car.upper()
arr = {'A':0,'B':1,'C':2, 'D':3,'E':4, 'F':5, 'G':6, 'H':7, 'I':8, 'J':9, 'K':10, 'L':11, 'M':12,
'N':13, 'O':14, 'P':15, 'Q':16, 'R':17, 'S':18,'T':19, 'U':20, 'V':21, 'W':22,'X':23,
'Y':24, 'Z':25}
return arr[car]
reflector = numero(reflector)
def rotor(letra,veces,cableado_rotor):
# letra - the input character to be enciphered
# veces - how many steps the rotor has turned
# cableado_rotor - the rotor's wiring
letra = sustituye(letra,cableado_rotor,veces)
# Output letter from the rotor
return sustituye(letra,veces=-veces)
def refleja(letra):
# Performs the reflector substitution.
# The reflector is represented by an integer 0-2
return sustituye(letra,cableado_reflector[reflector])
def sustituye(letra,alfabeto=LETRAS,veces=0):
# substitutes a letter according to the key
indice = (numero(letra)+veces)%26
return alfabeto[indice]
def aplicar_clavijero(letra):
for i in clavijero:
if letra == i[0]: return i[1]
if letra == i[1]: return i[0]
return letra
def rotor_avanza():
# Each rotor steps the next one depending on its position
if inicio[1] in muesca[rotores[1]]:
# Advance the position by one letter
inicio[0] = sustituye(inicio[0],veces=1)
inicio[1] = sustituye(inicio[1],veces=1)
if inicio[2] in muesca[rotores[2]]:
inicio[1] = sustituye(inicio[1],veces=1)
inicio[2] = sustituye(inicio[2],veces=1)
def cifrar_caracter(letra):
# The rotors advance with every letter
rotor_avanza()
# Input through the plugboard
letra = aplicar_clavijero(letra)
# Forward path of the signal
for i in [2,1,0]:
veces = ord(inicio[i])-ord(posicion_interna[i])
letra = rotor(letra,veces,cableado_rotor[rotores[i]])
# In and out through the reflector
letra = refleja(letra)
# Return path
for i in [0,1,2]:
veces = ord(inicio[i])-ord(posicion_interna[i])
letra = rotor(letra,veces,inverso[rotores[i]])
# Output through the plugboard
letra = aplicar_clavijero(letra)
return letra
def descifrar(texto):
# encrypting and decrypting are the same operation
return cifrar(texto)
def cifrar(texto):
texto = eliminar_puntuacion(texto).upper()
salida = ''
for c in texto:
if c.isalpha(): salida += cifrar_caracter(c)
else: salida += c
return salida
def eliminar_puntuacion(texto):
return re.sub('[^A-Z]','',texto.upper())
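# Illustrative sketch: eliminar_puntuacion keeps only A-Z after upper-casing, e.g.
#   eliminar_puntuacion('Hola, mundo!')  # -> 'HOLAMUNDO'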
if __name__ == "__main__":
main()
'''
Example signal trace for one keypress:
Plugboard in       A
Plugboard out      A
Rotor 3 letter in  A
Rotor 3 letter out C
Rotor 2 letter in  C
Rotor 2 letter out D
Rotor 1 letter in  D
Rotor 1 letter out F
Reflector in       F
Reflector out      S
Rotor 1 letter in  S
Rotor 1 letter out S
Rotor 2 letter in  S
Rotor 2 letter out E
Rotor 3 letter in  E
Rotor 3 letter out B
Plugboard in       B
Plugboard out      Q
'''
| 27.734884 | 114 | 0.553748 | 714 | 5,963 | 4.581232 | 0.285714 | 0.02782 | 0.009172 | 0.014674 | 0.106695 | 0.072761 | 0.072761 | 0.033017 | 0.033017 | 0.033017 | 0 | 0.025889 | 0.306893 | 5,963 | 214 | 115 | 27.864486 | 0.765546 | 0.193024 | 0 | 0.18 | 0 | 0 | 0.119159 | 0.077132 | 0 | 0 | 0 | 0 | 0 | 1 | 0.13 | false | 0 | 0.01 | 0.03 | 0.25 | 0.03 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
381c9c9f8a9d7896ecd6ae03b9d5931ad71791d9 | 2,586 | py | Python | max_connect.py | passionzhan/LeetCode | c4d33b64b9da15ca7a9b0d41e645d86a697694fe | [
"MIT"
] | 1 | 2019-08-29T01:12:47.000Z | 2019-08-29T01:12:47.000Z | max_connect.py | passionzhan/LeetCode | c4d33b64b9da15ca7a9b0d41e645d86a697694fe | [
"MIT"
] | null | null | null | max_connect.py | passionzhan/LeetCode | c4d33b64b9da15ca7a9b0d41e645d86a697694fe | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-#
#-------------------------------------------------------------------------------
# PROJECT_NAME: design_pattern
# Name: max_connect.py
# Author: 9824373
# Date: 2020-08-19 21:03
# Contact: 9824373@qq.com
# Version: V1.0
# Description:
#-------------------------------------------------------------------------------
'''
0 1 1 1 0 1 1 1
1 1 1 0 1 0 1 1
1 1 0 1 1 0 1 1
int getMaxLinked(int[][] matrix,int n,int m)
{
}
Given an arbitrary n*m matrix containing only the digits 0 and 1, where 1 means
a cell can be traversed and 0 means it cannot, and moves are limited to
up/down/left/right, find the length of the longest connected path in the matrix.
Follow-up: how should the case where m and n are unbounded be handled?
'''
# from queue import Queue
from collections import deque
import copy
def neighbors(matrix, iRow, iClo):
for (nr, nc) in ((iRow - 1, iClo), (iRow + 1, iClo), (iRow, iClo - 1), (iRow, iClo + 1)):
if 0 <= nr < len(matrix) and 0 <= nc < len(matrix[0]):
yield (nr, nc)
# Depth-first search
def dfs(matrix, seen, i, j):
res_len = 1
rst_lst = [0, ]
for nei_i, nei_j in neighbors(matrix,i,j,):
if matrix[nei_i][nei_j]==1 and (nei_i,nei_j) not in seen:
next_seen = copy.deepcopy(seen)
next_seen.add((nei_i,nei_j))
rst_lst.append(dfs(matrix, next_seen, nei_i, nei_j))
res_len += max(rst_lst)
return res_len
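# Note: dfs enumerates simple paths with a deep-copied `seen` set per branch,
# so the worst case is exponential in the number of 1-cells; it is only
# practical for small grids. A hand-checkable example:
#   max_linked([[1, 1, 1]])  # -> 2 (a three-cell row; the result counts steps)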
def max_linked(matrix):
n = len(matrix)
m = len(matrix[0])
untraveled = set([(i,j) for i in range(n) for j in range(m) if matrix[i][j]==1])
max_len = 0
for i,j in untraveled:
# for i, j in ((0,5),(1,0),):
cur_seen = set()
cur_seen.add((i,j))
cur_len = dfs(matrix, cur_seen, i, j)
# for n_i, n_j in neighbors(matrix, i, j):
# if matrix[n_i][n_j] == 1 and (n_i, n_j) not in cur_seen:
#
# cur_len += dfs(matrix, next_cur_seen, i, j)
#
max_len = max(max_len, cur_len)
return max_len - 1
# def max_linked(matrix):
# n = len(matrix)
# m = len(matrix[0])
#
# for i in range(n):
# for j in range(j):
# if matrix[i][j]:
# cur_deque = deque()
# cur_deque.append(matrix[i][j])
# seen = set([matrix[i][j]],)
# for ii,jj in neighbors(matrix,i,j):
# if matrix[ii][jj] and matrix[ii][jj] not in seen:
# cur_deque.append(matrix[ii][jj])
#
if __name__ == '__main__':
a = [
[0, 1, 1, 0, 1, 1, 1, 1,],
[1, 0, 1, 0, 1, 0, 1, 0,],
[1, 1, 1, 0, 1, 0, 1, 1,],
]
rst = max_linked(a)
print(rst)
| 27.806452 | 93 | 0.476411 | 392 | 2,586 | 3.002551 | 0.216837 | 0.033985 | 0.030586 | 0.023789 | 0.216653 | 0.216653 | 0.208156 | 0.181818 | 0.120646 | 0.06627 | 0 | 0.057321 | 0.318639 | 2,586 | 92 | 94 | 28.108696 | 0.61067 | 0.486466 | 0 | 0 | 0 | 0 | 0.006221 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.057143 | 0 | 0.2 | 0.028571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
381d46f30a7018770a9715e4227a6f4b622412b6 | 786 | py | Python | tests/wipy/sd.py | sebastien-riou/micropython | 116c15842fd48ddb77b0bc016341d936a0756573 | [
"MIT"
] | 13,648 | 2015-01-01T01:34:51.000Z | 2022-03-31T16:19:53.000Z | tests/wipy/sd.py | sebastien-riou/micropython | 116c15842fd48ddb77b0bc016341d936a0756573 | [
"MIT"
] | 7,092 | 2015-01-01T07:59:11.000Z | 2022-03-31T23:52:18.000Z | tests/wipy/sd.py | sebastien-riou/micropython | 116c15842fd48ddb77b0bc016341d936a0756573 | [
"MIT"
] | 4,942 | 2015-01-02T11:48:50.000Z | 2022-03-31T19:57:10.000Z | """
SD card test for the CC3200 based boards.
"""
from machine import SD
import os
mch = os.uname().machine
if "LaunchPad" in mch:
sd_pins = ("GP16", "GP17", "GP15")
elif "WiPy" in mch:
sd_pins = ("GP10", "GP11", "GP15")
else:
raise Exception("Board not supported!")
sd = SD(pins=sd_pins)
print(sd)
sd.deinit()
print(sd)
sd.init(sd_pins)
print(sd)
sd = SD(0, pins=sd_pins)
sd = SD(id=0, pins=sd_pins)
sd = SD(0, sd_pins)
# check for memory leaks
for i in range(0, 1000):
sd = SD(0, pins=sd_pins)
# next ones should raise
try:
sd = SD(pins=())
except Exception:
print("Exception")
try:
sd = SD(pins=("GP10", "GP11", "GP8"))
except Exception:
print("Exception")
try:
sd = SD(pins=("GP10", "GP11"))
except Exception:
print("Exception")
| 17.086957 | 43 | 0.627226 | 127 | 786 | 3.818898 | 0.377953 | 0.148454 | 0.065979 | 0.086598 | 0.360825 | 0.307216 | 0.268041 | 0.197938 | 0.197938 | 0.197938 | 0 | 0.053883 | 0.197201 | 786 | 45 | 44 | 17.466667 | 0.714739 | 0.111959 | 0 | 0.375 | 0 | 0 | 0.149492 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0.1875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
381fc709361b0418ede89f17181ce6266425a268 | 6,001 | py | Python | data/Criteo/forOtherModels/dataPreprocess_TensorFlow.py | wangweitong/recommend_system | 635a772c5d6097527ab446bf69c8e9e2bb7f16da | [
"MIT"
] | 2 | 2021-09-22T09:47:37.000Z | 2022-02-07T07:30:47.000Z | data/Criteo/forOtherModels/dataPreprocess_TensorFlow.py | wangweitong/recommend_system | 635a772c5d6097527ab446bf69c8e9e2bb7f16da | [
"MIT"
] | null | null | null | data/Criteo/forOtherModels/dataPreprocess_TensorFlow.py | wangweitong/recommend_system | 635a772c5d6097527ab446bf69c8e9e2bb7f16da | [
"MIT"
] | null | null | null | import numpy as np
from data.Criteo.util import *
"""
Data Process for FM, PNN, and DeepFM.
[1] PaddlePaddle implementation of DeepFM for CTR prediction
https://github.com/PaddlePaddle/models/blob/develop/PaddleRec/ctr/deepfm/data/preprocess.py
"""
def get_train_test_file(file_path, feat_dict_, split_ratio=0.9):
train_label_fout = open('train_label', 'w')
train_value_fout = open('train_value', 'w')
train_idx_fout = open('train_idx', 'w')
test_label_fout = open('test_label', 'w')
test_value_fout = open('test_value', 'w')
test_idx_fout = open('test_idx', 'w')
categorical_range_ = range(1, 11)
continuous_range_ = range(11, 19)
cont_min_ = [1.00000000e+00, 0.00000000e+00, -1.00000000e+00, 1.38888889e-03,
-8.70709083e+03, -9.62673000e+05, -1, -4.00000000e+00]
cont_max_ = [3.02320000e+04, 4.04000000e+02, 9.00000000e+00, 9.85688611e+03,
8.72573694e+03, 2.19570650e+06, 9.00000000e+00, 4.00000000e+00]
cont_diff_ = [cont_max_[i] - cont_min_[i] for i in range(len(cont_min_))]
# Split each line into feature indices, feature values and the label
def process_line_(line):
# Split the line on ',' to get each field
features = line.rstrip('\n').split(',')
feat_idx, feat_value, label = [], [], []
# Continuous features (the MinMax bounds above are available for
# normalization, but the raw value is written out here)
for idx in continuous_range_:
# If the feature value is empty, use index 0
if features[idx] == '':
feat_idx.append(0)
feat_value.append(0.0)
else:
# Otherwise use the column's dictionary index
feat_idx.append(feat_dict_[idx])
# and keep the original value
feat_value.append(features[idx])
# Handle categorical features
for idx in categorical_range_:
if features[idx] == '' or features[idx] not in feat_dict_:
feat_idx.append(0)
feat_value.append(0.0)
else:
feat_idx.append(feat_dict_[features[idx]])
feat_value.append(1.0)
return feat_idx, feat_value, [int(features[0])]
# Open the training file (train.txt)
with open(file_path, 'r') as fin:
# Iterate over line numbers and line contents
for line_idx, line in enumerate(fin):
if line_idx % 1000000 == 0:
print(line_idx)
# Stop once enough rows have been read
if line_idx >= EACH_FILE_DATA_NUM * 10:
break
# Parse the line into indices, values and the label
feat_idx, feat_value, label = process_line_(line)
feat_value = ','.join([str(v) for v in feat_value]) + '\n'
feat_idx = ','.join([str(idx) for idx in feat_idx]) + '\n'
label = ','.join([str(idx) for idx in label]) + '\n'
if np.random.random() <= split_ratio:
train_label_fout.write(label)
train_idx_fout.write(feat_idx)
train_value_fout.write(feat_value)
else:
test_label_fout.write(label)
test_idx_fout.write(feat_idx)
test_value_fout.write(feat_value)
fin.close()
train_label_fout.close()
train_idx_fout.close()
train_value_fout.close()
test_label_fout.close()
test_idx_fout.close()
test_value_fout.close()
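# Note on the output format: the three file sets are line-aligned CSV. For each
# input row the script writes one comma-joined line of feature indices to
# *_idx, one line of feature values to *_value, and the click label to *_label.
# Illustrative shape only (actual numbers depend on the learned dictionary):
#   train_idx:   "1,2,...,8,0,437,..."
#   train_value: "0.05,1,...,0.0,1.0,..."
#   train_label: "0"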
def get_feat_dict():
freq_ = 10
dir_feat_dict_ = 'feat_dict_' + str(freq_) + '.pkl2'
# Column ranges for categorical and continuous features
categorical_range_ = range(1, 11)
continuous_range_ = range(11, 19)
if os.path.exists(dir_feat_dict_):
feat_dict = pickle.load(open(dir_feat_dict_, 'rb'))
else:
# Build the global feature index with a Counter
# Count the number of occurrences of discrete features
feat_cnt = Counter()
with open('../train.txt', 'r') as fin:
for line_idx, line in enumerate(fin):
if line_idx >= EACH_FILE_DATA_NUM * 10:
break
if line_idx % EACH_FILE_DATA_NUM == 0:
print('generating feature dict', line_idx / 45000000)
features = line.rstrip('\n').split(',')
for idx in categorical_range_:
if features[idx] == '': continue
feat_cnt.update([features[idx]])
# Only retain discrete features with high frequency
dis_feat_set = set()
for feat, ot in feat_cnt.items():
if ot >= freq_:
dis_feat_set.add(feat)
# Create a dictionary for continuous and discrete features
feat_dict = {}
tc = 1
# Continuous features
# Each continuous column (indices 11-18 here) is assigned the next sequential index
for idx in continuous_range_:
feat_dict[idx] = tc
tc += 1
# Discrete features
cnt_feat_set = set()
with open('../train.txt', 'r') as fin:
for line_idx, line in enumerate(fin):
# get mini-sample for test
if line_idx >= EACH_FILE_DATA_NUM * 10:
break
features = line.rstrip('\n').split(',')
for idx in categorical_range_:
if features[idx] == '' or features[idx] not in dis_feat_set:
continue
if features[idx] not in cnt_feat_set:
cnt_feat_set.add(features[idx])
feat_dict[features[idx]] = tc
tc += 1
# Save dictionary
with open(dir_feat_dict_, 'wb') as fout:
pickle.dump(feat_dict, fout)
print('args.num_feat ', len(feat_dict) + 1)
return feat_dict
if __name__ == '__main__':
feat_dict = get_feat_dict()
get_train_test_file('../train.txt', feat_dict)
print('Done!')
| 35.3 | 95 | 0.557074 | 738 | 6,001 | 4.257453 | 0.234417 | 0.050923 | 0.017823 | 0.01655 | 0.292171 | 0.204328 | 0.192871 | 0.176321 | 0.176321 | 0.146722 | 0 | 0.062221 | 0.327779 | 6,001 | 169 | 96 | 35.508876 | 0.716658 | 0.125146 | 0 | 0.305556 | 0 | 0 | 0.038078 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.018519 | 0 | 0.064815 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38278d88790447c7025e49fc709ca332d9b8da93 | 6,106 | py | Python | data/spellhelper.py | patronical/disaster_pipeline | a6b699f76c7174389905cdff57d651d5f398bb58 | [
"MIT"
] | null | null | null | data/spellhelper.py | patronical/disaster_pipeline | a6b699f76c7174389905cdff57d651d5f398bb58 | [
"MIT"
] | null | null | null | data/spellhelper.py | patronical/disaster_pipeline | a6b699f76c7174389905cdff57d651d5f398bb58 | [
"MIT"
] | null | null | null | # spellhelper.py
# spelling helper class for text cleaning
# this is a wrapper class for frequency dictionary
# that complements peter norvig spell check routines
'''
Implementation Notes
For default case Download the frequency dictionary and put adjacent to this file
https://raw.githubusercontent.com/mammothb/symspellpy/master/symspellpy/frequency_dictionary_en_82_765.txt
License: Apache Software License (MIT)
'''
import regex as re
import numpy as np
from collections import Counter
class Spellhelper:
'''
A class for frequency dictionary based spell correction and segmentation.
'''
import peter_norvig_spelling as pns
def __init__(self, filename = 'data/frequency_dictionary_en_82_765.txt'):
'''
initialize counter by loading frequency dictionary file
'''
def LoadFD(file):
'''
extract frequency dictionary file
transform data and load into counter
return counter
'''
# extract
with open(file, 'r') as myfile:
DICT=myfile.read().replace('\n', '')
# transform
DICT2 = re.sub(r'(\d+)(\w+)', r'\1 \2', DICT.lower())
d_tup = re.findall(r'(\w+)\s(\d+)', DICT2)
# load
C = Counter()
for pair in d_tup:
C[pair[0]] = int(pair[1])
return C
self.filename = filename
self.counter = LoadFD(self.filename)
def spellcheck(self, token):
'''
input token string
check spelling
return best effort spell check
'''
self.pns.COUNTS = self.counter
spell_check = self.pns.correct(token)
return spell_check
def segcheck(self, token):
'''
input token string
check segmentation
return list of words as best effort segmentation check
'''
self.pns.COUNTS = self.counter
segs = self.pns.segment(token)
return segs
def updatefreq(self, C_corpus):
'''
import corpus frequency counters
build interpolation intervals
convert corpus frequencies
update english frequencies
set counter to updated values
'''
C_english = self.counter
# range variables
eng_max = C_english.most_common()[0][1]+1
eng_min = C_english.most_common()[-1][1]
cor_max = C_corpus.most_common()[0][1]+1
cor_min = C_corpus.most_common()[-1][1]
# interpolation intervals
eng_int = np.linspace(eng_min,eng_max,1000)
cor_int = np.linspace(cor_min,cor_max,1000)
# interpolator
def IntFreq(token):
'''
import token
lookup token frequency in corpus
interpolate frequency in english
round float frequency into integer
assign updated counter
'''
cf = C_corpus[token]
idx = np.max(np.where(cor_int<=cf))
c_base = cor_int[idx]
e_base = eng_int[idx]
cspan = cor_int[idx+1]-c_base
espan = eng_int[idx+1]-e_base
f_intp = (cf - c_base)*(espan/cspan) + e_base
return int(round(f_intp,0))
# sort entries
tok_updates = [token for token in C_corpus if token in C_english]
# update frequencies
C_eng = C_english.copy()
for token in tok_updates:
tf = IntFreq(token)
C_eng[token] = tf
self.counter = C_eng
print('Frequency dictionary updated.')
def addwords(self, C_corpus, new_word_list):
'''
import english and corpus frequency counters
import words not in English Dict but in Corpus
build interpolation intervals
convert corpus frequencies
add new words to english dict
set counter to updated values
'''
C_english = self.counter
# range variables
eng_max = C_english.most_common()[0][1]+1
eng_min = C_english.most_common()[-1][1]
cor_max = C_corpus.most_common()[0][1]+1
cor_min = C_corpus.most_common()[-1][1]
# interpolation intervals
eng_int = np.linspace(eng_min,eng_max,1000)
cor_int = np.linspace(cor_min,cor_max,1000)
# interpolator
def IntFreq(token):
'''
import token
lookup token frequency in corpus
interpolate frequency in english
round float frequency into integer
return english frequency
'''
cf = C_corpus[token]
idx = np.max(np.where(cor_int<=cf))
c_base = cor_int[idx]
e_base = eng_int[idx]
cspan = cor_int[idx+1]-c_base
espan = eng_int[idx+1]-e_base
f_intp = (cf - c_base)*(espan/cspan) + e_base
return int(round(f_intp,0))
# input new words
C_eng = C_english.copy()
news = [word for word in new_word_list if word in C_corpus]
for word in news:
tf = IntFreq(word)
C_eng[word] = tf
self.counter = C_eng
print('New words added to frequency dictionary.')
def savefreqdict(self, filename):
'''
import filename
format counter as frequency dictionary
save the Counter as frequency dictionary
'''
C = self.counter
#Format Counter for Output
Cout = sorted(C.items(), key=lambda pair: pair[1], reverse=True)
# Save for later re-run from start in place of generic dictionary
with open(filename, encoding='utf-8', mode='w') as f:
for tag, count in Cout:
f.write('{} {}\n'.format(tag, count))
print('Spell Check Counter saved to ' + filename)
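# Illustrative usage sketch (assumes the default frequency dictionary file
# exists at the path used by __init__):
#
#   helper = Spellhelper()
#   helper.spellcheck('speling')     # -> best-effort correction, e.g. 'spelling'
#   helper.segcheck('thisisatest')   # -> best-effort word segmentation
#   helper.savefreqdict('my_dict.txt')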
| 31.312821 | 107 | 0.557648 | 726 | 6,106 | 4.548209 | 0.263085 | 0.063295 | 0.014537 | 0.021805 | 0.425197 | 0.415506 | 0.317989 | 0.317989 | 0.317989 | 0.317989 | 0 | 0.015322 | 0.358664 | 6,106 | 194 | 108 | 31.474227 | 0.827886 | 0.290043 | 0 | 0.475 | 0 | 0 | 0.0496 | 0.010747 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1125 | false | 0 | 0.05 | 0 | 0.2375 | 0.0375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38293665d011cbbd211e0920b3368185cc35abc1 | 1,362 | py | Python | UNIOA/algs/unioaCSA.py | Huilin-Li/UNIOA | 0f2527eac955a7193406775e5b71fab35f064422 | [
"MIT"
] | null | null | null | UNIOA/algs/unioaCSA.py | Huilin-Li/UNIOA | 0f2527eac955a7193406775e5b71fab35f064422 | [
"MIT"
] | null | null | null | UNIOA/algs/unioaCSA.py | Huilin-Li/UNIOA | 0f2527eac955a7193406775e5b71fab35f064422 | [
"MIT"
] | null | null | null | from UNIOA.NatureOpt import NatureOpt
# This class implements CSA-Optimizer in the UNIOA framework.
# E is sync
# G is sync
class CSA_UNIOA(NatureOpt):
def __init__(self, func ,hyperparams_set, budget_factor=1e4):
super().__init__(func, budget_factor)
self.M = hyperparams_set.get('popsize',50)
self.w1 = hyperparams_set.get('w1', 0.1)
self.w2 = hyperparams_set.get('w2', 2)
def __call__(self):
t = 0 # iteration counter
X = self.Init_X.Init_X(M=self.M, n=self.n, lb_x=self.lb_x, ub_x=self.ub_x)
X_Fit = self.Evaluate_X(X=X) # Evaluate X
X_p, X_p_Fit = self.Init_Delta_X.Personal_best(new_X=X,new_X_Fit=X_Fit)
# Optimizing
while not self.stop:
# Optimization step: generate temp_X(t+1) via the CSA update
temp_X = self.Opt_X.csa(old_X=X, X_p=X_p, w1=self.w1, w2=self.w2, lb_x=self.lb_x, ub_x=self.ub_x)
# Evaluate
temp_X_Fit = self.Evaluate_X(X=temp_X)
# Selection
new_X, new_X_Fit = self.Selection.same_type(temp_X=temp_X, temp_X_Fit=temp_X_Fit)
# ----------------------------
t = t + 1
##########################
X_p, X_p_Fit = self.Opt_Delta_X.Personal_best(new_X=new_X, new_X_Fit=new_X_Fit, old_X_p=X_p, old_X_p_Fit=X_p_Fit)
X = new_X
X_Fit = new_X_Fit
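# Illustrative usage sketch (`sphere` is a hypothetical objective, and the
# exact call signature of the NatureOpt base class is assumed from __init__):
#
#   def sphere(x):
#       return (x ** 2).sum()
#   opt = CSA_UNIOA(sphere, {'popsize': 50, 'w1': 0.1, 'w2': 2})
#   opt()  # runs the CSA loop until the evaluation budget is exhausted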
| 34.923077 | 125 | 0.589574 | 230 | 1,362 | 3.121739 | 0.265217 | 0.061281 | 0.034819 | 0.022284 | 0.261838 | 0.197772 | 0.05571 | 0.05571 | 0.05571 | 0.05571 | 0 | 0.017982 | 0.265051 | 1,362 | 38 | 126 | 35.842105 | 0.699301 | 0.135095 | 0 | 0 | 0 | 0 | 0.009649 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.05 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
382bc908de3c182c09ebf8232f4505e77a202c6f | 417 | py | Python | code/genric/torch_ext/segment_indirect.py | wendazhou/reversible-inductive-construction | 14815d1b5ef5a35a569c0793888bc5548acd64be | [
"MIT"
] | 31 | 2019-07-22T23:34:43.000Z | 2021-09-10T14:28:53.000Z | code/genric/torch_ext/segment_indirect.py | wendazhou/reversible-inductive-construction | 14815d1b5ef5a35a569c0793888bc5548acd64be | [
"MIT"
] | null | null | null | code/genric/torch_ext/segment_indirect.py | wendazhou/reversible-inductive-construction | 14815d1b5ef5a35a569c0793888bc5548acd64be | [
"MIT"
] | 3 | 2019-12-03T14:12:45.000Z | 2020-07-17T14:50:18.000Z | import torch
def segment_index_add_python(values, scopes, indices, out=None):
if out is None:
out = values.new_zeros([scopes.shape[0]] + list(values.shape[1:]))
scopes = scopes.long()
values_dup = values.index_select(0, indices)
idx_global = torch.repeat_interleave(scopes[:, 1])
out.index_add_(0, idx_global, values_dup)
return out
segment_index_add = segment_index_add_python
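# Illustrative sketch: sum rows of `values` selected by `indices` into
# per-segment buckets, assuming scopes[:, 1] holds each segment's length (as
# implied by the repeat_interleave call above).
#
#   values = torch.randn(5, 4)
#   scopes = torch.tensor([[0, 2], [2, 3]])   # two segments of sizes 2 and 3
#   indices = torch.tensor([0, 1, 2, 3, 4])
#   out = segment_index_add(values, scopes, indices)  # shape [2, 4]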
| 23.166667 | 74 | 0.709832 | 61 | 417 | 4.57377 | 0.459016 | 0.114695 | 0.16129 | 0.150538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014577 | 0.177458 | 417 | 17 | 75 | 24.529412 | 0.798834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
382cc28dba3c76c364eeb160a5426a562bfbf3bf | 1,580 | py | Python | bot/conversations/statistics/graphs.py | borissimkin/moneykeeper-bot | 45f7ed92be187db71d28c5326a5b62cb587c88bf | [
"MIT"
] | 2 | 2021-08-04T08:04:31.000Z | 2022-01-21T13:00:28.000Z | bot/conversations/statistics/graphs.py | borissimkin/moneykeeper-bot | 45f7ed92be187db71d28c5326a5b62cb587c88bf | [
"MIT"
] | 2 | 2021-06-08T21:07:54.000Z | 2021-09-08T01:46:50.000Z | bot/conversations/statistics/graphs.py | borissimkin/moneykeeper-bot | 45f7ed92be187db71d28c5326a5b62cb587c88bf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import tempfile
import matplotlib.pyplot as plt
import numpy as np
from bot.conversations.statistics.type_transacation_graph import TypeTransaction
from bot.conversations.statistics.utils import remove_emoji
def make_pie_graph(data, labels, title, type_transactions: TypeTransaction):
labels = [remove_emoji(label) for label in labels]
explode = [0.01 for _ in labels]
data = [int(x) for x in data]
sum_data = sum(data)
patches, texts, _ = plt.pie(data, labels=labels, startangle=90, pctdistance=0.85, explode=explode,
autopct=lambda x: '{}р.'.format(int(sum_data / 100 * x)))
centre_circle = plt.Circle((0, 0), 0.70, fc='white')
make_center_text(type_transactions, sum_data)
fig = plt.gcf()
plt.title(title, fontsize=20)
fig.gca().add_artist(centre_circle)
# Equal aspect ratio ensures that pie is drawn as a circle
plt.axis('equal')
plt.tight_layout()
path_to_graph = os.path.join(tempfile.mkdtemp(), 'figure.jpg')
plt.savefig(path_to_graph)
plt.close()
return path_to_graph
def make_center_text(type_transactions, amount_money):
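# The UI strings below are Russian: 'Расходы' = "Expenses",
# 'Доходы' = "Income", 'Неопределено' = "Undefined"; 'р.' abbreviates rubles.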
if type_transactions == type_transactions.CONSUMPTION:
text = 'Расходы\n{} р.'.format(amount_money)
color = 'red'
elif type_transactions == type_transactions.EARNING:
text = 'Доходы\n{} р.'.format(amount_money)
color = 'green'
else:
text = 'Неопределено'
color = 'black'
plt.annotate(text, xy=(0, 0), fontsize=18, ha="center", color=color)
| 32.916667 | 102 | 0.678481 | 216 | 1,580 | 4.800926 | 0.476852 | 0.108004 | 0.031823 | 0.057859 | 0.104147 | 0.046287 | 0 | 0 | 0 | 0 | 0 | 0.018239 | 0.201899 | 1,580 | 47 | 103 | 33.617021 | 0.804124 | 0.049367 | 0 | 0 | 0 | 0 | 0.054813 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.171429 | 0 | 0.257143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
382d17974be4712bef8cb069dbd3f2c26b182c0d | 2,182 | py | Python | weibo_search.py | py1120/weibo_wordcloud | 1e182709cab1dbf632eaf6746d3d4859f13bd7ba | [
"MIT"
] | 197 | 2018-01-24T05:15:28.000Z | 2022-03-24T15:09:24.000Z | weibo_search.py | MonteLex/weibo_wordcloud | 1e182709cab1dbf632eaf6746d3d4859f13bd7ba | [
"MIT"
] | 1 | 2020-06-15T17:27:38.000Z | 2020-06-15T17:27:38.000Z | weibo_search.py | MonteLex/weibo_wordcloud | 1e182709cab1dbf632eaf6746d3d4859f13bd7ba | [
"MIT"
] | 71 | 2018-01-25T03:35:08.000Z | 2022-03-01T06:31:29.000Z | # coding: utf-8
import re
import json
import requests
# Scrape a small amount of data via m.weibo.cn; no login/authentication needed
url_template = "https://m.weibo.cn/api/container/getIndex?type=wb&queryVal={}&containerid=100103type=2%26q%3D{}&page={}"
def clean_text(text):
"""清除文本中的标签等信息"""
dr = re.compile(r'(<)[^>]+>', re.S)
dd = dr.sub('', text)
dr = re.compile(r'#[^#]+#', re.S)
dd = dr.sub('', dd)
dr = re.compile(r'@[^ ]+ ', re.S)
dd = dr.sub('', dd)
return dd.strip()
def fetch_data(query_val, page_id):
"""抓取关键词某一页的数据"""
resp = requests.get(url_template.format(query_val, query_val, page_id))
card_group = json.loads(resp.text)['data']['cards'][0]['card_group']
print('url:', resp.url, ' --- 条数:', len(card_group))
mblogs = [] # 保存处理过的微博
for card in card_group:
mblog = card['mblog']
blog = {'mid': mblog['id'], # post id
'text': clean_text(mblog['text']), # text content
'userid': str(mblog['user']['id']), # user id
'username': mblog['user']['screen_name'], # username
'reposts_count': mblog['reposts_count'], # reposts
'comments_count': mblog['comments_count'], # comments
'attitudes_count': mblog['attitudes_count'] # likes
}
mblogs.append(blog)
return mblogs
def remove_duplication(mblogs):
"""根据微博的id对微博进行去重"""
mid_set = {mblogs[0]['mid']}
new_blogs = []
for blog in mblogs[1:]:
if blog['mid'] not in mid_set:
new_blogs.append(blog)
mid_set.add(blog['mid'])
return new_blogs
def fetch_pages(query_val, page_num):
"""抓取关键词多页的数据"""
mblogs = []
for page_id in range(1 + page_num + 1):
try:
mblogs.extend(fetch_data(query_val, page_id))
except Exception as e:
print(e)
print("去重前:", len(mblogs))
mblogs = remove_duplication(mblogs)
print("去重后:", len(mblogs))
# 保存到 result.json 文件中
fp = open('result_{}.json'.format(query_val), 'w', encoding='utf-8')
json.dump(mblogs, fp, ensure_ascii=False, indent=4)
print("已保存至 result_{}.json".format(query_val))
if __name__ == '__main__':
fetch_pages('谷歌', 50)
| 29.093333 | 120 | 0.572411 | 285 | 2,182 | 4.203509 | 0.424561 | 0.046745 | 0.040067 | 0.03005 | 0.136895 | 0.096828 | 0.058431 | 0.058431 | 0.058431 | 0.040067 | 0 | 0.012173 | 0.247021 | 2,182 | 74 | 121 | 29.486486 | 0.716981 | 0.067369 | 0 | 0.076923 | 0 | 0.019231 | 0.179641 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.057692 | 0 | 0.192308 | 0.096154 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
382d234f18f176eef58325d0092f70dabe6db06d | 5,482 | py | Python | stix_shifter_modules/arcsight/stix_transmission/api_client.py | priti-patil/stix-shifter | 26954598fb79dde4506987388592ec391ff8a10b | [
"Apache-2.0"
] | 33 | 2018-05-25T17:07:28.000Z | 2019-09-30T10:08:53.000Z | stix_shifter_modules/arcsight/stix_transmission/api_client.py | priti-patil/stix-shifter | 26954598fb79dde4506987388592ec391ff8a10b | [
"Apache-2.0"
] | 54 | 2018-06-01T18:17:24.000Z | 2019-09-30T18:36:15.000Z | stix_shifter_modules/arcsight/stix_transmission/api_client.py | subbyte/stix-shifter | 36d71c172a5fc5b97d872e623753b0dd1bf4fe6c | [
"Apache-2.0"
] | 37 | 2018-07-24T13:29:46.000Z | 2019-09-29T19:06:27.000Z | import time
from datetime import datetime, timedelta
import json
from stix_shifter_utils.stix_transmission.utils.RestApiClient import RestApiClient
from stix_shifter_utils.utils.error_response import ErrorResponder
class APIClient:
TOKEN_ENDPOINT = 'core-service/rest/LoginService/login'
STATUS_ENDPOINT = 'server/search/status'
QUERY_ENDPOINT = 'server/search'
RESULT_ENDPOINT = 'server/search/events'
DELETE_ENDPOINT = 'server/search/close'
def __init__(self, connection, configuration):
self.connector = __name__.split('.')[1]
self.auth = configuration.get('auth')
headers = {'Accept': 'application/json'}
self.client = RestApiClient(connection.get('host'),
connection.get('port'),
headers,
cert_verify=connection.get('selfSignedCert', True)
)
def ping_data_source(self):
data, headers = dict(), dict()
data['search_session_id'] = int(round(time.time() * 1000))
data['user_session_id'] = self.get_user_session_id()
data['start_time'] = self.get_current_time()['start_time']
data['end_time'] = self.get_current_time()['end_time']
headers['Content-Type'] = 'application/json'
headers['Accept-Charset'] = 'utf-8'
return self.client.call_api(self.QUERY_ENDPOINT, 'POST', headers, data=json.dumps(data))
def create_search(self, query_expression):
return_obj = dict()
auth = dict()
auth['search_session_id'] = int(round(time.time() * 1000))
auth['user_session_id'] = self.get_user_session_id()
try:
query = json.loads(query_expression)
query.update(auth)
headers = {'Content-Type': 'application/json', 'Accept-Charset': 'utf-8'}
response = self.client.call_api(self.QUERY_ENDPOINT, 'POST', headers, data=json.dumps(query))
raw_response = response.read()
response_code = response.code
if 199 < response_code < 300:
response_dict = json.loads(raw_response)
if response_dict.get('sessionId'):
return_obj['success'] = True
return_obj['search_id'] = str(auth['search_session_id']) + ':' + str(auth['user_session_id'])
# arcsight logger error codes - currently unavailable state
elif response_code in [500, 503]:
response_string = raw_response.decode()
ErrorResponder.fill_error(return_obj, response_string, ['message'], connector=self.connector)
elif isinstance(json.loads(raw_response), dict):
response_error = json.loads(raw_response)
response_dict = response_error['errors'][0]
ErrorResponder.fill_error(return_obj, response_dict, ['message'], connector=self.connector)
else:
raise Exception(raw_response)
return return_obj
except Exception as err:
raise err
def get_search_status(self, search_session_id, user_session_id):
headers, params = dict(), dict()
params['search_session_id'] = int(search_session_id)
params['user_session_id'] = user_session_id
headers['Content-Type'] = 'application/json'
headers['Accept-Charset'] = 'utf-8'
return self.client.call_api(self.STATUS_ENDPOINT, 'POST', headers, data=json.dumps(params))
def get_search_results(self, search_session_id, user_session_id, range_start=None, range_end=None):
headers, params = dict(), dict()
params['search_session_id'] = int(search_session_id)
params['user_session_id'] = user_session_id
params['offset'] = int(range_start)
params['length'] = int(range_end)
headers['Content-Type'] = 'application/json'
headers['Accept-Charset'] = 'utf-8'
return self.client.call_api(self.RESULT_ENDPOINT, 'POST', headers, data=json.dumps(params))
def delete_search(self, search_session_id, user_session_id):
headers, params = dict(), dict()
params['search_session_id'] = int(search_session_id)
params['user_session_id'] = user_session_id
headers['Content-Type'] = 'application/json'
headers['Accept-Charset'] = 'utf-8'
return self.client.call_api(self.DELETE_ENDPOINT, 'POST', headers, data=json.dumps(params))
def get_user_session_id(self):
try:
response = self.client.call_api(self.TOKEN_ENDPOINT, 'POST', data=self.auth)
if response.code == 200:
response_text = json.loads(response.read())
token = response_text['log.loginResponse']['log.return']
elif response.read().decode("utf-8") == '':
return_dict = 'Request error or authentication failure.'
raise Exception(return_dict)
else:
raise Exception(response)
return token
except Exception as err:
raise err
@staticmethod
def get_current_time():
ping_time = dict()
end_time = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
start_time = (datetime.utcnow() - timedelta(minutes=5)).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
ping_time['start_time'] = start_time
ping_time['end_time'] = end_time
return ping_time
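# Illustrative usage sketch (host/port/credentials are placeholders, and the
# exact field names inside 'auth' depend on the ArcSight Logger login service):
#
#   connection = {'host': 'logger.example.com', 'port': '443', 'selfSignedCert': False}
#   configuration = {'auth': {'login': 'user', 'password': 'secret'}}
#   client = APIClient(connection, configuration)
#   response = client.ping_data_source()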
| 46.457627 | 113 | 0.620394 | 632 | 5,482 | 5.139241 | 0.221519 | 0.074815 | 0.060037 | 0.031404 | 0.416872 | 0.393165 | 0.339901 | 0.330049 | 0.275554 | 0.248461 | 0 | 0.008339 | 0.256293 | 5,482 | 117 | 114 | 46.854701 | 0.788325 | 0.010398 | 0 | 0.242718 | 0 | 0 | 0.153789 | 0.006638 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07767 | false | 0 | 0.048544 | 0 | 0.252427 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
382d7124361bb294df0980d102709ff2f1a3f049 | 5,010 | py | Python | app/routers/v1/api_listeners.py | d3vzer0/reternal-backend | aeeb613c820759212e7aef9150738a66b2882d50 | [
"MIT"
] | 6 | 2019-01-01T23:38:12.000Z | 2021-07-27T03:43:11.000Z | app/routers/v1/api_listeners.py | d3vzer0/reternal-backend | aeeb613c820759212e7aef9150738a66b2882d50 | [
"MIT"
] | 1 | 2020-08-02T00:21:41.000Z | 2020-08-02T00:21:41.000Z | app/routers/v1/api_listeners.py | d3vzer0/reternal-backend | aeeb613c820759212e7aef9150738a66b2882d50 | [
"MIT"
] | 1 | 2021-07-27T03:43:24.000Z | 2021-07-27T03:43:24.000Z | from app.utils import celery
from app.utils.depends import validate_worker, decode_token, validate_token
from fastapi import Depends, Body, APIRouter, Security
from app.schemas.listeners import ListenersOut
from app.schemas.generic import CeleryTask
from typing import List, Dict
from celery import Signature
from celery.result import AsyncResult
router = APIRouter()
# Get active listeners and currently configured options
@router.get('/listeners/{worker_name}', response_model=CeleryTask, dependencies=[Security(validate_token)])
async def get_listeners(worker_name: str, context: dict = Depends(validate_worker), current_user: dict = Depends(decode_token)):
''' Get configuration options for all potential listeners by worker name / c2 framework '''
get_listeners = celery.send_task(context[worker_name]['listeners']['get'], chain=[
Signature('api.websocket.result.transmit', kwargs={
'user': current_user['sub'],
'task_type': 'getListeners'
})
])
return {'task': str(get_listeners)}
@router.get('/state/listeners/get/{job_uuid}', response_model=List[ListenersOut], dependencies=[Security(validate_token)])
async def get_listeners_result(job_uuid: str):
''' Get configuration options for all potential listeners by worker name / c2 framework '''
get_workers = AsyncResult(id=job_uuid, app=celery)
workers_result = get_workers.get() if get_workers.state == 'SUCCESS' else None
print(workers_result['response'])
return workers_result['response']
# Enable a new listener
@router.post('/listeners/{worker_name}/{listener_type}', response_model=CeleryTask, dependencies=[Security(validate_token, scopes=['write:integrations'])])
async def create_listener(worker_name: str, listener_type: str, listener_opts: dict = Body(...),
context: dict = Depends(validate_worker), current_user: dict = Depends(decode_token)):
''' Enable listener for a specific c2 framework '''
create_listener = celery.send_task(context[worker_name]['listeners']['create'],
args=(listener_type, listener_opts,), chain=[
Signature('api.websocket.result.transmit', kwargs={
'user': current_user['sub'],
'task_type': 'createListener'
})
])
return {'task': str(create_listener)}
@router.get('/state/listeners/create/{job_uuid}', dependencies=[Security(validate_token)])
async def create_listener_result(job_uuid: str):
''' Enable listener for a specific c2 framework '''
get_workers = AsyncResult(id=job_uuid, app=celery)
workers_result = get_workers.get() if get_workers.state == 'SUCCESS' else None
return workers_result['response']
# Disable a running listener
@router.delete('/listener/{worker_name}/{listener_name}', response_model=CeleryTask, dependencies=[Security(validate_token, scopes=['write:integrations'])])
async def delete_listener(worker_name: str, listener_name: str, context: dict = Depends(validate_worker), current_user: dict = Depends(decode_token)):
''' Disable enabled listener option by c2 framework and active listener name '''
delete_listener = celery.send_task(context[worker_name]['listeners']['delete'],
args=(listener_name,), chain=[
Signature('api.websocket.result.transmit', kwargs={
'user': current_user['sub'],
'task_type': 'deleteListener'
})
])
return {'task': str(delete_listener)}
@router.get('/state/listener/delete/{job_uuid}', dependencies=[Security(validate_token, scopes=['write:integrations'])])
async def delete_listener_result(job_uuid: str):
''' Disable enabled listener option by c2 framework and active listener name '''
get_workers = AsyncResult(id=job_uuid, app=celery)
workers_result = get_workers.get() if get_workers.state == 'SUCCESS' else None
return workers_result['response']
# Get options available for all listeners
@router.get('/listeneroptions/{worker_name}', response_model=CeleryTask, dependencies=[Security(validate_token)])
async def get_listener_options(worker_name: str, context: dict = Depends(validate_worker), current_user: dict = Depends(decode_token)):
''' Get all available listeners to run by specific c2 framework '''
get_listeners = celery.send_task(context[worker_name]['listeners']['options'], chain=[
Signature('api.websocket.result.transmit', kwargs={
'user': current_user['sub'],
'task_type': 'getListenerOptions'
})
])
return {'task': str(get_listeners)}
@router.get('/state/listeneroptions/get/{job_uuid}', response_model=Dict[str, Dict], dependencies=[Security(validate_token)])
async def get_listener_options_result(job_uuid: str):
''' Get all available listeners to run by specific c2 framework '''
get_workers = AsyncResult(id=job_uuid, app=celery)
workers_result = get_workers.get() if get_workers.state == 'SUCCESS' else None
return workers_result['response']
| 53.297872 | 156 | 0.719561 | 606 | 5,010 | 5.757426 | 0.155116 | 0.040126 | 0.064202 | 0.075666 | 0.730582 | 0.682431 | 0.666667 | 0.650903 | 0.582115 | 0.560906 | 0 | 0.001892 | 0.155888 | 5,010 | 93 | 157 | 53.870968 | 0.823126 | 0.028343 | 0 | 0.434783 | 0 | 0 | 0.163827 | 0.089615 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.115942 | 0 | 0.231884 | 0.014493 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
382e3eda425327d2b534d7a2a0cc8ec6e1f59ecf | 2,827 | py | Python | metrics/classification_metrics.py | Karthik-Ragunath/DDU | b9daae9304bdeb222857884ef8cb3b6b3d004d33 | [
"MIT"
] | 43 | 2021-05-20T14:07:53.000Z | 2022-03-23T12:58:26.000Z | metrics/classification_metrics.py | Karthik-Ragunath/DDU | b9daae9304bdeb222857884ef8cb3b6b3d004d33 | [
"MIT"
] | 3 | 2021-09-19T20:49:21.000Z | 2022-03-07T10:25:47.000Z | metrics/classification_metrics.py | Karthik-Ragunath/DDU | b9daae9304bdeb222857884ef8cb3b6b3d004d33 | [
"MIT"
] | 8 | 2021-06-26T15:28:45.000Z | 2022-02-19T02:07:05.000Z | """
Metrics to measure classification performance
"""
import torch
from torch import nn
from torch.nn import functional as F
from utils.ensemble_utils import ensemble_forward_pass
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
def get_logits_labels(model, data_loader, device):
"""
Utility function to get logits and labels.
"""
model.eval()
logits = []
labels = []
with torch.no_grad():
for data, label in data_loader:
data = data.to(device)
label = label.to(device)
logit = model(data)
logits.append(logit)
labels.append(label)
logits = torch.cat(logits, dim=0)
labels = torch.cat(labels, dim=0)
return logits, labels
def test_classification_net_softmax(softmax_prob, labels):
"""
This function reports classification accuracy and confusion matrix given softmax vectors and
labels from a model.
"""
labels_list = []
predictions_list = []
confidence_vals_list = []
confidence_vals, predictions = torch.max(softmax_prob, dim=1)
labels_list.extend(labels.cpu().numpy())
predictions_list.extend(predictions.cpu().numpy())
confidence_vals_list.extend(confidence_vals.cpu().numpy())
accuracy = accuracy_score(labels_list, predictions_list)
return (
confusion_matrix(labels_list, predictions_list),
accuracy,
labels_list,
predictions_list,
confidence_vals_list,
)
def test_classification_net_logits(logits, labels):
"""
This function reports classification accuracy and confusion matrix given logits and labels
from a model.
"""
softmax_prob = F.softmax(logits, dim=1)
return test_classification_net_softmax(softmax_prob, labels)
def test_classification_net(model, data_loader, device):
"""
This function reports classification accuracy and confusion matrix over a dataset.
"""
logits, labels = get_logits_labels(model, data_loader, device)
return test_classification_net_logits(logits, labels)
def test_classification_net_ensemble(model_ensemble, data_loader, device):
"""
This function reports classification accuracy and confusion matrix over a dataset
for a deep ensemble.
"""
for model in model_ensemble:
model.eval()
softmax_prob = []
labels = []
with torch.no_grad():
for data, label in data_loader:
data = data.to(device)
label = label.to(device)
softmax, _, _ = ensemble_forward_pass(model_ensemble, data)
softmax_prob.append(softmax)
labels.append(label)
softmax_prob = torch.cat(softmax_prob, dim=0)
labels = torch.cat(labels, dim=0)
return test_classification_net_softmax(softmax_prob, labels)
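# Illustrative sketch (synthetic logits, not part of the original module):
#
#   logits = torch.tensor([[2.0, 0.1], [0.2, 1.5]])
#   labels = torch.tensor([0, 1])
#   conf_mat, acc, y_true, y_pred, confs = test_classification_net_logits(logits, labels)
#   # acc == 1.0 here: both argmax predictions match their labels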
| 29.447917 | 96 | 0.691546 | 342 | 2,827 | 5.502924 | 0.192982 | 0.052604 | 0.078108 | 0.05101 | 0.534538 | 0.501594 | 0.442614 | 0.337938 | 0.286929 | 0.250797 | 0 | 0.002735 | 0.223912 | 2,827 | 95 | 97 | 29.757895 | 0.855059 | 0.17439 | 0 | 0.315789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087719 | false | 0.035088 | 0.105263 | 0 | 0.280702 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
382e6e3b04b4482534ee076317c8dbc1adf788e7 | 1,351 | py | Python | CH04/coin_flip_streaks.py | kaifee-haque/Automate-the-Boring-Stuff-Solutions | 5acbf9a397dc4aa000ebd9e8f6d79d0ee5287fef | [
"MIT"
] | null | null | null | CH04/coin_flip_streaks.py | kaifee-haque/Automate-the-Boring-Stuff-Solutions | 5acbf9a397dc4aa000ebd9e8f6d79d0ee5287fef | [
"MIT"
] | null | null | null | CH04/coin_flip_streaks.py | kaifee-haque/Automate-the-Boring-Stuff-Solutions | 5acbf9a397dc4aa000ebd9e8f6d79d0ee5287fef | [
"MIT"
] | null | null | null | #! python3
"""Using a given number of trials, calculates the probability that a streak of
6 heads or 6 tails will occur in a series of 100 coin tosses."""
import random
def streaks_in_list():
"""Determines whether or not a streak of 6 heads of 6 tails occurs in
a given series.
returns:
A boolean indicating the presence of a streak.
"""
flip_list = []
for i in range(100):
flip_list.append(random.choice(["H", "T"]))
# For each item in the list of tosses, up until the 94th item, determines
# whether or not the next 5 tosses produced the same result (making a streak).
for i in range(95):
    for j in range(6):
        if flip_list[i + j] != flip_list[i]:
            break
        if j == 5:
            return True
return False
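# With the streak check above, the estimate converges to the true probability
# of a run of 6 or more identical results in 100 fair tosses, roughly 0.8.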
def percent_of_streaks(trials):
"""Calls the streaks_in_list function for each trial and determines the
probability of a streak occurring.
Args:
trials: An integer representing the number of trials to perform.
Returns:
The probability of a streak of 6 occurring in a series of 100 coin tosses.
"""
streaks = 0
for i in range(trials):
if streaks_in_list() == True:
streaks += 1
return streaks / trials
print(f"Chance of streak: {percent_of_streaks(10000) * 100}%")
| 27.02 | 82 | 0.632124 | 203 | 1,351 | 4.137931 | 0.389163 | 0.05 | 0.032143 | 0.035714 | 0.147619 | 0.057143 | 0.057143 | 0 | 0 | 0 | 0 | 0.033438 | 0.291636 | 1,351 | 49 | 83 | 27.571429 | 0.844305 | 0.523316 | 0 | 0 | 0 | 0 | 0.092466 | 0.044521 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.055556 | 0 | 0.333333 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38319015f478f01fd850ca28cd8def2b6dfa7969 | 2,287 | py | Python | testdsc.py | aligirayhanozbay/pydscpack | 48d1df0775e4b063cf387b3884b8b463b3660e89 | [
"BSD-3-Clause"
] | null | null | null | testdsc.py | aligirayhanozbay/pydscpack | 48d1df0775e4b063cf387b3884b8b463b3660e89 | [
"BSD-3-Clause"
] | null | null | null | testdsc.py | aligirayhanozbay/pydscpack | 48d1df0775e4b063cf387b3884b8b463b3660e89 | [
"BSD-3-Clause"
] | null | null | null | import pydscpack
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing

# python3 -m numpy.f2py -c Src/Dp/src.f -m dsc


def complexify(x):
    return np.array([complex(zs) for zs in x])


def check_laplacian(c, spacing, u, *wdsc_args):
    spacing = np.real(spacing)
    if not isinstance(c, np.ndarray):
        c = np.array(c)
    if not (c.dtype == np.complex128 or c.dtype == np.complex64):
        # np.complex was removed from NumPy; build a complex scalar instead
        c = np.complex128(complex(*c))
    zcoords = np.array([c, c + spacing, c - spacing, c + 1j * spacing, c - 1j * spacing], dtype=c.dtype)
    # map_wdsc is assumed to be provided by the DSC wrapper; it is not defined in this script
    wcoords = map_wdsc(zcoords, u, *wdsc_args)
    wnorm = np.real(wcoords * np.conj(wcoords))
    soln = 1 - (1 / np.log(u)) * np.log(wnorm ** 0.5)
    laplacian = (np.sum(soln[1:]) - 4 * soln[0]) / (spacing ** 2)
    return laplacian
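
# check_laplacian applies the standard 5-point stencil in the mapped domain.
# A self-contained sketch of the same idea (an assumed illustration, not part
# of the original script):
def five_point_laplacian(f, c, h):
    """Approximate the Laplacian of f at complex point c with step h."""
    return (f(c + h) + f(c - h) + f(c + 1j * h) + f(c - 1j * h) - 4 * f(c)) / h ** 2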
if __name__ == '__main__':
    nptq = 64     # number of Gauss-Jacobi integration points
    tol = 1e-10   # tolerance for the iterative process
    iguess = 1    # initial guess type; see line 770 in src.f - not sure what this does yet.
    ishape = 0    # 0 for no vertices at infinity
    linearc = 1
    # outer_coords = 5*complexify(['1.5+1.5j', '-1.5+1.5j', '-1.5-1.5j', '1.5-1.5j'])  # coordinates of the outer polygon
    # inner_coords = -1.0*complexify(['0.5', '-0.5+0.5j', '-0.5-0.5j'])  # coordinates of the inner polygon
    q = np.sqrt(2)
    q = 0.25  # overrides the value above
    outer_coords = 1.5 * complexify(['1+j', '-1+j', '-1-j', '1-j'])  # coordinates of the outer polygon
    inner_coords = complexify([f'{q}+0.0j', f'0.0+{q}j', f'-{q}+0.0j', f'0.0-{q}j'])  # coordinates of the inner polygon

    amap = pydscpack.AnnulusMap(outer_coords, inner_coords)

    n_plotpts = (50, 200)
    r = np.linspace(amap.mapping_params['inner_radius'], 1.0 - (1e-5), n_plotpts[0])  # important to not evaluate map at r=1.0 (outer annulus ring)
    theta = np.linspace(0, 2 * np.pi, n_plotpts[1])
    a = np.exp(theta * 1j)
    # import pdb; pdb.set_trace()
    wplot = np.einsum('i,j->ij', r, a)
    wnorm = np.real(wplot * np.conj(wplot))
    wangle = np.angle(wplot)
    amap.plot_map('norm', 'argument', w=wplot, save_path='/tmp/radius_and_argument.png')

    zplot = np.array([0.65 + 0.10 * 1j, 0.75 + 0.1 * 1j, 0.65 - 0.10 * 1j, 0.75 - 0.1 * 1j])
    wplot = amap.backward_map(zplot)
    amap.plot_map(np.ones(zplot.shape), z=zplot, plot_type='scatter', save_path='/tmp/triangle_sensors_z.png')
| 38.762712 | 141 | 0.640577 | 406 | 2,287 | 3.524631 | 0.366995 | 0.006988 | 0.008386 | 0.013976 | 0.148148 | 0.109015 | 0.103424 | 0.048917 | 0.048917 | 0.034941 | 0 | 0.063034 | 0.18146 | 2,287 | 58 | 142 | 39.431034 | 0.701389 | 0.249235 | 0 | 0 | 0 | 0 | 0.087008 | 0.032334 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0.025 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38342d037b3b1d7b13402937a3a8f685102c37a8 | 1,852 | py | Python | test/TestLog.py | nrejack/redi | 51dea9a217d52fec278adba2fb794fa7620f9ba7 | [
"BSD-3-Clause"
] | 7 | 2015-07-16T03:03:24.000Z | 2021-02-05T14:26:32.000Z | test/TestLog.py | nrejack/redi | 51dea9a217d52fec278adba2fb794fa7620f9ba7 | [
"BSD-3-Clause"
] | 87 | 2015-01-08T16:21:47.000Z | 2021-12-22T14:37:53.000Z | test/TestLog.py | nrejack/redi | 51dea9a217d52fec278adba2fb794fa7620f9ba7 | [
"BSD-3-Clause"
] | 15 | 2015-04-01T19:05:55.000Z | 2021-03-02T22:23:16.000Z | #!/usr/bin/env python
# Contributors:
# Christopher P. Barnes <senrabc@gmail.com>
# Andrei Sura: github.com/indera
# Mohan Das Katragadda <mohan.das142@gmail.com>
# Philip Chase <philipbchase@gmail.com>
# Ruchi Vivek Desai <ruchivdesai@gmail.com>
# Taeber Rapczak <taeber@ufl.edu>
# Nicholas Rejack <nrejack@ufl.edu>
# Josh Hanna <josh@hanna.io>
# Copyright (c) 2014-2015, University of Florida
# All rights reserved.
#
# Distributed under the BSD 3-Clause License
# For full text of the BSD 3-Clause License see http://opensource.org/licenses/BSD-3-Clause
'''
@author : Radha
email : rkandula@ufl.edu

This file tests whether `configure_logging`
properly creates a log file.
Note: the created file is destroyed at the end.
'''
import unittest
import os
import sys
from redi import redi

file_dir = os.path.dirname(os.path.realpath(__file__))
goal_dir = os.path.join(file_dir, "../")
proj_root = os.path.abspath(goal_dir) + '/'
DEFAULT_DATA_DIRECTORY = os.getcwd()


class TestLog(unittest.TestCase):

    def setUp(self):
        # initialize log file name
        self.file_name = proj_root + 'log/redi.log'

    def test_log(self):
        import os.path
        file_name = self.file_name
        sys.path.append('log')
        # remove any existing log file in log/ folder
        #if os.path.isfile(file_name):
        #    with open(file_name):
        #        print "here"
        #        os.remove(file_name)
        #print os.path.isfile(file_name)
        # call the configure logging function
        redi.configure_logging(DEFAULT_DATA_DIRECTORY)
        #print 'checking if log file was created: ' + file_name
        # check if the file is created
        assert os.path.isfile(file_name)
        # remove the file created through testing
        os.remove(file_name)
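
    def tearDown(self):
        # extra safety net (a sketch, not in the original test): remove the
        # log file if an assertion failure left it behind
        if os.path.isfile(self.file_name):
            os.remove(self.file_name)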

if __name__ == '__main__':
    unittest.main()
| 26.84058 | 91 | 0.679266 | 261 | 1,852 | 4.685824 | 0.498084 | 0.071954 | 0.02453 | 0.039248 | 0.114473 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009642 | 0.215983 | 1,852 | 68 | 92 | 27.235294 | 0.832645 | 0.562095 | 0 | 0 | 0 | 0 | 0.034704 | 0 | 0 | 0 | 0 | 0 | 0.05 | 1 | 0.1 | false | 0 | 0.25 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3835c2b5cedc5df64dfd33405ffd3071075936ab | 2,330 | py | Python | data/cbct2ct_dataset.py | CommanderCero/DJRevGAN | bb2a79acc951eb01c5427a7a08e82b1e40348a53 | [
"BSD-3-Clause"
] | 2 | 2022-03-16T15:13:30.000Z | 2022-03-16T15:13:37.000Z | data/cbct2ct_dataset.py | CommanderCero/DJRevGAN | bb2a79acc951eb01c5427a7a08e82b1e40348a53 | [
"BSD-3-Clause"
] | null | null | null | data/cbct2ct_dataset.py | CommanderCero/DJRevGAN | bb2a79acc951eb01c5427a7a08e82b1e40348a53 | [
"BSD-3-Clause"
] | null | null | null | import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
import torch
import numpy as np


def normalize(image, minimum, maximum):
    # https://stats.stackexchange.com/questions/178626/how-to-normalize-data-between-1-and-1
    image = (image - minimum) / (maximum - minimum)
    return 2 * image - 1
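
# Quick sanity check of the scaling (an illustrative addition, not part of the
# original module): the minimum maps to -1, the midpoint to 0, the maximum to 1.
assert normalize(-1000.0, -1000.0, 7000.0) == -1.0
assert normalize(3000.0, -1000.0, 7000.0) == 0.0
assert normalize(7000.0, -1000.0, 7000.0) == 1.0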

class cbct2ctDataset(BaseDataset):
    @staticmethod
    def modify_commandline_options(parser, is_train):
        return parser

    def initialize(self, opt):
        self.opt = opt
        self.root = opt.dataroot
        self.cbct_dir = os.path.join(opt.dataroot, opt.phase + 'CBCT')
        self.ct_dir = os.path.join(opt.dataroot, opt.phase + 'CT')

        self.cbct_paths = make_dataset(self.cbct_dir)
        self.ct_paths = make_dataset(self.ct_dir)
        # Do we really need to sort the paths?
        #self.cbct_paths = sorted(self.cbct_paths)
        #self.ct_paths = sorted(self.ct_paths)

        self.cbct_size = len(self.cbct_paths)
        self.ct_size = len(self.ct_paths)
        print('len(CBCT),len(CT)=', self.cbct_size, self.ct_size)
        #self.transform = get_transform(opt)

    def __getitem__(self, index):
        # Calculate indices
        index_cbct = index % self.cbct_size
        cbct_path = self.cbct_paths[index_cbct]
        if self.opt.serial_batches:
            index_ct = index % self.ct_size
        else:
            index_ct = random.randint(0, self.ct_size - 1)
        ct_path = self.ct_paths[index_ct]

        # Load data
        cbct_img = np.load(cbct_path)
        ct_img = np.load(ct_path)

        # Convert to tensors
        cbct_tensor = torch.from_numpy(cbct_img).view(1, *cbct_img.shape)
        ct_tensor = torch.from_numpy(ct_img).view(1, *ct_img.shape)

        # Normalize data
        # We didn't do this in the preprocessing step so we don't have to store our data as floats
        cbct_tensor = normalize(cbct_tensor, -1000.0, 7000.0)
        ct_tensor = normalize(ct_tensor, -1024.0, 3072.0)

        return {'A': cbct_tensor, 'B': ct_tensor,
                'A_paths': cbct_path, 'ct_paths': ct_path}

    def __len__(self):
        return max(self.cbct_size, self.ct_size)

    def name(self):
        return 'cbct2ctDataset'
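
# Example wiring (a sketch following the pix2pix/CycleGAN dataset API this
# class mirrors; the `opt` fields shown are assumptions):
#
#   dataset = cbct2ctDataset()
#   dataset.initialize(opt)  # needs opt.dataroot, opt.phase, opt.serial_batches
#   sample = dataset[0]      # {'A': CBCT tensor, 'B': CT tensor,
#                            #  'A_paths': ..., 'ct_paths': ...} -- note the
#                            # asymmetric 'ct_paths' key kept from the original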
| 33.285714 | 96 | 0.642489 | 329 | 2,330 | 4.334347 | 0.31003 | 0.050491 | 0.045582 | 0.018233 | 0.102384 | 0.075736 | 0.044881 | 0.044881 | 0 | 0 | 0 | 0.020761 | 0.255794 | 2,330 | 69 | 97 | 33.768116 | 0.801615 | 0.165236 | 0 | 0 | 0 | 0 | 0.028438 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.159091 | 0.068182 | 0.431818 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38388cbdae4f3ce7f30294f51a48514831405b20 | 1,222 | py | Python | savor_code/pipelines.py | tobias-fyi/savor_data | b06cbef44f23048fa01f9ad7fb36b8fa715d63b2 | [
"MIT"
] | null | null | null | savor_code/pipelines.py | tobias-fyi/savor_data | b06cbef44f23048fa01f9ad7fb36b8fa715d63b2 | [
"MIT"
] | null | null | null | savor_code/pipelines.py | tobias-fyi/savor_data | b06cbef44f23048fa01f9ad7fb36b8fa715d63b2 | [
"MIT"
] | null | null | null | """
Savor Data :: Various pipeline functions
"""

from os import environ
from pathlib import Path

import pandas as pd
import janitor  # noqa: F401 - registers pyjanitor's clean_names/rename_column on DataFrames


def extract_and_concat_airtable_data(records: dict) -> pd.DataFrame:
    """Extracts fields from the airtable data and concatenates them with airtable id.

    Uses pyjanitor to clean up column names.
    """
    # Load and clean/fix names
    df = (
        pd.DataFrame.from_records(records)
        .clean_names()
        .rename_column("id", "airtable_id")
    )
    df2 = pd.concat(  # Extract `fields` and concat to `airtable_id`
        [df["airtable_id"], df["fields"].apply(pd.Series)], axis=1
    )
    return df2


def convert_datetime_cols(data: pd.DataFrame, dt_cols: list) -> pd.DataFrame:
    """If datetime columns exist in dataframe, convert them to datetime.

    :param data (pd.DataFrame) : DataFrame with datetime cols to be converted.
    :param dt_cols (list) : List of potential datetime cols.
    :return (pd.DataFrame) : DataFrame with datetime cols converted.
    """
    data = data.copy()  # Don't change original dataframe
    for col in dt_cols:
        if col in data.columns:  # Make sure column exists
            data[col] = pd.to_datetime(data[col])
    return data
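
# Example usage (a sketch; the Airtable client call and column names are
# assumptions, not part of this module):
#
#   records = airtable.get_all()  # list of {'id': ..., 'fields': {...}} dicts
#   df = extract_and_concat_airtable_data(records)
#   df = convert_datetime_cols(df, ["created_at", "updated_at"])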
| 31.333333 | 85 | 0.674304 | 167 | 1,222 | 4.838323 | 0.419162 | 0.081683 | 0.029703 | 0.059406 | 0.089109 | 0.089109 | 0 | 0 | 0 | 0 | 0 | 0.003171 | 0.225859 | 1,222 | 38 | 86 | 32.157895 | 0.850951 | 0.4509 | 0 | 0 | 0 | 0 | 0.048077 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.157895 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
383966bec51a678c93133bca8d324981bf23e90d | 7,526 | py | Python | sdd-db/cronjobs/db_upload_energy.py | socialdistancingdashboard/virushack | 6ef69d26c5719d0bf257f4594ed2488dd73cdc40 | [
"Apache-2.0"
] | 29 | 2020-03-21T00:47:51.000Z | 2021-07-17T15:50:33.000Z | sdd-db/cronjobs/db_upload_energy.py | socialdistancingdashboard/virushack | 6ef69d26c5719d0bf257f4594ed2488dd73cdc40 | [
"Apache-2.0"
] | 7 | 2020-03-21T14:04:26.000Z | 2022-03-02T08:05:40.000Z | sdd-db/cronjobs/db_upload_energy.py | socialdistancingdashboard/virushack | 6ef69d26c5719d0bf257f4594ed2488dd73cdc40 | [
"Apache-2.0"
] | 13 | 2020-03-21T01:08:08.000Z | 2020-04-08T17:21:11.000Z | """ Uploads hourly electricity data (load, import balance, and spot price
indices) from Fraunhofer ISE's energy-charts.de into the scores database.
Note: load and import balance are aggregated as sums; price indices are
aggregated as a percentage of normal. """
import os
import pandas as pd
from datetime import datetime, timedelta
import pytz

# compatibility with ipython
try:
    __IPYTHON__
    os.chdir(os.path.dirname(__file__))
except:
    pass

import json
import pymysql
from pymysql.constants import CLIENT
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
import requests
from hashlib import md5

# connect to aws database with sqlalchemy (used for pandas connections)
config = json.load(open("../../credentials/credentials-aws-db.json", "r"))
aws_engine = create_engine(
    ("mysql+pymysql://" +
     config["user"] + ":" +
     config["password"] + "@" +
     config["host"] + ":" +
     str(config["port"]) + "/" +
     config["database"]),
    poolclass=NullPool,  # don't maintain a pool of connections
    pool_recycle=3600    # handles timeouts better, I think...
)

# aws database connection used for normal queries because sqlalchemy doesn't support on duplicate key queries
pymysql_con = pymysql.connect(
    config["host"],
    config["user"],
    config["password"],
    config["database"],
    client_flag=CLIENT.MULTI_STATEMENTS)
charts_whitelist = [
    "import balance",
    "load",
    "day ahead auction",
    "intraday continuous average price",
    "intraday continuous id3 price",
    "intraday continuous id1 price"
]

description_lookup = {
    "import balance": {
        "desc_short": "Netto Stromimporte",
        "desc_long": "Netto Stromimporte",
        "unit": "Gigawatt",
        "unit_agg": "Gigawatt",
        "agg_mode": "sum"
    },
    "load": {
        "desc_short": "Stromverbrauch",
        "desc_long": "Stromverbrauch",
        "unit": "Gigawatt",
        "unit_agg": "Gigawatt",
        "agg_mode": "sum"
    },
    "day ahead auction": {
        "desc_short": "Day-ahead Strompreis",
        "desc_long": "Day-ahead Strompreise",
        "unit": "EUR/MWh",
        "unit_agg": "Prozent",
        "agg_mode": "avg-percentage-of-normal"
    },
    "intraday continuous average price": {
        "desc_short": "Strompreis Index IDFull",
        "desc_long": "The IDFull index is the weighted average price of all continuous trades executed during the full trading session of any EPEX SPOT continuous contract. This index includes the entire market liquidity and thus represents the obvious continuous market price references for each contract.",
        "unit": "EUR/MWh",
        "unit_agg": "Prozent",
        "agg_mode": "avg-percentage-of-normal"
    },
    "intraday continuous id3 price": {
        "desc_short": "Strompreis Index ID3",
        "desc_long": "The ID3 index is the weighted average price of all continuous trades executed within the last 3 trading hours of a contract (up to 30min before delivery start). This index focuses on the most liquid timeframe of a continuous contract trading session. As such, this index presents large business interest for EPEX SPOT customers to market their offers or challenge their trading activity.",
        "unit": "EUR/MWh",
        "unit_agg": "Prozent",
        "agg_mode": "avg-percentage-of-normal"
    },
    "intraday continuous id1 price": {
        "desc_short": "Strompreis Index ID1",
        "desc_long": "The ID1 index is the weighted average price of all continuous trades executed within the last trading hour of a contract up to 30min before delivery start. This index catches the market last minute imbalance needs, reflecting amongst other the increasing REN breakthrough and system balancing flexibility.",
        "unit": "EUR/MWh",
        "unit_agg": "Prozent",
        "agg_mode": "avg-percentage-of-normal"
    }
}
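
# Guard (an added check, not in the original): every whitelisted chart key
# must have a matching description entry.
assert set(charts_whitelist) <= set(description_lookup)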
# retrieve data from fraunhofer ise
def upload_week(week):
    url = f"https://www.energy-charts.de/price/week_2020_{week}.json"
    r = requests.get(url)
    data = r.json()

    for chart in data:
        chart_key = chart["key"][0]["en"].lower().replace("-", " ")
        if chart_key not in charts_whitelist:
            continue
        print(f"current chart_key {chart_key}")

        source_id = ("score fraunhofer " + chart_key).replace(" ", "_")
        source = {
            "id": source_id,
            "desc_short": description_lookup[chart_key]["desc_short"],
            "desc_long": description_lookup[chart_key]["desc_long"],
            "contributors": "Fraunhofer ISI, 50 Hertz, Amprion, Tennet, TransnetBW, EEX, EPEX SPOT",
            "unit": description_lookup[chart_key]["unit"],
            "unit_long": description_lookup[chart_key]["unit"],
            "unit_agg_long": description_lookup[chart_key]["unit_agg"],
            "sample_interval": "hourly",
            "agg_mode": description_lookup[chart_key]["agg_mode"],
            "has_reference_values": 0,
            "spatial_level": "country"
        }
        q = """
            REPLACE INTO sources (
                id, desc_short, desc_long, contributors, unit, unit_long, unit_agg_long,
                sample_interval, agg_mode, has_reference_values, spatial_level )
            VALUES ( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s )
        """
        with pymysql_con.cursor() as cur:
            cur.execute(q, list(source.values()))
        pymysql_con.commit()

        country_id = "DE"
        unique_index = source_id + country_id
        station = {
            "source_id": source_id,
            "description": "country-level data",
            "source_station_id": "country-level data",
            "country_id": country_id,
            "unique_index": md5(unique_index.encode("utf-8")).hexdigest()
        }
        q = """
            INSERT INTO stations ( source_id, description, source_station_id, country_id, unique_index )
            VALUES ( %s, %s, %s, %s, %s )
            ON DUPLICATE KEY UPDATE
                source_id = VALUES(source_id),
                description = VALUES(description),
                source_station_id = VALUES(source_station_id),
                country_id = VALUES(country_id)
        """
        with pymysql_con.cursor() as cur:
            cur.execute(q, list(station.values()))
        pymysql_con.commit()

        q = """
            SELECT id AS station_id FROM stations
            WHERE source_id = '%s'
        """ % source_id
        scores_stations_foreign_key = pd.read_sql(q, aws_engine)["station_id"].iloc[0]

        # remove trailing zeros
        drop_index = len(chart["values"])
        while chart["values"][drop_index - 1][1] == 0:
            drop_index = drop_index - 1

        df_scores = pd.DataFrame(chart["values"][:drop_index], columns=["dt", "score_value"])
        df_scores.dropna(inplace=True)
        df_scores.dt = df_scores.dt.apply(lambda x: datetime.fromtimestamp(x / 1000))
        df_scores['dt'] = df_scores['dt'].astype(str)
        df_scores["source_id"] = source_id
        df_scores["station_id"] = scores_stations_foreign_key

        q = """
            INSERT INTO scores ( dt, score_value, source_id, station_id )
            VALUES (%s, %s, %s, %s)
            ON DUPLICATE KEY UPDATE
                score_value = VALUES(score_value)
        """
        with pymysql_con.cursor() as cur:
            cur.executemany(q, df_scores[["dt", "score_value", "source_id", "station_id"]].values.tolist())
        pymysql_con.commit()
    print("uploaded week %s done" % week)
def upload_all():
    """ Drop all fraunhofer data before reuploading """
    q = """
        DELETE FROM sources WHERE id LIKE '%fraunhofer%';
        DELETE FROM stations WHERE source_id LIKE '%fraunhofer%';
        DELETE FROM scores WHERE source_id LIKE '%fraunhofer%';
    """
    with pymysql_con.cursor() as cur:
        cur.execute(q)
    pymysql_con.commit()

    start = datetime(2020, 1, 1)
    week = start.isocalendar()[1]
    current_week = datetime.now().isocalendar()[1]
    while week <= current_week:
        upload_week(str(week).zfill(2))
        week = week + 1


upload_all()

# upload for today
#current_week = datetime.now().isocalendar()[1]
#upload_week(str(week).zfill(2))
pymysql_con.close() | 34.054299 | 406 | 0.680973 | 992 | 7,526 | 4.99496 | 0.295363 | 0.027447 | 0.008476 | 0.00888 | 0.344097 | 0.240161 | 0.185671 | 0.18002 | 0.155399 | 0.131584 | 0 | 0.008026 | 0.188812 | 7,526 | 221 | 407 | 34.054299 | 0.803604 | 0.083577 | 0 | 0.207865 | 0 | 0.022472 | 0.51178 | 0.02676 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011236 | false | 0.016854 | 0.08427 | 0 | 0.095506 | 0.011236 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
383b03761e542f44270f90f959691b44c38bf91a | 1,499 | py | Python | celery_worker.py | ceephoen/celery-flask-example | b92febfdd29dc2431c3c39940d3e54d4cc677134 | [
"MIT"
] | null | null | null | celery_worker.py | ceephoen/celery-flask-example | b92febfdd29dc2431c3c39940d3e54d4cc677134 | [
"MIT"
] | null | null | null | celery_worker.py | ceephoen/celery-flask-example | b92febfdd29dc2431c3c39940d3e54d4cc677134 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
celery work file
"""
from start import app
from datum.datum import Invitation, Account
from public.initial import db
from celery import Celery
def create_celery(app):
celery = Celery(
app.import_name,
backend=app.config['CELERY_RESULT_BACKEND'],
broker=app.config['CELERY_BROKER_URL']
)
celery.conf.update(app.config)
class ContextTask(celery.Task):
def __call__(self, *args, **kwargs):
with app.app_context():
return self.run(*args, **kwargs)
celery.Task = ContextTask
return celery
# init celery
celery_app = create_celery(app)
celery_app.config_from_object(app.config)
@celery_app.task()
def invite_record(uid, code):
"""invite_record"""
invitation = Invitation()
invitation.uid = uid
invitation.share_code = code
try:
db.session.add(invitation)
db.session.commit()
except Exception as err:
print(err)
db.session.rollback()
@celery_app.task()
def init_account(uid, mobile):
"""init_account"""
account = Account()
account.uid = uid
account.acc_no = mobile
try:
db.session.add(account)
db.session.commit()
except Exception as err:
print(err)
db.session.rollback()
"""
1. Start the Celery worker with the following command:
   celery -A celery_worker.celery_app worker -l info
2. Use it in views:
   from celery_worker import invite_record
   invite_record.delay(uid, invite_code)
""" | 20.534247 | 52 | 0.66044 | 193 | 1,499 | 4.974093 | 0.367876 | 0.075 | 0.046875 | 0.04375 | 0.125 | 0.125 | 0.125 | 0.125 | 0.125 | 0.125 | 0 | 0.0026 | 0.230153 | 1,499 | 73 | 53 | 20.534247 | 0.829289 | 0.052035 | 0 | 0.292683 | 0 | 0 | 0.031405 | 0.017355 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.121951 | 0 | 0.292683 | 0.04878 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
383dc005e8248bd773075f0ffebcc3744799a8e7 | 1,672 | py | Python | invoice/urls.py | seba6m6/django_invoice_shipment | 0cfa76176e40916c6cf33ff6a95d3ed40cc244f1 | [
"MIT"
] | null | null | null | invoice/urls.py | seba6m6/django_invoice_shipment | 0cfa76176e40916c6cf33ff6a95d3ed40cc244f1 | [
"MIT"
] | 10 | 2019-12-04T22:51:43.000Z | 2022-02-10T08:28:31.000Z | invoice/urls.py | seba6m6/django_invoice_shipment | 0cfa76176e40916c6cf33ff6a95d3ed40cc244f1 | [
"MIT"
] | null | null | null | from django.urls import path, re_path
from . import views

urlpatterns = [
    path('invoice-list-api/', views.invoice_list_api),
    path('invoice-list/', views.InvoiceList.as_view(), name='invoice_list'),
    path('invoice-list-unpaid/', views.UnpaidInvoice.as_view(), name='unpaid_invoice_list'),
    path('invoice-list-partially-paid/', views.PartiallyPaidInvoice.as_view(), name='partially_paid_invoice_list'),
    path('invoice-list-batch/', views.BatchInvoice.as_view(), name='batch_invoice_list'),
    re_path(r'^invoice/(?P<pk>[0-9]+)/$', views.invoice_detail, name='invoice_detail'),
    re_path(r'^invoice-pdf/(?P<pk>[0-9]+)/$', views.generate_pdf, name='generate_pdf'),
    re_path(r'^shipments-list-pdf/(?P<pk>[0-9]+)/$', views.generate_shipment_pdf, name='generate_shipment_pdf'),
    path('invoice-create/', views.CreateInvoice.as_view(), name='create_invoice'),
    re_path(r'^invoice-update/(?P<pk>[0-9]+)/$', views.UpdateInvoice.as_view(), name='update_invoice'),
    path('invoice-items-ship/', views.ShipmentView.as_view(), name='ship_items'),
    re_path(r'^invoice-items-ship-update/(?P<pk>[0-9]+)/$', views.ShipmentUpdate.as_view(), name='ship_items_update'),
    re_path(r'^invoice-items-ship-simple-update/(?P<pk>[0-9]+)/$', views.ShipmentSimpleUpdate.as_view(), name='ship_items_simple_update'),
    path('invoice-items-to-ship/', views.ItemsToSendList.as_view(), name='items_to_send'),
    path('invoice-items-sent/', views.ItemsSent.as_view(), name='items_sent'),
    path('shipments-list/', views.shipment_lists, name='shipment_lists'),
    re_path(r'^shipment-detail/(?P<pk>[0-9]+)/$', views.shipment_detail, name='shipment_detail'),
] | 72.695652 | 138 | 0.705742 | 239 | 1,672 | 4.719665 | 0.200837 | 0.058511 | 0.097518 | 0.031028 | 0.257979 | 0.120567 | 0.037234 | 0 | 0 | 0 | 0 | 0.009126 | 0.082536 | 1,672 | 23 | 139 | 72.695652 | 0.726206 | 0 | 0 | 0 | 0 | 0 | 0.411835 | 0.22116 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.095238 | 0 | 0.095238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
383fc49df8fbfef72b69486d6d3c48fe6421c587 | 1,114 | py | Python | migrations/versions/06bfb5c8f270_.py | jingshu-fk/FXTest_copy | 150012f87021b6b8204fd342c62538c10d8dfa85 | [
"MIT"
] | 1 | 2019-12-31T01:53:04.000Z | 2019-12-31T01:53:04.000Z | migrations/versions/06bfb5c8f270_.py | fuyang123/FXTest | ccbc5e986f4d0f9d3145a857674529380d873719 | [
"MIT"
] | 2 | 2021-03-26T00:24:28.000Z | 2022-03-22T22:06:39.000Z | migrations/versions/06bfb5c8f270_.py | jingshu-fk/FXTest_copy | 150012f87021b6b8204fd342c62538c10d8dfa85 | [
"MIT"
] | null | null | null | """empty message
Revision ID: 06bfb5c8f270
Revises: 2488b2a0a217
Create Date: 2019-04-23 08:54:15.345663
"""
from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '06bfb5c8f270'
down_revision = '2488b2a0a217'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('interfacetests', sa.Column('is_ci', sa.Boolean(), nullable=True))
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'users', type_='foreignkey')
    op.drop_constraint(None, 'tstresults', type_='foreignkey')
    op.add_column('tasks', sa.Column('taskdesc', sa.TEXT(length=252), nullable=True))
    op.drop_constraint(None, 'tasks', type_='foreignkey')
    op.drop_constraint(None, 'tasks', type_='foreignkey')
    op.drop_constraint(None, 'projects', type_='unique')
    op.drop_constraint(None, 'interfacetests', type_='foreignkey')
    op.drop_column('interfacetests', 'is_ci')
    # ### end Alembic commands ###
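
# Applying this revision with the standard Alembic CLI (usage note, not part
# of the generated file):
#   alembic upgrade 06bfb5c8f270
#   alembic downgrade 2488b2a0a217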
| 30.108108 | 85 | 0.702873 | 137 | 1,114 | 5.569343 | 0.452555 | 0.055046 | 0.125819 | 0.157274 | 0.288336 | 0.288336 | 0.243775 | 0.243775 | 0.12844 | 0.12844 | 0 | 0.057956 | 0.148115 | 1,114 | 36 | 86 | 30.944444 | 0.746048 | 0.264811 | 0 | 0.117647 | 0 | 0 | 0.227621 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.117647 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
384710fe7a2d234e8e57b874e247e8de75288b44 | 5,057 | py | Python | data/read_xml.py | guillaume-chevalier/Wikipedia-XML-Markup-Code-to-Plain-Text-Parser | b2b41b9786e676e6f855c993f938a1345b9591c1 | [
"BSD-3-Clause"
] | 1 | 2019-01-15T11:00:21.000Z | 2019-01-15T11:00:21.000Z | data/read_xml.py | guillaume-chevalier/Wikipedia-XML-Markup-Code-to-Plain-Text-Parser | b2b41b9786e676e6f855c993f938a1345b9591c1 | [
"BSD-3-Clause"
] | 3 | 2019-01-14T10:46:03.000Z | 2019-01-14T10:47:14.000Z | data/read_xml.py | guillaume-chevalier/Wikipedia-XML-Markup-Code-to-Plain-Text-Parser-of-Hell | b2b41b9786e676e6f855c993f938a1345b9591c1 | [
"BSD-3-Clause"
] | null | null | null | import time
import os
import codecs
import csv
import xml.etree.ElementTree as etree
import re

import mwparserfromhell

from data.config import WIKIPEDIA_XML, WIKIPEDIA_WORKING_DIR, WIKIPEDIA_OUTPUT_UTF8_TXT_RAW_FILE
from data.read_write_txt import FilesReaderBinaryUTF8, FilesWriterBinaryUTF8


def mwparserfromhell_remove_templates(text):
    """
    Removes templates from wikicode parsed tree.
    Note: I'm not even sure what this does or if it does something.
    """
    wikicode = mwparserfromhell.parse(text)
    for template in reversed(wikicode.filter_templates()):
        wikicode.remove(template)
    return wikicode


def wikicode_to_txt(wikicode):
    """
    I couldn't find a properly-licensed tool to do this, I created my own.
    This mostly crushes wikicode to plain text, but it's not perfect.
    """
    # removes wiki markup: https://en.wikipedia.org/wiki/Wikipedia:Tutorial/Formatting#Wiki_markup
    # matches everything but a `]`, a `<`, or a `}`:
    everything_but_closing_parenthesis = '[^\]\<\}]'
    # matches a `[`, a `{`, or a `<ref>`:
    begin = '(\{|\[|\<ref\>)+'
    # matches anything and then a final `:` or a final `|`:
    preceded = '(' + everything_but_closing_parenthesis + '+(\||:))?'
    # matches a `]`, a `}`, or a `</ref>`:
    closing = '(\]|\}|\</ref\>)+'
    # matches the last thing between "begin" and "closing" which is preceded by "preceded":
    RE = begin + preceded + '(?P<text>' + everything_but_closing_parenthesis + '*)' + closing
    RE = re.compile(RE)
    # Keep the "<text>" group found before closing parenthesis.
    keep_right_group = r'\g<text>'
    out = re.sub(RE, keep_right_group, wikicode)
    # remove remaining refs and xml comments.
    RE = "((</?ref)[^\>]*\>|(<!--)[^\>]*--\>)"
    out = re.sub(RE, "", out)
    # remove a whole lot of crap
    out = out.replace("'''", "").replace("''", "").replace("  ", " ").replace("--", "").replace("~~", "").replace("\t", " ").replace("  ", " ")
    out = out.replace("\n*", "\n").replace("\n#", "\n").replace(" \n", "\n").replace("\n ", "\n").replace(" \n", "\n")
    out = out.replace("\n\n", "\n").replace("\n\n", "\n").replace("\n\n", "\n").replace("\n\n", "\n")
    return out
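
# A tiny illustration of the regex pipeline above (an assumed example, not
# from the module's tests):
#
#   wikicode_to_txt("'''Bold''' [[target|text]]<ref>cite</ref>")
#
# keeps the link caption and reference body, yielding roughly "Bold textcite".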

def beautify_text_to_sentences_that_ends_with_dots(text):
    """
    Make sentences end with dots whenever possible or where there is a dot missing.
    """
    symbols_to_avoid = ["^", "\n", "\.", "\!", ",", "-", ":", ";", "\?"]
    not_an_empty_nor_starting_nor_punctuated_line = ""
    for symb in symbols_to_avoid:
        not_an_empty_nor_starting_nor_punctuated_line += '(?<!' + symb + ')'
    #not_an_empty_nor_starting_nor_punctuated_line = "(?<!^)(?<!\n)"
    RE = not_an_empty_nor_starting_nor_punctuated_line + "(?P<eos>(\n|$))"
    RE = re.compile(RE)
    # Insert a dot before the matched end-of-line group.
    keep = r'.\g<eos>'
    out = re.sub(RE, keep, text)
    return out


def remove_titles_and_insert_newline_instead(text):
    """
    A title will now be 2 new lines, and an article separation will now be 3 newlines.
    """
    RE = re.compile("(((?<=^)|(?<=\n)))((=|_).*(=|_)\s?\.?\s?)(\n|$)")
    out = re.sub(RE, "\n", text)
    out = out.strip().rstrip("\n") + "\n\n\n"
    return out

def title_tag_name_to_clean_title(title_tag):
    i = title_tag.rfind("}")
    if i != -1:
        return title_tag[i + 1:]
    else:
        return title_tag
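
# For example (illustrative, not in the original): with MediaWiki namespaced
# tags such as '{http://www.mediawiki.org/xml/export-0.10/}title', the helper
# above returns just 'title'.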

def convert_wikipedia_to_plain_txt(WIKIPEDIA_OUTPUT_UTF8_TXT_RAW_FILE, WIKIPEDIA_XML):
    # With a writer on text files
    with FilesWriterBinaryUTF8(WIKIPEDIA_OUTPUT_UTF8_TXT_RAW_FILE, chunk_size=int(1024**2), verbose=True) as fswr:
        title = ""
        # For every node in Wikipedia's XML dump
        for event, elem in etree.iterparse(WIKIPEDIA_XML, events=('start', 'end')):
            tname = title_tag_name_to_clean_title(elem.tag)
            # Retrieve pages just under certain conditions.
            if event == 'start':
                if tname == 'title':
                    title = elem.text
                elif (
                    tname == 'text'
                    # Discard if no title or no text:
                    and elem.text is not None
                    and title is not None
                    # Discard if page is a redirection to another page:
                    and "{{redirect" not in elem.text.lower()
                    and "#redirect" not in elem.text.lower()
                    # Discard if the page is not a regular article (ex type: page, list, talk, book):
                    and ":" not in title
                ):
                    # Will save this page.
                    text = elem.text
                    wikicode = mwparserfromhell_remove_templates(text)
                    text = wikicode_to_txt(str(wikicode))
                    text = beautify_text_to_sentences_that_ends_with_dots(text)
                    text = remove_titles_and_insert_newline_instead(text)
                    fswr.write(text)
            elem.clear()
| 35.612676 | 143 | 0.584932 | 637 | 5,057 | 4.452119 | 0.324961 | 0.011284 | 0.028561 | 0.024683 | 0.324048 | 0.295134 | 0.238011 | 0.190056 | 0.13646 | 0.106135 | 0 | 0.004025 | 0.263002 | 5,057 | 141 | 144 | 35.865248 | 0.756909 | 0.25786 | 0 | 0.068493 | 0 | 0 | 0.085699 | 0.02238 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082192 | false | 0 | 0.123288 | 0 | 0.287671 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
384823b968310ae6bd298d652f5063804c7c94d4 | 6,779 | py | Python | hathor/mining/block_template.py | mbnunes/hathor-core | e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8 | [
"Apache-2.0"
] | 51 | 2019-12-28T03:33:27.000Z | 2022-03-10T14:03:03.000Z | hathor/mining/block_template.py | mbnunes/hathor-core | e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8 | [
"Apache-2.0"
] | 316 | 2019-09-10T09:20:05.000Z | 2022-03-31T20:18:56.000Z | hathor/mining/block_template.py | mbnunes/hathor-core | e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8 | [
"Apache-2.0"
] | 19 | 2020-01-04T00:13:18.000Z | 2022-02-08T21:18:46.000Z | # Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for abstractions around generating mining templates.
"""
from typing import Dict, Iterable, List, NamedTuple, Optional, Set, Tuple, Type, Union
from hathor.transaction import BaseTransaction, Block, MergeMinedBlock
from hathor.transaction.storage import TransactionStorage
from hathor.util import Random

class BlockTemplate(NamedTuple):
    versions: Set[int]
    reward: int  # reward unit value, 64.00 HTR is 6400
    weight: float  # calculated from the DAA
    timestamp_now: int  # the reference timestamp the template was generated for
    timestamp_min: int  # min valid timestamp
    timestamp_max: int  # max valid timestamp
    parents: List[bytes]  # required parents, will always have a block and at most 2 txs
    parents_any: List[bytes]  # list of extra parents to choose from when there are more options
    height: int  # metadata
    score: float  # metadata

    def generate_minimaly_valid_block(self) -> BaseTransaction:
        """ Generates a block, without any extra information that is valid for this template. No random choices."""
        from hathor.transaction import TxOutput, TxVersion
        return TxVersion(min(self.versions)).get_cls()(
            timestamp=self.timestamp_min,
            parents=self.parents[:] + sorted(self.parents_any)[:(3 - len(self.parents))],
            outputs=[TxOutput(self.reward, b'')],
            weight=self.weight,
        )

    def generate_mining_block(self, rng: Random, merge_mined: bool = False, address: Optional[bytes] = None,
                              timestamp: Optional[int] = None, data: Optional[bytes] = None,
                              storage: Optional[TransactionStorage] = None, include_metadata: bool = False,
                              ) -> Union[Block, MergeMinedBlock]:
        """ Generates a block by filling the template with the given options and random parents (if multiple choices).

        Note that if a timestamp is given it will be coerced into the [timestamp_min, timestamp_max] range.
        """
        # XXX: importing these here to try to contain hathor dependencies as much as possible
        from hathor.transaction import TransactionMetadata, TxOutput
        from hathor.transaction.scripts import create_output_script
        parents = list(self.get_random_parents(rng))
        output_script = create_output_script(address) if address is not None else b''
        base_timestamp = timestamp if timestamp is not None else self.timestamp_now
        block_timestamp = min(max(base_timestamp, self.timestamp_min), self.timestamp_max)
        tx_outputs = [TxOutput(self.reward, output_script)]
        cls: Union[Type['Block'], Type['MergeMinedBlock']] = MergeMinedBlock if merge_mined else Block
        block = cls(outputs=tx_outputs, parents=parents, timestamp=block_timestamp,
                    data=data or b'', storage=storage, weight=self.weight)
        if include_metadata:
            block._metadata = TransactionMetadata(height=self.height, score=self.score)
        block.get_metadata(use_storage=False)
        return block

    def get_random_parents(self, rng: Random) -> Tuple[bytes, bytes, bytes]:
        """ Get parents from self.parents plus a random choice from self.parents_any to make it 3 in total.

        Return type is tuple just to make it clear that the length is always 3.
        """
        assert 1 <= len(self.parents) <= 3
        more_parents = rng.ordered_sample(self.parents_any, 3 - len(self.parents))
        p1, p2, p3 = self.parents[:] + more_parents
        return p1, p2, p3

    def to_dict(self) -> Dict:
        return {
            'data': self.generate_minimaly_valid_block().get_struct_without_nonce().hex(),
            'versions': sorted(self.versions),
            'reward': self.reward,
            'weight': self.weight,
            'timestamp_now': self.timestamp_now,
            'timestamp_min': self.timestamp_min,
            'timestamp_max': self.timestamp_max,
            'parents': [p.hex() for p in self.parents],
            'parents_any': [p.hex() for p in self.parents_any],
            'height': self.height,
            'score': self.score,
        }

    @classmethod
    def from_dict(cls, data: Dict) -> 'BlockTemplate':
        return cls(
            versions=set(data['versions']),
            reward=int(data['reward']),
            weight=float(data['weight']),
            timestamp_now=int(data['timestamp_now']),
            timestamp_min=int(data['timestamp_min']),
            timestamp_max=int(data['timestamp_max']),
            parents=[bytes.fromhex(p) for p in data['parents']],
            parents_any=[bytes.fromhex(p) for p in data['parents_any']],
            height=int(data['height']),
            score=int(data['score']),
        )
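
    # Round-trip note (an added observation): BlockTemplate.from_dict(t.to_dict())
    # reconstructs the template, with the caveat that from_dict coerces `score`
    # through int() even though the field is annotated as float.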

class BlockTemplates(List[BlockTemplate]):
    def __init__(self, templates: Iterable[BlockTemplate], storage: Optional[TransactionStorage] = None):
        super().__init__(templates)
        self.storage = storage
        assert len(self) > 0, 'This class requires at least one block template.'

    def choose_random_template(self, rng: Random) -> BlockTemplate:
        """ Randomly choose and return a template and use that for generating a block, see BlockTemplate"""
        return rng.choice(self)

    def generate_mining_block(self, rng: Random, merge_mined: bool = False, address: Optional[bytes] = None,
                              timestamp: Optional[int] = None, data: Optional[bytes] = None,
                              storage: Optional[TransactionStorage] = None, include_metadata: bool = False,
                              ) -> Union[Block, MergeMinedBlock]:
        """ Randomly choose a template and use that for generating a block, see BlockTemplate.generate_mining_block"""
        return self.choose_random_template(rng).generate_mining_block(rng, merge_mined=merge_mined, address=address,
                                                                      timestamp=timestamp, data=data,
                                                                      storage=storage or self.storage,
                                                                      include_metadata=include_metadata)
| 50.969925 | 118 | 0.648031 | 813 | 6,779 | 5.289053 | 0.260763 | 0.02814 | 0.024419 | 0.018837 | 0.173488 | 0.173488 | 0.159535 | 0.136279 | 0.122326 | 0.122326 | 0 | 0.005986 | 0.260658 | 6,779 | 132 | 119 | 51.356061 | 0.851955 | 0.245906 | 0 | 0.089888 | 0 | 0 | 0.052013 | 0 | 0 | 0 | 0 | 0 | 0.022472 | 1 | 0.089888 | false | 0 | 0.078652 | 0.022472 | 0.382022 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
38482a9c89f54e16cf60b0ec6e3262e31f7db32b | 1,124 | py | Python | baselines/ADJ/adj_rerank.py | sordonia/HierarchicalEncoderDecoder | b217a94387eca1e37975d7dd770d94981ec7eac1 | [
"BSD-3-Clause"
] | 116 | 2015-11-01T19:37:25.000Z | 2022-02-03T10:26:41.000Z | baselines/ADJ/adj_rerank.py | sordonia/HierarchicalEncoderDecoder | b217a94387eca1e37975d7dd770d94981ec7eac1 | [
"BSD-3-Clause"
] | 10 | 2015-11-02T14:17:03.000Z | 2019-06-12T08:37:20.000Z | baselines/ADJ/adj_rerank.py | sordonia/HierarchicalEncoderDecoder | b217a94387eca1e37975d7dd770d94981ec7eac1 | [
"BSD-3-Clause"
] | 52 | 2015-11-02T06:40:42.000Z | 2021-07-07T11:47:39.000Z | import os
import argparse
import cPickle
import operator
import itertools
from Common.psteff import *


def rerank(model_file, ctx_file, rnk_file,
           score=False, no_normalize=False, fallback=False):
    pst = PSTInfer()
    pst.load(model_file)
    output_file = open(rnk_file + "_ADJ" + (".f" if score else ".gen"), "w")
    begin = True
    for ctx_line, rnk_line in itertools.izip(open(ctx_file), open(rnk_file)):
        suffix = ctx_line.strip().split('\t')
        candidates = rnk_line.strip().split('\t')
        candidates, scores = pst.rerank(
            suffix, candidates, no_normalize=no_normalize, fallback=fallback)
        if not score:
            reranked = [x[0] for x in sorted(zip(candidates, scores),
                                             key=operator.itemgetter(1),
                                             reverse=False)]
            print >> output_file, '\t'.join(reranked)
        else:
            if begin:
                print >> output_file, 'ADJ'
                begin = False
            for s in scores:
                print >> output_file, s
    output_file.close()
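
# Porting note (not part of the original): this script targets Python 2 --
# cPickle, itertools.izip and the `print >> file` syntax; under Python 3 one
# would use pickle, the builtin zip, and print(..., file=output_file).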
| 35.125 | 77 | 0.563167 | 132 | 1,124 | 4.643939 | 0.431818 | 0.081566 | 0.073409 | 0.04894 | 0.081566 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002653 | 0.329181 | 1,124 | 31 | 78 | 36.258065 | 0.810345 | 0 | 0 | 0 | 0 | 0 | 0.017794 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.206897 | 0 | 0.241379 | 0.103448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3849cad925c322f4e12bb86a7774aad131406491 | 474 | py | Python | 2015/2/paper.py | lvaughn/advent | ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e | [
"CC0-1.0"
] | null | null | null | 2015/2/paper.py | lvaughn/advent | ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e | [
"CC0-1.0"
] | null | null | null | 2015/2/paper.py | lvaughn/advent | ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
paper_needed = 0
ribbon_needed = 0

with open('input.txt', 'r') as f:
    for line in f:
        [h, w, l] = [int(a) for a in line.split('x')]
        side_a = h * w
        side_b = h * l
        side_c = w * l
        paper_needed += min(side_a, side_b, side_c) + 2 * side_a + 2 * side_b + 2 * side_c
        smallest_perimeter = 2 * (h + w + l - max(h, w, l))
        ribbon_needed += h * w * l + smallest_perimeter
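
# Sanity check (assuming the well-known AoC 2015 day 2 examples): a 2x3x4
# present needs 58 sq ft of wrapping paper and 34 ft of ribbon; a 1x1x10
# present needs 43 sq ft and 14 ft.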
print("Paper", paper_needed)
print("Ribbon", ribbon_needed) | 29.625 | 84 | 0.594937 | 87 | 474 | 3.045977 | 0.390805 | 0.037736 | 0.045283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019608 | 0.246835 | 474 | 16 | 85 | 29.625 | 0.722689 | 0.044304 | 0 | 0 | 0 | 0 | 0.048565 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
384b99fd8b2faa37e3b775a159b4d21eb26af8c2 | 20,467 | py | Python | gym/envs/robotics/hand/move.py | carlo-/gym | 7e7575601a0df5476ab9b15072c8b65693ce3071 | [
"Python-2.0",
"OLDAP-2.7"
] | 1 | 2021-01-08T18:18:43.000Z | 2021-01-08T18:18:43.000Z | gym/envs/robotics/hand/move.py | carlo-/gym | 7e7575601a0df5476ab9b15072c8b65693ce3071 | [
"Python-2.0",
"OLDAP-2.7"
] | null | null | null | gym/envs/robotics/hand/move.py | carlo-/gym | 7e7575601a0df5476ab9b15072c8b65693ce3071 | [
"Python-2.0",
"OLDAP-2.7"
] | 1 | 2019-07-31T18:40:26.000Z | 2019-07-31T18:40:26.000Z | import os
import pickle
from typing import Sequence

import numpy as np

from gym import utils, error
from gym.envs.robotics import rotations, hand_env
from gym.envs.robotics.utils import robot_get_obs, reset_mocap_welds, reset_mocap2body_xpos

try:
    import mujoco_py
except ImportError as e:
    raise error.DependencyNotInstalled("{}. (HINT: you need to install mujoco_py, "
                                       "and also perform the setup instructions here: "
                                       "https://github.com/openai/mujoco-py/.)".format(e))

HAND_PICK_AND_PLACE_XML = os.path.join('hand', 'pick_and_place.xml')
HAND_MOVE_AND_REACH_XML = os.path.join('hand', 'move_and_reach.xml')

FINGERTIP_BODY_NAMES = [
    'robot0:ffdistal',
    'robot0:mfdistal',
    'robot0:rfdistal',
    'robot0:lfdistal',
    'robot0:thdistal',
]

OBJECTS = dict(
    original=dict(type='ellipsoid', size='0.03 0.03 0.04'),
    small_box=dict(type='box', size='0.022 0.022 0.022'),
    box=dict(type='box', size='0.03 0.03 0.03'),
    sphere=dict(type='ellipsoid', size='0.028 0.028 0.028'),
    small_sphere=dict(type='ellipsoid', size='0.024 0.024 0.024'),
    teapot=dict(type='mesh', mesh='object_mesh:teapot_vhacd_m', mesh_parts=6, mass=[0.01, 0.01, 0.01, 0.5, 0.01, 0.01]),
)

def _goal_distance(goal_a, goal_b, ignore_target_rotation):
    assert goal_a.shape == goal_b.shape
    assert goal_a.shape[-1] == 7
    delta_pos = goal_a[..., :3] - goal_b[..., :3]
    d_pos = np.linalg.norm(delta_pos, axis=-1)
    d_rot = np.zeros_like(goal_b[..., 0])
    if not ignore_target_rotation:
        quat_a, quat_b = goal_a[..., 3:], goal_b[..., 3:]
        # Subtract quaternions and extract angle between them.
        quat_diff = rotations.quat_mul(quat_a, rotations.quat_conjugate(quat_b))
        angle_diff = 2 * np.arccos(np.clip(quat_diff[..., 0], -1., 1.))
        d_rot = angle_diff
    assert d_pos.shape == d_rot.shape
    return d_pos, d_rot
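
# Note on the rotation metric above (an explanatory addition): for unit
# quaternions the scalar part of q_a * conj(q_b) equals cos(theta / 2), so
# 2 * arccos(...) recovers the rotation angle theta; the clip guards against
# floating-point values slightly outside [-1, 1].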

def _check_range(a, a_min, a_max, include_bounds=True):
    if include_bounds:
        return np.all((a_min <= a) & (a <= a_max))
    else:
        return np.all((a_min < a) & (a < a_max))

class MovingHandEnv(hand_env.HandEnv, utils.EzPickle):
    def __init__(self, model_path, reward_type, initial_qpos=None, relative_control=False, has_object=False,
                 randomize_initial_arm_pos=False, randomize_initial_object_pos=True, ignore_rotation_ctrl=False,
                 distance_threshold=0.05, rotation_threshold=0.1, n_substeps=20, ignore_target_rotation=False,
                 success_on_grasp_only=False, grasp_state=None, grasp_state_reset_p=0.0, target_in_the_air_p=0.5,
                 object_id='original', object_cage=False, cage_opacity=0.1, weld_fingers=False):

        self.target_in_the_air_p = target_in_the_air_p
        self.has_object = has_object
        self.ignore_target_rotation = ignore_target_rotation
        self.randomize_initial_arm_pos = randomize_initial_arm_pos
        self.randomize_initial_object_pos = randomize_initial_object_pos
        self.ignore_rotation_ctrl = ignore_rotation_ctrl
        self.distance_threshold = distance_threshold
        self.rotation_threshold = rotation_threshold
        self.reward_type = reward_type
        self.success_on_grasp_only = success_on_grasp_only
        self.object_id = object_id
        self.forearm_bounds = (np.r_[0.65, 0.3, 0.42], np.r_[1.75, 1.2, 1.0])
        self.table_safe_bounds = (np.r_[1.10, 0.43], np.r_[1.49, 1.05])
        self._initial_arm_mocap_pose = np.r_[1.05, 0.75, 0.65, rotations.euler2quat(np.r_[0., 1.59, 1.57])]

        if isinstance(grasp_state, bool) and grasp_state:
            suffix = "" if object_id == 'original' else f'_{object_id}'
            p = os.path.join(os.path.dirname(__file__), f'../assets/states/grasp_state{suffix}.pkl')
            if not os.path.exists(p):
                raise IOError('File {} does not exist'.format(p))
            grasp_state = pickle.load(open(p, 'rb'))
        if grasp_state is not None and grasp_state_reset_p <= 0.0:
            raise ValueError('grasp_state_reset_p must be greater than zero if grasp_state is specified!')
        self.grasp_state = grasp_state
        self.grasp_state_reset_p = grasp_state_reset_p

        if ignore_rotation_ctrl and not ignore_target_rotation:
            raise ValueError('Target rotation must be ignored if arm cannot rotate! Set ignore_target_rotation=True')

        if success_on_grasp_only:
            if reward_type != 'sparse':
                raise ValueError('Parameter success_on_grasp_only requires sparse rewards!')
            if not has_object:
                raise ValueError('Parameter success_on_grasp_only requires object to be grasped!')

        default_qpos = dict()
        xml_format = None

        if self.has_object:
            default_qpos['object:joint'] = [1.25, 0.53, 0.4, 1., 0., 0., 0.]
            xml_format = dict(
                object_geom='<geom name="{name}" {props} material="material:object" condim="4"'
                            'friction="1 0.95 0.01" solimp="0.99 0.99 0.01" solref="0.01 1"/>',
                target_geom='<geom name="{name}" {props} material="material:target" condim="4" group="2"'
                            'contype="0" conaffinity="0"/>',
            )
            obj = dict(OBJECTS[object_id])
            if 'mass' not in obj.keys():
                obj['mass'] = 0.2
            mesh_parts = obj.get('mesh_parts')
            if mesh_parts is not None and isinstance(mesh_parts, int):
                del obj['mesh_parts']
                object_geom, target_geom = '', ''
                if isinstance(obj['mass'], Sequence):
                    masses = list(obj['mass'])
                    assert len(masses) == mesh_parts
                    del obj['mass']
                else:
                    masses = [obj['mass']] * mesh_parts
                for i in range(mesh_parts):
                    obj_part = dict(obj)
                    obj_part['mesh'] += f'_part{i}'
                    obj_part['mass'] = masses[i]
                    props = " ".join([f'{k}="{v}"' for k, v in obj_part.items()])
                    object_geom += xml_format['object_geom'].format(name=f'object_part{i}', props=props)
                    target_geom += xml_format['target_geom'].format(name=f'target_part{i}', props=props)
                xml_format = dict(object_geom=object_geom, target_geom=target_geom)
            else:
                props = " ".join([f'{k}="{v}"' for k, v in obj.items()])
                xml_format = dict(
                    object_geom=xml_format['object_geom'].format(name='object', props=props),
                    target_geom=xml_format['target_geom'].format(name='target', props=props),
                )
            if object_cage:
                rgba = f"1 0 0 {cage_opacity}"
                xml_format['cage'] = '''
                <geom pos="0 0.55 0.4" quat="0.924 0.3826 0 0" size="1 0.75 0.01" type="box" mass="200" rgba="{}" solimp="0.99 0.99 0.01" solref="0.01 1"/>
                <geom pos="0 -0.55 0.4" quat="0.924 -0.3826 0 0" size="1 0.75 0.01" type="box" mass="200" rgba="{}" solimp="0.99 0.99 0.01" solref="0.01 1"/>
                <geom pos="-0.45 0 0.4" quat="0.924 0 0.3826 0" size="0.75 1 0.01" type="box" mass="200" rgba="{}" solimp="0.99 0.99 0.01" solref="0.01 1"/>
                <geom pos="0.45 0 0.4" quat="0.924 0 -0.3826 0" size="0.75 1 0.01" type="box" mass="200" rgba="{}" solimp="0.99 0.99 0.01" solref="0.01 1"/>
                '''.format(*[rgba] * 4)
            else:
                xml_format['cage'] = ''

        if xml_format is None:
            xml_format = dict()  # added guard: avoids indexing None below when has_object is False

        if weld_fingers:
            mocap_tp = '''
            <body mocap="true" name="{}:mocap" pos="0 0 0">
                <geom conaffinity="0" contype="0" pos="0 0 0" rgba="0 0.5 0 0.7" size="0.005 0.005 0.005" type="box"/>
                <geom conaffinity="0" contype="0" pos="0 0 0" rgba="0 0.5 0 0.1" size="1 0.005 0.005" type="box"/>
                <geom conaffinity="0" contype="0" pos="0 0 0" rgba="0 0.5 0 0.1" size="0.005 1 0.001" type="box"/>
                <geom conaffinity="0" contype="0" pos="0 0 0" rgba="0 0.5 0 0.1" size="0.005 0.005 1" type="box"/>
            </body>
            '''
            weld_tp = '<weld body1="{}:mocap" body2="{}" solimp="0.9 0.95 0.001" solref="0.02 1"/>'
            fingertip_names = [x.replace('robot0:', '') for x in FINGERTIP_BODY_NAMES]
            xml_format['finger_mocaps'] = '\n'.join([mocap_tp.format(n) for n in fingertip_names])
            xml_format['finger_welds'] = '\n'.join([weld_tp.format(m, f) for m, f in zip(fingertip_names, FINGERTIP_BODY_NAMES)])
        else:
            xml_format['finger_welds'] = ''
            xml_format['finger_mocaps'] = ''

        initial_qpos = initial_qpos or default_qpos
        hand_env.HandEnv.__init__(self, model_path, n_substeps=n_substeps, initial_qpos=initial_qpos,
                                  relative_control=relative_control, arm_control=True, xml_format=xml_format)
        utils.EzPickle.__init__(self)
def get_object_contact_points(self, other_body='robot0:'):
if not self.has_object:
raise NotImplementedError("Cannot get object contact points in an environment without objects!")
sim = self.sim
object_name = 'object'
object_pos = self.sim.data.get_body_xpos(object_name)
object_rot = self.sim.data.get_body_xmat(object_name)
contact_points = []
# Partially from: https://gist.github.com/machinaut/209c44e8c55245c0d0f0094693053158
for i in range(sim.data.ncon):
# Note that the contact array has more than `ncon` entries,
# so be careful to only read the valid entries.
contact = sim.data.contact[i]
body_name_1 = sim.model.body_id2name(sim.model.geom_bodyid[contact.geom1])
body_name_2 = sim.model.body_id2name(sim.model.geom_bodyid[contact.geom2])
if other_body in body_name_1 and body_name_2 == object_name or \
other_body in body_name_2 and body_name_1 == object_name:
c_force = np.zeros(6, dtype=np.float64)
mujoco_py.functions.mj_contactForce(sim.model, sim.data, i, c_force)
# Compute contact point position wrt the object
rel_contact_pos = object_rot.T @ (contact.pos - object_pos)
contact_points.append(dict(
body1=body_name_1,
body2=body_name_2,
relative_pos=rel_contact_pos,
force=c_force
))
return contact_points
    def _get_body_pose(self, body_name, no_rot=False, euler=False):
        if no_rot:
            rot = np.zeros(4)
        else:
            rot = self.sim.data.get_body_xquat(body_name)
            if euler:
                rot = rotations.quat2euler(rot)
        return np.r_[self.sim.data.get_body_xpos(body_name), rot]

    def _get_site_pose(self, site_name, no_rot=False):
        if no_rot:
            quat = np.zeros(4)
        else:
            # this is very inefficient, avoid computation when possible
            quat = rotations.mat2quat(self.sim.data.get_site_xmat(site_name))
        return np.r_[self.sim.data.get_site_xpos(site_name), quat]

    def _get_palm_pose(self, no_rot=False):
        return self._get_site_pose('robot0:palm_center', no_rot)

    def _get_grasp_center_pose(self, no_rot=False):
        return self._get_site_pose('robot0:grasp_center', no_rot)

    def _get_object_pose(self):
        return self._get_body_pose('object')

    def _get_achieved_goal(self):
        palm_pose = self._get_palm_pose(no_rot=self.ignore_target_rotation)
        if self.has_object:
            pose = self._get_object_pose()
        else:
            pose = palm_pose
        if self.ignore_target_rotation:
            pose[3:] = 0.0
        if self.success_on_grasp_only:
            d = np.linalg.norm(palm_pose[:3] - pose[:3])
            return np.r_[pose, d]
        return pose

    def _set_arm_pose(self, pose: np.ndarray):
        assert pose.size == 7 or pose.size == 3
        reset_mocap2body_xpos(self.sim)
        self.sim.data.mocap_pos[0, :] = np.clip(pose[:3], *self.forearm_bounds)
        if pose.size == 7:
            self.sim.data.mocap_quat[0, :] = pose[3:]

    def get_table_surface_pose(self):
        pose = np.r_[
            self.sim.data.get_body_xpos('table0'),
            self.sim.data.get_body_xquat('table0'),
        ]
        geom = self.sim.model.geom_name2id('table0_geom')
        size = self.sim.model.geom_size[geom].copy()
        pose[2] += size[2]
        return pose
    # GoalEnv methods
    # ----------------------------

    def compute_reward(self, achieved_goal: np.ndarray, goal: np.ndarray, info: dict):
        if self.reward_type == 'sparse':
            success = self._is_success(achieved_goal, goal).astype(np.float32)
            weights = (info or dict()).get('weights')
            if weights is not None:
                success *= weights
            return success - 1.
        else:
            d_pos, d_rot = _goal_distance(achieved_goal, goal, self.ignore_target_rotation)
            # We weigh the difference in position to avoid that `d_pos` (in meters) is completely
            # dominated by `d_rot` (in radians).
            return -(10. * d_pos + d_rot)
    # RobotEnv methods
    # ----------------------------

    def _set_action(self, action):
        assert action.shape == self.action_space.shape
        hand_ctrl = action[:20]
        forearm_ctrl = action[20:] * 0.1

        # set hand action
        hand_env.HandEnv._set_action(self, hand_ctrl)

        # set forearm action
        assert self.sim.model.nmocap == 1
        pos_delta = forearm_ctrl[:3]
        quat_delta = forearm_ctrl[3:]
        if self.ignore_rotation_ctrl:
            quat_delta *= 0.0
        new_pos = self.sim.data.mocap_pos[0] + pos_delta
        new_quat = self.sim.data.mocap_quat[0] + quat_delta
        self._set_arm_pose(np.r_[new_pos, new_quat])

    def _is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray):
        d_pos, d_rot = _goal_distance(achieved_goal[..., :7], desired_goal[..., :7], self.ignore_target_rotation)
        achieved_pos = (d_pos < self.distance_threshold).astype(np.float32)
        achieved_rot = (d_rot < self.rotation_threshold).astype(np.float32)
        achieved_all = achieved_pos * achieved_rot
        if self.success_on_grasp_only:
            assert achieved_goal.shape[-1] == 8
            d_palm = achieved_goal[..., 7]
            achieved_grasp = (d_palm < 0.08).astype(np.float32)
            achieved_all *= achieved_grasp
        return achieved_all
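
    # Success therefore requires the position error to be under `distance_threshold`
    # and (unless target rotation is ignored) the rotation error under
    # `rotation_threshold`; with `success_on_grasp_only` the palm must additionally
    # be within 8 cm of the object (explanatory note, not in the original).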
    def _env_setup(self, initial_qpos):
        for name, value in initial_qpos.items():
            self.sim.data.set_joint_qpos(name, value)
        reset_mocap_welds(self.sim)
        self.sim.forward()

        # Move end effector into position.
        self._set_arm_pose(self._initial_arm_mocap_pose.copy())
        for _ in range(10):
            self.sim.step()

        if self.has_object:
            self.height_offset = self.sim.data.get_site_xpos('object:center')[2]

    def _viewer_setup(self):
        body_id = self.sim.model.body_name2id('table0')  # 'robot0:forearm'
        lookat = self.sim.data.body_xpos[body_id]
        for idx, value in enumerate(lookat):
            self.viewer.cam.lookat[idx] = value
        self.viewer.cam.distance = 1.4
        self.viewer.cam.azimuth = 180.  # 132.
        self.viewer.cam.elevation = -38.
    def _reset_sim(self):
        reset_to_grasp_state = self.grasp_state_reset_p > self.np_random.uniform()
        while True:
            if reset_to_grasp_state:
                assert self.has_object
                self.sim.set_state(self.grasp_state)
                # Fix hand ctrl so that fingers stay close while we update the arm position later
                rel_ctrl = self.relative_control
                self.relative_control = True
                self._set_action(np.zeros(self.action_space.shape))
                self.relative_control = rel_ctrl
            else:
                self.sim.set_state(self.initial_state)
                # Reset initial position of arm.
                new_arm_pose = self._initial_arm_mocap_pose.copy()
                if self.randomize_initial_arm_pos:
                    new_arm_pose[:2] += self.np_random.uniform(-0.2, 0.2, size=2)
                self._set_arm_pose(new_arm_pose)

            for _ in range(10):
                self.sim.step()

            # Randomize initial position of object.
            if self.has_object and not reset_to_grasp_state:
                object_qpos = self.sim.data.get_joint_qpos('object:joint').copy()
                if self.randomize_initial_object_pos:
                    object_xpos = self.np_random.uniform(*self.table_safe_bounds)
                else:
                    object_xpos = self._get_palm_pose(no_rot=True)[:2]
                object_xpos += self.np_random.uniform(-0.005, 0.005, size=2)  # always add small amount of noise
                object_qpos[:2] = object_xpos
                self.sim.data.set_joint_qpos('object:joint', object_qpos)

            self.sim.forward()

            if not self.has_object:
                break
            else:
                object_pos = self._get_object_pose()[:3]
                object_vel = self.sim.data.get_joint_qvel('object:joint')
                object_still = np.linalg.norm(object_vel) < 0.8
                if reset_to_grasp_state:
                    palm_pos = self._get_palm_pose(no_rot=True)[:3]
                    object_on_palm = np.linalg.norm(object_pos - palm_pos) < 0.08
                    if object_still and object_on_palm:
                        break
                else:
                    object_on_table = _check_range(object_pos[:2], *self.table_safe_bounds)
                    if object_still and object_on_table:
                        break
        return True
    def _sample_goal(self):
        goal = np.r_[self.np_random.uniform(*self.table_safe_bounds), 0.0]
        if self.has_object:
            goal[2] = self.height_offset
        if self.np_random.uniform() < self.target_in_the_air_p:
            goal[2] += self.np_random.uniform(0, 0.45)
        goal = np.r_[goal, np.zeros(4)]
        if self.success_on_grasp_only:
            goal = np.r_[goal, 0.]
        return goal

    def _render_callback(self):
        # Assign current state to target object but offset a bit so that the actual object
        # is not obscured.
        goal = self.goal.copy()[:7]
        assert goal.shape == (7,)
        self.sim.data.set_joint_qpos('target:joint', goal)
        self.sim.data.set_joint_qvel('target:joint', np.zeros(6))
        self.sim.forward()

    def _get_obs(self):
        robot_qpos, robot_qvel = robot_get_obs(self.sim)
        dt = self.sim.nsubsteps * self.sim.model.opt.timestep
        forearm_pose = self._get_body_pose('robot0:forearm', euler=True)
        forearm_velp = self.sim.data.get_body_xvelp('robot0:forearm') * dt
        palm_pos = self._get_palm_pose(no_rot=True)[:3]

        object_pose = np.zeros(0)
        object_vel = np.zeros(0)
        object_rel_pos = np.zeros(0)
        if self.has_object:
            object_vel = self.sim.data.get_joint_qvel('object:joint')
            object_pose = self._get_body_pose('object', euler=True)
            object_rel_pos = object_pose[:3] - palm_pos

        observation = np.concatenate([
            forearm_pose, forearm_velp, palm_pos, object_rel_pos,
            robot_qpos, robot_qvel, object_pose, object_vel
        ])
        return {
            'observation': observation,
            'achieved_goal': self._get_achieved_goal().ravel(),
            'desired_goal': self.goal.ravel().copy(),
        }
class HandPickAndPlaceEnv(MovingHandEnv):
def __init__(self, reward_type='sparse', **kwargs):
super(HandPickAndPlaceEnv, self).__init__(
model_path=HAND_PICK_AND_PLACE_XML,
reward_type=reward_type,
has_object=True, **kwargs
)
class MovingHandReachEnv(MovingHandEnv):
def __init__(self, reward_type='sparse', **kwargs):
super(MovingHandReachEnv, self).__init__(
model_path=HAND_MOVE_AND_REACH_XML,
reward_type=reward_type,
has_object=False, **kwargs
)
| 43.920601 | 157 | 0.600821 | 2,855 | 20,467 | 4.04028 | 0.144658 | 0.023667 | 0.02098 | 0.015778 | 0.314348 | 0.235024 | 0.168184 | 0.142436 | 0.104638 | 0.084525 | 0 | 0.041842 | 0.278351 | 20,467 | 465 | 158 | 44.015054 | 0.739133 | 0.045048 | 0 | 0.132075 | 0 | 0.02965 | 0.14697 | 0.009579 | 0 | 0 | 0 | 0 | 0.026954 | 1 | 0.061995 | false | 0 | 0.024259 | 0.008086 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
384d14f0e35bddd98ff67e0f7591aca683112f67 | 1,672 | py | Python | paas-ce/paas/esb/components/generic/templates/cmsi/get_msg_type.py | canway-bk/bk-PaaS | 7a6fe1ef38a7e4e2bd11a6c2efa871a967ac2a3c | [
"Apache-2.0"
] | 767 | 2019-03-25T06:35:43.000Z | 2022-03-30T08:57:51.000Z | paas-ce/paas/esb/components/generic/templates/cmsi/get_msg_type.py | canway-bk/bk-PaaS | 7a6fe1ef38a7e4e2bd11a6c2efa871a967ac2a3c | [
"Apache-2.0"
] | 194 | 2019-03-29T07:16:41.000Z | 2022-03-30T06:17:49.000Z | paas-ce/paas/esb/components/generic/templates/cmsi/get_msg_type.py | canway-bk/bk-PaaS | 7a6fe1ef38a7e4e2bd11a6c2efa871a967ac2a3c | [
"Apache-2.0"
] | 381 | 2019-03-25T07:19:54.000Z | 2022-03-29T03:22:42.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from components.component import Component, SetupConfMixin
from common.constants import API_TYPE_Q, HTTP_METHOD
from common.base_utils import str_bool
from .toolkit import configs
class GetMsgType(Component, SetupConfMixin):
    suggest_method = HTTP_METHOD.GET
    label = u'查询消息发送类型'
    label_en = 'Get message type'

    sys_name = configs.SYSTEM_NAME
    api_type = API_TYPE_Q

    def handle(self):
        bk_language = self.request.headers.get('Blueking-Language', 'en')

        msg_type = []
        for mt in configs.msg_type:
            is_active = mt.get('is_active', str_bool(getattr(self, mt['type'], False)))
            msg_type.append({
                'type': mt['type'],
                'label': mt['label_en'] if bk_language == 'en' else mt['label'],
                'is_active': is_active,
            })

        self.response.payload = {
            "result": True,
            "data": msg_type,
        }
| 42.871795 | 305 | 0.689593 | 229 | 1,672 | 4.925764 | 0.572052 | 0.053191 | 0.02305 | 0.028369 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008455 | 0.22189 | 1,672 | 38 | 306 | 44 | 0.85857 | 0.434809 | 0 | 0 | 0 | 0 | 0.110043 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.166667 | 0 | 0.458333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
697355181e1f710ba412694ba7696f2baa4d4985 | 4,646 | py | Python | Compression.py | rafaaseddik/minimalistic-search-engine | a9cb14b7eb49b8990c93c913f9465d831a30b189 | [
"Apache-2.0"
] | null | null | null | Compression.py | rafaaseddik/minimalistic-search-engine | a9cb14b7eb49b8990c93c913f9465d831a30b189 | [
"Apache-2.0"
] | null | null | null | Compression.py | rafaaseddik/minimalistic-search-engine | a9cb14b7eb49b8990c93c913f9465d831a30b189 | [
"Apache-2.0"
] | null | null | null | import pickle
## GAP
def GapEncoding(decoded):
    result = {}
    for key in decoded.keys():
        # Get the token's posting
        toEncode = decoded[key]
        # Initialise token's result
        encoded = [toEncode[0], toEncode[1], []]
        encoded[2] = toEncode[2]
        # Gap Encoding Algorithm
        lastIndex = toEncode[2][0]
        for doc in toEncode[3:]:
            encoded.append((doc[0] - lastIndex, doc[1]))
            lastIndex = doc[0]
        result[key] = encoded
    return result


def GapDecoding(encoded):
    result = {}
    for key in encoded.keys():
        # Get the token's posting
        toDecode = encoded[key]
        # Initialise token's result
        decoded = [toDecode[0], toDecode[1], []]
        decoded[2] = toDecode[2]
        # Gap Decoding Algorithm (inverse of GapEncoding)
        lastIndex = toDecode[2][0]
        for doc in toDecode[3:]:
            decoded.append((doc[0] + lastIndex, doc[1]))
            lastIndex = doc[0] + lastIndex
        result[key] = decoded
    return result
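
# A quick illustration of the gap idea (illustrative values only, not taken
# from the original index, and the exact posting layout is an assumption):
# document ids 3, 7, 10 are stored as the first id followed by the gaps 4
# and 3, i.e. (3, tf), (4, tf), (3, tf); decoding adds the gaps back up to
# recover 3, 7, 10. Smaller gap values compress better under the Elias-gamma
# code defined below.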
def PostingToArray(posting):
    result = posting[:2]
    for element in posting[2:]:
        result.append(element[0])
        result.append(element[1])
    return result


def GammaEncoding(number):
    binaryRep = bin(number)[2:]
    length = ''.join(['1' for i in range(len(binaryRep) - 1)]) + '0'
    return length + binaryRep[1:]


def GammaDecoding(binary):
    result = []
    while len(binary):
        firstZeroIndex = binary.find('0')
        decoded = int('1' + binary[firstZeroIndex + 1:2 * firstZeroIndex + 1], 2)
        result.append(decoded)
        binary = binary[2 * firstZeroIndex + 1:]
    return result


def EncodeArrayGamma(array):
    return ''.join([GammaEncoding(e) for e in array])
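
# Sanity check of the Elias-gamma round trip (illustrative, not part of the
# original module): GammaEncoding(9) produces '1110' + '001' == '1110001',
# and GammaDecoding('1110001') returns [9]. For a whole array:
#     GammaDecoding(EncodeArrayGamma([5, 1, 9])) == [5, 1, 9]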
def encodePostingToFile(allData, filename):
    with open('original' + filename, 'wb') as f:
        pickle.dump(allData, f, protocol=pickle.HIGHEST_PROTOCOL)
    result = {}
    for key in allData['index'].keys():
        gapEncoded = GapEncoding(allData['index'])[key]
        binary_encoded = EncodeArrayGamma(PostingToArray(gapEncoded))
        bit_strings = [binary_encoded[i:i + 8] for i in range(0, len(binary_encoded), 8)]
        byte_list = [int(b, 2) for b in bit_strings]
        if key == "damage":
            print(key, byte_list)
        result[key] = byte_list
    allData['index'] = result
    with open(filename, 'wb') as f:
        pickle.dump(allData, f, protocol=pickle.HIGHEST_PROTOCOL)
    return result


def encodePostingToFileNew(allData, filename):
    with open('original' + filename, 'wb') as f:
        pickle.dump(allData, f, protocol=pickle.HIGHEST_PROTOCOL)
    result = {}
    for key in allData['index'].keys():
        gapEncoded = GapEncoding(allData['index'])[key]
        binary_encoded = EncodeArrayGamma(PostingToArray(gapEncoded))
        result[key] = binary_encoded
    allData['index'] = result
    with open(filename, 'wb') as f:
        pickle.dump(allData, f, protocol=pickle.HIGHEST_PROTOCOL)
    return result
def fromBinaryToIntArray(binary):
    # binary_coded = ''.join([bin(e)[2:] for e in binary])
    binary_string = ''
    for i in range(len(binary)):
        binary_code = bin(binary[i])[2:]
        if i != len(binary) - 1 and len(binary_code) < 8:
            # must pad with leading zeros
            binary_code = ''.join(['0' for _ in range(8 - len(binary_code))]) + binary_code
        binary_string = binary_string + binary_code
    return binary_string


def loadIndex(filename):
    with open(filename, 'rb') as f:
        return pickle.load(f)
def DecompressIndex(encodedIndex):
    result = {}
    for key in list(encodedIndex.keys())[:10]:
        decompressedList = GammaDecoding(fromBinaryToIntArray(encodedIndex[key]))
        node = decompressedList[:2]
        decompressedList = decompressedList[2:]
        while len(decompressedList) >= 2:
            node.append((decompressedList[0], decompressedList[1]))
            if len(decompressedList) == 2:
                break
            else:
                decompressedList = decompressedList[2:]
        result[key] = node
    print(result)
    return GapDecoding(result)


def DecompressIndexNew(encodedIndex):
    result = {}
    for key in list(encodedIndex.keys())[:10]:
        decompressedList = GammaDecoding(encodedIndex[key])
        node = decompressedList[:2]
        decompressedList = decompressedList[2:]
        while len(decompressedList) >= 2:
            node.append((decompressedList[0], decompressedList[1]))
            if len(decompressedList) == 2:
                break
            else:
                decompressedList = decompressedList[2:]
        result[key] = node
return GapDecoding(result) | 35.465649 | 89 | 0.620103 | 530 | 4,646 | 5.388679 | 0.181132 | 0.047619 | 0.02521 | 0.029412 | 0.520308 | 0.464986 | 0.44888 | 0.44888 | 0.44888 | 0.423669 | 0 | 0.019048 | 0.254197 | 4,646 | 131 | 90 | 35.465649 | 0.805195 | 0.047353 | 0 | 0.442478 | 0 | 0 | 0.015172 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106195 | false | 0 | 0.00885 | 0.00885 | 0.221239 | 0.017699 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69761b2e063701aea3c8b8d4a06a737b92dca98b | 557 | py | Python | app/ext/database.py | jonatasoli/sapu | b0b796a53fecd9ab77eb0ece31d2a0113c656fc5 | [
"BSD-3-Clause"
] | 1 | 2020-10-29T01:48:36.000Z | 2020-10-29T01:48:36.000Z | app/ext/database.py | jonatasoli/sapu | b0b796a53fecd9ab77eb0ece31d2a0113c656fc5 | [
"BSD-3-Clause"
] | 10 | 2020-10-29T23:46:13.000Z | 2020-10-29T23:48:00.000Z | app/ext/database.py | jonatasoli/sapu | b0b796a53fecd9ab77eb0ece31d2a0113c656fc5 | [
"BSD-3-Clause"
] | null | null | null | from tortoise import Tortoise
from dynaconf import settings
async def init():
    # Here we connect to a Postgres database. We also specify the app name
    # "models", which contains the models from "app.models".
    await Tortoise.init(
        db_url=settings.DATABASE_URL,
        modules={'models': ['app.models', "aerich.models"]}
    )


TORTOISE_ORM = {
    "connections": {"default": settings.DATABASE_URL},
    "apps": {
        "models": {
            "models": ["models", "aerich.models"],
            "default_connection": "default",
        },
    },
}
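
# Illustrative usage only (not in the original file): the async setup can be
# run once at application startup with Tortoise's helper.
#
#   from tortoise import run_async
#   run_async(init())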
| 25.318182 | 59 | 0.599641 | 60 | 557 | 5.483333 | 0.566667 | 0.054711 | 0.115502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.263914 | 557 | 21 | 60 | 26.52381 | 0.802439 | 0.192101 | 0 | 0 | 0 | 0 | 0.23991 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69766d6af00c62b98d16c76cbde72ead241d0e17 | 628 | py | Python | leetcode/python/medium/p1105_minHeightShelves.py | kefirzhang/algorithms | 549e68731d4c05002e35f0499d4f7744f5c63979 | [
"Apache-2.0"
] | null | null | null | leetcode/python/medium/p1105_minHeightShelves.py | kefirzhang/algorithms | 549e68731d4c05002e35f0499d4f7744f5c63979 | [
"Apache-2.0"
] | null | null | null | leetcode/python/medium/p1105_minHeightShelves.py | kefirzhang/algorithms | 549e68731d4c05002e35f0499d4f7744f5c63979 | [
"Apache-2.0"
] | null | null | null | class Solution:
    def minHeightShelves(self, books, shelf_width: int) -> int:
        length = len(books)
        # dp[i] is the minimum total shelf height needed for the first i books.
        dp = [1000000] * (length + 1)
        dp[0] = 0
        for i in range(1, length + 1):
            tmp_width, h, j = 0, 0, i
            # Try placing books j..i together on the last shelf.
            while j > 0:
                h = max(h, books[j - 1][1])
                tmp_width += books[j - 1][0]
                if tmp_width > shelf_width:
                    break
                dp[i] = min(dp[i], dp[j - 1] + h)
                j -= 1
        return dp[-1]


slu = Solution()
# Expected output: 6 (LeetCode 1105, example 1).
print(slu.minHeightShelves([[1, 1], [2, 3], [2, 3], [1, 1], [1, 1], [1, 1], [1, 2]], 4))
| 31.4 | 88 | 0.420382 | 90 | 628 | 2.877778 | 0.366667 | 0.061776 | 0.057915 | 0.061776 | 0.027027 | 0.027027 | 0 | 0 | 0 | 0 | 0 | 0.09973 | 0.409236 | 628 | 19 | 89 | 33.052632 | 0.598383 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0 | 0 | 0.176471 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6976ccafe60900afb7a75bc8b7218bc5457673b2 | 2,509 | py | Python | frappe_notification/frappe_notification/controllers/templates/tests/test_get_templates.py | leam-tech/frappe_notification | 79e40f2c541d86d714a0b8d48b87f32b2f85076a | [
"MIT"
] | null | null | null | frappe_notification/frappe_notification/controllers/templates/tests/test_get_templates.py | leam-tech/frappe_notification | 79e40f2c541d86d714a0b8d48b87f32b2f85076a | [
"MIT"
] | null | null | null | frappe_notification/frappe_notification/controllers/templates/tests/test_get_templates.py | leam-tech/frappe_notification | 79e40f2c541d86d714a0b8d48b87f32b2f85076a | [
"MIT"
] | null | null | null | from unittest import TestCase
import frappe
from frappe_notification import (
    NotificationClientFixtures,
    NotificationTemplateFixtures,
    NotificationClientNotFound,
    # NotificationTemplateNotFound,
    set_active_notification_client)

from ..get_templates_list import get_templates


class TestGetTemplates(TestCase):
    clients = NotificationClientFixtures()
    templates = NotificationTemplateFixtures()

    @classmethod
    def setUpClass(cls):
        cls.clients.setUp()
        cls.templates.setUp()

    @classmethod
    def tearDownClass(cls):
        cls.templates.tearDown()
        cls.clients.tearDown()

    def setUp(self) -> None:
        frappe.set_user("Guest")
        set_active_notification_client(None)

    def tearDown(self) -> None:
        frappe.set_user("Administrator")
        set_active_notification_client(None)

    def test_simple(self):
        """
        - Login as a Manager Client
        - Ask for all templates he can access
        """
        client = self.clients.get_manager_client().name
        set_active_notification_client(client)

        templates = get_templates()
        self.assertGreater(len(templates), 0)
        for t in templates:
            self.assertIsNotNone(t.name)
            self.assertIsNotNone(t.key)
            self.assertIsNotNone(t.subject)
            self.assertEqual(t.created_by, client)

    def test_admin(self):
        """
        - Login as Administrator (not a Client)
        - He will not have access to any templates
        """
        frappe.set_user("Administrator")
        _template = self.templates[0]
        self.assertIsNotNone(_template)

        with self.assertRaises(NotificationClientNotFound):
            get_templates()

    def test_subordinate(self):
        """
        - Login as a subordinate
        - Ask for all templates he can access
        - He should get templates that his manager allowed him access
        """
        manager_1 = self.clients.get_manager_client().name
        client_m1 = self.clients.get_clients_managed_by(manager_1)[0].name
        set_active_notification_client(client_m1)

        templates = get_templates()
        self.assertGreater(len(templates), 0)
        for t in templates:
            self.assertIsNotNone(t.name)
            self.assertIsNotNone(t.key)
            self.assertIsNotNone(t.subject)
            self.assertTrue(any([
                t.created_by == client_m1,
                t.created_by == manager_1
            ]))
| 28.191011 | 74 | 0.64169 | 259 | 2,509 | 6.03861 | 0.285714 | 0.085038 | 0.076726 | 0.086317 | 0.375959 | 0.349105 | 0.221228 | 0.184143 | 0.184143 | 0.184143 | 0 | 0.005504 | 0.275807 | 2,509 | 88 | 75 | 28.511364 | 0.855256 | 0.121164 | 0 | 0.327273 | 0 | 0 | 0.014741 | 0 | 0 | 0 | 0 | 0 | 0.218182 | 1 | 0.127273 | false | 0 | 0.072727 | 0 | 0.254545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69803440f4ec5474a3d41e644bcbc330b3047a85 | 1,249 | py | Python | examples/sms_example.py | twizoapi/lib-api-python | 115d0f574518fbe03f74801d743972c7593bf30e | [
"MIT"
] | null | null | null | examples/sms_example.py | twizoapi/lib-api-python | 115d0f574518fbe03f74801d743972c7593bf30e | [
"MIT"
] | null | null | null | examples/sms_example.py | twizoapi/lib-api-python | 115d0f574518fbe03f74801d743972c7593bf30e | [
"MIT"
] | null | null | null | import pprint
from examples.api_key import api_key, api_host
from models.parameters import SmsAdvanceParams, SmsParams
from twizo import Twizo
if __name__ == '__main__':
    twizo = Twizo(api_key=api_key,
                  api_host=api_host)

    params = SmsParams(['12345600000', '432543254'], "A SMS Message", '01023456789')
    params.resultType = 2

    pprint = pprint.PrettyPrinter(indent=4)
    controller = twizo.sms_controller

    result = controller.send_simple(params)
    for sms in result:
        pprint.pprint(vars(sms))

    print("\n One recipient: \n")
    params = SmsParams(['12345600000'], "A SMS Message", '01023456789')
    result = controller.send_simple(params)
    pprint.pprint(vars(result[0]))

    print("\n Advanced sms: \n")
    params = SmsAdvanceParams(params.recipients, params.body, params.sender)
    params.udh = "0A"
    params.dcs = 2
    result = controller.send_advanced(params)
    pprint.pprint(vars(result[0]))

    print("\n get sms status: \n")
    result = controller.get_status(result[0].messageId)
    pprint.pprint(vars(result))

    print("\n get delivery report: \n")
    result = controller.get_delivery_report()
    for sms in result:
        pprint.pprint(vars(sms))
| 26.020833 | 84 | 0.670136 | 154 | 1,249 | 5.292208 | 0.331169 | 0.088344 | 0.09816 | 0.080982 | 0.238037 | 0.166871 | 0.166871 | 0.166871 | 0 | 0 | 0 | 0.060852 | 0.210568 | 1,249 | 47 | 85 | 26.574468 | 0.76572 | 0 | 0 | 0.258065 | 0 | 0 | 0.152922 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.129032 | 0 | 0.129032 | 0.354839 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
698319c4eb2ec5e438aeef63de23d5f2cd5920f3 | 1,577 | py | Python | censys_subdomains.py | ctixsystems/censys-recon-ng | fded032044796b5893a572d00b05f2f03843eeb5 | [
"Apache-2.0"
] | 23 | 2019-05-07T02:27:22.000Z | 2022-02-17T02:14:40.000Z | censys_subdomains.py | ctixsystems/censys-recon-ng | fded032044796b5893a572d00b05f2f03843eeb5 | [
"Apache-2.0"
] | 3 | 2021-09-17T14:25:13.000Z | 2022-03-08T14:32:44.000Z | censys_subdomains.py | ctixsystems/censys-recon-ng | fded032044796b5893a572d00b05f2f03843eeb5 | [
"Apache-2.0"
] | 13 | 2019-08-30T09:48:39.000Z | 2022-02-13T01:32:35.000Z | from recon.core.module import BaseModule
from censys.certificates import CensysCertificates
from censys.base import CensysException
class Module(BaseModule):
    meta = {
        'name': 'Censys subdomains by company',
        'author': 'J Nazario',
        'version': '1.1',
        'description': 'Retrieves certificates for companies, and updates the \'domains\' table with the results.',
        'query': 'SELECT DISTINCT company FROM companies WHERE company IS NOT NULL',
        'dependencies': ['censys'],
        'required_keys': ['censysio_id', 'censysio_secret'],
    }

    def module_run(self, companies):
        api_id = self.get_key('censysio_id')
        api_secret = self.get_key('censysio_secret')
        c = CensysCertificates(
            api_id, api_secret, timeout=self._global_options['timeout']
        )
        SEARCH_FIELDS = [
            'parsed.subject.organization',
            'parsed.subject.organizational_unit',
        ]
        CERT_FIELDS = [
            'parsed.names',
        ]
        for company in companies:
            self.heading(company, level=0)
            try:
                query = ' OR '.join(
                    ['{0}:"{1}"'.format(x, company) for x in SEARCH_FIELDS]
                )
                payload = c.search(query, CERT_FIELDS)
            except CensysException:
                continue
            for result in payload:
                for name in result.get('parsed.names', []):
                    if name.startswith('*.'):
                        self.insert_domains(name.replace('*.', ''))
| 35.044444 | 86 | 0.557387 | 155 | 1,577 | 5.541935 | 0.522581 | 0.023283 | 0.023283 | 0.041909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004721 | 0.328472 | 1,577 | 44 | 87 | 35.840909 | 0.806421 | 0 | 0 | 0 | 0 | 0 | 0.235891 | 0.038681 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.075 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6984c4e5f95ca2a2f02ce82bd452911e9c17f8a9 | 20,746 | py | Python | rul_pm/results/results.py | lucianolorenti/rul_pm | da9dfad79129dd47d24923cfd6c833869ef7b6a7 | [
"MIT"
] | 1 | 2021-09-01T13:13:10.000Z | 2021-09-01T13:13:10.000Z | rul_pm/results/results.py | lucianolorenti/rul_pm | da9dfad79129dd47d24923cfd6c833869ef7b6a7 | [
"MIT"
] | 3 | 2021-08-24T15:23:52.000Z | 2021-11-09T10:28:51.000Z | rul_pm/results/results.py | lucianolorenti/rul_pm | da9dfad79129dd47d24923cfd6c833869ef7b6a7 | [
"MIT"
] | 1 | 2021-12-25T14:00:16.000Z | 2021-12-25T14:00:16.000Z | """Compute evaluating results of fitted models
The main structure used in these functions is a dictionary in which
the keys are the model names and the values are lists of dictionaries.
Each of those dictionaries contains two keys: true, predicted.
Those elements are lists of the predictions.
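
Example layout (illustrative values only, not taken from a real run):

    {
        "model_a": [
            {"true": [10, 9, 8], "predicted": [9.5, 9.1, 7.8]},
            {"true": [5, 4, 3], "predicted": [5.2, 3.9, 3.1]},
        ]
    }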
"""
import logging
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from rul_pm.results.picewise_regression import (
    PiecewesieLinearFunction,
    PiecewiseLinearRegression,
)
from sklearn.metrics import mean_absolute_error as mae
from sklearn.metrics import mean_squared_error as mse
logger = logging.getLogger(__name__)
@dataclass
class MetricsResult:
    mae: float
    mse: float
    fitting_time: float = 0
    prediction_time: float = 0


@dataclass
class PredictionResult:
    name: str
    true_RUL: np.ndarray
    predicted_RUL: np.ndarray
    metrics: MetricsResult = MetricsResult(0, 0)

    def compute_metrics(self):
        self.metrics.mae = mae(self.true_RUL, self.predicted_RUL)
        self.metrics.mse = mse(self.true_RUL, self.predicted_RUL)


def compute_sample_weight(sample_weight, y_true, y_pred, c: float = 0.9):
    if sample_weight == "relative":
        sample_weight = np.abs(y_true - y_pred) / (np.clip(y_true, c, np.inf))
    else:
        sample_weight = 1
    return sample_weight


def compute_rul_line(rul: float, n: int, tt: Optional[np.array] = None):
    if tt is None:
        tt = -np.ones(n)
    z = np.zeros(n)
    z[0] = rul
    for i in range(len(tt) - 1):
        z[i + 1] = max(z[i] + tt[i], 0)
        if z[i + 1] - 0 < 0.0000000000001:
            break
    return z
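
# Illustrative only (not part of the original module): with the default
# unit-step decrements, compute_rul_line(5, 5) returns the straight RUL line
# array([5., 4., 3., 2., 1.]).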
class CVResults:
    def __init__(
        self,
        y_true: List[List],
        y_pred: List[List],
        nbins: int = 5,
        bin_edges: Optional[np.array] = None,
    ):
        """
        Compute the error histogram

        Compute the error with respect to the RUL considering the results of different
        folds

        Parameters
        ----------
        y_true: List[List]
            List with the true values of each hold-out set of a cross validation
        y_pred: List[List]
            List with the predictions of each hold-out set of a cross validation
        nbins: int
            Number of bins to compute the histogram
        """
        if bin_edges is None:
            max_value = np.max([np.max(y) for y in y_true])
            bin_edges = np.linspace(0, max_value, nbins + 1)
        self.n_folds = len(y_true)
        self.n_bins = len(bin_edges) - 1
        self.bin_edges = bin_edges
        self.mean_error = np.zeros((self.n_folds, self.n_bins))
        self.mae = np.zeros((self.n_folds, self.n_bins))
        self.mse = np.zeros((self.n_folds, self.n_bins))
        self.errors = []
        for i, (y_pred, y_true) in enumerate(zip(y_pred, y_true)):
            self._add_fold_result(i, y_pred, y_true)

    def _add_fold_result(self, fold: int, y_pred: np.array, y_true: np.array):
        y_pred = np.squeeze(y_pred)
        y_true = np.squeeze(y_true)
        for j in range(len(self.bin_edges) - 1):
            mask = (y_true >= self.bin_edges[j]) & (y_true <= self.bin_edges[j + 1])
            indices = np.where(mask)[0]
            if len(indices) == 0:
                continue
            errors = y_true[indices] - y_pred[indices]
            self.mean_error[fold, j] = np.mean(errors)
            self.mae[fold, j] = np.mean(np.abs(errors))
            self.mse[fold, j] = np.mean((errors) ** 2)
            self.errors.append(errors)
def model_cv_results(
    results: List[PredictionResult],
    nbins: Optional[int] = None,
    bin_edges: Optional[np.ndarray] = None,
) -> CVResults:
    if nbins is None and bin_edges is None:
        raise ValueError("nbins and bin_edges cannot be both None")
    if nbins is None:
        nbins = len(bin_edges) - 1
    if bin_edges is None:
        max_y_value = np.max([r.true_RUL.max() for r in results])
        bin_edges = np.linspace(0, max_y_value, nbins + 1)
    trues = []
    predicted = []
    for result in results:
        trues.append(result.true_RUL)
        predicted.append(result.predicted_RUL)
    return CVResults(trues, predicted, nbins=nbins, bin_edges=bin_edges)


def models_cv_results(
    results_dict: Dict[str, List[PredictionResult]], nbins: int
) -> Tuple[np.ndarray, Dict[str, CVResults]]:
    """Create a dictionary with the result of each cross validation of the model"""
    max_y_value = np.max(
        [
            r.true_RUL.max()
            for model_name in results_dict.keys()
            for r in results_dict[model_name]
        ]
    )
    bin_edges = np.linspace(0, max_y_value, nbins + 1)
    model_results = {}
    for model_name in results_dict.keys():
        model_results[model_name] = model_cv_results(
            results_dict[model_name], bin_edges=bin_edges
        )
    return bin_edges, model_results
class FittedLife:
    """Represent a Fitted Life

    Parameters
    ----------
    y_true: np.array
        The true RUL target
    y_pred: np.array
        The predicted target
    time: Optional[Union[np.array, int]]
        Time feature
    fit_line_not_increasing: Optional[bool] = False
        Whether the fitted line can increase or not.
    RUL_threshold: Optional[float]
        Indicates the thresholding value used during the fit. By default None
    """

    def __init__(
        self,
        y_true: np.array,
        y_pred: np.array,
        time: Optional[Union[np.array, int]] = None,
        fit_line_not_increasing: Optional[bool] = False,
        RUL_threshold: Optional[float] = None,
    ):
        self.fit_line_not_increasing = fit_line_not_increasing
        y_true = np.squeeze(y_true)
        y_pred = np.squeeze(y_pred)

        if time is not None:
            self.degrading_start = FittedLife._degrading_start(y_true, RUL_threshold)
            if isinstance(time, np.ndarray):
                self.time = time
            else:
                self.time = np.array(np.linspace(0, y_true[0], num=len(y_true)))
        else:
            self.degrading_start, self.time = FittedLife.compute_time_feature(
                y_true, RUL_threshold
            )

        # self.y_pred_fitted_picewise = self._fit_picewise_linear_regression(y_pred)
        # self.y_true_fitted_picewise = self._fit_picewise_linear_regression(y_true)

        self.RUL_threshold = RUL_threshold
        self.y_pred = y_pred
        self.y_true = y_true

        self.y_pred_fitted_coefficients = np.polyfit(self.time, self.y_pred, 1)
        p = np.poly1d(self.y_pred_fitted_coefficients)
        self.y_pred_fitted = p(self.time)

        self.y_true_fitted_coefficients = np.polyfit(self.time, self.y_true, 1)
        p = np.poly1d(self.y_true_fitted_coefficients)
        self.y_true_fitted = p(self.time)

    @staticmethod
    def compute_time_feature(y_true: np.array, RUL_threshold: Optional[float] = None):
        degrading_start = FittedLife._degrading_start(y_true, RUL_threshold)
        time = FittedLife._compute_time(y_true, degrading_start)
        return degrading_start, time

    @staticmethod
    def _degrading_start(
        y_true: np.array, RUL_threshold: Optional[float] = None
    ) -> float:
        """Obtain the index when the life value is lower than the RUL_threshold

        Parameters
        ----------
        y_true : np.array
            Array of true values of the RUL of the life
        RUL_threshold : float

        Returns
        -------
        float
            If RUL_threshold is None, the degrading start is the first index.
            Otherwise it is the first index in which y_true < RUL_threshold
        """
        degrading_start = 0
        if RUL_threshold is not None:
            degrading_start_i = np.where(y_true < RUL_threshold)
            if len(degrading_start_i[0]) > 0:
                degrading_start = degrading_start_i[0][0]
        return degrading_start

    @staticmethod
    def _compute_time(y_true: np.array, degrading_start: int) -> np.array:
        """Compute the passage of time from the true RUL

        The passage of time is computed as the cumulative sum of the first
        difference of the true labels. In case there are thresholded values,
        the time steps of the thresholded zone are assumed to be the median of
        the time steps computed in the zones of the life for which we have
        information.

        Parameters
        ----------
        y_true : np.array
            The true RUL labels
        degrading_start : int
            The index in which the true RUL values start to be lower than the threshold

        Returns
        -------
        np.array
            The elapsed time at each point of the life
        """
        time_diff = np.diff(np.squeeze(y_true)[degrading_start:][::-1])
        time = np.zeros(len(y_true))
        if degrading_start > 0:
            if len(time_diff) > 0:
                time[0 : degrading_start + 1] = np.median(time_diff)
            else:
                time[0 : degrading_start + 1] = 1
        time[degrading_start + 1 :] = time_diff
        return np.cumsum(time)
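
    # Worked example (illustrative values, not from the original source): for
    # y_true = [3, 2, 1, 0] and degrading_start = 0, the reversed tail is
    # [0, 1, 2, 3], so time_diff = [1, 1, 1] and the returned cumulative time
    # is [0, 1, 2, 3].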
    def _fit_picewise_linear_regression(self, y: np.array) -> PiecewesieLinearFunction:
        """Fit the array through a picewise linear regression

        Parameters
        ----------
        y : np.array
            Points to be fitted

        Returns
        -------
        PiecewesieLinearFunction
            The picewise linear function fitted
        """
        pwlr = PiecewiseLinearRegression(not_increasing=self.fit_line_not_increasing)
        for j in range(len(y)):
            pwlr.add_point(self.time[j], y[j])
        line = pwlr.finish()
        return line

    def rmse(self, sample_weight=None) -> float:
        N = len(self.y_pred)
        sw = compute_sample_weight(sample_weight, self.y_true[:N], self.y_pred)
        return np.sqrt(np.mean(sw * (self.y_true[:N] - self.y_pred) ** 2))

    def mae(self, sample_weight=None) -> float:
        N = len(self.y_pred)
        sw = compute_sample_weight(sample_weight, self.y_true[:N], self.y_pred)
        return np.mean(sw * np.abs(self.y_true[:N] - self.y_pred))

    def noisiness(self) -> float:
        """How much the predictions resemble a line

        This metric is computed as the MAE of the predictions with respect to
        the least-squares fitted line of those predictions.
        """
        return mae(self.y_pred_fitted, self.y_pred)

    def slope_resemblance(self):
        m1 = self.y_true_fitted_coefficients[0]
        m2 = self.y_pred_fitted_coefficients[0]
        d = np.arctan((m1 - m2) / (1 + m1 * m2))
        d = d / (np.pi / 2)
        return 1 - np.abs((d / (np.pi / 2)))

    def predicted_end_of_life(self):
        z = np.where(self.y_pred == 0)[0]
        if len(z) == 0:
            return self.time[len(self.y_pred) - 1] + self.y_pred[-1]
        else:
            return self.time[z[0]]

    def end_of_life(self):
        z = np.where(self.y_true == 0)[0]
        if len(z) == 0:
            return self.time[len(self.y_pred) - 1] + self.y_true[-1]
        else:
            return self.time[z[0]]

    def maintenance_point(self, m: float = 0):
        """Compute the maintenance point

        The maintenance point is computed as the predicted end of life - m

        Parameters
        ----------
        m: float, optional
            Fault horizon. Defaults to 0.

        Returns
        -------
        float
            Time of maintenance
        """
        return self.predicted_end_of_life() - m

    def unexploited_lifetime(self, m: float = 0):
        """Compute the unexploited lifetime given a fault horizon window

        Machine Learning for Predictive Maintenance: A Multiple Classifiers Approach
        Susto, G. A., Schirru, A., Pampuri, S., McLoone, S., & Beghi, A. (2015).

        Parameters
        ----------
        m: float, optional
            Fault horizon window. Defaults to 0.

        Returns
        -------
        float
            Unexploited lifetime
        """
        if self.maintenance_point(m) < self.end_of_life():
            return self.end_of_life() - self.maintenance_point(m)
        else:
            return 0

    def unexpected_break(self, m: float = 0, tolerance: float = 0):
        """Compute whether an unexpected break will occur using a fault horizon window of size m

        Machine Learning for Predictive Maintenance: A Multiple Classifiers Approach
        Susto, G. A., Schirru, A., Pampuri, S., McLoone, S., & Beghi, A. (2015).

        Parameters
        ----------
        m: float, optional
            Fault horizon window. Defaults to 0.

        Returns
        -------
        bool
            Whether the life ends before the maintenance point
        """
        if self.maintenance_point(m) - tolerance < self.end_of_life():
            return False
        else:
            return True
def split_lives_indices(y_true: np.array):
    """Obtain a list of indices for each life

    Parameters
    ----------
    y_true : np.array
        True vector with the RUL

    Returns
    -------
    List[List[int]]
        A list with the indices belonging to each life
    """
    assert len(y_true) >= 2
    lives_indices = (
        [0] + (np.where(np.diff(np.squeeze(y_true)) > 0)[0] + 1).tolist() + [len(y_true)]
    )
    indices = []
    for i in range(len(lives_indices) - 1):
        r = range(lives_indices[i], lives_indices[i + 1])
        if len(r) <= 1:
            continue
        indices.append(r)
    return indices
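
# Worked example (illustrative values only): for a concatenated target
# y_true = [3, 2, 1, 0, 3, 2, 1, 0], the RUL jumps back up at index 4, so
# split_lives_indices returns [range(0, 4), range(4, 8)] -- one index range
# per life.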
def split_lives(
    results: PredictionResult,
    RUL_threshold: Optional[float] = None,
    fit_line_not_increasing: Optional[bool] = False,
    time: Optional[int] = None,
) -> List[FittedLife]:
    """Divide an array of predictions into a list of FittedLife objects

    Parameters
    ----------
    results : PredictionResult
        The true and predicted RUL of possibly several lives
    RUL_threshold : Optional[float], optional
        Thresholding value used during the fit, by default None
    fit_line_not_increasing : Optional[bool], optional
        Whether the fit line can increase, by default False
    time : Optional[int], optional
        A vector with timestamps. If omitted it will be computed from the true RUL, by default None

    Returns
    -------
    List[FittedLife]
        FittedLife list
    """
    lives = []
    for r in split_lives_indices(results.true_RUL):
        if np.any(np.isnan(results.predicted_RUL[r])):
            continue
        lives.append(
            FittedLife(
                results.true_RUL[r],
                results.predicted_RUL[r],
                RUL_threshold=RUL_threshold,
                fit_line_not_increasing=fit_line_not_increasing,
                time=time,
            )
        )
    return lives


def split_lives_from_results(d: dict) -> List[FittedLife]:
    # Wrap the raw dict into a PredictionResult so that split_lives receives
    # the structure it expects (the original passed the two arrays
    # positionally, which no longer matches the split_lives signature).
    y_true = d["true"]
    y_pred = d["predicted"]
    return split_lives(PredictionResult(name="", true_RUL=y_true, predicted_RUL=y_pred))
def unexploited_lifetime(d: dict, window_size: int, step: int):
    bb = [split_lives_from_results(cv) for cv in d]
    return unexploited_lifetime_from_cv(bb, window_size, step)


def unexploited_lifetime_from_cv(
    lives: List[List[FittedLife]], window_size: int, n: int
):
    qq = []
    windows = np.linspace(0, window_size, n)
    for m in windows:
        jj = []
        for r in lives:
            ul_cv_list = [life.unexploited_lifetime(m) for life in r]
            mean_ul_cv = np.mean(ul_cv_list)
            std_ul_cv = np.std(ul_cv_list)
            jj.append(mean_ul_cv)
        qq.append(np.mean(jj))
    return windows, qq


def unexpected_breaks(
    d: dict, window_size: int, step: int
) -> Tuple[np.ndarray, np.ndarray]:
    """Compute the risk of unexpected breaks with respect to the maintenance window size

    Parameters
    ----------
    d : dict
        Dictionary with the results
    window_size : int
        Maximum size of the maintenance windows
    step : int
        Number of points in which to compute the risks.
        step different maintenance windows will be used.

    Returns
    -------
    Tuple[np.ndarray, np.ndarray]
        * Maintenance window size evaluated
        * Risk computed for every window size used
    """
    bb = [split_lives_from_results(cv) for cv in d]
    return unexpected_breaks_from_cv(bb, window_size, step)


def unexpected_breaks_from_cv(
    lives: List[List[FittedLife]], window_size: int, n: int
) -> Tuple[np.ndarray, np.ndarray]:
    """Compute the risk of unexpected breaks given cross-validation results

    Parameters
    ----------
    lives : List[List[FittedLife]]
        Cross validation results.
    window_size : int
        Maximum size of the maintenance window
    n : int
        Number of points to evaluate the risk of unexpected breaks

    Returns
    -------
    Tuple[np.ndarray, np.ndarray]
        * Maintenance window size evaluated
        * Risk computed for every window size used
    """
    qq = []
    windows = np.linspace(0, window_size, n)
    for m in windows:
        jj = []
        for r in lives:
            ul_cv_list = [life.unexpected_break(m) for life in r]
            mean_ul_cv = np.mean(ul_cv_list)
            std_ul_cv = np.std(ul_cv_list)
            jj.append(mean_ul_cv)
        qq.append(np.mean(jj))
    return windows, np.array(qq)


def metric_J_from_cv(lives: List[List[FittedLife]], window_size: int, n: int, q1, q2):
    J = []
    windows = np.linspace(0, window_size, n)
    for m in windows:
        J_of_m = []
        for r in lives:
            ub_cv_list = np.array([life.unexpected_break(m) for life in r])
            ub_cv_list = (ub_cv_list / (np.max(ub_cv_list) + 0.0000000001)) * q1
            ul_cv_list = np.array([life.unexploited_lifetime(m) for life in r])
            ul_cv_list = (ul_cv_list / (np.max(ul_cv_list) + 0.0000000001)) * q2
            values = ub_cv_list + ul_cv_list
            mean_J = np.mean(values)
            std_ul_cv = np.std(values)
            J_of_m.append(mean_J)
        J.append(np.mean(J_of_m))
    return windows, J


def metric_J(d, window_size: int, step: int):
    lives_cv = [split_lives_from_results(cv) for cv in d]
    return metric_J_from_cv(lives_cv, window_size, step)
def cv_regression_metrics_single_model(results: List[PredictionResult], threshold: float = np.inf):
    errors = {
        'MAE': [],
        'MAE SW': [],
        'MSE': [],
        'MSE SW': [],
        'Noisiness': [],
        'Slope': []
    }
    for result in results:
        y_mask = np.where(result.true_RUL <= threshold)[0]
        y_true = np.squeeze(result.true_RUL[y_mask])
        y_pred = np.squeeze(result.predicted_RUL[y_mask])

        sw = compute_sample_weight(
            "relative",
            y_true,
            y_pred,
        )
        try:
            MAE_SW = mae(
                y_true,
                y_pred,
                sample_weight=sw,
            )
        except Exception:
            MAE_SW = np.nan
        MAE = mae(y_true, y_pred)

        try:
            MSE_SW = mse(
                y_true,
                y_pred,
                sample_weight=sw,
            )
        except Exception:
            MSE_SW = np.nan
        MSE = mse(y_true, y_pred)

        lives = split_lives(result)
        errors['Noisiness'].extend([life.noisiness() for life in lives])
        errors['Slope'].extend([life.slope_resemblance() for life in lives])
        errors['MAE'].append(MAE)
        errors['MAE SW'].append(MAE_SW)
        errors['MSE'].append(MSE)
        errors['MSE SW'].append(MSE_SW)

    errors1 = {}
    for k in errors.keys():
        errors1[(k, 'mean')] = np.mean(errors[k])
        errors1[(k, 'std')] = np.std(errors[k])
    return errors1
def cv_regression_metrics(
    results_dict: Dict[str, List[PredictionResult]], threshold: float = np.inf
) -> dict:
    """Compute regression metrics for each model

    Parameters
    ----------
    results_dict : dict
        Dictionary with the model predictions.
        The dictionary must conform to the results specification of this module
    threshold : float, optional
        Compute metric errors only on RUL values less than the threshold, by default np.inf

    Returns
    -------
    dict
        A dictionary with the following format:

        {
            'MAE': {
                'mean':
                'std':
            },
            'MAE SW': {
                'mean':
                'std':
            },
            'MSE': {
                'mean':
                'std':
            },
        }
    """
    out = {}
    for model_name in results_dict.keys():
        out[model_name] = cv_regression_metrics_single_model(results_dict[model_name], threshold)
    return out
| 30.553756 | 101 | 0.596886 | 2,771 | 20,746 | 4.282209 | 0.115482 | 0.027389 | 0.014411 | 0.011124 | 0.422636 | 0.334906 | 0.272881 | 0.241025 | 0.19282 | 0.15102 | 0 | 0.009934 | 0.301263 | 20,746 | 678 | 102 | 30.59882 | 0.808637 | 0.270944 | 0 | 0.233333 | 0 | 0 | 0.010639 | 0 | 0 | 0 | 0 | 0 | 0.002778 | 1 | 0.088889 | false | 0 | 0.022222 | 0 | 0.233333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6984f4887fd119cbb2dc7ba7d8a3f67d1de12e78 | 490 | py | Python | src/readthermal.py | wemakerspace/MLX90640_Serial_Processing_Python | 66936a08e3c3864d267d214c8ab1e0bce34c0a19 | [
"MIT"
] | 4 | 2020-04-06T17:01:12.000Z | 2022-01-08T21:32:29.000Z | src/readthermal.py | wemakerspace/MLX90640_Serial_Processing_Python | 66936a08e3c3864d267d214c8ab1e0bce34c0a19 | [
"MIT"
] | null | null | null | src/readthermal.py | wemakerspace/MLX90640_Serial_Processing_Python | 66936a08e3c3864d267d214c8ab1e0bce34c0a19 | [
"MIT"
] | 2 | 2020-05-01T04:42:16.000Z | 2020-10-09T16:04:44.000Z | import serial
import numpy as np
ser = serial.Serial("/dev/ttyACM0", 115200)
def getData():
    frames = 0
    while True:
        frames += 1
        recv = ser.readline()
        recv = recv.rstrip()  # strip the return character
        if frames > 1:  # ditch the first frame in case it is incomplete
            data = np.fromstring(recv, dtype=float, count=-1, sep=',')  # get the data
            print(data)


try:
    if not ser.is_open:
        ser.open()
    getData()
except KeyboardInterrupt:  # Ctrl+C
    if ser is not None:
        ser.close()
| 18.846154 | 75 | 0.667347 | 74 | 490 | 4.405405 | 0.662162 | 0.042945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027919 | 0.195918 | 490 | 25 | 76 | 19.6 | 0.799492 | 0.185714 | 0 | 0 | 0 | 0 | 0.032911 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.157895 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69854ff5a0a84afda144b80a24cdaa422d21b158 | 1,219 | py | Python | setup.py | dmentipl/exact-astro | 9099159b497ec9fa8b6089250c923e06e1e08691 | [
"MIT"
] | 3 | 2020-07-30T02:27:28.000Z | 2020-08-20T05:05:21.000Z | setup.py | dmentipl/exact-astro | 9099159b497ec9fa8b6089250c923e06e1e08691 | [
"MIT"
] | null | null | null | setup.py | dmentipl/exact-astro | 9099159b497ec9fa8b6089250c923e06e1e08691 | [
"MIT"
] | null | null | null | """exact-astro setup.py."""
import io
import pathlib
import re
from setuptools import setup
__version__ = re.search(
    r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]',  # It excludes inline comment too
    io.open('exact/__init__.py', encoding='utf_8_sig').read(),
).group(1)
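
# Illustrative example (not in the original file): given a line such as
# __version__ = '0.1.0' in exact/__init__.py, the regex above captures the
# bare version string '0.1.0'.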
install_requires = ['numpy', 'scipy']
packages = ['exact']
description = 'Exact solutions to astrophysical problems.'
long_description = (pathlib.Path(__file__).parent / 'README.md').read_text()
setup(
    name='exact',
    version=__version__,
    author='Daniel Mentiplay',
    author_email='d.mentiplay@gmail.com',
    url='https://github.com/dmentipl/exact-astro',
    description=description,
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=packages,
    license='MIT',
    install_requires=install_requires,
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Operating System :: OS Independent",
        "Topic :: Scientific/Engineering :: Astronomy",
    ],
)
| 29.02381 | 79 | 0.658737 | 135 | 1,219 | 5.703704 | 0.622222 | 0.077922 | 0.101299 | 0.101299 | 0.072727 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009045 | 0.183757 | 1,219 | 41 | 80 | 29.731707 | 0.764824 | 0.043478 | 0 | 0 | 0 | 0 | 0.412931 | 0.037069 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69871248278ff33bccd0670c2fde318aba91024c | 4,712 | py | Python | training/run_experiment.py | yezhengkai/im2latex | cd01d705c70e756a3ec205e6f067034407c943c1 | [
"MIT"
] | 3 | 2021-04-14T19:41:19.000Z | 2022-02-07T08:44:32.000Z | training/run_experiment.py | yezhengkai/im2latex | cd01d705c70e756a3ec205e6f067034407c943c1 | [
"MIT"
] | null | null | null | training/run_experiment.py | yezhengkai/im2latex | cd01d705c70e756a3ec205e6f067034407c943c1 | [
"MIT"
] | 1 | 2021-07-01T02:40:23.000Z | 2021-07-01T02:40:23.000Z | """Experiment-running framework."""
import argparse
import importlib
import os
import shutil
import pytorch_lightning as pl
import wandb
from im2latex import lit_models
# In order to ensure reproducible experiments, we must set random seeds.
# https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#reproducibility
pl.seed_everything(42, workers=True)
def _import_class(module_and_class_name: str) -> type:
    """Import class from a module, e.g. 'text_recognizer.models.MLP'"""
    module_name, class_name = module_and_class_name.rsplit(".", 1)
    module = importlib.import_module(module_name)
    class_ = getattr(module, class_name)
    return class_


def _setup_parser():
    """Set up Python's ArgumentParser with data, model, trainer, and other arguments."""
    parser = argparse.ArgumentParser(add_help=False)

    # Add Trainer specific arguments, such as --max_epochs, --gpus, --precision
    trainer_parser = pl.Trainer.add_argparse_args(parser)
    trainer_parser._action_groups[1].title = "Trainer Args"  # pylint: disable=protected-access
    parser = argparse.ArgumentParser(add_help=False, parents=[trainer_parser])

    # Basic arguments
    parser.add_argument("--wandb", action="store_true", default=False)
    parser.add_argument("--data_class", type=str, default="Im2Latex100K")
    parser.add_argument("--model_class", type=str, default="ResnetTransformer")
    parser.add_argument("--load_checkpoint", type=str, default=None)

    # Get the data and model classes, so that we can add their specific arguments
    temp_args, _ = parser.parse_known_args()
    data_class = _import_class(f"im2latex.data.{temp_args.data_class}")
    model_class = _import_class(f"im2latex.models.{temp_args.model_class}")

    # Get data, model, and LitModel specific arguments
    data_group = parser.add_argument_group("Data Args")
    data_class.add_to_argparse(data_group)

    model_group = parser.add_argument_group("Model Args")
    model_class.add_to_argparse(model_group)

    lit_model_group = parser.add_argument_group("LitModel Args")
    lit_models.BaseLitModel.add_to_argparse(lit_model_group)

    parser.add_argument("--help", "-h", action="help")
    return parser


def main():
    """
    Run an experiment.

    Sample command:
    ```
    python training/run_experiment.py --max_epochs=3 --gpus='0,' --num_workers=0 --model_class=ResnetTransformer --data_class=Im2Latex100K
    ```
    """
    parser = _setup_parser()
    args = parser.parse_args()

    data_class = _import_class(f"im2latex.data.{args.data_class}")
    model_class = _import_class(f"im2latex.models.{args.model_class}")
    data = data_class(args)
    model = model_class(data_config=data.config(), args=args)

    lit_model_class = lit_models.BaseLitModel
    if args.load_checkpoint is not None:
        lit_model = lit_model_class.load_from_checkpoint(args.load_checkpoint, args=args, model=model)
    else:
        lit_model = lit_model_class(args=args, model=model)

    logger = pl.loggers.TensorBoardLogger("training/logs")
    if args.wandb:
        logger = pl.loggers.WandbLogger()
        logger.watch(model)
        logger.log_hyperparams(vars(args))

    early_stopping_callback = pl.callbacks.EarlyStopping(monitor="val_loss", mode="min", patience=10)
    model_checkpoint_callback = pl.callbacks.ModelCheckpoint(
        filename="{epoch:03d}-{val_loss:.3f}-{val_bleu:.3f}-{val_cer:.3f}-{val_edit:.3f}",
        monitor="val_loss",
        mode="min",
    )
    callbacks = [early_stopping_callback, model_checkpoint_callback]

    args.weights_summary = "full"  # Print full summary of the model
    trainer = pl.Trainer.from_argparse_args(args, callbacks=callbacks, logger=logger, weights_save_path="training/logs")

    # pylint: disable=no-member
    trainer.tune(lit_model, datamodule=data)  # If passing --auto_lr_find, this will set learning rate
    trainer.fit(lit_model, datamodule=data)
    trainer.test(lit_model, datamodule=data)
    # pylint: enable=no-member

    best_model_path = model_checkpoint_callback.best_model_path
    if best_model_path:
        print("Best model saved at:", best_model_path)
        if args.wandb:
            # https://github.com/wandb/client/issues/1370
            wandb_ckpt_dir = os.path.join(
                wandb.run.dir, "training", "logs", wandb.run.project, wandb.run.id, "checkpoints"
            )
            os.makedirs(wandb_ckpt_dir, exist_ok=True)
            shutil.copy(
                best_model_path, os.path.join(wandb_ckpt_dir, os.path.basename(best_model_path)),
            )
            wandb.save(best_model_path)
            print("Best model also uploaded to W&B")


if __name__ == "__main__":
    main()
| 38 | 138 | 0.712224 | 622 | 4,712 | 5.130225 | 0.323151 | 0.034472 | 0.04262 | 0.02131 | 0.17236 | 0.126293 | 0.053902 | 0.053902 | 0.030711 | 0.030711 | 0 | 0.008207 | 0.172538 | 4,712 | 123 | 139 | 38.308943 | 0.810208 | 0.198854 | 0 | 0.025974 | 0 | 0.012987 | 0.131593 | 0.056512 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038961 | false | 0 | 0.168831 | 0 | 0.233766 | 0.025974 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
698a2f9d16f8e551807581f0095f4680980a32ea | 2,875 | py | Python | lib/importer.py | pe1te3son/colect-dotfiles | 4a9fcbc25d684d26bee3568032a7c22b65f7b630 | [
"MIT"
] | null | null | null | lib/importer.py | pe1te3son/colect-dotfiles | 4a9fcbc25d684d26bee3568032a7c22b65f7b630 | [
"MIT"
] | null | null | null | lib/importer.py | pe1te3son/colect-dotfiles | 4a9fcbc25d684d26bee3568032a7c22b65f7b630 | [
"MIT"
] | null | null | null | #!python3
from sys import exit
import errno
from shutil import copyfile
from socket import gethostname
from os import makedirs, path
from lib import utils
def import_configs(path_to_config_file, show_select_menu=False):
    settings = utils.get_config_file(path_to_config_file)
    files_to_import = settings['colect_files']
    current_config_dir = get_current_config_dir(settings)

    if show_select_menu:
        menu_selection = print_select_menu(files_to_import)
        if menu_selection == "c":
            exit()
        elif type(menu_selection) is list:
            for idx in menu_selection:
                import_single(files_to_import[idx], current_config_dir)
            return

    for conf in files_to_import:
        import_single(conf, current_config_dir)


def get_current_config_dir(settings):
    config_dir = utils.parse_home_path(settings['dest_dir'])
    return path.join(config_dir, gethostname())


def print_select_menu(files_to_import):
    print("\nSelect files to import; example: 1,2,3\n")
    for idx, val in enumerate(files_to_import):
        print(str(idx + 1) + ": " + val[0])
    print("\n\t(a)ll\t(c)ancel\n")
    user_selection = None
    while not valid_user_selection(user_selection, files_to_import):
        user_selection = input("select: ")
    return parse_user_selection(user_selection)


def parse_user_selection(user_selection):
    if user_selection == "a" or user_selection == "c":
        return user_selection
    return [int(x) - 1 for x in user_selection.split(",")]
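
# Worked example (illustrative input only): parse_user_selection("1,3")
# returns [0, 2] -- the menu shown to the user is 1-based, while the list of
# files is 0-based.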
def valid_user_selection(user_selection, files_to_import):
    if not user_selection:
        return False
    if user_selection == "a" or user_selection == "c":
        return True
    selected_values = user_selection.split(",")
    for val in selected_values:
        if val.isnumeric() and int(val) - 1 < len(files_to_import):
            continue
        else:
            print("Invalid option(s)")
            return False
    return True


def import_single(conf, current_config_dir):
    conf[0] = utils.parse_home_path(conf[0])
    failed_to_copy = []
    if not path.exists(path.dirname(conf[0])):
        try:
            makedirs(path.dirname(conf[0]))
        except OSError as exc:  # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise
    try:
        if len(conf) > 2:
            opt_dir = path.join(current_config_dir, conf[2])
            copyfile(path.join(opt_dir, conf[1]), conf[0])
        else:
            copyfile(path.join(current_config_dir, conf[1]), conf[0])
        print("[ OK ] " + conf[0])
    except FileNotFoundError:
        failed_to_copy.append(conf[0])
        print("[ FAILED! ] " + conf[0])
    if len(failed_to_copy):
        print("Failed to import following:")
        for failed_file in failed_to_copy:
            print("\t1: " + failed_file)
| 28.186275 | 71 | 0.651826 | 392 | 2,875 | 4.515306 | 0.265306 | 0.132203 | 0.073446 | 0.058757 | 0.268362 | 0.19209 | 0.092655 | 0.092655 | 0.042938 | 0 | 0 | 0.010157 | 0.246609 | 2,875 | 101 | 72 | 28.465347 | 0.807018 | 0.01287 | 0 | 0.138889 | 0 | 0 | 0.059238 | 0.007405 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.277778 | 0 | 0.486111 | 0.138889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
698a52940330a24b1d2b110e4cd07e015cdf98c4 | 6,907 | py | Python | usaspending_api/recipient/tests/integration/test_recipient_list.py | g4brielvs/usaspending-api | bae7da2c204937ec1cdf75c052405b13145728d5 | [
"CC0-1.0"
] | 217 | 2016-11-03T17:09:53.000Z | 2022-03-10T04:17:54.000Z | usaspending_api/recipient/tests/integration/test_recipient_list.py | g4brielvs/usaspending-api | bae7da2c204937ec1cdf75c052405b13145728d5 | [
"CC0-1.0"
] | 622 | 2016-09-02T19:18:23.000Z | 2022-03-29T17:11:01.000Z | usaspending_api/recipient/tests/integration/test_recipient_list.py | g4brielvs/usaspending-api | bae7da2c204937ec1cdf75c052405b13145728d5 | [
"CC0-1.0"
] | 93 | 2016-09-07T20:28:57.000Z | 2022-02-25T00:25:27.000Z | # Stdlib imports
import datetime
import pytest
# Core Django imports
# Third-party app imports
from rest_framework import status
from model_mommy import mommy
# Imports from your apps
from usaspending_api.common.helpers.fiscal_year_helpers import generate_fiscal_year
from usaspending_api.recipient.models import RecipientProfile
from usaspending_api.recipient.v2.views.list_recipients import get_recipients
# Getting relative dates as the 'latest'/default argument returns results relative to when it gets called
TODAY = datetime.datetime.now()
OUTSIDE_OF_LATEST = TODAY - datetime.timedelta(365 + 2)
CURRENT_FISCAL_YEAR = generate_fiscal_year(TODAY)
def list_recipients_endpoint():
    return "/api/v2/recipient/duns/"


@pytest.mark.django_db
def test_one_recipient():
    """Verify that a single recipient profile is counted."""
    mommy.make(
        RecipientProfile,
        recipient_level="A",
        recipient_hash="00077a9a-5a70-8919-fd19-330762af6b84",
        recipient_unique_id="000000123",
        recipient_name="WILSON AND ASSOC",
        last_12_months=-29470313.00,
    )

    filters = {"limit": 10, "page": 1, "order": "desc", "sort": "amount", "award_type": "all"}
    results, meta = get_recipients(filters=filters)
    assert meta["total"] == 1


@pytest.mark.django_db
def test_ignore_special_case():
    """Verify that the special-case 'MULTIPLE RECIPIENTS' profile is ignored."""
    mommy.make(
        RecipientProfile,
        recipient_level="R",
        recipient_hash="00077a9a-5a70-8919-fd19-330762af6b85",
        recipient_unique_id=None,
        recipient_name="MULTIPLE RECIPIENTS",
        last_12_months=-29470313.00,
    )

    filters = {"limit": 10, "page": 1, "order": "desc", "sort": "amount", "award_type": "all"}
    results, meta = get_recipients(filters=filters)
    assert meta["total"] == 0


@pytest.mark.django_db
def test_filters_with_two_recipients():
    """Verify sorting, pagination, and keyword filters across two recipients."""
    mommy.make(
        RecipientProfile,
        recipient_level="A",
        recipient_hash="00077a9a-5a70-8919-fd19-330762af6b84",
        recipient_unique_id="000000123",
        recipient_name="WILSON AND ASSOC",
        last_12_months=-29470313.00,
    )
    mommy.make(
        RecipientProfile,
        recipient_level="B",
        recipient_hash="c8f79139-38b2-3063-b039-d48172abc710",
        recipient_unique_id="000000444",
        recipient_name="DREW JORDAN INC.",
        last_12_months=99705.97,
    )

    filters = {"limit": 1, "page": 1, "order": "desc", "sort": "amount", "award_type": "all"}
    results, meta = get_recipients(filters=filters)

    # Ensure pagination metadata meets API Contract
    assert meta["total"] == 2
    assert meta["page"] == 1
    assert meta["limit"] == 1

    assert len(results) == 1
    assert results[0]["recipient_level"] == "B"
    assert float(results[0]["amount"]) == float(99705.97)
    assert results[0]["id"] == "c8f79139-38b2-3063-b039-d48172abc710-B"

    filters = {"limit": 1, "page": 1, "order": "asc", "sort": "amount", "award_type": "all"}
    results, meta = get_recipients(filters=filters)
    assert results[0]["recipient_level"] == "A"

    filters = {"limit": 10, "page": 1, "order": "asc", "sort": "amount", "keyword": "JOR", "award_type": "all"}
    results, meta = get_recipients(filters=filters)
    assert len(results) == 1
    assert results[0]["recipient_level"] == "B"


@pytest.mark.django_db
def test_state_metadata_with_no_results(client):
    resp = client.post(list_recipients_endpoint())
    assert resp.status_code == status.HTTP_200_OK
    assert resp.data.get("page_metadata", False)
    assert resp.data["page_metadata"].get("next", True) is None


@pytest.mark.django_db
def test_award_type_filter():
    """Verify that the award_type filter selects the right amount column and recipients."""
    mommy.make(
        RecipientProfile,
        recipient_level="A",
        recipient_hash="00077a9a-5a70-8919-fd19-330762af6b84",
        recipient_unique_id="000000123",
        recipient_name="SHOES AND SOCKS INC.",
        last_12_months=2400.00,
        last_12_contracts=400.00,
        last_12_grants=500.00,
        last_12_loans=0.00,
        last_12_other=700.00,
        last_12_direct_payments=800.00,
        award_types=["contract", "grant", "direct payment", "other"],
    )
    mommy.make(
        RecipientProfile,
        recipient_level="B",
        recipient_hash="c8f79139-38b2-3063-b039-d48172abc710",
        recipient_unique_id="000000444",
        recipient_name="SPORT SHORTS",
        last_12_months=2000.00,
        last_12_contracts=700.00,
        last_12_grants=600.00,
        last_12_loans=0.00,
        last_12_other=400.00,
        last_12_direct_payments=300.00,
        award_types=["contract", "grant", "direct payment", "other"],
    )
    mommy.make(
        RecipientProfile,
        recipient_level="C",
        recipient_hash="5770e860-0f7b-69f1-182f-4d6966ebaa62",
        recipient_unique_id="000000555",
        recipient_name="JUST JERSEYS",
        last_12_months=99.99,
        last_12_contracts=0.00,
        last_12_grants=0.00,
        last_12_loans=99.99,
        last_12_other=0.00,
        last_12_direct_payments=0.00,
        award_types=["loans"],
    )

    filters = {"limit": 10, "page": 1, "order": "desc", "sort": "amount", "award_type": "all"}
    results, meta = get_recipients(filters=filters)

    # Test "all"
    assert len(results) == 3
    assert results[0]["recipient_level"] == "A"
    assert float(results[0]["amount"]) == float(2400)
    assert results[0]["id"] == "00077a9a-5a70-8919-fd19-330762af6b84-A"

    # Test "grants"
    filters["award_type"] = "grants"
    results, meta = get_recipients(filters=filters)
    assert len(results) == 2
    assert results[0]["recipient_level"] == "B"
    assert float(results[0]["amount"]) == float(600)
    assert results[0]["id"] == "c8f79139-38b2-3063-b039-d48172abc710-B"

    # Test "contracts"
    filters["award_type"] = "contracts"
    results, meta = get_recipients(filters=filters)
    assert len(results) == 2
    assert results[0]["recipient_level"] == "B"
    assert float(results[0]["amount"]) == float(700)
    assert results[0]["id"] == "c8f79139-38b2-3063-b039-d48172abc710-B"

    # Test "direct_payments"
    filters["award_type"] = "direct_payments"
    results, meta = get_recipients(filters=filters)
    assert len(results) == 2
    assert results[0]["recipient_level"] == "A"
    assert float(results[0]["amount"]) == float(800)
    assert results[0]["id"] == "00077a9a-5a70-8919-fd19-330762af6b84-A"

    # Test "loans"
    filters["award_type"] = "loans"
    results, meta = get_recipients(filters=filters)
    assert len(results) == 1
    assert results[0]["recipient_level"] == "C"
    assert float(results[0]["amount"]) == float(99.99)
    assert results[0]["id"] == "5770e860-0f7b-69f1-182f-4d6966ebaa62-C"
| 35.239796 | 111 | 0.665267 | 865 | 6,907 | 5.123699 | 0.217341 | 0.029783 | 0.044224 | 0.054152 | 0.656137 | 0.626805 | 0.562049 | 0.562049 | 0.562049 | 0.549865 | 0 | 0.108137 | 0.194006 | 6,907 | 195 | 112 | 35.420513 | 0.687983 | 0.080064 | 0 | 0.486842 | 0 | 0 | 0.206073 | 0.079551 | 0 | 0 | 0 | 0 | 0.230263 | 1 | 0.039474 | false | 0 | 0.046053 | 0.006579 | 0.092105 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
698acb3b25a009de312a19b1bc58ff4564a8e4ce | 8,988 | py | Python | tracardi/service/kasuj.py | Tracardi/tracardi | 1505c38e43e6e69595212d2a3f6917edc5841390 | [
"MIT"
] | 153 | 2021-11-02T00:35:41.000Z | 2022-03-25T16:37:44.000Z | tracardi/service/kasuj.py | Tracardi/tracardi | 1505c38e43e6e69595212d2a3f6917edc5841390 | [
"MIT"
] | 243 | 2021-10-17T17:00:22.000Z | 2022-03-28T10:13:34.000Z | tracardi/service/kasuj.py | Tracardi/tracardi | 1505c38e43e6e69595212d2a3f6917edc5841390 | [
"MIT"
] | 14 | 2021-10-17T11:39:04.000Z | 2022-03-14T14:44:02.000Z | from collections import defaultdict
from typing import Tuple
from dictdiffer import diff
mappings_prev = {
"dynamic": False,
"date_detection": False,
"properties": {
"id": {
"type": "keyword"
},
"metadata": {
"dynamic": False,
"properties": {
"time": {
"properties": {
"insert": {
"type": "date"
},
"visit": {
"properties": {
"last": {
"type": "date"
},
"current": {
"type": "date"
},
"count": {
"type": "integer"
}
}
}
}
}
}
},
"stats": {
"dynamic": True,
"type": "object"
},
"traits": {
"properties": {
"private": {
"dynamic": True,
"type": "object"
},
"public": {
"dynamic": True,
"type": "object"
}
}
},
"pii": {
"dynamic": False,
"properties": {
"name": {
"type": "text"
},
"surname": {
"type": "text"
},
"birthDate": {
"type": "date"
},
"email": {
"type": "keyword"
},
"telephone": {
"type": "keyword"
},
"twitter": {
"type": "keyword"
},
"facebook": {
"type": "keyword"
},
"whatsapp": {
"type": "keyword"
},
"other": {
"dynamic": True,
"type": "object"
}
}
},
"segments": {
"type": "keyword",
"ignore_above": 64
},
"consents": {
"dynamic": True,
"type": "object"
},
"active": {
"type": "boolean"
}
}
}
mappings_head = {
"dynamic": False,
"date_detection": False,
"properties": {
"id": {
"type": "keyword"
},
"metadata": {
"type": "object",
"dynamic": True,
"enabled": False,
},
"stats": {
"dynamic": True,
"type": "int"
},
"traits": {
"properties": {
"private": {
"dynamic": True,
"type": "object"
},
"public": {
"dynamic": True,
"type": "object"
}
}
},
"pii": {
"dynamic": False,
"properties": {
"name": {
"type": "keyword"
},
"surname": {
"type": "text"
},
"birthDate": {
"type": "date"
},
"email": {
"type": "keyword"
},
"telephone": {
"type": "keyword"
},
"twitter": {
"type": "keyword"
},
"facebook": {
"type": "keyword"
},
"whatsapp": {
"type": "keyword"
},
"other": {
"dynamic": True,
"type": "object"
}
}
},
"segments": {
"type": "keyword",
"ignore_above": 64
},
"consents": {
"dynamic": True,
"type": "object"
},
}
}
class FieldMetaData:
def __init__(self, type, sub_field):
self.type = type
self.sub_field = sub_field
def __repr__(self):
return f"FieldMetaData(type={self.type}, sub_field={self.sub_field})"
def __eq__(self, other):
return self.sub_field == other.sub_field and self.type == other.type
class FieldTypes(dict):
def has(self, field, meta):
return field in self and self[field] == meta
def has_sub_path(self, path):
# Minimal implementation (an assumption about the intent): True if any known
# field lies below the given path, e.g. "metadata" matches "metadata.time.insert".
return any(field.startswith(path + ".") for field in self)
class ScriptGenerator:
def reindex(self, prev_index, head_index, script):
print({
"endpoint": f"_reindex/{prev_index}/{head_index}",
"body": {
"script": script
}
})
def alias(self, prev_index, head_index):
pass
def skip(self):
print('skip')
class MigrationRuleEngine:
def __init__(self, prev_index, head_index, head_schema: FieldTypes, prev_schema: FieldTypes, diff):
self.prev_index = prev_index
self.head_index = head_index
self._diff = diff
self._head_schema = head_schema
self._prev_schema = prev_schema
self._generator = ScriptGenerator()
def add(self, data):
for field, new_meta in data: # type: str, FieldMetaData
if 'remove' not in self._diff:
# This field was added and no other field was removed.
# This is an empty field. No migration needed
self._generator.skip()
elif new_meta.type == 'object':
# If some sub path was removed and replaced by object then simple copy is OK.
if self._prev_schema.has_sub_path(field):
pass
else:
self._generator.reindex(self.prev_index, self.head_index, f"""
ctx.{field} = "SOME_VALUE || FIELD"
""")
def change(self, data):
for field, changes in data: # type: str, Tuple[FieldMetaData, FieldMetaData]
prev_meta, head_meta = changes
print(field, prev_meta, head_meta)
def remove(self, data):
for field, old_meta in data: # type: str, FieldMetaData
print(field, old_meta)
class SchemaChangeManager:
def __init__(self, prev_index, head_index, prev_schema: FieldTypes, head_schema: FieldTypes):
self.head_index = head_index
self.prev_index = prev_index
self.head_schema = head_schema
self.prev_schema = prev_schema
_diff = sorted(self._standardize(diff(self.prev_schema, self.head_schema)), key=lambda item: item[0])
self._diff = defaultdict(list)
for _mode, __field, __meta in _diff:
self._diff[_mode].append((__field, __meta))
@staticmethod
def _standardize(source):
for _mode, _field, _change in source:
if _mode == 'add' or _mode == 'remove':
for __field, __meta in _change:
yield _mode, __field, __meta
elif _mode == 'change':
if isinstance(_field, list):
for __field in _field:
yield _mode, __field, _change
else:
yield _mode, _field, _change
def make_change_script(self):
_rules_engine = MigrationRuleEngine(self.prev_index, self.head_index, self.head_schema, self.prev_schema, self._diff)
for _mode, _list_of_tuples in self._diff.items():
getattr(_rules_engine, _mode)(_list_of_tuples)
class MappingConverter:
def __init__(self):
self.fields = {}
def schema(self, mappings) -> FieldTypes:
self.fields = {}
return FieldTypes(self._loop(mappings))
def _loop(self, mappings, path=None):
if path is None:
path = []
for _key, _object in mappings.items():
if 'properties' in _object:
self._loop(_object['properties'], path=path + [_key])
if 'type' in _object:
field_path = path + [_key]
self.fields[".".join(field_path)] = FieldMetaData(
type=_object['type'].lower(),
sub_field='fields' in _object # Elasticsearch multi-fields live under "fields"
)
return self.fields
converter = MappingConverter()
prev_schema = converter.schema(mappings_prev['properties'])
head_schema = converter.schema(mappings_head['properties'])
diff_master = SchemaChangeManager("prev_i", "head_i", prev_schema, head_schema)
diff_master.make_change_script()
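# What make_change_script does with this particular schema pair (a sketch of
# the flow, not a guaranteed transcript): dictdiffer reports e.g. a 'change'
# for 'stats' (object -> int) and 'pii.name' (text -> keyword), and 'remove'
# entries for the metadata.time.* fields and 'active'; MigrationRuleEngine
# then dispatches each mode to its add/change/remove handler above.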
| 28.443038 | 115 | 0.428349 | 721 | 8,988 | 5.084605 | 0.210818 | 0.045008 | 0.040917 | 0.051555 | 0.324604 | 0.287507 | 0.26132 | 0.231315 | 0.231315 | 0.231315 | 0 | 0.001031 | 0.46028 | 8,988 | 315 | 116 | 28.533333 | 0.75469 | 0.029929 | 0 | 0.397059 | 0 | 0 | 0.133953 | 0.01056 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066176 | false | 0.011029 | 0.022059 | 0.011029 | 0.128676 | 0.018382 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
698dd56244de111b1e33a880a1994bf163002e6f | 1,706 | py | Python | code/data_provider.py | ky-zhou/MMFDA | 41b4668c3c4ccdbe2625b77d9f0f9ce30218c187 | [
"MIT"
] | null | null | null | code/data_provider.py | ky-zhou/MMFDA | 41b4668c3c4ccdbe2625b77d9f0f9ce30218c187 | [
"MIT"
] | null | null | null | code/data_provider.py | ky-zhou/MMFDA | 41b4668c3c4ccdbe2625b77d9f0f9ce30218c187 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from options import MODEL_DIR, opt, Debug_Index
from sklearn.preprocessing import normalize
import pyreadr
test_size = opt.test_size
a = 0
def load_csv(which):
gedf = pd.read_csv('../data/%s/%s-ge.csv' % (which, which), sep=",").to_numpy()
medf = pd.read_csv('../data/%s/%s-me.csv' % (which, which), sep=",").to_numpy()
midf = pd.read_csv('../data/%s/%s-mi.csv' % (which, which), sep=",").to_numpy()
sample, ge, me, mi = gedf[:, 0], gedf[:, 1:], medf[:, 1:], midf[:, 1:]
ge, me, mi = normalize(X=ge, axis=a, norm="max"), normalize(X=me, axis=a, norm="max"), normalize(X=mi, axis=a, norm="max")
label_info = pd.read_csv('../data/%s/%s-label.csv' % (which, which), header=None).to_numpy()
label = label_info[:, 1].astype(np.int32) - 1
indices = np.arange(sample.shape[0])
np.random.seed(opt.seed)
np.random.shuffle(indices)
ge, me, mi = ge[indices], me[indices], mi[indices]
sample, label = sample[indices], label[indices]
label0, label1 = sum([1 for x in label if x==0]), sum([1 for x in label if x==1])
print('Data dimensions: ', ge.shape, me.shape, mi.shape, label0, label1)
return ge, me, mi, sample, label
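# Data layout assumed by load_csv (inferred from the reader above):
# ../data/<name>/<name>-{ge,me,mi}.csv each hold a sample id in column 0
# followed by feature columns; <name>-label.csv holds (sample, label) rows
# with 1-based integer labels, shifted to 0-based here.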
if __name__ == '__main__':
# tuple = load_h5("../data_process/snn_data.h5")
# x, y, s1, s2 = get_batch(tuple, False)
load_csv(opt.input_type)
# load_geo('gbm')
# load_raw()
# save_sample()
# load_rdata(opt.input_type)
# gen_rdata(opt.input_type)
# gen_csv(opt.input_type)
# load_rdata4cluster(opt.input_type)
# load_pr_processed(opt.input_type)
# load_pr(opt.input_type)
# calc_pins_label(opt.input_type)
# define_label(opt.input_type)
| 37.911111 | 126 | 0.639508 | 276 | 1,706 | 3.771739 | 0.322464 | 0.069164 | 0.103746 | 0.049952 | 0.310279 | 0.200768 | 0.034582 | 0.034582 | 0 | 0 | 0 | 0.016324 | 0.174091 | 1,706 | 44 | 127 | 38.772727 | 0.722498 | 0.209261 | 0 | 0 | 0 | 0 | 0.089955 | 0.017241 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.2 | 0 | 0.28 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
698e6a08964652d821060c7ce13a449d85e194c7 | 6,119 | py | Python | src/draft.py | ScottNicholsonKurland/Capstone | 139df7255e8e09366fb67eaa2256f5dd97f1cd54 | [
"BSD-2-Clause"
] | 1 | 2018-01-22T15:37:51.000Z | 2018-01-22T15:37:51.000Z | src/draft.py | ScottNicholsonKurland/Capstone | 139df7255e8e09366fb67eaa2256f5dd97f1cd54 | [
"BSD-2-Clause"
] | null | null | null | src/draft.py | ScottNicholsonKurland/Capstone | 139df7255e8e09366fb67eaa2256f5dd97f1cd54 | [
"BSD-2-Clause"
] | null | null | null |
# coding: utf-8
# In[7]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
from sklearn import datasets
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
# In[8]:
df=pd.read_csv('Issued_Construction_Permits.csv')
# In[23]:
# ls  # IPython line magic (directory listing); not valid Python outside the notebook
# In[9]:
dfa=df[df['TotalNewAddSQFT']>0]
iris=dfa[['OriginalZip','Longitude','Latitude','TotalNewAddSQFT', 'TotalJobValuation','NumberOfFloors']]
iris['TotalJobValuation']=iris['TotalJobValuation'].str.lstrip('$')
iris = iris.fillna(value=0)
iris['TotalJobValuation']=iris['TotalJobValuation'].astype(float)
iris.dropna(inplace=True)
# In[ ]:
#dfa['TotalJobValuation']=dfa['TotalJobValuation'].str.lstrip('$')
#dfa.fillna(value=0)
#dfa['TotalJobValuation']=dfa['TotalJobValuation'].astype(float)
#dfa.dropna(inplace=True)
# In[11]:
dfa.groupby(['IssuedDate']).mean()
# In[10]:
dfa.info()
# In[20]:
y=iris['TotalNewAddSQFT']
#y=y.iloc[1:]
x=iris.drop(labels='TotalNewAddSQFT',axis=1)
#x=x.iloc[0:-1]
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=28)
rf = RandomForestRegressor()
rf.fit(X_train,y_train)
rf.score(X_test,y_test)
# In[ ]:
dfa.drop(['IssuedInLast30Days','IssuanceMethod','Location','TCAD_ID','CalendarYearIssued','FiscalYearIssued','DayIssued','OriginalState','CouncilDistrict','Jurisdiction'],axis=1,inplace=True)
# Build a random forest to predict total square footage on a by-month and by-zip-code basis for 2017 in Austin.
# In[12]:
dfa['IssuedDate']=pd.to_datetime(dfa['IssuedDate'])
# In[21]:
dfa.groupby(['IssuedDate','OriginalZip'])['TotalNewAddSQFT'].mean()
tbz=dfa.groupby(['OriginalZip'])['TotalNewAddSQFT'].sum()
tbz.nlargest(55)
# In[ ]:
dfa.std()
# In[19]:
df.shape
# In[ ]:
plt.figure(figsize=(15,6))
zip_counts = dfa['OriginalZip'].value_counts()
ax1 = zip_counts.plot(kind='bar');
labels = []
for i, label in enumerate(zip_counts.index):
labels.append('{} - ({})'.format(label, zip_counts[label]))
ax1.set_xticklabels(labels);
#Downtown - 78701 - had a tenth the *residential* development of 78702; huh.
# In[ ]:
plt.figure(figsize=(15,6))
floor_counts = dfa['NumberOfFloors'].value_counts()
ax1 = floor_counts.plot(kind='bar');
labels = []
for i, label in enumerate(floor_counts.index):
labels.append('{} - ({})'.format(label, floor_counts[label]))
ax1.set_xticklabels(labels);
# What the heck is a zero story project?! And two-story projects are half again as common as one-story.
# In[ ]:
plt.figure(figsize=(15,6))
desc_counts = dfa['PermitClass'].value_counts()
ax1 = desc_counts.plot(kind='bar');
labels = []
for i, label in enumerate(desc_counts.index):
labels.append('{} - ({})'.format(label, desc_counts[label]))
ax1.set_xticklabels(labels);
# In[ ]:
plt.figure(figsize=(15,6))
jv_counts = dfa['TotalJobValuation'].value_counts()
ax1 = jv_counts.plot(kind='hist');
labels = []
for i, label in enumerate(jv_counts.index):
labels.append('{} - ({})'.format(label, jv_counts[label]))
ax1.set_xticklabels(labels);
#Most projects are $150k-$250k
# In[ ]:
dfa['PermitClass'].unique()
# In[ ]:
dfa[dfa['Condominium']=='Yes'].count()
# In[ ]:
print(dfa[dfa['Condominium']=='Yes']['TotalNewAddSQFT'].sum())
print(dfa.sort_values('TotalNewAddSQFT', ascending=False))
howmany=dfa['ProjectName'].unique()
howmany.shape
# In[ ]:
print(dfa['OriginalZip'].unique())
dfa.sort_values('HousingUnits', ascending=False)
# In[ ]:
dfa['TotalNewAddSQFT'].sum()
#Over 1.3 billion square feet; seems low for over a hundred new people per day,
#but a lot of construction is likely unpermitted and many move into Austin metro
#- 4300 square miles and 2 million people, not Austin, 272 square miles and 800K people.
# In[ ]:
plt.figure(figsize=(22,11))
plt.ylabel('Latitude')
plt.xlabel('Longitude')
plt.title('Austin new square footage')
plt.scatter(dfa['Longitude'],dfa['Latitude'], s=dfa['TotalNewAddSQFT']/1000,c='g')
# In[ ]:
''' This uses the Bokeh package so you will have to make sure it is installed'''
from bokeh.models import (
GMapPlot, GMapOptions, ColumnDataSource, Circle, Range1d, PanTool, WheelZoomTool, BoxSelectTool
)
from bokeh.io import output_file, show, output_notebook
from bokeh.io import export_png
# Uncomment out this line to work within the jupyter notebook.
# If left commented out map should show in new window
output_notebook()
# In[3]:
#creating the plot
## Set up the center point zoom and location for a given map
map_options = GMapOptions(lat=30.307182, lng=-97.76, map_type="roadmap", zoom=11)
plot = GMapPlot(
x_range=Range1d(), y_range=Range1d(), map_options=map_options
)
plot.title.text = "Austin"
'''
This is someone else's API key so you should get your own at https://console.developers.google.com/apis/
You will need to make sure you have set up the permissions and services so you can use this.
I believe you need at least
Google Maps JavaScript API
Google Static Maps API
Maybe more'''
plot.api_key = "AIzaSyBl6OOSxtxS7qmNgkm6EXdY6MSdVrHW8h4"
'''Here you will pass a list of the locations you want to show on the graph
The lats and longs are broken up into 2 lists'''
completed_lats = dfa['Latitude'].values
completed_longs = dfa['Longitude'].values
size=(dfa['TotalNewAddSQFT'].values**.25)/5
'''Setup for displaying the coordinates'''
completed_source = ColumnDataSource( data=dict(
lat=completed_lats,
lon=completed_longs,
alpha=size))
'''The dots are put together and added to the plot'''
completed_dots = Circle(x="lon", y="lat", size='alpha', fill_color="red", fill_alpha=.5, line_color=None)
plot.add_glyph(completed_source, completed_dots)
'''This line sets up what functionality to allow (zoom etc.)'''
plot.add_tools(PanTool(), WheelZoomTool(), BoxSelectTool())
'''Show the map'''
show(plot)
export_png(plot, filename="plot.png")
# In[ ]:
import scipy.stats as scs
import statsmodels.api as sm
pd.plotting.scatter_matrix(dfa, figsize=(15, 10))
plt.show()
# In[ ]:
dfa.describe().T
| 20.883959 | 191 | 0.718091 | 879 | 6,119 | 4.913538 | 0.390216 | 0.008104 | 0.012734 | 0.020838 | 0.118314 | 0.118314 | 0.029868 | 0.029868 | 0.029868 | 0.029868 | 0 | 0.023876 | 0.123876 | 6,119 | 292 | 192 | 20.955479 | 0.781757 | 0.188266 | 0 | 0.117647 | 0 | 0 | 0.20212 | 0.01649 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
698faf78aa6c63752705207447b7c63891ba7586 | 4,274 | py | Python | lib/util/calc_iou.py | kemaloksuz/aLRPLoss-AblationExperiments | 2624afeeb272d45c96d266c8f0b331e2cb286e5e | [
"MIT"
] | 4 | 2020-11-18T09:16:02.000Z | 2021-09-30T12:08:13.000Z | lib/util/calc_iou.py | BigHeartDB/aLRPLoss | 49d3eb7a01ae60380e46b8c0ef496d6c53ac40b9 | [
"MIT"
] | 1 | 2021-03-19T11:18:13.000Z | 2021-03-19T11:18:13.000Z | lib/util/calc_iou.py | BigHeartDB/aLRPLoss | 49d3eb7a01ae60380e46b8c0ef496d6c53ac40b9 | [
"MIT"
] | 1 | 2020-11-18T09:16:05.000Z | 2020-11-18T09:16:05.000Z | import numpy as np
import torch
import torch.nn as nn
import pdb
def calc_iou(a, b):
a=a.type(torch.cuda.DoubleTensor)
b=b.type(torch.cuda.DoubleTensor)
area = (b[:, 2] - b[:, 0]+1) * (b[:, 3] - b[:, 1]+1)
iw = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 0])+1
ih = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 1])+1
iw = torch.clamp(iw, min=0)
ih = torch.clamp(ih, min=0)
ua = torch.unsqueeze((a[:, 2] - a[:, 0]+1) * (a[:, 3] - a[:, 1]+1), dim=1) + area - iw * ih
#ua = torch.clamp(ua, min=1e-8)
intersection = iw * ih
IoU = intersection / ua
return IoU
def bbox_overlaps(bboxes1, bboxes2):
'''
Calculates element-wise IoU between two sets of paired bounding boxes.
Inputs:
bboxes1 -> Nx4 bounding boxes (x1, y1, x2, y2).
bboxes2 -> Nx4 bounding boxes, paired row-by-row with bboxes1.
Returns:
ious -> N IoU values between the paired bounding boxes.
'''
# check if prediction and target samples equal
num_pred = bboxes1.size(0)
num_target = bboxes2.size(0)
assert num_pred == num_target
# calculate max-top-left, min-bottom-right points
top_left = torch.max(bboxes1[:,:2], bboxes2[:,:2])
bottom_right = torch.min(bboxes1[:,2:], bboxes2[:,2:])
wh = (bottom_right - top_left + 1).clamp(min=0)
overlap = wh[:,0] * wh[:,1]
# calculate area for box1
area1 = (bboxes1[:,2] - bboxes1[:,0] + 1) *(
bboxes1[:,3] - bboxes1[:,1] + 1)
# calculate area for box2
area2 = (bboxes2[:,2] - bboxes2[:,0] + 1) *(
bboxes2[:,3] - bboxes2[:,1] + 1)
# calculate ious
ious = overlap / (area1 + area2 - overlap)
return ious
def compute_giou(pred, target, eps=1e-7):
num_pred = pred.size(0)
num_target = target.size(0)
assert num_pred == num_target
# calculate max-top-left and min-bottom-right points for overlap
top_left = torch.max(pred[:,:2], target[:,:2])
bottom_right = torch.min(pred[:, 2:], target[:, 2:])
wh = (bottom_right - top_left + 1).clamp(min=0)
# overlap
overlap = wh[:,0] * wh[:,1]
# calculate union
area_pred = (pred[:, 2]-pred[:,0] + 1) * (pred[:,3] - pred[:,1]+1)
area_target = (target[:, 2]-target[:, 0]+1) * (target[:,3]-target[:,1]+1)
union = area_pred + area_target - overlap + eps
# calculate iou
ious = overlap/union
# min. enclosing box
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1 + 1).clamp(min=0)
enclose_area = enclose_wh[:, 0] * enclose_wh[:, 1] + eps
# giou
diff_term = (enclose_area - union) / enclose_area
gious = ious - diff_term
return gious
def compute_diou(pred, target, eps=1e-7):
num_pred = pred.size(0)
num_target = target.size(0)
assert num_pred == num_target
# calculate max-top-left and min-bottom-right points for overlap
top_left = torch.max(pred[:,:2], target[:,:2])
bottom_right = torch.min(pred[:, 2:], target[:, 2:])
wh = (bottom_right - top_left + 1).clamp(min=0)
# overlap
overlap = wh[:,0] * wh[:,1]
# get pred and gt centers
pred_c_x = (pred[:, 0] + pred[:, 2]) / 2
pred_c_y = (pred[:, 1] + pred[:, 3]) / 2
gt_c_x = (target[:, 0] + target[:, 2]) / 2
gt_c_y = (target[:, 1] + target[:, 3]) / 2
# calculate union
area_pred = (pred[:, 2]-pred[:,0] + 1) * (pred[:,3] - pred[:,1]+1)
area_target = (target[:, 2]-target[:, 0]+1) * (target[:,3]-target[:,1]+1)
union = area_pred + area_target - overlap + eps
# calculate iou
ious = overlap/union
# min. enclosing box
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_c = ((enclose_x2y2[:, 0] - enclose_x1y1[:, 0]) ** 2) + \
((enclose_x2y2[:, 1] - enclose_x1y1[:,1]) **2) + eps
box_d = ((pred_c_x - gt_c_x) ** 2) + \
((pred_c_y - gt_c_y) ** 2)
# diou
diff_term = box_d / enclose_c
dious = ious - diff_term
return dious
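if __name__ == "__main__":
    # Minimal sanity check (illustrative only, not part of the library):
    # identical boxes should give IoU/GIoU/DIoU close to 1, while disjoint
    # boxes give an IoU of 0 and negative GIoU/DIoU penalties.
    a = torch.tensor([[0., 0., 9., 9.], [0., 0., 4., 4.]])
    b = torch.tensor([[0., 0., 9., 9.], [5., 5., 9., 9.]])
    print(bbox_overlaps(a, b))  # tensor([1., 0.])
    print(compute_giou(a, b))   # second value < 0: empty enclosing-area penalty
    print(compute_diou(a, b))   # second value < 0: centre-distance penalty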
| 30.098592 | 112 | 0.565278 | 626 | 4,274 | 3.738019 | 0.153355 | 0.023504 | 0.037607 | 0.041026 | 0.492308 | 0.438034 | 0.431624 | 0.431624 | 0.431624 | 0.431624 | 0 | 0.055556 | 0.250351 | 4,274 | 141 | 113 | 30.312057 | 0.674782 | 0.167759 | 0 | 0.391892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040541 | 1 | 0.054054 | false | 0 | 0.054054 | 0 | 0.162162 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
698fbdf71330808f397600f3ce9dfb44188c0cfc | 1,871 | py | Python | problems/336_palindrome_pairs.py | apoorvkk/LeetCodeSolutions | 1c3461cfc05deb930d0866428eb00362b4338aab | [
"MIT"
] | 1 | 2018-02-03T14:17:18.000Z | 2018-02-03T14:17:18.000Z | problems/336_palindrome_pairs.py | apoorvkk/LeetCodeSolutions | 1c3461cfc05deb930d0866428eb00362b4338aab | [
"MIT"
] | null | null | null | problems/336_palindrome_pairs.py | apoorvkk/LeetCodeSolutions | 1c3461cfc05deb930d0866428eb00362b4338aab | [
"MIT"
] | null | null | null | '''
URL: https://leetcode.com/problems/palindrome-pairs
Time complexity: O(k^2 * n) where n is number of words and k is length of word
Space complexity: O(n)
'''
class Solution(object):
def _is_palindrome(self, word):
return word == word[::-1]
def palindromePairs(self, words):
"""
:type words: List[str]
:rtype: List[List[int]]
"""
word_to_index = {}
for i, word in enumerate(words):
word_to_index[word] = i
palindrome_pairs = []
# full word reversals case
for word, i in word_to_index.items():  # was .iteritems(), which is Python 2 only
if word[::-1] in word_to_index and word_to_index[word[::-1]] != i:
palindrome_pairs.append([i, word_to_index[word[::-1]]])
# empty string case
if "" in word_to_index:
i = word_to_index[""]
for j, word in enumerate(words):
if self._is_palindrome(word) and j != i:
palindrome_pairs.append([i,j])
palindrome_pairs.append([j,i])
# prefix/suffix case
for i in range(len(words)):
curr_word = words[i]
for j in range(1, len(curr_word)):
prefix = curr_word[:j]
suffix = curr_word[j:]
reversed_suffix = suffix[::-1]
reversed_prefix = prefix[::-1]
if self._is_palindrome(prefix):
if reversed_suffix in word_to_index and i != word_to_index[reversed_suffix]:
palindrome_pairs.append([word_to_index[reversed_suffix], i])
if self._is_palindrome(suffix):
if reversed_prefix in word_to_index and i != word_to_index[reversed_prefix]:
palindrome_pairs.append([i, word_to_index[reversed_prefix]])
return palindrome_pairs
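if __name__ == '__main__':
    # Quick local check (not part of the LeetCode harness): the expected
    # output is [[0, 1], [1, 0], [3, 2], [2, 4]] in some order.
    print(Solution().palindromePairs(["abcd", "dcba", "lls", "s", "sssll"]))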
| 35.301887 | 96 | 0.55799 | 236 | 1,871 | 4.190678 | 0.237288 | 0.084934 | 0.155713 | 0.065723 | 0.252781 | 0.159757 | 0.139535 | 0.072801 | 0.072801 | 0.072801 | 0 | 0.00641 | 0.332977 | 1,871 | 52 | 97 | 35.980769 | 0.786058 | 0.140567 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0 | 0.032258 | 0.16129 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6991fbe18b2fd17fe844d70133264da95dbb60b1 | 1,518 | py | Python | DDSP/DataReceiver.py | CharKwayTeow/ddsp | 307ac3d5b0c0bac121c3ac7cef6f1d7ab6f23e1e | [
"MIT"
] | null | null | null | DDSP/DataReceiver.py | CharKwayTeow/ddsp | 307ac3d5b0c0bac121c3ac7cef6f1d7ab6f23e1e | [
"MIT"
] | null | null | null | DDSP/DataReceiver.py | CharKwayTeow/ddsp | 307ac3d5b0c0bac121c3ac7cef6f1d7ab6f23e1e | [
"MIT"
] | null | null | null | import os
import socket
import random
class DataReceiver:
"""docstring for DataReceiver"""
def __init__(self):
self.selectPort()
def selectPort(self):
while True:
self.port = random.randint(8192, 65535)
try:
# Detect whether the port is already occupied by trying to bind to it.
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', self.port))
s.close()
return
except:
# Port already taken; loop again and pick another one
pass
def receive(self, path):
rc = 0
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', self.port))
s.settimeout(10)
try:
s.listen(1)
conn, addr = s.accept()
f = open(path, 'wb')
while True:
data = conn.recv(4096)
if data:
f.write(data)
else:
f.close()
break
except:
rc = -1
finally:
s.close()
return rc
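# A matching sender sketch (a hypothetical counterpart, not part of this
# module): it connects to the receiver's port and streams a file in
# 4096-byte chunks, mirroring what receive() expects on its end.
def send_file(host, port, path):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(4096)
            if not chunk:
                break
            s.sendall(chunk)
    s.close()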
"""Write the test code here"""
if __name__ == '__main__':
receiver = DataReceiver()
receiver.port = 11111
print (receiver.receive('../../received/missfont.log'))
print ("DataReceiver class should work if you see this") | 28.641509 | 71 | 0.506588 | 163 | 1,518 | 4.595092 | 0.503067 | 0.096128 | 0.034713 | 0.050734 | 0.264352 | 0.264352 | 0.264352 | 0.264352 | 0.264352 | 0.264352 | 0 | 0.027056 | 0.391304 | 1,518 | 53 | 72 | 28.641509 | 0.78355 | 0.071805 | 0 | 0.318182 | 0 | 0 | 0.060452 | 0.019665 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0.022727 | 0.068182 | 0 | 0.204545 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6992c1a60967636a99cd90b5996be62a2f93aea7 | 2,648 | py | Python | scripts/main.py | akj127/Robotics-project | 4cbe212281462a5cef8a6b0042915e57394c57c2 | [
"MIT"
] | 3 | 2020-11-15T10:11:54.000Z | 2020-11-16T06:02:47.000Z | scripts/main.py | akj127/Robotics-project | 4cbe212281462a5cef8a6b0042915e57394c57c2 | [
"MIT"
] | null | null | null | scripts/main.py | akj127/Robotics-project | 4cbe212281462a5cef8a6b0042915e57394c57c2 | [
"MIT"
] | 1 | 2021-01-26T17:16:11.000Z | 2021-01-26T17:16:11.000Z | #!/usr/bin/env python
from sensor_msgs.msg import NavSatFix, Imu,Image
from geometry_msgs.msg import Twist, TwistStamped
from std_msgs.msg import Float64
from mavros_msgs.msg import OverrideRCIn
import rospy
import matplotlib.pyplot as plt
import math
import sys, random
import os
import roslib
import numpy as np
import csv
import time
import tf
from cv_bridge import CvBridge
import cv2
bridge = CvBridge()
vx=0
vy=0
v=0
rover_x = 0
rover_y = 0
marker_size = 10
#Load the dictionary that was used to generate the markers.
dictionary = cv2.aruco.Dictionary_get(cv2.aruco.DICT_6X6_250)
# Initialize the detector parameters using default values
parameters = cv2.aruco.DetectorParameters_create()
def processimage(inp):
## cv_image is a cv2 image object. proceed forward with merging your code
global bridge
cv_image = bridge.imgmsg_to_cv2(inp, "bgr8")
# Detect markers in the incoming frame
markerCorners, markerIds, rejectedCandidates = cv2.aruco.detectMarkers(cv_image, dictionary, parameters=parameters)
if markerIds is not None:
# camera_matrix and camera_distortion must come from camera calibration;
# they are not defined anywhere in this script.
ret = cv2.aruco.estimatePoseSingleMarkers(markerCorners, marker_size, camera_matrix, camera_distortion)
#-- Unpack the output, get only the first detected marker
rvec, tvec = ret[0][0,0,:], ret[1][0,0,:]
str_position = "MARKER Position x=%4.0f y=%4.0f z=%4.0f"%(tvec[0], tvec[1], tvec[2])
print(str_position)
# bottom_left_corner = tuple(markerCorners[0][0][0])
# top_right_corner = tuple(markerCorners[0][0][2])
# rover_x = (bottom_left_corner[0] + top_right_corner[0]) / 2
# rover_y = (bottom_left_corner[1] + top_right_corner[1]) / 2
return cv_image
def getVelocity(measurement):
global vx,vy,v
x=measurement.twist.linear.x
y=measurement.twist.linear.y
vx=math.sqrt(x ** 2)
vy=math.sqrt(y ** 2)
v = math.sqrt((vx**2) + (vy**2))
def dist(a,b):
return math.sqrt((a[1]-b[1])**2+(a[0]-b[0])**2)
def letstrack():
## add whatever you want to add
# Create the publisher once, outside the loop
pub = rospy.Publisher('mavros/rc/override', OverrideRCIn, queue_size=1000)
while True:
control = OverrideRCIn()
def listener():
rospy.init_node('listener', anonymous=True)
rate = rospy.Rate(10) # 10hz
#r = rosrate(0.05)
# rospy.Subscriber("/mavros/global_position/global", NavSatFix, plot_gps_measurements)
#rospy.Subscriber("/mavros/global_position/global", NavSatFix, getGPS)
rospy.Subscriber("/mavros/global_position/raw/gps_vel", TwistStamped, getVelocity)
rospy.Subscriber("/webcam/image_raw",Image,getimage)
letsdetect()
rospy.spin()
#rospy.Subscriber("/mavros/imu/data", Imu, getPitch)
#rospy.Subscriber("/mavros/global_position/rel_alt", Float64, getAltitude
if __name__=='__main__' :
listener()
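# Run inside a ROS environment with MAVROS and a camera driver publishing
# /webcam/image_raw; the package name below is an assumption:
#   rosrun robotics_project main.py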
| 29.098901 | 113 | 0.744713 | 400 | 2,648 | 4.7925 | 0.4225 | 0.00626 | 0.054773 | 0.056338 | 0.115806 | 0.052165 | 0.052165 | 0 | 0 | 0 | 0 | 0.031561 | 0.126511 | 2,648 | 90 | 114 | 29.422222 | 0.797233 | 0.305514 | 0 | 0 | 0 | 0 | 0.071978 | 0.019231 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086207 | false | 0 | 0.293103 | 0.017241 | 0.413793 | 0.017241 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
699b2d94c39de0d54b340886a431ea3e1379d79d | 1,550 | py | Python | test/test_weighted_avg_peak.py | stungkit/TradingBot | 177400c1ee664817b199815cca85e6682a7686ab | [
"MIT"
] | 218 | 2018-10-07T19:16:01.000Z | 2022-03-29T10:59:22.000Z | test/test_weighted_avg_peak.py | stungkit/TradingBot | 177400c1ee664817b199815cca85e6682a7686ab | [
"MIT"
] | 123 | 2018-10-15T11:29:03.000Z | 2022-03-30T16:03:11.000Z | test/test_weighted_avg_peak.py | stungkit/TradingBot | 177400c1ee664817b199815cca85e6682a7686ab | [
"MIT"
] | 27 | 2019-02-09T08:20:18.000Z | 2022-03-29T14:31:16.000Z | from pathlib import Path
import pytest
from common.MockRequests import (
ig_request_confirm_trade,
ig_request_login,
ig_request_market_info,
ig_request_prices,
ig_request_set_account,
ig_request_trade,
)
from tradingbot.components import Configuration, TradeDirection
from tradingbot.components.broker import Broker, BrokerFactory
from tradingbot.strategies import WeightedAvgPeak
@pytest.fixture
def config():
config = Configuration.from_filepath(Path("test/test_data/trading_bot.toml"))
config.config["strategies"]["active"] = "weighted_avg_peak"
return config
@pytest.fixture
def broker(config, requests_mock):
"""
Initialise the strategy with mock services
"""
ig_request_login(requests_mock)
ig_request_set_account(requests_mock)
return Broker(BrokerFactory(config))
def test_find_trade_signal(config, broker, requests_mock):
ig_request_login(requests_mock)
ig_request_set_account(requests_mock)
ig_request_prices(requests_mock)
ig_request_trade(requests_mock)
ig_request_confirm_trade(requests_mock)
ig_request_market_info(requests_mock)
strategy = WeightedAvgPeak(config, broker)
# Need to use "mock" as the interval because the mocked requests expect it
market = broker.get_market_info("mock")
prices = strategy.fetch_datapoints(market)
tradeDir, limit, stop = strategy.find_trade_signal(market, prices)
assert tradeDir is not None
assert limit is None
assert stop is None
assert tradeDir == TradeDirection.NONE
| 28.181818 | 81 | 0.771613 | 198 | 1,550 | 5.742424 | 0.333333 | 0.110818 | 0.086192 | 0.129288 | 0.145998 | 0.100264 | 0.100264 | 0.100264 | 0.100264 | 0.100264 | 0 | 0 | 0.159355 | 1,550 | 54 | 82 | 28.703704 | 0.872602 | 0.073548 | 0 | 0.157895 | 0 | 0 | 0.047887 | 0.021831 | 0 | 0 | 0 | 0 | 0.105263 | 1 | 0.078947 | false | 0 | 0.157895 | 0 | 0.289474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
699ca3785181d97033e4bc0c45ba1c016dc9742d | 9,702 | py | Python | simulation/soft_body.py | mahdidolati/elementary-graph-algorithms | 08a90c021f766c2d30368112bc3b7f37b9a3b513 | [
"MIT"
] | null | null | null | simulation/soft_body.py | mahdidolati/elementary-graph-algorithms | 08a90c021f766c2d30368112bc3b7f37b9a3b513 | [
"MIT"
] | null | null | null | simulation/soft_body.py | mahdidolati/elementary-graph-algorithms | 08a90c021f766c2d30368112bc3b7f37b9a3b513 | [
"MIT"
] | null | null | null | from manim import *
import numpy as np
import networkx as nx
from util.util import Geometry
def to_coord(v):
return v[0] * RIGHT + v[1] * UP
class Polygon:  # note: shadows manim's Polygon; only this local class is used here
def __init__(self):
self.convex_box = {'left-x': None, 'right-x': None, 'up-y': None, 'down-y': None}
self.shape = nx.Graph()
def update_convex_box(self, p):
if self.convex_box['left-x'] is None:
self.convex_box['left-x'] = p[0]
self.convex_box['right-x'] = p[0]
self.convex_box['up-y'] = p[1]
self.convex_box['down-y'] = p[1]
return
if p[0] < self.convex_box['left-x']:
self.convex_box['left-x'] = p[0]
if p[0] > self.convex_box['right-x']:
self.convex_box['right-x'] = p[0]
if p[1] < self.convex_box['down-y']:
self.convex_box['down-y'] = p[1]
if p[1] > self.convex_box['up-y']:
self.convex_box['up-y'] = p[1]
def add_edge(self, x, y):
if x not in self.shape.nodes():
self.shape.add_node(x)
self.update_convex_box(x)
if y not in self.shape.nodes():
self.shape.add_node(y)
self.update_convex_box(y)
if (x, y) not in self.shape.edges():
self.shape.add_edge(x, y, q=set(), e=[np.array(x).reshape((2, 1)), np.array(y).reshape((2, 1))])
def add_edges(self, edge_list):
for e in edge_list:
self.add_edge(e[0], e[1])
def draw(self, q_screen):
for e in self.shape.edges():
l = Line(to_coord(e[0]), to_coord(e[1])).set_color(BLUE)
self.shape.edges[e]['q'].add(l)
q_screen.add(l)
def point_inside(self, p):
if p[0] <= self.convex_box['left-x'] or p[0] >= self.convex_box['right-x']:
return False
if p[1] <= self.convex_box['down-y'] or p[1] >= self.convex_box['up-y']:
return False
x = p[0, 0]
y = p[1, 0]
# Ray casting downward in y: count edge crossings below the point; crossings
# exactly at a vertex go into corner_meet and are halved below.
n = 0
corner_meet = 0
for e in self.shape.edges():
e0 = self.shape.edges[e]['e'][0]
e1 = self.shape.edges[e]['e'][1]
if e0[0, 0] < x < e1[0, 0] or e1[0, 0] < x < e0[0, 0]:
m = (e1[1, 0] - e0[1, 0]) / (e1[0, 0] - e0[0, 0])
yp = (x - e0[0, 0]) * m + e0[1, 0]
if np.abs(yp - y) < 0.0001:
return False
if yp < y:
n += 1
if x == e0[0, 0]:
if e0[1, 0] < y:
corner_meet += 1
if x == e1[0, 0]:
if e1[1, 0] < y:
corner_meet += 1
return (n + corner_meet // 2) % 2 != 0
def get_intersection(self, p1, p2):
g = Geometry()
candidates = None
hl = None
for e in self.shape.edges():
r = g.get_line_segment_intersection(self.shape.edges[e]['e'], [p1, p2])
if r is not None:
if candidates is None:
candidates = r
hl = self.shape.edges[e]['e']
if np.linalg.norm(r - p1) < np.linalg.norm(candidates - p1):
candidates = r
hl = self.shape.edges[e]['e']
return candidates, hl
class Point:
def __init__(self, x, y):
self.id = (x, y)
self.position = np.array([x, y], dtype='float64').reshape((2, 1))
self.velocity = np.zeros(2, dtype='float64').reshape((2, 1))
self.force = np.zeros(2, dtype='float64').reshape((2, 1))
self.latent_force = np.zeros(2, dtype='float64').reshape((2, 1))
self.mass = 1.0
def set_force_gravity(self):
self.force[1, 0] = self.force[1, 0] - self.mass * 0.15
def shift(self, dx, dy):
self.position += np.array([dx, dy], dtype='float64').reshape((2, 1))
def reset_velocity(self, direction=None):
if direction is None:
self.velocity = np.zeros(2, dtype='float64').reshape((2, 1))
else:
d = direction / np.linalg.norm(direction)
t = np.dot(np.transpose(self.velocity), d) * d
self.velocity -= t
def set_velocity(self, dt):
self.velocity = self.velocity + self.force * dt / self.mass
def set_position(self, dt):
self.position = self.position + self.velocity * dt
def reset_force(self):
self.force = self.latent_force
self.latent_force = np.zeros(2, dtype='float64').reshape((2, 1))
def apply_force(self, fd, f):
unit_x = np.array([1, 0], dtype='float64').reshape((2, 1))
unit_y = np.array([0, 1], dtype='float64').reshape((2, 1))
self.force[0, 0] = self.force[0, 0] + np.dot(np.transpose(fd), unit_x) * f
self.force[1, 0] = self.force[1, 0] + np.dot(np.transpose(fd), unit_y) * f
def get_next_position(self, dt):
v = self.velocity + self.force * dt / self.mass
return self.position + v * dt
class Spring:
def __init__(self, A, B, ks):
self.A = A
self.B = B
self.L0 = np.linalg.norm(self.A.position - self.B.position)
self.ks = ks
self.kd = 0.9
def set_force_spring(self):
# Hooke's law along the spring axis: f1 = ks * (|AB| - L0)
norm1 = np.linalg.norm(self.A.position - self.B.position)
abu = (self.B.position - self.A.position) / norm1
bau = -1 * abu
f1 = self.ks * (norm1 - self.L0)
self.A.apply_force(abu, f1)
self.B.apply_force(bau, f1)
# Damping: kd times the relative velocity projected onto the spring axis
fa = np.dot(np.transpose(abu), self.B.velocity - self.A.velocity) * self.kd
self.A.apply_force(abu, fa)
fb = np.dot(np.transpose(bau), self.A.velocity - self.B.velocity) * self.kd
self.B.apply_force(bau, fb)
class SoftRectangle:
def __init__(self, w, h, ks):
self.lines = set()
self.g = nx.Graph()
for i in range(w):
for j in range(h):
c = Circle(radius=0.1)
x = 0.5 * i
y = 0.5 * j
c.move_to(x * RIGHT + y * UP)
c.set_fill(PINK, opacity=0.5)
self.g.add_node((i, j), p=Point(x, y), c=c)
if i % 2 == 0:
neighbors = [(i-1, j), (i, j-1), (i-1, j-1)]
else:
neighbors = [(i - 1, j), (i, j - 1), (i - 1, j + 1)]
for n in neighbors:
if n[0] >= 0 and 0 <= n[1] < h:
self.g.add_edge((i, j), n, s=Spring(self.g.nodes[(i, j)]['p'], self.g.nodes[n]['p'], ks))
def shift(self, dx, dy):
for n in self.g.nodes():
self.g.nodes[n]['p'].shift(dx, dy)
def step(self):
for n in self.g.nodes():
self.g.nodes[n]['p'].reset_force()
self.g.nodes[n]['p'].set_force_gravity()
for e in self.g.edges():
self.g.edges[e]['s'].set_force_spring()
def update_positions(self, dt, p):
for n in self.g.nodes():
c_p = self.g.nodes[n]['p'].position
n_p = self.g.nodes[n]['p'].get_next_position(dt)
if p.point_inside(n_p):
t, hl = p.get_intersection(c_p, n_p)
g = Geometry()
force = self.g.nodes[n]['p'].force
p_dir = g.get_force_reflection(hl)
f = -1 * np.dot(np.transpose(force), p_dir) * p_dir
self.g.nodes[n]['p'].latent_force = f
self.g.nodes[n]['p'].reset_velocity(p_dir)
self.g.nodes[n]['p'].position = t
else:
self.g.nodes[n]['p'].latent_force = np.zeros(2, dtype='float64').reshape((2, 1))
self.g.nodes[n]['p'].set_velocity(dt)
self.g.nodes[n]['p'].set_position(dt)
def move_atom(self, n, d):
self.g.nodes[n]['p'].position += d
def move_anim(self, q_screen):
for l in self.lines:
q_screen.remove(l)
for e in self.g.edges():
e1 = self.g.nodes[e[0]]['p'].position[0, 0] * RIGHT + self.g.nodes[e[0]]['p'].position[1, 0] * UP
e2 = self.g.nodes[e[1]]['p'].position[0, 0] * RIGHT + self.g.nodes[e[1]]['p'].position[1, 0] * UP
l = Line(e1, e2).set_color(PINK).set_opacity(0.5)
self.lines.add(l)
q_screen.add(l)
for n in self.g.nodes():
self.g.nodes[n]['c'].move_to(
self.g.nodes[n]['p'].position[0, 0] * RIGHT +
self.g.nodes[n]['p'].position[1, 0] * UP
)
class SoftBody(Scene):
def draw_body(self, body):
for n in body.g.nodes():
self.add(body.g.nodes[n]['c'])
body.move_anim(self)
def move_body(self, body, dt, p):
body.step()
body.update_positions(dt, p)
body.move_anim(self)
def construct(self):
bodies = [SoftRectangle(3, 3, 72), SoftRectangle(3, 3, 24), SoftRectangle(3, 3, 8)]
body_shifts = [(-4, 1.5), (-1, 1.5), (2, 1.5)]
for i in range(len(bodies)):
bodies[i].shift(*body_shifts[i])
self.draw_body(bodies[i])
p = Polygon()
p.add_edges([
# [(-1, -2), (3, -2)], [(3, -2), (-1, 2)], [(-1, 2), (-1, -2)]
[(-5, -2), (4, -2)], [(4, -2), (4, -1)], [(4, -1), (-5, -1)], [(-5, -1), (-5, -2)]
])
p.draw(self)
self.wait(1)
# s.move_atom((0, 0), np.array([-0.5, 0], dtype='float64').reshape((2, 1)))
# s.move_atom((1, 0), np.array([0.5, 0], dtype='float64').reshape((2, 1)))
# s.move_anim(self)
# self.wait(1)
dt = 0.05
for _ in range(40):
for b in bodies:
self.move_body(b, dt, p)
self.wait(2 * dt)
if __name__ == "__main__":
pass
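# To render this scene (assuming Manim Community is installed and util.util
# provides the Geometry helpers used above):
#   manim -pql soft_body.py SoftBody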
| 36.066914 | 113 | 0.49093 | 1,500 | 9,702 | 3.078667 | 0.11 | 0.034647 | 0.056301 | 0.040494 | 0.4411 | 0.381117 | 0.289952 | 0.187744 | 0.14097 | 0.096795 | 0 | 0.045013 | 0.331375 | 9,702 | 268 | 114 | 36.201493 | 0.666872 | 0.024531 | 0 | 0.183857 | 0 | 0 | 0.024212 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125561 | false | 0.004484 | 0.017937 | 0.004484 | 0.201794 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
699efb78c06b896f75ab6b08619347efc68e02be | 3,576 | py | Python | pyroomacoustics/tests/tests_libroom/test_is_inside_2d_polygon.py | HemaZ/pyroomacoustics | c401f829c71ff03a947f68f9b6b2f48346ae84b2 | [
"MIT"
] | 1 | 2019-08-04T07:34:02.000Z | 2019-08-04T07:34:02.000Z | pyroomacoustics/tests/tests_libroom/test_is_inside_2d_polygon.py | HemaZ/pyroomacoustics | c401f829c71ff03a947f68f9b6b2f48346ae84b2 | [
"MIT"
] | null | null | null | pyroomacoustics/tests/tests_libroom/test_is_inside_2d_polygon.py | HemaZ/pyroomacoustics | c401f829c71ff03a947f68f9b6b2f48346ae84b2 | [
"MIT"
] | 1 | 2021-03-07T09:46:58.000Z | 2021-03-07T09:46:58.000Z | # Test of polygon point inclusion test
# Copyright (C) 2019 Robin Scheibler, Cyril Cadoux, Sidney Barthe
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# You should have received a copy of the MIT License along with this program. If
# not, see <https://opensource.org/licenses/MIT>.
from __future__ import division
import numpy as np
import pyroomacoustics as pra
polygons = [
np.array([ # this one is counter-clockwise
[0, 4, 4, 0,],
[0, 0, 4, 4,],
]),
np.array([ # this one is clockwise!
[0, 0, 1, 1, 3, 3],
[0, 1, 1, 2, 2, 0],
]),
np.array([ # this one is clockwise!
[0, 1, 1, 3, 3],
[0, 1, 2, 2, 0],
]),
]
cases = {
'inside' : {
'pol' : 0,
'p' : [2,2],
'ret' : 0,
},
'on_border' : {
'pol' : 0,
'p' : [0,2],
'ret' : 1,
},
'on_corner' : {
'pol' : 0,
'p' : [4,4],
'ret' : 1,
},
'outside' : {
'pol' : 0,
'p' : [5,5],
'ret' : -1,
},
# horizontal wall aligned with point
'horiz_wall_align' : {
'pol' : 1,
'p' : [2,1],
'ret' : 0,
},
# ray is going through vertex
'ray_through_vertex' : {
'pol' : 2,
'p' : [2,1],
'ret' : 0,
},
# point is at the same height as top of polygon, but outside
'top_outside' : {
'pol' : 2,
'p' : [4,2],
'ret' : -1,
},
}
def run_inside_pol(lbl):
pol = polygons[cases[lbl]['pol']]
p = cases[lbl]['p']
r_exp = cases[lbl]['ret']
ret = pra.libroom.is_inside_2d_polygon(p, pol)
assert ret == r_exp, '{} : returned={} expected={}'.format(lbl, ret, r_exp)
def test_inside():
run_inside_pol('inside')
def test_on_border():
run_inside_pol('on_border')
def test_on_corner():
run_inside_pol('on_corner')
def test_outside():
run_inside_pol('outside')
def test_horiz_wall_align():
run_inside_pol('horiz_wall_align')
def test_ray_through_vertex():
run_inside_pol('ray_through_vertex')
def test_top_outside():
run_inside_pol('top_outside')
if __name__ == '__main__':
test_inside()
test_on_border()
test_on_corner()
test_outside()
test_top_outside()
test_horiz_wall_align()
test_ray_through_vertex()
| 27.29771 | 80 | 0.568233 | 473 | 3,576 | 4.135307 | 0.357294 | 0.041411 | 0.04908 | 0.021472 | 0.053681 | 0.046524 | 0.046524 | 0 | 0 | 0 | 0 | 0.025735 | 0.315436 | 3,576 | 130 | 81 | 27.507692 | 0.773284 | 0.402405 | 0 | 0.256098 | 0 | 0 | 0.115804 | 0 | 0 | 0 | 0 | 0 | 0.012195 | 1 | 0.097561 | false | 0 | 0.036585 | 0 | 0.134146 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
699f551d304f7026352ad9206f00d1322df3dcbb | 267 | py | Python | sphinx-sources/Examples/Commands/GaussScreen.py | jccmak/lightpipes | 1a296fe08bdd97fc9a0e11f92bab25c85f68e57d | [
"BSD-3-Clause"
] | 132 | 2017-03-15T15:28:46.000Z | 2022-03-09T00:28:25.000Z | sphinx-sources/Examples/Commands/GaussScreen.py | jccmak/lightpipes | 1a296fe08bdd97fc9a0e11f92bab25c85f68e57d | [
"BSD-3-Clause"
] | 63 | 2017-01-26T15:46:55.000Z | 2022-01-25T04:50:59.000Z | sphinx-sources/Examples/Commands/GaussScreen.py | jccmak/lightpipes | 1a296fe08bdd97fc9a0e11f92bab25c85f68e57d | [
"BSD-3-Clause"
] | 37 | 2017-02-17T16:11:38.000Z | 2022-01-25T18:03:47.000Z | from LightPipes import *
import matplotlib.pyplot as plt
wavelength=500*nm
size=5.0*mm
N=100
w=0.2*mm
T=2
z=1*m
dx=1.0*mm
dy=1.0*mm
F=Begin(size,wavelength,N)
F=GaussScreen(w,dx,dy,T,F)
F=Forvard(z,F)
I=Intensity(2,F)
plt.imshow(I)
#plt.plot(I[N/2][:N])
plt.show()
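# Shows the intensity pattern on the 100x100 grid after propagating the beam
# 1 m past the (1 mm-shifted) Gaussian screen.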
| 13.35 | 31 | 0.696629 | 65 | 267 | 2.861538 | 0.507692 | 0.048387 | 0.043011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.07438 | 0.093633 | 267 | 19 | 32 | 14.052632 | 0.694215 | 0.074906 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69a04b1c505bb845a7403a07e74c3d06f3debfa0 | 14,063 | py | Python | head-ensembles/legacy/extract_trees.py | Tom556/BERTHeadEnsembles | aeb6ec37d24f30bb1d224f38fd4928488a215ed7 | [
"MIT"
] | null | null | null | head-ensembles/legacy/extract_trees.py | Tom556/BERTHeadEnsembles | aeb6ec37d24f30bb1d224f38fd4928488a215ed7 | [
"MIT"
] | 3 | 2020-09-26T01:00:29.000Z | 2021-05-21T16:25:33.000Z | head-ensembles/legacy/extract_trees.py | tomlimi/BERTHeadEnsembles | aeb6ec37d24f30bb1d224f38fd4928488a215ed7 | [
"MIT"
] | null | null | null |
from tools import dependency, sentence_attentions
import argparse
from collections import defaultdict, namedtuple
import networkx as nx
from networkx.algorithms import tree
import numpy as np
RelData = namedtuple('RelData','layers heads transpose d2p')
# soft pos mask (BEST)
# relation_rules = {'adj-clause-p2d': RelData([3, 4, 7, 6, 5, 7], [3, 5, 6, 6, 9, 10],False, False),
# 'adj-modifier-d2p': RelData([3, 7, 4, 5], [9, 2, 5, 0],False, True),
# 'adv-clause-p2d': RelData([5, 3, 4, 5, 1], [8, 3, 8, 11, 5],False, False),
# 'adv-modifier-p2d': RelData([7, 5, 6, 4, 9, 4], [7, 1, 9, 7, 3, 10],False, True),
# 'apposition-p2d': RelData([3], [3],False, False),
# 'auxiliary-d2p': RelData([7, 3, 5, 4, 1, 1], [2, 9, 9, 11, 6, 1],False, True),
# 'clausal-p2d': RelData([5, 7, 7, 7, 4, 3, 5, 0, 6, 1], [8, 10, 1, 6, 5, 3, 11, 8, 6, 5],False, False),
# 'clausal-d2p': RelData([6, 5, 7, 7], [2, 3, 0, 9],False, True),
# 'compound-d2p': RelData([3, 7], [9, 2],False, True),
# 'conjunct-d2p': RelData([6, 9, 8, 4, 3, 11], [0, 6, 4, 10, 11, 9],False, True),
# 'determiner-d2p': RelData([7, 3, 4], [10, 9, 5],False, True),
# # 'i object-d2p': RelData([5], [1],False, True),
# 'noun-modifier-d2p': RelData([6, 3, 7, 6], [9, 11, 9, 2],False, True),
# 'num-modifier-p2d': RelData([7, 4, 0, 6, 5], [11, 7, 2, 9, 9],False, False),
# 'object-d2p': RelData([6, 3, 6], [9, 11, 10],False, True),
# 'other-d2p': RelData([5, 4, 8, 6, 3, 5, 7, 7, 7, 5, 1, 1], [0, 5, 5, 6, 9, 7, 1, 2, 6, 9, 6, 1],False, True),
# 'punctuation-d2p': RelData([7, 8, 8, 5, 7, 8], [1, 5, 0, 7, 6, 4],False, True),
# 'subject-p2d': RelData([7, 4, 6], [4, 10, 4],False, False)}
# diagonal mask
# relation_rules = {'adj-clause-p2d': RelData([4, 7, 6, 0], [5, 6, 5, 8],False, False),
# 'adj-modifier-d2p': RelData([3, 7, 6, 5, 7, 8, 0, 2], [9, 10, 5, 7, 6, 5, 8, 11],False, True),
# 'adv-clause-d2p': RelData([4, 4, 5, 4, 2, 0], [9, 3, 4, 4, 7, 7],False, True),
# 'adv-modifier-d2p': RelData([7, 5, 6, 8, 7, 3, 10, 0, 6, 0], [6, 7, 5, 5, 10, 10, 10, 8, 4, 11],False, True),
# 'apposition-p2d': RelData([0, 9], [8, 0],False, False),
# 'auxiliary-d2p': RelData([3, 8, 7, 5, 4, 7, 10], [9, 5, 6, 0, 5, 10, 10],False, True),
# 'clausal subject-p2d': RelData([8, 0, 0, 0], [10, 8, 5, 1],False, False),
# 'clausal-d2p': RelData([7, 6, 4, 8, 5, 0, 0, 1, 0], [0, 2, 6, 8, 4, 5, 9, 11, 7],False, True),
# 'compound-d2p': RelData([3, 7, 6, 7, 0], [9, 6, 5, 10, 8],False, True),
# 'conjunct-d2p': RelData([4, 6, 4, 9, 5, 1, 0, 4, 6], [3, 0, 9, 6, 4, 10, 1, 4, 8],False, True),
# 'determiner-d2p': RelData([7, 3, 4, 8], [10, 9, 5, 10],False, True),
# #'i object-d2p': RelData([6], [9],False, True),
# 'noun-modifier-p2d': RelData([4, 0, 9, 5, 3, 0, 0], [5, 8, 1, 8, 3, 1, 5],False, False),
# 'num-modifier-d2p': RelData([7, 6, 3, 8, 7, 6, 0, 10], [10, 5, 10, 5, 6, 4, 11, 10],False, True),
# 'object-d2p': RelData([7, 6, 4, 5, 3], [9, 9, 6, 3, 8],False, True),
# 'other-d2p': RelData([7, 4, 8, 6, 3, 0], [10, 5, 5, 5, 10, 8],False, True),
# 'punctuation-p2d': RelData([11, 10, 2, 11, 7, 7], [6, 7, 2, 2, 8, 7],False, False),
# 'subject-p2d': RelData([7, 4], [11, 10],False, False)}
# diagonal mask2
relation_rules = {'adj-clause-p2d': RelData([4, 7, 6, 0], [5, 6, 5, 8],False, False),
'adj-modifier-d2p': RelData([3, 7, 6, 5, 7, 8, 0, 2], [9, 10, 5, 7, 6, 5, 8, 11],False, True),
'adv-clause-p2d': RelData([4, 5, 5, 0, 4, 5, 11, 8, 3, 0], [3, 4, 5, 8, 9, 8, 8, 7, 1, 4],False, False),
'adv-modifier-d2p': RelData([7, 5, 6, 8, 7, 3, 10, 0, 6, 0], [6, 7, 5, 5, 10, 10, 10, 8, 4, 11],False, True),
'apposition-p2d': RelData([0, 9], [8, 0],False, False),
'auxiliary-d2p': RelData([3, 8, 7, 5, 4, 7, 10], [9, 5, 6, 0, 5, 10, 10],False, True),
'clausal subject-p2d': RelData([8, 0, 0, 0], [10, 8, 5, 1],False, False),
'clausal-p2d': RelData([5, 4, 7, 5, 0, 7, 4], [7, 5, 6, 8, 8, 1, 8],False, False),
'compound-d2p': RelData([3, 7, 6, 7, 0], [9, 6, 5, 10, 8],False, True),
'conjunct-d2p': RelData([4, 6, 4, 9, 5, 1, 0, 4, 6], [3, 0, 9, 6, 4, 10, 1, 4, 8],False, True),
'determiner-d2p': RelData([7, 3, 4, 8], [10, 9, 5, 10],False, True),
#'i object-d2p': RelData([6], [9],False, True),
'noun-modifier-p2d': RelData([4, 0, 9, 5, 3, 0, 0], [5, 8, 1, 8, 3, 1, 5],False, False),
'num-modifier-d2p': RelData([7, 6, 3, 8, 7, 6, 0, 10], [10, 5, 10, 5, 6, 4, 11, 10],False, True),
'object-d2p': RelData([7, 6, 4, 5, 3], [9, 9, 6, 3, 8],False, True),
'other-d2p': RelData([7, 4, 8, 6, 3, 0], [10, 5, 5, 5, 10, 8],False, True),
'punctuation-d2p': RelData([4, 8, 3, 7, 3], [5, 5, 10, 5, 9],False, True),
'subject-p2d': RelData([7, 4], [11, 10],False, False)
}
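# How a rule is consumed (see multigraph_aborescene below): for e.g.
# 'subject-p2d' the attention matrices at (layer, head) pairs (7, 11) and
# (4, 10) are averaged; because d2p is False the averaged matrix keeps its
# parent->dependent orientation, otherwise it would be transposed before
# the maximum-spanning-arborescence search.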
# based on tiny set 10 examples
# relation_rules = {'adj-clause-p2d': RelData([1], [1],False, False),
# 'adj-modifier-d2p': RelData([3], [9],False, True),
# 'adv-clause-p2d': RelData([1], [10],False, False),
# 'adv-modifier-p2d': RelData([2], [10],False, False),
# 'apposition-p2d': RelData([2], [1],False, False),
# 'auxiliary-p2d': RelData([3], [8],False, False),
# 'compound-d2p': RelData([5], [11],False, True),
# 'conjunct-d2p': RelData([11, 4], [9, 7],False, True),
# 'determiner-p2d': RelData([7], [9],False, False),
# 'noun-modifier-p2d': RelData([3], [9],False, False),
# 'num-modifier-p2d': RelData([11], [11],False, False),
# 'object-d2p': RelData([9], [3],False, True),
# 'other-d2p': RelData([6, 3, 5, 5], [6, 9, 9, 7],False, True),
# 'punctuation-d2p': RelData([6, 5], [6, 0],False, True),
# 'subject-p2d': RelData([4, 6], [10, 4],False, False)}
# more detailed relationship labels
# relation_rules = {'acl-p2d': RelData([3, 4, 7, 6, 5, 7], [3, 5, 6, 6, 9, 10],False, False),
# 'advcl-p2d': RelData([5, 3, 4, 5, 1], [8, 3, 8, 11, 5],False, False),
# 'advmod-p2d': RelData([7, 5, 6, 4, 9, 4], [7, 1, 9, 7, 3, 10],False, False),
# 'amod-d2p': RelData([3, 7, 4, 5], [9, 2, 5, 0],False, True),
# 'appos-p2d': RelData([3], [3],False, False),
# 'aux-d2p': RelData([7, 3, 5, 4, 1, 1], [2, 9, 9, 11, 6, 1],False, True),
# 'case-p2d': RelData([0, 5, 3, 9, 1], [3, 10, 11, 3, 4],False, False),
# 'cc-p2d': RelData([5, 3, 0, 6, 7, 7], [10, 11, 3, 9, 9, 4],False, False),
# 'ccomp-d2p': RelData([6, 5, 7, 7], [2, 3, 0, 9],False, True),
# 'compound-d2p': RelData([3, 7], [9, 2],False, True),
# 'conj-p2d': RelData([5, 9, 7, 3, 10, 4, 11], [5, 6, 8, 3, 5, 8, 8],False, False),
# 'csubj-p2d': RelData([7, 7], [1, 10],False, False),
# 'det-p2d': RelData([0, 6, 2, 6, 1], [3, 9, 1, 3, 4],False, False),
# 'discourse-p2d': RelData([11], [11],False, False),
# 'expl-p2d': RelData([5], [9],False, False),
# 'fixed-d2p': RelData([8], [6],False, True),
# 'flat-p2d': RelData([8], [5],False, False),
# 'iobj-p2d': RelData([3], [9],False, False),
# 'mark-p2d': RelData([7, 6, 5, 0, 6, 7, 8], [9, 9, 6, 3, 10, 4, 11],False, False),
# 'nmod-d2p': RelData([6, 3, 7, 6], [9, 11, 9, 2],False, True),
# 'nsubj-d2p': RelData([7, 6, 5, 5, 1, 7, 5, 7, 4], [2, 6, 9, 0, 6, 1, 11, 8, 11],False, True),
# 'nummod-p2d': RelData([7, 4, 0, 6, 5], [11, 7, 2, 9, 9],False, False),
# 'obj-d2p': RelData([6, 3, 6], [9, 11, 10],False, True),
# 'parataxis-p2d': RelData([5], [5],False, False),
# 'punct-p2d': RelData([5, 6], [10, 11],False, False),
# 'vocative-p2d': RelData([6], [9],False, False),
# 'xcomp-d2p': RelData([6, 3, 5, 5], [9, 11, 6, 3],False, True)}
def rewrite_conllu(conllu_file, conllu_out_pred, conllu_out_gold, break_after=1000):
CONLLU_ID = 0
CONLLU_LABEL = 7
CONLLU_HEAD = 6
reverse_label_map = {value: key for key, value in dependency.label_map.items()}
reverse_label_map['other'] = 'dep'
print(reverse_label_map)
lengths = []
uas = []
length_sent = 0
out_lines = []
out_lines_gold = []
with open(conllu_file, 'r') as in_conllu:
sentid = 0
for line in in_conllu:
if sentid > break_after:
break
if line == '\n':
out_lines.append(line.strip())
out_lines_gold.append(line.strip())
uas_sent = (np.array(list(map(int, gold.ravel() != 'no edge'))) * np.array(
list(map(int, pred.ravel() != 'no edge')))).sum() / np.array(list(map(int,gold.ravel()!='no edge'))).sum()
uas.append(uas_sent)
lengths.append(length_sent)
print(f"Processed sentence {sentid}", flush=True)
sentid += 1
elif line.startswith('#'):
if line.startswith('# sent_id'):
out_lines.append(line.strip() + '/pred')
out_lines_gold.append(line.strip() + '/gold')
pred, gold = multigraph_aborescene(sentid)
length_sent = 0
else:
out_lines_gold.append(line.strip())
out_lines.append(line.strip())
else:
fields = line.strip().split('\t')
out_lines_gold.append(line.strip())
if fields[CONLLU_ID].isdigit():
length_sent += 1
if fields[CONLLU_LABEL].strip() != 'root':
col = pred.transpose()[int(fields[CONLLU_ID]) - 1]
x = np.argwhere(col != 'no edge')
x = x.item()
lab = reverse_label_map[col[x][:-4]]
fields[CONLLU_HEAD] = str(x + 1)
fields[CONLLU_LABEL] = lab
out_lines.append('\t'.join(fields))
with open(conllu_out_pred, 'w') as out_conllu:
out_conllu.write('\n'.join(out_lines))
with open(conllu_out_gold, 'w') as out_conllu:
out_conllu.write('\n'.join(out_lines_gold))
print("mena uas:")
print(np.mean(np.array(uas)))
print("length uas corr coef:")
print(np.corrcoef(np.array(uas), np.array(lengths)))
def multigraph_aborescene(sentence_index):  # called with one argument; uses the imported dependency module
matrices, sentence_id = next(attention_gen)
assert sentence_index == sentence_id
words_list = common_tokens[sentence_index]
words = ' '.join(words_list)
edge_labeled = {(h, d): l for d, h, l, p in dependency_rels[sentence_index] if l != 'root'}
root_ord = 0
for d, h, l, p in dependency_rels[sentence_index]:
if l == 'root':
root_ord = d
break
DG = nx.DiGraph()
DG.add_edges_from(edge_labeled.keys())
labels = {}
for node in DG.nodes():
labels[node] = words_list[node]
MultiAttention = nx.MultiDiGraph()
MultiAttention.add_nodes_from(DG.nodes())
multi_edge2label = dict()
for relation, rules in relation_rules.items():
aggr_matrix = np.mean(np.array(matrices)[rules.layers, rules.heads, :, :], axis=0)
if rules.d2p:
aggr_matrix = aggr_matrix.transpose()
aggr_matrix[:, root_ord] = 0.001
np.fill_diagonal(aggr_matrix, 0.001)
aggr_matrix = np.log(aggr_matrix/(1-aggr_matrix))
AG = nx.from_numpy_matrix(aggr_matrix, create_using=nx.DiGraph)
for u, v, d in AG.edges(data=True):
multi_edge2label[(u, v, d['weight'])] = relation
# include statistical info about pos:
MultiAttention.add_edges_from(AG.edges(data=True), label=relation)
AttentionAborescene = tree.branchings.maximum_spanning_arborescence(MultiAttention)
espanning = AttentionAborescene.edges(data=True)
weights = [max(d['weight'] * 20, 1) for _, _, d in espanning]
attention_labels = {(u, v): multi_edge2label[(u, v, d['weight'])] for u, v, d in espanning}
espanning = [(u, v) for (u, v, d) in espanning]
posA = nx.spring_layout(AttentionAborescene)
alabelm = np.full((len(aggr_matrix), len(aggr_matrix)), 'no edge', dtype='U24')
dlabelm = np.full((len(aggr_matrix), len(aggr_matrix)), 'no edge', dtype='U24')
for aedge, ael in attention_labels.items():
alabelm[aedge[0], aedge[1]] = ael
# else:
# alabelm[aedge[1],aedge[0]] = ael
for dedge, deel in edge_labeled.items():
deel = dependency.transform_label(deel)
if deel + '-d2p' in relation_rules:
dlabelm[dedge[0], dedge[1]] = deel + '-d2p'
elif deel + '-p2d' in relation_rules:
dlabelm[dedge[0], dedge[1]] = deel + '-p2d'
elif 'other-d2p' in relation_rules:
dlabelm[dedge[0], dedge[1]] = 'other-d2p'
elif 'other-p2d' in relation_rules:
dlabelm[dedge[1], dedge[0]] = 'other-p2d'
return alabelm, dlabelm
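# Minimal self-contained illustration (added; the toy weights are made up) of
# the core step above: Edmonds' maximum spanning arborescence over a
# MultiDiGraph whose parallel edges come from competing relations / heads.
def _arborescence_demo():
    import networkx as nx
    from networkx.algorithms import tree
    G = nx.MultiDiGraph()
    G.add_edge(0, 1, weight=0.9)  # stronger candidate head for node 1
    G.add_edge(0, 1, weight=0.2)  # competing relation for the same pair
    G.add_edge(1, 2, weight=0.7)
    arb = tree.branchings.maximum_spanning_arborescence(G)
    return sorted(arb.edges(data='weight'))  # [(0, 1, 0.9), (1, 2, 0.7)]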
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("-a", "--attentions", required=True, help="NPZ file with attentions")
ap.add_argument("-t", "--tokens", required=True, help="Labels (tokens) separated by spaces")
ap.add_argument("-T", "--train-conllu", help="Conllu file for training POS",
default='/net/projects/LSD/attention_tomasz/lsd/attention-analysis/naacl2019/graph-extraction/entrain.conllu')
ap.add_argument("-c", "--conllu", help="Eval against the given conllu file")
ap.add_argument("-o", "--output-pred")
ap.add_argument("-g", "--output-gold")
ap.add_argument("-s", "--sentences", nargs='*', type=int, default=None,
help="Only use the specified sentences; 0-based")
ap.add_argument("-m", "--maxlen", type=int, default=1000,
help="Skip sentences longer than this many words. A word split into several wordpieces is counted as one word. EOS is not counted.")
ap.add_argument("-e", "--eos", action="store_true",
help="Attentions contain EOS")
ap.add_argument("-n", "--no-softmax", action="store_true",
help="Whether not to use softmax for attention matrices, use with bert metrices")
args = ap.parse_args()
attentions_loaded = np.load(args.attentions)
sentences_count = len(attentions_loaded.files)
layers_count = attentions_loaded['arr_0'].shape[0]
heads_count = attentions_loaded['arr_0'].shape[1]
with open(args.tokens) as tokens_file:
tokens_loaded = [l.split() for l in tokens_file]
# in dependency_rels, for each sentence there is a list of tuples (token, token's head)
# in dependency_rels_rev the tuples are reversed.
dependency_rels = dependency.read_conllu_labeled(args.conllu)
grouped_tokens, common_tokens = dependency.group_wordpieces(tokens_loaded, args.conllu)
attention_gen = sentence_attentions.generate_matrices(attentions_loaded, grouped_tokens, args.eos, args.no_softmax,
args.maxlen, args.sentences)
pos_frame = dependency.conllu2freq_frame(args.train_conllu)
rewrite_conllu(args.conllu, args.output_pred, args.output_gold, break_after=300)
| 44.503165 | 149 | 0.59653 | 2,342 | 14,063 | 3.502989 | 0.138343 | 0.054851 | 0.021453 | 0.011702 | 0.422721 | 0.381399 | 0.301438 | 0.291931 | 0.286689 | 0.256704 | 0 | 0.096092 | 0.183033 | 14,063 | 315 | 150 | 44.644444 | 0.617982 | 0.412501 | 0 | 0.06875 | 0 | 0.0125 | 0.139307 | 0.012119 | 0 | 0 | 0 | 0 | 0.00625 | 1 | 0.0125 | false | 0 | 0.025 | 0 | 0.04375 | 0.0375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69a2448f8373ef1e9a09551af005d5fb310a079a | 1,518 | py | Python | klue_baseline/models/relation_extraction.py | tucan9389/KLUE-baseline | add61158e61f86adfca65087237443828b650090 | [
"Apache-2.0"
] | 71 | 2021-07-29T11:34:50.000Z | 2022-03-21T08:17:21.000Z | klue_baseline/models/relation_extraction.py | tucan9389/KLUE-baseline | add61158e61f86adfca65087237443828b650090 | [
"Apache-2.0"
] | 3 | 2021-08-20T14:19:58.000Z | 2021-12-03T06:42:27.000Z | klue_baseline/models/relation_extraction.py | tucan9389/KLUE-baseline | add61158e61f86adfca65087237443828b650090 | [
"Apache-2.0"
] | 16 | 2021-08-01T02:29:11.000Z | 2022-02-25T07:51:03.000Z | import argparse
from typing import Dict, List, Tuple
import torch
from overrides import overrides
from .mode import Mode
from .sequence_classification import SCTransformer
class RETransformer(SCTransformer):
mode: str = Mode.RelationExtraction
def __init__(self, hparams: argparse.Namespace, metrics: dict = {}) -> None:
super().__init__(hparams, metrics=metrics)
self.label_list = hparams.label_list
@overrides
def validation_epoch_end(
self, outputs: List[Dict[str, torch.Tensor]], data_type: str = "valid", write_predictions: bool = False
) -> None:
labels = torch.cat([output["labels"] for output in outputs], dim=0)
preds, probs = self._convert_outputs_to_preds(outputs)
if write_predictions:
self.predictions = preds
self._set_metrics_device()
micro_f1 = self.metrics["micro_f1"]
micro_f1(preds, labels, self.label_list)
self.log(f"{data_type}/micro_f1", micro_f1, on_step=False, on_epoch=True, logger=True)
auprc = self.metrics["auprc"]
auprc(probs, labels)
self.log(f"{data_type}/auprc", auprc, on_step=False, on_epoch=True, logger=True)
@overrides
def _convert_outputs_to_preds(self, outputs: List[Dict[str, torch.Tensor]]) -> Tuple[torch.Tensor, torch.Tensor]:
# logits: (B, num_labels)
logits = torch.cat([output["logits"] for output in outputs], dim=0)
return torch.argmax(logits, dim=1), torch.softmax(logits, dim=1)
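# Hedged demo (added for illustration; not part of KLUE-baseline, and only
# meaningful inside the package because of the relative imports above): what
# the argmax / softmax conversion yields on dummy logits.
if __name__ == "__main__":
    outputs = [{"logits": torch.tensor([[2.0, 0.5, -1.0], [0.1, 0.2, 3.0]])}]
    logits = torch.cat([o["logits"] for o in outputs], dim=0)
    preds, probs = torch.argmax(logits, dim=1), torch.softmax(logits, dim=1)
    print(preds)             # tensor([0, 2])
    print(probs.sum(dim=1))  # every row sums to 1.0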
| 34.5 | 117 | 0.681818 | 198 | 1,518 | 5.025253 | 0.348485 | 0.035176 | 0.026131 | 0.038191 | 0.207035 | 0.174874 | 0.130653 | 0.064322 | 0 | 0 | 0 | 0.007438 | 0.202899 | 1,518 | 43 | 118 | 35.302326 | 0.814876 | 0.015152 | 0 | 0.066667 | 0 | 0 | 0.044876 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69a2f926c0e745aa76abcf2f7851f4911e10c02a | 816 | py | Python | examples/chart-types/word_tree.py | tcbegley/dash-google-charts | b8b22e5b6bac533167f218e3610697dec0c3e4ca | [
"Apache-2.0"
] | 6 | 2019-01-23T17:37:09.000Z | 2020-11-17T16:12:27.000Z | examples/chart-types/word_tree.py | tcbegley/dash-google-charts | b8b22e5b6bac533167f218e3610697dec0c3e4ca | [
"Apache-2.0"
] | 9 | 2019-01-25T11:09:17.000Z | 2022-02-26T09:10:04.000Z | examples/chart-types/word_tree.py | tcbegley/dash-google-charts | b8b22e5b6bac533167f218e3610697dec0c3e4ca | [
"Apache-2.0"
] | 1 | 2019-01-23T17:37:12.000Z | 2019-01-23T17:37:12.000Z | import dash
from dash_google_charts import WordTree
app = dash.Dash()
app.layout = WordTree(
data=[
["Phrases"],
["cats are better than dogs"],
["cats eat kibble"],
["cats are better than hamsters"],
["cats are awesome"],
["cats are people too"],
["cats eat mice"],
["cats meowing"],
["cats in the cradle"],
["cats eat mice"],
["cats in the cradle lyrics"],
["cats eat kibble"],
["cats for adoption"],
["cats are family"],
["cats eat mice"],
["cats are better than kittens"],
["cats are evil"],
["cats are weird"],
["cats eat mice"],
],
options={"wordtree": {"format": "implicit", "word": "cats"}},
)
if __name__ == "__main__":
app.run_server()
| 24.727273 | 65 | 0.515931 | 91 | 816 | 4.505495 | 0.461538 | 0.136585 | 0.107317 | 0.12439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.318627 | 816 | 32 | 66 | 25.5 | 0.73741 | 0 | 0 | 0.206897 | 0 | 0 | 0.438725 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.068966 | 0 | 0.068966 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69a4cf62f85e10cc538b2079b12a30a64505ab66 | 7,432 | py | Python | context_processors/utils.py | urchinpro/L2-forms | 37f33386984efbb2d1e92c73d915256247801109 | [
"MIT"
] | null | null | null | context_processors/utils.py | urchinpro/L2-forms | 37f33386984efbb2d1e92c73d915256247801109 | [
"MIT"
] | null | null | null | context_processors/utils.py | urchinpro/L2-forms | 37f33386984efbb2d1e92c73d915256247801109 | [
"MIT"
] | null | null | null | import clients.models as Clients
import simplejson as json
from appconf.manager import SettingManager
def card_bases(request):
card_bases_vars = []
for b in Clients.CardBase.objects.filter(hide=False).order_by("pk"):
card_bases_vars.append(dict(title=b.title, code=b.short_title, pk=b.pk, history_number=b.history_number))
return {"card_bases": json.dumps(card_bases_vars)}
def ws(request):
from laboratory import settings
return {"ws_url": json.dumps(settings.WS_URL), "ws_enabled": json.dumps(settings.WS_ENABLED)}
def menu(request):
from laboratory import VERSION
data = []
if request.user.is_authenticated and not request.is_ajax():
from laboratory import settings
groups = [str(x) for x in request.user.groups.all()]
pages = [
{"url": "/mainmenu/", "title": "Начальная страница", "nt": False, "access": ["*"], "not_show_home": True},
{"url": "/logout", "title": "Выход из профиля", "nt": False, "access": ["*"], "not_show_home": True},
{"hr": True, "access": ["*"]},
{"url": "/mainmenu/directions", "title": "Направления", "nt": False, "access": ["Лечащий врач", "Оператор лечащего врача"]},
{"url": "/mainmenu/direction/info", "title": "История направления", "nt": False, "access": ["Лечащий врач", "Оператор лечащего врача", "Лаборант", "Врач-лаборант", "Просмотр журнала"]},
{"url": "/mainmenu/directions/multiprint", "title": "Печать направлений", "nt": False, "access": ["*"]},
{"url": "/mainmenu/results_fastprint", "title": "Печать результатов", "nt": False, "access": ["Лечащий врач", "Оператор лечащего врача"]},
{"url": "/mainmenu/biomaterial/get", "title": "Забор биоматериала", "nt": False, "access": ["Заборщик биоматериала"]},
{"url": "/mainmenu/receive", "title": "Приём биоматериала", "nt": False, "access": ["Получатель биоматериала"]},
{"url": "/mainmenu/statistics-tickets", "title": "Статталоны", "nt": False, "access": [
"Оформление статталонов",
"Лечащий врач",
"Оператор лечащего врача",
]},
{"url": "/mainmenu/receive/one_by_one", "title": "Приём биоматериала по одному", "nt": False, "access": ["Получатель биоматериала"]},
{"url": "/mainmenu/receive/journal_form", "title": "Журнал приёма", "nt": False, "access": ["Получатель биоматериала"]},
{"url": "/results/enter", "title": "Ввод результатов", "nt": False, "access": ["Врач-лаборант", "Лаборант", "Сброс подтверждений результатов"]},
{"url": "/construct/menu", "title": "Конструктор справочника", "nt": False,
"access": ["Конструктор: Лабораторные исследования",
"Конструктор: Параклинические (описательные) исследования",
"Конструктор: Консультации",
"Конструктор: Ёмкости для биоматериала",
"Конструктор: Настройка УЕТов",
"Конструктор: Группировка исследований по направлениям"]},
{"url": "/statistic", "title": "Статистика", "nt": False, "access": ["Просмотр статистики", "Врач-лаборант"]},
{"url": "/mainmenu/results_history", "title": "Поиск", "nt": False,
"access": ["Лечащий врач",
"Оператор лечащего врача",
"Врач-лаборант",
"Лаборант",
"Врач параклиники"]},
{"url": "/mainmenu/results_report", "title": "Отчёт по результатам", "nt": False,
"access": ["Лечащий врач",
"Оператор лечащего врача",
"Врач-лаборант",
"Лаборант",
"Врач параклиники"]},
{"url": "/mainmenu/discharge", "title": "Выписки", "nt": False, "access": ["Загрузка выписок", "Поиск выписок"], "module": "discharge_module"},
{"url": "/mainmenu/create_user", "title": "Создать пользователя", "nt": False, "access": ["Создание и редактирование пользователей"]},
{"url": "/mainmenu/change_password", "title": "Настройка профилей пользователей", "nt": False, "access": ["Создание и редактирование пользователей"]},
{"url": "/mainmenu/create_podr", "title": "Управление подразделениями", "nt": False, "access": ["Создание и редактирование пользователей"]},
{"url": "/mainmenu/view_log", "title": "Просмотр журнала", "nt": False, "access": ["Просмотр журнала"]},
# {"url": "/reports", "title": "Отчёты", "nt": False, "access": []},
{"url": "/admin", "title": "Администрирование L2", "nt": False, "access": []},
{"url": "/silk/", "title": "Профилирование", "nt": False, "access": []},
{"url": '/mainmenu/cards', "title": "Картотека L2", "nt": True, "access": ["Картотека L2"], "module": "mis_module"},
{"url": "/mainmenu/direction_visit", "title": "Посещения по направлениям", "nt": False, "access": ["Посещения по направлениям", "Врач параклиники"], "module": "paraclinic_module"},
{"url": "/mainmenu/results/paraclinic", "title": "Ввод описательных результатов", "nt": False, "access": ["Врач параклиники"], "module": "paraclinic_module"},
{"url": '/mainmenu/hosp', "title": "Госпитализация", "nt": True, "access": ["Госпитализация"], "module": "hosp_module"},
{"url": '/mainmenu/rmis_confirm', "title": "Подтверждение отправки результатов в РМИС", "nt": False, "access": ["Подтверждение отправки результатов в РМИС"]},
{"url": '/cases/', "title": "Случаи обслуживания", "nt": False, "access": []},
]
if settings.LDAP and settings.LDAP["enable"]:
pages.append({"url": "/mainmenu/ldap_sync", "title": "Синхронизация с LDAP", "nt": False, "access": []})
if settings.RMQ_ENABLED:
pages.append({"url": "/mainmenu/rmq", "title": "Rabbit MQ", "nt": False, "access": []})
pages.append({"url": "/mainmenu/utils", "title": "Инструменты", "nt": False, "access": []})
if SettingManager.get("home_page", default="false") != "false":
pages.append(
{"url": SettingManager.get(key="home_page", default="http://home"), "title": "Домашняя страница",
"nt": True, "access": ["*"]})
if SettingManager.get("support", default="false") != "false":
pages.append(
{"url": SettingManager.get(key="support", default="false"), "title": "Техническая поддержка",
"nt": True, "access": ["*"]})
data = make_menu(pages, groups, request.user.is_superuser, request.path)
return {"mainmenu": data, "version": VERSION}
def make_menu(pages, groups, superuser, current_path=None):
menu = []
groups_set = set(groups)
for page in pages:
if (not superuser and "*" not in page["access"] and len(groups_set & set(page["access"])) == 0) or (
page.get("module") and not SettingManager.get(page["module"], default='false', default_type='b')):
continue
page["active"] = current_path == page.get("url")
menu.append(page)
return menu
def profile(request):
if not request.user.is_authenticated:
return {}
return {"specialities": [x.title for x in request.user.doctorprofile.specialities.all() if not x.hide]}
| 65.192982 | 197 | 0.583692 | 741 | 7,432 | 5.784076 | 0.2861 | 0.05063 | 0.094027 | 0.037797 | 0.285114 | 0.225852 | 0.216986 | 0.150023 | 0.150023 | 0.068595 | 0 | 0.000699 | 0.230086 | 7,432 | 113 | 198 | 65.769912 | 0.74834 | 0.008881 | 0 | 0.173469 | 0 | 0 | 0.423275 | 0.052146 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05102 | false | 0.010204 | 0.061224 | 0 | 0.173469 | 0.020408 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69a7f0433ba3a569747e0d7771759a3942aab2f1 | 729 | py | Python | api/configfile.py | giorno420/pyDashactyl | e8adac76e3d73c332a723bcc577faeaf779fa72b | [
"MIT"
] | null | null | null | api/configfile.py | giorno420/pyDashactyl | e8adac76e3d73c332a723bcc577faeaf779fa72b | [
"MIT"
] | null | null | null | api/configfile.py | giorno420/pyDashactyl | e8adac76e3d73c332a723bcc577faeaf779fa72b | [
"MIT"
] | null | null | null | import json
"""
Python config file.
This file is used by every single other file in
this project, just for convenience.
It gets config data and user data from `settings.json` and `users.json`, and assigns
them to Pythonic variables.
Make a pull request if you think anything can be improved.
"""
with open('settings.json', 'r') as cfg:
settings = json.load(cfg)
discordsettings = settings["discord"]
pterosettings = settings["pterodactyl"]
clientID = discordsettings['application_id']
clientSecret = discordsettings['secret_key']
redirectURI = discordsettings['redirect_uri']
pteroURL = pterosettings['url']
pteroAppKey = pterosettings['key']
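# For reference (an assumed shape inferred from the keys read above, not an
# authoritative schema), settings.json is expected to look roughly like:
# {
#     "discord": {"application_id": "...", "secret_key": "...", "redirect_uri": "..."},
#     "pterodactyl": {"url": "...", "key": "..."}
# }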
with open('api/users.json') as usrs:  # forward slashes work on all platforms
users = json.load(usrs) | 24.3 | 84 | 0.742112 | 96 | 729 | 5.604167 | 0.677083 | 0.066915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.152263 | 729 | 30 | 85 | 24.3 | 0.87055 | 0 | 0 | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69a8daf782d0eb6dfe40db0dd5af7fc8869ecb45 | 1,099 | py | Python | gcn.py | Aveek-Saha/Graph-Conv-Net | 16c4608d8ff3c2d2d97c1f2b4aecddece456684b | [
"MIT"
] | 1 | 2021-09-16T07:03:49.000Z | 2021-09-16T07:03:49.000Z | gcn.py | Aveek-Saha/Graph-Conv-Net | 16c4608d8ff3c2d2d97c1f2b4aecddece456684b | [
"MIT"
] | null | null | null | gcn.py | Aveek-Saha/Graph-Conv-Net | 16c4608d8ff3c2d2d97c1f2b4aecddece456684b | [
"MIT"
] | 1 | 2021-02-16T14:08:59.000Z | 2021-02-16T14:08:59.000Z | import tensorflow as tf
import numpy as np
import networkx as nx
# import matplotlib.pyplot as plt
def norm_adjacency_matrix(A):
I = tf.eye(tf.shape(A)[0])
A_hat = A + I
D_inv = tf.linalg.tensor_diag(
tf.pow(tf.reduce_sum(A_hat, 0), tf.cast(-0.5, tf.float32)))
D_inv = tf.where(tf.math.is_inf(D_inv), tf.zeros_like(D_inv), D_inv)
A_hat = D_inv @ A_hat @ D_inv
return A_hat
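# Note (added for clarity): the function above builds the renormalized
# propagation matrix of Kipf & Welling's GCN, A_hat = D^{-1/2} (A + I) D^{-1/2},
# where D is the degree matrix of A + I. For a 2-node graph with a single edge,
# A + I has row sums of 2, so every entry of the result is 0.5.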
class GraphConvolutionLayer(tf.keras.layers.Layer):
def __init__(self, units, A, activation=tf.identity, rate=0.0, l2=0.0):
super(GraphConvolutionLayer, self).__init__()
self.activation = activation
self.units = units
self.rate = rate
self.l2 = l2
self.A = A
def build(self, input_shape):
self.W = self.add_weight(
shape=(input_shape[1], self.units),
dtype=self.dtype,
initializer='glorot_uniform',
regularizer=tf.keras.regularizers.l2(self.l2)
)
def call(self, X):
X = tf.nn.dropout(X, self.rate)
X = self.A @ X @ self.W
return self.activation(X)
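# Hedged usage sketch (added for illustration; the layer and function names are
# from this file, the graph and layer sizes are made up): stacking two of the
# layers above into a small node-representation model.
if __name__ == '__main__':
    G = nx.karate_club_graph()
    A = tf.convert_to_tensor(nx.to_numpy_array(G), dtype=tf.float32)
    A_hat = norm_adjacency_matrix(A)
    X = tf.eye(A.shape[0])  # one-hot node features
    gcn = tf.keras.Sequential([
        GraphConvolutionLayer(16, A_hat, activation=tf.nn.relu),
        GraphConvolutionLayer(2, A_hat),
    ])
    print(gcn(X).shape)  # (34, 2)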
| 24.977273 | 75 | 0.619654 | 170 | 1,099 | 3.829412 | 0.4 | 0.043011 | 0.02765 | 0.024578 | 0.030722 | 0.030722 | 0 | 0 | 0 | 0 | 0 | 0.019536 | 0.254777 | 1,099 | 43 | 76 | 25.55814 | 0.775336 | 0.028207 | 0 | 0 | 0 | 0 | 0.013133 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.1 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69a94585dad8923405082850f97610b33b356cf8 | 18,173 | py | Python | mlrun/frameworks/_common/mlrun_interface.py | george0st/mlrun | 6467d3a5ceadf6cd35512b84b3ddc3da611cf39a | [
"Apache-2.0"
] | null | null | null | mlrun/frameworks/_common/mlrun_interface.py | george0st/mlrun | 6467d3a5ceadf6cd35512b84b3ddc3da611cf39a | [
"Apache-2.0"
] | null | null | null | mlrun/frameworks/_common/mlrun_interface.py | george0st/mlrun | 6467d3a5ceadf6cd35512b84b3ddc3da611cf39a | [
"Apache-2.0"
] | null | null | null | import copy
import functools
import inspect
from abc import ABC
from types import MethodType
from typing import Any, Dict, Generic, List, Tuple
from .utils import MLRunInterfaceableType
RestorationInformation = Tuple[
Dict[str, Any], # Interface properties.
Dict[str, Any], # Replaced properties.
List[str], # Replaced methods and functions.
]
class MLRunInterface(ABC, Generic[MLRunInterfaceableType]):
"""
An abstract class for enriching an object interface with the properties, methods and functions written below.
A class inheriting MLRun interface should insert what ever it needs to be inserted to the object to the following
attributes: '_PROPERTIES', '_METHODS' and '_FUNCTIONS'. Then it should implement 'add_interface' and call 'super'.
In order to replace object's attributes, the attributes to replace are needed to be added to the attributes:
'_REPLACED_PROPERTIES', '_REPLACED_METHODS' and '_REPLACED_FUNCTIONS'. The original attribute will be kept in a
backup attribute with the prefix noted in '_ORIGINAL_ATTRIBUTE_NAME'. Replacing functions / methods will be the one
located by looking for the prefix noted in '_REPLACING_ATTRIBUTE_NAME'. The replacing function / method can be a
MLRunInterface class method that return a function / method.
For example: if "x" is in the list then the method "object.x" will be stored as "object.original_x" and "object.x"
will then point to the method "MLRunInterface.mlrun_x". If "mlrun_x" is a class method, it will point to the
function returned from MLRunInterface.mlrun_x()".
"""
# Attributes to be inserted so the MLRun interface will be fully enabled.
_PROPERTIES = {} # type: Dict[str, Any]
_METHODS = [] # type: List[str]
_FUNCTIONS = [] # type: List[str]
# Attributes to replace so the MLRun interface will be fully enabled.
_REPLACED_PROPERTIES = {} # type: Dict[str, Any]
_REPLACED_METHODS = [] # type: List[str]
_REPLACED_FUNCTIONS = [] # type: List[str]
# Name template for the replaced attribute to be stored as in the object.
_ORIGINAL_ATTRIBUTE_NAME = "original_{}"
# Name template of the function / method to look for to replace the original function / method.
_REPLACING_ATTRIBUTE_NAME = "mlrun_{}"
@classmethod
def add_interface(
cls,
obj: MLRunInterfaceableType,
restoration_information: RestorationInformation = None,
):
"""
Enrich the object with this interface properties, methods and functions so it will have this framework MLRun's
features.
:param obj: The object to enrich his interface.
:param restoration_information: Restoration information tuple as returned from 'remove_interface' in order to
add the interface in a certain state.
"""
# Set default value to the restoration data:
if restoration_information is None:
restoration_information = (None, None, None)
# Add the MLRun properties:
cls._insert_properties(
obj=obj,
properties=restoration_information[0],
)
# Replace the object's properties in MLRun's properties:
cls._replace_properties(obj=obj, properties=restoration_information[1])
# Add the MLRun functions:
cls._insert_functions(obj=obj)
# Replace the object's functions / methods in MLRun's functions / methods:
cls._replace_functions(obj=obj, functions=restoration_information[2])
@classmethod
def remove_interface(cls, obj: MLRunInterfaceableType) -> RestorationInformation:
"""
Remove the MLRun features from the given object. The properties and replaced attributes found in the object will
be returned.
:param obj: The object to remove the interface from.
:return: A tuple of interface restoration information:
[0] = The interface properties.
[1] = The replaced properties.
[2] = The replaced methods and functions.
"""
# Get the interface properties from the element:
properties = {
attribute: getattr(obj, attribute)
for attribute in cls._PROPERTIES
if hasattr(obj, attribute) # Later it will be asserted.
}
# Get the replaced properties from the object:
replaced_properties = {
attribute: getattr(obj, attribute)
for attribute in cls._REPLACED_PROPERTIES
if hasattr(obj, cls._ORIGINAL_ATTRIBUTE_NAME.format(attribute))
}
# Get the replaced methods and functions from the object:
replaced_functions = [
function_name
for function_name in [*cls._REPLACED_METHODS, *cls._REPLACED_FUNCTIONS]
if hasattr(obj, cls._ORIGINAL_ATTRIBUTE_NAME.format(function_name))
]
# Restore the replaced attributes:
for attribute_name in [
*replaced_properties,
*replaced_functions,
]:
cls._restore_attribute(obj=obj, attribute_name=attribute_name)
# Remove the interface from the object:
for attribute_name in [*cls._PROPERTIES, *cls._METHODS, *cls._FUNCTIONS]:
assert hasattr(
obj, attribute_name
), f"Can't remove the attribute '{attribute_name}' as the object doesn't has it."
# Mark it first as None so the actual object won't be deleted:
setattr(obj, attribute_name, None)
delattr(obj, attribute_name)
return properties, replaced_properties, replaced_functions
@classmethod
def is_applied(cls, obj: MLRunInterfaceableType) -> bool:
"""
Check if the given object has MLRun interface attributes in it. Interface is applied if all of its attributes
are found in the object. If only replaced attributes are configured in the interface, then the interface is
applied if at least one of them is found in the object.
:param obj: The object to check.
:return: True if the MLRun interface is applied on the object and False if not.
"""
# Check for the attributes:
attributes = [*cls._PROPERTIES, *cls._METHODS, *cls._FUNCTIONS]
if attributes:
return all(hasattr(obj, attribute) for attribute in attributes)
# The interface has only replaced attributes, check if at least one is in the object:
replaced_attributes = [
*cls._REPLACED_PROPERTIES,
*cls._REPLACED_METHODS,
*cls._REPLACED_FUNCTIONS,
]
return any(hasattr(obj, attribute) for attribute in replaced_attributes)
@classmethod
def _insert_properties(
cls,
obj: MLRunInterfaceableType,
properties: Dict[str, Any] = None,
):
"""
Insert the properties of the interface to the object. The properties' default values are copied (not deep
copied) into the object.
:param obj: The object to enrich.
:param properties: Properties to set in the object.
"""
# Set default dictionary if there is no properties to restore:
if properties is None:
properties = {}
# Verify the provided properties are supported by this interface (noted in the interface '_PROPERTIES'):
error_properties = [
property_name
for property_name in properties
if property_name not in cls._PROPERTIES
]
assert not error_properties, (
f"The following properties provided to insert to the object are not supported by this interface: "
f"{error_properties}"
)
# Insert the properties, copy only default values:
for property_name, default_value in cls._PROPERTIES.items():
# Verify there is no property with the same name in the object:
assert not hasattr(obj, property_name), (
f"Can't insert the property '{property_name}' as the object already have an attribute with the same "
f"name."
)
# Insert the property to the object prioritizing given values over default ones:
value = (
properties[property_name]
if property_name in properties
else copy.copy(default_value)
)
setattr(obj, property_name, value)
@classmethod
def _insert_functions(cls, obj: MLRunInterfaceableType):
"""
Insert the functions / methods of the interface to the object.
:param obj: The object to enrich.
"""
# Insert the functions / methods:
for function_name in [*cls._METHODS, *cls._FUNCTIONS]:
# Verify there is no function / method with the same name in the object:
assert not hasattr(obj, function_name), (
f"Can't insert the function / method '{function_name}' as the object already have a function / method "
f"with the same name. To replace a function / method, add the name of the function / method to the "
f"'_REPLACED_METHODS' / '_REPLACED_METHODS' list and follow the instructions documented."
)
# Get the function / method:
func = getattr(cls, function_name)
# If the function is a method and not a function (appears in '_METHODS' and not '_FUNCTIONS'), set the
# 'self' to the object:
if function_name in cls._METHODS:
func = MethodType(func, obj)
# Insert the function / method to the object:
setattr(obj, function_name, func)
@classmethod
def _replace_properties(
cls, obj: MLRunInterfaceableType, properties: Dict[str, Any] = None
):
"""
Replace the properties of the given object according to the configuration in the MLRun interface.
:param obj: The object to replace its properties.
:param properties: The properties to replace in the object. Defaulted to all the properties in the interface
'_REPLACE_PROPERTIES' dictionary.
"""
# Set default replacing properties if there are no properties given:
if properties is None:
properties = {
property_name: copy.copy(property_value)
for property_name, property_value in cls._REPLACED_PROPERTIES.items()
}
else:
# Verify the provided properties are supported by this interface (noted in the interface's
# '_REPLACED_PROPERTIES' dictionary):
error_properties = [
property_name
for property_name in properties
if property_name not in cls._REPLACED_PROPERTIES
]
assert not error_properties, (
f"The following properties provided to be replace in the object are not supported by this "
f"interface: {error_properties}"
)
# Replace the properties in the object:
for property_name, property_value in properties.items():
# Verify there is a property with this name in the object to replace:
assert hasattr(
obj, property_name
), f"Can't replace the property '{property_name}' as the object doesn't have a property with this name."
# Replace the property:
cls._replace_property(
obj=obj,
property_name=property_name,
property_value=property_value,
include_none=True,
)
@classmethod
def _replace_functions(
cls, obj: MLRunInterfaceableType, functions: List[str] = None
):
"""
Replace the functions / methods of the given object according to the configuration in the MLRun interface.
:param obj: The object to replace its functions / methods.
:param functions: The functions / methods to replace in the object. Defaulted to all the functions / methods in
the interface '_REPLACE_METHODS' and '_REPLACE_FUNCTIONS' lists.
"""
# Set default list if there are no functions / methods to restore:
if functions is None:
functions = [*cls._REPLACED_METHODS, *cls._REPLACED_FUNCTIONS]
else:
# Verify the provided functions / methods are supported by this interface (noted in the interface's
# '_REPLACED_METHODS' or '_REPLACE_FUNCTIONS' lists):
error_functions = [
function_name
for function_name in functions
if function_name
not in [*cls._REPLACED_METHODS, *cls._REPLACED_FUNCTIONS]
]
assert not error_functions, (
f"The following functions / methods provided to be replace in the object are not supported by this "
f"interface: {error_functions}"
)
# Replace the functions / methods in the object:
for function_name in functions:
# Verify there is a function / method with this name in the object to replace:
assert hasattr(obj, function_name), (
f"Can't replace the function / method '{function_name}' as the object doesn't have a function / method "
f"with this name."
)
# Replace the method:
cls._replace_function(obj=obj, function_name=function_name)
@classmethod
def _replace_property(
cls,
obj: MLRunInterfaceableType,
property_name: str,
property_value: Any = None,
include_none: bool = False,
):
"""
Replace the property in the object with the configured property in this interface. The original property will be
stored in a backup attribute with the prefix noted in '_ORIGINAL_ATTRIBUTE_NAME' and the replacing property
will be the one with the prefix noted in '_REPLACING_ATTRIBUTE_NAME'. If the property value should be None, set
'include_none' to True, otherwise the interface default will be copied if 'property_value' is None.
:param obj: The object to replace its property.
:param property_name: The property name to replace.
:param property_value: The value to set. If not provided, the interface's default will be copied.
:param include_none: Whether to enable the property value to be None.
"""
# Get the original property from the object:
original_property = getattr(obj, property_name)
# Set a backup attribute for the original property:
original_property_name = cls._ORIGINAL_ATTRIBUTE_NAME.format(property_name)
setattr(obj, original_property_name, original_property)
# Check if a value is provided, if not copy the default value in this interface if None should not be included:
if not include_none and property_value is None:
property_value = copy.copy(cls._REPLACED_PROPERTIES[property_name])
# Replace the property:
setattr(obj, property_name, property_value)
@classmethod
def _replace_function(cls, obj: MLRunInterfaceableType, function_name: str):
"""
Replace the method / function in the object with the configured method / function in this interface. The
original method / function will be stored in a backup attribute with the prefix noted in
'_ORIGINAL_ATTRIBUTE_NAME' and the replacing method / function will be the one with the prefix noted in
'_REPLACING_ATTRIBUTE_NAME'.
:param obj: The object to replace its method.
:param function_name: The method / function name to replace.
"""
# Get the original function from the object:
original_function = getattr(obj, function_name)
# Set a backup attribute for the original function:
original_function_name = cls._ORIGINAL_ATTRIBUTE_NAME.format(function_name)
setattr(obj, original_function_name, original_function)
# Get the function to replace from the interface:
replacing_function_name = cls._REPLACING_ATTRIBUTE_NAME.format(function_name)
replacing_function = getattr(cls, replacing_function_name)
# Check if the replacing function is a class method (returning a function to use as the replacing function):
if inspect.ismethod(replacing_function):
replacing_function = replacing_function()
# Wrap the replacing function with 'functools.wraps' decorator so the properties of the original function will
# be passed to the replacing function:
replacing_function = functools.wraps(original_function)(replacing_function)
# If the replacing function is a method and not a function (appears in the _REPLACED_METHODS and not
# _REPLACED_FUNCTIONS), set the 'self' to the object:
if function_name in cls._REPLACED_METHODS:
replacing_function = MethodType(replacing_function, obj)
# Replace the function:
setattr(obj, function_name, replacing_function)
@classmethod
def _restore_attribute(cls, obj: MLRunInterfaceableType, attribute_name: str):
"""
Restore the replaced attribute (property, method or function) in the object, removing the backup attribute as
well.
:param obj: The object to restore its method.
:param attribute_name: The method to restore.
"""
# Get the original attribute:
original_attribute_name = cls._ORIGINAL_ATTRIBUTE_NAME.format(attribute_name)
original_attribute = getattr(obj, original_attribute_name)
# Set the attribute to point back to the original attribute:
setattr(obj, attribute_name, original_attribute)
# Remove the original backup attribute:
setattr(obj, original_attribute_name, None)
delattr(obj, original_attribute_name)
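# Hedged demo (added for illustration; the classes below are hypothetical and
# not part of MLRun): a minimal interface that swaps one method, following the
# 'original_{}' / 'mlrun_{}' naming conventions documented in the class
# docstring above.
if __name__ == "__main__":
    class GreeterInterface(MLRunInterface):
        _REPLACED_METHODS = ["greet"]

        def mlrun_greet(self):
            # 'original_greet' is the backup created by _replace_function.
            return "mlrun says: " + self.original_greet()

    class Greeter:
        def greet(self):
            return "hello"

    obj = Greeter()
    GreeterInterface.add_interface(obj)
    print(obj.greet())  # mlrun says: hello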
| 45.546366 | 120 | 0.654487 | 2,164 | 18,173 | 5.357209 | 0.092421 | 0.040369 | 0.018028 | 0.014664 | 0.357888 | 0.298715 | 0.234711 | 0.193048 | 0.163461 | 0.12982 | 0 | 0.000463 | 0.286194 | 18,173 | 398 | 121 | 45.660804 | 0.893232 | 0.441974 | 0 | 0.222772 | 0 | 0.014851 | 0.110677 | 0 | 0 | 0 | 0 | 0 | 0.039604 | 1 | 0.049505 | false | 0 | 0.034653 | 0 | 0.143564 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69a9471a68dd58de8072e2825adc0e140124a1f0 | 3,658 | py | Python | better_work_data/better_work_data/db/db_helper.py | JackDan9/miniProgram | 6c9b2714042a21fa8aa60d90800a903fbd51c0f5 | [
"MIT"
] | null | null | null | better_work_data/better_work_data/db/db_helper.py | JackDan9/miniProgram | 6c9b2714042a21fa8aa60d90800a903fbd51c0f5 | [
"MIT"
] | 6 | 2021-12-13T20:50:22.000Z | 2022-03-21T06:42:46.000Z | better_work_data/better_work_data/db/db_helper.py | JackDan9/miniProgram | 6c9b2714042a21fa8aa60d90800a903fbd51c0f5 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
from datetime import datetime
import pymysql
from twisted.enterprise import adbapi
from scrapy.utils.project import get_project_settings  # import the project settings
class DBHelper():
'''This class reads its configuration from settings; adapt the code to your needs.
Twisted is an event-driven network engine framework implemented in Python.
Twisted supports many common transport- and application-layer protocols,
including TCP, UDP, SSL/TLS, HTTP, IMAP, SSH, IRC and FTP.
Like Python itself, Twisted is "batteries-included": it ships with both client
and server implementations for all of the protocols it supports, along with
command-line tools, which makes configuring and deploying production-grade
Twisted applications very convenient.
'''
def __init__(self):
settings = get_project_settings()  # load the project settings and pick the values we need
db_params = dict(
host=settings['MYSQL_HOST'],
db=settings['MYSQL_DBNAME'],
user=settings['MYSQL_USER'],
passwd=settings['MYSQL_PASSWD'],
charset='utf8',  # the charset must be set, otherwise Chinese text may come out garbled
cursorclass=pymysql.cursors.DictCursor,
use_unicode=False
)
db_pool = adbapi.ConnectionPool('pymysql', **db_params)
self.db_pool = db_pool
# connect to the database
def connect(self):
return self.db_pool
# insert data into the database
def insert_parent_news(self, parent_news_item):
parent_news_sql = "INSERT INTO `parent_news`(`order`, `title`, `summary`, `source_type`, `source_name`, `publish_on`, `created_on`, `updated_on`) VALUES(%s, %s, %s, %s, %s, %s, %s, %s);"
# call the insert method
query = self.db_pool.runInteraction(self._conditional_parent_news_insert, sql=parent_news_sql, parent_news_item=parent_news_item)
# attach the error-handling callback
query.addErrback(self._handle_error)
return parent_news_item
def insert_child_news(self, child_news_item):
child_news_sql = "INSERT INTO `child_news`(`parent_id`, `language`, `author_name`, `site_name`, `title`, `summary`, `url`, `mobile_url`, `is_attachment`, `attachment_name`, `attachment_url`, `publish_on`, `created_on`, `updated_on`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
query = self.db_pool.runInteraction(self._conditional_child_news_insert, sql=child_news_sql, child_news_item=child_news_item)
query.addErrback(self._handle_error)
return child_news_item
def _conditional_parent_news_insert(self, tx, sql=None, parent_news_item=None):
if parent_news_item.get('created_on') is None:
parent_news_item['created_on'] = datetime.now()
if parent_news_item.get('updated_on') is None:
parent_news_item['updated_on'] = datetime.now()
params = (parent_news_item['order'], parent_news_item['title'], parent_news_item['summary'], parent_news_item['source_type'], parent_news_item['source_name'], parent_news_item['publish_on'], parent_news_item['created_on'], parent_news_item['updated_on'])
tx.execute(sql, params)
def _conditional_child_news_insert(self, tx, sql=None, child_news_item=None):
if child_news_item.get('created_on') is None:
child_news_item['created_on'] = datetime.now()
if child_news_item.get('updated_on') is None:
child_news_item['updated_on'] = datetime.now()
params = (child_news_item['parent_id'], child_news_item['language'], child_news_item['author_name'], child_news_item['site_name'], child_news_item['title'], child_news_item['summary'],
child_news_item['url'], child_news_item['mobile_url'], child_news_item['is_attachment'], child_news_item['attachment_name'], child_news_item['attachment_url'], child_news_item['publish_on'], child_news_item['created_on'], child_news_item['updated_on'])
tx.execute(sql, params)
def _handle_error(self, failure):
print('--------------database operation exception!!-----------------')
print(failure)
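# Hedged usage sketch (added for illustration; the pipeline class and spider
# names are hypothetical): how a Scrapy item pipeline could hand items to the
# helper above. runInteraction returns a deferred that fires off the reactor
# thread, so the pipeline simply forwards the item.
class NewsPipeline(object):
    def __init__(self):
        self.db = DBHelper()

    def process_item(self, item, spider):
        # Assumption: spiders are named 'parent_news' / 'child_news'.
        if spider.name == 'parent_news':
            return self.db.insert_parent_news(item)
        return self.db.insert_child_news(item)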
| 48.131579 | 301 | 0.682887 | 460 | 3,658 | 5.063043 | 0.245652 | 0.130528 | 0.117218 | 0.025762 | 0.324173 | 0.254616 | 0.174753 | 0.037355 | 0.037355 | 0.037355 | 0 | 0.000668 | 0.182067 | 3,658 | 75 | 302 | 48.773333 | 0.777741 | 0.099508 | 0 | 0.083333 | 0 | 0.041667 | 0.251536 | 0.029791 | 0 | 0 | 0 | 0 | 0 | 1 | 0.145833 | false | 0.020833 | 0.083333 | 0.020833 | 0.3125 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69a94db0c5b7ee171f18c600e534c664386f496a | 19,335 | py | Python | f3dasm/simulator/abaqus/abaqus_src/modelling/step.py | gawelk/F3DAS | 4a4e7233add608820de9ee0fd1c369c2fa1d24c1 | [
"BSD-3-Clause"
] | 45 | 2019-10-15T06:08:23.000Z | 2020-08-01T03:15:11.000Z | f3dasm/simulator/abaqus/abaqus_src/modelling/step.py | gawelk/F3DAS | 4a4e7233add608820de9ee0fd1c369c2fa1d24c1 | [
"BSD-3-Clause"
] | 1 | 2020-02-28T10:35:41.000Z | 2020-03-09T13:19:54.000Z | f3dasm/simulator/abaqus/abaqus_src/modelling/step.py | gawelk/F3DAS | 4a4e7233add608820de9ee0fd1c369c2fa1d24c1 | [
"BSD-3-Clause"
] | 10 | 2020-01-10T09:42:58.000Z | 2020-07-20T19:57:15.000Z | '''
Created on 2020-04-08 14:55:21
Last modified on 2020-04-22 16:06:23
Python 2.7.16
v0.1
@author: L. F. Pereira (lfpereira@fe.up.pt)
Main goal
---------
Define abaqus steps.
References
----------
1. Simulia (2015). ABAQUS 2016: Scripting Reference Guide
'''
#%% imports
# abaqus
from abaqusConstants import (LANCZOS, DEFAULT, SOLVER_DEFAULT, OFF,
AUTOMATIC, NONE, DIRECT, RAMP, FULL_NEWTON,
PROPAGATED, LINEAR, ALL, DISPLACEMENT, ON,
AC_ON)
# standard library
import abc
#%% abstract classes
class Step(object):
__metaclass__ = abc.ABCMeta
def __init__(self, name, previous, model=None):
'''
Parameters
----------
name : str
previous : str
Previous step
model : abaqus mdb object
'''
self.name = name
self.previous = previous
# computations
if model:
self.create_step(model)
def create_step(self, model):
# get method
create_step = getattr(model, self.method_name)
# create step
create_step(name=self.name, previous=self.previous, **self.args)
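# Note (added for clarity): create_step dispatches on the subclass attribute
# 'method_name', so e.g. a StaticStep instance ends up calling
# model.StaticStep(name=..., previous=..., **self.args) on the Abaqus mdb
# model object; subclasses therefore only have to assemble self.args before
# calling Step.__init__.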
#%% particular step definition
class StaticStep(Step):
method_name = 'StaticStep'
def __init__(self, name, previous='Initial', model=None, description='',
timePeriod=1., nlgeom=OFF, stabilizationMethod=NONE,
stabilizationMagnitude=2e-4, adiabatic=OFF,
timeIncrementationMethod=AUTOMATIC, maxNumInc=100,
initialInc=None, minInc=None, maxInc=None,
matrixSolver=DIRECT, matrixStorage=SOLVER_DEFAULT,
amplitude=RAMP, extrapolation=LINEAR, fullyPlastic='',
noStop=OFF, maintainAttributes=False,
useLongTermSolution=OFF, solutionTechnique=FULL_NEWTON,
reformKernel=8, convertSDI=PROPAGATED,
adaptiveDampingRatio=0.05, continueDampingFactors=OFF):
'''
Parameters
----------
description : str
Step description.
timePeriod : float
Total time period.
nlgeom : bool
Whether to allow for geometric nonlinearity.
stabilizationMethod : abaqus constant
Stabilization type.
stabilizationMagnitude : float
Damping intensity of the automatic damping algorithm. Ignored if
stabilizationMethod=None.
adiabatic : bool
Whether to perform an adiabatic stress analysis.
timeIncrementationMethod : abaqus constant
maxNumInc : int
Number of incrementations in a step.
initalInc : float
Initial time increment.
minInc : float
Minimum tome increment allowed.
maxInc : float
Maximum time increment allowed.
matrixSolver : abaqus constant
Type of solver.
matrixStorage : abaqus constant
Type of matrix storage.
amplitude : abaqus constant
Amplitude variation for loading magnitudes during the step.
extrapolation : abaqus constant
Type of extrapolation to use in determining the incremental solution
for a nonlinear analysis.
fullyPlastic : str
Region being monitored for fully plastic behavior.
noStop : bool
Whether to accept the solution to an increment after the maximum
number of iterations allowed has been completed, even if the
equilibrium tolerances are not satisfied.
maintainAttributes : bool
Whether to retain attributes from an existing step with the same
name.
useLongTermSolution : bool
Whether to obtain the fully relaxed long-term elastic solution with
time-domain viscoelasticity or the long-term elastic-plastic solution
for two-layer viscoplasticity.
solutionTechnique : abaqus constant
Technique used for solving nonlinear equations.
reformKernel : int
Number of quasi-Newton iterations allowed before the kernel matrix
is reformed.
convertSDI : abaqus constant
Whether to force a new iteration if severe discontinuities occur
during an iteration.
adaptiveDampingRatio : float
Maximum allowable ratio of the stabilization energy to the total
strain energy. Ignored if stabilizationMethod=None.
continueDampingFactors : bool
Whether this step will carry over the damping factors from the
results of the preceding general step.
Notes
-----
-for further informations see p49-134 of [1].
'''
# computations
initialInc = timePeriod if initialInc is None else initialInc
minInc = min(initialInc, timePeriod * 1e-5) if minInc is None else minInc
maxInc = timePeriod if maxInc is None else maxInc
# create args dict
self.args = {'description': description,
'timePeriod': timePeriod,
'nlgeom': nlgeom,
'stabilizationMethod': stabilizationMethod,
'stabilizationMagnitude': stabilizationMagnitude,
'adiabatic': adiabatic,
'timeIncrementationMethod': timeIncrementationMethod,
'maxNumInc': maxNumInc,
'initialInc': initialInc,
'minInc': minInc,
'maxInc': maxInc,
'matrixSolver': matrixSolver,
'matrixStorage': matrixStorage,
'amplitude': amplitude,
'extrapolation': extrapolation,
'fullyPlastic': fullyPlastic,
'noStop': noStop,
'maintainAttributes': maintainAttributes,
'useLongTermSolution': useLongTermSolution,
'solutionTechnique': solutionTechnique,
'reformKernel': reformKernel,
'convertSDI': convertSDI,
'adaptiveDampingRatio': adaptiveDampingRatio,
'continueDampingFactors': continueDampingFactors}
# initialize parent
Step.__init__(self, name, previous, model=model)
class StaticRiksStep(Step):
method_name = 'StaticRiksStep'
def __init__(self, name, previous='Initial', model=None,
description='', nlgeom=OFF, adiabatic=OFF, maxLPF=None,
nodeOn=OFF, maximumDisplacement=0., dof=0, region=None,
timeIncrementationMethod=AUTOMATIC, maxNumInc=100,
totalArcLength=1., initialArcInc=None, minArcInc=None,
maxArcInc=None, matrixStorage=SOLVER_DEFAULT,
extrapolation=LINEAR, fullyPlastic='', noStop=OFF,
maintainAttributes=False, useLongTermSolution=OFF,
convertSDI=PROPAGATED):
'''
Parameters
----------
description : str
Step description.
nlgeom : bool
Whether to allow for geometric nonlinearity.
adiabatic : bool
Whether to perform an adiabatic stress analysis.
maxLPF : float
Maximum value of the load proportionality factor.
nodeOn : bool
Whether to monitor the finishing displacement value at a node.
maximumDisplacement : float
Value of the total displacement (or rotation) at the node node and
degree of freedom that, if crossed during an increment, ends the
step at the current increment. Only applicable when nodeOn=ON.
dof : int
Degree of freedom being monitored. Only applicable when nodeOn=ON
region : abaqus region object
Vertex at which the finishing displacement value is being monitored.
Only applicable when nodeOn=ON.
timeIncrementationMethod : abaqus constant
maxNumInc : int
Number of incrementations in a step.
totalArcLength : float
Total load proportionality factor associated with the load in this
step.
initialArcInc : float
Initial load proportionality factor.
minArcInc : float
Minimum arc length increment allowed.
maxArcInc : float
Maximum arc length increment allowed.
matrixStorage : abaqus constant
Type of matrix storage.
extrapolation : abaqus constant
Type of extrapolation to use in determining the incremental solution
for a nonlinear analysis.
fullyPlastic : str
Region being monitored for fully plastic behavior.
noStop : bool
Whether to accept the solution to an increment after the maximum
number of iterations allowed has been completed, even if the
equilibrium tolerances are not satisfied.
maintainAttributes : bool
Whether to retain attributes from an existing step with the same
name.
useLongTermSolution : bool
Whether to obtain the fully relaxed long-term elastic solution with
time-domain viscoelasticity or the long-term elastic-plastic solution
for two-layer viscoplasticity.
convertSDI : abaqus constant
Whether to force a new iteration if severe discontinuities occur
during an iteration.
Notes
-----
-for further informations see p49-128 of [1].
'''
# computations
initialArcInc = totalArcLength if initialArcInc is None else initialArcInc
minArcInc = min(initialArcInc, 1e-5 * totalArcLength) if minArcInc is None else minArcInc
maxArcInc = totalArcLength if maxArcInc is None else maxArcInc
# create arg dict
self.args = {'description': description,
'nlgeom': nlgeom,
'adiabatic': adiabatic,
'maxLPF': maxLPF,
'nodeOn': nodeOn,
'maximumDisplacement': maximumDisplacement,
'dof': dof,
'timeIncrementationMethod': timeIncrementationMethod,
'maxNumInc': maxNumInc,
'totalArcLength': totalArcLength,
'initialArcInc': initialArcInc,
'minArcInc': minArcInc,
'maxArcInc': maxArcInc,
'matrixStorage': matrixStorage,
'extrapolation': extrapolation,
'fullyPlastic': fullyPlastic,
'noStop': noStop,
'maintainAttributes': maintainAttributes,
'useLongTermSolution': useLongTermSolution,
'convertSDI': convertSDI,
}
if nodeOn is ON and region:
self.args['region'] = region
# initialize parent
Step.__init__(self, name, previous, model=model)
class BuckleStep(Step):
method_name = 'BuckleStep'
def __init__(self, name, previous='Initial', model=None, numEigen=20,
description='', eigensolver=LANCZOS, minEigen=None,
maxEigen=None, vectors=None, maxIterations=30, blockSize=DEFAULT,
maxBlocks=DEFAULT, matrixStorage=SOLVER_DEFAULT,
maintainAttributes=False):
'''
Parameters
----------
numEigen : int
Number of eigenvalues to be estimated.
description : str
Step description.
eigensolver : abaqus constant
minEigen : float
Minimum eigenvalue of interest. Ignored if eigensolver!=LANCZOS.
maxEigen : float
Maximum eigenvalue of interest.
vectors : int
Number of vectors used in each iteration.
maxIterations : int
Maximum number of iterations.
blockSize : abaqus constant or int
Size of the Lanczos block steps. Ignored if eigensolver!=LANCZOS.
maxBlocks : abaqus constant or int
Maximum number of Lanczos block steps within each Lanczos run.
Ignored if eigensolver!=LANCZOS.
matrixStorage : abaqus constant
Type of matrix storage.
maintainAttributes : bool
Whether to retain attributes from an existing step with the same
name.
Notes
-----
-for further informations see p49-10 of [1].
'''
# computations
vectors = min(2 * numEigen, numEigen + 8) if vectors is None else vectors  # Abaqus default: min(2n, n + 8)
# create arg dict
self.args = {'numEigen': numEigen,
'description': description,
'eigensolver': eigensolver,
'minEigen': minEigen,
'maxEigen': maxEigen,
'vectors': vectors,
'maxIterations': maxIterations,
'blockSize': blockSize,
'maxBlocks': maxBlocks,
'matrixStorage': matrixStorage,
'maintainAttributes': maintainAttributes}
# initialize parent
Step.__init__(self, name, previous, model=model)
class FrequencyStep(Step):
method_name = 'FrequencyStep'
def __init__(self, name, previous='Initial', model=None, eigensolver=LANCZOS,
numEigen=ALL, description='', shift=0., minEigen=None,
maxEigen=None, vectors=None, maxIterations=30, blockSize=DEFAULT,
maxBlocks=DEFAULT, normalization=DISPLACEMENT,
propertyEvaluationFrequency=None, projectDamping=ON,
acousticDamping=AC_ON, acousticRangeFactor=1.,
frictionDamping=OFF, matrixStorage=SOLVER_DEFAULT,
maintainAttributes=False, simLinearDynamics=OFF,
residualModes=OFF, substructureCutoffMultiplier=5.,
firstCutoffMultiplier=1.7, secondCutoffMultiplier=1.1,
residualModeRegion=None, residualModeDof=None,
limitSavedEigenVectorRegion=None):
'''
Parameters
----------
eigensolver : abaqus constant
Arguments ignored if eigenSolver!=LANCZOS: blockSize, maxBlocks,
normalization, propertyEvaluationFrequency.
Arguments ignored if eigenSolver!=LANCZOS or AMS: minEigen,
maxEigen, acousticCoupling.
Arguments ignored if eigenSolver!=AMS: projectDamping,
acousticRangeFactor, substructureCutoffMultiplier,
firstCutoffMultiplier, secondCutoffMultiplier, residualModeRegion,
regionalModeDof, limitSavedEigenVectorRegion.
numEigen : int or abaqus constant
Number of eigenvalues to be estimated.
description : str
Step description.
shift : float
Shift point in cycles per time.
minEigen : float
Minimum eigenvalue of interest.
maxEigen : float
Maximum eigenvalue of interest.
vectors : int
Number of vectors used in each iteration.
maxIterations : int
Maximum number of iterations.
blockSize : abaqus constant or int
Size of the Lanczos block steps.
maxBlocks : abaqus constant or int
Maximum number of Lanczos block steps within each Lanczos run.
Ignored if eigensolver!=LANCZOS.
normalization : abaqus constant
Method for normalizing eigenvectors.
propertyEvaluationFrequency : float
Frequency at which to evaluate frequency-dependent properties for
viscoelasticity, springs, and dashpots during the eigenvalue
extraction.
projectDamping : bool
Whether to include projection of viscous and structural damping
operators during AMS eigenvalue extraction.
acousticCoupling : abaqus constant
Type of acoustic-structural coupling in models with acoustic
and structural elements coupled using the *TIE option or in models
with ASI-type elements.
acousticRangeFactor : float
Ratio of the maximum acoustic frequency to the maximum structural
frequency.
frictionDamping : bool
Whether to add to the damping matrix contributions due to friction
effects.
matrixStorage : abaqus constant
Type of matrix storage.
maintainAttributes : bool
Whether to retain attributes from an existing step with the same
name.
simLinearDynamics : bool
Whether to activate the SIM-based linear dynamics procedure.
residualModes : bool
Whether to include residual modes from an immediately preceding
Static, LinearPerturbation step.
substructureCutoffMultiplier : float
Cutoff frequency for substructure eigenproblems.
firstCutoffMultiplier : float
First cutoff frequency for a reduced eigenproblem.
secondCutoffMultiplier : float
Second cutoff frequency for a reduced eigenproblem.
residualModeRegion : sequence of str
Name of a region for which residual modes are requested.
residualModeDof : sequence of int
Degrees of freedom for which residual modes are requested.
limitSavedEigenVectorRegion : region object
Region for which eigenvectors should be saved.
Notes
-----
-for further informations see p49-65 of [1].
'''
# computations
vectors = min(2 * numEigen, numEigen + 8) if vectors is None else vectors  # Abaqus default: min(2n, n + 8)
# create arg dict
self.args = {
'eigensolver': eigensolver,
'numEigen': numEigen,
'description': description,
'shift': shift,
'minEigen': minEigen,
'maxEigen': maxEigen,
'vectors': vectors,
'maxIterations': maxIterations,
'blockSize': blockSize,
'maxBlocks': maxBlocks,
'normalization': normalization,
'propertyEvaluationFrequency': propertyEvaluationFrequency,
'projectDamping': projectDamping,
'acousticDamping': acousticDamping,
'acousticRangeFactor': acousticRangeFactor,
'frictionDamping': frictionDamping,
'matrixStorage': matrixStorage,
'maintainAttributes': maintainAttributes,
'simLinearDynamics': simLinearDynamics,
'residualModes': residualModes,
'substructureCutoffMultiplier': substructureCutoffMultiplier,
'firstCutoffMultiplier': firstCutoffMultiplier,
'secondCutoffMultiplier': secondCutoffMultiplier,
'residualModeRegion': residualModeRegion,
'residualModeDof': residualModeDof,
'limitSavedEigenVectorRegion': limitSavedEigenVectorRegion}
# initialize parent
Step.__init__(self, name, previous, model=model)
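# Hedged usage sketch (added for illustration; runnable only inside the Abaqus
# scripting environment, and 'Model-1' is an assumed model name):
#
# from abaqus import mdb
# model = mdb.models['Model-1']
# StaticStep('load', previous='Initial', model=model, nlgeom=ON, initialInc=0.01)
# BuckleStep('buckle', previous='load', model=model, numEigen=10)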
| 41.670259 | 97 | 0.606206 | 1,672 | 19,335 | 6.975478 | 0.222488 | 0.027609 | 0.018949 | 0.015433 | 0.415845 | 0.383349 | 0.359513 | 0.344766 | 0.329847 | 0.293149 | 0 | 0.007933 | 0.33504 | 19,335 | 463 | 98 | 41.760259 | 0.899199 | 0.45684 | 0 | 0.33758 | 0 | 0 | 0.130941 | 0.02528 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038217 | false | 0 | 0.012739 | 0 | 0.11465 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69aa431f75da8c8368149a2a99fcbf41b5cc11bd | 9,756 | py | Python | Fuzzy_clustering/ver_tf2/scripts_for_tf2/RBFNN_model.py | joesider9/forecasting_library | db07ff8f0f2693983058d49004f2fc6f8849d197 | [
"Apache-2.0"
] | null | null | null | Fuzzy_clustering/ver_tf2/scripts_for_tf2/RBFNN_model.py | joesider9/forecasting_library | db07ff8f0f2693983058d49004f2fc6f8849d197 | [
"Apache-2.0"
] | null | null | null | Fuzzy_clustering/ver_tf2/scripts_for_tf2/RBFNN_model.py | joesider9/forecasting_library | db07ff8f0f2693983058d49004f2fc6f8849d197 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
from tensorflow.python.ops import math_ops
from tensorflow.python.keras import backend as K
from tensorflow.python.framework import ops
from sklearn.cluster import KMeans
from sklearn.model_selection import train_test_split
from clustering.algorithms import FCV
from tqdm import tqdm
import numpy as np
import os
import pandas as pd
from Fuzzy_clustering.ver_0.utils_for_forecast import split_continuous
from util_database import write_database
from Fuzzy_clustering.ver_tf2.Forecast_model import forecast_model
class RBF_model(tf.keras.Model):
def __init__(self, num_centr):
super(RBF_model, self).__init__()
self.num_centr = num_centr
def find_centers(self,X_train):
self.N, self.D = X_train.shape
self.batch_size = self.N
try:
centers = FCV(X_train, n_clusters=self.num_centr, r=4).optimize()
c = centers.C
except Exception:  # fall back to k-means if FCV fails
c = KMeans(n_clusters=self.num_centr, random_state=0).fit(X_train)
c = c.cluster_centers_
centroids = c.astype(np.float32)
return centroids
def initialize(self,inputs):
centroids = self.find_centers(inputs)
cnt = pd.DataFrame(centroids, index=['c' + str(i) for i in range(centroids.shape[0])],
columns=['v' + str(i) for i in range(centroids.shape[1])])
var_init = pd.DataFrame(columns=['v' + str(i) for i in range(centroids.shape[1])])
for r in cnt.index:
v = (cnt.loc[r] - cnt.drop(r)).min()
v[v == 0] = 0.0001
v.name = r
var_init = var_init.append(v)
var_init = tf.convert_to_tensor(var_init.values, dtype=tf.float32, name='var_init')
self.var = tf.Variable(var_init,
dtype=tf.float32, name='RBF_variance')
self.centroids = tf.convert_to_tensor(centroids, dtype=tf.float32, name='centroids')
@tf.function
def lin_out(self, x, y):
return tf.linalg.lstsq(x, y, l2_regularizer=0)
@tf.function
def rbf_map(self, x):
s = tf.shape(x)
d1 = tf.transpose(tf.tile(tf.expand_dims(x, 0), [self.num_centr, 1, 1]), perm=[1, 0, 2]) - tf.tile(
tf.expand_dims(self.centroids, 0), [s[0], 1, 1])
d = tf.sqrt(
tf.reduce_sum(tf.pow(tf.multiply(d1, tf.tile(tf.expand_dims(self.var, 0), [s[0], 1, 1])), 2), axis=2))
return tf.cast(tf.exp(tf.multiply(tf.constant(-1, dtype=tf.float32), tf.square(d))), tf.float32)
def call(self, inputs, training=None, mask=None):
if training:
x = inputs[:, :-1]
y = tf.expand_dims(inputs[:, -1], 1)
else:
x = inputs
phi = self.rbf_map(x)
if training:
self.w = self.lin_out(phi, y)
h = tf.matmul(phi, self.w)
else:
h = tf.matmul(phi, self.w)
return h
class sum_square_loss(tf.keras.losses.Loss):
def __init__(self, name='SSE', **kwargs):
super(sum_square_loss, self).__init__(name=name, **kwargs)
def call(self, y_true, y_pred, sample_weight=None):
return math_ops.reduce_sum(math_ops.square(y_true - y_pred))
class RBF_train():
def __init__(self, path_model, rated=None, max_iterations=10000,):
self.path_model = path_model
self.rated = rated
self.max_iterations = max_iterations
def distance(self, obj_new, obj_old, obj_max, obj_min):
if np.any(np.isinf(obj_old)):
obj_old = obj_new.copy()
obj_max = obj_new.copy()
return True, obj_old, obj_max, obj_min
if np.any(np.isinf(obj_min)) and not np.all(obj_max == obj_new):
obj_min = obj_new.copy()
d = 0
for i in range(obj_new.shape[0]):
if obj_max[i] < obj_new[i]:
obj_max[i] = obj_new[i]
if obj_min[i] > obj_new[i]:
obj_min[i] = obj_new[i]
d += (obj_new[i] - obj_old[i]) / (obj_max[i] - obj_min[i])
if d < 0:
obj_old = obj_new.copy()
return True, obj_old, obj_max, obj_min
else:
return False, obj_old, obj_max, obj_min
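# Illustrative behaviour of distance() (made-up numbers, not from a real run):
# with obj_old = [1, 1, 1, 1], obj_max = [2, 2, 2, 2], obj_min = [0, 0, 0, 0],
# a new objective [0.9, 1.1, 0.8, 1.0] gives d = (-0.1 + 0.1 - 0.2 + 0.0) / 2 < 0,
# so the candidate is accepted: the sum of range-normalized deltas must be
# negative, i.e. the four validation metrics must improve jointly.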
def train(self, X_train, y_train, X_val, y_val, X_test, y_test, num_centr, lr, gpu_id=[-1]):
tf.config.experimental.set_visible_devices(gpu_id[0], 'GPU')  # expects a PhysicalDevice (see __main__); the default gpu_id=[-1] would fail here
tf.config.experimental.set_memory_growth(gpu_id[0], True)
tf.config.set_soft_device_placement(True)
tf.debugging.set_log_device_placement(True)
self.N, self.D = X_train.shape
X_val = np.vstack((X_val, X_test))
y_val = np.vstack((y_val, y_test))
X_train = X_train.astype('float32',casting='same_kind')
X_val = X_val.astype('float32',casting='same_kind')
y_train = y_train.astype('float32',casting='same_kind')
y_val = y_val.astype('float32',casting='same_kind')
batch_size = self.N
model = RBF_model(num_centr)
model.initialize(X_train)
optimizer = tf.keras.optimizers.Adam(lr)
batches = [np.random.choice(self.N, batch_size, replace=False) for _ in range(self.max_iterations)]
obj_old = np.inf * np.ones(4)
obj_max = np.inf * np.ones(4)
obj_min = np.inf * np.ones(4)
if self.rated is None:
loss_fn = sum_square_loss()
# note: despite the 'mae'/'mse' names, these wrap MAPE and MSLE
mae = tf.keras.metrics.MeanAbsolutePercentageError(name='mae')
mse = tf.keras.metrics.MeanSquaredLogarithmicError(name='mse')
rms = tf.keras.metrics.RootMeanSquaredError(name='rms')
else:
# both branches are currently identical; 'rated' is accepted but unused here
loss_fn = sum_square_loss()
mae = tf.keras.metrics.MeanAbsolutePercentageError(name='mae')
mse = tf.keras.metrics.MeanSquaredLogarithmicError(name='mse')
rms = tf.keras.metrics.RootMeanSquaredError(name='rms')
res = dict()
self.best_weights = None
best_iteration = 0
best_glob_iterations = 0
max_iterations = self.max_iterations
ext_iterations = self.max_iterations
train_flag = True
patience = 20000
wait = 0
while train_flag:
for i in tqdm(range(max_iterations)):
if i % 500 == 0:
with tf.GradientTape() as tape:
predictions = model(np.hstack((X_train,y_train)), training=True)
loss = loss_fn(y_train, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
pred_val = model(X_val, training=False)
val_mae = mae(y_val, pred_val)
val_mse = mse(y_val, pred_val)
val_rms = rms(y_val, pred_val)
val_sse = loss_fn(y_val, pred_val)
obj_new = np.array([val_mae, val_mse, val_sse, val_rms])
flag, obj_old, obj_max, obj_min = self.distance(obj_new, obj_old, obj_max, obj_min)
if flag:
res[str(i)] = obj_old
print(val_mae.numpy())
self.best_weights = model.get_weights()
best_iteration = i
wait = 0
else:
wait += 1
if wait > patience:
train_flag = False
break
else:
with tf.GradientTape() as tape:
predictions = model(np.hstack((X_train,y_train)), training=True)
loss = loss_fn(y_train, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
wait += 1
best_glob_iterations = ext_iterations + best_iteration
if (max_iterations - best_iteration) <= 10000:
ext_iterations += 20000
max_iterations = 20000
else:
best_glob_iterations = ext_iterations + best_iteration
train_flag = False
model.set_weights(self.best_weights)
model.save_weights(self.path_model + '/rbf_model.h5')
model_dict = dict()
model_dict['centroids'] = model.centroids.numpy()
model_dict['Radius'] = model.var.numpy()
model_dict['n_vars'] = self.D
model_dict['num_centr'] = num_centr
model_dict['W'] = model.w.numpy()
model_dict['best_iteration'] = best_glob_iterations
model_dict['metrics'] = obj_old
model_dict['error_func'] = res
print("Total accuracy cnn: %s" % obj_old[0])
return obj_old[3], model.centroids.numpy(), model.var.numpy(), model.w.numpy(), model_dict
if __name__ == '__main__':
cluster_dir = 'D:/APE_net_ver2/Regressor_layer/rule.2'
data_dir = 'D:/APE_net_ver2/Regressor_layer/rule.2/data'
rated = None
X = np.load(os.path.join(data_dir, 'X_train.npy'))
y = np.load(os.path.join(data_dir, 'y_train.npy'))
static_data = write_database()
forecast = forecast_model(static_data, use_db=False)
forecast.load()
X = X[:, 0:-1]
X = forecast.sc.transform(X)
y = forecast.scale_y.transform(y)
X_train, X_test, y_train, y_test = split_continuous(X, y, test_size=0.15, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.15, random_state=42)
rbf=RBF_train(cluster_dir+'/RBFNN',rated=rated, max_iterations=1000)
rbf.train(X_train, y_train, X_val, y_val, X_test, y_test, 12, 0.0001, gpu_id=tf.config.experimental.list_physical_devices('GPU')) | 41.514894 | 134 | 0.597581 | 1,353 | 9,756 | 4.05765 | 0.189948 | 0.017486 | 0.014754 | 0.013115 | 0.344262 | 0.308379 | 0.24827 | 0.207468 | 0.202186 | 0.196357 | 0 | 0.01883 | 0.2869 | 9,756 | 235 | 134 | 41.514894 | 0.770303 | 0 | 0 | 0.220588 | 0 | 0 | 0.034539 | 0.008302 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053922 | false | 0 | 0.068627 | 0.009804 | 0.181373 | 0.009804 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69ab65349b051891a98e709bb7854ddb4d9f2d1d | 14,159 | py | Python | training/DeepJet_callbacks.py | AlexDeMoor/DeepJetCore | d3aa4a8b8cf4b203878feeb36c4b0a8cedd9c215 | [
"Apache-2.0"
] | null | null | null | training/DeepJet_callbacks.py | AlexDeMoor/DeepJetCore | d3aa4a8b8cf4b203878feeb36c4b0a8cedd9c215 | [
"Apache-2.0"
] | null | null | null | training/DeepJet_callbacks.py | AlexDeMoor/DeepJetCore | d3aa4a8b8cf4b203878feeb36c4b0a8cedd9c215 | [
"Apache-2.0"
] | null | null | null | '''
Created on 7 Apr 2017
@author: jkiesele
'''
import matplotlib
matplotlib.use('Agg')
from .ReduceLROnPlateau import ReduceLROnPlateau
from ..evaluation import plotLoss
from ..evaluation import plotBatchLoss
import matplotlib.pyplot as plt
import numpy as np
from keras.callbacks import Callback, EarlyStopping,History,ModelCheckpoint #, ReduceLROnPlateau # , TensorBoard
# loss per epoch
from time import time
from pdb import set_trace
import json
from keras import backend as K
import os
class plot_loss_or_metric(Callback):
def __init__(self,outputDir,metrics):
self.metrics=metrics
self.outputDir=outputDir
def on_epoch_end(self,epoch, logs={}):
lossfile=os.path.join( self.outputDir, 'full_info.log')
allinfo_history=None
with open(lossfile, 'r') as infile:
allinfo_history=json.load(infile)
nepochs=len(allinfo_history)
allnumbers=[[] for i in range(len(self.metrics))]
epochs=[]
for i in range(nepochs):
epochs.append(i)
for j in range(len(self.metrics)):
allnumbers[j].append(allinfo_history[i][self.metrics[j]])
import matplotlib.pyplot as plt
for j in range(len(self.metrics)):
f = plt.figure()
plt.plot(epochs,allnumbers[j],'r',label=self.metrics[j])
plt.ylabel(self.metrics[j])
plt.xlabel('epoch')
#plt.legend()
f.savefig(self.outputDir+'/'+self.metrics[j]+'.pdf')
plt.close()
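# full_info.log, as written by newline_callbacks_begin below, is a JSON list
# with one dict per epoch, e.g. [{"loss": 0.51, "val_loss": 0.62, ...}, ...]
# (values illustrative); this callback just picks the requested metric keys
# out of that history and plots them against the epoch index.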
class newline_callbacks_begin(Callback):
def __init__(self,outputDir,plotLoss=False):
self.outputDir=outputDir
self.loss=[]
self.val_loss=[]
self.full_logs=[]
self.plotLoss=plotLoss
def on_epoch_end(self,epoch, logs={}):
if len(logs)<1:
return
import os
lossfile=os.path.join( self.outputDir, 'losses.log')
print('\n***callbacks***\nsaving losses to '+lossfile)
# problem with new keras version calling callbacks even after exceptions
if logs.get('loss') is None:
return
if logs.get('val_loss') is None:
return
self.loss.append(logs.get('loss'))
self.val_loss.append(logs.get('val_loss'))
f = open(lossfile, 'a')
f.write(str(logs.get('loss')))
f.write(" ")
f.write(str(logs.get('val_loss')))
f.write("\n")
f.close()
learnfile=os.path.join( self.outputDir, 'learn.log')
try:
with open(learnfile, 'a') as f:
f.write(str(float(K.get_value(self.model.optimizer.lr)))+'\n')
lossfile=os.path.join( self.outputDir, 'full_info.log')
if os.path.isfile(lossfile):
with open(lossfile, 'r') as infile:
self.full_logs=json.load(infile)
normed = {}
for vv in logs:
normed[vv] = float(logs[vv])
self.full_logs.append(normed)
with open(lossfile, 'w') as out:
out.write(json.dumps(self.full_logs))
except Exception:
pass  # best-effort bookkeeping; never abort training over log I/O
if self.plotLoss:
try:
plotLoss(self.outputDir+'/losses.log',self.outputDir+'/losses.pdf',[])
except Exception:
pass  # plotting failures should not stop training
class batch_callback_begin(Callback):
def __init__(self,outputDir,plotLoss=False,plot_frequency=-1,batch_frequency=1):
self.outputDir=outputDir
self.loss=[]
self.val_loss=[]
self.full_logs=[]
self.plotLoss=plotLoss
self.plot_frequency=plot_frequency
self.plotcounter=0
self.batch_frequency=batch_frequency
self.batchcounter=0
def read(self):
import os
if not os.path.isfile(self.outputDir+'/batch_losses.log') :
return
blossfile=os.path.join( self.outputDir, 'batch_losses.log')
f = open(blossfile, 'r')
self.loss = []
for line in f:
if len(line)<1: continue
tl=float(line.split(' ')[0])
self.loss.append(tl)
f.close()
def on_batch_end(self,batch,logs={}):
if len(logs)<1:
return
if logs.get('loss') is None:
return
self.batchcounter += 1
if self.batch_frequency != self.batchcounter:
return
self.batchcounter=0
self.loss.append(logs.get('loss'))
if self.plot_frequency<0:
return
self.plotcounter+=1
if self.plot_frequency == self.plotcounter:
self.plot()
self.plotcounter = 0
def _plot(self):
if len(self.loss) < 2:
return
batches = [self.batch_frequency*i for i in range(len(self.loss))]
plt.close()
plt.plot(batches,self.loss,'r-',label='loss')
def smooth(y, box_pts):
box = np.ones(box_pts)/box_pts
y_smooth = np.convolve(y, box, mode='same')
return y_smooth
if len(batches) > 50:
smoothed = smooth(self.loss,50)
#remove where the simple smoothing doesn't give reasonable results
plt.plot(batches[25:-25],smoothed[25:-25],'g-',label='smoothed')
plt.legend()
plt.xlabel("# batches")
plt.ylabel("training loss")
plt.yscale("log")
plt.savefig(self.outputDir+'/batch_losses.pdf')
plt.close()
def plot(self):
self._plot()
def save(self):
import os
blossfile=os.path.join( self.outputDir, 'batch_losses.log')
f = open(blossfile, 'w')
for i in range(len(self.loss)):
f.write(str(self.loss[i]))
f.write("\n")
self.loss=[]
self.val_loss=[]
f.close()
def on_epoch_end(self,epoch,logs={}):
self.plot()
self.save()
def on_epoch_begin(self, epoch, logs=None):
self.read()
if len(self.loss):
self.plot()
class newline_callbacks_end(Callback):
def on_epoch_end(self,epoch, logs={}):
print('\n***callbacks end***\n')
class Losstimer(Callback):
def __init__(self, every = 50):
self.points = []
self.every = every
self.counter=0
def on_train_begin(self, logs):
self.start = time()
def on_batch_end(self, batch, logs={}):
if (self.counter != self.every):
self.counter+=1
return
self.counter = 0
elapsed = time() - self.start
cop = {}
for i, j in logs.items():
cop[i] = float(j)
cop['elapsed'] = elapsed
self.points.append(cop)
class checkTokens_callback(Callback):
def __init__(self,cutofftime_hours=48):
self.cutofftime_hours=cutofftime_hours
def on_epoch_begin(self, epoch, logs=None):
from .tokenTools import checkTokens
checkTokens(self.cutofftime_hours)
class saveCheckPointDeepJet(Callback):
'''
Slight extension of the normal checkpoint to multiple checkpoints per epoch
'''
def __init__(self,outputFile,model,check_n_batches=-1,nrotate=3):
self.outputFile=outputFile
self.djmodel=model
self.counter=0
self.rotate_idx=0
self.rotations=[str(i) for i in range(nrotate)]
self.check_n_batches=check_n_batches
def on_batch_end(self,batch,logs={}):
if self.check_n_batches < 1:
return
if self.counter < self.check_n_batches:
self.counter+=1
return
self.djmodel.save(self.outputFile[:-3]+'_rot_'+self.rotations[self.rotate_idx]+'.h5')
self.djmodel.save(self.outputFile)
self.counter=0
self.rotate_idx += 1
if self.rotate_idx >= len(self.rotations):
self.rotate_idx=0
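# With the defaults (nrotate=3) and an outputFile like
# 'KERAS_check_model_last.h5', this cycles through ..._rot_0.h5, ..._rot_1.h5
# and ..._rot_2.h5 every check_n_batches batches, refreshing the plain
# outputFile each time, so a crash can corrupt at most one rotation slot.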
def on_epoch_end(self,epoch, logs={}):
if len(logs)<1:
return
if logs.get('loss') is None:
return
if logs.get('val_loss') is None:
return
self.djmodel.save(self.outputFile)
class DeepJet_callbacks(object):
def __init__(self,
model,
stop_patience=-1,
lr_factor=0.5,
lr_patience=-1,
lr_epsilon=0.001,
lr_cooldown=4,
lr_minimum=1e-5,
outputDir='',
minTokenLifetime=5,
checkperiod=10,
backup_after_batches=-1,
checkperiodoffset=0,
plotLossEachEpoch=True,
additional_plots=None,
batch_loss = False):
self.nl_begin=newline_callbacks_begin(outputDir,plotLossEachEpoch)
self.nl_end=newline_callbacks_end()
self.callbacks=[self.nl_begin]
if batch_loss:
self.batch_callback=batch_callback_begin(outputDir,plotLossEachEpoch)
self.callbacks.append(self.batch_callback)
if minTokenLifetime>0:
self.tokencheck=checkTokens_callback(minTokenLifetime)
self.callbacks.append(self.tokencheck)
if lr_patience>0:
self.reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=lr_factor, patience=lr_patience,
mode='min', verbose=1, epsilon=lr_epsilon,
cooldown=lr_cooldown, min_lr=lr_minimum)
self.callbacks.append(self.reduce_lr)
self.modelbestcheck=ModelCheckpoint(outputDir+"/KERAS_check_best_model.h5",
monitor='val_loss', verbose=1,
save_best_only=True, save_weights_only=False)
self.callbacks.append(self.modelbestcheck)
if checkperiod>0:
self.modelcheckperiod=ModelCheckpoint(outputDir+"/KERAS_check_model_block_"+str(checkperiodoffset)+"_epoch_{epoch:02d}.h5",
verbose=1,period=checkperiod, save_weights_only=False)
self.callbacks.append(self.modelcheckperiod)
self.modelcheck=saveCheckPointDeepJet(outputDir+"/KERAS_check_model_last.h5",model,backup_after_batches)
self.callbacks.append(self.modelcheck)
if stop_patience>0:
self.stopping = EarlyStopping(monitor='val_loss',
patience=stop_patience,
verbose=1, mode='min')
self.callbacks.append(self.stopping)
if additional_plots:
self.additionalplots = plot_loss_or_metric(outputDir,additional_plots)
self.callbacks.append(self.additionalplots)
self.history=History()
self.timer = Losstimer()
self.callbacks.extend([ self.nl_end, self.history,self.timer])
from DeepJetCore.TrainData import TrainData
from DeepJetCore.dataPipeline import TrainDataGenerator
class PredictCallback(Callback):
def __init__(self,
samplefile,
function_to_apply=None, #needs to be function(counter,[model_input], [predict_output], [truth])
after_n_batches=50,
batchsize=10,
on_epoch_end=False,
use_event=0,
decay_function=None,
offset=0
):
super(PredictCallback, self).__init__()
self.samplefile=samplefile
self.function_to_apply=function_to_apply
self.counter=0
self.call_counter=offset
self.decay_function=decay_function
self.after_n_batches=after_n_batches
self.run_on_epoch_end=on_epoch_end
if self.run_on_epoch_end and self.after_n_batches>=0:
print('PredictCallback: can only be used on epoch end OR after n batches, falling back to epoch end')
self.after_n_batches=0
td=TrainData()
td.readFromFile(samplefile)
if use_event>=0:
td.skim(use_event)
self.batchsize = 1
self.td = td
self.gen = TrainDataGenerator()
self.gen.setBatchSize(batchsize)
self.gen.setSkipTooLargeBatches(False)
def reset(self):
self.call_counter=0
def predict_and_call(self,counter):
self.gen.setBuffer(self.td)
predicted = self.model.predict_generator(self.gen.feedNumpyData(),
steps=self.gen.getNBatches(),
max_queue_size=1,
use_multiprocessing=False,
verbose=2)
if not isinstance(predicted, list):
predicted=[predicted]
self.function_to_apply(self.call_counter,self.td.copyFeatureListToNumpy(),
predicted,self.td.copyTruthListToNumpy())
self.call_counter+=1
def on_epoch_end(self, epoch, logs=None):
self.counter=0
if not self.run_on_epoch_end: return
self.predict_and_call(epoch)
def on_batch_end(self, batch, logs=None):
if self.after_n_batches<=0: return
self.counter+=1
if self.counter>self.after_n_batches:
self.counter=0
self.predict_and_call(batch)
if self.decay_function is not None:
self.after_n_batches=self.decay_function(self.call_counter)
| 31.817978 | 136 | 0.555689 | 1,567 | 14,159 | 4.854499 | 0.179324 | 0.029052 | 0.015775 | 0.024188 | 0.250296 | 0.184041 | 0.150519 | 0.121073 | 0.086368 | 0.066912 | 0 | 0.010539 | 0.343245 | 14,159 | 444 | 137 | 31.88964 | 0.807506 | 0.027121 | 0 | 0.317073 | 0 | 0 | 0.040742 | 0.008949 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085366 | false | 0.006098 | 0.060976 | 0 | 0.222561 | 0.009146 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69ae283d0ff5b55a3dfbfd1de32e6dac0789c024 | 9,028 | py | Python | scripts/matriz_confusion.py | lmc00/tfg_en_desarrollo | 30e61f4bb3f060f7468b1bb94930fcbe0d0f92ae | [
"Apache-2.0"
] | null | null | null | scripts/matriz_confusion.py | lmc00/tfg_en_desarrollo | 30e61f4bb3f060f7468b1bb94930fcbe0d0f92ae | [
"Apache-2.0"
] | null | null | null | scripts/matriz_confusion.py | lmc00/tfg_en_desarrollo | 30e61f4bb3f060f7468b1bb94930fcbe0d0f92ae | [
"Apache-2.0"
] | 1 | 2020-11-25T15:16:36.000Z | 2020-11-25T15:16:36.000Z | # Script to generate the confusion matrix.
# You have to edit the TIMESTAMP parameter to set the one of the model you want to evaluate.
# The first code block is copied from Ignacio's, but with a "NOTE" ("INCISO") every so often, which are necessary
import os
import json
import numpy as np
import matplotlib.pylab as plt
import seaborn
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
from tensorflow.keras.models import load_model
from imgclas.data_utils import load_image, load_data_splits, load_class_names
from imgclas.test_utils import predict
from imgclas import paths, plot_utils, utils, test_utils
# User parameters to set
TIMESTAMP = input("Indica el timestamp. Sin espacios. Mismo formato que en models: ") # timestamp of the model
MODEL_NAME = 'final_model.h5' # model to use to make the prediction
TOP_K = 2 # number of top classes predictions to save
# Set the timestamp
paths.timestamp = TIMESTAMP
# Load the data
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++antes de class names")
class_names = load_class_names(splits_dir=paths.get_ts_splits_dir()) # NOTE: these are the classes the model had
# at the time you trained it (given by the timestamp), not the ones you currently have in data/dataset_files
print("----------------------------------------------------------------------despues de class names")
# Load training configuration
conf_path = os.path.join(paths.get_conf_dir(), 'conf.json')
with open(conf_path) as f:
conf = json.load(f)
# Load the model
print("--------------------------------------------------------------------------------------------------------------------------------------------------------------------------antes")
model = load_model(os.path.join(paths.get_checkpoints_dir(), MODEL_NAME))
#model = load_model(os.path.join(paths.get_checkpoints_dir(), MODEL_NAME), custom_objects=utils.get_custom_objects())
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++después")
# NOTE: the part that follows is based on the "predicting a datasplit txt file" section Ignacio includes in notebook
# 3.0 . This prior preparation is necessary to compute the confusion matrix.
#
# CAREFUL: what you are now going to feed it to test the model given by the timestamp IS in data/dataset_files
# AND IT IS WHAT YOU WANT TO TEST THE MODEL WITH.
SPLIT_NAME = input("Indica el nombre del split con el que evaluas. Es de data/dataset_files. Ejemplos: val train ...: ")
# Load the data
X, y = load_data_splits(splits_dir=paths.get_ts_splits_dir(),
im_dir=conf['general']['images_directory'],
split_name=SPLIT_NAME)
# Predict
# Add this if you want to disable augmentation during validation:
#
print(conf['augmentation']['val_mode'])
pred_lab, pred_prob = predict(model, X, conf, top_K=TOP_K, filemode='local')
# Now we save the predictions
# Save the predictions
pred_dict = {'filenames': list(X),
'pred_lab': pred_lab.tolist(),
'pred_prob': pred_prob.tolist()}
if y is not None:
pred_dict['true_lab'] = y.tolist()
# We skip the part that saves it to JSON because we are going to use it right away.
# We import this warnings filter because Ignacio suggests it.
import warnings
warnings.filterwarnings("ignore") # To ignore UndefinedMetricWarning: [Recall/Precision/F-Score] is ill-defined and being set to 0.0 in labels with no [true/predicted] samples.
# NOTE: print several relevant metrics FOR THE SELECTED SPLIT
true_lab, pred_lab = np.array(pred_dict['true_lab']), np.array(pred_dict['pred_lab'])
top1 = test_utils.topK_accuracy(true_lab, pred_lab, K=1)
top2 = test_utils.topK_accuracy(true_lab, pred_lab, K=2)
# NOTE: commented out because we want the top-2 instead: top5 = test_utils.topK_accuracy(true_lab, pred_lab, K=5)
# NOTE: we also save it to a .txt file, whose name we ask the user for
nombre_metricas = input("Ponle nombre al fichero con las métricas relevantes. No especifiques formato, va a ser .txt por defecto: " )
nombre_metricas = nombre_metricas + ".txt"
m = open(nombre_metricas,'w')
print('Top1 accuracy: {:.1f} %'.format(top1 * 100))
m.write('Top1 accuracy: {:.1f} %'.format(top1 * 100) + '\n')
print('Top2 accuracy: {:.1f} %'.format(top2 * 100))
m.write('Top2 accuracy: {:.1f} %'.format(top2 * 100) + '\n')
# NOTE: commented out because we now want top-2: print('Top5 accuracy: {:.1f} %'.format(top5 * 100))
#m.write('Top5 accuracy: {:.1f} %'.format(top5 * 100) + '\n')
labels = range(len(class_names))
print('\n')
m.write('\n')
print('Micro recall: {:.1f} %'.format(100 * recall_score(true_lab, pred_lab[:, 0], labels=labels, average='micro')))
m.write('Micro recall: {:.1f} %'.format(100 * recall_score(true_lab, pred_lab[:, 0], labels=labels, average='micro')) + '\n')
print('Macro recall: {:.1f} %'.format(100 * recall_score(true_lab, pred_lab[:, 0], labels=labels, average='macro')))
m.write('Macro recall: {:.1f} %'.format(100 * recall_score(true_lab, pred_lab[:, 0], labels=labels, average='macro')) + '\n')
print('Macro recall (no labels): {:.1f} %'.format(100 * recall_score(true_lab, pred_lab[:, 0], average='macro')))
m.write('Macro recall (no labels): {:.1f} %'.format(100 * recall_score(true_lab, pred_lab[:, 0], average='macro')) + '\n')
print('Weighted recall: {:.1f} %'.format(100 * recall_score(true_lab, pred_lab[:, 0], labels=labels, average='weighted')))
m.write('Weighted recall: {:.1f} %'.format(100 * recall_score(true_lab, pred_lab[:, 0], labels=labels, average='weighted')) + '\n')
print('\n')
m.write('\n')
print('Micro precision: {:.1f} %'.format(100 * precision_score(true_lab, pred_lab[:, 0], labels=labels, average='micro')))
m.write('Micro precision: {:.1f} %'.format(100 * precision_score(true_lab, pred_lab[:, 0], labels=labels, average='micro')) + '\n')
print('Macro precision: {:.1f} %'.format(100 * precision_score(true_lab, pred_lab[:, 0], labels=labels, average='macro')))
m.write('Macro precision: {:.1f} %'.format(100 * precision_score(true_lab, pred_lab[:, 0], labels=labels, average='macro')) + '\n')
print('Macro precision (no labels): {:.1f} %'.format(100 * precision_score(true_lab, pred_lab[:, 0], average='macro')))
m.write('Macro precision (no labels): {:.1f} %'.format(100 * precision_score(true_lab, pred_lab[:, 0], average='macro')) + '\n')
print('Weighted precision: {:.1f} %'.format(100 * precision_score(true_lab, pred_lab[:, 0], labels=labels, average='weighted')))
m.write('Weighted precision: {:.1f} %'.format(100 * precision_score(true_lab, pred_lab[:, 0], labels=labels, average='weighted')) + '\n')
print('\n')
m.write('\n')
print('Micro F1 score: {:.1f} %'.format(100 * f1_score(true_lab, pred_lab[:, 0], labels=labels, average='micro')))
m.write('Micro F1 score: {:.1f} %'.format(100 * f1_score(true_lab, pred_lab[:, 0], labels=labels, average='micro')) + '\n')
print('Macro F1 score: {:.1f} %'.format(100 * f1_score(true_lab, pred_lab[:, 0], labels=labels, average='macro')))
m.write('Macro F1 score: {:.1f} %'.format(100 * f1_score(true_lab, pred_lab[:, 0], labels=labels, average='macro')) + '\n')
print('Macro F1 score (no labels): {:.1f} %'.format(100 * f1_score(true_lab, pred_lab[:, 0], average='macro')))
m.write('Macro F1 score (no labels): {:.1f} %'.format(100 * f1_score(true_lab, pred_lab[:, 0], average='macro')) + '\n')
print('Weighted F1 score: {:.1f} %'.format(100 * f1_score(true_lab, pred_lab[:, 0], labels=labels, average='weighted')))
m.write('Weighted F1 score: {:.1f} %'.format(100 * f1_score(true_lab, pred_lab[:, 0], labels=labels, average='weighted')) + '\n')
m.close()
# NOTE: now on to the confusion matrix!!
def plt_conf_matrix(conf_mat, labels=False):
fig = plt.figure(figsize=(20, 20))
hm = seaborn.heatmap(conf_mat, annot=False, square=True, cbar_kws={'fraction':0.046, 'pad':0.04},
xticklabels=labels, yticklabels=labels, cmap="YlGnBu")
fontsize = None
hm.yaxis.set_ticklabels(hm.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=fontsize)
hm.xaxis.set_ticklabels(hm.xaxis.get_ticklabels(), rotation=90, ha='right', fontsize=fontsize)
plt.ylabel('True label')
plt.xlabel('Predicted label')
y_true, y_pred = np.array(pred_dict['true_lab']), np.array(pred_dict['pred_lab'])[:, 0] # Here, taking [:, 0] ensures that, out of all the
# candidate labels, you keep the top-ranked one as the predicted label
conf_mat = confusion_matrix(y_true, y_pred, labels=range(len(class_names)), sample_weight=None)
normed_conf = conf_mat / np.sum(conf_mat, axis=1)[:, np.newaxis]
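# Row-normalizing makes every row of the matrix sum to 1, so cell (i, j) reads
# as the fraction of samples of true class i predicted as class j (the
# diagonal is then the per-class recall).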
# plt_conf_matrix(conf_mat)
plt_conf_matrix(normed_conf, labels=class_names)
nombre_confusion = input("Ponle nombre a la matriz de confusion. No especifiques formato: " )
plt.savefig(nombre_confusion,dpi = 50,format = "png", bbox_inches = 'tight')
| 57.503185 | 184 | 0.671577 | 1,311 | 9,028 | 4.475973 | 0.234172 | 0.039366 | 0.049421 | 0.066803 | 0.439673 | 0.421097 | 0.39349 | 0.380879 | 0.380879 | 0.362474 | 0 | 0.026356 | 0.134249 | 9,028 | 156 | 185 | 57.871795 | 0.724411 | 0.233385 | 0 | 0.064516 | 0 | 0.021505 | 0.281709 | 0.064216 | 0 | 0 | 0 | 0.00641 | 0 | 1 | 0.010753 | false | 0 | 0.11828 | 0 | 0.129032 | 0.236559 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69afcb6111e2b727f9b4db4fba7fb9a04892dfe5 | 1,099 | py | Python | time/plot.py | gray0018/Normal-integration-benchmark | 3f4fff86e659ae2a3588c0960ebb0af39e4a1e21 | [
"MIT"
] | null | null | null | time/plot.py | gray0018/Normal-integration-benchmark | 3f4fff86e659ae2a3588c0960ebb0af39e4a1e21 | [
"MIT"
] | null | null | null | time/plot.py | gray0018/Normal-integration-benchmark | 3f4fff86e659ae2a3588c0960ebb0af39e4a1e21 | [
"MIT"
] | null | null | null | import numpy as np
import operator
import matplotlib.pyplot as plt
import json
import os
# directory = '.'
# d = {}
# for filename in os.listdir(directory):
# if filename.endswith(".npy"):
# t = np.load(filename)
# d[filename[:-4]] = float(t)
# j = json.dumps(d)
# f = open("woloop.json","w")
# f.write(j)
# f.close()
plt.style.use(['science','no-latex'])  # requires the SciencePlots package
with open('withloop.json') as json_file:
d_w = json.load(json_file)
d_w = dict(sorted(d_w.items(), key=operator.itemgetter(1)))
with open('woloop.json') as json_file:
d_wo = json.load(json_file)
d_wo = dict(sorted(d_wo.items(), key=operator.itemgetter(1)))
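# Each dict maps 'model-resolution' labels to runtimes in seconds; sorting by
# value makes each scatter series run from fastest to slowest.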
fig, axes = plt.subplots(figsize=(5,5))
axes.scatter(d_w.keys(), d_w.values())
axes.scatter(d_wo.keys(), d_wo.values())
axes.legend(['With nested loops','W/O nested loops'])
axes.set_ylabel('Time (s)', fontsize=16)
axes.set_xlabel('Model-resolution', fontsize=16)
chartBox = axes.get_position()
axes.set_position([chartBox.x0, chartBox.y0*2,
chartBox.width,
chartBox.height])
plt.xticks(rotation=90)
plt.show() | 24.422222 | 61 | 0.658781 | 172 | 1,099 | 4.104651 | 0.459302 | 0.014164 | 0.050992 | 0.03966 | 0.167139 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015152 | 0.159236 | 1,099 | 45 | 62 | 24.422222 | 0.748918 | 0.207461 | 0 | 0 | 0 | 0 | 0.111498 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.208333 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69b249ca121767aa062b4c57848e58f48033a136 | 2,132 | py | Python | python/video-tools/rename-toutv.py | bmaupin/graveyard | 71d52fe6589ce13dfe7433906d1aa50df48c9f94 | [
"MIT"
] | 1 | 2019-11-23T10:44:58.000Z | 2019-11-23T10:44:58.000Z | python/video-tools/rename-toutv.py | bmaupin/graveyard | 71d52fe6589ce13dfe7433906d1aa50df48c9f94 | [
"MIT"
] | 8 | 2020-07-16T07:14:12.000Z | 2020-10-14T17:25:33.000Z | python/video-tools/rename-toutv.py | bmaupin/graveyard | 71d52fe6589ce13dfe7433906d1aa50df48c9f94 | [
"MIT"
] | 1 | 2019-11-23T10:45:00.000Z | 2019-11-23T10:45:00.000Z | #!/usr/bin/env python
# coding=utf8
import os
import os.path
import re
import sys
if sys.version_info < (3, 4):
sys.exit('ERROR: Requires Python 3.4 or higher')
from enum import Enum
def main():
VideoTypes = Enum('VideoType', 'emission film')
filename_chars = 'àÀâÂçÇéÉèÈêÊëîÎôÔ\w\-\'\.\(\)'
pattern = re.compile('([{0}]+)\.(S([\d]+)E[\d]+)\.([{0}]+)\.[\d]+kbps\.ts'.format(filename_chars))
# pattern = re.compile('([{0}]+)\.(S[\d]+E[\d]+)\.([{0}]+)'.format(filename_chars))
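# A hypothetical (not real) filename this pattern accepts would be
# 'District.31.S01E05.Épisode.5.2500kbps.ts', parsed into show='District 31',
# episode='S01E05', season='01', title='Épisode 5'.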
files_to_rename = {}
video_type = None
def parse_filename(filename):
nonlocal video_type
match = pattern.search(filename)
if match:
show = match.group(1).replace('.', ' ')
episode = match.group(2)
season = match.group(3)
title = match.group(4).replace('.', ' ')
if show.lower() == title.lower():
video_type = VideoTypes.film
else:
video_type = VideoTypes.emission
return show, episode, season, title
else:
sys.exit('ERROR: Unrecognized character in {0}\n'.format(filename))
for filename in sorted(os.listdir(os.getcwd())):
if not filename.endswith('.ts'):
continue
show, episode, season, title = parse_filename(filename)
if video_type == VideoTypes.film:
renamed_filename = '{} ({}).mp4'.format(title, season)
else:
renamed_filename = '{} - {} - {}.mp4'.format(show, episode, title)
print(filename)
print('\t{}'.format(renamed_filename))
files_to_rename[filename] = renamed_filename
response = input('Rename files? (y/n) ')
if response == 'y':
for filename in files_to_rename:
os.rename(
os.path.join(
os.getcwd(),
filename
),
os.path.join(
os.getcwd(),
files_to_rename[filename]
)
)
if __name__ == '__main__':
main()
| 29.205479 | 102 | 0.518293 | 228 | 2,132 | 4.710526 | 0.364035 | 0.041899 | 0.048417 | 0.031657 | 0.074488 | 0.040968 | 0.040968 | 0.040968 | 0.040968 | 0 | 0 | 0.011197 | 0.329737 | 2,132 | 72 | 103 | 29.611111 | 0.740378 | 0.054878 | 0 | 0.12963 | 0 | 0 | 0.117296 | 0.036282 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.092593 | 0 | 0.148148 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69b34c5fbd0dd31fc5ad9d5907ba03629ebdf899 | 6,582 | py | Python | flask_resty/related.py | d-wysocki/flask-resty | 2a5e7d7ea7e2130dce44b8f50625df72ad0dcd19 | [
"MIT"
] | 86 | 2015-11-25T07:09:10.000Z | 2022-02-15T19:40:30.000Z | flask_resty/related.py | d-wysocki/flask-resty | 2a5e7d7ea7e2130dce44b8f50625df72ad0dcd19 | [
"MIT"
] | 180 | 2015-11-24T23:02:53.000Z | 2022-03-31T04:05:38.000Z | flask_resty/related.py | d-wysocki/flask-resty | 2a5e7d7ea7e2130dce44b8f50625df72ad0dcd19 | [
"MIT"
] | 17 | 2015-12-28T11:05:47.000Z | 2022-03-15T12:10:02.000Z | import functools
from .exceptions import ApiError
# -----------------------------------------------------------------------------
class RelatedId:
"""Resolve a related item by a scalar ID.
:param view_class: The :py:class:`ModelView` corresponding to the related
model.
:param str field_name: The field name on request data.
"""
def __init__(self, view_class, field_name):
self._view_class = view_class
self.field_name = field_name
def create_view(self):
# Separating this out saves instantiating the view multiple times for
# list fields.
return self._view_class()
def resolve_related_id(self, view, id):
return view.resolve_related_id(id)
class Related:
"""A component for resolving deserialized data fields to model instances.
The `Related` component is responsible for resolving related model
instances by ID and for constructing nested model instances. It supports
multiple related types of functionality. For a view with::
related = Related(
foo=RelatedId(FooView, "foo_id"),
bar=BarView,
baz=Related(models.Baz, qux=RelatedId(QuxView, "qux_id")),
)
Given deserialized input data like::
{
"foo_id": "3",
"bar": {"id": "4"},
"baz": {name: "Bob", "qux_id": "5"},
"other_field": "value",
}
This component will resolve these data into something like::
{
"foo": <Foo(id=3)>,
"bar": <Bar(id=4)>,
"baz": <Baz(name="Bob", qux=<Qux(id=5)>>,
"other_field": "value",
}
In this case, the Foo, Bar, and Qux instances are fetched from the
database, while the Baz instance is freshly constructed. If any of the Foo,
Bar, or Qux instances do not exist, then the component will fail the
request with a 422.
Formally, in this specification:
- A `RelatedId` item will retrieve the existing object in the database
with the ID from the specified scalar ID field using the specified view.
- A view class will retrieve the existing object in the database using the
object stub containing the ID fields from the data field of the same
name, using the specified view. This is generally used with the
`RelatedItem` field class, and unlike `RelatedId`, supports composite
IDs.
- Another `Related` item will apply the same resolution to a nested
dictionary. Additionally, if the `Related` item is given a callable as
its positional argument, it will construct a new instance given that
callable, which can often be a model class.
`Related` depends on the deserializer schema to function accordingly, and
delegates validation beyond the database fetch to the schema. `Related`
also automatically supports cases where the fields are list fields or are
configured with ``many=True``. In those cases, `Related` will iterate
through the sequence and resolve each item in turn, using the rules as
above.
:param item_class: The SQLAlchemy mapper corresponding to the related item.
:param dict kwargs: A mapping from related fields to a callable resolver.
"""
def __init__(self, item_class=None, **kwargs):
self._item_class = item_class
self._resolvers = kwargs
def resolve_related(self, data):
"""Resolve the related values in the request data.
This method will replace values in `data` with resolved model
instances as described above. This operates in place and will mutate
`data`.
:param data object: The deserialized request data.
:return: The deserialized data with related fields resolved.
:rtype: object
"""
for field_name, resolver in self._resolvers.items():
if isinstance(resolver, RelatedId):
data_field_name = resolver.field_name
else:
data_field_name = field_name
if data_field_name not in data:
# If this field were required, the deserializer would already
# have raised an exception.
continue
# Remove the data field (in case it's different) so we can keep
# just the output field.
value = data.pop(data_field_name)
if value is None:
# Explicitly clear the related item if the value was None.
data[field_name] = None
continue
try:
resolved = self.resolve_field(value, resolver)
except ApiError as e:
pointer = f"/data/{data_field_name}"
raise e.update({"source": {"pointer": pointer}})
data[field_name] = resolved
if self._item_class:
return self._item_class(**data)
return data
def resolve_field(self, value, resolver):
"""Resolve a single field value.
:param value: The value corresponding to the field we are resolving.
:param resolver: A callable capable of resolving the given `value`.
:type resolver: :py:class:`Related` | :py:class:`RelatedId` | func
"""
# marshmallow always uses lists here.
many = isinstance(value, list)
if many and not value:
# As a tiny optimization, there's no need to resolve an empty list.
return value
if isinstance(resolver, Related):
resolve_item = resolver.resolve_related
elif isinstance(resolver, RelatedId):
view = resolver.create_view()
resolve_item = functools.partial(resolver.resolve_related_id, view)
else:
resolve_item = resolver().resolve_related_item
if many:
return [resolve_item(item) for item in value]
return resolve_item(value)
def __or__(self, other):
"""Combine two `Related` instances.
`Related` supports view inheritance by implementing the `|` operator.
For example, `Related(foo=..., bar=...) | Related(baz=...)` will create
a new `Related` instance with resolvers for each `foo`, `bar` and
`baz`. Resolvers on the right-hand side take precedence where each
`Related` instance has the same key.
"""
if not isinstance(other, Related):
return NotImplemented
return self.__class__(
other._item_class or self._item_class,
**{**self._resolvers, **other._resolvers},
)
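# A minimal usage sketch (Baz/QuxView are the hypothetical names from the
# class docstring, not part of this module):
#
#     related = Related(models.Baz, qux=RelatedId(QuxView, 'qux_id'))
#     data = related.resolve_related({'name': 'Bob', 'qux_id': '5'})
#     # -> <Baz(name='Bob', qux=<Qux(id=5)>)>
#
# An unresolvable qux_id surfaces as a 422 ApiError whose source pointer is
# set to '/data/qux_id'.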
| 36.566667 | 79 | 0.624734 | 815 | 6,582 | 4.942331 | 0.287117 | 0.033515 | 0.022592 | 0.012413 | 0.047666 | 0.031281 | 0.020854 | 0.020854 | 0 | 0 | 0 | 0.001923 | 0.28897 | 6,582 | 179 | 80 | 36.77095 | 0.858761 | 0.588119 | 0 | 0.071429 | 0 | 0 | 0.015444 | 0.009867 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.035714 | 0.035714 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69b3ed41ddcf330b9785bd4810fc8e2f6713d932 | 6,489 | py | Python | src/data/split.py | bathienle/master-thesis-code | 58182f54a56c34fb4a33d67743ca515c80e33657 | [
"Apache-2.0"
] | 2 | 2021-06-22T13:43:40.000Z | 2022-03-01T18:15:32.000Z | src/data/split.py | bathienle/master-thesis-code | 58182f54a56c34fb4a33d67743ca515c80e33657 | [
"Apache-2.0"
] | null | null | null | src/data/split.py | bathienle/master-thesis-code | 58182f54a56c34fb4a33d67743ca515c80e33657 | [
"Apache-2.0"
] | null | null | null | """
Split the dataset into train/val/test sets from a Cytomine dataset.
"""
import csv
import logging
import numpy as np
import os
from argparse import ArgumentParser
from itertools import chain
from cytomine import Cytomine
from cytomine.models import (
AnnotationCollection, ImageInstanceCollection, TermCollection,
UserCollection
)
def parse_arguments():
"""
Parse the arguments of the program.
Return
------
args : class argparse.Namespace
The parsed arguments.
"""
desc = "Split dataset into train/val/test sets or folds and test sets."
parser = ArgumentParser(description=desc)
parser.add_argument(
'--host',
help="The Cytomine host."
)
parser.add_argument(
'--public_key',
help="The Cytomine public key."
)
parser.add_argument(
'--private_key',
help="The Cytomine private key."
)
parser.add_argument(
'--project_id',
help="The project from the dataset."
)
parser.add_argument(
'--path',
help="Path to the dataset."
)
parser.add_argument(
'--term',
help="The specific term of the annotations."
)
parser.add_argument(
'--ratio',
type=float,
default=0.8,
help="The train/test split ratio of the dataset."
)
parser.add_argument(
'--cross',
type=bool,
default=False,
help="Whether to split into folds for a cross-validation."
)
parser.add_argument(
'--cv',
type=int,
default=5,
help="If cross is enabled, the number of folds to split the data."
)
return parser.parse_args()
if __name__ == '__main__':
args = parse_arguments()
# Connect to Cytomine
Cytomine(args.host, args.public_key, args.private_key, logging.INFO)
# Get the term ID
terms = TermCollection().fetch_with_filter('project', args.project_id)
term_id = next(
(term.id for term in terms if term.name.lower() == args.term.lower())
) # raises StopIteration if the term does not exist in the project
# Get the users of Cytomine
users = UserCollection().fetch()
users = {user.id: user.username for user in users}
# Get all images of a Cytomine project
images = ImageInstanceCollection()
images = images.fetch_with_filter('project', args.project_id)
id_to_images = {image.id: image for image in images}
# Get the all the annotations of all the images of the project
annotations = AnnotationCollection(
project=args.project_id,
term=term_id,
showMeta=True,
)
annotations.fetch()
id_to_annotations = {
annotation.id: annotation for annotation in annotations
}
# Map the annotations to the corresponding image
image_to_label = {
image.id: [annot.id for annot in annotations if annot.image == image.id]
for image in images
}
# Remove no label images
image_to_label = {
image: labels for image, labels in image_to_label.items() if labels
}
# Remove the unexisting or deleted images/annotations
filenames = os.listdir(os.path.join(args.path, 'images'))
annotations = list(chain.from_iterable(
[annotation for annotation in image_to_label.values()]
))
annotations = [f'{annotation}.jpg' for annotation in annotations]
image_to_label = {
image: list(
set([f'{label}.jpg' for label in labels]).intersection(filenames)
)
for image, labels in image_to_label.items()
}
# Sort the image by the number of annotations
image_to_label = {
image: labels for image, labels in
sorted(
image_to_label.items(),
key=lambda item: len(item[1]),
reverse=True
)
}
# Get the number of annotations
n_labels = sum([len(v) for v in image_to_label.values()])
# Split the dataset with the ratio
split_train_test = round(n_labels * args.ratio)
split_train_val = round(split_train_test * 0.8) # Split 80/20
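# e.g. with 1000 annotations and the default ratio of 0.8 this targets 800
# training labels (640 train / 160 val after the 80/20 split) and 200 test
# labels; since images are assigned whole below, actual counts can differ
# slightly.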
train_set = []
test_set = []
for image, labels in image_to_label.items():
if len(train_set) < split_train_test:
train_set.extend(labels)
else:
test_set.extend(labels)
# Split either into train/val/test or into folds
if args.cross:
folds = np.array_split(np.array(train_set), args.cv)
categories = [f'fold{i+1}' for i in range(args.cv)] + ['test']
sets = [fold.tolist() for fold in folds] + [test_set]
else:
val_set = train_set[split_train_val:]
train_set = train_set[:split_train_val]
categories = ['train', 'val', 'test']
sets = [train_set, val_set, test_set]
# Save details about the split into a CSV file
detail_path = os.path.join(args.path, f'details-{args.project_id}.csv')
header = ['Annotation ID', 'Image ID', 'Image filename', 'Term', 'User',
'Category']
with open(detail_path, 'w', newline='') as file:
writer = csv.DictWriter(file, fieldnames=header)
writer.writeheader()
# Write the information of each set
for category, filenames in zip(categories, sets):
for filename in filenames:
annotation_id = int(filename[:-4])
annotation = id_to_annotations[annotation_id]
image = id_to_images[annotation.image]
csv.writer(file).writerow([
annotation_id,
annotation.image,
image.filename,
args.term,
users[annotation.user],
category
])
# Split the annotations to the corresponding directories
subdirs = ['images', 'masks', 'inclusions', 'exclusions']
# Create the directories
for category in categories:
for subdir in subdirs:
os.makedirs(
os.path.join(args.path, category, subdir),
exist_ok=True
)
# Move the images and masks to the destination directory
for dataset, name in zip(sets, categories):
for filename in dataset:
for subdir in subdirs:
os.rename(
os.path.join(args.path, subdir, filename),
os.path.join(args.path, name, subdir, filename)
)
# Delete empty directories
for subdir in subdirs:
os.rmdir(os.path.join(args.path, subdir))
| 29.22973 | 80 | 0.608568 | 790 | 6,489 | 4.873418 | 0.226582 | 0.018182 | 0.031169 | 0.021818 | 0.213766 | 0.1 | 0.061039 | 0.042857 | 0.034286 | 0 | 0 | 0.002618 | 0.293728 | 6,489 | 221 | 81 | 29.361991 | 0.837443 | 0.13284 | 0 | 0.113924 | 0 | 0 | 0.113465 | 0.005206 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006329 | false | 0 | 0.050633 | 0 | 0.063291 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69b532a45670bfc1aba43201c129695282189089 | 14,886 | py | Python | omsdk/typemgr/FieldType.py | DanielFroehlich/omsdk | 475d925e4033104957fdc64480fe8f9af0ab6b8a | [
"Apache-2.0"
] | 61 | 2018-02-21T00:02:20.000Z | 2022-01-26T03:47:19.000Z | omsdk/typemgr/FieldType.py | DanielFroehlich/omsdk | 475d925e4033104957fdc64480fe8f9af0ab6b8a | [
"Apache-2.0"
] | 31 | 2018-03-24T05:43:39.000Z | 2022-03-16T07:10:37.000Z | omsdk/typemgr/FieldType.py | DanielFroehlich/omsdk | 475d925e4033104957fdc64480fe8f9af0ab6b8a | [
"Apache-2.0"
] | 25 | 2018-03-13T10:06:12.000Z | 2022-01-26T03:47:21.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Copyright © 2018 Dell Inc. or its subsidiaries. All rights reserved.
# Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
# Other trademarks may be trademarks of their respective owners.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Vaideeswaran Ganesan
#
from omsdk.sdkcenum import TypeHelper
from omsdk.sdkcunicode import UnicodeHelper
from omsdk.typemgr.TypeState import TypeState, TypeBase
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
import logging
logger = logging.getLogger(__name__)
# private
#
# def __init__(self, init_value, typename, fname, alias, parent=None, volatile=False)
# def __eq__, __ne__, __lt__, __le__, __gt__, __ge__
# def __str__, __repr__
# def __getattr__
# def __delattr__
# def __setattr__
#
# protected:
#
# def my_accept_value(self, value):
#
# public:
# def is_changed(self)
# def sanitized_value(self):
# def copy(self, other)
# def commit(self)
# def reject(self)
# def freeze(self)
# def unfreeze(self)
# def is_frozen(self)
# def json_encode(self)
# def child_state_changed(self, child, child_state)
# def parent_state_changed(self, new_state)
class FieldType(TypeBase):
def __init__(self, init_value, typename, fname, alias, parent=None, volatile=False, modifyAllowed = True, deleteAllowed = True, rebootRequired=False, default_on_delete=''):
if PY2:
super(FieldType, self).__init__()
else:
super().__init__()
self._type = typename
self._alias = alias
self._fname = fname
self._volatile = volatile
self._parent = parent
self._composite = False
self._index = 1
self._modifyAllowed = modifyAllowed
self._deleteAllowed = deleteAllowed
self._rebootRequired = rebootRequired
self._default_on_delete = default_on_delete
self._list = False
self._freeze = False
self.__dict__['_state'] = TypeState.UnInitialized
self._value = init_value
# Value APIs
def __getattr__(self, name):
if name in self.__dict__ and name not in ['_orig_value']:
return self.__dict__[name]
elif name == '_optimal' and self._composite:
return tuple(sorted([i for i in self.__dict__['_value'] \
if i._value is not None]))
raise AttributeError('Invalid attribute ' + name)
@property
def Value(self):
return self._value
@property
def OptimalValue(self):
return (self._optimal if self._composite else self._value)
#deprecate this. replace with Property
def get_value(self):
return self._value
# Value APIs
def __setattr__(self, name, value):
# Do not allow access to internal variables
if name in ['_orig_value', '_state']:
raise AttributeError('Invalid attribute ' + name)
# Freeze mode: No sets allowed
if '_freeze' in self.__dict__ and self._freeze:
raise ValueError('object in freeze mode')
# allow updates to other fields except _value
# should we allow updates to '_type', '_alias', '_fname'?
if name not in ['_value']:
self.__dict__[name] = value
return
# Create-only attribute : No updates allowed
if not self._modifyAllowed and \
self._state in [TypeState.Committed, TypeState.Changing]:
raise ValueError('updates not allowed to this object')
# CompositeField : sets not allowed in composite fields
if self._composite:
raise AttributeError('composite objects cannot be modified')
# value is None, object was committed; ==> no change
if value is None and \
self._state in [TypeState.Committed, TypeState.Precommit, TypeState.Changing]:
return
# Validate value and convert it if needed
valid = False
msg = None
if value is None or TypeHelper.belongs_to(self._type, value):
valid = True
elif type(self) == type(value):
value = value._value
valid = True
elif UnicodeHelper.is_string(value):
value = UnicodeHelper.stringize(value)
# expected value is int
if self._type == int:
value = int(value)
valid = True
# expected value is bool
elif self._type == bool:
value = bool(value)
valid = True
# expected value is str
elif self._type == str:
valid = True
# expected value is enumeration
elif TypeHelper.is_enum(self._type):
newvalue = TypeHelper.convert_to_enum(value, self._type)
if newvalue is not None:
value = newvalue
valid = True
else:
msg = str(value) + " is not " + str(self._type)
else:
msg = str(value) + " cannot be converted to " + str(self._type)
else:
msg = "No type conversion found for '" + str(value) + "'. "\
"Expected " + str(self._type.__name__) + ". Got " +\
type(value).__name__
if valid and not self.my_accept_value(value):
msg = type(self).__name__ +" returned failure for " + str(value)
valid = False
# if invalid, raise ValueError exception
if not valid:
raise ValueError(msg)
# same value - no change
if name in self.__dict__ and self._value == value:
return
# List fields, simply append the new entry!
if self._list and name in self.__dict__ and self.__dict__[name]:
value = self.__dict__[name] + "," + value
# modify the value
self.__dict__[name] = value
if self._state in [TypeState.UnInitialized, TypeState.Precommit, TypeState.Initializing]:
self.__dict__['_state'] = TypeState.Initializing
elif self._state in [TypeState.Committed, TypeState.Changing]:
if self._orig_value == self._value:
self.__dict__['_state'] = TypeState.Committed
else:
self.__dict__['_state'] = TypeState.Changing
else:
print("Should not come here")
if self.is_changed() and self._parent:
self._parent.child_state_changed(self, self._state)
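# Illustrative lifecycle (hypothetical field, not from a test): for an
# int-typed FieldType, assigning the string "5" is converted to the int 5 and
# moves the state UnInitialized -> Initializing; commit() makes it Committed
# (recording _orig_value), a later assignment of 6 moves it to Changing, and
# reject() restores _orig_value and the Committed state.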
# Value APIs
def __delattr__(self, name):
# Do not allow access to internal variables
if name in ['_orig_value', '_track', '_freeze', '_type', '_default_on_delete',
'_value', '_volatile', '_composite']:
raise AttributeError('Invalid attribute ' + name)
# Freeze mode - don't allow any updates
if '_freeze' in self.__dict__ and self._freeze:
raise AttributeError('object in freeze mode')
if name in self.__dict__:
del self.__dict__[name]
def set_value(self, value):
self._value = value
# nulls the value
def nullify_value(self):
if '_value' in self.__dict__:
self.__dict__['_value'] = None
if self._state in [TypeState.UnInitialized, TypeState.Precommit, TypeState.Initializing]:
self.__dict__['_state'] = TypeState.Initializing
elif self._state in [TypeState.Committed, TypeState.Changing]:
if self._orig_value == self._value:
self.__dict__['_state'] = TypeState.Committed
else:
self.__dict__['_state'] = TypeState.Changing
else:
print("Should not come here")
if self.is_changed() and self._parent:
self._parent.child_state_changed(self, self._state)
# Value APIs
def my_accept_value(self, value):
return True
# Representation APIs
def __str__(self):
return str(self._value)
# Representation APIs
def sanitized_value(self):
if '_value' not in self.__dict__ or self._value is None:
return None
return TypeHelper.resolve(self._value)
# State APIs:
def is_changed(self):
return self._state in [TypeState.Initializing, TypeState.Changing]
def reboot_required(self):
return self.is_changed() and self._rebootRequired
# State : to Committed
# allowed even during freeze
def commit(self, loading_from_scp = False):
if self.is_changed() or self._state == TypeState.Precommit:
if not self._composite:
self.__dict__['_orig_value'] = self._value
if loading_from_scp:
self.__dict__['_state'] = TypeState.Precommit
else:
self.__dict__['_state'] = TypeState.Committed
return True
# State : to Committed
# allowed even during freeze
def reject(self):
if self.is_changed():
if not self._composite:
if '_orig_value' not in self.__dict__:
del self.__dict__['_value']
self.__dict__['_state'] = TypeState.UnInitialized
else:
self.__dict__['_value'] = self._orig_value
self.__dict__['_state'] = TypeState.Committed
return True
# Does not have children - so not implemented
def child_state_changed(self, child, child_state):
raise NotImplementedError('FieldType has no children')
# what to do?
def parent_state_changed(self, new_state):
raise NotImplementedError
# Object APIs
def copy(self, other):
if isinstance(other, type(self)):
self._value = other._value
return True
return False
def print_commit(self):
print(self._state)
# Compare APIs:
def __lt__(self, other):
if self._state is TypeState.UnInitialized:
return False
if other is None:
return False
myvalue = self._value
if isinstance(other, type(self)):
othervalue = other._value
elif isinstance(other, self._type):
othervalue = other
else:
raise TypeError('cannot compare with ' + type(other).__name__)
if myvalue is None and othervalue is not None:
return True
if myvalue is None and othervalue is None:
return False
return myvalue < othervalue
# Compare APIs:
def __le__(self, other):
if self._state is TypeState.UnInitialized:
return False
if self._value is None and other is None:
return True
if self._value is not None and other is None:
return False
myvalue = self._value
if isinstance(other, type(self)):
othervalue = other._value
elif isinstance(other, self._type):
othervalue = other
else:
raise TypeError('cannot compare with ' + type(other).__name__)
if myvalue is not None and othervalue is None:
return False
if myvalue is None and othervalue is not None:
return True
return myvalue <= othervalue
# Compare APIs:
def __gt__(self, other):
if self._state is TypeState.UnInitialized:
return False
if self._value is None:
return False
if self._value is not None and other is None:
return True
myvalue = self._value
if isinstance(other, type(self)):
othervalue = other._value
elif isinstance(other, self._type):
othervalue = other
else:
raise TypeError('cannot compare with ' + type(other).__name__)
if myvalue is not None and othervalue is None:
return True
return myvalue > othervalue
# Compare APIs:
def __ge__(self, other):
if self._state is TypeState.UnInitialized:
return False
if self._value is None and other is None:
return True
if self._value is not None and other is None:
return True
myvalue = self._value
if isinstance(other, type(self)):
othervalue = other._value
elif isinstance(other, self._type):
othervalue = other
else:
raise TypeError('cannot compare with ' + type(other).__name__)
if myvalue is None and othervalue is None:
return True
if myvalue is None and othervalue is not None:
return False
return myvalue >= othervalue
# Don't allow comparision with string ==> becomes too generic
# Compare APIs:
def __eq__(self, other):
if self._state is TypeState.UnInitialized:
return False
if self._value is None and other is None:
return True
if self._value is not None and other is None:
return False
myvalue = self._value
if isinstance(other, type(self)):
othervalue = other._value
elif isinstance(other, self._type):
othervalue = other
else:
raise TypeError('cannot compare with ' + type(other).__name__)
if myvalue is None and othervalue is None:
return True
if myvalue is None and othervalue is not None:
return False  # a None value never equals a concrete value
return myvalue == othervalue
# Compare APIs:
def __ne__(self, other):
return not self.__eq__(other)
# Freeze APIs
def freeze(self):
self._freeze = True
# Freeze APIs
def unfreeze(self):
self.__dict__['_freeze'] = False
# Freeze APIs
def is_frozen(self):
return self.__dict__['_freeze']
def json_encode(self):
return self._value
def _clear_duplicates(self):
pass
def printx(self):
print(str(type(self._value))+"<>"+str(self._type)+"::"+str(self._value))
| 34.780374 | 177 | 0.590555 | 1,682 | 14,886 | 4.938169 | 0.160523 | 0.031784 | 0.021671 | 0.029136 | 0.496749 | 0.444016 | 0.408981 | 0.367325 | 0.347821 | 0.333012 | 0 | 0.001805 | 0.329974 | 14,886 | 427 | 178 | 34.861827 | 0.83086 | 0.167809 | 0 | 0.510638 | 0 | 0 | 0.057203 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113475 | false | 0.003546 | 0.017731 | 0.035461 | 0.308511 | 0.021277 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69b853f40f346029e133948525fa18fe2b5354f4 | 16,785 | py | Python | LinearOptimization/__init__.py | snickers2524/SFUMATH308LinearOptimization | 47a6c030c5221857be483d0eca8dd712d8d7c155 | [
"MIT"
] | null | null | null | LinearOptimization/__init__.py | snickers2524/SFUMATH308LinearOptimization | 47a6c030c5221857be483d0eca8dd712d8d7c155 | [
"MIT"
] | null | null | null | LinearOptimization/__init__.py | snickers2524/SFUMATH308LinearOptimization | 47a6c030c5221857be483d0eca8dd712d8d7c155 | [
"MIT"
] | null | null | null | import numpy as np
def getIndexPositions(listOfElements, item):
    """ Returns the indexes of all occurrences of the given element in
    the list listOfElements """
    indexPosList = []
    for index in range(0, len(listOfElements)):
        if listOfElements[index] == item:
            indexPosList.append(index)
    return indexPosList
def getIndexPositionsThatStartWith(listOfElements, item):
    """ Returns the indexes of all elements in the list listOfElements
    that start with the given prefix item """
    indexPosList = []
    for index in range(0, len(listOfElements)):
        if listOfElements[index].startswith(item):
            indexPosList.append(index)
    return indexPosList
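# Quick illustration of the two helpers above:
#   getIndexPositions(['a', 'b', 'a'], 'a')                   -> [0, 2]
#   getIndexPositionsThatStartWith(['*x1', 'x2', '*x3'], '*') -> [0, 2]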
class Optimizer:
def __init__(self, A, primal_independent=[], primal_dependent=[]):
self.A = np.array(A) # Tableau A
self.A = self.A.astype('float64')
        self.n = self.A.shape[1] - 1  # Number of Columns (use self.A: the raw argument may be a plain list)
        self.m = self.A.shape[0] - 1  # Number of Rows
if primal_independent != [] and primal_dependent != []: # Both Independent and Dependent Vars are specified in the class call
self.primal_ind = primal_independent + ["-1"]
self.primal_dep = primal_dependent + ["f"]
elif primal_independent == [] and primal_dependent == []: # Both Independent and Dependent Vars are not specified in the class call
self.primal_ind = self.__variable_creator("x", self.n) + ["-1"]
self.primal_dep = self.__variable_creator("t", self.m) + ["f"]
elif not primal_independent: # Only Dependent Vars are specified in the class call
self.primal_ind = self.__variable_creator("x", self.n) + ["-1"]
self.primal_dep = primal_dependent + ["f"]
        elif not primal_dependent:  # Only Independent Vars are specified in the class call
self.primal_ind = primal_independent + ["-1"]
self.primal_dep = self.__variable_creator("t", self.m) + ["f"]
        self.blands_rule = self.primal_ind + self.primal_dep  # Initializes variable order for Bland's anti-cycling rule
self.dual_ind = self.__dual_variable_creator("y", self.m, True) + ["-1"]
self.dual_dep = self.__dual_variable_creator("s", self.n, False) + ["g"]
self.primal_recorded_equations = []
self.dual_recorded_equations = []
if len(self.primal_ind) != self.n + 1 or len(self.primal_dep) != self.m + 1:
raise Exception("ERROR, INCORRECT VARIABLES")
    def __dual_variable_creator(self, var, iterable, dual_primal):  # Based on the constraints of the primal LP, it creates the variables and constraints of the Dual LP
variables = [var] * iterable
for j in range(0, iterable):
if not dual_primal:
if self.primal_ind[j].startswith("*"): # Testing for Equality in the Constraints
variables[j] = '0'
else:
variables[j] = var + str(j + 1)
else:
if self.primal_dep[j] == '0': # Testing for Equality in the Constraints
variables[j] = '*' + var + str(j + 1)
else:
variables[j] = var + str(j + 1)
return variables
def __variable_creator(self, var, iterable):
variables = [var] * iterable
for j in range(0, iterable):
variables[j] = var + str(j + 1)
return variables
def print(self):
# problem_complete = [""]
# [problem_complete.insert(len(problem_complete),i) for i in self.primal_ind]
# for i in range(0,self.m):
# problem_complete.insert(len(problem_complete),self.dual_ind[i])
# [problem_complete.insert(len(problem_complete), elem) for elem in self.A[i,:]]
# problem_complete.insert(len(problem_complete),self.primal_dep[i])
# [problem_complete.insert(len(problem_complete), j) for j in self.dual_dep]
#
# for i in range(0,self.m):
# for j in range(0,self.n):
# print(problem_complete[i+j])
# print()
# print(problem_complete)
print("A = ", self.A)
print("Primal Independent Variables: ", self.primal_ind)
print("Primal Dependent Variables: ", self.primal_dep)
print("Dual Independent Variables: ", self.dual_ind)
print("Dual Dependent Variables: ", self.dual_dep)
print("Primal Stored Equations: ", self.primal_recorded_equations)
print("Dual Stored Equations: ", self.dual_recorded_equations)
def simplex_algorithm(self):
self.print()
print("************************************************************************************************************************")
print("************************************************************************************************************************")
primal_equality_constraints = getIndexPositions(self.primal_dep, '0')
primal_unconstrained_variables = getIndexPositionsThatStartWith(self.primal_ind, "*")
while len(primal_equality_constraints) > 0 and len(primal_unconstrained_variables) > 0:
if self.A[primal_equality_constraints[0], primal_unconstrained_variables[0]] == 0: # Testing if it tries to pivot on a zero. If so it passes to the method that is equipped to deal with it
self.removing_primal_unconstrained_variables(primal_unconstrained_variables)
primal_equality_constraints = getIndexPositions(self.primal_dep, '0')
primal_unconstrained_variables = getIndexPositionsThatStartWith(self.primal_ind, "*")
continue
self.__pivot([primal_equality_constraints[0], primal_unconstrained_variables[0]])
# Recording Rows and Columns of the Tableau
self.primal_recorded_equations.insert(len(self.primal_recorded_equations), [self.A[primal_equality_constraints[0], :], self.primal_ind[:], self.primal_dep[primal_equality_constraints[0]]])
self.dual_recorded_equations.insert(len(self.dual_recorded_equations), [self.A[:, primal_unconstrained_variables[0]], self.dual_ind[:], self.dual_dep[primal_unconstrained_variables[0]]])
# Deleting the Corresponding Variables
del self.primal_ind[primal_unconstrained_variables[0]]
del self.primal_dep[primal_equality_constraints[0]]
del self.dual_ind[primal_equality_constraints[0]]
del self.dual_dep[primal_unconstrained_variables[0]]
# Deleting Rows and Columns of the Tableau
self.A = np.delete(self.A, primal_equality_constraints[0], axis=0)
self.m = self.m - 1
self.A = np.delete(self.A, primal_unconstrained_variables[0], axis=1)
self.n = self.n - 1
# Resetting the Variables
primal_equality_constraints = getIndexPositions(self.primal_dep, '0')
primal_unconstrained_variables = getIndexPositionsThatStartWith(self.primal_ind, "*")
if len(primal_unconstrained_variables) > 0:
self.removing_primal_unconstrained_variables(primal_unconstrained_variables)
if len(primal_equality_constraints) > 0:
self.removing_equality_constraints(primal_equality_constraints)
while any(self.A[0:self.m, self.n] < 0): # Testing if the Tableau is Maximum Basic Feasible
if not self.__max_basic_feasible():
return
while any(self.A[self.m, 0:self.n] > 0): # Testing if the Tableau is Optimal
if not self.__optimal():
return
print("************************************************************************************************************************")
print("************************************************************************************************************************")
self.print()
def removing_equality_constraints(self, pec):
primal_equality_constraints = pec
while len(primal_equality_constraints) > 0:
            try:
                # A nonzero entry found along the constraint row indexes a pivot *column*
                pivot_col = int(np.where(self.A[primal_equality_constraints[0], 0:self.n] != 0)[0][0])
            except IndexError as error:
                print(error)
                print('The LP as entered cannot be converted into canonical form, please check the tableau for incorrect entries. Look for a row of zeros in row: ', primal_equality_constraints[0])
                return
            self.__pivot([primal_equality_constraints[0], pivot_col])
            # Recording Rows and Columns of the Tableau
            self.dual_recorded_equations.insert(len(self.dual_recorded_equations), [self.A[:, pivot_col], self.dual_ind[:], self.dual_dep[pivot_col]])
            # Deleting the Corresponding Variables
            del self.primal_ind[pivot_col]
            del self.dual_dep[pivot_col]
            # Deleting Rows and Columns of the Tableau
            self.A = np.delete(self.A, pivot_col, axis=1)
self.n = self.n - 1
# Resetting the Variables
primal_equality_constraints = getIndexPositions(self.primal_dep, '0')
print("************************************************************************************************************************")
def removing_primal_unconstrained_variables(self, puc):
primal_unconstrained_variables = puc
while len(primal_unconstrained_variables) > 0:
            try:
                # The unconstrained variable indexes a column, so search down that column for a nonzero pivot entry
                pivot_row = int(np.where(self.A[0:self.m, primal_unconstrained_variables[0]] != 0)[0][0])
            except IndexError as error:
                print(error)
                print('The LP as entered cannot be converted into canonical form, please check the tableau for incorrect entries. Look for a column of zeros in column: ', primal_unconstrained_variables[0])
                return
            # Pivoting
            self.__pivot([pivot_row, primal_unconstrained_variables[0]])
# Recording Rows and Columns of the Tableau
self.primal_recorded_equations.insert(len(self.primal_recorded_equations), [self.A[pivot_row, :], self.primal_ind[:], self.primal_dep[pivot_row]])
# Deleting the Corresponding Variables
del self.primal_dep[pivot_row]
del self.dual_ind[pivot_row]
# Deleting Rows and Columns of the Tableau
self.A = np.delete(self.A, pivot_row, axis=0)
self.m = self.m - 1
# Reset Variables
primal_unconstrained_variables = getIndexPositionsThatStartWith(self.primal_ind, "*")
print("************************************************************************************************************************")
def __final_result(self):
print("Primal: ")
for dep in range(0, self.m):
print(" ", self.primal_dep[dep], " = ", self.A[dep, self.n])
print("Dual")
for dep in range(0, self.n):
print(" ", self.dual_dep[dep], " = ", self.A[self.m, dep])
def __optimal(self):
possible_pivot_columns = np.where(self.A[self.m, 0:self.n] > 0)
possible_pivot_columns = possible_pivot_columns[0]
for j in possible_pivot_columns: # Testing if the Linear Program is Unbounded
if not self.__unbounded_check(j):
return False
        if len(possible_pivot_columns) > 1:  # If there is more than one choice, Bland's rule decides which one
            # Pivot columns are labelled by the independent variables, so choose among primal_ind entries
            pivot_col = self.__which_row_or_col_to_use([self.primal_ind[i] for i in possible_pivot_columns], True)
        else:
            pivot_col = possible_pivot_columns[0]
        pivot_cell = [self.__min(pivot_col, np.where(self.A[0:self.m, pivot_col] > 0)[0][0]), pivot_col]
self.__pivot(pivot_cell)
return True
def __unbounded_check(self, j): # Method That tests if a Linear Program is Unbounded, by testing a specific c_j > 0 and a column j
if all(self.A[0:self.m, j] <= 0):
print("The Linear Program is Unbounded in column: ", j, " No Solution!")
self.print()
return False
else:
return True
def __max_basic_feasible(self):
# loop from the bottom up, testing to see if any b_i<0.
for i in reversed(range(0, self.m)): # Going from bottom up for each b_i so i is maximal
if self.A[i, self.n] < 0: # testing to see if a pivot is needed (i.e. is b_i < 0)
if not self.__infeasible_check(i): # if the row is infeasible, automatically ends the method
return False
possible_pivot_columns = np.where(self.A[i, 0:self.n] < 0)
                if len(possible_pivot_columns[0]) > 1:  # If there is more than one choice, Bland's rule decides which one
pivot_col = self.__which_row_or_col_to_use([self.primal_ind[i] for i in possible_pivot_columns[0]], True)
else:
pivot_col = possible_pivot_columns[0][0]
pivot_cell = [self.__min(pivot_col, i), pivot_col]
self.__pivot(pivot_cell)
return True
def __pivot(self, pivot_cell):
print("The Pivot Position is: ", "( ", pivot_cell[0], " , ", pivot_cell[1], " ). This gives: ")
temp_primal_dep = self.primal_ind[pivot_cell[1]] # Switching Variables of The Primal Linear Program
self.primal_ind[pivot_cell[1]] = self.primal_dep[pivot_cell[0]]
self.primal_dep[pivot_cell[0]] = temp_primal_dep
temp_dual_dep = self.dual_ind[pivot_cell[0]] # Switching Variables of the Dual Linear Program
self.dual_ind[pivot_cell[0]] = self.dual_dep[pivot_cell[1]]
self.dual_dep[pivot_cell[1]] = temp_dual_dep
temp = np.copy(self.A)
for i in range(0, self.m + 1): # Going through each entry in the tableau
for j in range(0, self.n + 1):
if i == pivot_cell[0] and j != pivot_cell[1]: # Pivot if on the same row as the pivot cell (and is not the pivot cell)
self.A[i, j] = temp[i, j] / temp[pivot_cell[0], pivot_cell[1]]
elif j == pivot_cell[1] and i != pivot_cell[0]: # Pivot if on the same column as the pivot cell (and is not the pivot cell)
self.A[i, j] = -1 * temp[i, j] / temp[pivot_cell[0], pivot_cell[1]]
elif i == pivot_cell[0] and j == pivot_cell[1]: # Pivoting on the actual pivot cell
self.A[i, j] = 1 / temp[i, j]
else: # All other entries
self.A[i, j] = (temp[i, j] * temp[pivot_cell[0], pivot_cell[1]] - temp[pivot_cell[0], j] * temp[i, pivot_cell[1]]) / temp[pivot_cell[0], pivot_cell[1]]
self.print()
def __min(self, col, start_row):
min_ratio = self.A[start_row, self.n] / self.A[start_row, col]
current_row = start_row
# print(current_row)
for i in range(start_row + 1, self.m):
if self.A[i, col] <= 0:
continue
if min_ratio > self.A[i, self.n] / self.A[i, col] >= 0:
min_ratio = self.A[i, self.n] / self.A[i, col]
current_row = i
elif self.A[i, self.n] / self.A[i, col] == min_ratio:
current_row = self.__which_row_or_col_to_use([self.primal_dep[i] for i in [current_row, i]], False)
min_ratio = self.A[current_row, self.n] / self.A[current_row, col]
# print(current_row)
return current_row
    def __which_row_or_col_to_use(self, choices, col):  # Returns the proper choice according to the order for Bland's anti-cycling rule.
        # If choosing the pivot according to columns, set col = True in the method call, otherwise set it to False
for bland in self.blands_rule:
if bland in choices:
if col:
return self.primal_ind.index(bland)
else:
return self.primal_dep.index(bland)
    def __infeasible_check(self, i):
        # Given a row i of the tableau with b_i < 0, check whether every other entry in the
        # row is non-negative; if so, the tableau is infeasible and this returns False
if all(self.A[i, 0:self.n] >= 0):
print("Tableau is Infeasible in Row: ", i, " No Solution!")
self.print()
return False
else:
return True
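# Usage sketch (illustrative tableau, not from the original repo). The class expects a
# Tucker tableau: the last column holds the constraint constants and the last row the
# objective coefficients, with the corner entry as the current objective value.
#
#   import numpy as np
#   A = np.array([[1, 2, 6],
#                 [3, 1, 9],
#                 [2, 3, 0]])   # maximize 2*x1 + 3*x2 s.t. x1 + 2*x2 <= 6, 3*x1 + x2 <= 9
#   opt = Optimizer(A)
#   opt.simplex_algorithm()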
| 52.783019 | 203 | 0.595413 | 2,152 | 16,785 | 4.44145 | 0.104089 | 0.054405 | 0.034003 | 0.035363 | 0.670538 | 0.606089 | 0.520402 | 0.431471 | 0.357711 | 0.325277 | 0 | 0.011563 | 0.263211 | 16,785 | 317 | 204 | 52.949527 | 0.7613 | 0.198928 | 0 | 0.361607 | 0 | 0.008929 | 0.105306 | 0.053888 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075893 | false | 0 | 0.004464 | 0 | 0.160714 | 0.138393 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69be35315cfb09e0f8fbf8c5b357eea847bb9220 | 4,253 | py | Python | pilot/user/atlas/memory.py | anisyonk/pilot2 | f06cbf903e3f235d6fc504b54faa02006b95256d | [
"Apache-2.0"
] | null | null | null | pilot/user/atlas/memory.py | anisyonk/pilot2 | f06cbf903e3f235d6fc504b54faa02006b95256d | [
"Apache-2.0"
] | null | null | null | pilot/user/atlas/memory.py | anisyonk/pilot2 | f06cbf903e3f235d6fc504b54faa02006b95256d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Paul Nilsson, paul.nilsson@cern.ch, 2018
from .utilities import get_memory_values
from pilot.common.errorcodes import ErrorCodes
from pilot.util.auxiliary import get_logger, set_pilot_state
from pilot.util.processes import kill_processes
errors = ErrorCodes()
def allow_memory_usage_verifications():
"""
Should memory usage verifications be performed?
:return: boolean.
"""
return True
def get_ucore_scale_factor(job):
"""
    Get the correction/scale factor for SCORE/4CORE/nCORE jobs on UCORE queues.
:param job: job object.
:return: scale factor (int).
"""
log = get_logger(job.jobid)
try:
job_corecount = float(job.corecount)
except Exception as e:
log.warning('exception caught: %s (job.corecount=%s)' % (e, str(job.corecount)))
job_corecount = None
try:
schedconfig_corecount = float(job.infosys.queuedata.corecount)
except Exception as e:
log.warning('exception caught: %s (job.infosys.queuedata.corecount=%s)' % (e, str(job.infosys.queuedata.corecount)))
schedconfig_corecount = None
if job_corecount and schedconfig_corecount:
try:
scale = job_corecount / schedconfig_corecount
log.debug('scale=%f' % scale)
except Exception as e:
log.warning('exception caught: %s (using scale factor 1)' % e)
scale = 1
else:
log.debug('will use scale factor 1')
scale = 1
return scale
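# Example (illustrative numbers): an 8-core job (job.corecount = 8) landing on a UCORE
# queue advertised with queuedata.corecount = 1 gives scale = 8.0, so the per-core
# maxRSS limit in memory_usage() below is stretched to cover the whole multicore payload.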
def memory_usage(job):
"""
Perform memory usage verification.
:param job: job object
:return: exit code (int), diagnostics (string).
"""
exit_code = 0
diagnostics = ""
log = get_logger(job.jobid)
# Get the maxPSS value from the memory monitor
summary_dictionary = get_memory_values(job.workdir, name=job.memorymonitor)
if not summary_dictionary:
exit_code = errors.BADMEMORYMONITORJSON
diagnostics = "Memory monitor output could not be read"
return exit_code, diagnostics
maxdict = summary_dictionary.get('Max', {})
maxpss_int = maxdict.get('maxPSS', -1)
# Only proceed if values are set
if maxpss_int != -1:
maxrss = job.infosys.queuedata.maxrss
if maxrss:
# correction for SCORE/4CORE/nCORE jobs on UCORE queues
scale = get_ucore_scale_factor(job)
try:
maxrss_int = 2 * int(maxrss * scale) * 1024 # Convert to int and kB
except Exception as e:
log.warning("unexpected value for maxRSS: %s" % e)
else:
# Compare the maxRSS with the maxPSS from memory monitor
if maxrss_int > 0 and maxpss_int > 0:
if maxpss_int > maxrss_int:
diagnostics = "job has exceeded the memory limit %d kB > %d kB (2 * queuedata.maxrss)" % \
(maxpss_int, maxrss_int)
log.warning(diagnostics)
# Create a lockfile to let RunJob know that it should not restart the memory monitor after it has been killed
#pUtil.createLockFile(False, self.__env['jobDic'][k][1].workdir, lockfile="MEMORYEXCEEDED")
# Kill the job
set_pilot_state(job=job, state="failed")
job.piloterrorcodes, job.piloterrordiags = errors.add_error_code(errors.PAYLOADEXCEEDMAXMEM)
kill_processes(job.pid)
else:
log.info("max memory (maxPSS) used by the payload is within the allowed limit: "
"%d B (2 * maxRSS = %d B)" % (maxpss_int, maxrss_int))
else:
if maxrss == 0 or maxrss == "0":
log.info("queuedata.maxrss set to 0 (no memory checks will be done)")
else:
log.warning("queuedata.maxrss is not set")
return exit_code, diagnostics
| 34.298387 | 133 | 0.610863 | 519 | 4,253 | 4.903661 | 0.33526 | 0.033006 | 0.026719 | 0.028291 | 0.164244 | 0.099804 | 0.088802 | 0.088802 | 0.061297 | 0.044008 | 0 | 0.010087 | 0.300729 | 4,253 | 123 | 134 | 34.577236 | 0.845662 | 0.236539 | 0 | 0.283582 | 0 | 0 | 0.158876 | 0.011371 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044776 | false | 0 | 0.059701 | 0 | 0.164179 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69bf52e942c0d69a50c6990eb95bacea62b42777 | 6,038 | py | Python | validator/validator.py | iestynpryce/file-validator | c1a16c3ae41847d92c0a251fa0ae386be6f39766 | [
"MIT"
] | null | null | null | validator/validator.py | iestynpryce/file-validator | c1a16c3ae41847d92c0a251fa0ae386be6f39766 | [
"MIT"
] | null | null | null | validator/validator.py | iestynpryce/file-validator | c1a16c3ae41847d92c0a251fa0ae386be6f39766 | [
"MIT"
] | null | null | null | from copy import deepcopy
from os.path import basename
import pickle
import re
import sys
import xml.etree.ElementTree as ET
class Validate():
# Store information about the fields
field_mapping = dict()
field_length = dict()
field_type = dict()
diefast = True
database = None
file_status = dict()
def __init__(self, dblocation, diefast=True):
self.database = dblocation
self.diefast = diefast
    def store_results(self):
        with open(self.database, 'wb') as out:
            pickle.dump(self.file_status, out)
def validate_header(self, header, field_config):
fields = header.split('|')
prefix = fields[0]
if prefix != 'H':
print("Error: header prefix is '" + prefix + "', 'H' expected.")
return False
required_fields = {k.text: True for k in field_config.findall('.//name')}
for index, field in enumerate(fields[1:], start=1):
if field in required_fields:
self.field_mapping[field] = index
del(required_fields[field])
else:
print("Error: header: field '" + field + "' found in header, but not in the configuration")
return False
if len(required_fields) > 0:
print("Error: header missing the following fields:")
for k in required_fields.keys():
print(k)
return False
return True
def validate_footer(self, footer, nlines, interface_id, filename_dt):
fields = footer.split('|')
prefix = fields[0]
if prefix != 'F':
print("Error: footer prefix is '" + prefix + "', 'F' expected.")
return False
interface = fields[1]
if interface != interface_id:
print("Error: footer file group is '" + interface + "', '" + interface_id + "' expected.")
return False
dt_stamp = fields[2]
if dt_stamp != filename_dt:
print("Error: footer datetimestamp is:", dt_stamp, "Expected:", filename_dt)
return False
read_obs = fields[3]
if int(read_obs) != nlines:
print("Error: footer", nlines, "lines read", read_obs, "expected.")
return False
return True
def validate_line(self, line, linenum, field_config):
fields = line.split('|')
prefix = fields[0]
if prefix != 'B':
print("Error: line", format(linenum), "body prefix is '" + prefix + "', 'B' expected.")
return False
for f, i in self.field_mapping.items():
# Check if the field even exists
try:
field = fields[i]
except IndexError:
print("Error: line", linenum, "column", i+1, "- field not found. (Field name:", f + ")")
return False
# Check if it's within the length limits
if len(field) > self.field_length[f]:
print("Error: line", linenum, "column", i+1, "- field too long.", len(field), ">", self.field_length[f], "(Field name:", f + ")")
return False
# Check the type of the field
self.check_type(field, f)
return True
def check_type(self, field, field_name):
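        # Placeholder hook: override in a subclass to enforce per-field type rules
        # (the declared types were captured into self.field_type by validator_setup)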
return True
def validator_setup(self, fields):
for field in fields.findall('field'):
name = field.find('name').text
length = int(field.find('length').text)
ftype = field.find('type').text
self.field_length[name] = length
self.field_type[name] = ftype
def validate_file(self, config, filename):
tree = ET.parse(config)
root = tree.getroot()
error = False
for interface in root.findall('interface'):
fields = interface.find('fields')
self.validator_setup(fields)
group = interface.find('group').text
name = interface.find('fileName').find('name').text
m = re.match(name, basename(filename))
            if m is None:
print("Error: filename '" + basename(filename) +"' does not match the expected pattern:", name)
return False
interface_name = m.group(int(interface.find('fileName').find('fileInterface').text))
sub_group = m.group(int(interface.find('fileName').find('subGroup').text))
date_time = m.group(int(interface.find('fileName').find('dateTimeGroup').text))
try:
with open(filename) as f:
nlines = 0
first_line = f.readline().rstrip()
last_line = f.readline().rstrip()
ok = self.validate_header(first_line, fields)
if not ok:
error = True
if self.diefast:
sys.exit(-1)
nlines += 1
for line in f:
nlines +=1
ok = self.validate_line(last_line, nlines, fields)
if not ok:
error = True
if self.diefast:
sys.exit(-1)
last_line = line.rstrip()
nlines += 1
ok = self.validate_footer(last_line, nlines, group, date_time)
if not ok:
error = True
if self.diefast:
sys.exit(-1)
            except IOError as e:
                print("Error:", e)  # IOError has no .value attribute; print the exception itself
                return False
if not error:
print("SUCCESS: validated", basename(filename), "from group:", group + ", subgroup:", sub_group + ", interface:",
interface_name +". Timestamp:", date_time)
return True
else:
return False
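# Usage sketch (paths and file name are hypothetical; the XML config must match what
# validate_file() parses: an <interface> element with <group>, a <fields> list of
# <field><name/><length/><type/> entries, and a <fileName> block whose regex groups give
# the interface, subgroup and datetime):
#
#   v = Validate('/tmp/validation.db', diefast=False)
#   ok = v.validate_file('config.xml', 'GROUP_SUB_20200101120000.dat')
#   v.store_results()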
| 34.701149 | 145 | 0.51474 | 647 | 6,038 | 4.709428 | 0.224111 | 0.046931 | 0.031178 | 0.032819 | 0.189367 | 0.175583 | 0.09616 | 0.062685 | 0.040368 | 0.040368 | 0 | 0.00478 | 0.376284 | 6,038 | 173 | 146 | 34.901734 | 0.804302 | 0.021862 | 0 | 0.296296 | 0 | 0 | 0.118455 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059259 | false | 0 | 0.044444 | 0.007407 | 0.288889 | 0.103704 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69c12ea8e597124ef988d5e50b496e666fa30337 | 1,912 | py | Python | models/encoders/encoder_transformer.py | tushar117/counter-neural-essay-length-copy | 010aee9b4869c32472ec821f52ce6c9914410a2e | [
"MIT"
] | 6 | 2021-11-13T02:20:42.000Z | 2022-01-03T14:22:25.000Z | models/encoders/encoder_transformer.py | tushar117/counter-neural-essay-length-copy | 010aee9b4869c32472ec821f52ce6c9914410a2e | [
"MIT"
] | 2 | 2021-11-12T06:00:31.000Z | 2022-03-29T15:11:31.000Z | models/encoders/encoder_transformer.py | tushar117/counter-neural-essay-length-copy | 010aee9b4869c32472ec821f52ce6c9914410a2e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import utils
from utils import LONG, FLOAT
from models.transformer.Models import Encoder as Transf_Encoder
class Encoder_Transfomer(nn.Module):
""" encoders class """
#
def __init__(self, config, x_embed):
super().__init__()
self.use_gpu = config.use_gpu
len_max_seq = 0
if config.pad_level == "doc":
len_max_seq = config.max_len_doc
else:
len_max_seq = config.max_len_sent
self.d_model = config.embed_size
self.model = Transf_Encoder(
x_embed=x_embed.x_embed, n_src_vocab=config.max_vocab_cnt, len_max_seq=len_max_seq,
d_word_vec=config.embed_size, d_model=self.d_model, d_inner=config.d_inner_hid,
n_layers=config.transf_n_layers, n_head=config.n_head, d_k=config.d_k, d_v=config.d_v,
dropout=config.dropout
)
#self.encoder_out_size = config.d_model
self.encoder_out_size = self.d_model
return
# end def __init__
    # generate positional indices from token order: position i+1 for real tokens, 0 for padding
def gen_positional_input(self, seq_x):
pos_x = []
for cur_batch in seq_x:
cur_pos = []
for ind, val in enumerate(cur_batch):
if val != 0:
cur_pos.append(ind+1)
else:
cur_pos.append(0)
pos_x.append(cur_pos)
pos_input = torch.LongTensor(pos_x)
pos_input = utils.cast_type(pos_input, LONG, self.use_gpu)
return pos_input
# end def_gen_positional_input
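    # Illustration: token ids [[5, 7, 0], [2, 0, 0]] (0 = padding) yield positional
    # indices [[1, 2, 0], [1, 0, 0]], i.e. padded slots keep position 0.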
#
def forward(self, text_inputs, mask_input, len_seq, mode=""):
pos_x = self.gen_positional_input(text_inputs)
encoder_out, *_ = self.model(text_inputs, pos_x)
encoder_out = encoder_out * mask_input.unsqueeze(2)
return encoder_out
# end forward
| 27.710145 | 98 | 0.625523 | 271 | 1,912 | 4.0369 | 0.309963 | 0.054845 | 0.041133 | 0.027422 | 0.038391 | 0.038391 | 0 | 0 | 0 | 0 | 0 | 0.004409 | 0.28818 | 1,912 | 68 | 99 | 28.117647 | 0.799412 | 0.093096 | 0 | 0.04878 | 0 | 0 | 0.001745 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073171 | false | 0 | 0.121951 | 0 | 0.292683 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69c6a761f9ad45bb17ab0266ac95a1768e562491 | 1,760 | py | Python | some_completed_notebooks/energy_levels_not_commented/gradient.py | Quantum-Computing-Cooperation/Quantum_Hackathon_2021 | 69bb50ebd93cbc2bdca59de360408a5a2ecd6f2e | [
"Apache-2.0"
] | 4 | 2021-11-17T19:42:15.000Z | 2022-03-05T18:36:59.000Z | some_completed_notebooks/energy_levels_not_commented/gradient.py | Quantum-Computing-Cooperation/Quantum_Hackathon_2021 | 69bb50ebd93cbc2bdca59de360408a5a2ecd6f2e | [
"Apache-2.0"
] | null | null | null | some_completed_notebooks/energy_levels_not_commented/gradient.py | Quantum-Computing-Cooperation/Quantum_Hackathon_2021 | 69bb50ebd93cbc2bdca59de360408a5a2ecd6f2e | [
"Apache-2.0"
] | 6 | 2021-11-19T18:37:07.000Z | 2021-11-20T22:26:17.000Z | from qiskit.opflow.state_fns import CircuitStateFn, StateFn
from qiskit.opflow.expectations import PauliExpectation
from qiskit.opflow.converters import CircuitSampler
import numpy as np
def ei(i, n):
vi = np.zeros(n)
vi[i] = 1.0
return vi[:]
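# The function below implements the parameter-shift rule: for each parameter theta_i the
# energy is evaluated at theta_i + pi/2 and theta_i - pi/2, and the gradient component is
#   g_i = (E(theta_i + pi/2) - E(theta_i - pi/2)) / 2,
# with the sampling error bars combined as sqrt(err_plus**2 + err_minus**2) / 2 whenever
# the backend is not a statevector simulator.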
def gradient(n_qubits, n_layer, op, ansatz, params, shots, instance):
n_params = len(params)
wfn_circuits = []
op = StateFn(op, is_measurement=True)
for i in range(n_params):
wfn_circuits.append(CircuitStateFn(
ansatz(params + ei(i, n_params) * np.pi / 2.0, n_spins=n_qubits, n_layer=n_layer, full_rotation=True)))
wfn_circuits.append(CircuitStateFn(
ansatz(params - ei(i, n_params) * np.pi / 2.0, n_spins=n_qubits, n_layer=n_layer, full_rotation=True)))
# Now measure circuits
results = []
for wfn in wfn_circuits:
braket = op @ wfn
# Simulate the sampling
grouped = PauliExpectation().convert(braket)
sampled_op = CircuitSampler(instance).convert(grouped)
# Expectation value
mean_value = sampled_op.eval().real
est_err = 0
# If the simulations is not unitary evolution, return an error bar
if (not instance.is_statevector):
variance = PauliExpectation().compute_variance(sampled_op).real
est_err = np.sqrt(variance / shots)
results.append([mean_value, est_err])
g = np.zeros((n_params, 2))
for i in range(n_params):
rplus = results[2 * i]
rminus = results[2 * i + 1]
# G = (Ep - Em)/2
# var(G) = var(Ep) * (dG/dEp)**2 + var(Em) * (dG/dEm)**2
g[i, :] = (rplus[0] - rminus[0]) / 2.0, np.sqrt(rplus[1] ** 2 + rminus[1] ** 2) / 2.0
return g | 34.509804 | 115 | 0.614205 | 245 | 1,760 | 4.273469 | 0.346939 | 0.040115 | 0.045845 | 0.037249 | 0.225406 | 0.225406 | 0.191022 | 0.191022 | 0.191022 | 0.191022 | 0 | 0.018547 | 0.264773 | 1,760 | 51 | 116 | 34.509804 | 0.790572 | 0.114205 | 0 | 0.117647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69c80f5af3f6bc89594adb960d2a07c5548b7847 | 6,814 | py | Python | src/ext/executor/compare_executor.py | terryyrliang/facenet | 0f6c0074cef5f1254abfa2bfa0357db023f1908d | [
"MIT"
] | null | null | null | src/ext/executor/compare_executor.py | terryyrliang/facenet | 0f6c0074cef5f1254abfa2bfa0357db023f1908d | [
"MIT"
] | null | null | null | src/ext/executor/compare_executor.py | terryyrliang/facenet | 0f6c0074cef5f1254abfa2bfa0357db023f1908d | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import tensorflow as tf
from scipy import misc
app_path = os.environ['APP_PATH']
for p in app_path.split(';'):
sys.path.append(p)
import os
import copy
import facenet
import align.detect_face
from common import config_fetcher
from kafka import KafkaConsumer
from kafka import KafkaProducer
import json
from bean import event
servers = config_fetcher.bootstrap_hosts
group_id = config_fetcher.group_id
compare_topic = config_fetcher.compare_topic
aggregate_topic = config_fetcher.aggregate_topic
model = config_fetcher.model
# To consume latest messages and auto-commit offsets
consumer = KafkaConsumer(compare_topic,
group_id = group_id,
bootstrap_servers = servers,
value_deserializer=lambda m: json.loads(m.decode('ascii')))
producer = KafkaProducer(value_serializer=lambda v:json.dumps(v).encode('utf-8'), bootstrap_servers = servers)
def execute():
# images = load_and_align_data(image_files, image_size, margin, gpu_memory_fraction)
with tf.Graph().as_default():
with tf.Session() as sess:
# Load the model
facenet.load_model(model)
#########################################################################################################
############################################# Split Line ################################################
#########################################################################################################
print('load model done...')
for message in consumer:
try:
request = message.value
image_files = request['face_extract_path']
target_extract_path = request['target_extract_path']
image_files.insert(0, target_extract_path)
print("get a request")
images = load_and_align_data(image_files, config_fetcher.compare_is, config_fetcher.compare_margin, config_fetcher.compare_gmf)
# Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
# Run forward pass to calculate embeddings
feed_dict = {images_placeholder: images, phase_train_placeholder: False}
emb = sess.run(embeddings, feed_dict=feed_dict)
nrof_images = len(image_files)
print_target_images(nrof_images, image_files)
print_result_matrix(np, nrof_images, emb)
fr = extract_final_result(np, nrof_images, emb)
result = False
for r in fr:
if r < 1:
result = True
break
next_request = build_next_request(request, result, '')
print('-----------------------------------')
print(event.convert_to_dict(next_request))
producer.send(aggregate_topic, next_request)
print('-----------------------------------')
print("process one request done...")
except Exception as e:
print(e)
def extract_final_result(np, nrof_images, emb):
final_result = []
for j in range(1, nrof_images):
dist = np.sqrt(np.sum(np.square(np.subtract(emb[0, :], emb[j, :]))))
final_result.append(dist)
return final_result
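# The values returned above are Euclidean distances between FaceNet embeddings of the
# target face (emb[0]) and each candidate; execute() flags a match when any distance is
# below 1.0. For example, a pair distance of 0.7 is treated as the same identity while
# 1.2 is not. Note the 1.0 cutoff is this script's heuristic, not a FaceNet constant.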
def print_target_images(nrof_images, image_files):
print('Images:')
for i in range(nrof_images):
print('%1d: %s' % (i, image_files[i]))
print('')
def print_result_matrix(np, nrof_images, emb):
# Print distance matrix
print('Distance matrix')
print(' ', end='')
for i in range(nrof_images):
print(' %1d ' % i, end='')
print('')
for i in range(nrof_images):
print('%1d ' % i, end='')
for j in range(nrof_images):
dist = np.sqrt(np.sum(np.square(np.subtract(emb[i, :], emb[j, :]))))
print(' %1.4f ' % dist, end='')
print('')
def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction):
minsize = 20 # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # three steps' thresholds
factor = 0.709 # scale factor
print('Creating networks and loading parameters')
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
tmp_image_paths = copy.copy(image_paths)
img_list = []
for image in tmp_image_paths:
img = misc.imread(os.path.expanduser(image), mode='RGB')
img_size = np.asarray(img.shape)[0:2]
bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
if len(bounding_boxes) < 1:
image_paths.remove(image)
print("can't detect face, remove ", image)
continue
det = np.squeeze(bounding_boxes[0, 0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
prewhitened = facenet.prewhiten(aligned)
img_list.append(prewhitened)
images = np.stack(img_list)
return images
def build_next_request(old_request, result, error_message):
nr = event.AggregatorRequest(old_request['client_id'], old_request['session_id'], old_request['trace_id'],
old_request['total_image_numbers'], old_request['request_order'], old_request['root_path'],
old_request['extract_root_path'], old_request['face_image_path'], result, error_message)
return nr
if __name__ == "__main__":
execute() | 43.96129 | 148 | 0.567361 | 789 | 6,814 | 4.640051 | 0.287706 | 0.032778 | 0.021852 | 0.016389 | 0.191205 | 0.174542 | 0.157061 | 0.105436 | 0.066102 | 0.042611 | 0 | 0.011041 | 0.282213 | 6,814 | 155 | 149 | 43.96129 | 0.737477 | 0.045495 | 0 | 0.110236 | 0 | 0 | 0.076019 | 0.011593 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047244 | false | 0 | 0.133858 | 0 | 0.204724 | 0.188976 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69ca51f9a5be7291a799b3ee485e4a0de181e34a | 4,356 | py | Python | tests/test_it2.py | gamis/flo | bcaf3856cc2dcef0ef223e3e8901dcfbce34d5a0 | [
"MIT"
] | null | null | null | tests/test_it2.py | gamis/flo | bcaf3856cc2dcef0ef223e3e8901dcfbce34d5a0 | [
"MIT"
] | null | null | null | tests/test_it2.py | gamis/flo | bcaf3856cc2dcef0ef223e3e8901dcfbce34d5a0 | [
"MIT"
] | null | null | null | import random
import pytest
from flo.it2 import from_, for_each, unique_index, index
from flo.lamb import e_
def test_usage_examples() -> None:
mylist = ['pretty', 'cool', 'items', 'kiddo']
expected = {'P': 'PRETTY', 'I': 'ITEMS'}
expected_repr = "It(list[4] * _.upper() / _ contains 'E')"
it = from_(mylist).map(e_.upper()).filter(e_.has('E'))
assert repr(it) == expected_repr
assert it.collect(unique_index, key=e_[0]) == expected
it = from_(mylist).map(e_.upper()).where(e_.has('E'))
assert it.to(unique_index, key=e_[0]) == expected
assert (for_each('pretty', 'cool', 'items', 'kiddo') * e_.upper() / e_.has('E') > list) == list(expected.values())
@pytest.mark.parametrize('src,mapper,rep,expected',
[([3, 4], str, "It(list[2] * str)", ["3", "4"]),
([3, 4], e_ + 1, "It(list[2] * _+1)", [4, 5])])
def test_map(src, mapper, rep, expected):
it = from_(src).map(mapper)
assert repr(it) == rep
assert it.to(list) == expected
@pytest.mark.parametrize('src,mapper,rep,expected',
[(["3 1", "a", "b c d"], str.split, "It(list[3] * split * flatten)", ["3", "1", "a", "b", "c", "d"]),
(["3,1", "a", "b,c,d"], e_.split(','), "It(list[3] * _.split(',') * flatten)", ["3", "1", "a", "b", "c", "d"]),
(["3,1", "a", "b,c,d"], (str.split, {'sep': ','}), "It(list[3] * split(_, sep=',') * flatten)", ["3", "1", "a", "b", "c", "d"])
])
def test_flatmap(src, mapper, rep, expected):
if isinstance(mapper, tuple):
mapper, kwargs = mapper
else:
kwargs = {}
it = from_(src).flatmap(mapper, **kwargs)
assert repr(it) == rep
assert it.to(list) == expected
@pytest.mark.parametrize('src,f,rep,expected',
[([3, 4], e_ > 3, "It(list[2] / _>3)", [4]),
([3, 4], e_.apply(str).not_in({'2'}), "It(list[2] / _.apply(str) not in {'2'})", [3, 4])])
def test_filter(src, f, rep, expected):
it = from_(src).filter(f)
assert repr(it) == rep
assert it.to(list) == expected
@pytest.mark.parametrize('src,f,rep,expected',
[([3, 4], e_ > 3, "It(list[2] / not _>3)", [3]),
([3, 4], e_.apply(str).not_in({'2'}), "It(list[2] / not _.apply(str) not in {'2'})", [])])
def test_exclude(src, f, rep, expected):
it = from_(src).exclude(f)
assert repr(it) == rep
assert it.to(list) == expected
@pytest.mark.parametrize('src,f,expected',
[([3, 4], (set,), {3, 4}),
([3, 4], (list, str), '[3, 4]')
])
def test_collect(src, f, expected):
assert from_(src).collect(*f) == expected
@pytest.mark.parametrize('src,expected',
[([[1, 2, 3], [4]], [1, 2, 3, 4]),
([[1, 2, 3], [], [4]], [1, 2, 3, 4]),
([[1, 2, 3], None, [4]], TypeError),
([[1, 2, 3], 15, [4]], TypeError),
])
def test_flatten(src: list, expected: list):
try:
assert from_(src).flatten().to(list) == expected
except TypeError:
assert expected == TypeError
def test_zip_with():
assert for_each(1, 2, 3).zip_with('abc').to(list) == [(1, 'a'), (2, 'b'), (3, 'c')]
def test_chain():
assert for_each(1, 2, 3).chain('abc').to(list) == [1, 2, 3, 'a', 'b', 'c']
def test_dropwhile():
assert from_(range(10)).dropwhile(e_ < 3).to(list) == [3, 4, 5, 6, 7, 8, 9]
def test_takewhile():
assert from_(range(10)).takewhile(e_ < 3).to(list) == [0, 1, 2]
def test_cache():
rng = random.Random()
original = for_each(1, 2, 3).map(lambda e: e + rng.random())
assert original.to(list) != original.to(list)
cached = original.cache()
first = cached.to(list)
second = cached.to(list)
assert first == second
def test_index():
    src = ['apples', 'bananas', 'apricots', 'grapes', 'grapefruit']
    actual = from_(src).to(index, key=e_[0])
    expected = dict(a=['apples', 'apricots'], b=['bananas'], g=['grapes', 'grapefruit'])
    assert actual == expected
    with pytest.raises(KeyError):
        from_(src).to(unique_index, key=e_[0])
| 34.571429 | 153 | 0.504821 | 598 | 4,356 | 3.560201 | 0.177258 | 0.016909 | 0.014091 | 0.067637 | 0.387976 | 0.33396 | 0.267262 | 0.23861 | 0.202912 | 0.202912 | 0 | 0.038305 | 0.268825 | 4,356 | 125 | 154 | 34.848 | 0.630141 | 0 | 0 | 0.186813 | 0 | 0 | 0.144169 | 0.01056 | 0 | 0 | 0 | 0 | 0.241758 | 1 | 0.142857 | false | 0.010989 | 0.043956 | 0 | 0.186813 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69cc030fefacd641eb7ed7ab0afe45ef96bb7bd1 | 3,242 | py | Python | foliant/backends/mdtopdf.py | foliant-docs/mdtopdf | c383af7b92779f2dca3644e127b0a2466e15afbc | [
"MIT"
] | null | null | null | foliant/backends/mdtopdf.py | foliant-docs/mdtopdf | c383af7b92779f2dca3644e127b0a2466e15afbc | [
"MIT"
] | null | null | null | foliant/backends/mdtopdf.py | foliant-docs/mdtopdf | c383af7b92779f2dca3644e127b0a2466e15afbc | [
"MIT"
] | null | null | null | import json
import shutil
from subprocess import run, PIPE, STDOUT, CalledProcessError
from pathlib import PosixPath
from foliant.utils import spinner
from foliant.backends.base import BaseBackend
class Backend(BaseBackend):
_flat_src_file_name = '__all__.md'
targets = ('pdf', 'site')
required_preprocessors_after = [{
'flatten': {
'flat_src_file_name': _flat_src_file_name
}},
{'mdtopdf': {}}
]
defaults = {
'mdtopdf_path': 'md-to-pdf',
'options': {},
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._flat_src_file_path = self.working_dir / self._flat_src_file_name
config = self.config.get('backend_config', {}).get('mdtopdf', {})
self._mdtopdf_config = {**self.defaults, **config}
self._slug = f'{self._mdtopdf_config.get("slug", self.get_slug())}'
self._slug_for_commands = self._escape_control_characters(str(self._slug))
self._cachedir = self.project_path / '.mdtopdfcache'
shutil.rmtree(self._cachedir, ignore_errors=True)
self._cachedir.mkdir()
self.logger = self.logger.getChild('mdtopdf')
self.logger.debug(f'Backend inited: {self.__dict__}')
def _escape_control_characters(self, source_string: str) -> str:
escaped_string = source_string.replace('"', "\\\"").replace('$', "\\$").replace('`', "\\`")
return escaped_string
def _generate_config_file(self, options: dict) -> PosixPath:
config_path = self._cachedir / 'config.json'
config = {}
for key, value in options.items():
config[key.replace('-', '_')] = value
# md-to-pdf on unix requires --no-sandbox puppeteer flag to work properly
launch_args = config.setdefault('launch_options', {}).setdefault('args', [])
if '--no-sandbox' not in launch_args:
launch_args.append('--no-sandbox')
with open(config_path, 'w') as f:
json.dump(config, f)
return config_path
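    # For example (illustrative), options {"page-size": "A4"} would be written to
    # .mdtopdfcache/config.json as:
    #   {"page_size": "A4", "launch_options": {"args": ["--no-sandbox"]}}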
def _get_html_command(self) -> str:
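        # HTML output is not implemented for this backend; presumably the stub exists to
        # keep the interface symmetrical with _get_pdf_command()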
pass
def _get_pdf_command(self) -> str:
components = [self._mdtopdf_config['mdtopdf_path']]
config_path = self._generate_config_file(self._mdtopdf_config['options'])
components.append(f'--config-file {config_path}')
components.append(str(self._flat_src_file_path))
components.append(f'"{self._slug_for_commands}.pdf"')
command = ' '.join(components)
self.logger.debug(f'PDF generation command: {command}')
return command
def make(self, target: str) -> str:
with spinner(f'Making {target} with md-to-pdf', self.logger, self.quiet, self.debug):
try:
command = self._get_pdf_command()
self.logger.debug('Running the command.')
run(command, shell=True, check=True, stdout=PIPE, stderr=STDOUT)
return f'{self._slug}.{target}'
except CalledProcessError as exception:
raise RuntimeError(f'Build failed: {exception.output.decode()}')
except Exception as exception:
raise type(exception)(f'Build failed: {exception}')
| 33.081633 | 99 | 0.62554 | 373 | 3,242 | 5.158177 | 0.327078 | 0.02183 | 0.034304 | 0.031185 | 0.019751 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.239975 | 3,242 | 97 | 100 | 33.42268 | 0.780844 | 0.0219 | 0 | 0 | 0 | 0 | 0.16977 | 0.035342 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089552 | false | 0.014925 | 0.089552 | 0 | 0.313433 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69cd79a3c79726917baeb1caec48a1c929c2f4cf | 12,859 | py | Python | plottingScripts/PlotTweak.py | JaakkoAhola/LES-scripting | 1ebe99ce4292e58581bf50615cb8e0aa3d0c0af2 | [
"MIT"
] | null | null | null | plottingScripts/PlotTweak.py | JaakkoAhola/LES-scripting | 1ebe99ce4292e58581bf50615cb8e0aa3d0c0af2 | [
"MIT"
] | null | null | null | plottingScripts/PlotTweak.py | JaakkoAhola/LES-scripting | 1ebe99ce4292e58581bf50615cb8e0aa3d0c0af2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 20 13:30:13 2020
@author: Jaakko Ahola, Finnish Meteorological Institute
@licence: MIT licence Copyright
"""
import matplotlib
import numpy
import time
from Data import Data
from Simulation import Simulation
class PlotTweak:
# DELETE
def hideXLabels(ax, xLabelListShowBoolean, xLabelListMajorLineBoolean):
k = 0
for label in ax.xaxis.get_ticklabels():
label.set_visible(xLabelListShowBoolean[k])
k = k + 1
k = 0
for line in ax.xaxis.get_ticklines()[0::2]:
if xLabelListMajorLineBoolean[k]:
line.set_markersize(matplotlib.rcParams["xtick.major.size"])
else:
line.set_markersize(matplotlib.rcParams["xtick.minor.size"])
k= k + 1
return ax
# DELETE
def hideYLabels(ax, param):
k = 0
for label in ax.yaxis.get_ticklabels():
if numpy.mod(k,param) != 0:
label.set_visible(False)
k+=1
return ax
# DELETE
def hideColorbarXLabels(cbar, colorbarLabelListShowBoolean):
k = 0
for label in cbar.ax.xaxis.get_ticklabels():
label.set_visible(colorbarLabelListShowBoolean[k])
k = k + 1
return cbar
# DELETE
    def hideColorbarYLabels(cbar, colorbarLabelListShowBoolean):
        k = 0
        for label in cbar.ax.yaxis.get_ticklabels():
            label.set_visible(colorbarLabelListShowBoolean[k])
            k = k + 1
        return cbar
# DELETE
def setXTicksLabelsAsTimeOld(ax, timeHvalues, tickInterval = 16, unit = "h", startPoint = 0, xLabelListShow = None, xLabelListMajorLine = None, setXlabel = True):
first = timeHvalues[0]
last = timeHvalues[-1]
xticks = numpy.arange(first,last+0.1, 0.5)
#xticks = xticks[::tickInterval]
xticklabels = [ str(int(round(elem,1))) for elem in list(xticks) ]
#print(xticks)
ax.set_xticks( xticks )
ax.set_xticklabels(xticklabels)
timesize = numpy.shape(xticks)[0]
#####################
if xLabelListShow is None:
xLabelListShowBoolean = [False]*timesize
#timesize = numpy.shape(timeHvalues)[0]
for i in range(timesize):
if numpy.mod(i, tickInterval) == 0:
xLabelListShowBoolean[i] = True
else:
xLabelListShowBoolean = Data.getMaskedList( xticks, xLabelListShow)
#############################
if xLabelListMajorLine is None:
xLabelListMajorLineBoolean = [False]*timesize
for i in range(timesize):
if numpy.mod(i, tickInterval) == 0:
xLabelListMajorLineBoolean[i] = True
else:
xLabelListMajorLineBoolean = Data.getMaskedList( xticks, xLabelListMajorLine)
ax = PlotTweak.hideXLabels(ax, xLabelListShowBoolean, xLabelListMajorLineBoolean)
if setXlabel:
ax.set_xlabel(r"$\mathbf{time (" + unit + ")}$")
else:
ax.set_xlabel(None)
ax.set_xlim( first ,last )
return ax #DELETE ENDS
def setXticks(ax, ticks = None, start = 0, end = 8, interval = 0.5):
if ticks is None:
ticks = Data.getIntergerList( start, end, interval)
PlotTweak._setTicks(ax.set_xticks, ticks)
return ticks
    def setYticks(ax, ticks = None, start = 0, end = 1000, interval = 50):
if ticks is None:
ticks = Data.getIntergerList( start, end, interval)
PlotTweak._setTicks(ax.set_yticks, ticks)
return ticks
# axset is in [ax.set_xticks, ax.set_yticks, ]
def _setTicks(axset, ticks):
axset(ticks)
return ticks
def setXLabels(ax, ticks, shownLabels = None, start = 0, end = 8, interval = 2):
shownLabelsBoolean = PlotTweak._setLabels( ax.set_xticklabels, ax.xaxis, ticks, shownLabels, start, end, interval)
return shownLabelsBoolean
def setYLabels(ax, ticks, shownLabels = None, start = 0, end = 8, interval = 2):
shownLabelsBoolean = PlotTweak._setLabels( ax.set_yticklabels, ax.yaxis, ticks, shownLabels, start, end, interval)
return shownLabelsBoolean
def _setLabels(axset, ax_axis, ticks, shownLabels = None, start = 0, end = 8, interval = 2):
axset(ticks)
if shownLabels is None:
shownLabels = Data.getIntergerList( start, end, interval )
shownLabelsBoolean = Data.getMaskedList(ticks, shownLabels)
PlotTweak._hideLabels(ax_axis, shownLabelsBoolean)
return shownLabelsBoolean
    # ax_axis is either ax.xaxis, ax.yaxis, colorbar.ax.xaxis or colorbar.ax.yaxis
def _hideLabels(ax_axis, shownLabelsBoolean):
k = 0
for label in ax_axis.get_ticklabels():
label.set_visible(shownLabelsBoolean[k])
k = k + 1
def hideXTickLabels(ax):
PlotTweak._hideAllTickLabels(ax.get_xticklabels)
def hideYTickLabels(ax):
PlotTweak._hideAllTickLabels(ax.get_yticklabels)
def _hideAllTickLabels(axTicksGetter):
matplotlib.pyplot.setp(axTicksGetter()[:], visible=False)
def setXTickSizes(ax, labelListMajorLineBoolean,
majorFontsize = matplotlib.rcParams["xtick.major.size"],
minorFontSize = matplotlib.rcParams["xtick.minor.size"]):
PlotTweak._setTickSizes(ax.xaxis, labelListMajorLineBoolean, majorFontsize, minorFontSize)
def setYTickSizes(ax, labelListMajorLineBoolean,
majorFontsize = matplotlib.rcParams["ytick.major.size"],
minorFontSize = matplotlib.rcParams["ytick.minor.size"]):
PlotTweak._setTickSizes(ax.yaxis, labelListMajorLineBoolean, majorFontsize, minorFontSize)
    # ax_axis is either ax.xaxis, ax.yaxis, colorbar.ax.xaxis or colorbar.ax.yaxis
def _setTickSizes(ax_axis, labelListMajorLineBoolean,
majorFontsize,
minorFontSize):
k = 0
for line in ax_axis.get_ticklines()[0::2]:
if labelListMajorLineBoolean[k]:
line.set_markersize( majorFontsize )
else:
line.set_markersize(minorFontSize)
k= k + 1
def getUnitLabel(label, unit, useBold = False):
if useBold:
boldingStart = "\mathbf{"
boldingEnd = "}"
else:
boldingStart = ""
boldingEnd = ""
return r"$" +boldingStart + "{" + label + "}{\ } ( " + unit + ")" + boldingEnd + "$"
def setXaxisLabel(ax, label, unit = None, useBold = False):
PlotTweak._setLabel(ax.set_xlabel, label, unit, useBold)
def setYaxisLabel(ax, label, unit = None, useBold = False):
PlotTweak._setLabel(ax.set_ylabel, label, unit, useBold)
def _setLabel(labelPointer, label, unit, useBold):
if unit is not None:
label = PlotTweak.getUnitLabel(label, unit, useBold)
labelPointer(label)
def getLogaritmicTicks(tstart, tend, includeFives = False):
# tstart = -17
# tend = -9
logaritmicTicks = numpy.arange(tstart, tend)
if includeFives:
fives = numpy.arange(tstart+numpy.log10(5), tend)
logaritmicTicks = numpy.sort(numpy.concatenate((logaritmicTicks,fives)))
return logaritmicTicks
def setXLim(ax, start = 0, end = 1):
ax.set_xlim( start ,end )
def setYLim(ax, start = 0, end = 1):
ax.set_ylim( start ,end )
def setAnnotation(ax,
text,
fontsize = None,
xPosition = 0, yPosition = 0,
bbox_props = dict(boxstyle="square,pad=0.1", fc="w", ec="0.5", alpha=0.9)):
ax.annotate( text, xy=(xPosition, yPosition), size=fontsize, bbox = bbox_props)
def setTightLayot(fig):
fig.tight_layout()
def setAxesOff(ax):
ax.axis('off')
def useLegend(ax = None, loc = "best"):
if ax is None:
matplotlib.pyplot.legend(loc = loc)
else:
ax.legend(loc = loc)
def setLegendSimulation(ax, simulationList, loc = "center"):
collectionOfLabelsColors = {}
for simulation in simulationList:
collectionOfLabelsColors[ simulation.getLabel() ] = simulation.getColor()
PlotTweak.setLegend(ax, collectionOfLabelsColors, loc )
def setLegend(ax,
collectionOfLabelsColors,
loc = "center", fontsize = None):
legend_elements = []
for label, color in collectionOfLabelsColors.items():
legend_elements.append( matplotlib.patches.Patch( label=label,
facecolor=color))
ax.legend( handles=legend_elements, loc=loc, frameon = True, framealpha = 1.0, fontsize = fontsize )
def setArtist(ax,
collectionOfLabelsColors,
loc = "center", fontsize = None):
legend_elements = []
for label, color in collectionOfLabelsColors.items():
legend_elements.append( matplotlib.patches.Patch( label=label,
facecolor=color))
artist = ax.legend( handles=legend_elements, loc=loc, frameon = True, framealpha = 1.0, fontsize = fontsize )
ax.add_artist(artist)
# setSuperWrapper
# if xticks given, it overwrites xstart, xend, xinterval when getting ticks
# if yticks given, it overwrites ystart, yend, yinterval when getting ticks
def setSuperWrapper(ax,
xstart = 0, xend = 8, xtickinterval = 0.5, xlabelinterval = 1,
xticks = None,
xShownLabels = None,
xTickMajorFontSize = matplotlib.rcParams["xtick.major.size"],
xTickMinorFontSize = matplotlib.rcParams["xtick.minor.size"],
ystart = 0, yend = 100, ytickinterval = 50, ylabelinterval = 5,
yticks = None,
yShownLabels = None,
yTickMajorFontSize = matplotlib.rcParams["ytick.major.size"],
yTickMinorFontSize = matplotlib.rcParams["ytick.minor.size"],
annotationText = None,
annotationXPosition = 2,
annotationYPosition = 30,
xlabel = None,
xunit = None,
ylabel = None,
yunit = None,
setLegend = False,
useBold = False
):
# set xticks
xticks = PlotTweak.setXticks(ax, ticks = xticks, start = xstart, end = xend, interval = xtickinterval)
# set xlabels
xShownLabelsBoolean = PlotTweak.setXLabels(ax, xticks, shownLabels = xShownLabels, start = xstart, end = xend, interval = xlabelinterval )
# set xtick sizes
PlotTweak.setXTickSizes(ax, xShownLabelsBoolean, majorFontsize= xTickMajorFontSize, minorFontSize=xTickMinorFontSize)
# set yticks
yticks = PlotTweak.setYticks(ax, ticks = yticks, start = ystart, end = yend, interval = ytickinterval)
# set ylabels
yShownLabelsBoolean = PlotTweak.setYLabels(ax, yticks, shownLabels = yShownLabels, start = ystart, end = yend, interval = ylabelinterval )
# set ytick sizes
PlotTweak.setYTickSizes(ax, yShownLabelsBoolean, majorFontsize= yTickMajorFontSize, minorFontSize=yTickMinorFontSize)
# limit axes
PlotTweak.setXLim(ax, start = xstart, end = xend)
PlotTweak.setYLim(ax, start = ystart, end = yend)
# set axes labels
PlotTweak.setXaxisLabel( ax, xlabel, xunit, useBold)
PlotTweak.setYaxisLabel( ax, ylabel, yunit, useBold)
# set legend
        if setLegend:
            PlotTweak.useLegend(ax)  # setLegend() requires a label->color mapping, so use the plain legend helper here
# set annotation for figure
if annotationText is not None:
PlotTweak.setAnnotation(ax, annotationText, xPosition = annotationXPosition, yPosition = annotationYPosition)
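    # Typical call (illustrative values):
    #   PlotTweak.setSuperWrapper(ax,
    #                             xstart=0, xend=8, xtickinterval=0.5, xlabelinterval=2,
    #                             ystart=0, yend=1000, ytickinterval=50, ylabelinterval=100,
    #                             xlabel="time", xunit="h", ylabel="height", yunit="m",
    #                             annotationText="(a) LWP", annotationXPosition=0.5,
    #                             annotationYPosition=900)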
def main():
pass
if __name__ == "__main__":
start = time.time()
main()
end = time.time()
print("Script completed in " + str(round((end - start),0)) + " seconds")
| 38.385075 | 167 | 0.577417 | 1,202 | 12,859 | 6.113977 | 0.21381 | 0.010886 | 0.004763 | 0.006804 | 0.333923 | 0.239897 | 0.211866 | 0.201796 | 0.18574 | 0.166825 | 0 | 0.012158 | 0.328408 | 12,859 | 334 | 168 | 38.5 | 0.838814 | 0.065091 | 0 | 0.234513 | 0 | 0 | 0.023375 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.150442 | false | 0.004425 | 0.022124 | 0 | 0.230089 | 0.004425 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
69cdbff0837f3d21e5dafd54a75dea3bef070906 | 1,723 | py | Python | So You Have Trained a Deep Learning Model/detectObjectsGPTool.py | esrinederland/DevSummit2020 | 09b14929552a6a5297ac7b117ebd5f21cf6eea37 | [
"MIT"
] | 3 | 2021-04-07T14:26:25.000Z | 2021-04-15T14:56:12.000Z | So You Have Trained a Deep Learning Model/detectObjectsGPTool.py | esrinederland/DevSummit2021 | 09b14929552a6a5297ac7b117ebd5f21cf6eea37 | [
"MIT"
] | null | null | null | So You Have Trained a Deep Learning Model/detectObjectsGPTool.py | esrinederland/DevSummit2021 | 09b14929552a6a5297ac7b117ebd5f21cf6eea37 | [
"MIT"
] | null | null | null | import arcpy
import arcgis
import json
## Connect to your GIS Portal
portalUrl = "https://pada.ad.local/" + "portal"
gis = arcgis.gis.GIS(url=portalUrl, profile="portalAdmin", verify_cert=False)
## Get geometry from input parameter
inputFC = arcpy.GetParameterAsText(0)
with arcpy.da.SearchCursor(inputFC, ["SHAPE@JSON"]) as sCursor:
for row in sCursor:
inputGeometry = row[0]
## Get input raster and clip to geometry
inputRaster = arcgis.raster.ImageryLayer("https://pada.ad.local/server/rest/services/Hosted/Imagery_Heerenveen/ImageServer", gis=gis)
rasterClip = arcgis.raster.functions.clip(inputRaster, json.loads(inputGeometry))
rasterClip.save()
## Get Deep Learning Model
modelObject = gis.content.get("054c037f7c1c47a99561410d5efe7b65")
model = arcgis.learn.Model(modelObject)
model.install()
## Set Model arguments
modelArguments = {
"padding": 100,
"batch_size": 4,
"threshold": 0.9,
"return_bboxes": False
}
context = {
"cellSize": 0.3,
"processorType" : "CPU"
}
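## Notes on the values above (this script's choices, not library defaults):
## padding=100 keeps a 100-pixel border per processed tile so objects on tile edges are
## still detected, threshold=0.9 drops low-confidence detections, and the context pins
## the analysis to a 0.3 m cell size on the CPU.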
## Inferencing
outputFL = arcgis.learn.detect_objects(
rasterClip,
model,
model_arguments=modelArguments,
run_nms=True,
confidence_score_field="Confidence",
class_value_field="Class",
max_overlap_ratio=0.3,
context=context,
gis=gis)
## Append new features to existing Feature Service
featureService = arcgis.features.FeatureLayer("https://pada.ad.local/server/rest/services/Hosted/DetectObjects_test2/FeatureServer/0", gis=gis)
result = featureService.edit_features(adds=outputFL.layers[0].query(where="1=1"))
## Delete temp Feature Service
outputFL.delete()
## Set output parameter
stringResult = json.dumps(result)
arcpy.SetParameterAsText(1, stringResult) | 28.716667 | 143 | 0.74231 | 210 | 1,723 | 6.019048 | 0.561905 | 0.018987 | 0.026108 | 0.037975 | 0.063291 | 0.063291 | 0.063291 | 0.063291 | 0 | 0 | 0 | 0.02681 | 0.134068 | 1,723 | 60 | 144 | 28.716667 | 0.820375 | 0.145676 | 0 | 0 | 0 | 0.025 | 0.224897 | 0.022008 | 0.025 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.075 | 0 | 0.075 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |