hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e79009e224114b1ec0ae87c6bd3d54452a3b6720 | 791 | py | Python | story_with_exercises/5_ex_rename_like_a_pro.py | flrs/pycharm_tips_and_tricks | 21a2768f2cc3b6b2af8e672d32c7bd178900adad | [
"MIT"
] | 1 | 2020-02-17T15:08:57.000Z | 2020-02-17T15:08:57.000Z | story_with_exercises/5_ex_rename_like_a_pro.py | flrs/pycharm_tips_and_tricks | 21a2768f2cc3b6b2af8e672d32c7bd178900adad | [
"MIT"
] | null | null | null | story_with_exercises/5_ex_rename_like_a_pro.py | flrs/pycharm_tips_and_tricks | 21a2768f2cc3b6b2af8e672d32c7bd178900adad | [
"MIT"
] | null | null | null | """Rename Like a Pro
Exercise:
The contents of the people_on_meetup variable look more like a zoo. Let's rename the following items:
- variable "people_on_meetup" -> "animals_in_zoo"
- class "Meetup" -> "Zoo"
To rename an item, put the cursor on the item you want to rename and then press Shift+F6 on Windows/Linux
or ⇧+F6 on Mac OS.
"""
people_on_meetup = [
'A tiny horse',
'Mystic Mouse',
'Steg O Saurus',
'Tardi Grade'
]
class Meetup:
def __init__(self, members):
self.members = members
def count_members(self):
return len(self.members)
if __name__ == '__main__':
this_meetup = Meetup(people_on_meetup)
print('Hello, Pythonistas!')
print('We are a great group of {}.'.format(
this_meetup.count_members()
))
| 22.6 | 105 | 0.663717 | 116 | 791 | 4.310345 | 0.586207 | 0.064 | 0.112 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003284 | 0.230089 | 791 | 34 | 106 | 23.264706 | 0.816092 | 0.4311 | 0 | 0 | 0 | 0 | 0.230248 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0 | 0.058824 | 0.235294 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e79061260d07a54089774bc6a61e074894413160 | 832 | py | Python | bot/plugins/trivia/content/flags.py | best-coloc-ever/globibot | a4c4dd8bb3b71bff09bd2e1c4c5ce58ab1bad176 | [
"MIT"
] | 14 | 2016-02-03T16:47:01.000Z | 2019-12-09T14:24:25.000Z | bot/plugins/trivia/content/flags.py | best-coloc-ever/globibot | a4c4dd8bb3b71bff09bd2e1c4c5ce58ab1bad176 | [
"MIT"
] | 11 | 2016-08-19T22:00:25.000Z | 2022-01-13T00:39:48.000Z | bot/plugins/trivia/content/flags.py | best-coloc-ever/globibot | a4c4dd8bb3b71bff09bd2e1c4c5ce58ab1bad176 | [
"MIT"
] | 6 | 2016-08-19T21:30:58.000Z | 2019-09-27T05:24:08.000Z | from .helpers import *
from .behavior import trivia_behavior
from io import BytesIO
DELAY = 20
async def premise(item):
country, image = item
flag_image_url = 'https://www.countries-ofthe-world.com/{}'.format(image)
flag_image = await Utils.fetch(flag_image_url)
return dict(
file_path=BytesIO(flag_image),
filename='flag.png',
content='You have {} seconds to guess the name of that country'.format(DELAY),
)
def resolve(item, answers):
country, _ = item
winner, message = Resolve.fastest(answers, country.lower(), skill='geography')
return winner, dict(content=message)
FlagsTrivia = trivia_behavior(
fetch = Fetch.read_json('flags.json'),
pick = Pick.random_collection,
premise = premise,
query = Query.timed(DELAY),
resolve = resolve,
)
| 24.470588 | 86 | 0.679087 | 104 | 832 | 5.317308 | 0.576923 | 0.065099 | 0.0434 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00303 | 0.206731 | 832 | 33 | 87 | 25.212121 | 0.834848 | 0 | 0 | 0 | 0 | 0 | 0.144231 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e790a3d718923f21aded92fe9214fe4322d6f5f0 | 2,687 | py | Python | pipe_text_wrapper.py | STealthy-and-haSTy/PipeText | 518149cc3183ab9b6ba22be47405677259b7db7a | [
"MIT"
] | 1 | 2020-10-16T08:41:23.000Z | 2020-10-16T08:41:23.000Z | pipe_text_wrapper.py | STealthy-and-haSTy/PipeText | 518149cc3183ab9b6ba22be47405677259b7db7a | [
"MIT"
] | null | null | null | pipe_text_wrapper.py | STealthy-and-haSTy/PipeText | 518149cc3183ab9b6ba22be47405677259b7db7a | [
"MIT"
] | null | null | null | import sublime
import sublime_plugin
### ---------------------------------------------------------------------------
class PipeCommandHistory():
LIST_LIMIT = 50
def __init__(self):
self.storage = []
def push(self, text, temp=False):
self.del_duplicates(text)
self.storage.insert(0, text)
if len(self.storage) > self.LIST_LIMIT:
del self.storage[self.LIST_LIMIT:]
def del_duplicates(self, text):
self.storage = [s for s in self.storage if s != text]
def get(self):
return self.storage
def empty(self):
return len(self.storage) == 0
_pipe_cmd_history = PipeCommandHistory()
### ---------------------------------------------------------------------------
class PipeTextWrapperCommand(sublime_plugin.WindowCommand):
def run(self, working_dir=None):
last_cmd = '' if _pipe_cmd_history.empty() else _pipe_cmd_history.get()[0]
panel = self.window.show_input_panel('shell_cmd', last_cmd,
lambda shell_cmd: self.execute(shell_cmd, working_dir),
None, None)
panel.settings().set('_pipe_cmd_input', True)
panel.settings().set('_pipe_cmd_idx', 0)
panel.run_command('select_all')
def execute(self, shell_cmd, working_dir):
_pipe_cmd_history.push(shell_cmd)
self.window.run_command('pipe_text', {
'shell_cmd': shell_cmd,
'working_dir': working_dir
})
### ---------------------------------------------------------------------------
class PipeTextHistoryCommand(sublime_plugin.TextCommand):
def run(self, edit, prev=False):
history = _pipe_cmd_history.get()
cur_idx = self.view.settings().get("_pipe_cmd_idx", 0)
cur_idx = (cur_idx + (-1 if prev else 1)) % len(history)
self.view.settings().set("_pipe_cmd_idx", cur_idx)
self.view.replace(edit, sublime.Region(0, len(self.view)), history[cur_idx])
self.view.run_command('select_all')
def is_enabled(self, prev=False):
return len(_pipe_cmd_history.get()) > 1
### ---------------------------------------------------------------------------
class PipeTextEventListener(sublime_plugin.EventListener):
def on_query_context(self, view, key, operator, operand, match_all):
if key == 'pipe_text_input':
lhs = view.settings().get('_pipe_cmd_input', False)
rhs = bool(operand)
return lhs == rhs if operator == sublime.OP_EQUAL else lhs != rhs
return None
### ---------------------------------------------------------------------------
| 31.244186 | 100 | 0.534053 | 291 | 2,687 | 4.656357 | 0.268041 | 0.056827 | 0.061993 | 0.037638 | 0.15203 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005278 | 0.224414 | 2,687 | 85 | 101 | 31.611765 | 0.644914 | 0.14105 | 0 | 0 | 0 | 0 | 0.061955 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.04 | 0.06 | 0.44 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
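The `on_query_context` hook above exists so key bindings can target the command-history input panel. A hypothetical `Default.sublime-keymap` entry using that context might look like the sketch below (the keymap file itself is JSON; it is shown inside a Python comment here, and the `up` binding is an assumption, not part of the plugin):

```python
# [
#     {"keys": ["up"], "command": "pipe_text_history", "args": {"prev": true},
#      "context": [{"key": "pipe_text_input", "operator": "equal", "operand": true}]}
# ]
```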
e79113b7e230adcf093bfd7230288a6161d84a7d | 4,663 | py | Python | pytracer/texture/texturemap/__init__.py | zjiayao/pyTracer | c2b4ef299ecbdca1c519059488f7cd2438943ee4 | [
"MIT"
] | 9 | 2017-11-20T18:17:27.000Z | 2022-01-27T23:00:31.000Z | pytracer/texture/texturemap/__init__.py | zjiayao/pyTracer | c2b4ef299ecbdca1c519059488f7cd2438943ee4 | [
"MIT"
] | 4 | 2021-06-08T19:03:51.000Z | 2022-03-11T23:18:44.000Z | pytracer/texture/texturemap/__init__.py | zjiayao/pyTracer | c2b4ef299ecbdca1c519059488f7cd2438943ee4 | [
"MIT"
] | 1 | 2017-11-20T22:48:01.000Z | 2017-11-20T22:48:01.000Z | """
__init__.py
pytracer.texture.texturemap package
Texture map definitions.
Created by Jiayao on Aug 5, 2017
Modified on Aug 14, 2017
"""
from __future__ import absolute_import
import math
from abc import (ABCMeta, abstractmethod)
from pytracer import *
import pytracer.geometry as geo
import pytracer.transform as trans
__all__ = ['TextureMapping2D', 'TextureMapping3D', 'SphericalMapping2D', 'UVMapping2D',
'CylindricalMapping2D', 'PlannarMapping2D', 'IdentityMapping3D']
class TextureMapping2D(object, metaclass=ABCMeta):
def __repr__(self):
return "{}".format(self.__class__)
@abstractmethod
def __call__(self, dg: 'geo.DifferentialGeometry') -> [FLOAT]:
"""
Mapping maps the point given by dg to
(s, t) texture coordinates.
Returning a list of `FLOAT`s:
[s, t, dsdx, dtdx, dsdy, dtdy]
"""
raise NotImplementedError('src.core.texture.{}.map(): abstract method '
'called'.format(self.__class__))
class UVMapping2D(TextureMapping2D):
def __init__(self, su: FLOAT, sv: FLOAT, du: FLOAT, dv: FLOAT):
self.su = su
self.sv = sv
self.du = du
self.dv = dv
def __call__(self, dg: 'geo.DifferentialGeometry') -> [FLOAT]:
s = self.su * dg.u + self.du
t = self.sv * dg.v + self.dv
dsdx = self.su * dg.dudx
dtdx = self.sv * dg.dvdx
dsdy = self.su * dg.dudy
dtdy = self.sv * dg.dvdy
return [s, t, dsdx, dtdx, dsdy, dtdy]
class SphericalMapping2D(TextureMapping2D):
def __init__(self, w2t: 'trans.Transform'):
self.w2t = w2t
def __sphere(self, p: 'geo.Point') -> [FLOAT]:
"""
		Spherical mapping for a single
point. Returns list
[s, t].
"""
v = geo.normalize(self.w2t(p) - geo.Point(0., 0., 0.))
theta = geo.spherical_theta(v)
phi = geo.spherical_phi(v)
return [theta * INV_PI, phi * INV_2PI]
def __call__(self, dg: 'geo.DifferentialGeometry') -> [FLOAT]:
s, t = self.__sphere(dg.p)
# compute texture coordinate
# differentials
# using forward differencing
delta = .1
sx, tx = self.__sphere(dg.p + delta * dg.dpdx)
dsdx = (sx - s) / delta
dtdx = (tx - t) / delta
if dtdx > .5:
dtdx = 1. - dtdx
elif dtdx < -.5:
dtdx = -(dtdx + 1.)
sy, ty = self.__sphere(dg.p + delta * dg.dpdy)
dsdy = (sy - s) / delta
		dtdy = (ty - t) / delta
if dtdy > .5:
dtdy = 1. - dtdy
elif dtdy < -.5:
dtdy = -(dtdy + 1.)
return [s, t, dsdx, dtdx, dsdy, dtdy]
class CylindricalMapping2D(TextureMapping2D):
def __init__(self, w2t: 'trans.Transform'):
self.w2t = w2t
def __cylinder(self, p: 'geo.Point') -> [FLOAT]:
"""
		Cylindrical mapping for a single
point. Returns list
[s, t].
"""
v = geo.normalize(self.w2t(p) - geo.Point(0., 0., 0.))
		return [(PI + math.atan2(v.y, v.x)) * INV_2PI, v.z]
def __call__(self, dg: 'geo.DifferentialGeometry') -> [FLOAT]:
s, t = self.__cylinder(dg.p)
# compute texture coordinate
# differentials
# using forward differencing
delta = .1
sx, tx = self.__cylinder(dg.p + delta * dg.dpdx)
dsdx = (sx - s) / delta
dtdx = (tx - t) / delta
if dtdx > .5:
dtdx = 1. - dtdx
elif dtdx < -.5:
dtdx = -(dtdx + 1.)
sy, ty = self.__cylinder(dg.p + delta * dg.dpdy)
dsdy = (sy - s) / delta
		dtdy = (ty - t) / delta
if dtdy > .5:
dtdy = 1. - dtdy
elif dtdy < -.5:
dtdy = -(dtdy + 1.)
return [s, t, dsdx, dtdx, dsdy, dtdy]
class PlannarMapping2D(TextureMapping2D):
def __init__(self, vs: 'geo.Vector', vt: 'geo.Vector', ds: FLOAT = 0., dt: FLOAT = 0.):
self.vs = vs
self.vt = vt
self.ds = ds
self.dt = dt
def __call__(self, dg: 'geo.DifferentialGeometry') -> [FLOAT]:
v = dg.p - geo.Point(0., 0., 0.)
return [self.ds + v.dot(self.vs),
self.dt + v.dot(self.vt),
dg.dpdx.dot(self.vs),
dg.dpdx.dot(self.vt),
dg.dpdy.dot(self.vs),
dg.dpdy.dot(self.vt)]
class TextureMapping3D(object, metaclass=ABCMeta):
"""
TextureMapping3D Class
Base class for 3D texture mappings
"""
def __repr__(self):
return "{}".format(self.__class__)
@abstractmethod
def __call__(self, dg: 'geo.DifferentialGeometry') -> ['geo.Point', 'geo.Vector', 'geo.Vector']:
"""
Mapping 3D point to texture
Returns a list:
[p, dpdx, dpdy]
where p is the mapped point, dpdx, dpdy
are mapped derivatives.
"""
raise NotImplementedError('src.core.texture.{}.map(): abstract method '
'called'.format(self.__class__))
class IdentityMapping3D(TextureMapping3D):
def __init__(self, w2t: 'trans.Transform'):
self.w2t = w2t
def __call__(self, dg: 'geo.DifferentialGeometry') -> ['geo.Point', 'geo.Vector', 'geo.Vector']:
return [self.w2t(dg.p), self.w2t(dg.dpdx), self.w2t(dg.dpdy)]
| 25.620879 | 97 | 0.633712 | 645 | 4,663 | 4.417054 | 0.206202 | 0.027027 | 0.027027 | 0.031941 | 0.538084 | 0.525448 | 0.509302 | 0.484731 | 0.45981 | 0.45981 | 0 | 0.021415 | 0.208878 | 4,663 | 181 | 98 | 25.762431 | 0.750881 | 0.149689 | 0 | 0.519231 | 0 | 0 | 0.135484 | 0.056774 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.048077 | 0.028846 | 0.355769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
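For reference, `SphericalMapping2D` above implements the standard sphere parameterization and estimates texture-space derivatives by forward differencing with a fixed step; the `> .5` branches unwrap `t` across the azimuthal seam. Restated in formulas (nothing beyond what the code computes):

```latex
s = \frac{\theta}{\pi}, \qquad t = \frac{\phi}{2\pi}, \qquad
\frac{\partial s}{\partial x} \approx \frac{s(p + \Delta\,\partial p/\partial x) - s(p)}{\Delta},
\quad \Delta = 0.1
```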
e794ec0a4b84110bf4cfe442ef69d62eb12a32aa | 5,780 | py | Python | lib/task/worker.py | mrmansano/sublime-ycmd | fece62f0ce4e9cbf96ed8ba07f5cecb24b21427e | [
"MIT"
] | 12 | 2018-01-24T20:58:10.000Z | 2021-12-21T15:02:10.000Z | lib/task/worker.py | mrmansano/sublime-ycmd | fece62f0ce4e9cbf96ed8ba07f5cecb24b21427e | [
"MIT"
] | 4 | 2018-01-13T14:39:45.000Z | 2020-11-25T00:05:27.000Z | lib/task/worker.py | mrmansano/sublime-ycmd | fece62f0ce4e9cbf96ed8ba07f5cecb24b21427e | [
"MIT"
] | 2 | 2018-10-23T17:13:44.000Z | 2019-05-12T04:10:17.000Z | #!/usr/bin/env python3
'''
lib/task/worker.py
Task pool worker thread. Meant for internal use only.
Runs a thread to process items in a task pool. The class itself does not
inherit from `threading.Thread` directly. Instead, a helper function is exposed
for use in a thread target.
Users should not need to access this. Task pools will generate and manage
workers by itself.
'''
import queue
import logging
import threading
# for type annotations only:
from ..task.task import Task # noqa: F401
logger = logging.getLogger('sublime-ycmd.' + __name__)
def spawn_worker(pool, name=None):
if name is not None and not isinstance(name, str):
raise TypeError('name must be a str: %r' % (name))
worker_instance = Worker(pool)
def run_worker():
try:
worker_instance.run()
except Exception as e:
logger.error(
'unhandled exception during worker thread loop: %r', e,
)
# explicitly delete references since worker is about to exit:
worker_instance.clear()
worker_thread = threading.Thread(target=run_worker, name=name)
worker_thread.daemon = True
worker_instance.handle = worker_thread
logger.debug('created worker: %r', worker_instance)
worker_thread.start()
return worker_instance
class Worker(object):
'''
Worker thread abstraction class.
Defines a worker unit that runs an infinite loop, processing tasks from a
task pool.
This class is compatible with (i.e. can inherit from) `threading.Thread`.
It is deliberately left as a plain object though.
This class does not use locking. It is expected that the owners will.
'''
def __init__(self, pool, handle=None):
self._pool = pool # type: Pool
self._handle = None # type: threading.Thread
self.handle = handle
def run(self):
'''
Starts the worker thread, running an infinite loop waiting for jobs.
This should be run on an alternate thread, as it will block.
'''
task_queue = self.pool.queue # type: queue.Queue
logger.debug('task worker starting: %r', self)
while True:
# explicitly specify `block`, in case the queue has custom settings
task = task_queue.get(block=True) # type: Task
if task is not None:
# NOTE : Tasks should catch their own exceptions.
try:
task.run()
except Exception as e:
logger.error(
'exception during task execution: %r',
e, exc_info=True,
)
# explicitly clear reference to task
del task
continue
# task is none, so check if a shutdown is requested
if not self.pool.running:
logger.debug('task pool has stopped running, exit loop')
# pass on the signal to any other worker threads
try:
task_queue.put(None, block=True, timeout=1)
except queue.Full:
logger.warning(
'task queue is full, '
'cannot signal other workers to exit'
)
break
logger.warning('unhandled task on worker thread: %r', task)
logger.debug('task worker exiting: %r', self)
def join(self, timeout=None):
'''
Joins the underlying thread for this worker.
If `timeout` is omitted, this will block indefinitely until the thread
has exited.
If `timeout` is provided, it should be the maximum number of seconds to
wait until returning. If the thread is still alive after the timeout
expires, a `TimeoutError` will be raised.
'''
handle = self._handle # type: threading.Thread
if not handle:
# worker is already dead
return
handle.join(timeout=timeout)
if handle.is_alive():
timeout_desc = (
' after %rs' % (timeout) if timeout is not None else ''
)
raise TimeoutError('thread did not exit%s' % (timeout_desc))
def clear(self):
'''
Clears the locally held reference to the task pool and thread handle.
'''
self._pool = None
self._handle = None
@property
def handle(self):
'''
Retrieves the currently held thread handle, if any.
'''
return self._handle
@handle.setter
def handle(self, handle):
'''
Sets the thread handle for the worker.
'''
if handle is None:
# clear state
self._handle = None
return
if handle is not None and not isinstance(handle, threading.Thread):
raise TypeError(
'thread handle must be a threading.Thread: %r' % (handle)
)
self._handle = handle
@property
def pool(self):
'''
Retrieves the parent task pool.
'''
return self._pool
@property
def name(self):
'''
Retrieves the name from the thread handle, if available.
'''
if self._handle:
return self._handle.name
return None
@name.setter
def name(self, name):
'''
Sets the name of the held thread handle.
'''
if self._handle:
self._handle.name = name
# else, meh, whatever
def __repr__(self):
return '%s(%r)' % ('Worker', {
'handle': self.handle,
'name': self.name,
'pool': self.pool,
})
| 28.613861 | 79 | 0.569896 | 686 | 5,780 | 4.736152 | 0.297376 | 0.040012 | 0.024623 | 0.016005 | 0.035088 | 0.035088 | 0.019698 | 0 | 0 | 0 | 0 | 0.001335 | 0.351903 | 5,780 | 201 | 80 | 28.756219 | 0.86599 | 0.329412 | 0 | 0.171717 | 0 | 0 | 0.116039 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121212 | false | 0 | 0.040404 | 0.010101 | 0.252525 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
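A minimal sketch of driving `spawn_worker` above. `MiniPool` is a hypothetical stand-in for the real task pool (the worker loop only reads a `queue` attribute and a `running` flag), and a `None` item is the shutdown sentinel the loop already understands; the import path is assumed from the header:

```python
import queue
from lib.task.worker import spawn_worker  # path assumed from the header above

class MiniPool:
    """Hypothetical pool exposing only what Worker.run() reads."""
    def __init__(self):
        self.queue = queue.Queue()  # work items; None is the shutdown sentinel
        self.running = True

class PrintTask:
    def run(self):
        print('task ran')

pool = MiniPool()
worker = spawn_worker(pool, name='demo-worker')
pool.queue.put(PrintTask())
pool.running = False
pool.queue.put(None)  # wake the worker so it notices the stop request
worker.join(timeout=5)
```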
e79e7ea88c51825b1ee8f322458db326ca24c050 | 1,129 | py | Python | datasets/augmentations.py | sithu31296/re_identification | 28c2cf32c6c8c9d79330e1419a7156fe10d8ac95 | [
"MIT"
] | null | null | null | datasets/augmentations.py | sithu31296/re_identification | 28c2cf32c6c8c9d79330e1419a7156fe10d8ac95 | [
"MIT"
] | null | null | null | datasets/augmentations.py | sithu31296/re_identification | 28c2cf32c6c8c9d79330e1419a7156fe10d8ac95 | [
"MIT"
] | null | null | null | from torchvision import transforms
def get_transforms(cfg):
train_transform = transforms.Compose([
transforms.Resize(cfg['TRAIN']['IMG_SIZE']),
transforms.ColorJitter(cfg['TRAIN']['AUG']['B_P'], cfg['TRAIN']['AUG']['C_P'], cfg['TRAIN']['AUG']['S_P'], cfg['TRAIN']['AUG']['H_P']),
transforms.RandomGrayscale(cfg['TRAIN']['AUG']['G_P']), # Local Grayscale Transformation https://arxiv.org/abs/2101.08533
transforms.Pad(10),
transforms.RandomCrop(cfg['TRAIN']['IMG_SIZE']),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
transforms.RandomErasing(cfg['TRAIN']['AUG']['RE_P']), # Random Erasing Data Augmentation https://arxiv.org/pdf/1708.04896
])
test_transform = transforms.Compose([
transforms.Resize(cfg['EVAL']['IMG_SIZE']),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
return train_transform, test_transform | 51.318182 | 169 | 0.59876 | 131 | 1,129 | 5.053435 | 0.427481 | 0.108761 | 0.099698 | 0.054381 | 0.391239 | 0.320242 | 0.18429 | 0.18429 | 0.18429 | 0.18429 | 0 | 0.076923 | 0.217006 | 1,129 | 22 | 170 | 51.318182 | 0.671946 | 0.11426 | 0 | 0.210526 | 0 | 0 | 0.10521 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.052632 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
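`get_transforms` reads a nested config dict; a hypothetical minimal `cfg` covering every key the function touches could look like this (the sizes and probabilities are illustrative, not the project's defaults, and `get_transforms` is assumed importable from the module above):

```python
cfg = {
    'TRAIN': {
        'IMG_SIZE': (256, 128),
        'AUG': {
            'B_P': 0.4, 'C_P': 0.4, 'S_P': 0.4, 'H_P': 0.1,  # ColorJitter strengths
            'G_P': 0.05,  # Local Grayscale probability
            'RE_P': 0.5,  # Random Erasing probability
        },
    },
    'EVAL': {'IMG_SIZE': (256, 128)},
}
train_tf, test_tf = get_transforms(cfg)
```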
e79f9590300eb19ba909c0b5322d32b4049e33b7 | 2,056 | py | Python | dora/tests/test_xp.py | kingjr/dora | f70fab1620c6cad6fc094be15ab22994bd08dd01 | [
"MIT"
] | 98 | 2021-09-21T14:27:21.000Z | 2022-03-18T17:46:45.000Z | dora/tests/test_xp.py | kingjr/dora | f70fab1620c6cad6fc094be15ab22994bd08dd01 | [
"MIT"
] | 6 | 2021-09-22T13:29:48.000Z | 2022-03-14T16:45:30.000Z | dora/tests/test_xp.py | kingjr/dora | f70fab1620c6cad6fc094be15ab22994bd08dd01 | [
"MIT"
] | 5 | 2021-09-21T12:42:01.000Z | 2022-01-27T17:22:17.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import torch
import pytest
from ..conf import DoraConfig
from ..xp import XP
class _Cfg:
pass
def get_dora(tmpdir: Path):
return DoraConfig(dir=Path(tmpdir), exclude=["a"])
def test_dora_dir_abs():
dora = get_dora('outputs')
assert dora.dir.is_absolute()
dora.dir = 'plop'
assert dora.dir.is_absolute()
def test_sig(tmpdir):
tmpdir = Path(str(tmpdir))
dora = get_dora(tmpdir)
xp = XP(dora=dora, cfg=_Cfg(), argv=[], delta=[("a", 5), ("b", 12)])
assert xp.sig is not None
xp2 = XP(dora=dora, cfg=_Cfg(), argv=[], delta=[("a", 12), ("b", 12)])
assert xp.sig == xp2.sig
xp3 = XP(dora=dora, cfg=_Cfg(), argv=[], delta=[("a", 12), ("b", 24)])
assert xp.sig != xp3.sig
def test_properties(tmpdir):
tmpdir = Path(str(tmpdir))
dora = get_dora(tmpdir)
xp = XP(dora=dora, cfg=_Cfg(), argv=[], delta=[("a", 5), ("b", 12)])
xp.folder.relative_to(tmpdir)
xp.submitit.relative_to(tmpdir)
xp.rendezvous_file.relative_to(tmpdir)
xp.history.relative_to(tmpdir)
xp._argv_cache.relative_to(tmpdir)
def test_link(tmpdir):
tmpdir = Path(str(tmpdir))
dora = get_dora(tmpdir)
xp = XP(dora=dora, cfg=_Cfg(), argv=[], delta=[("a", 5), ("b", 12)])
xp.folder.mkdir(parents=True)
xp.link.push_metrics({"plop": 42})
xp = XP(dora=dora, cfg=_Cfg(), argv=[], delta=[("a", 5), ("b", 12)])
assert xp.link.history == []
xp.link.load()
assert xp.link.history == [{"plop": 42}]
val = [{"plok": 43, "out": Path("plop"), "mat": torch.zeros(5)}]
xp.link.update_history(val)
assert xp.link.history == [{"plok": 43, "out": "plop", "mat": [0.] * 5}]
with pytest.raises(ValueError):
xp.link.update_history({"plop": 42})
with pytest.raises(ValueError):
xp.link.update_history([{"plop": object()}])
| 26.701299 | 76 | 0.618191 | 305 | 2,056 | 4.062295 | 0.298361 | 0.038741 | 0.048426 | 0.062954 | 0.40678 | 0.356739 | 0.356739 | 0.356739 | 0.356739 | 0.277643 | 0 | 0.022236 | 0.190661 | 2,056 | 76 | 77 | 27.052632 | 0.722356 | 0.089981 | 0 | 0.285714 | 0 | 0 | 0.036461 | 0 | 0 | 0 | 0 | 0 | 0.163265 | 1 | 0.102041 | false | 0.020408 | 0.102041 | 0.020408 | 0.244898 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7a093439e75bee53c35c1b7acda09ddedee4b02 | 1,897 | py | Python | pyqtgraph_extensions/examples/demo_axis_alignment.py | draustin/pyqtgraph_extensions | 9f53756bdab8c61749c3596d40024971d77c893a | [
"MIT"
] | 10 | 2019-05-22T17:10:07.000Z | 2022-02-09T08:14:28.000Z | pyqtgraph_extensions/examples/demo_axis_alignment.py | draustin/pyqtgraph_extensions | 9f53756bdab8c61749c3596d40024971d77c893a | [
"MIT"
] | 3 | 2020-06-09T22:36:43.000Z | 2021-07-19T21:31:57.000Z | pyqtgraph_extensions/examples/demo_axis_alignment.py | draustin/pyqtgraph_extensions | 9f53756bdab8c61749c3596d40024971d77c893a | [
"MIT"
] | null | null | null | """Show how multiple AlignedPlotItems have aligned AxisItems by using their parent's graphics layout."""
import sys
from textwrap import wrap
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph_extensions as pgx
if QtGui.QApplication.instance() is None:
qapp=QtGui.QApplication(sys.argv)
else:
# Presumably running in a GUI with event QApplication already created
qapp=None
long_label = 'multiline<br>axis label<br>(e.g. complex units)'
# To hold AlignedPlotItems, need to use the extended version of GraphicsLayout/GraphicsLayoutWidget.
glwx=pgx.GraphicsLayoutWidget()
glwx.addLabel('<br>'.join(wrap("<em>pyqtgraph PlotItem</em> - since the label of the left axis of the first PlotItem is "
                               "two lines, the left axes of the PlotItems aren't aligned.", 40)))
glwx.addHorizontalSpacer(100)
glwx.addLabel('<br>'.join(wrap("<em>pyqtgraph_extensions AlignedPlotItem</em> - because they use their parent's layout "
                               "grid for their components (axes, title, ViewBox), these components are aligned.", 40)))
glwx.nextRow()
# Make left column showing pyqtgraph PlotItems.
glo=pg.GraphicsLayout()
glwx.addItem(glo)
plt1=glo.addPlot(labels={'left':long_label, 'bottom': 'x'},title='PlotItem 1')
glo.nextRow()
plt2=glo.addPlot(labels={'left':'y (units)','bottom':'x'},title='PlotItem 2')
glwx.nextColumn()
# Make right column showing pyqtgraph_extensions AlignedPlotItems.
glx=pgx.GraphicsLayout()
glwx.addItem(glx)
aplt1=glx.addAlignedPlot(labels={'left':long_label, 'bottom': 'x'},title='AlignedPlotItem 1')
# aplt1 takes up 4 rows (title, top axis, view box, and bottom axis).
glx.nextRows() # equivalent to 4 calls glx.nextRow()
aplt2=glx.addAlignedPlot(labels={'left':'y (units)','bottom':'x'},title='AlignedPlotItem 2')
glwx.resize(800,400)
glwx.show()
if qapp is not None:
sys.exit(qapp.exec_())
| 43.113636 | 120 | 0.732736 | 265 | 1,897 | 5.218868 | 0.483019 | 0.028923 | 0.034707 | 0.02603 | 0.133044 | 0.133044 | 0.133044 | 0 | 0 | 0 | 0 | 0.014697 | 0.139167 | 1,897 | 43 | 121 | 44.116279 | 0.832211 | 0.253558 | 0 | 0 | 0 | 0.03125 | 0.340925 | 0.017082 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.15625 | 0 | 0.15625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7a16d2263756e23b3fd10f42e7ec1fd27b2c6d0 | 377 | py | Python | generators/cfg.py | Zarux/Steam-friend-graph-V2 | 1670a7e49904812b4f86b0d2590a25d77c05c6ee | [
"MIT"
] | null | null | null | generators/cfg.py | Zarux/Steam-friend-graph-V2 | 1670a7e49904812b4f86b0d2590a25d77c05c6ee | [
"MIT"
] | 6 | 2021-03-09T09:46:03.000Z | 2022-02-26T12:28:11.000Z | generators/cfg.py | Zarux/Steam-friend-graph-V2 | 1670a7e49904812b4f86b0d2590a25d77c05c6ee | [
"MIT"
] | null | null | null | import json
class Config:
api_key = None
db_host = None
db_pw = None
db_user = None
db_table_profile = None
db_table_friends = None
db = None
def __init__(self):
with open('../cfg/cfg.json', 'r') as f:
config = json.loads(f.read())
for name, value in config.items():
setattr(self, name, value)
| 20.944444 | 47 | 0.559682 | 52 | 377 | 3.826923 | 0.596154 | 0.180905 | 0.110553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.334218 | 377 | 17 | 48 | 22.176471 | 0.792829 | 0 | 0 | 0 | 0 | 0 | 0.04244 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.714286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
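Since `Config.__init__` simply `setattr`s every key of the JSON file onto the instance, a matching `../cfg/cfg.json` could look like the sketch below (placeholder values, not real credentials):

```python
# hypothetical ../cfg/cfg.json contents:
# {
#   "api_key": "YOUR_STEAM_API_KEY",
#   "db_host": "localhost",
#   "db_user": "grapher",
#   "db_pw": "secret",
#   "db_table_profile": "profiles",
#   "db_table_friends": "friends"
# }
cfg = Config()
print(cfg.db_host)  # -> "localhost"
```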
e7a5af266194fe6108245ee31d8f2544211c0ebb | 2,518 | py | Python | __init__.py | devsysenv/usr | 989ad012b790b6d20f8dae90f0724109cd27b761 | [
"MIT"
] | null | null | null | __init__.py | devsysenv/usr | 989ad012b790b6d20f8dae90f0724109cd27b761 | [
"MIT"
] | null | null | null | __init__.py | devsysenv/usr | 989ad012b790b6d20f8dae90f0724109cd27b761 | [
"MIT"
] | 1 | 2022-03-09T12:24:37.000Z | 2022-03-09T12:24:37.000Z | #!/usr/bin/env python
"""User package initialization module."""
import os
import sys
import dselib
def _context(varfile=None):
"""returns the DSE context object for this script."""
try:
myself = __file__
except NameError:
myself = sys.argv[0]
return dselib.DSEContext(myself, varfile)
_me = _context('user')
def init():
"""usr.init() method
init() is an optional function that, if present, will be invoked at the
start of DSE initialization.
"""
print('DSE_USER: pre-initialization for', dselib.GetDSEUser())
dseDefaults = dselib.GetDSESystemDefaults()
# Check user.def[DSE_HOST], sys.platform, os.name and the root for environment variables to init
sectionsToLoad = [dselib.GetDSEHost(), sys.platform, os.name, None]
# If DSE_PROJECT is defined, put that first in the section list since PROJECT has highest precedence
if dselib.GetDSEProject():
sectionsToLoad.insert(0, dselib.GetDSEProject())
for section in sectionsToLoad:
# Load all the variables in 'section' to the environment (unless they are already there)
dseDefaults.userenv.config.loadSectionToEnv(section)
dselib.AddElementToSearchPath(_me.whereami(), 1, 1)
if os.name == 'nt':
print('DSE_USER: initializing for Windows OS ...')
os.system(f"doskey.exe /macrofile={os.path.join(_me.whereami(), 'p', 'cmd', 'doskey.txt')}")
if dselib.GetDSEProject():
projmacros = _me.whereami() / 'projects' / f"{dselib.GetDSEProject()}-doskey.txt"
if projmacros.is_file():
_me.logger.info(f"Adding project macros from {projmacros}")
os.system(f"doskey.exe /macrofile={projmacros}")
# on posix systems, add symbolic links to the Python scripts w/o the .py
if os.name == 'posix':
_me.logger.debug(f"Adding symbolic links to Python scripts in {_me.whereami()}")
# dselib.pysymlinkdir(_me.whereami(), None, ['grep.py', 'which.py'])
def post():
"""usr.post() method
post() is an optional function that, if present, will be invoked at the
    end of DSE initialization. This is not normally used, but is provided in
    case you need to override something that init() did."""
print(f'{dselib.GetDSEUser()}: User init post routine.')
if __name__ == "__main__":
print('DSE User Package.')
print('This module is not directly callable.')
sys.exit(1)
| 32.282051 | 105 | 0.645751 | 317 | 2,518 | 5.044164 | 0.44795 | 0.03127 | 0.022514 | 0.025016 | 0.097561 | 0.097561 | 0.06379 | 0.06379 | 0.06379 | 0.06379 | 0 | 0.002604 | 0.23749 | 2,518 | 77 | 106 | 32.701299 | 0.830208 | 0.339158 | 0 | 0.057143 | 0 | 0.028571 | 0.289714 | 0.078125 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.085714 | 0 | 0.2 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7a69e21b01e752968007e111248ff8bffeec7d2 | 3,833 | py | Python | migrations/utils/sql_enum_migrator.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
] | 23 | 2022-03-17T12:24:09.000Z | 2022-03-31T09:13:30.000Z | migrations/utils/sql_enum_migrator.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
] | 18 | 2022-03-21T08:17:44.000Z | 2022-03-30T12:42:30.000Z | migrations/utils/sql_enum_migrator.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
] | 16 | 2022-03-17T12:24:14.000Z | 2022-03-31T12:15:12.000Z | """
OpenVINO DL Workbench
Helper class for migrating enums in the database
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Tuple, Set
import sqlalchemy.engine
from alembic import op
from sqlalchemy import Enum
class SQLEnumMigrator:
from_enum: Enum
new_enum: Enum
to_enum: Enum
enable_enum_check = False
table_column_pairs: Tuple[Tuple[str, str]]
def __init__(self,
# ((table_name, column_name))
table_column_pairs: Tuple[Tuple[str, str], ...],
enum_name: str,
from_types: Tuple[str, ...],
to_types: Tuple[str, ...]):
self.table_column_pairs = table_column_pairs
self.from_enum = Enum(*from_types, name=enum_name)
self.to_enum = Enum(*{*to_types, *from_types}, name=f'tmp_{enum_name}')
self.new_enum = Enum(*to_types, name=enum_name)
def upgrade(self):
self._migrate(self.from_enum, self.to_enum, self.new_enum)
def downgrade(self):
self._migrate(self.new_enum, self.to_enum, self.from_enum)
def _migrate(self, from_enum: Enum, tmp_enum: Enum, to_enum: Enum):
if self.enable_enum_check:
self._check_enum_values(op.get_bind())
# create a temporary "tmp_..." type
tmp_enum.create(op.get_bind(), checkfirst=False)
# assign columns to a tmp type
for [table_name, column_name] in self.table_column_pairs:
op.execute(f'ALTER TABLE {table_name} ALTER COLUMN {column_name} TYPE {tmp_enum.name}'
f' USING {column_name}::text::{tmp_enum.name}')
# drop old enum
from_enum.drop(op.get_bind(), checkfirst=False)
# Create new enum
to_enum.create(op.get_bind(), checkfirst=False)
# assign columns to a new enum
for [table_name, column_name] in self.table_column_pairs:
op.execute(f'ALTER TABLE {table_name} ALTER COLUMN {column_name} TYPE {to_enum.name}'
f' USING {column_name}::text::{to_enum.name}')
# drop tmp enum
tmp_enum.drop(op.get_bind(), checkfirst=False)
@staticmethod
def _get_enum_values(enum_name: str, connection: sqlalchemy.engine.Connection) -> Set[str]:
enum_values = next(iter(connection.execute(f'SELECT enum_range(NULL::{enum_name})')))
enum_values = enum_values[0].strip('{}').split(',')
return set(enum_values)
def _check_enum_values(self, connection: sqlalchemy.engine.Connection) -> None:
db_enum_values = self._get_enum_values(self.from_enum.name, connection)
migration_enum_values = set(self.from_enum.enums)
missing_db_enum_values = db_enum_values - migration_enum_values
if missing_db_enum_values:
raise ValueError(
f'Old enum tuple for {self.from_enum.name} has missing values: {missing_db_enum_values}. '
f'Please add them to the migration.'
)
excess_migration_enum_values = migration_enum_values - db_enum_values
if excess_migration_enum_values:
raise ValueError(
f'Old enum tuple for {self.from_enum.name} has excess values: {excess_migration_enum_values}. '
f'Please remove them from the migration.'
)
| 38.717172 | 111 | 0.66658 | 526 | 3,833 | 4.614068 | 0.262357 | 0.082406 | 0.039555 | 0.031314 | 0.310672 | 0.247219 | 0.247219 | 0.171405 | 0.171405 | 0.171405 | 0 | 0.003091 | 0.240282 | 3,833 | 98 | 112 | 39.112245 | 0.830357 | 0.206366 | 0 | 0.071429 | 0 | 0 | 0.176393 | 0.06565 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.071429 | 0 | 0.303571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
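A sketch of how `SQLEnumMigrator` would typically be wired into an Alembic migration script; the table, column, and enum values below are hypothetical:

```python
# hypothetical migration adding a 'cancelled' member to statusenum
migrator = SQLEnumMigrator(
    table_column_pairs=(('jobs', 'status'),),
    enum_name='statusenum',
    from_types=('queued', 'running', 'done'),
    to_types=('queued', 'running', 'done', 'cancelled'),
)

def upgrade():
    migrator.upgrade()

def downgrade():
    # note: the cast back fails if any row still holds 'cancelled'
    migrator.downgrade()
```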
e7a75ab051e3341f6c0083af645a52d0c765ee59 | 4,542 | py | Python | post_process/box_sphere_SMC_NSC.py | Milad-Rakhsha/Friction-Contact | 59d17b231c5dd764c741c941e5443141d43ec7e8 | [
"BSD-3-Clause"
] | null | null | null | post_process/box_sphere_SMC_NSC.py | Milad-Rakhsha/Friction-Contact | 59d17b231c5dd764c741c941e5443141d43ec7e8 | [
"BSD-3-Clause"
] | null | null | null | post_process/box_sphere_SMC_NSC.py | Milad-Rakhsha/Friction-Contact | 59d17b231c5dd764c741c941e5443141d43ec7e8 | [
"BSD-3-Clause"
] | null | null | null | import csv,os,sys
import subprocess,re
import matplotlib
#matplotlib.use('TkAgg')
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from decimal import Decimal
from collections import OrderedDict
from matplotlib.ticker import FormatStrFormatter
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
matplotlib.rcParams.update({'font.size': 18})
plt.rc('xtick',labelsize=24)
plt.rc('ytick',labelsize=24)
MARKERSIZE=5
path_DEM = str(sys.argv[1])
path_DVI_chrono = str(sys.argv[2])
path_DVI_python = str(sys.argv[3])
def prepare(path, prefix, suffix, prefix2, suffix2, pad):
cmd=r'ls %s/%s* | wc -l '%(path,prefix)
print(cmd)
process = subprocess.check_output(cmd, shell=True)
frame=int(process)
dt=0.5/frame
    # OUT columns: [t, (Fn, Ft) for contacts 1..5, x, y, z, vx, vy, vz]
    OUT = np.zeros((frame, 17))
for i in range(1,frame):
if (pad):
i_frame="%03d"%i
else:
i_frame=i
FILE=path+"/"+ prefix +str(i_frame)+ suffix
table = pd.read_csv(FILE)
N_SMC=table["bi"].shape[0]
OUT[i,0]=i*dt
for contact in range(0,N_SMC):
c_i=table["bi"][contact]
c_j=table["bj"][contact]
#make sure i=0 and j!=0
if(c_j==0):
c_j=c_i
c_i=0
OUT[i,c_j*2-1]=table['Fn'][contact]
OUT[i,c_j*2]=table['Ft'][contact]
FILE2=path+"/"+ prefix2 +str(i_frame)+ suffix2
table2 = pd.read_csv(FILE2)
OUT[i,N_SMC*2+1+0]=table2['x'][0]
OUT[i,N_SMC*2+1+1]=table2['y'][0]
OUT[i,N_SMC*2+1+2]=table2['z'][0]
OUT[i,N_SMC*2+1+3]=table2['vx'][0]
OUT[i,N_SMC*2+1+4]=table2['vy'][0]
OUT[i,N_SMC*2+1+5]=table2['vz'][0]
return OUT
def make_highlights(ax):
textstr = r'$F_t>0$'
props = dict(boxstyle='round', facecolor='wheat', alpha=0.8)
ax.text(0.8, 0.5, textstr, transform=ax.transAxes, fontsize=18,
verticalalignment='top', bbox=props)
ax.axvspan(0.25, 0.5, facecolor='blue', alpha=0.1)
def plot(label,DVI_F):
fig = plt.figure(num=None,figsize=(10, 10), facecolor='w', edgecolor='k')
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
# ax3 = fig.add_subplot(313)
fig.subplots_adjust(hspace=2.0)
color=['ro','bo','b','r-o','k', 'ko']
for i in range(1,6):
ax1.plot(DVI_F[:,0],DVI_F[:,i*2-1],
color[i],
linewidth=1, markersize=MARKERSIZE,label='contact %d'%i
)
ax2.plot(DVI_F[:,0],DVI_F[:,i*2],
color[i],
linewidth=1, markersize=MARKERSIZE,label='contact %d'%i
)
# ax3.plot(DVI_F[:,0],DVI_F[:,-6],
# 'r',
# linewidth=1, markersize=MARKERSIZE,label='x'
# )
# ax3.plot(DVI_F[:,0],DVI_F[:,-6],
# 'b',
# linewidth=1, markersize=MARKERSIZE,label='u_x'
# )
ax2.legend(fancybox=True, shadow=True, ncol=1)
ax1.legend(fancybox=True, shadow=True, ncol=1)
ax1.set_xlim(0, 0.5)
ax1.set_ylim(0, 3)
ax2.set_xlim(0, 0.5)
# ax3.set_xlim(0, 0.5)
ax2.set_ylim(0, 1.5)
ax1.legend(loc='center left')
ax2.legend(loc='center left')
make_highlights(ax1)
make_highlights(ax2)
# make_highlights(ax3)
ax2.set_xlabel(r'$t(s)$',fontsize=22,)
ax1.set_ylabel(r'$F_n(N)$', fontsize=22,)
ax2.set_ylabel(r'$F_t(N)$', fontsize=22,)
# ax3.set_ylabel(r'$x(m)$',fontsize=22,)
plt.tight_layout(pad=1.50)
# ax3.yaxis.set_major_formatter(FormatStrFormatter('%.0e'))
# ax2.set_ylabel(r'$F$')
plt.savefig('DVI_DEM'+label+'.png')
#plt.show()
# DEM_F=prepare(path_DEM,'F_SCM_', '.txt', False)
#DVI_F_chrono=prepare(path_DVI_chrono,'F_NSC_', '.txt', 'data_', '.csv', False)
DVI_F_python=prepare(path_DVI_python,'stepforce','.csv', 'stepdata_sphere_', '.csv', True)
#plot("_chrono",DVI_F_chrono)
plot("_python",DVI_F_python)
| 34.671756 | 90 | 0.528181 | 636 | 4,542 | 3.636792 | 0.31761 | 0.022482 | 0.015132 | 0.020752 | 0.201902 | 0.130134 | 0.125811 | 0.102032 | 0.043234 | 0.043234 | 0 | 0.051911 | 0.308675 | 4,542 | 130 | 91 | 34.938462 | 0.684713 | 0.147732 | 0 | 0.044444 | 0 | 0 | 0.067273 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.1 | 0 | 0.144444 | 0.011111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
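The script reads three result directories from the command line (`sys.argv[1..3]` above); with the DEM and Chrono `prepare` calls commented out, only the third directory is consumed. A hypothetical invocation (directory names are placeholders):

```python
# python box_sphere_SMC_NSC.py results/DEM results/DVI_chrono results/DVI_python
```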
e7a7a001dc96d78bf26c54dfa2c539b4c6caf7e2 | 19,323 | py | Python | codes/sensitivity_analysis_withRealParameters.py | atsoukevin93/tumorgrowth | 96bda28a6ae6455c53c3b573c05746b6d4f2e802 | [
"CC0-1.0"
] | null | null | null | codes/sensitivity_analysis_withRealParameters.py | atsoukevin93/tumorgrowth | 96bda28a6ae6455c53c3b573c05746b6d4f2e802 | [
"CC0-1.0"
] | null | null | null | codes/sensitivity_analysis_withRealParameters.py | atsoukevin93/tumorgrowth | 96bda28a6ae6455c53c3b573c05746b6d4f2e802 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from fipy import *
from numpy import *
import scipy.sparse as sp
import scipy.sparse.linalg as la
import parameterFunctions.immuneResponse as delt
import parameterFunctions.sigmaF as sigmaF
import inspect
from collections import OrderedDict
import pygpc
from pygpc.sobol_saltelli import get_sobol_indices_saltelli
from pygpc.sobol_saltelli import saltelli_sampling
# This function is a modified version of the original pygpc function
def modified_get_sobol_indices(gpc_object, coeffs, n_samples=1e4):
"""
    Calculate the Sobol indices of the gPC approximation by sampling. This
    modified version always samples (up to second order) using Saltelli's Sobol
    sampling sequence [1, 2, 3], and exponentiates the surrogate output before
    estimating the indices (the model below returns log-volumes).

    sobol, sobol_idx, sobol_idx_bool = modified_get_sobol_indices(gpc_object, coeffs, n_samples=1e4)

    Parameters
    ----------
    gpc_object : SGPC
        gPC object holding the problem definition and the fitted expansion
    coeffs: ndarray of float [n_basis x n_out]
        GPC coefficients
    n_samples : int, optional, default: 1e4
        Number of samples to determine Sobol indices by sampling. The efficient number of samples
        increases to n_samples * (2*dim + 2) in Saltelli's Sobol sampling sequence.
Returns
-------
sobol: ndarray of float [n_sobol x n_out]
Normalized Sobol indices w.r.t. total variance
sobol_idx: list of ndarray of int [n_sobol x (n_sobol_included)]
Parameter combinations in rows of sobol.
sobol_idx_bool: ndarray of bool [n_sobol x dim]
Boolean mask which contains unique multi indices.
Notes
-----
.. [1] Sobol, I. M. (2001). "Global sensitivity indices for nonlinear
mathematical models and their Monte Carlo estimates." Mathematics
and Computers in Simulation, 55(1-3):271-280,
doi:10.1016/S0378-4754(00)00270-6.
.. [2] Saltelli, A. (2002). "Making best use of model evaluations to
compute sensitivity indices." Computer Physics Communications,
145(2):280-297, doi:10.1016/S0010-4655(02)00280-1.
.. [3] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
S. Tarantola (2010). "Variance based sensitivity analysis of model
output. Design and estimator for the total sensitivity index."
Computer Physics Communications, 181(2):259-270,
doi:10.1016/j.cpc.2009.09.018.
"""
if gpc_object.p_matrix is None:
dim = gpc_object.problem.dim
else:
dim = gpc_object.problem_original.dim
if gpc_object.problem_original is None:
problem_original = gpc_object.problem
else:
problem_original = gpc_object.problem_original
# generate uniform distributed sobol sequence (parameter space [0, 1])
coords_norm_01 = saltelli_sampling(n_samples=n_samples, dim=dim, calc_second_order=True)
coords_norm = zeros(coords_norm_01.shape)
# transform to respective input pdfs using inverse cdfs
for i_key, key in enumerate(problem_original.parameters_random.keys()):
coords_norm[:, i_key] = problem_original.parameters_random[key].icdf(coords_norm_01[:, i_key])
# run model evaluations
res = gpc_object.get_approximation(coeffs=coeffs, x=coords_norm)
# determine sobol indices
sobol, sobol_idx, sobol_idx_bool = get_sobol_indices_saltelli(y=exp(res),
dim=dim,
calc_second_order=True,
num_resamples=100,
conf_level=0.95)
# sort
idx = flip(argsort(sobol[:, 0], axis=0))
sobol = sobol[idx, :]
sobol_idx = [sobol_idx[i] for i in idx]
sobol_idx_bool = sobol_idx_bool[idx, :]
return sobol, sobol_idx, sobol_idx_bool
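# For reference (standard definitions, not pygpc-specific): the first-order and
# total Sobol indices estimated above are
#     S_i  = Var(E[Y | X_i]) / Var(Y)
#     S_Ti = 1 - Var(E[Y | X_~i]) / Var(Y)
# where X_~i denotes every input except X_i; Saltelli's scheme estimates them
# from n_samples * (2*dim + 2) evaluations of the gPC surrogate.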
# @wrap_non_picklable_objects
class MyModel(pygpc.AbstractModel):
def __init__(self):
self.fname = inspect.getfile(inspect.currentframe())
# pass
# def __reduce__(self):
# return (MyModel, (self.fname,))
def validate(self):
pass
    def dichotomy(self, mu_a, mu_b, eps, mesK, Q, aDiffE, aConVE, Id, gF, t_s, E, delta_tild):
        # bisection on mu: solve the linear system for E at each candidate mu and
        # drive F(mu) = sum(mesK * delta_tild * E) - 1 to zero on [mu_a, mu_b]
while mu_b - mu_a > eps:
mu = (mu_b + mu_a) / 2.
bE = (mesK * Q * mu).T
E_new = la.spsolve((- aDiffE.tocsc() - mu * aConVE + Id.multiply(mesK * gF * t_s)), bE)
E.setValue(E_new)
E.updateOld()
F_mu_m = numerix.sum(mesK * delta_tild.value * E.value) - 1.
bE = (mesK * Q * mu_a).T
E_new = la.spsolve((- aDiffE.tocsc() - mu_a * aConVE + Id.multiply(mesK * gF * t_s)), bE)
E.setValue(E_new)
E.updateOld()
F_mu_a = numerix.sum(mesK * delta_tild.value * E.value) - 1.
# print ('{0} x {1}'.format(F_mu_m, F_mu_a))
if F_mu_m * F_mu_a <= 0:
mu_b = mu
else:
mu_a = mu
return mu
    def not_converge(self, x, y):
        # True while x is still pinned to the bracket endpoint y (relative gap
        # below 1e-6), i.e. the bracket [mu_a, mu_b] must be shrunk further
        return (abs(x - y) / y) <= 1e-6
def simulate(self, process_id=None, matlab_engine=None):
step = 0
res = asarray([])
print(self.p['a'].flatten())
print("PARAM/LA TAILLE: {0}/{1}".format(self.p["a"], self.p["a"].shape))
print('HIHIHIHIHIHI ', float64(self.p["a"]))
for idx in range(self.p["a"].shape[0]):
# print(self.p["a"]*self.p['D']*self.p['sF'])
t_s = 1. / self.p["a"][idx]
x_s = sqrt(self.p["D"][idx] * t_s)
c_s = 1. / (t_s * (x_s ** 2) * self.p["delta"][idx])
# nu = (D/a)*sqrt(D/a)*(S*d*delta)/(chi*sF)
# mu1_s = mu1_tild
mu1_s = c_s / (self.p["R"][idx] * t_s)
# mu0_s = a * mu1_s / V
phi_s = (x_s ** 2) / (mu1_s * t_s * self.p["chi"][idx])
# Q = S*mu1_s*t_s/c_s
Q = 1.
# print(self.p["sF"] )
U = (self.p["sF"][idx] * x_s ** 2) / (self.p["K"][idx] * phi_s)
            # print('HERE IS THE PARAMETER VALUE: {0}'.format(self.p["sF"][0]))
radius = 1. / x_s
cellSize = radius/10.
mesh = Gmsh2D('''
cellSize = %(cellSize)g;
radius = %(radius)g;
Point(1) = {0, 0, 0, cellSize};
Point(2) = {-radius, 0, 0, cellSize};
Point(3) = {0, radius, 0, cellSize};
Point(4) = {radius, 0, 0, cellSize};
Point(5) = {0, -radius, 0, cellSize};
Circle(6) = {2, 1, 3};
Circle(7) = {3, 1, 4};
Circle(8) = {4, 1, 5};
Circle(9) = {5, 1, 2};
Line Loop(10) = {6, 7, 8, 9};
Plane Surface(11) = {10};
''' % locals())
            # print('I am here')
x = mesh.cellCenters
xt, yt = mesh.cellCenters
nVol = mesh.numberOfCells
nFaces = mesh.numberOfFaces
intF = mesh.interiorFaceIDs
extF = arange(0, nFaces, 1)[array(mesh.exteriorFaces)]
intFacesCells = mesh.faceCellIDs[:, intF]
extFacesCells = mesh.faceCellIDs[:, extF]
TKL = mesh._calcFaceAreas() / mesh._calcFaceToCellDistAndVec()[0].sum(axis=0)
mes_edge = mesh._calcFaceAreas()
mesK = mesh.cellVolumes
# ------------------------------------------ The Chemical Potential ------------------------------
aDiffP = zeros((nVol, nVol))
aDiffP = sp.csc_matrix(aDiffP)
aDiffP = aDiffP + sp.coo_matrix((-TKL[intF], (intFacesCells[0], intFacesCells[0])), shape=(nVol, nVol))
aDiffP = aDiffP + sp.coo_matrix((TKL[intF], (intFacesCells[0], intFacesCells[1])), shape=(nVol, nVol))
aDiffP = aDiffP + sp.coo_matrix((TKL[intF], (intFacesCells[1], intFacesCells[0])), shape=(nVol, nVol))
aDiffP = aDiffP + sp.coo_matrix((-TKL[intF], (intFacesCells[1], intFacesCells[1])), shape=(nVol, nVol))
# -----------------------------------Neumann Boundary condition------------------------------------------
aDiffP = aDiffP + sp.coo_matrix((0. * TKL[extF], (extFacesCells[0], extFacesCells[0])), shape=(nVol, nVol))
e = ones((1, nVol))
EaDiffP = sp.csc_matrix(concatenate((concatenate((aDiffP.T.todense(), (mesK * e).T), axis=1),
array([append((mesK * e).T, 0.)])), axis=0))
# -----------------------------------Dirichlet Boundary condition------------------------------------------
test = CellVariable(mesh=mesh, value=0.)
phi = CellVariable(name="$\phi(t,x,y)$", mesh=mesh, value=0.0, hasOld=1)
# sF = sigmaF.SigmaF2D(params.sF, xt, yt, Rs=0.05)
sF = sigmaF.SigmaF2D(1. / x_s, xt, yt, Rs=0.05 / (x_s ** 2))
F = sF
extendedF = append(mesK * U * F, 0.)
phi_new = la.spsolve(EaDiffP, extendedF)
phi.setValue(phi_new[0:nVol])
phi.updateOld()
# ------------------------------------------ The Chemoattractant ------------------------------
aDiffE = zeros((nVol, nVol))
aDiffE = sp.csc_matrix(aDiffE)
aDiffE = aDiffE + sp.coo_matrix((-TKL[intF], (intFacesCells[0], intFacesCells[0])), shape=(nVol, nVol))
aDiffE = aDiffE + sp.coo_matrix((TKL[intF], (intFacesCells[0], intFacesCells[1])), shape=(nVol, nVol))
aDiffE = aDiffE + sp.coo_matrix((TKL[intF], (intFacesCells[1], intFacesCells[0])), shape=(nVol, nVol))
aDiffE = aDiffE + sp.coo_matrix((-TKL[intF], (intFacesCells[1], intFacesCells[1])), shape=(nVol, nVol))
# -----------------------------------Dirichlet Boundary condition------------------------------------------
aDiffE = aDiffE + sp.coo_matrix((-TKL[extF], (extFacesCells[0], extFacesCells[0])), shape=(nVol, nVol))
aConVE = zeros((nVol, nVol))
aConVE = sp.csc_matrix(aConVE)
dPhi_int = numerix.dot(phi.faceGrad.value, mesh.faceNormals)[intF]
aConVE = aConVE + sp.coo_matrix((mes_edge[intF] * plus(dPhi_int), (intFacesCells[0], intFacesCells[0])),
shape=(nVol, nVol))
aConVE = aConVE + sp.coo_matrix((-mes_edge[intF] * minus(dPhi_int), (intFacesCells[0], intFacesCells[1])),
shape=(nVol, nVol))
aConVE = aConVE + sp.coo_matrix((-mes_edge[intF] * plus(dPhi_int), (intFacesCells[1], intFacesCells[0])),
shape=(nVol, nVol))
aConVE = aConVE + sp.coo_matrix((mes_edge[intF] * minus(dPhi_int), (intFacesCells[1], intFacesCells[1])),
shape=(nVol, nVol))
dPhi_ext = numerix.dot(phi.faceGrad.value, mesh.faceNormals)[extF]
aConVE = aConVE + sp.coo_matrix((mes_edge[extF] * plus(dPhi_ext), (extFacesCells[0], extFacesCells[0])),
shape=(nVol, nVol))
Id = sp.spdiags(numerix.ones(nVol), [0], nVol, nVol)
# ---------------Variables and parameters for the Immune Cells Displacement equation---------
# E = CellVariable(name="$E(t,x,y)$", mesh=mesh, value=0.53235e6/c_s, hasOld=1)
E = CellVariable(name="$E(t,x,y)$", mesh=mesh, value=0., hasOld=1)
delta_tild = CellVariable(name="$\delta_t(x,y)$", mesh=mesh, value=0.)
delta_tild.setValue(delt.GaussianImmuneResponse2D(1. / x_s, xt, yt, Ra=0.02 / x_s ** 2))
gF = self.p["gF"][idx]
Id = sp.spdiags(numerix.ones(nVol), [0], nVol, nVol)
            # ---------------------------------------------- Dichotomy (Bisection) Method --------------------------------------------
mu_a = 0.
mu_b = 1.
F_mu_m = 0.
F_mu_a = 0.
eps = 1e-10
mu = self.dichotomy(mu_a, mu_b, eps, mesK, Q, aDiffE, aConVE, Id, gF, t_s, E, delta_tild)
while self.not_converge(mu, mu_b):
# print(mu, mu_b)
mu_b = (mu_a + mu_b) / 2.
mu = self.dichotomy(mu_a, mu_b, eps, mesK, Q, aDiffE, aConVE, Id, gF, t_s, E, delta_tild)
print('Step -- > {0}'.format(step))
step = step + 1
res = append(res, [mu*mu1_s*1e-9])
print('mu:{0}'.format(mu))
# res = np.asarray([mu*mu1_s*1e-9])
# res[:, newaxis]
res = log(res[:, newaxis])
return res
def norm_n(V, dx, n):
if n == 0:
c_max = max(abs(V))
yield c_max
else:
norme = sum(abs(dx*V)**n)
yield norme**(1./n)
def mo(x):
return numerix.L2norm(x)
def plus(z):
return 0.5*(z+abs(z))
def minus(z):
return 0.5*(-z+abs(z))
def alphan(n):
if n == 0:
return 1
return 2 * alphan(n - 1.) / ((2. ** n) - 1.)
def toss(deb, fin):
return random.uniform(deb, fin)
# --------------- Sensitivity Analysis---------------------------
# Create the tumor growth model
# model = un.Model(run=evaluate_mu_un, labels=["tumor volume($mm^3$)"])
model = MyModel()
parameters = OrderedDict()
parameters["a"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[0.1, 0.5])
# parameters["a"] = pygpc.Norm(pdf_shape=[0.2, 0.09])
parameters["D"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[8.64e-5, 1e-3])
# parameters["D"] = [8.64e-5]
parameters["delta"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[1., 60.])
# parameters["R"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[7.573e-8, 1.231e-6])
# parameters["R"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[6.456e-8, 1.520e-6])#IC1
# parameters["R"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[5.5e-7, 1.036e-6])#IC3_99
parameters["R"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[6.11e-7, 9.74e-7])#IC4_95
# parameters["R"] = pygpc.Norm(pdf_shape=[7.923174114490609e-07, 7.945822739100839e-15])
parameters["chi"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[86.4, 86.4e5])
# parameters["chi"] = [86.4]
parameters["sF"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[5e-17, 0.625e-16])
# parameters["sF"] = [5e-17]
parameters["K"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[1e-2, 1.])
# parameters["K"] = [1e-2]
# parameters["gF"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[2e-2, 1.])
parameters["gF"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[2e-2, 1.])
interval = 'IC4_hetero'
# parameters["chi"] = 86.4
# parameters["sF"] = 5e-17
# parameters["K"] = 1e-2
# parameters["gF"] = 2e-2
problem = pygpc.Problem(model=model, parameters=parameters)
# basis = pygpc.Basis()
# basis.init_basis_sgpc(problem=problem,
# order=[5, 5, 5],
# order_max=15,
# order_max_norm=1,
# interaction_order=3)
# basis.plot_basis(dims=[0, 1, 2])
#
fn_results = 'Sensitivity_data/PCE_data_{0}'.format(interval)  # tag the results file with the interval label
save_session_format = ".hdf5"
# ---------------------------------- Personalized Options ------------------------------
options = dict()
options["method"] = "reg"
# options["method"] = "quad"
options["solver"] = "Moore-Penrose"
# options["solver"] = "OMP"
options["settings"] = None
options["order"] = [5] * problem.dim # The univariate polynomials expansion orders
options["order_max"] = 5
options["order_max_norm"] = 0.7
# options["order_max_norm"] = 1.
options["interaction_order"] = 2
# options["interaction_order"] = 2
options["matrix_ratio"] = 2
# options["error_type"] = "nrmsd"
options["error_type"] = "loocv"
options["n_samples_validation"] = 1e3
options["n_cpu"] = 2
options["fn_results"] = fn_results
options["save_session_format"] = save_session_format
options["gradient_enhanced"] = False
options["gradient_calculation"] = "FD_1st2nd"
options["gradient_calculation_options"] = {"dx": 0.001, "distance_weight": -2}
options["backend"] = "omp"
# options["grid"] = pygpc.Random
# options["grid"] = pygpc.LHS(parameters_random=problem.parameters_random, seed=1)
options["grid_options"] = None
n_coeffs = pygpc.get_num_coeffs_sparse(order_dim_max=options["order"],
                                       order_glob_max=options["order_max"],
                                       order_inter_max=options["interaction_order"],
                                       dim=problem.dim)
# problem.dim
grid = pygpc.LHS(parameters_random=problem.parameters_random,
                 n_grid=options["matrix_ratio"] * n_coeffs,
                 seed=1)
# grid = pygpc.Random(parameters_random=problem.parameters_random,
# n_grid=options["matrix_ratio"] * n_coeffs,
# seed=1)
# print('taille grille', grid.n_grid)
# options["fn_results"] = 'Sensitivity_data/PCE_data_{0}'.format(grid.n_grid)
algorithm = pygpc.Static(problem=problem, options=options, grid=grid)
#
# gpc, coeffs, results = algorithm.run()
session = pygpc.Session(algorithm=algorithm)
# # session.grid = algorithm.grid
#
# # #
# # # # # run gPC session
session, coeffs, results = session.run()
dataPath = 'Sensitivity_data/Pygpc_Sobol_idx_{0}.txt'.format(interval)
outF = open(dataPath, "w")
mean = session.gpc[0].get_mean(coeffs)
# outF.write('Mean: '+mean)
print("Mean: {}".format(mean))
std = session.gpc[0].get_std(coeffs)
# outF.write('Std: '+std)
print("Std: {}".format(std))
sobol, sobol_idx, sobol_idx_bool = modified_get_sobol_indices(session.gpc[0], coeffs, n_samples=10)
n_idx = len(sobol_idx)
for i in range(n_idx):
    print("Parameter x{}: {}".format(sobol_idx[i]+1, sobol[i][0]))
    str_tmp = ''
    for k in range(problem.dim):
        if len(sobol_idx[i]) == k+1:
            if k+1 == 1:
                str_tmp = str(sobol_idx[i][k] + 1)
            elif k+1 > 1:
                for m in range(k):
                    str_tmp = str_tmp + str(sobol_idx[i][m] + 1) + ' '
                str_tmp = str_tmp + str(sobol_idx[i][k] + 1)
    outF.write(str_tmp + ',' + str(sobol[i][0]))
    outF.write('\n')
print(sobol_idx_bool)
outF.close()
pygpc.validate_gpc_plot(session=session,
                        coeffs=coeffs,
                        random_vars=["a", "delta"],
                        n_grid=[25, 25],
                        output_idx=0,
                        fn_out=session.fn_results+'plot',
                        folder="gpc_vs_original_plot",
                        n_cpu=options["n_cpu"])
# Validate gPC approximation vs original model function using Monte Carlo simulation
nrmsd = pygpc.validate_gpc_mc(session=session,
                              coeffs=coeffs,
                              n_samples=1e3,
                              fn_out=session.fn_results+'mc',
                              n_cpu=options["n_cpu"])
| 40.006211 | 123 | 0.549915 | 2,462 | 19,323 | 4.165719 | 0.198619 | 0.017941 | 0.016088 | 0.019891 | 0.331416 | 0.276716 | 0.23986 | 0.217239 | 0.17931 | 0.156299 | 0 | 0.038517 | 0.278476 | 19,323 | 482 | 124 | 40.089212 | 0.697102 | 0.28391 | 0 | 0.091255 | 0 | 0 | 0.100088 | 0.00655 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045627 | false | 0.003802 | 0.041825 | 0.015209 | 0.13308 | 0.034221 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7a94a2ff34c1076fb500ca11f0f92ab4291d4b0 | 2,314 | py | Python | src/day11.py | birdman74/advent-of-code-2021 | 190cd4110ef3553258a26c8521bdf372c006a77c | [
"Apache-2.0"
] | null | null | null | src/day11.py | birdman74/advent-of-code-2021 | 190cd4110ef3553258a26c8521bdf372c006a77c | [
"Apache-2.0"
] | null | null | null | src/day11.py | birdman74/advent-of-code-2021 | 190cd4110ef3553258a26c8521bdf372c006a77c | [
"Apache-2.0"
] | null | null | null | import os
from typing import List

MODULE_DIR = os.path.dirname(os.path.realpath(__file__))
PROJECT_DIR = os.path.join(MODULE_DIR, "..")
INPUT_SOURCE_DIR = os.path.join(PROJECT_DIR, "input")


def get_data_lines(input_file_name):
    input_file = os.path.join(INPUT_SOURCE_DIR, input_file_name)
    print(f"Input file: {input_file}")
    data_file = open(input_file)
    return data_file.read().split("\n")


def gain_energy(octopi: List[List[int]], x: int, y: int):
    energy = octopi[x][y]
    if energy == 10:  # already flashed this step
        return
    octopi[x][y] = new_energy = energy + 1
    if new_energy == 10:  # flash: bump every neighbour, clamped to the 10x10 grid
        [[gain_energy(octopi, new_x, new_y) for new_y in range(max(0, y - 1), min(y + 2, 10))]
         for new_x in range(max(0, x - 1), min(x + 2, 10))]


def perform_steps(octopi: List[List[int]], steps: int):
    flasher_count = 0
    for _ in range(steps):
        for x in range(len(octopi)):
            for y in range(len(octopi[0])):
                gain_energy(octopi, x, y)
        for x in range(len(octopi)):
            for y in range(len(octopi[0])):
                octopi[x][y] = octopi[x][y] % 10  # flashed octopuses (energy 10) reset to 0
        iteration_flasher_count = sum(x.count(0) for x in octopi)
        flasher_count += iteration_flasher_count
    return flasher_count


def do_the_thing(input_file_name):
    data_lines = get_data_lines(input_file_name)
    print(f"Number of data lines: {len(data_lines)}")
    octopi = []
    for data_line in data_lines:
        octopi.append(list(map(int, data_line)))
    flasher_count = perform_steps(octopi, 100)
    print(f"Total flashers after 100 steps: {flasher_count}\n#################################\n")


def do_the_thing_2(input_file_name):
    data_lines = get_data_lines(input_file_name)
    print(f"Number of data lines: {len(data_lines)}")
    octopi = []
    for data_line in data_lines:
        octopi.append(list(map(int, data_line)))
    iteration = flasher_count = 0
    while flasher_count < 100:  # stop once all 100 octopuses flash in the same step
        iteration += 1
        flasher_count = perform_steps(octopi, 1)
    print(f"First all flash event on iteration: {iteration}\n#################################\n")


def day_11_do(input_file_name):
    do_the_thing(input_file_name)


def day_11_do_2(input_file_name):
    do_the_thing_2(input_file_name)


day_11_do("day11.txt")
day_11_do_2("day11.txt")
| 27.223529 | 98 | 0.639585 | 362 | 2,314 | 3.81768 | 0.201657 | 0.091172 | 0.094067 | 0.04631 | 0.40521 | 0.348046 | 0.287265 | 0.261939 | 0.261939 | 0.261939 | 0 | 0.02682 | 0.210458 | 2,314 | 84 | 99 | 27.547619 | 0.729611 | 0 | 0 | 0.254545 | 0 | 0 | 0.128349 | 0.043215 | 0 | 0 | 0 | 0 | 0 | 1 | 0.127273 | false | 0 | 0.036364 | 0 | 0.218182 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7a9c1267f6e65a3675addd7271b9a0a997345e4 | 3,430 | py | Python | src/models/adversarial_validation.py | solery-git/Yandex_MIPT_user_identification | 6861c14ebeeaef963b1d180080b87637a9578dd5 | [
"FTL"
] | null | null | null | src/models/adversarial_validation.py | solery-git/Yandex_MIPT_user_identification | 6861c14ebeeaef963b1d180080b87637a9578dd5 | [
"FTL"
] | null | null | null | src/models/adversarial_validation.py | solery-git/Yandex_MIPT_user_identification | 6861c14ebeeaef963b1d180080b87637a9578dd5 | [
"FTL"
] | null | null | null | # -*- coding: utf-8 -*-
import warnings
warnings.filterwarnings('ignore')

import pickle
import yaml
from pathlib import Path

import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix, hstack as sparse_hstack, vstack as sparse_vstack
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
import eli5

PROJECT_DIR = Path(__file__).resolve().parents[2]
PATH_PROCESSED = 'data/processed'
PATH_MODELS = 'models'

PARAMS_ALL = yaml.safe_load(open(PROJECT_DIR.joinpath('params.yaml')))
SEED = PARAMS_ALL['meta']['seed']


def csr_hstack(arglist):
    return csr_matrix(sparse_hstack(arglist))


def csr_vstack(arglist):
    return csr_matrix(sparse_vstack(arglist))


def get_mask_top_n(arr, n):
    # boolean mask marking the n largest entries of arr
    indices = np.argpartition(arr, -n)[-n:]
    result = np.zeros(len(arr), dtype=bool)  # np.bool was removed from recent NumPy
    result[indices] = True
    return result


def show_feature_weights(estimator, data_feature_names, fe_feature_names):
    feature_names = data_feature_names + fe_feature_names
    # top 30 data features
    data_feature_names_set = set(data_feature_names)
    data_explanation = eli5.explain_weights(estimator, feature_names=feature_names, top=30, feature_filter=lambda name: name in data_feature_names_set)
    print(eli5.format_as_text(data_explanation, highlight_spaces=True))
    # features from feature engineering
    fe_feature_names_set = set(fe_feature_names)
    fe_explanation = eli5.explain_weights(estimator, feature_names=feature_names, feature_filter=lambda name: name in fe_feature_names_set)
    print(eli5.format_as_text(fe_explanation, show=['targets']))


def main():
    with open(PROJECT_DIR.joinpath(PATH_PROCESSED, 'X_train.pkl'), 'rb') as fin:
        X_train_sparse = pickle.load(fin)
    with open(PROJECT_DIR.joinpath(PATH_PROCESSED, 'X_test.pkl'), 'rb') as fin:
        X_test_sparse = pickle.load(fin)
    with open(PROJECT_DIR.joinpath(PATH_PROCESSED, 'y.pkl'), 'rb') as fin:
        target = pickle.load(fin)
    with open(PROJECT_DIR.joinpath(PATH_PROCESSED, 'data_feature_names.pkl'), 'rb') as fin:
        data_feature_names = pickle.load(fin)
    with open(PROJECT_DIR.joinpath(PATH_PROCESSED, 'fe_feature_names.pkl'), 'rb') as fin:
        fe_feature_names = pickle.load(fin)

    train_len = X_train_sparse.shape[0]
    test_len = X_test_sparse.shape[0]
    # label train rows 0 and test rows 1: the adversarial target
    y = np.array([0] * train_len + [1] * test_len)
    X = csr_vstack([X_train_sparse, X_test_sparse])

    logit = LogisticRegression(C=1, random_state=SEED, solver='liblinear')
    logit.fit(X, y)
    predictions_proba = logit.predict_proba(X)[:, 1]
    logit_score = roc_auc_score(y, predictions_proba)
    print('Score:', logit_score)

    print('Number of train examples:', X_train_sparse.shape[0])
    # the train examples that look most like test examples form the validation set
    adv_valid_mask = get_mask_top_n(predictions_proba[:train_len], 50000)
    validation_examples = X_train_sparse[adv_valid_mask]
    print('Number of adversarial validation examples:', validation_examples.shape[0])
    validation_targets = target[adv_valid_mask]
    class_0, class_1 = list(np.bincount(validation_targets))
    print(f'Class 0: {class_0}, class 1: {class_1}')

    show_feature_weights(logit, data_feature_names, fe_feature_names)

    with open(PROJECT_DIR.joinpath(PATH_PROCESSED, 'adv_valid_mask.pkl'), 'wb') as fout:
        pickle.dump(adv_valid_mask, fout, protocol=2)


if __name__ == '__main__':
    main() | 37.692308 | 151 | 0.732653 | 494 | 3,430 | 4.767206 | 0.259109 | 0.107006 | 0.054352 | 0.065393 | 0.356263 | 0.292144 | 0.210616 | 0.194055 | 0.146072 | 0.093418 | 0 | 0.010742 | 0.158601 | 3,430 | 91 | 152 | 37.692308 | 0.805267 | 0.022157 | 0 | 0 | 0 | 0 | 0.08296 | 0.006565 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0.030769 | 0.276923 | 0.092308 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
e7aa8cac8e96224781686bd1388c518adf6e852b | 3,803 | py | Python | U3/src/e6.py | craciunescu/algo | 81e91fa72d8896b459900510ee270d25de15f6fd | [
"MIT"
] | null | null | null | U3/src/e6.py | craciunescu/algo | 81e91fa72d8896b459900510ee270d25de15f6fd | [
"MIT"
] | null | null | null | U3/src/e6.py | craciunescu/algo | 81e91fa72d8896b459900510ee270d25de15f6fd | [
"MIT"
] | null | null | null | """
@author: David E. Craciunescu
@date: 2020/04/30 (yyyy/mm/dd)
6. After passing through the Tile Room and stealing the Craddle of Life,
Indiana Croft faces a new challenge before leaving the Cursed Temple! The
Temple itself is located on a bridge under which there is a deep darkness.
Fortunately, this place also appears in the diary. The bridge crosses the
so-called Valley of Shadows, which begins with a descent slope (not
necessarily constant), so that after reaching the lowest point he must start
to climb to the other end of the bridge.
Just at the bottom of the valley, one can find a river, but the diary does
not give any specific information about its whereabouts, so Indiana Croft
only knows the river can be found "at the bottom of the valley" and nothing
else. On the slopes, there are sharp rocks.
If Indiana Croft had time, he could easily find the point where to get off
the bridge to get exactly to the river, given that he has a laser pointer
that he can measure heights with and tells him how many meters there are
from the bridge to the ground at a certain point. Unfortunately, the priests
of the Temple have already found him and they are chasing him down. If he
doesn't jump off the bridge they'll catch him before he gets off the bridge.
Our adventurer must quickly find the position of the river to get off and
flee safely.
In order to save our hero, design the algorithm that Indiana Croft should
use to find the minimum point of the valley under the conditions mentioned
above. The algorithm must be efficient, for he cannot afford to waste a
single second: at least in the best case it must have a logarithmic order.
You can consider the time that it takes for Indiana Croft to travel along
the bridge as negligible and that the estimate of the point of the river
where to drop off can have an approximation error of ε meters (ε is a given
constant).
Explain the reasoning behind the provided solution and analyze its
efficiency and complexity.
---
The problem basically forces us to use Gradient Descent. Since we have to
optimize at each move and cannot afford to waste time on the absolute
optimal of answers, we look at what happens to the slope of the function
created by the heights of the bridge.
Even though recursive, the complexity of this algorithm is clearly O(logn),
since at each iteration, no matter what happens, the dataset is divided in
half.
I also took extra effort to make the implementation space efficient as well.
This means that no extra storage elements or auxiliary temporal variables
are used when calculating the gradient descent, only a dataset, a start
point and an endpoint.
Last thing. I ignored the "the estimate of the point of the river where to
drop off can have an approximation error of ε meters" and chose to go
directly with the lowest possible error there could be.
"""
from typing import List
from numbers import Number


def grad_descent(data: List[Number]) -> Number:
    """ Simple algorithm for gradient descent """
    start = 0
    end = len(data) - 1

    def grad_descent_aux(data, start, end):
        """ grad_descent auxiliary function """
        # Basic cases: with at most three candidates left, pick the smallest
        # directly (comparing only the endpoints could miss a middle minimum).
        if (end - start) <= 2:
            return min(range(start, end + 1), key=lambda i: data[i])
        # Not-so-basic cases.
        mid_idx = (start + end) // 2
        is_descending = data[mid_idx - 1] >= data[mid_idx]
        if is_descending:
            return grad_descent_aux(data, mid_idx, end)
        return grad_descent_aux(data, start, mid_idx)

    return grad_descent_aux(data, start, end)
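

# Illustrative usage (added sketch; the valley heights are made up, not from
# the problem statement): grad_descent should return the index of the lowest
# point of a decreasing-then-increasing sequence.
if __name__ == "__main__":
    valley = [9, 7, 4, 2, 1, 3, 6, 8]
    assert grad_descent(valley) == 4  # data[4] == 1 is the bottom of the valley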
| 43.215909 | 80 | 0.720221 | 615 | 3,803 | 4.419512 | 0.429268 | 0.022075 | 0.020603 | 0.02649 | 0.11663 | 0.1078 | 0.059603 | 0.059603 | 0.059603 | 0.059603 | 0 | 0.004843 | 0.239811 | 3,803 | 87 | 81 | 43.712644 | 0.935317 | 0.777018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7ad41b8d693a843781c9b1f0c9248a6d00b3029 | 492 | py | Python | olamundo.py/exercicios_refeitos/ex024.py | gabrielviticov/exercicios-python | 4068cb0029513f8ab8bd12fa3a9055f37b4040d4 | [
"MIT"
] | null | null | null | olamundo.py/exercicios_refeitos/ex024.py | gabrielviticov/exercicios-python | 4068cb0029513f8ab8bd12fa3a9055f37b4040d4 | [
"MIT"
] | null | null | null | olamundo.py/exercicios_refeitos/ex024.py | gabrielviticov/exercicios-python | 4068cb0029513f8ab8bd12fa3a9055f37b4040d4 | [
"MIT"
] | null | null | null | '''
ex024: Create a program that reads the name of a city and says whether or not it starts with the name 'SANTO'
'''
from colorise import set_color, reset_color
cores = {
    'limpa': '\033[m',
    'white': '\033[1;97m',
}
set_color(fg='cyan')
nome_cidade = str(input('Informe o nome de uma cidade: ')).strip().title()
separador = nome_cidade.split()
print('O nome da cidade começa com Santo? ', end='')
reset_color()
print('{}{}{}'.format(cores['white'], separador[0] == 'Santo', cores['limpa']))
| 28.941176 | 100 | 0.658537 | 77 | 492 | 4.12987 | 0.61039 | 0.062893 | 0.044025 | 0.062893 | 0.100629 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031175 | 0.152439 | 492 | 16 | 101 | 30.75 | 0.731415 | 0.203252 | 0 | 0 | 0 | 0 | 0.302083 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7ae68bcd377bc20a3f17e6c42c1085c7778f122 | 3,154 | py | Python | preprocessing/preprocessTrainingfiles_generateFasttextinput.py | gerbentimmerman/community-based-abuse-detection | 8ac03ccf1e594c2588b243e45ac535a0977bbcb0 | [
"MIT"
] | null | null | null | preprocessing/preprocessTrainingfiles_generateFasttextinput.py | gerbentimmerman/community-based-abuse-detection | 8ac03ccf1e594c2588b243e45ac535a0977bbcb0 | [
"MIT"
] | null | null | null | preprocessing/preprocessTrainingfiles_generateFasttextinput.py | gerbentimmerman/community-based-abuse-detection | 8ac03ccf1e594c2588b243e45ac535a0977bbcb0 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import pandas as pd
import csv
import re
import emoji
import redditcleaner
from nltk.tokenize import TweetTokenizer
def filterText(text, tokenizer):
    # Filter URLs
    text = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', "<URL>", text)
    # Filter numbers
    text = re.sub(r'\b\d+\b', "<NUMBER>", text)
    # Filter usernames
    text = re.sub(r'\b@\w\b', "@USER", text)
    # Convert emojis to text
    text = emoji.demojize(text)
    text = redditcleaner.clean(text)
    # Tokenize text
    tokens = tokenizer.tokenize(text)
    return " ".join(tokens)


def createFasttextEmbeddingInput(dataset):
    """
    Create file for training the fasttext embeddings
    """
    readfile = "../data/reddit/preprocessed_reddit_{}_large.csv".format(dataset)
    df = pd.read_csv(readfile, header=0, engine='python')
    outputfile = "{}_train_fasttext_large.en".format(dataset)
    with open(outputfile, "a+", encoding='utf-8') as f:
        comments = df.iloc[:, 1].values
        for comment in comments:
            f.write(str(comment) + "\n")


def preprocessComments(dataset):
    tokenizer = TweetTokenizer(strip_handles=True, reduce_len=True)
    # Non-abusive locations
    years = ['2012', '2013', '2014', '2015', '2016', '2017']
    months = ['01', '04', '07', '10']
    # Choose whole text, 1 sentence or 2 sentences
    files = ['reddish_', 'reddish1sent_', 'reddish2sent_']
    if dataset == "non_abusive":
        csvfile = "../data/reddit/preprocessed_reddit_non_abusive.csv"
        fieldnames = ['subreddit', 'text', 'labels']
        with open(csvfile, "a+", encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            for year in years:
                for month in months:
                    print(year, month)
                    file = "../data/reddit/non-abusive/{}/{}{}-{}.csv".format(year, files[0], year, month)
                    df = pd.read_csv(file, header=None)
                    # Drop empty rows
                    df.dropna(subset=[9], inplace=True)
                    # Assign label to non-abusive data
                    df['labels'] = "NOT"
                    # Clean message and add (subreddit, text) to csvfile
                    rows = df.iloc[:, [4, 9, 11]].values
                    for row in rows:
                        clean_comment = filterText(row[1], tokenizer)
                        row_dict = {'subreddit': row[0], 'text': clean_comment, 'labels': row[2]}
                        writer.writerow(row_dict)
    elif dataset == "abusive":
        input_file = "../data/reddit/abusive/reddish.csv"
        csvfile = "../data/reddit/preprocessed_reddit_abusive_large.csv"
        fieldnames = ['subreddit', 'text', 'labels']
        with open(csvfile, "a+", encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            # read inputfile
            df = pd.read_csv(input_file, header=None)
            # Drop empty rows
            df.dropna(subset=[9], inplace=True)
            # Clean messages and add (message, labels) to csvfile
            rows = df.iloc[:, [4, 9, 10]].values
            for row in rows[1:]:
                clean_comment = filterText(row[1], tokenizer)
                row_dict = {'subreddit': row[0], 'text': clean_comment, 'labels': row[2]}
                writer.writerow(row_dict)


def main():
    dataset = "abusive"
    preprocessComments(dataset)
    # createFasttextEmbeddingInput(dataset)


if __name__ == '__main__':
    main() | 27.666667 | 112 | 0.657895 | 418 | 3,154 | 4.868421 | 0.363636 | 0.02457 | 0.013268 | 0.014742 | 0.361671 | 0.298772 | 0.290909 | 0.27027 | 0.27027 | 0.27027 | 0 | 0.025229 | 0.170577 | 3,154 | 114 | 113 | 27.666667 | 0.752676 | 0.137603 | 0 | 0.25 | 0 | 0.015625 | 0.212829 | 0.121617 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.09375 | 0 | 0.171875 | 0.015625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
e7b0d46435cdec670279bd998320fd1810c2643c | 918 | py | Python | fp_demo/functional3.py | AegirAexx/python-sandbox | fa1f584f615c6ed04f80b9dd92d2b241248c9ebe | [
"Unlicense"
] | null | null | null | fp_demo/functional3.py | AegirAexx/python-sandbox | fa1f584f615c6ed04f80b9dd92d2b241248c9ebe | [
"Unlicense"
] | null | null | null | fp_demo/functional3.py | AegirAexx/python-sandbox | fa1f584f615c6ed04f80b9dd92d2b241248c9ebe | [
"Unlicense"
] | null | null | null | """ Playing around with MAP higher order function and lambdas. """
from datetime import datetime
from pprint import pprint

from scientist import scientists


def age(yob):
    """ Accepts year of birth and returns the person's age. """
    return datetime.now().year - yob


NAMES_AND_AGES = tuple(
    map(lambda x: {'name': x.name, 'age': age(x.born)}, scientists))
pprint(NAMES_AND_AGES)

print('---------------------------')


def ip_str_1(sci):
    """String interpolation using an f-string | Also see format()."""
    message = f'{sci.name} is {datetime.now().year - sci.born} years old.'
    return message


def ip_str_2(sci):
    """String interpolation using %-formatting | Also see format()."""
    data = '%s is %d years old' % (sci.name, (datetime.now().year - sci.born))
    return data


NAMES_AND_AGES2 = tuple(
    map(lambda x: ip_str_2(x), scientists))
pprint(NAMES_AND_AGES2)
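
# Added for comparison (produces the same output as NAMES_AND_AGES, assuming
# the same `scientists` sequence): a generator expression instead of map + lambda.
NAMES_AND_AGES3 = tuple({'name': s.name, 'age': age(s.born)} for s in scientists)
pprint(NAMES_AND_AGES3)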
| 24.810811 | 79 | 0.652505 | 131 | 918 | 4.450382 | 0.427481 | 0.054889 | 0.077187 | 0.051458 | 0.25729 | 0.181818 | 0.181818 | 0.181818 | 0.181818 | 0.181818 | 0 | 0.006623 | 0.17756 | 918 | 36 | 80 | 25.5 | 0.765563 | 0.27451 | 0 | 0 | 0 | 0 | 0.169518 | 0.041991 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.5 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7b19bb8fade9b6e53704f91808df520b32c215d | 1,217 | py | Python | src/mqtt_bridge/app.py | dftossem/mqtt_ros_aws_iot | 0e0ae8d30d25753c5a12d936d07ae94730f3eccd | [
"MIT"
] | 5 | 2021-07-23T09:52:40.000Z | 2021-09-22T21:11:53.000Z | src/mqtt_bridge/app.py | dftossem/mqtt_ros_aws_iot | 0e0ae8d30d25753c5a12d936d07ae94730f3eccd | [
"MIT"
] | null | null | null | src/mqtt_bridge/app.py | dftossem/mqtt_ros_aws_iot | 0e0ae8d30d25753c5a12d936d07ae94730f3eccd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import

import inject
import rospy
import time
import json

from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient

from .bridge import create_bridge
from .util import lookup_object


def mqtt_bridge_node():
    # init node
    rospy.init_node('mqtt_bridge_node')

    # load parameters
    params = rospy.get_param('~', {})
    bridge_params = params.get('bridge', [])

    # create mqtt client
    mqtt_client_factory_name = rospy.get_param(
        '~mqtt_client_factory', '.mqtt_client:createMqttClient')
    mqtt_client_factory = lookup_object(mqtt_client_factory_name)
    mqtt_client = mqtt_client_factory(params)

    # dependency injection
    config = create_config(mqtt_client)
    inject.configure(config)

    # configure bridges, one per factory
    bridges = []
    for bridge_args in bridge_params:
        bridges.append(create_bridge(**bridge_args))

    rospy.on_shutdown(mqtt_client.disconnect)

    # Connect and subscribe to AWS IoT
    mqtt_client.connect()

    rospy.spin()


def create_config(mqtt_client):
    def config(binder):
        binder.bind(AWSIoTMQTTClient, mqtt_client)
    return config
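
# Note (added): `config` is a python-inject binder callback. After
# `inject.configure(config)` runs in mqtt_bridge_node, any component that
# requests AWSIoTMQTTClient (e.g. via inject.instance) receives this shared client.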
__all__ = ['mqtt_bridge_node'] | 24.836735 | 65 | 0.72309 | 149 | 1,217 | 5.583893 | 0.38255 | 0.15625 | 0.102163 | 0.048077 | 0.064904 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001014 | 0.189811 | 1,217 | 49 | 66 | 24.836735 | 0.842799 | 0.127362 | 0 | 0 | 0 | 0 | 0.083412 | 0.027488 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.275862 | 0 | 0.413793 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7b5ecbc66cbd464a70b67852e8b57e73a22b34a | 422 | py | Python | system-test/tweet/test_tweet.py | jaimebuelta/django-docker-template | f850626a3bc6ac7ccf791ca56b859a7b1d3d87a1 | [
"MIT"
] | 94 | 2017-07-30T21:33:46.000Z | 2022-01-10T13:41:03.000Z | system-test/tweet/test_tweet.py | jaimebuelta/django-docker-template | f850626a3bc6ac7ccf791ca56b859a7b1d3d87a1 | [
"MIT"
] | 1 | 2019-02-01T13:45:42.000Z | 2019-02-01T13:45:42.000Z | system-test/tweet/test_tweet.py | jaimebuelta/django-docker-template | f850626a3bc6ac7ccf791ca56b859a7b1d3d87a1 | [
"MIT"
] | 19 | 2017-07-31T12:03:12.000Z | 2021-11-27T05:43:04.000Z | import os
import requests
HOSTPORT = os.environ.get('SYSTEM_TEST_HOSTPORT')
TWEET_URL = HOSTPORT + 'tweet/'
def test_tweets():
    result = requests.get(TWEET_URL)
    assert result.status_code == 200

    tweets = result.json()
    assert len(tweets) == 2

    for tweet in tweets:
        # Get all the linked urls
        url = tweet['href']
        result = requests.get(url)
        assert result.status_code == 200
| 23.444444 | 49 | 0.651659 | 56 | 422 | 4.785714 | 0.482143 | 0.097015 | 0.126866 | 0.156716 | 0.208955 | 0.208955 | 0 | 0 | 0 | 0 | 0 | 0.021944 | 0.244076 | 422 | 17 | 50 | 24.823529 | 0.818182 | 0.054502 | 0 | 0.153846 | 0 | 0 | 0.075567 | 0 | 0 | 0 | 0 | 0 | 0.230769 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7bc9e4e1a533046092b0e42330e482c0a72d04c | 3,990 | py | Python | PressCurve.py | RichBu/PressCurve | 4c89f04489abae5d3c87be2663133b8c974457bf | [
"MIT"
] | null | null | null | PressCurve.py | RichBu/PressCurve | 4c89f04489abae5d3c87be2663133b8c974457bf | [
"MIT"
] | null | null | null | PressCurve.py | RichBu/PressCurve | 4c89f04489abae5d3c87be2663133b8c974457bf | [
"MIT"
] | null | null | null | """
This program uses digital oscilloscope data read in from a PicoScope.
The PicoScope outputs the data as a CSV and this Python app reads it in.
Then, we plot on an X-Y chart.
By Rich Budek 02/12/2021 in Python 3.8
"""

import pandas as pd
import numpy as np
import jinja2
import math
import re
from pandas import DataFrame
import matplotlib.pyplot as plt

# program to read in CSV file from PicoScope and create a graph
# excel was dragged to a halt because the data set is so large


class Config_Data:
    # set up by user once
    filepath = r"Z:\Shared Folders\Data\WCO\Customer\BHPB\BHPB_Pressure\Graph-Python\PressCurve"
    filename_readings = "Test_01_02b_csv.csv"


class Project_Data:
    # data that gets transferred between functions
    full_filename_readings = ""
    file_orders_is_csv = False


def ReadAllReadings(_project_data):
    # read all of the current orders
    orders = pd.read_excel(_project_data.full_filename_readings)
    return orders


# main function or run
def main():
    # this is the "main" program

    # print welcome
    print(" ")
    print("Sample Program")
    print("by Rich Budek")
    print(" ")

    # setup needed variables
    config_data = Config_Data()
    project_data = Project_Data()

    # create all the full file path names here, so only have to do it once
    project_data.full_filename_readings = config_data.filepath + "\\" + config_data.filename_readings
    if project_data.full_filename_readings[-3:].lower() == 'csv':
        project_data.file_readings_is_csv = True
    else:
        project_data.file_readings_is_csv = False

    # these are all the data tables
    readings = []

    # read in the readings
    # this can be a database, but for this example write to xls file so can see the output
    # if write to cloud database, anyone can read it
    if project_data.file_readings_is_csv:
        # FUTURE read in csv file
        readings = pd.read_csv(project_data.full_filename_readings, index_col=0, skiprows=3)
        pass
    else:
        readings = pd.read_excel(project_data.full_filename_readings)

    readings_len = len(readings.index)
    print("number of readings = {:d}".format(readings_len))

    # plot #01 all the hole diameters
    df_readings = readings
    df_readings_len = len(df_readings.index)
    print("number of df readings = {:d}".format(df_readings_len))

    # start plt #01
    fig_01 = plt.figure(figsize=(11, 8), dpi=100.0)
    # fig_01 = plt.figure(figsize=(11,8))
    ax01 = df_readings.plot(title='Mini Bone Air Pressure', kind='line', figsize=(11, 8), color=['blue', 'red'])
    ax01.set_ylim(-0.5, 3.0)
    ax01.set(xlabel='Time (in secs) ', ylabel='Measured Air Pressure (in Volts)')
    xticks_num = np.arange(-1.1, 4.1, step=0.1)
    # xticks_label = map(str, xticks_num)
    xticks_label = ['{:1.3f}'.format(x) for x in xticks_num]
    ax01.set_xticks(xticks_num)
    ax01.set_xticklabels(xticks_label, rotation=90)

    # put notes on the plot
    ax01.text(-1.000, 2.9, 'Test conducted 01/23/2019 on-site by Rich Budek using portable PLC with valves', fontsize=12)
    ax01.text(-1.000, 2.8, 'to control the moldset. PLC was adjusted to provide overlap between close and', fontsize=12)
    ax01.text(-1.000, 2.7, 'eject operation. Holes were drilled oversize by the customer.', fontsize=12)
    ax01.text(-1.000, 2.6, 'Results: Steady state eject never hits supply air pressure.', fontsize=12)

    # set up secondary axis
    ax02 = ax01.twinx()  # instantiate a second axis with same x-axis data
    ax02.set_ylim(-23.8, 143)
    ax02.set(ylabel='Non-Calibrated Calculated Air Pressure (in PSI)')
    df_sec_axis = pd.DataFrame(range(0, readings_len))
    df_sec_axis = pd.DataFrame({'shop air': range(120, 120)})
    ax02 = df_sec_axis.plot(legend='False', figsize=(11, 8), secondary_y=True)

    fig_03 = ax01.get_figure()
    fig_03.savefig('plot_01.svg')


print(" ")
print(".Program start.")

if __name__ == "__main__":
    main()
    print(".Program end.")
| 31.171875 | 121 | 0.69599 | 621 | 3,990 | 4.318841 | 0.388084 | 0.049217 | 0.044743 | 0.042878 | 0.180089 | 0.106264 | 0.074944 | 0.03132 | 0 | 0 | 0 | 0.045355 | 0.198747 | 3,990 | 127 | 122 | 31.417323 | 0.793556 | 0.245865 | 0 | 0.078125 | 0 | 0 | 0.21961 | 0.022834 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0.015625 | 0.109375 | 0 | 0.25 | 0.140625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7bca805299e41e9d298d69596d25e7ce1959ef5 | 580 | py | Python | src/model/bow.py | slein89/BOW_transferlearning | 785fe5e48da0dc0e9170e526f221daee154bebec | [
"MIT"
] | 1 | 2019-03-05T11:23:26.000Z | 2019-03-05T11:23:26.000Z | src/model/bow.py | slein89/BOW_transferlearning | 785fe5e48da0dc0e9170e526f221daee154bebec | [
"MIT"
] | null | null | null | src/model/bow.py | slein89/BOW_transferlearning | 785fe5e48da0dc0e9170e526f221daee154bebec | [
"MIT"
] | null | null | null | from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.ensemble import GradientBoostingClassifier


def bow_pipeline(X_train, y_train):
    pipeline = Pipeline([
        ('countvect', CountVectorizer(analyzer='word',
                                      min_df=0.0,
                                      max_df=0.7,
                                      ngram_range=(1, 2))),
        ('GradientBoosting', GradientBoostingClassifier(n_estimators=200))
    ])
    model = pipeline.fit(X_train, y_train)
    return model
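
# Usage sketch (added; `texts`/`labels` are placeholder names): `texts` is an
# iterable of raw documents and `labels` the target classes.
#   model = bow_pipeline(texts, labels)
#   predictions = model.predict(new_texts)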
| 38.666667 | 77 | 0.624138 | 57 | 580 | 6.175439 | 0.614035 | 0.09375 | 0.039773 | 0.068182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021898 | 0.291379 | 580 | 14 | 78 | 41.428571 | 0.83455 | 0 | 0 | 0 | 0 | 0 | 0.05 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7bd68b12a5e8867bbc35ac7cd372730ec172944 | 3,967 | py | Python | train_offline.py | denisyarats/exorl | a3fb07a420939280aa0918150923dcca7e82bf2a | [
"MIT"
] | 23 | 2022-02-08T20:28:47.000Z | 2022-03-31T11:00:25.000Z | train_offline.py | denisyarats/exorl | a3fb07a420939280aa0918150923dcca7e82bf2a | [
"MIT"
] | 1 | 2022-03-10T04:45:19.000Z | 2022-03-10T04:45:19.000Z | train_offline.py | denisyarats/exorl | a3fb07a420939280aa0918150923dcca7e82bf2a | [
"MIT"
] | null | null | null | import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
import os
os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
os.environ['MUJOCO_GL'] = 'egl'
from pathlib import Path
import hydra
import numpy as np
import torch
from dm_env import specs
import dmc
import utils
from logger import Logger
from replay_buffer import make_replay_loader
from video import VideoRecorder
torch.backends.cudnn.benchmark = True
def get_domain(task):
    if task.startswith('point_mass_maze'):
        return 'point_mass_maze'
    return task.split('_', 1)[0]


def get_data_seed(seed, num_data_seeds):
    return (seed - 1) % num_data_seeds + 1


def eval(global_step, agent, env, logger, num_eval_episodes, video_recorder):
    step, episode, total_reward = 0, 0, 0
    eval_until_episode = utils.Until(num_eval_episodes)
    while eval_until_episode(episode):
        time_step = env.reset()
        video_recorder.init(env, enabled=(episode == 0))
        while not time_step.last():
            with torch.no_grad(), utils.eval_mode(agent):
                action = agent.act(time_step.observation,
                                   global_step,
                                   eval_mode=True)
            time_step = env.step(action)
            video_recorder.record(env)
            total_reward += time_step.reward
            step += 1

        episode += 1
        video_recorder.save(f'{global_step}.mp4')

    with logger.log_and_dump_ctx(global_step, ty='eval') as log:
        log('episode_reward', total_reward / episode)
        log('episode_length', step / episode)
        log('step', global_step)


@hydra.main(config_path='.', config_name='config')
def main(cfg):
    work_dir = Path.cwd()
    print(f'workspace: {work_dir}')

    utils.set_seed_everywhere(cfg.seed)
    device = torch.device(cfg.device)

    # create logger
    logger = Logger(work_dir, use_tb=cfg.use_tb)

    # create envs
    env = dmc.make(cfg.task, seed=cfg.seed)

    # create agent
    agent = hydra.utils.instantiate(cfg.agent,
                                    obs_shape=env.observation_spec().shape,
                                    action_shape=env.action_spec().shape)

    # create replay buffer
    data_specs = (env.observation_spec(), env.action_spec(), env.reward_spec(),
                  env.discount_spec())

    # create data storage
    domain = get_domain(cfg.task)
    datasets_dir = work_dir / cfg.replay_buffer_dir
    replay_dir = datasets_dir.resolve() / domain / cfg.expl_agent / 'buffer'
    print(f'replay dir: {replay_dir}')

    replay_loader = make_replay_loader(env, replay_dir, cfg.replay_buffer_size,
                                       cfg.batch_size,
                                       cfg.replay_buffer_num_workers,
                                       cfg.discount)
    replay_iter = iter(replay_loader)

    # create video recorders
    video_recorder = VideoRecorder(work_dir if cfg.save_video else None)

    timer = utils.Timer()

    global_step = 0

    train_until_step = utils.Until(cfg.num_grad_steps)
    eval_every_step = utils.Every(cfg.eval_every_steps)
    log_every_step = utils.Every(cfg.log_every_steps)

    while train_until_step(global_step):
        # try to evaluate
        if eval_every_step(global_step):
            logger.log('eval_total_time', timer.total_time(), global_step)
            eval(global_step, agent, env, logger, cfg.num_eval_episodes,
                 video_recorder)

        metrics = agent.update(replay_iter, global_step)
        logger.log_metrics(metrics, global_step, ty='train')

        if log_every_step(global_step):
            elapsed_time, total_time = timer.reset()
            with logger.log_and_dump_ctx(global_step, ty='train') as log:
                log('fps', cfg.log_every_steps / elapsed_time)
                log('total_time', total_time)
                log('step', global_step)

        global_step += 1


if __name__ == '__main__':
    main()
| 30.992188 | 79 | 0.637509 | 509 | 3,967 | 4.67387 | 0.261297 | 0.067255 | 0.035309 | 0.015973 | 0.094998 | 0.052963 | 0.029424 | 0.029424 | 0.029424 | 0 | 0 | 0.004798 | 0.264432 | 3,967 | 127 | 80 | 31.23622 | 0.810487 | 0.029745 | 0 | 0.022989 | 0 | 0 | 0.060922 | 0.005988 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045977 | false | 0 | 0.137931 | 0.011494 | 0.218391 | 0.022989 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7bf132349c97d22d52cdb3650636fe8ed882551 | 1,606 | py | Python | December-15/python3_ASHIK11ab.py | ASHIK11ab/A-December-of-Algorithms-2021 | 2eeb1192c69b67b1c64033c1df155a705d1219c4 | [
"MIT"
] | null | null | null | December-15/python3_ASHIK11ab.py | ASHIK11ab/A-December-of-Algorithms-2021 | 2eeb1192c69b67b1c64033c1df155a705d1219c4 | [
"MIT"
] | null | null | null | December-15/python3_ASHIK11ab.py | ASHIK11ab/A-December-of-Algorithms-2021 | 2eeb1192c69b67b1c64033c1df155a705d1219c4 | [
"MIT"
] | null | null | null | import ast
class Solution:
    def __init__(self, orders, deadline):
        self.orders = orders
        self.deadline = deadline

    def solve(self):
        # `y` -> no of bracelets to be made in a day to deliver all orders
        # on time. The starting value of `y` will be >= maximum element in
        # the orders list since no order can be half complete at the end
        # of a day.
        y = max(self.orders)
        while True:
            cont_sub_arrays = find_cont_sub_arrays(self.orders[:], y)
            if len(cont_sub_arrays) <= self.deadline:
                print(y)
                break
            else:
                y += 1


def find_cont_sub_arrays(array, y):
    """ Returns the continuous sub arrays where each sub array sum is <= y. """
    cont_sub_arrays = []
    outer_index = 0
    while array != []:
        temp = []
        for j in range(len(array)):
            cont_sub_array = array[:j+1]
            # Store the continuous sub arrays temporarily since we
            # are only interested in the longest continuous sub array
            # whose sum is <= y.
            if sum(cont_sub_array) <= y:
                temp = cont_sub_array
                # If the valid sub array found is the last sub array of the
                # array, then add it to the list of sub arrays.
                if j+1 == len(array):
                    cont_sub_arrays.append(temp)
                    array = array[j+1:]
            else:
                cont_sub_arrays.append(temp)
                array = array[j:]
                break
    return cont_sub_arrays
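

# Worked example (added; illustrative numbers, not from the puzzle input):
# with orders [5, 3, 4, 2] and y = 5 the greedy split above yields
# [[5], [3], [4], [2]] (four days), while y = 7 yields [[5], [3, 4], [2]]
# (three days), so solve() returns the smallest y whose split fits inside
# the deadline.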
def main():
    orders = ast.literal_eval(input("number of bracelets = "))
    no_of_days = int(input("n = "))
    s = Solution(orders=orders, deadline=no_of_days)
    s.solve()


if __name__ == "__main__":
    main() | 27.220339 | 77 | 0.620174 | 241 | 1,606 | 3.958506 | 0.373444 | 0.080713 | 0.109015 | 0.035639 | 0.071279 | 0.071279 | 0.071279 | 0.071279 | 0 | 0 | 0 | 0.004325 | 0.280199 | 1,606 | 59 | 78 | 27.220339 | 0.820934 | 0.313823 | 0 | 0.157895 | 0 | 0 | 0.03125 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.026316 | 0 | 0.184211 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
e7c5214a6861e2cb908a3d5d48328445859dbfbd | 4,237 | py | Python | windows.py | dcxSt/pfb-mod | 1615790d0782f3becbba72a3e40c5b79ca4dc28b | [
"MIT"
] | 1 | 2021-11-09T13:02:29.000Z | 2021-11-09T13:02:29.000Z | windows.py | dcxSt/pfb-mod | 1615790d0782f3becbba72a3e40c5b79ca4dc28b | [
"MIT"
] | null | null | null | windows.py | dcxSt/pfb-mod | 1615790d0782f3becbba72a3e40c5b79ca4dc28b | [
"MIT"
] | null | null | null | #!/usr/bin/python3.8
"""
Created on 2021-06-07
Author : Stephen Fay
"""
import numpy as np
from constants import *
import helper as h
#%% spectrum transformers (spectrum ~ ft_block)
f3 = lambda x:x*(1/(np.abs(x)+0.1)+0.3)
f4 = lambda x:x*(1/(np.abs(x)+0.000001))
f5 = lambda x:x*(1/(np.abs(x)+0.01))
f6 = lambda x:x*(1/(np.abs(x)+0.000000000001))
f7 = lambda x:x*(1/(np.abs(x)+10.0**(-50)))
# repete the transformation procedure n times
def repete_func(f, ft_block, n, ntap=NTAP, lblock=LBLOCK):
    # apply f to ft_block n times, re-projecting onto a real window each pass
    for i in range(n):
        ft_block = f(ft_block)
        complex_rec = h.matrix_eig_to_window_complex(ft_block, ntap)
        ft_block = h.window_to_matrix_eig(np.real(complex_rec), ntap, lblock)
    return ft_block, complex_rec


#%% candidate replacement windows
def william_wallace(ntap=NTAP, lblock=LBLOCK):
    """
    input : a sinc or sinc hamming window, produces similar results
    output : a candidate window that doesn't have as much leaking
    """
    sinc = h.sinc_window(ntap, lblock)
    # input("type(sinc) {}\nsinc start: {}".format(type(sinc),sinc[:10]))
    ft_block = h.window_to_matrix_eig(sinc, ntap, lblock)
    ft_block, complex_rec = repete_func(f6, ft_block, 10, ntap, lblock)  # result is almost identical if we use f7 instead of f6
    candidate_1 = np.real(complex_rec)
    return candidate_1
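
# Example usage sketch (added; relies only on names defined in this module
# and in helper.py): build the candidate window and inspect its leakage.
#   w = william_wallace()          # real window of length NTAP * LBLOCK
#   box = h.window_to_box(w)       # boxcar transform, as plotted below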
#%% run this file
if __name__=="__main__":
    import matplotlib.pyplot as plt
    from datetime import datetime as dt

    ntap, lblock = NTAP, 32  # LBLOCK
    sinc = h.sinc_window(ntap, lblock)
    ft_block_original = h.window_to_matrix_eig(sinc, ntap, lblock)  # alternatively use SINC_HAMMING
    ft_block = ft_block_original.copy()
    ft_block, complex_rec = repete_func(f6, ft_block, 10, ntap, lblock)
    abs_rec = np.abs(complex_rec)
    imag_rec = np.imag(complex_rec)
    reconstructed_window = np.real(complex_rec)

    ### modified spectrum
    plt.subplots(figsize=(16, 14))
    plt.subplot(431)
    plt.imshow(np.real(ft_block_original), aspect="auto")
    plt.title("real original")
    plt.colorbar()
    plt.subplot(432)
    plt.imshow(np.abs(ft_block_original), aspect="auto")
    plt.title("absolute original")
    plt.colorbar()
    plt.subplot(433)
    plt.imshow(np.imag(ft_block_original), aspect="auto")
    plt.title("imaginary original")
    plt.colorbar()

    ### corresponding reconstruction from window
    plt.subplot(434)
    plt.imshow(np.real(ft_block), aspect="auto")
    plt.title("real (constructed from window)\nTHE ACTUAL THING")
    plt.colorbar()
    plt.subplot(435)
    plt.imshow(np.abs(ft_block), aspect="auto")
    plt.title("absolute (constructed from window)\nTHE ACTUAL THING")
    plt.colorbar()
    plt.subplot(436)
    plt.imshow(np.imag(ft_block), aspect="auto")
    plt.title("imaginary (constructed from window)\nTHE ACTUAL THING")
    plt.colorbar()

    ### the window
    plt.subplot(425)
    plt.plot(abs_rec, "k-.", alpha=0.3, label="abs")
    plt.plot(imag_rec, alpha=0.4, color="orange", label="imaginary")
    plt.plot(sinc, color="grey", alpha=0.4, label="sinc")
    plt.plot(reconstructed_window, "b-", label="real")
    plt.title("window")
    plt.legend()

    ### the boxcar
    box = h.window_to_box(reconstructed_window)
    plt.subplot(426)
    short_box = box[int(ntap*lblock/2-15):int(ntap*lblock/2+15)]
    plt.plot(np.real(short_box), "b-", alpha=0.3, label="real")
    plt.plot(np.abs(short_box), "k-", label="abs")
    plt.grid()
    plt.title("box zoom")
    plt.legend()
    plt.subplot(427)
    short_box = box[int(ntap*lblock/2-150):int(ntap*lblock/2+150)]
    plt.plot(np.real(short_box), "b-", alpha=0.3, label="real")
    plt.plot(np.abs(short_box), "k-", label="abs")
    plt.title("box zoom")
    plt.grid()
    plt.legend()
    plt.subplot(428)
    plt.plot(np.real(box), "b-", alpha=0.3, label="real")
    plt.plot(np.abs(box), "k-", label="abs")
    plt.grid()
    plt.title("box")
    plt.legend()

    plt.tight_layout()
    # strdatetime = dt.today().strftime("%Y-%m-%d_%H.%M.%S")
    # np.save("figures/experiments/series3_{}.npy".format(strdatetime), reconstructed_window)
    # print("saved window")
    # plt.savefig("figures/experiments/series3_{}.png".format(strdatetime))
    # print("saved figure")
    plt.show() | 30.482014 | 123 | 0.671702 | 658 | 4,237 | 4.194529 | 0.275076 | 0.055797 | 0.023913 | 0.03913 | 0.424275 | 0.366304 | 0.28913 | 0.218116 | 0.15471 | 0.138406 | 0 | 0.036189 | 0.165211 | 4,237 | 139 | 124 | 30.482014 | 0.744133 | 0.201794 | 0 | 0.267442 | 0 | 0 | 0.097898 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.05814 | 0 | 0.104651 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
e7c7b17b656904abd64163333b17badb2111d9ac | 986 | py | Python | Train/test.py | fanchy888/digit_ANN | 5faf0e574321ff5e3c6b8ec82992d20177be15a0 | [
"MIT"
] | null | null | null | Train/test.py | fanchy888/digit_ANN | 5faf0e574321ff5e3c6b8ec82992d20177be15a0 | [
"MIT"
] | null | null | null | Train/test.py | fanchy888/digit_ANN | 5faf0e574321ff5e3c6b8ec82992d20177be15a0 | [
"MIT"
] | null | null | null | #-*- coding:utf-8 -*-
from PIL import Image
import numpy as np
from scipy.io import loadmat
from scipy.io import savemat
def sigmoid(z):
    g = 1/(1+np.exp(-z))
    return g
img=Image.open('test.png')
img=img.convert('L')
grey=img.getdata()
X=np.asarray(grey)
X=np.mat(X.ravel())
theta=loadmat('theta')
theta1=theta['theta1']
theta2=theta['theta2']
theta3=theta['theta3']
a1=np.hstack((np.mat(np.ones((1,1))),X))
a2=sigmoid(a1*theta1.T)
a2=np.hstack((np.mat(np.ones((1,1))),a2))
a3=sigmoid(a2*theta2.T)
a3=np.hstack((np.mat(np.ones((1,1))),a3))
h=sigmoid(a3*theta3.T)
y1=np.argmax(h,axis=1)
print(h)
train_set=loadmat('test')
weight=loadmat('theta')
X=np.mat(train_set['X'])
y=np.mat(train_set['y'])
m=y.shape[0]
a1=np.hstack((np.mat(np.ones((m,1))),X))
a2=sigmoid(a1*theta1.T)
a2=np.hstack((np.mat(np.ones((m,1))),a2))
a3=sigmoid(a2*theta2.T)
a3=np.hstack((np.mat(np.ones((m,1))),a3))
h=sigmoid(a3*theta3.T)
y1=np.argmax(h,axis=1)
accuracy=np.mean(np.double(y1==y))*100
print(accuracy)
| 21.434783 | 41 | 0.679513 | 202 | 986 | 3.30198 | 0.292079 | 0.067466 | 0.089955 | 0.116942 | 0.428786 | 0.428786 | 0.428786 | 0.422789 | 0.356822 | 0.356822 | 0 | 0.057421 | 0.063895 | 986 | 45 | 42 | 21.911111 | 0.665222 | 0.020284 | 0 | 0.210526 | 0 | 0 | 0.044652 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.105263 | 0 | 0.157895 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7c8356eb7291024a35ca2cbb8863750d4cda4f4 | 3,465 | py | Python | dateien_lesen.py | hansalemaos/Everything2TXT | 33dc7f15d9003441d7e38a8d872c3b7f6b3fa00b | [
"MIT"
] | 1 | 2022-02-27T19:07:08.000Z | 2022-02-27T19:07:08.000Z | dateien_lesen.py | hansalemaos/Everything2TXT | 33dc7f15d9003441d7e38a8d872c3b7f6b3fa00b | [
"MIT"
] | null | null | null | dateien_lesen.py | hansalemaos/Everything2TXT | 33dc7f15d9003441d7e38a8d872c3b7f6b3fa00b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import docx2txt
from pdfminer.high_level import extract_text
from pptx import Presentation
from bs4 import BeautifulSoup
from epubextract import epub2txt
from xlsx2html import xlsx2html
import tempfile
from tkinter import Tk
from tkinter.filedialog import askopenfilename
import re
def create_temp_file(ending):
    fp = tempfile.TemporaryFile(suffix=ending, delete=False)
    return fp.name


def powerpointlesen(pfad):
    prs = Presentation(pfad)
    ganzertext = ""
    for slide in prs.slides:
        for shape in slide.shapes:
            try:
                if hasattr(shape, "text"):
                    ganzertext = ganzertext + "\n" + shape.text
            except Exception as Fehler:
                print(Fehler)
    return ganzertext


def docxlesen(pfad):
    return docx2txt.process(pfad)


def txtdateien_lesen(pfad):
    try:
        with open(pfad, mode="rb") as f:
            dateiohnehtml = f.read()
        dateiohnehtml = (
            b"""<!DOCTYPE html><html><body><p>"""
            + dateiohnehtml
            + b"""</p></body></html>"""
        )
        soup = BeautifulSoup(dateiohnehtml, "lxml")
        soup = soup.text
        return soup.strip()
    except Exception as Fehler:
        print(Fehler)
        with open(pfad, mode="r", encoding="utf-8") as f:
            dateiohnehtml = f.read()
        return dateiohnehtml


def html_htm_dateien_lesen(pfad):
    try:
        with open(pfad, mode="rb") as f:
            dateiohnehtml = f.read()
        soup = BeautifulSoup(dateiohnehtml, "lxml")
        soup = soup.text
        soup = soup.strip()
        return soup
    except Exception as Fehler:
        print(Fehler)


def pdf_datei_lesen(pfad):
    return extract_text(pfad)


def xlsx_datei_einlesen(pfad):
    tmpdatei = create_temp_file(ending="html")
    xlsx2html(pfad, tmpdatei)
    text = html_htm_dateien_lesen(tmpdatei)
    return text


def dateienauslesen(pfad):
    if str(pfad).endswith("pptx"):
        text = powerpointlesen(pfad)
        return text
    elif str(pfad).endswith("docx"):
        text = docxlesen(pfad)
        return text
    elif str(pfad).endswith("html") or str(pfad).endswith("htm"):
        text = txtdateien_lesen(pfad)
        return text
    elif str(pfad).endswith("pdf"):
        text = pdf_datei_lesen(pfad)
        return text
    elif str(pfad).endswith("epub"):
        text = epub2txt(pfad)
        text = text.convert()
        return text
    elif str(pfad).endswith("xlsx"):
        text = xlsx_datei_einlesen(pfad)
        return text
    else:
        text = txtdateien_lesen(pfad)
        return text


def datei_auswaehlen_mit_tkinter():
    Tk().withdraw()
    dateiname = askopenfilename()
    ausgabeordner = re.sub(r"/[^/]+\.\w+$", "", dateiname)
    ausgabedatei = re.sub(r"^.*(/[^/]+)\.\w{,8}", r"\g<1>.txt", dateiname)
    ausgabedatei = ausgabeordner + ausgabedatei
    return dateiname, ausgabedatei


if __name__ == "__main__":
    dateiname, ausgabedatei = datei_auswaehlen_mit_tkinter()
    textzumspeichern = dateienauslesen(dateiname)
    if not str(dateiname).endswith(".txt"):
        with open(ausgabedatei, mode="w", encoding="utf-8") as f:
            if isinstance(textzumspeichern, str):
                f.write(textzumspeichern)
            if isinstance(textzumspeichern, list):
                textzumspeichern = "\n".join(textzumspeichern)
                f.write(textzumspeichern)
    print(textzumspeichern)
| 27.283465 | 73 | 0.621356 | 384 | 3,465 | 5.507813 | 0.286458 | 0.037825 | 0.049645 | 0.040189 | 0.280378 | 0.243026 | 0.158865 | 0.084161 | 0.048227 | 0.048227 | 0 | 0.005104 | 0.264935 | 3,465 | 126 | 74 | 27.5 | 0.825285 | 0.006061 | 0 | 0.29703 | 0 | 0 | 0.038755 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089109 | false | 0 | 0.09901 | 0.019802 | 0.346535 | 0.039604 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7cc0e525cff12bc2b3486753006977533ce881a | 9,346 | py | Python | train.py | WKUAILAB/Risk_Level_Prediction | 7074953cf2c19cf2f2ad5c1cad5df0ad30637418 | [
"MIT"
] | null | null | null | train.py | WKUAILAB/Risk_Level_Prediction | 7074953cf2c19cf2f2ad5c1cad5df0ad30637418 | [
"MIT"
] | null | null | null | train.py | WKUAILAB/Risk_Level_Prediction | 7074953cf2c19cf2f2ad5c1cad5df0ad30637418 | [
"MIT"
] | 1 | 2022-02-28T06:29:08.000Z | 2022-02-28T06:29:08.000Z | import numpy as np
import pandas as pd
import os
from joblib import dump
from sklearn.model_selection import train_test_split, RandomizedSearchCV, GridSearchCV
from sklearn.metrics import classification_report, recall_score, precision_recall_fscore_support
from sklearn.ensemble import GradientBoostingClassifier
from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_uniform
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Dense, Input, Dropout, Activation, LSTM, concatenate, Reshape, Permute, Lambda, RepeatVector, Multiply
from tensorflow.keras.layers import Embedding, Bidirectional
from tensorflow.keras.initializers import Constant
import tensorflow.keras.backend as K
import lightgbm as lgb
from lightgbm import LGBMClassifier
from utils import fetch_df, actions_to_indices, pretrained_embedding_layer, attention_3d_block, RiskLevelPredict, make_model, read_action_vecs, time_scalar, convert_to_one_hot, learning_rate_010_decay_power_0995, evaluate_recall
cur_dir = '.'
print('current working directory:')
print(os.getcwd())
print(os.listdir())
emb_fn = 'action_page_fasttext.dict'
emb_dir = os.path.join(cur_dir,'data',emb_fn)
model_fn = 'attention_lstm_3'
model_dir = os.path.join('/data/luyining','models',model_fn)
cols = ['has_risk', 'ds', 'user_id', 'order_id', 'reg_days', 'platform', 'usertype', 'mobil_prefix3', 'mobile_prefix5', 'len_sequence', 'cnt_pay', 'max_time_diff', 'min_time_diff', 'avg_time_diff', 'std_time_diff', 'cnt_src', 'device_ios', 'device_android', 'device_wap', 'device_web', 'device_app', 'device_mini', 'cnt_login', 'is_bk_log', 'is_wzp_log', 'is_dc_log', 'cnt_item', 'cnt_cheap_item', 'cnt_lyl_item', 'roi', 'avg_roi', 'is_gift_inclued', 'is_virtual_inclued', 'actions', 'times']
data = fetch_df('temp','rc_risklevel_labels4train_fin4', cols = cols)
action_sequences = pd.DataFrame.to_numpy(data['actions'])
X = []
index = 0
for index in range(len(action_sequences)):
    temp_action_sequence = action_sequences[index]
    X.append(temp_action_sequence.strip().split(","))

time_sequences = pd.DataFrame.to_numpy(data['times'])
T = []
index = 0
for index in range(len(time_sequences)):
    temp_time_sequence = time_sequences[index]
    T.append(list(map(np.int64, temp_time_sequence.strip().split(","))))
X = np.asarray(X) # array of action_sequences
T = np.asarray(T) # array of time_sequences
Y = pd.DataFrame.to_numpy(data['has_risk'], dtype = 'int64') # has_risk(categorical)
X_train,X_test,T_train,T_test,y_train,y_test = train_test_split(X, T, Y, test_size=0.3, random_state=0)
## training set
t_scalar = [list(map(time_scalar,i)) for i in T_train] # time scaling
maxLen = len(max(X_train, key=len))
Y_indices = y_train
# load the embedding of each individual action and build numeric lookup tables
action_to_index, index_to_action, action_to_vec_map = read_action_vecs(emb_dir)
# convert the action sequences to numeric indices
X_indices = actions_to_indices(X_train, action_to_index, maxLen)
# reverse, so that the last action of a sequence sits at the end
X_indices = np.array([i[::-1] for i in X_indices])
T_indices = np.array([[-1]*(maxLen-len(i))+i[::-1] for i in t_scalar])
T_indices = T_indices.reshape(T_indices.shape[0], T_indices.shape[1], 1)
## test set
t_scalar_test = [list(map(time_scalar, i)) for i in T_test]
maxLen = len(max(X_train, key =len))
Y_indices_test = y_test
action_to_index, index_to_action, action_to_vec_map = read_action_vecs(emb_dir)
X_indices_test = actions_to_indices(X_test, action_to_index, maxLen)
X_indices_test = np.array([i[::-1] for i in X_indices_test])
T_indices_test = np.array([[-1]*(maxLen-len(i))+i[::-1] for i in t_scalar_test])
T_indices_test = T_indices_test.reshape(T_indices_test.shape[0], T_indices_test.shape[1], 1)
METRICS = [
    keras.metrics.TruePositives(name='tp'),
    keras.metrics.FalsePositives(name='fp'),
    keras.metrics.TrueNegatives(name='tn'),
    keras.metrics.FalseNegatives(name='fn'),
    keras.metrics.BinaryAccuracy(name='accuracy'),
    keras.metrics.Precision(name='precision'),
    keras.metrics.Recall(name='recall'),
    keras.metrics.AUC(name='auc'),
    keras.metrics.AUC(name='prc', curve='PR'),  # precision-recall curve
]
initial_bias = np.log(sum(Y==1) / (Y.shape[0]-sum(Y==1)))
early_stopping = tf.keras.callbacks.EarlyStopping(
    monitor='val_recall',
    verbose=1,
    patience=5,
    mode='max',
    min_delta=0.003,
    restore_best_weights=True)
model = make_model(metrics=METRICS, output_bias = initial_bias, attention_share = False, bidirectional = True)
model.summary()
history = model.fit(
    [X_indices, T_indices],
    Y_indices,
    epochs=50,
    batch_size=64,
    shuffle=True,
    validation_data=([X_indices_test, T_indices_test], Y_indices_test),
    validation_split=0.2,  # hold out 20% of the provided training data for validation
    validation_freq=1,  # run validation after every epoch
    callbacks=[early_stopping]
)
model.save(model_dir)
feature_columns = ['len_sequence', 'cnt_pay', 'max_time_diff', 'min_time_diff', 'avg_time_diff', 'std_time_diff', 'cnt_src', 'device_ios', 'device_android', 'device_wap', 'device_web', 'device_app', 'device_mini', 'cnt_login', 'is_bk_log', 'is_wzp_log', 'is_dc_log', 'cnt_item', 'cnt_cheap_item', 'cnt_lyl_item', 'roi', 'avg_roi', 'is_gift_inclued','is_virtual_inclued']
feature_columns.append('lstm')
target_column = ['has_risk']
t_scalar_total = [list(map(time_scalar,i)) for i in T] # time scaling
Y_indices_total = Y
# convert the action sequences to numeric indices
X_indices_total = actions_to_indices(X, action_to_index, maxLen)
# reverse, so that the last action of a sequence sits at the end
X_indices_total = np.array([i[::-1] for i in X_indices_total])
# T_indices = np.array([[-1]*(maxLen-len(i))+i[::-1] for i in t])
T_indices_total = np.array([[-1]*(maxLen-len(i))+i[::-1] for i in t_scalar_total])
T_indices_total = T_indices_total.reshape(T_indices_total.shape[0], T_indices_total.shape[1], 1)
data['lstm'] = model.predict([X_indices_total, T_indices_total], batch_size=64)
data[feature_columns] = data[feature_columns].astype(float)
data[target_column] = data[target_column].astype(int)
train_x, test_x, train_y, test_y = train_test_split(data[feature_columns], data[target_column], test_size = 0.2, random_state = 0)
train_x, validation_x, train_y, validation_y = train_test_split(train_x, train_y, test_size = 0.2, random_state = 0)
fit_params={"early_stopping_rounds":30,
"eval_metric" : evaluate_recall,
"eval_set" : [(validation_x,validation_y)],
'eval_names': ['valid'],
'callbacks': [lgb.reset_parameter(learning_rate=learning_rate_010_decay_power_0995)],
'verbose': 100
}
param_test = {'num_leaves': sp_randint(6, 50),
              'min_child_samples': sp_randint(100, 500),
              'min_child_weight': [1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4],
              'subsample': sp_uniform(loc=0.2, scale=0.8),
              'colsample_bytree': sp_uniform(loc=0.4, scale=0.6),
              'reg_alpha': [0, 1e-1, 1, 2, 5, 7, 10, 50, 100],
              'reg_lambda': [0, 1e-1, 1, 5, 10, 20, 50, 100]}
n_HP_points_to_test = 500
clf = lgb.LGBMClassifier(objective='binary',
                         boosting='gbdt',
                         seed=0,
                         max_depth=-1,
                         learning_rate=0.05,
                         random_state=314,
                         silent=True,
                         metric=None,
                         n_jobs=4,
                         n_estimators=5000)
gs = RandomizedSearchCV(
estimator=clf,
param_distributions=param_test,
n_iter=n_HP_points_to_test,
scoring='recall',
cv=5,
refit=True,
random_state=314,
verbose=True)
gs.fit(train_x, train_y, **fit_params)
opt_parameters = gs.best_params_
clf_sw = lgb.LGBMClassifier(**clf.get_params())
#set optimal parameters
clf_sw.set_params(**opt_parameters)
gs_sample_weight = GridSearchCV(estimator=clf_sw,
param_grid={'scale_pos_weight':[1,2,6,7,8,12]},
scoring='recall',
cv=5,
refit=True,
verbose=True)
gs_sample_weight.fit(train_x, train_y, **fit_params)
opt_parameters["scale_pos_weight"] = gs_sample_weight.best_params_['scale_pos_weight']
#Configure locally from hardcoded values
clf_final = lgb.LGBMClassifier(**clf.get_params())
#set optimal parameters
clf_final.set_params(**opt_parameters)
# #Train the final model with learning rate decay
clf_final.fit(train_x, train_y,
**fit_params
)
train_prob_cv = clf_final.predict_proba(train_x)[:,1]
validation_prob_cv = clf_final.predict_proba(validation_x)[:,1]
test_prob_cv = clf_final.predict_proba(test_x)[:,1]
print(classification_report(train_y,train_prob_cv>0.5))
print('--------------------------------------------------')
print(classification_report(validation_y,validation_prob_cv>0.5))
print('--------------------------------------------------')
print(classification_report(test_y,test_prob_cv>0.5))
dump(clf_final, '/data/luyining/models/lgb_3.pkl')
| 43.469767 | 492 | 0.675904 | 1,338 | 9,346 | 4.404335 | 0.246637 | 0.025793 | 0.010182 | 0.008315 | 0.313423 | 0.280672 | 0.2318 | 0.212116 | 0.185644 | 0.120143 | 0 | 0.023763 | 0.184999 | 9,346 | 214 | 493 | 43.672897 | 0.749902 | 0.046437 | 0 | 0.107143 | 0 | 0 | 0.133048 | 0.0233 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.113095 | 0 | 0.113095 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7cd2f7b970a9548bf93ebb125e5939b4572b405 | 2,543 | py | Python | mptb/models/commons.py | to-aoki/my-pytorch-bert | 8e412ae6331f5f19fee55b430be389de2f5c49a6 | [
"Apache-2.0"
] | 21 | 2019-03-04T03:43:19.000Z | 2022-02-14T15:50:41.000Z | mptb/models/commons.py | to-aoki/my-pytorch-bert | 8e412ae6331f5f19fee55b430be389de2f5c49a6 | [
"Apache-2.0"
] | 1 | 2019-10-07T17:49:21.000Z | 2019-12-14T11:50:10.000Z | mptb/models/commons.py | to-aoki/my-pytorch-bert | 8e412ae6331f5f19fee55b430be389de2f5c49a6 | [
"Apache-2.0"
] | 5 | 2019-07-19T07:04:55.000Z | 2020-07-01T13:24:14.000Z | # This file is based on
# https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/pytorch_pretrained_bert/modeling.py.
# changing class names and variables names for my understanding of BERT.
# and Modified a bit to visualize with bertviz.
#
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common Network model."""
import math
import torch
import torch.nn as nn
def gelu(x):
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
except ImportError:
class LayerNorm(nn.Module):
"""A layernorm module in the TF style (epsilon inside the square root)."""
def __init__(self, hidden_size, eps=1e-12):
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size)) # gamma
self.bias = nn.Parameter(torch.zeros(hidden_size)) # beta
self.variance_epsilon = eps
def forward(self, x):
mean = x.mean(dim=-1, keepdim=True)
var = ((x - mean)**2).mean(dim=-1, keepdim=True)
std = (var + self.variance_epsilon).sqrt()
return self.weight * (x - mean)/std + self.bias
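# Minimal usage sketch of the fallback LayerNorm above (hypothetical shapes):
#   ln = LayerNorm(768)
#   x = torch.randn(2, 16, 768)   # (batch, seq_len, hidden)
#   y = ln(x)                     # same shape, normalized over the last dim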
class PositionwiseFeedForward(nn.Module):
""" FeedForward Neural Networks for each position """
def __init__(self, config, eps=1e-12):
super().__init__()
self.intermediate = nn.Linear(config.hidden_size, config.intermediate_size)
self.output = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.layer_norm = LayerNorm(config.hidden_size, eps=eps)
def forward(self, attention_output):
hidden_states = gelu(self.intermediate(attention_output))
hidden_states = self.dropout(self.output(hidden_states))
return self.layer_norm(hidden_states + attention_output)
| 40.365079 | 105 | 0.701534 | 353 | 2,543 | 4.937677 | 0.467422 | 0.034423 | 0.027539 | 0.018359 | 0.04475 | 0.022949 | 0 | 0 | 0 | 0 | 0 | 0.013242 | 0.198191 | 2,543 | 62 | 106 | 41.016129 | 0.841589 | 0.416044 | 0 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0.033333 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7cd8e5b68df4a3597e07017cc8ad1f64fc2dc4a | 1,388 | py | Python | simple_server.py | mlacayoemery/owslib-pywps-echo | 9b19936989d8261986f6184547206386afb8c1fe | [
"Unlicense"
] | null | null | null | simple_server.py | mlacayoemery/owslib-pywps-echo | 9b19936989d8261986f6184547206386afb8c1fe | [
"Unlicense"
] | null | null | null | simple_server.py | mlacayoemery/owslib-pywps-echo | 9b19936989d8261986f6184547206386afb8c1fe | [
"Unlicense"
] | null | null | null | import flask
import pywps
class EchoVector(pywps.Process):
def __init__(self):
inputs = [pywps.ComplexInput('message',
'Input message',
supported_formats=[pywps.Format('application/gml+xml'),
pywps.Format('text/xml')],
mode=pywps.validator.mode.MODE.NONE)]
outputs = [pywps.ComplexOutput('response',
'Output response',
supported_formats=[pywps.Format('application/gml+xml')])]
super(EchoVector, self).__init__(
self._handler,
identifier='echo_vector',
title='Echo Vector Test',
abstract='Returns the given vector',
version='1.0.0.0',
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
def _handler(self, request, response):
response.outputs['response'].data = request.inputs['message'][0].data
return response
app = flask.Flask(__name__)
wps_processes = [EchoVector()]
service = pywps.Service(wps_processes)
@app.route('/wps', methods=['GET', 'POST'])
def wps():
return service
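# Once running, the endpoint answers standard OGC WPS requests, e.g. (a sketch,
# assuming Flask's default port 5000):
#   curl "http://127.0.0.1:5000/wps?service=WPS&request=GetCapabilities"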
bind_host = '127.0.0.1'
app.run(threaded=True, host=bind_host)
| 30.844444 | 96 | 0.53098 | 131 | 1,388 | 5.450382 | 0.465649 | 0.046218 | 0.058824 | 0.07563 | 0.123249 | 0.123249 | 0.123249 | 0 | 0 | 0 | 0 | 0.012236 | 0.352305 | 1,388 | 44 | 97 | 31.545455 | 0.78198 | 0 | 0 | 0 | 0 | 0 | 0.131124 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.058824 | 0.029412 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7cdb089436833732bfa37700ee373f489662c99 | 889 | py | Python | tests/test_jaccard_score.py | jo-mueller/biapol-utilities | 773b60a64fa12641ba869addd7b4be9a4ab87ecd | [
"BSD-3-Clause"
] | 4 | 2021-12-17T19:37:11.000Z | 2022-03-29T16:39:31.000Z | tests/test_jaccard_score.py | jo-mueller/biapol-utilities | 773b60a64fa12641ba869addd7b4be9a4ab87ecd | [
"BSD-3-Clause"
] | 34 | 2021-11-04T14:10:24.000Z | 2022-01-31T13:23:44.000Z | tests/test_jaccard_score.py | jo-mueller/biapol-utilities | 773b60a64fa12641ba869addd7b4be9a4ab87ecd | [
"BSD-3-Clause"
] | 2 | 2021-12-14T13:53:16.000Z | 2021-12-15T12:30:51.000Z | # -*- coding: utf-8 -*-
from biapol_utilities import label
import numpy as np
def test_compare_labels():
a = np.asarray([5, 0, 0, 1, 1, 1, 2, 2])
b = np.asarray([5, 0, 0, 1, 1, 1, 2, 3])
result = label.compare_labels(a, b)
    assert 'jaccard_score' in result.columns
    assert 'dice_score' in result.columns
def test_compare_labels2():
a = np.asarray([5, 0, 0, 1, 1, 1, 2, 2])
b = np.asarray([6, 0, 0, 1, 1, 1, 2, 3])
result = label.compare_labels(a, b)
    assert np.max(result.label) == np.max([a, b])
def test_compare_labels3():
a = np.asarray([5, 0, 0, 1, 1, 1, 2, 2])
b = np.asarray([6, 0, 0, 1, 1, 1, 2, 3])
result = label.compare_labels(a, b)
    assert result[result.label == 0].jaccard_score.to_numpy()[0] == 1.0
if __name__ == "__main__":
test_compare_labels()
test_compare_labels2()
test_compare_labels3()
| 21.166667 | 72 | 0.598425 | 153 | 889 | 3.294118 | 0.248366 | 0.047619 | 0.035714 | 0.047619 | 0.404762 | 0.404762 | 0.404762 | 0.404762 | 0.404762 | 0.402778 | 0 | 0.082133 | 0.219348 | 889 | 41 | 73 | 21.682927 | 0.644092 | 0.023622 | 0 | 0.363636 | 0 | 0 | 0.035797 | 0 | 0 | 0 | 0 | 0 | 0.181818 | 1 | 0.136364 | false | 0 | 0.090909 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7d05b674fc0a756fe709726953552b8a0021a86 | 1,420 | py | Python | commands/elections.py | Nichodon/Democracy-Bot | 708613944b25f7331b18153e5f90c18f44e38aff | [
"MIT"
] | null | null | null | commands/elections.py | Nichodon/Democracy-Bot | 708613944b25f7331b18153e5f90c18f44e38aff | [
"MIT"
] | null | null | null | commands/elections.py | Nichodon/Democracy-Bot | 708613944b25f7331b18153e5f90c18f44e38aff | [
"MIT"
] | null | null | null | import asyncio
from demobot.utils import *
from demobot.handlers import add_message_handler, nested_get, nested_set, nested_pop
from commands.utilities import save
async def running(Demobot, msg, reg):
if nested_get(msg.server.id, "roles", 'citizen') in msg.author.roles:
aliases = {
'rep': 'representative',
'representative': 'representative',
'ld': 'leader',
'pres': 'leader',
'president': 'leader',
'leader': 'leader'
}
if reg.group('pos') not in aliases:
return
dmm = await Demobot.send_message(msg.author, "DM me a description for " + aliases[reg.group('pos')] + ".")
m = await Demobot.wait_for_message(timeout=600, author=msg.author, channel=dmm.channel)
if not m:
m = "*No description given*"
else:
m = m.content
nested_pop(msg.server.id, 'elections', aliases[reg.group('pos')], msg.author.id)
nested_set(Candidate(m, msg.author.id), msg.server.id, 'elections', aliases[reg.group('pos')], msg.author.id)
await Demobot.send_message(msg.author, "You are now running.")
await save(None, None, None, overrideperms=True)
else:
await Demobot.send_message(msg.channel, "You must be a citizen!")
add_message_handler(running, r'I\s*(?:(?:want|would\s*like)\s*to\s*run|am\s*running)\s*for\s*(?P<pos>.*?)\Z')
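# Messages the pattern above matches (a sketch): "I want to run for president",
# "I would like to run for rep", "I am running for leader"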
| 43.030303 | 117 | 0.619718 | 188 | 1,420 | 4.601064 | 0.409574 | 0.072832 | 0.050867 | 0.079769 | 0.217341 | 0.187283 | 0.113295 | 0.113295 | 0.113295 | 0.113295 | 0 | 0.002747 | 0.230986 | 1,420 | 32 | 118 | 44.375 | 0.789377 | 0 | 0 | 0.068966 | 0 | 0.034483 | 0.209155 | 0.053521 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.137931 | 0 | 0.172414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7d451a5141cd5cf0b8b79bacc909a39ece22eab | 848 | py | Python | ping.py | FoxUnderGround/Ping-Plot | 4a61b147c2e1f4ee79cd77ea8500fc70175b59c7 | [
"MIT"
] | 1 | 2020-03-06T14:18:34.000Z | 2020-03-06T14:18:34.000Z | ping.py | FoxUnderGround/Ping-Plot | 4a61b147c2e1f4ee79cd77ea8500fc70175b59c7 | [
"MIT"
] | null | null | null | ping.py | FoxUnderGround/Ping-Plot | 4a61b147c2e1f4ee79cd77ea8500fc70175b59c7 | [
"MIT"
] | null | null | null | from pythonping import ping
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
style.use('fivethirtyeight')
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
xs_min=[0]
ys_min=[5]
def animate(z):
    # Ping Google DNS twice and parse the min/avg/max summary that pythonping
    # appends to the end of its textual output (last ~28 characters).
    ping_out = ping('8.8.8.8', count=2)
    ping_arr = str(ping_out)[len(str(ping_out))-28:].split(" ")
    print(ping_arr)
    for i in range(len(ping_arr)):
        if len(ping_arr[i]) > 10:
            # the "min/avg/max" token: keep the minimum round-trip time
            time_min = ping_arr[i].split("/")[0]
print(time_min)
xs_min.append(float(len(xs_min)))
ys_min.append(float(time_min))
#print(xs_min)
#print(ys_min)
ax1.clear()
ax1.plot(xs_min[-50:],ys_min[-50:],linewidth=2)
ani = animation.FuncAnimation(fig, animate, interval=50)
plt.show()
print(xs_min)
| 21.74359 | 64 | 0.610849 | 129 | 848 | 3.844961 | 0.410853 | 0.060484 | 0.012097 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03888 | 0.241745 | 848 | 38 | 65 | 22.315789 | 0.732504 | 0.03066 | 0 | 0 | 0 | 0 | 0.030691 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.166667 | 0 | 0.208333 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7dd0fd747c77ccfad30186eb48f1feb9667fdbd | 7,400 | py | Python | submodules/hal/assemblyHub/prepareHubFiles.py | pbasting/cactus | 833d8ca015deecdfa5d0aca01211632cdaca9e58 | [
"MIT-0"
] | null | null | null | submodules/hal/assemblyHub/prepareHubFiles.py | pbasting/cactus | 833d8ca015deecdfa5d0aca01211632cdaca9e58 | [
"MIT-0"
] | null | null | null | submodules/hal/assemblyHub/prepareHubFiles.py | pbasting/cactus | 833d8ca015deecdfa5d0aca01211632cdaca9e58 | [
"MIT-0"
] | null | null | null | #!/usr/bin/env python
#Copyright (C) 2013 by Ngan Nguyen
#
#Released under the MIT license, see LICENSE.txt
"""
Make "hub.txt", "groups.txt", files that are required by AssemblyHub
Also prepare description.html files
"""
import os, sys
from sonLib.bioio import system
from optparse import OptionGroup
from hal.assemblyHub.assemblyHubCommon import getProperName
from Bio import Phylo
from hal.assemblyHub.treeCommon import isBinaryTree
def writeDescriptionFile(genome, outdir):
filename = os.path.join(outdir, "description.html")
f = open(filename, 'w')
f.write("%s\n" %genome)
f.close()
return
def writeTrackDb_composite_html(file, treeFile):
f = open(file, 'w')
#HACK:
#huburl = "http://hgwdev.cse.ucsc.edu/~nknguyen/ecoli/hub/TEST2"
huburl = "http://hgwdev.cse.ucsc.edu/~nknguyen/birds/birds2"
basename = os.path.basename(treeFile)
f.write("<img src=\"%s/%s\">\n" %(huburl, basename))
f.close()
def writeTrackDb_compositeStart(f, shortLabel, longLabel, bbdirs, bwdirs, genomes, properName, url, img):
#Composite track includes all annotations in BED & WIGGLE formats, their lifted-over tracks, and Snake tracks
f.write("track hubCentral\n")
f.write("compositeTrack on\n")
f.write("shortLabel %s\n" %shortLabel)
f.write("longLabel %s\n" %longLabel)
f.write("group comphub\n")
bedtracktypes = [os.path.basename(b.rstrip('/')) for b in bbdirs]
bedstr = " ".join(["%s=%s" %(item, item) for item in bedtracktypes])
wigtracktypes = [os.path.basename(b.rstrip('/')) for b in bwdirs]
wigstr = " ".join(["%s=%s" %(item, item) for item in wigtracktypes])
f.write("subGroup1 view Track_Type Snake=Alignments %s %s\n" %(bedstr, wigstr))
genomeStr = " ".join(["%s=%s" %(g, getProperName(g, properName)) for g in genomes])
f.write("subGroup2 orgs Organisms %s\n" %genomeStr)
f.write("dragAndDrop subTracks\n")
f.write("#allButtonPair on\n")
#f.write("sortOrder view=+ orgs=+\n")
f.write("dimensions dimensionX=view dimensionY=orgs\n")
f.write("noInherit on\n")
f.write("priority 0\n")
f.write("centerLabelsDense on\n")
f.write("visibility full\n")
f.write("html ../documentation/hubCentral\n")
if url and img:
imgurl = os.path.join(url, os.path.basename(img))
f.write("treeImage %s\n" %imgurl)
f.write("type bigBed 3\n")
f.write("\n")
def writeTrackDb_compositeSubTrack(f, name, visibility):
f.write("\ttrack hubCentral%s\n" %name)
f.write("\tshortLabel %s\n" %name)
f.write("\tview %s\n" %name)
f.write("\tvisibility %s\n" %visibility)
f.write("\tsubTrack hubCentral\n")
f.write("\n")
def writeGroupFile(outdir, hubLabel, annotations):
filename = os.path.join(outdir, "groups.txt")
f = open(filename, 'w')
f.write("name user\n")
f.write("label Custom\n")
f.write("priority 1\n")
f.write("defaultIsClosed 1\n")
f.write("\n")
f.write("name map\n")
f.write("label Mapping\n")
f.write("priority 2\n")
f.write("defaultIsClosed 0\n")
f.write("\n")
f.write("name comphub\n")
f.write("label %s\n" % hubLabel)
f.write("priority 3\n")
f.write("defaultIsClosed 0\n")
f.write("\n")
f.write("name snake\n")
f.write("label Alignment Snakes\n")
f.write("priority 3\n")
f.write("defaultIsClosed 0\n")
f.write("\n")
for annotation in annotations:
f.write("name annotation%s\n" %annotation)
f.write("label %s Annotations\n" % annotation.capitalize() )
f.write("priority 3\n")
f.write("defaultIsClosed 1\n")
f.write("\n")
f.write("name exp\n")
f.write("label Experimental\n")
f.write("priority 4\n")
f.write("defaultIsClosed 1\n")
f.write("\n")
f.close()
def writeHubFile(outdir, options):
hubfile = os.path.join(outdir, "hub.txt")
f = open(hubfile, "w")
f.write("hub %s\n" %options.hubLabel)
f.write("shortLabel %s\n" %options.shortLabel)
f.write("longLabel %s\n" %options.longLabel)
f.write("genomesFile genomes.txt\n")
f.write("email %s\n" %options.email)
f.close()
#=========== READ FILES ===========
def readList(file):
items = []
f = open(file, 'r')
for line in f:
items.append(line.strip())
f.close()
return items
def readRename(file):
name2new = {}
f = open(file, 'r')
for line in f:
line = line.strip()
if len(line) == 0 or line[0] == "#":
continue
items = line.split('\t')
if len(items) >=2:
name2new[items[0]] = items[1]
f.close()
return name2new
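# Example rename-file contents (a sketch): tab-separated, one mapping per line,
# e.g. "hg19\tHuman" maps hal genome name hg19 to display name Human.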
#=========== OPTIONS =============
def addHubOptions(parser):
group = OptionGroup(parser, "HUB INFORMATION")
group.add_option('--hub', dest='hubLabel', default='myHub', help='a single-word name of the directory containing the track hub files. Not displayed to hub users. Default=%default')
group.add_option('--shortLabel', dest='shortLabel', default='my hub', help='the short name for the track hub. Suggested maximum length is 17 characters. Displayed as the hub name on the Track Hubs page and the track group name on the browser tracks page. Default=%default')
group.add_option('--longLabel', dest='longLabel', default='my hub', help='a longer descriptive label for the track hub. Suggested maximum length is 80 characters. Displayed in the description field on the Track Hubs page. Default=%default')
group.add_option('--email', dest='email', default='NoEmail', help='the contact to whom questions regarding the track hub should be directed. Default=%default')
group.add_option('--genomes', dest='genomes', help='File specified list of genomes to make browser for. If specified, only create browsers for these genomes in the order provided by the list. Otherwise create browsers for all genomes in the input hal file')
group.add_option('--rename', dest='rename', help='File that maps halfile genomeNames to names displayed on the browser. Format: <halGenomeName>\\t<genomeNameToDisplayOnBrowser>. Default=%default')
group.add_option('--tree', dest='treeFile', help='Newick binary tree. The order of the tracks and the default track layout will be based on this tree if option "genomes" is not specified. If not specified, try to extract the newick tree from the input halfile.')
group.add_option('--url', dest='url', help='Public url of the hub location')
group.add_option('--twobitdir', dest='twobitdir', help='Optional. Directory containing the 2bit files of each genomes. Default: extract from the input hal file.')
parser.add_option_group(group)
def checkHubOptions(parser, options):
if options.genomes:
options.genomes = readList(options.genomes)
options.properName = {}
if options.rename and os.path.exists(options.rename):
options.properName = readRename(options.rename)
options.treeFig = None
options.leaves = None
options.tree = None
if options.treeFile and not os.path.exists(options.treeFile):
        parser.error("The tree file %s does not exist.\n" % options.treeFile)
elif options.treeFile:
tree = Phylo.read(options.treeFile, 'newick')
if isBinaryTree(tree):
options.tree = tree
else:
            sys.stderr.write("Warning: tree %s is not a binary tree. Will be ignored!" % options.treeFile)
| 40.217391 | 277 | 0.662027 | 1,040 | 7,400 | 4.695192 | 0.263462 | 0.076183 | 0.054475 | 0.013107 | 0.199058 | 0.146222 | 0.113864 | 0.099939 | 0.052632 | 0.052632 | 0 | 0.00582 | 0.187297 | 7,400 | 183 | 278 | 40.437158 | 0.806119 | 0.065811 | 0 | 0.202797 | 0 | 0.041958 | 0.373169 | 0.011313 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06993 | false | 0 | 0.041958 | 0 | 0.132867 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7e190c401cd34896ce2b8d4d09a1b734e5c48e3 | 6,698 | py | Python | tools/fastqc/rgFastQC.py | bebatut/tools-iuc | 4fb528145289ad4db04e4589c02e9ddaa1194138 | [
"MIT"
] | null | null | null | tools/fastqc/rgFastQC.py | bebatut/tools-iuc | 4fb528145289ad4db04e4589c02e9ddaa1194138 | [
"MIT"
] | null | null | null | tools/fastqc/rgFastQC.py | bebatut/tools-iuc | 4fb528145289ad4db04e4589c02e9ddaa1194138 | [
"MIT"
] | null | null | null | """
Rewrite of rgFastQC.py for Version 0.11.2 of FastQC.
Changes implemented from tmcgowan at
https://testtoolshed.g2.bx.psu.edu/view/tmcgowan/fastqc
and iuc at https://toolshed.g2.bx.psu.edu/view/iuc/fastqc
with minor changes and bug fixes
SYNOPSIS
rgFastQC.py -i input_file -j input_file.name -o output_html_file [-d output_directory]
[-f fastq|bam|sam] [-n job_name] [-c contaminant_file] [-e fastqc_executable]
EXAMPLE (generated by Galaxy)
rgFastQC.py -i path/dataset_1.dat -j 1000gsample.fastq -o path/dataset_3.dat -d path/job_working_directory/subfolder
-f fastq -n FastQC -c path/dataset_2.dat -e fastqc
"""
import bz2
import glob
import gzip
import mimetypes
import optparse
import os
import re
import shutil
import subprocess
import tempfile
import zipfile
class FastQCRunner(object):
def __init__(self, opts=None):
'''
Initializes an object to run FastQC in Galaxy. To start the process, use the function run_fastqc()
'''
# Check whether the options are specified and saves them into the object
assert opts is not None
self.opts = opts
def prepare_command_line(self):
'''
Develops the Commandline to run FastQC in Galaxy
'''
# Check whether a given file compression format is valid
# This prevents uncompression of already uncompressed files
infname = self.opts.inputfilename
linf = infname.lower()
informat = self.opts.informat
trimext = False
# decompression at upload currently does NOT remove this now bogus ending - fastqc will barf
# patched may 29 2013 until this is fixed properly
ftype = mimetypes.guess_type(self.opts.input)
if linf.endswith('.gz') or linf.endswith('.gzip') or ftype[-1] == "gzip" or informat.endswith('.gz'):
f = gzip.open(self.opts.input)
try:
f.readline()
ftype = ['gzip']
except Exception:
trimext = True
f.close()
elif linf.endswith('bz2') or informat.endswith('.bz2'):
f = bz2.BZ2File(self.opts.input, 'r')
try:
ftype = ['bzip2']
f.readline()
except Exception:
trimext = True
f.close()
elif linf.endswith('.zip'):
if not zipfile.is_zipfile(self.opts.input):
trimext = True
if trimext:
f = open(self.opts.input)
try:
f.readline()
except Exception:
raise Exception("Input file corruption, could not identify the filetype")
infname = os.path.splitext(infname)[0]
        # Replace unwanted or problematic characters in the input file name
self.fastqinfilename = re.sub(r'[^a-zA-Z0-9_\-\.]', '_', os.path.basename(infname))
# check that the symbolic link gets a proper ending, fastqc seems to ignore the given format otherwise
if 'fastq' in self.opts.informat:
# with fastq the .ext is ignored, but when a format is actually passed it must comply with fastqc's
# accepted formats..
self.opts.informat = 'fastq'
elif not self.fastqinfilename.endswith(self.opts.informat):
self.fastqinfilename += '.%s' % self.opts.informat
# Build the Commandline from the given parameters
        command_line = [self.opts.executable, '--outdir %s' % self.opts.outputdir]
if self.opts.contaminants is not None:
command_line.append('--contaminants %s' % self.opts.contaminants)
if self.opts.limits is not None:
command_line.append('--limits %s' % self.opts.limits)
command_line.append('--quiet')
command_line.append('--extract') # to access the output text file
if ftype[-1] == 'gzip':
self.fastqinfilename += '.gz'
elif ftype[-1] == 'bzip2':
self.fastqinfilename += '.bz2'
else:
command_line.append('-f %s' % self.opts.informat)
command_line.append(self.fastqinfilename)
self.command_line = ' '.join(command_line)
def copy_output_file_to_dataset(self):
'''
Retrieves the output html and text files from the output directory and copies them to the Galaxy output files
'''
# retrieve html file
result_file = glob.glob(self.opts.outputdir + '/*html')
with open(result_file[0], 'rb') as fsrc:
with open(self.opts.htmloutput, 'wb') as fdest:
shutil.copyfileobj(fsrc, fdest)
# retrieve text file
text_file = glob.glob(self.opts.outputdir + '/*/fastqc_data.txt')
with open(text_file[0], 'rb') as fsrc:
with open(self.opts.textoutput, 'wb') as fdest:
shutil.copyfileobj(fsrc, fdest)
def run_fastqc(self):
'''
Executes FastQC. Make sure the mandatory import parameters input, inputfilename, outputdir and htmloutput have been specified in the options
'''
# Create a log file
dummy, tlog = tempfile.mkstemp(prefix='rgFastQC', suffix=".log", dir=self.opts.outputdir)
sout = open(tlog, 'w')
self.prepare_command_line()
sout.write(self.command_line)
sout.write('\n')
sout.write("Creating symlink\n") # between the input (.dat) file and the given input file name
os.symlink(self.opts.input, self.fastqinfilename)
sout.write("check_call\n")
subprocess.check_call(self.command_line, shell=True)
sout.write("Copying working %s file to %s \n" % (self.fastqinfilename, self.opts.htmloutput))
self.copy_output_file_to_dataset()
sout.write("Finished")
sout.close()
if __name__ == '__main__':
op = optparse.OptionParser()
op.add_option('-i', '--input', default=None)
op.add_option('-j', '--inputfilename', default=None)
op.add_option('-o', '--htmloutput', default=None)
op.add_option('-t', '--textoutput', default=None)
op.add_option('-d', '--outputdir', default="/tmp/shortread")
op.add_option('-f', '--informat', default='fastq')
op.add_option('-n', '--namejob', default='rgFastQC')
op.add_option('-c', '--contaminants', default=None)
op.add_option('-l', '--limits', default=None)
op.add_option('-e', '--executable', default='fastqc')
opts, args = op.parse_args()
assert opts.input is not None
assert opts.inputfilename is not None
assert opts.htmloutput is not None
if not os.path.exists(opts.outputdir):
os.makedirs(opts.outputdir)
fastqc_runner = FastQCRunner(opts)
fastqc_runner.run_fastqc()
| 39.169591 | 148 | 0.627351 | 853 | 6,698 | 4.838218 | 0.311841 | 0.0504 | 0.026654 | 0.023261 | 0.163315 | 0.094984 | 0.068331 | 0.037315 | 0.037315 | 0 | 0 | 0.007049 | 0.258734 | 6,698 | 170 | 149 | 39.4 | 0.824169 | 0.27456 | 0 | 0.145455 | 0 | 0 | 0.102424 | 0 | 0 | 0 | 0 | 0 | 0.036364 | 1 | 0.036364 | false | 0 | 0.1 | 0 | 0.145455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99adaa9fbb76e38861e85eb3688d0298edfd1677 | 5,563 | py | Python | appengine/monorail/framework/test/paginate_test.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | appengine/monorail/framework/test/paginate_test.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 7 | 2022-02-15T01:11:37.000Z | 2022-03-02T12:46:13.000Z | appengine/monorail/framework/test/paginate_test.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Unit tests for pagination classes."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
from google.appengine.ext import testbed
from framework import exceptions
from framework import paginate
from testing import testing_helpers
from proto import secrets_pb2
class PageTokenTest(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_memcache_stub()
self.testbed.init_datastore_v3_stub()
def testGeneratePageToken_DiffRequests(self):
request_cont_1 = secrets_pb2.ListRequestContents(
parent='same', page_size=1, order_by='same', query='same')
request_cont_2 = secrets_pb2.ListRequestContents(
parent='same', page_size=2, order_by='same', query='same')
start = 10
self.assertNotEqual(
paginate.GeneratePageToken(request_cont_1, start),
paginate.GeneratePageToken(request_cont_2, start))
def testValidateAndParsePageToken(self):
request_cont_1 = secrets_pb2.ListRequestContents(
parent='projects/chicken', page_size=1, order_by='boks', query='hay')
start = 2
token = paginate.GeneratePageToken(request_cont_1, start)
self.assertEqual(
start,
paginate.ValidateAndParsePageToken(token, request_cont_1))
def testValidateAndParsePageToken_InvalidContents(self):
request_cont_1 = secrets_pb2.ListRequestContents(
parent='projects/chicken', page_size=1, order_by='boks', query='hay')
start = 2
token = paginate.GeneratePageToken(request_cont_1, start)
request_cont_diff = secrets_pb2.ListRequestContents(
parent='projects/goose', page_size=1, order_by='boks', query='hay')
with self.assertRaises(exceptions.PageTokenException):
paginate.ValidateAndParsePageToken(token, request_cont_diff)
def testValidateAndParsePageToken_InvalidSerializedToken(self):
request_cont = secrets_pb2.ListRequestContents()
with self.assertRaises(exceptions.PageTokenException):
paginate.ValidateAndParsePageToken('sldkfj87', request_cont)
def testValidateAndParsePageToken_InvalidTokenFormat(self):
request_cont = secrets_pb2.ListRequestContents()
with self.assertRaises(exceptions.PageTokenException):
paginate.ValidateAndParsePageToken('///sldkfj87', request_cont)
class PaginateTest(unittest.TestCase):
def testVirtualPagination(self):
# Paginating 0 results on a page that can hold 100.
mr = testing_helpers.MakeMonorailRequest(path='/issues/list')
total_count = 0
items_per_page = 100
start = 0
vp = paginate.VirtualPagination(total_count, items_per_page, start)
self.assertEqual(vp.num, 100)
self.assertEqual(vp.start, 1)
self.assertEqual(vp.last, 0)
self.assertFalse(vp.visible)
# Paginating 12 results on a page that can hold 100.
mr = testing_helpers.MakeMonorailRequest(path='/issues/list')
vp = paginate.VirtualPagination(12, 100, 0)
self.assertEqual(vp.num, 100)
self.assertEqual(vp.start, 1)
self.assertEqual(vp.last, 12)
self.assertTrue(vp.visible)
# Paginating 12 results on a page that can hold 10.
mr = testing_helpers.MakeMonorailRequest(path='/issues/list?num=10')
vp = paginate.VirtualPagination(12, 10, 0)
self.assertEqual(vp.num, 10)
self.assertEqual(vp.start, 1)
self.assertEqual(vp.last, 10)
self.assertTrue(vp.visible)
# Paginating 12 results starting at 5 on page that can hold 10.
mr = testing_helpers.MakeMonorailRequest(
path='/issues/list?start=5&num=10')
vp = paginate.VirtualPagination(12, 10, 5)
self.assertEqual(vp.num, 10)
self.assertEqual(vp.start, 6)
self.assertEqual(vp.last, 12)
self.assertTrue(vp.visible)
# Paginating 123 results on a page that can hold 100.
mr = testing_helpers.MakeMonorailRequest(path='/issues/list')
vp = paginate.VirtualPagination(123, 100, 0)
self.assertEqual(vp.num, 100)
self.assertEqual(vp.start, 1)
self.assertEqual(vp.last, 100)
self.assertTrue(vp.visible)
# Paginating 123 results on second page that can hold 100.
mr = testing_helpers.MakeMonorailRequest(path='/issues/list?start=100')
vp = paginate.VirtualPagination(123, 100, 100)
self.assertEqual(vp.num, 100)
self.assertEqual(vp.start, 101)
self.assertEqual(vp.last, 123)
self.assertTrue(vp.visible)
# Paginating a huge number of objects will show at most 1000 per page.
mr = testing_helpers.MakeMonorailRequest(path='/issues/list?num=9999')
vp = paginate.VirtualPagination(12345, 9999, 0)
self.assertEqual(vp.num, 1000)
self.assertEqual(vp.start, 1)
self.assertEqual(vp.last, 1000)
self.assertTrue(vp.visible)
# Test urls for a hotlist pagination
mr = testing_helpers.MakeMonorailRequest(
path='/u/hotlists/17?num=5&start=4')
mr.hotlist_id = 17
mr.auth.user_id = 112
vp = paginate.VirtualPagination(12, 5, 4,
list_page_url='/u/112/hotlists/17')
self.assertEqual(vp.num, 5)
self.assertEqual(vp.start, 5)
self.assertEqual(vp.last, 9)
self.assertTrue(vp.visible)
self.assertEqual('/u/112/hotlists/17?num=5&start=9', vp.next_url)
self.assertEqual('/u/112/hotlists/17?num=5&start=0', vp.prev_url)
| 38.10274 | 77 | 0.729642 | 707 | 5,563 | 5.61669 | 0.224894 | 0.101989 | 0.102745 | 0.070511 | 0.618736 | 0.543692 | 0.533115 | 0.469907 | 0.421556 | 0.352304 | 0 | 0.045033 | 0.165738 | 5,563 | 145 | 78 | 38.365517 | 0.810601 | 0.121697 | 0 | 0.357798 | 0 | 0 | 0.070856 | 0.033272 | 0 | 0 | 0 | 0 | 0.357798 | 1 | 0.06422 | false | 0 | 0.082569 | 0 | 0.165138 | 0.009174 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99addb00927f314ffddb52609797a63c1bd4e073 | 4,022 | py | Python | backend/google_calendar.py | engineerjoe440/djjoecalendar | 02e88861d460d71527bf5d714913fd148579b258 | [
"MIT"
] | null | null | null | backend/google_calendar.py | engineerjoe440/djjoecalendar | 02e88861d460d71527bf5d714913fd148579b258 | [
"MIT"
] | null | null | null | backend/google_calendar.py | engineerjoe440/djjoecalendar | 02e88861d460d71527bf5d714913fd148579b258 | [
"MIT"
] | null | null | null | ################################################################################
"""
DJ JOE Website Availability Calendar
------------------------------------
(c) 2021 - Stanley Solutions - Joe Stanley
This application serves the React frontend required to demonstrate the available
dates for DJ Joe Services.
"""
################################################################################
# Import Requisites
import os
import datetime
import requests
from date_support import daterange, _clean_dates, _restore_datetimes
ENV_API_KEY = "GOOGLE_API_KEY"
BASE_URL = (
"https://clients6.google.com/calendar/v3/calendars/engineerjoe440@gmail.com"
"/events?calendarId=engineerjoe440%40gmail.com&singleEvents=true&timeZone="
"America%2FLos_Angeles&maxAttendees=1&maxResults=250&sanitizeHtml=true&"
"timeMin={TIME_MIN}&timeMax={TIME_MAX}&key={API_KEY}"
)
################################################################################
# Supporting Functions
def googlify_datetimes(dts):
dts = _restore_datetimes(_clean_dates(dts))
return [dt.isoformat()+"Z" for dt in dts]
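# E.g. (a sketch, assuming _clean_dates/_restore_datetimes round-trip the values):
#   datetime(2021, 5, 1) -> "2021-05-01T00:00:00Z", the RFC3339 form expected by
#   the Calendar API's timeMin/timeMax parameters.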
def get_google_date(google_dt_dict):
"""Performs dictionary-specific handling to attempt extraction of dt."""
google_dt = google_dt_dict.get('dateTime', google_dt_dict.get('date'))
return google_dt.split("T")[0]
def get_google_time(google_dt_dict):
"""Performs dictionary-specific handling to attempt extraction of dt."""
google_dt = google_dt_dict.get('dateTime', google_dt_dict.get('date'))
try:
timestring = google_dt.split("T")[1].split('-')[0]
except IndexError:
timestring = "00:00:00"
return timestring
################################################################################
# Event Listing Functions
def get_event_list(start: datetime.datetime, end: datetime.datetime):
"""Identifies a list of all events in the specified date range."""
start, end = googlify_datetimes([start, end])
# Call the Calendar API
REQ_URL = BASE_URL.format(
TIME_MIN = start,
TIME_MAX = end,
API_KEY = os.getenv(ENV_API_KEY),
)
print(REQ_URL)
resp = requests.get(REQ_URL)
if resp.status_code == 200:
return resp.json().get('items', [])
else:
print(
"GOOGLE REQUEST FAILED:",
resp.status_code,
resp.reason,
)
return []
def get_occupied_dates(start: datetime.datetime, end: datetime.datetime):
"""Generates a list of single dt objects representing occupied dates."""
events = get_event_list(start=start, end=end)
occupied_dates = []
# Iteratively process each event
for event in events:
start_date = datetime.datetime.strptime(
get_google_date(event['start']),
"%Y-%m-%d",
)
end = event.get('end')
        if end is not None:
end_date = get_google_date(end)
end_time = datetime.datetime.strptime(get_google_time(end), "%H:%M:%S")
            if end_date is not None and end_time.hour != 0:
end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
for date in daterange(start_date, end_date):
# Append all dates in range
occupied_dates.append(date)
else:
# Append only start date
occupied_dates.append(start_date)
else:
# Append only start date
occupied_dates.append(start_date)
return occupied_dates
if __name__ == '__main__':
now = datetime.datetime.now() - datetime.timedelta(days=20)
events = get_event_list(now, now + datetime.timedelta(days=30))
for event in events:
print(event['start'].get('dateTime', event['start'].get('date')))
if len(events) == 0:
print("NO EVENTS FOUND")
events = get_occupied_dates(now, now + datetime.timedelta(days=30))
for event in events:
print("event", event)
if len(events) == 0:
print("NO EVENTS FOUND") | 34.973913 | 83 | 0.592491 | 470 | 4,022 | 4.876596 | 0.314894 | 0.034904 | 0.031414 | 0.026178 | 0.287958 | 0.259162 | 0.224258 | 0.224258 | 0.19808 | 0.19808 | 0 | 0.012662 | 0.21457 | 4,022 | 115 | 84 | 34.973913 | 0.712884 | 0.168324 | 0 | 0.186667 | 0 | 0 | 0.147777 | 0.064861 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.053333 | 0 | 0.2 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99aff1381f53e0fd438acad0daa71781fe93be07 | 5,764 | py | Python | seutil/Stream.py | JiyangZhang/seutil | 6a2d0961a5f78f2adbf9f0b5f292f6be71780ca2 | [
"Apache-2.0"
] | 6 | 2020-07-02T02:39:59.000Z | 2022-02-08T18:38:39.000Z | seutil/Stream.py | JiyangZhang/seutil | 6a2d0961a5f78f2adbf9f0b5f292f6be71780ca2 | [
"Apache-2.0"
] | 5 | 2020-11-29T02:26:50.000Z | 2022-01-24T16:26:54.000Z | seutil/Stream.py | JiyangZhang/seutil | 6a2d0961a5f78f2adbf9f0b5f292f6be71780ca2 | [
"Apache-2.0"
] | 1 | 2020-10-09T23:31:22.000Z | 2020-10-09T23:31:22.000Z | from pathlib import Path
import numpy as np
import random
import subprocess
from typing import *
from .IOUtils import IOUtils
class Stream:
"""
Streams help manipulate sequences of objects.
"""
def __init__(self):
self.items = list()
return
@classmethod
def of(cls, one_or_more_items):
"""
Get a new stream from the item / items.
:param one_or_more_items: is converted to list with builtin `list` function.
"""
stream = Stream()
if one_or_more_items is not None:
stream.items = list(one_or_more_items)
# end if, if
return stream
@classmethod
def of_files(cls, dir_path: Union[str, Path]):
"""
Get a stream of the files under the directory.
"""
with IOUtils.cd(dir_path):
cmd_find = "find -mindepth 1 -maxdepth 1 -type f"
files = subprocess.run(["bash","-c",cmd_find], stdout=subprocess.PIPE).stdout.decode("utf-8").split("\n")[:-1]
# end with
files = [file[2:] for file in files]
stream = cls.of(files)
stream.sorted()
return stream
@classmethod
def of_dirs(cls, dir_path: Union[str, Path]):
"""
Get a stream of the sub-directories under the directory.
"""
with IOUtils.cd(dir_path):
cmd_find = "find -mindepth 1 -maxdepth 1 -type d"
dirs = subprocess.run(["bash","-c",cmd_find], stdout=subprocess.PIPE).stdout.decode("utf-8").split("\n")[:-1]
# end with
dirs = [dir[2:] for dir in dirs]
stream = cls.of(dirs)
stream.sorted()
return stream
def filter(self, predicate_func: Callable[[object], bool]):
"""
Returns a stream consisting of the elements of this stream that match the given predicate.
"""
return Stream.of(item for item in self.items if predicate_func(item))
    def count(self):
        return len(self.items)
def reduce(self, count_func: Callable[[str], float] = lambda x: 1):
return sum([count_func(f) for f in self.items])
def sorted(self, key: Callable[[str], object] = lambda f: f,
reverse: bool = False):
"""
Sorts the list of files in the dataset.
"""
list.sort(self.items, key=key, reverse=reverse)
return self
def map(self, map_func: Callable[[str], object],
errors: str = "raise", default: object = ""):
def new_items_generator():
for item in self.items:
try:
new_item = map_func(item)
                except Exception:
if errors == "ignore":
yield default
else:
raise
else:
yield new_item
# end for
# end def
return Stream.of(new_items_generator())
def peak(self, peak_func: Callable[[str], None],
errors: str = "ignore"):
for item in self.items:
try:
peak_func(item)
            except Exception:
if errors == "ignore":
continue
else:
raise
# end for
return self
def split(self, fraction_list: List[float],
count_func: Callable[[str], float] = lambda x: 1):
"""
Splits the dataset as each part specified by the fractions (assumed to sum up to 1).
Splitting is done by finding the cutting points. If randomization is needed, call shuffle first.
:param count_func: customize the number of data counts in each file.
"""
if self.is_empty():
return tuple(Stream() for i in range(len(fraction_list)))
count_list = [count_func(f) for f in self.items]
cum_count_list = np.cumsum(count_list)
cum_expected_count_list = [f * cum_count_list[-1] for f in np.cumsum(fraction_list)]
cut_index_list = []
last_i = 0
for i, cum_count in enumerate(cum_count_list):
if cum_count >= cum_expected_count_list[len(cut_index_list)]:
last_i = i+1
cut_index_list.append(i+1)
if len(cut_index_list) >= len(cum_expected_count_list):
break
# end if
# end for if
if last_i != len(cum_count_list):
cut_index_list.append(len(cum_count_list))
# end if
cut_index_list.insert(0,0)
return tuple(Stream.of(self.items[cut_index_list[i]:cut_index_list[i + 1]]) for i in range(len(cut_index_list) - 1))
def shuffle(self, seed=None):
"""
Shuffles the list of files in the dataset.
"""
random.seed(seed)
random.shuffle(self.items)
return self
def get(self, index: int):
return self.items[index]
def is_empty(self):
return len(self.items) == 0
def __getitem__(self, item):
new_items = self.items.__getitem__(item)
if not isinstance(item, slice):
new_items = [new_items]
return Stream.of(new_items)
def __setitem__(self, key, value):
return self.items.__setitem__(key, value)
def __delitem__(self, key):
return self.items.__delitem__(key)
def __iter__(self):
return self.items.__iter__()
def __len__(self):
return self.items.__len__()
def __str__(self):
return "Stream with {} items".format(len(self.items))
def __repr__(self):
return self.__str__()
def __add__(self, other):
if isinstance(other, Stream):
return Stream.of(self.items+other.items)
else:
raise NotImplementedError
| 32.022222 | 124 | 0.566967 | 733 | 5,764 | 4.251023 | 0.231924 | 0.054878 | 0.03466 | 0.017972 | 0.270218 | 0.202182 | 0.170732 | 0.154044 | 0.116816 | 0.116816 | 0 | 0.005704 | 0.330847 | 5,764 | 179 | 125 | 32.201117 | 0.802178 | 0.133588 | 0 | 0.228814 | 0 | 0 | 0.029467 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.194915 | false | 0 | 0.050847 | 0.084746 | 0.449153 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99b9d977ad5ba85f80dfdf899e7e306d68769703 | 246 | py | Python | main.py | FlamptX/topgg-webhook-cog | b937bbef71ada9a00b6e748475fa55e5cedbf6ee | [
"MIT"
] | 1 | 2021-05-11T16:22:00.000Z | 2021-05-11T16:22:00.000Z | main.py | FlamptX/topgg-webhook-cog | b937bbef71ada9a00b6e748475fa55e5cedbf6ee | [
"MIT"
] | null | null | null | main.py | FlamptX/topgg-webhook-cog | b937bbef71ada9a00b6e748475fa55e5cedbf6ee | [
"MIT"
] | null | null | null | from discord.ext import commands
from configparser import ConfigParser
parser = ConfigParser()
parser.read("config.txt")
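# config.txt is expected to look like (a sketch):
#   [config]
#   token = YOUR_BOT_TOKEN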
TOKEN = parser.get('config', 'token')
bot = commands.Bot(command_prefix='!')
bot.load_extension("Webhook")
bot.run(TOKEN)
| 20.5 | 38 | 0.760163 | 32 | 246 | 5.78125 | 0.59375 | 0.194595 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.097561 | 246 | 11 | 39 | 22.363636 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0.117886 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99b9dbb04990991f85b416338b39b80cb298f798 | 567 | py | Python | test_ghost/test_01_basic_functionality.py | prosky-pmaj/python-framework-for-web-ui-and-api-testing | 6218aca2e5a45f26507ab624724e1f17dca5546f | [
"MIT"
] | null | null | null | test_ghost/test_01_basic_functionality.py | prosky-pmaj/python-framework-for-web-ui-and-api-testing | 6218aca2e5a45f26507ab624724e1f17dca5546f | [
"MIT"
] | null | null | null | test_ghost/test_01_basic_functionality.py | prosky-pmaj/python-framework-for-web-ui-and-api-testing | 6218aca2e5a45f26507ab624724e1f17dca5546f | [
"MIT"
] | null | null | null | from components.ghost.blogPage import BlogPage
from components.ghost.adminPanelPage import AdminPanelPage
class TestBlogPage(BlogPage):
def test_01_open_blog_page(self):
self.go_to()
assert self.get_title() == "Blog for Testing"
class TestAdminPanelPage(AdminPanelPage):
def test_01_open_admin_panel_page(self):
self.go_to()
assert self.is_log_in_required()
def test_02_log_in_to_admin_panel(self):
self.go_to()
self.log_in_as_admin()
assert not self.is_log_in_required()
self.logOut()
| 27 | 58 | 0.714286 | 78 | 567 | 4.833333 | 0.423077 | 0.05305 | 0.079576 | 0.095491 | 0.228117 | 0.137931 | 0.137931 | 0 | 0 | 0 | 0 | 0.013274 | 0.202822 | 567 | 20 | 59 | 28.35 | 0.820796 | 0 | 0 | 0.2 | 0 | 0 | 0.028219 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.2 | false | 0 | 0.133333 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99ba4295d40e9eb27422490957fc057fa213b493 | 1,038 | py | Python | data_prep/yelp_dataset/fix_user.py | Nithanaroy/GeoReachRecommender | 74443bad1cb363582736f8fa9294a91321848def | [
"MIT"
] | 1 | 2020-03-25T17:39:10.000Z | 2020-03-25T17:39:10.000Z | data_prep/yelp_dataset/fix_user.py | Nithanaroy/GeoReachRecommender | 74443bad1cb363582736f8fa9294a91321848def | [
"MIT"
] | null | null | null | data_prep/yelp_dataset/fix_user.py | Nithanaroy/GeoReachRecommender | 74443bad1cb363582736f8fa9294a91321848def | [
"MIT"
] | 4 | 2017-01-21T15:16:55.000Z | 2020-03-28T17:43:47.000Z | """
{
'type': 'user',
'user_id': (encrypted user id),
'name': (first name),
'review_count': (review count),
'average_stars': (floating point average, like 4.31),
'votes': {(vote type): (count)},
'friends': [(friend user_ids)],
'elite': [(years_elite)],
'yelping_since': (date, formatted like '2012-03'),
'compliments': {
(compliment_type): (num_compliments_of_this_type),
...
},
'fans': (num_fans),
}
"""
import json
def main(f, o):
with open(f, 'r') as fp:
res = []
out = open(o, 'w')
for u in fp.read().splitlines():
user = json.loads(u)
d = {}
d['_id'] = user['user_id']
d['name'] = user['name']
d['review_count'] = user['review_count']
d['friends_count'] = len(user['friends'])
res.append(json.dumps(d))
out.write('[' + ',\n'.join(res) + ']')
out.close()
if __name__ == '__main__':
main('../dataset/user.json', '../dataset/out.json')
| 25.95 | 58 | 0.504817 | 122 | 1,038 | 4.090164 | 0.52459 | 0.088176 | 0.04008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012129 | 0.285164 | 1,038 | 39 | 59 | 26.615385 | 0.660377 | 0.44316 | 0 | 0 | 0 | 0 | 0.203509 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99bb6a49211d151c035db736f93150d9bd8b0d82 | 5,085 | py | Python | src/GlobalTracker/scene.py | lleon95/NanoSciTracker-Python | f682c1f3b9b9f76a6de8ea816df910715539edf1 | [
"Apache-2.0"
] | null | null | null | src/GlobalTracker/scene.py | lleon95/NanoSciTracker-Python | f682c1f3b9b9f76a6de8ea816df910715539edf1 | [
"Apache-2.0"
] | null | null | null | src/GlobalTracker/scene.py | lleon95/NanoSciTracker-Python | f682c1f3b9b9f76a6de8ea816df910715539edf1 | [
"Apache-2.0"
] | null | null | null | # NanoSciTracker - 2020
# Author: Luis G. Leon Vega <luis@luisleon.me>
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This project was sponsored by CNR-IOM
import copy
import cv2 as cv
import LocalTracker.detector as Detector
import LocalTracker.drawutils as DrawUtils
import LocalTracker.tracker as Tracker
import LocalTracker.matcher as DetectionMatcher
import Matcher.matcher as FeatureMatcher
class Scene:
def __init__(
self,
ROI=None,
overlap=0,
detection_sampling=3,
detection_roi=None,
settings=None
):
# Get coordinates
self.roi = ROI
x, y = self.roi
self.x0, self.x1 = x
self.y0, self.y1 = y
self.w = self.x1 - self.x0
self.h = self.y1 - self.y0
self.overlap = overlap
self.frame = None
# ROIs
if detection_roi is None:
self.detection_roi = (
self.overlap,
self.overlap,
self.w - self.overlap,
self.h - self.overlap,
)
else:
self.detection_roi = detection_roi
# BBs
self.trackers = []
self.detections = []
self.new_detections = []
self.trackers_new_detections = []
self.trackers_out_scene = []
self.dead_trackers = []
# Settings
self._settings = settings
if settings is None:
raise RuntimeError("Scene settings are not valid")
self.batches = self._settings.set_if_defined("batches", 2)
self.grayscale = self._settings.set_if_defined("grayscale", True)
self.world_size = self._settings.set_if_defined("world_size", None)
self.counter = 0
self.detection_sampling = detection_sampling
def load_frame(self, frame):
self.frame = frame
def detect(self, gray_frame):
padding = self._settings.set_if_defined("padding", None)
return Detector.detect(gray_frame, self.batches, padding=padding)
def track(self, colour_frame):
Tracker.updateTrackers(colour_frame, self.trackers, ROI=self.detection_roi)
return Tracker.retrieveBBs(self.trackers)
def update(self, colour_frame=None):
if not colour_frame is None:
self.frame = colour_frame
gray_detect = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY)
# Perform detections and filter the new ones
if self.counter % self.detection_sampling == 0:
self.detections = self.detect(gray_detect)
self.new_detections = DetectionMatcher.inter_match(
self.detections, self.trackers
)
# Deploy new trackers accordingly
self.trackers_new_detections = Tracker.deployTrackers(
self.frame,
self.new_detections,
self.trackers,
ROI=self.detection_roi,
offset=(self.x0, self.y0),
grayscale=self.grayscale,
world_size=self.world_size,
)
else:
self.new_detections = []
self.trackers_new_detections = []
# Perform tracking update
self.track(self.frame)
# Catch trackers which went out of scene
self.trackers_out_scene = Tracker.retrieveOutScene(self.trackers)
self.dead_trackers = Tracker.retrieveDeadTrackers(self.trackers)
self.counter += 1
return (
self.trackers,
self.trackers_out_scene,
self.trackers_new_detections,
self.dead_trackers,
)
def draw(self, colour_frame):
"""
Purple: New detections
Red: Detections
Blue: Trackers
Light blue: Out of scene
"""
colour_copy = copy.deepcopy(colour_frame)
# Draw detections
colour_copy = DrawUtils.draw_detections(
colour_copy, self.new_detections, (255, 0, 255)
)
colour_copy = DrawUtils.draw_detections(
colour_copy, self.detections, (0, 0, 255)
)
# Draw trackers
colour_copy = DrawUtils.draw_trackers(colour_copy, self.trackers, (255, 0, 0))
colour_copy = DrawUtils.draw_trackers(
colour_copy, self.trackers_out_scene, (255, 255, 0)
)
return colour_copy
| 33.019481 | 86 | 0.627139 | 594 | 5,085 | 5.227273 | 0.296296 | 0.0657 | 0.027375 | 0.032206 | 0.171337 | 0.111433 | 0.091465 | 0.064412 | 0.034138 | 0 | 0 | 0.013943 | 0.294789 | 5,085 | 153 | 87 | 33.235294 | 0.851924 | 0.223992 | 0 | 0.121212 | 0 | 0 | 0.01577 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.070707 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99bc36674ac9930132b7fa40a1504d6148e42d4d | 7,573 | py | Python | jmeter_api/configs/http_cookie_manager/elements.py | dashawn888/jmeter_api | 1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd | [
"Apache-2.0"
] | 11 | 2020-03-22T13:30:21.000Z | 2021-12-25T06:23:44.000Z | jmeter_api/configs/http_cookie_manager/elements.py | dashawn888/jmeter_api | 1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd | [
"Apache-2.0"
] | 37 | 2019-12-18T13:12:50.000Z | 2022-02-10T10:52:37.000Z | jmeter_api/configs/http_cookie_manager/elements.py | dashawn888/jmeter_api | 1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd | [
"Apache-2.0"
] | 5 | 2019-12-06T10:55:56.000Z | 2020-06-01T19:32:32.000Z | import logging
from typing import List, Optional
from enum import Enum
from xml.etree.ElementTree import Element
from jmeter_api.basics.config.elements import BasicConfig
from jmeter_api.basics.utils import Renderable, FileEncoding, tree_to_str
class CookiePolicy(Enum):
STANDARD = 'standard'
STANDARD_STRICT = 'standard-strict'
IGNORE = 'ignoreCookies'
NETSCAPE = 'netscape'
DEFAULT = 'default'
RFC2109 = 'rfc2109'
RFC2965 = 'RFC2965'
BEST_MATCH = 'best-match'
class Cookie(Renderable):
TEMPLATE = 'cookie.xml'
root_element_name = 'elementProp'
def __init__(self, *,
name: str,
value: str,
domain: str = '',
path: str = '',
secure: bool = False,
expires: int = 0,
path_specified: bool = True,
domain_specified: bool = True):
self.name = name
self.value = value
self.domain = domain
self.path = path
self.secure = secure
self.expires = expires
self.path_specified = path_specified
self.domain_specified = domain_specified
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, value):
if not isinstance(value, str):
raise TypeError(
f'name must be str. {type(value).__name__} was given')
self._name = value
@property
def value(self) -> str:
return self._value
@value.setter
def value(self, value):
if not isinstance(value, str):
raise TypeError(
f'value must be str. {type(value).__name__} was given')
self._value = value
@property
def domain(self) -> str:
return self._domain
@domain.setter
def domain(self, value):
if not isinstance(value, str):
raise TypeError(
f'domain must be str. {type(value).__name__} was given')
self._domain = value
@property
def path(self) -> str:
return self._path
@path.setter
def path(self, value):
if not isinstance(value, str):
raise TypeError(
f'path must be str. {type(value).__name__} was given')
self._path = value
@property
def expires(self) -> str:
return self._expires
@expires.setter
def expires(self, value):
if not isinstance(value, int):
raise TypeError(
f'expires must be int. {type(value).__name__} was given')
self._expires = str(value)
@property
def secure(self) -> str:
return self._secure
@secure.setter
def secure(self, value):
if not isinstance(value, bool):
raise TypeError(
f'secure must be bool. {type(value).__name__} was given')
self._secure = str(value).lower()
@property
def path_specified(self) -> str:
return self._path_specified
@path_specified.setter
def path_specified(self, value):
if not isinstance(value, bool):
raise TypeError(
f'path_specified must be bool. {type(value).__name__} was given')
self._path_specified = str(value).lower()
@property
def domain_specified(self) -> str:
return self._domain_specified
@domain_specified.setter
def domain_specified(self, value):
if not isinstance(value, bool):
raise TypeError(
f'domain_specified must be bool. {type(value).__name__} was given')
self._domain_specified = str(value).lower()
def to_xml(self) -> str:
xml_tree: Optional[Element] = super().get_template()
element_root = xml_tree.find(self.root_element_name)
element_root.attrib['name'] = self.name
element_root.attrib['testname'] = self.name
for element in list(element_root):
try:
if element.attrib['name'] == 'Cookie.value':
element.text = self.value
elif element.attrib['name'] == 'Cookie.domain':
element.text = self.domain
elif element.attrib['name'] == 'Cookie.path':
element.text = self.path
elif element.attrib['name'] == 'Cookie.secure':
element.text = self.secure
elif element.attrib['name'] == 'Cookie.expires':
element.text = self.expires
elif element.attrib['name'] == 'Cookie.path_specified':
element.text = self.path_specified
elif element.attrib['name'] == 'Cookie.domain_specified':
element.text = self.domain_specified
except KeyError:
logging.error(
f'Unable to properly convert {self.__class__} to xml.')
return tree_to_str(xml_tree)
class HTTPCookieManager(BasicConfig, Renderable):
root_element_name = 'CookieManager'
    def __init__(self, *,
                 cookies: Optional[List[Cookie]] = None,
                 clear_each_iter: bool = False,
                 policy: CookiePolicy = CookiePolicy.STANDARD,
                 name: str = 'HTTP Cookie Manager',
                 comments: str = '',
                 is_enabled: bool = True):
        # avoid the shared mutable default argument pitfall
        self.cookies = cookies if cookies is not None else []
        self.policy = policy
        self.clear_each_iter = clear_each_iter
        super().__init__(name=name, comments=comments, is_enabled=is_enabled)
@property
def policy(self):
return self._policy
@policy.setter
def policy(self, value):
if not isinstance(value, CookiePolicy):
raise TypeError(
f'policy must be CookiePolicy. {type(value).__name__} was given')
self._policy = value
@property
def clear_each_iter(self) -> str:
return self._clear_each_iter
@clear_each_iter.setter
def clear_each_iter(self, value):
if not isinstance(value, bool):
raise TypeError(
f'clear_each_iter must be bool. {type(value).__name__} was given')
self._clear_each_iter = str(value).lower()
@property
def cookies(self) -> str:
return self._cookies
@cookies.setter
    def cookies(self, value):
        if not isinstance(value, list):
            raise TypeError(
                f'cookies must be a list. {type(value).__name__} was given')
        for el in value:
            if not isinstance(el, Cookie):
                raise TypeError(
                    f'cookies must contain only Cookie. {type(el).__name__} was given')
        self._cookies = value
def to_xml(self) -> str:
element_root, xml_tree = super()._add_basics()
for element in list(element_root):
try:
if element.attrib['name'] == 'CookieManager.cookies':
element.text = ''
for arg in self.cookies:
element.text += arg.to_xml()
elif element.attrib['name'] == 'CookieManager.clearEachIteration':
element.text = self.clear_each_iter
except KeyError:
logging.error(
f'Unable to properly convert {self.__class__} to xml.')
        if self.policy is not CookiePolicy.STANDARD:
el = Element('stringProp', attrib={'name': 'CookieManager.policy'})
el.text = str(self.policy.value)
element_root.append(el)
return tree_to_str(xml_tree)
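# A minimal usage sketch (not executed here; rendering assumes the package's
# cookie.xml template is available on the template path):
#
#   cookie = Cookie(name='session', value='abc123', domain='example.com')
#   manager = HTTPCookieManager(cookies=[cookie],
#                               clear_each_iter=True,
#                               policy=CookiePolicy.STANDARD_STRICT)
#   print(manager.to_xml())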
| 32.502146 | 88 | 0.575201 | 832 | 7,573 | 5.032452 | 0.127404 | 0.032243 | 0.02866 | 0.05732 | 0.433962 | 0.318128 | 0.238357 | 0.21925 | 0.21925 | 0.17005 | 0 | 0.003333 | 0.326555 | 7,573 | 232 | 89 | 32.642241 | 0.817647 | 0 | 0 | 0.242268 | 0 | 0 | 0.152119 | 0.047669 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134021 | false | 0 | 0.030928 | 0.056701 | 0.304124 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99bc394d69ac0c2ddde0ba6fd63c903bf2c13144 | 13,338 | py | Python | electionBuster.py | RarW0lf/electionBuster | 394e92a972c0a6af260811985a8051af40ffa25d | [
"MIT"
] | 16 | 2015-10-22T01:37:28.000Z | 2018-10-17T13:05:44.000Z | electionBuster.py | RarW0lf/electionBuster | 394e92a972c0a6af260811985a8051af40ffa25d | [
"MIT"
] | 2 | 2018-02-10T00:05:43.000Z | 2018-05-17T22:48:22.000Z | electionBuster.py | thorshand/electionBuster | 394e92a972c0a6af260811985a8051af40ffa25d | [
"MIT"
] | 4 | 2017-08-02T12:41:44.000Z | 2018-08-14T16:16:41.000Z | #!/usr/bin/python3.5
##################################################
## Author: Joshua Franklin, Kevin Franklin
## Example input to start:
## sudo ./electionBuster.py -f josh -l franklin -y 2014 -e senate -s pennsyltucky
## 6 arguments are passed:
## 1: The first name of the candidate (mandatory)
## 2: The middle name of the candidate (optional)
## 3: The last name of the candidate (mandatory)
## 4: The year of the election (mandatory)
## 5: The type of race, such as congress, senate, or president. (mandatory)
## 6: The state or region the candidate is from (optional)
##################################################
#TODO: Add a keyboard interrupt
import requests
import sys
import time
import string
import argparse
import socket
from datetime import date
import urllib
from multiprocessing import Pool as ThreadPool, Manager
import collections
import csv
import operator
from modules.utils import genAllDonate,genAll,generate_urls, tryURLforReal
from modules.text_tools import alphabet,alt_alphabets,skipLetter,stringAndStrip,removeDups,reverseLetter,wrongVowel,tlds
confirmedURLs = Manager().list()
allURLS = Manager().list()
class NameDenormalizer(object):
def __init__(self, filename=None):
filename = filename or 'names.csv'
lookup = collections.defaultdict(list)
with open(filename) as f:
reader = csv.reader(f)
for line in reader:
matches = set(line)
for match in matches:
lookup[match].append(matches)
self.lookup = lookup
def __getitem__(self, name):
name = name.upper()
if name not in self.lookup:
raise KeyError(name)
return self.lookup[name]
def get(self, name, default=None):
try:
return self[name]
except KeyError:
return set( [name] )
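# Usage sketch, assuming each row of the CSV lists aliases for one name
# (e.g. "DC,DISTRICT OF COLUMBIA,WASHINGTON DC"):
#
#   nd = NameDenormalizer('states.csv')
#   nd.get('DC')       # -> list of alias sets that contain 'DC'
#   nd.get('NOWHERE')  # -> {'NOWHERE'} (fallback for unknown names)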
# Program Timer
start_time = time.time()
# stringAndStrip (imported from modules.text_tools) casts and removes those pesky \r and \n
#Parse command line arguments
parser = argparse.ArgumentParser(description='Identifies registered candidate domains')
parser.add_argument('-f','--firstName', help='Candidate\'s first name',required=True)
parser.add_argument('-m','--middleName',help='Candidate\'s optional middle name')
parser.add_argument('-l','--lastName',help='Candidate\'s last name', required=True)
parser.add_argument('-y','--year', help='Year of the election',required=True)
parser.add_argument('-e','--electionType',help='Type of election (congress, senate, president)', required=True)
parser.add_argument('-s','--state', help='Candidate\'s state of origin', action='append' )
#Exists for candidates like Mitt Romney that possibly have an attachment to two states (i.e., Utah, Massachusetts)
parser.add_argument('-a','--aliasFileName', help='Filename containing a list of aliases')
parser.add_argument('-p','--party', help='Party Affiliation')
args = parser.parse_args()
# Stores command line arguments
# Make all lowercase
fName = args.firstName
fName = fName.lower()
lName = args.lastName
lName = lName.lower()
party = ""
year = args.year
shortYear = year[-2:]
electionType = args.electionType
electionType = electionType.lower()
state = []
stateText = ""
if (args.party) :
party = args.party
fileName = "states.csv"
if (args.aliasFileName) :
fileName = stringAndStrip( args.aliasFileName)
if (args.state) :
nd = NameDenormalizer( fileName )
for aState in args.state:
stateText = stateText + aState.lower()
state.append( stringAndStrip( aState.upper( ) ) )
statenick = list( nd.get( aState.upper() ) )
for s1 in statenick:
for s in s1:
state.append( s )
mName = ""
middleInitial = ""
if (args.middleName) :
mName = args.middleName
mName = mName.lower()
middleInitial = mName[0]
# This assigns the position variable
if (electionType == 'congress') or (electionType == 'congressional') :
position = 'congress'
altPosition = 'congressman' # congresswoman??
elif electionType == 'senate' :
position = 'senator'
altPosition = 'senate'
elif (electionType == 'governor') or (electionType == 'gubernatorial'):
position = 'governor'
altPosition = 'gov'
elif (electionType == 'president') or (electionType == 'presidential') :
position = 'president'
altPosition = 'prez'
elif (electionType == 'mayoral') or (electionType == 'mayor') :
position = 'mayor'
altPosition = 'mayoral'
else :
position = electionType
altPosition = electionType
# top-level domain-names
# consider removing .me, .info, and .biz if they aren't adding value
# Runs stringAndStrip on everything except fileName b/c that's used elsewhere
fName = stringAndStrip(fName)
lName = stringAndStrip(lName)
year = stringAndStrip(year)
electionType = stringAndStrip(electionType)
# Alerting the users to the types of sites we're expecting to find
# This differs at times since the state variable isn't mandatory to run the script
## Consider deleting this - does it actually provide value?
if (args.state) :
print('We expect to find these URLs excluding subtle variances:')
print('http://www.' + fName + lName + '.com')
print('http://www.' + lName + fName + '.com')
print('http://www.' + fName + year + '.com')
print('http://www.' + lName + year + '.com')
print('http://www.' + fName + lName + year + '.com' )
for stateAlias in state:
print('http://www.' + fName + lName + 'for' + stateAlias + '.com')
print('http://www.' + lName + 'for' + stateAlias + '.com')
print('http://www.' + fName + 'for' + stateAlias + '.com')
print('http://www.' + fName + lName + 'for' + position + '.com')
print('http://www.' + fName + 'for' + position + '.com')
print('http://www.' + fName + 'for' + position + year + '.com')
print('http://www.' + position + fName + lName + '.com')
else :
print('We expect to find these URLs excluding subtle variances:')
print('http://www.' + fName + lName + '.com')
print('http://www.' + lName + fName + '.com')
print('http://www.' + fName + year + '.com')
print('http://www.' + lName + year + '.com')
print('http://www.' + fName + lName + year + '.com' )
print('http://www.' + fName + lName + 'for' + position + '.com')
print('http://www.' + fName + 'for' + position + '.com')
print('http://www.' + fName + 'for' + position + year + '.com')
print('http://www.' + position + fName + lName + '.com')
# This is the result output files
# Makes a unique filename based on data and time
now = date.today()
partyString = ""
if ( args.party ) :
partyString = "-" + party.lower()
tempResults = 'results-' + fName + '-' + lName + '-' + stateText + partyString + '-' + str(now) + '.txt'
resultsFile = open(tempResults, "w")
# This clears the results files before reopening them
resultsFile.close()
resultsFile = open(tempResults, "a")
## Other alphabets are defined as a quick way of doing URL mangling.
## Is this a candidate for deletion?
# alternative alphabets
# 0: No change
# 1: i -> 1 "Eye to One"
# 2: l -> i "El to Eye"
# 3: i -> l "Eye to El"
# 4: o -> 0 "Oh to Zero"
# 5: 0 -> o "Zero to Oh"
# 6: n -> m "En to Em" TODO: Does this swap work right?
# 7: m -> n "Em to En"
# 8: e -> 3 "Ee to three"
# 9: 3 -> e "Three to ee"
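# A sketch of what one such swap could look like; the real tables live in
# modules.text_tools.alt_alphabets, this str.translate version is only an
# illustration:
#
#   EYE_TO_ONE = str.maketrans('i', '1')
#   'joshfranklin'.translate(EYE_TO_ONE)  # -> 'joshfrankl1n'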
# These are the template that we'll use based on the optional input parameters.
# The first one is if the state was input.
templates = generate_urls(first_name=args.firstName,
last_name=args.lastName,
state=state,
middlename=args.middleName,
position=position,
altPosition=altPosition,
year=args.year)
# This generates the text mangling
results = genAll(templates, alt_alphabets)
# This generates the text mangling with some other alternatives
resultsDonate = genAllDonate(templates, alt_alphabets)
#### LOOP 1 ####
# All examples use the input of 'josh franklin 2014 president DC'
#################
#http://www.joshfranklin.com
#http://www.josh2014.com
#http://www.franklin2014.com
#http://www.joshfranklin2014.com
#http://www.joshfranklinforDC.com
#http://www.joshfranklinDC.com
#http://www.joshforpresident.com
#http://www.josh4president.com
#http://www.joshforpresident2014.com
#http://www.josh4president2014.com
#http://www.presidentjoshfranklin.com
#http://www.president-josh-franklin.com
#http://www.presidentjoshforpresident2014.com
#http://www.presidentjosh4president2014.com
#http://www.presidentjoshfranklinforpresident2014.com
#http://www.presidentjosh-franklinforpresident2014.com
#http://www.presidentjoshfranklin4president2014.com
#http://www.presidentjosh-franklin4president2014.com
def tryURL(url):
url = stringAndStrip(url)
for domain_name in tlds:
print('Trying: ' + url + domain_name)
allURLS.append(url + domain_name)
print("Entering template loop 1^^^^^^^^^^^^^^^^^^^^^^^^^^" )
print(time.time() - start_time, "seconds")
for r in results:
tryURL( 'http://www.' + r , )
### LOOP 2 ###
# Puts donate at the beginning &
# Removes the period after 'www'
##############
# Extend the tlds a little
tlds.append( '.republican' )
tlds.append( '.democrat' )
tlds.append( '.red' )
tlds.append( '.blue' )
tlds.append( '.vote' )
#These next few look for some of the larger parties
tryURL( 'http://www.republican' + fName + lName )
tryURL( 'http://www.democrat' + fName + lName )
tryURL( 'http://www.libertarian' + fName + lName )
tryURL( 'http://www.independent' + fName + lName )
tryURL( 'http://www.vote' + fName + lName ) #Example: votejoshfranklin.com
tryURL( 'http://www.vote' + fName + middleInitial + lName ) #Example: votejoshmfranklin.com
tryURL( 'http://www.vote' + fName ) #Example: votejosh.com
tryURL( 'http://www.vote' + lName ) #Example: votefranklin.com
tryURL( 'http://www.' + lName + position ) #Example: franklinpresident.com
tryURL( 'http://www.' + lName + altPosition ) #Example: franklinprez.com
tryURL( 'http://www.real' + fName + lName ) #Example: realjoshfranklin.com
for stateAlias in state:
tryURL( 'http://www.' + lName + 'for' + stateAlias ) #Example: franklinforDC.com
tryURL( 'http://www.' + lName + '4' + stateAlias ) #Example: franklin4DC.com
tryURL( 'http://www.friendsof' + fName ) #Example: friendsofjosh.com
tryURL( 'http://www.friendsof' + lName ) #Example: friendsoffranklin.com
tryURL( 'http://www.' + fName + 'sucks' ) #Example: joshsucks.com
tryURL( 'http://www.' + lName + 'sucks' ) #Example: franklinsucks.com
tryURL( 'http://www.' + fName ) #Example: josh.vote
tryURL( 'http://www.' + lName ) #Example: franklin.vote
tryURL( 'http://www.' + fName + lName ) #Example: joshfranklin.vote
tryURL( 'http://www.elect' + fName + lName )
tryURL( 'http://www.elect' + fName + middleInitial + lName )
tryURL( 'http://www.elect' + fName )
tryURL( 'http://www.elect' + lName )
tryURL( 'http://www.' + fName + middleInitial + year )
tryURL( 'http://www.' + middleInitial + lName )
print( ' Total URLS: ' + str(len(allURLS)) + "\n" )
allURLS = removeDups( allURLS )
print( 'Unique URLS: ' + str(len(allURLS)) + "\n" )
pool = ThreadPool( 24 )  # note: this is multiprocessing.Pool (processes), despite the ThreadPool alias
# Open the urls in their own threads
# and return the results
results = pool.map( tryURLforReal, allURLS )
pool.close()
pool.join()
#print(results)
# Each thread added an entry for each result (found or not, gotta filter the blanks)
# I'm doing this here since the file writes might not have been synchronized
# it's just a fear I had
for i in results:
resultsFile.write( i )
totalRuntime = "{:.2f} seconds".format(time.time() - start_time)
###### Write final results to logfile ###########
resultsFile.write( "######################################" + "\n" )
resultsFile.write( "ElectionBuster Scan Results: " + "\n" )
resultsFile.write( "######################################" + "\n" )
resultsFile.write( "INPUTS = " + str(fName) + ", " + str(mName) + ", " + str(lName) + ", " + str(year) + ", " + str(position) + ", " + str(altPosition) + ", " + str(stateText) + ", " + str(party) + "\n" )
resultsFile.write( "Total runtime was " + str(totalRuntime) + "\n" )
resultsFile.write( "There were " + str(len(confirmedURLs)) + " positive results." + "\n" )
resultsFile.write( "There were " + str(len(testedURLs)) + " unique URLs tested." + "\n" )
resultsFile.write( "-------------------------------------" + "\n" )
resultsFile.write( "Positive results: " + "\n" )
resultsFile.write( "-------------------------------------" + "\n" )
for url in confirmedURLs:
resultsFile.write( str(url) + "\n" )
resultsFile.write( "\n" )
resultsFile.write( "-------------------------------------" + "\n" )
resultsFile.write( "EOF " + "\n" )
#for url in allURLS:
# resultsFile.write( str(url) + "\n" )
# print( str( url ) + "\n" )
###### Print final results to screen ###########
print( "###################################### " + "\n" )
print( "ElectionBuster Scan Results: " + "\n" )
print( "###################################### " + "\n" )
print( "INPUTS" + "\n" )
print( "First name: " + fName + "\n" )
print( "Middle name: " + mName + "\n" )
print( "Last name: " + lName + "\n" )
print( "Year: " + year + "\n" )
print( "Election type: " + electionType + "\n" )
print( "-------------------------------------" + "\n" )
print( "Total runtime was " + str(totalRuntime) + "\n" )
print( "-------------------------------------" + "\n" )
print( "Positive results: " + "\n" )
print( "There were " + str(len(confirmedURLs)) + " hits:" + "\n" )
print( "-------------------------------------" + "\n" )
print( "\n" )
for url in confirmedURLs:
print( url )
print( "\n" )
# Bad things happen if these files are not properly closed
resultsFile.close()
| 35.568 | 204 | 0.647623 | 1,632 | 13,338 | 5.273897 | 0.262868 | 0.053677 | 0.040781 | 0.03137 | 0.237597 | 0.141745 | 0.090508 | 0.073312 | 0.073312 | 0.073312 | 0 | 0.008181 | 0.15692 | 13,338 | 374 | 205 | 35.663102 | 0.757225 | 0.276578 | 0 | 0.191304 | 0 | 0 | 0.237795 | 0.042837 | 0 | 0 | 0 | 0.002674 | 0 | 1 | 0.017391 | false | 0 | 0.06087 | 0 | 0.095652 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99bc54e60d9317663e39e91ec8fe0cf6bc80d209 | 20,227 | py | Python | core/authentication/__init__.py | Sick-codes/core | da9250f27a5de4fc3a08e9c47064b19f484b042b | [
"Apache-2.0"
] | null | null | null | core/authentication/__init__.py | Sick-codes/core | da9250f27a5de4fc3a08e9c47064b19f484b042b | [
"Apache-2.0"
] | null | null | null | core/authentication/__init__.py | Sick-codes/core | da9250f27a5de4fc3a08e9c47064b19f484b042b | [
"Apache-2.0"
] | null | null | null | """Initialization of authentication."""
import json
import logging
import secrets
import uuid
from typing import TYPE_CHECKING, List, Optional
if TYPE_CHECKING:
from core.core import ApplicationCore
import aiohttp_jinja2
from aiohttp import hdrs, web
from ..const import CONF_TOKEN_LIFETIME
from .auth_client import AuthenticationClient
from .auth_database import AuthDatabase
_LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
class Authentication:
# list of registered auth clients
auth_clients: List[AuthenticationClient] = []
def __init__(
self,
core: "ApplicationCore",
application: web.Application,
auth_database: AuthDatabase,
):
self.core = core
self.app = application
self.authorization = self.core.authorization
self.auth_database = auth_database
# Authorization Endpoint: obtain an authorization grant
self.app.router.add_get(
path="/oauth/authorize", handler=self.authorization_endpoint_get
)
self.app.router.add_post(
path="/oauth/authorize", handler=self.authorization_endpoint_post
)
# Token Endpoint: obtain an access token by authorization grant or refresh token
self.app.router.add_post(
path="/oauth/token", handler=self.token_endpoint_handler
)
self.app.router.add_post("/revoke", self.revoke_token_handler, name="revoke")
self.app.router.add_get("/protected", self.protected_handler, name="protected")
def add_client(self, auth_client: AuthenticationClient):
self.auth_clients.append(auth_client)
async def revoke_token_handler(self, request: web.Request) -> web.StreamResponse:
"""
Revoke the request token and all associated access tokens [RFC 7009]
See Section 2.1: https://tools.ietf.org/html/rfc7009#section-2.1
"""
_LOGGER.info("POST /revoke")
await self.check_authorized(request)
data = await request.post()
token_to_revoke = data["token"]
await self.auth_database.revoke_token(token_to_revoke)
return web.Response(status=200)
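    # Client-side sketch of the RFC 7009 call handled above (hypothetical
    # host/port and token values; a valid Bearer token is required):
    #
    #   async with aiohttp.ClientSession() as session:
    #       await session.post(
    #           "http://localhost:8080/revoke",
    #           data={"token": token_to_revoke},
    #           headers={"Authorization": f"Bearer {access_token}"},
    #       )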
async def protected_handler(self, request: web.Request) -> web.StreamResponse:
_LOGGER.warning("GET /protected")
await self.check_permission(request, "library:read")
response = web.Response(body=b"You are on protected page")
return response
@aiohttp_jinja2.template("authorize.jinja2")
async def authorization_endpoint_get(
self, request: web.Request
) -> web.StreamResponse:
"""
Validate the request to ensure that all required parameters are present and valid.
See Section 4.1.1: https://tools.ietf.org/html/rfc6749#section-4.1.1
"""
try:
_LOGGER.debug(f"GET /oauth/authorize from: {request.host}")
response_type = request.query.get("response_type")
client_id = request.query.get("client_id")
# validate required params
if response_type is None or client_id is None:
_LOGGER.warning("The response is missing a response_type or client_id.")
data = """{
"error": "invalid_request",
"error_description": "The request is missing a required parameter"
}"""
return web.json_response(json.loads(data))
# check if client is known
if not any(client.client_id == client_id for client in self.auth_clients):
_LOGGER.warning("The client_id is unknown!")
data = """{
"error":"unauthorized_client",
"error_description":"The client is not authorized to request an authorization code using this method."
}"""
return web.json_response(json.loads(data))
# validate response_type
if response_type != "code":
_LOGGER.warning(
f"The request is using an invalid response_type: {response_type}"
)
data = """{
"error":"unsupported_response_type",
"error_description":"The request is using an invalid response_type"
}"""
return web.json_response(json.loads(data))
redirect_uri = request.query.get("redirect_uri")
# extract client from registered auth_clients with matching client_id
registered_auth_client = next(
filter(lambda client: client.client_id == client_id, self.auth_clients),
None,
)
# validate if redirect_uri is in registered_auth_client
if not any(
uri == redirect_uri for uri in registered_auth_client.redirect_uris
):
_LOGGER.error(f"redirect uri not found: {redirect_uri}")
data = """{
"error":"unauthorized_client",
"error_description":"The redirect_uri is unknown"
}"""
return web.json_response(json.loads(data))
scope = request.query.get("scope")
requested_scopes = scope.split(" ")
# TODO: validate scopes with regex => 1*( %x21 / %x23-5B / %x5D-7E ))
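            # a sketch of that check, per the scope-token grammar of
            # RFC 6749 section 3.3 (illustrative only):
            #   re.fullmatch(r'[\x21\x23-\x5B\x5D-\x7E]+', scope_token)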
registered_scopes = [
"openid",
"profile",
"email",
"phone",
"library:read",
"library:append",
"library:edit",
"library:write",
"library:share",
"admin.users:read",
"admin.users:invite",
"admin.users:write",
]
_LOGGER.debug(
f"found {len(registered_scopes)} registered scopes and {len(requested_scopes)} requested scopes."
)
# check if the requested scope is registered
for requested_scope in requested_scopes:
if requested_scope not in registered_scopes:
_LOGGER.error(
f"The requested scope '{requested_scope}' is invalid, unknown, or malformed."
)
data = """{
"error":"invalid_scope",
"error_description":"The requested scope is invalid, unknown, or malformed."
}"""
return web.json_response(json.loads(data))
# persist state to preventing cross-site request forgery [Section 10.12](https://tools.ietf.org/html/rfc6749#section-10.12)
# state = request.query.get("state")
# TODO: add scopes & localized descriptions only for requested scopes
return {
"requesting_app": registered_auth_client.client_name,
"permissions": [
{
"scope": "openid",
"localized": "access the users public profile e.g.: username",
},
{
"scope": "profile",
"localized": "access the users personal profile information e.g.: firstname, lastname",
},
{
"scope": "email",
"localized": "access the users associated email address.",
},
{
"scope": "phone",
"localized": "access the users associated phone number.",
},
# {
# "scope": "library.read",
# "localized": "Read only Grant the user to list all photos owned by the user.",
# },
# {
# "scope": "library.append",
# "localized": "Limited write access Grant the user to add new photos, create new albums.",
# },
# {
# "scope": "library.edit",
# "localized": "Grant the user to edit photos owned by the user.",
# },
{
"scope": "library.write",
"localized": "Grant the user to add and edit photos, albums, tags.",
},
# {
# "scope": "library.share",
# "localized": "Grant the user to create new shares (photos/videos/albums).",
# },
# {
# "scope": "admin.users:read",
# "localized": "Grant the user to list users on the system.",
# },
# {
# "scope": "admin.users:invite",
# "localized": "Grant the user to invite new users to the system.",
# },
# {
# "scope": "admin.users:write",
# "localized": "Grant the user to manage users on the system.",
# },
],
}
except Exception as e:
# This error code is needed because a 500 Internal Server
# Error HTTP status code cannot be returned to the client via an HTTP redirect.
_LOGGER.error(f"an unexpected error happened: {e}")
data = """{
"error":"server_error",
"error_description":"The authorization server encountered an unexpected condition that prevented it from fulfilling the request."
}"""
return web.json_response(json.loads(data))
async def authorization_endpoint_post(
self, request: web.Request
) -> web.StreamResponse:
"""
Validate the resource owners credentials.
"""
_LOGGER.debug("POST /oauth/authorize")
data = await request.post()
redirect_uri = request.query["redirect_uri"]
if "client_id" not in request.query:
_LOGGER.warning("invalid form")
raise web.HTTPFound(f"{redirect_uri}?error=unauthorized_client")
client_id = request.query["client_id"]
_LOGGER.debug(f"client_id {client_id}")
state = None
if "state" in request.query:
state = request.query["state"]
_LOGGER.debug(f"state {state}")
# check if client is known
if not any(client.client_id == client_id for client in self.auth_clients):
_LOGGER.warning(f"unknown client_id {client_id}")
if state is not None:
raise web.HTTPFound(
f"{redirect_uri}?error=unauthorized_client&state={state}"
)
else:
raise web.HTTPFound(f"{redirect_uri}?error=unauthorized_client")
# extract client from registered auth_clients with matching client_id
registered_auth_client = next(
filter(lambda client: client.client_id == client_id, self.auth_clients),
None,
)
# validate if redirect_uri is in registered_auth_client
if not any(uri == redirect_uri for uri in registered_auth_client.redirect_uris):
_LOGGER.error(f"invalid redirect_uri {redirect_uri}")
if state is not None:
raise web.HTTPFound(
f"{redirect_uri}?error=unauthorized_client&state={state}"
)
else:
raise web.HTTPFound(f"{redirect_uri}?error=unauthorized_client")
email = data["email"].strip(" ").lower()
password = data["password"]
# validate credentials
credentials_are_valid = await self.auth_database.check_credentials(
email, password
)
if credentials_are_valid:
# create an authorization code
authorization_code = self.auth_database.create_authorization_code(
email, client_id, request.remote
)
_LOGGER.debug(f"authorization_code: {authorization_code}")
if authorization_code is None:
_LOGGER.warning("could not create auth code for client!")
error_reason = "access_denied"
if state is not None:
raise web.HTTPFound(
f"{redirect_uri}?error={error_reason}&state={state}"
)
else:
raise web.HTTPFound(f"{redirect_uri}?error={error_reason}")
if state is not None:
_LOGGER.debug(
f"HTTPFound: {redirect_uri}?code={authorization_code}&state={state}"
)
redirect_response = web.HTTPFound(
f"{redirect_uri}?code={authorization_code}&state={state}"
)
else:
_LOGGER.debug(f"HTTPFound: {redirect_uri}?code={authorization_code}")
redirect_response = web.HTTPFound(
f"{redirect_uri}?code={authorization_code}"
)
raise redirect_response
else:
error_reason = "access_denied"
_LOGGER.warning(f"redirect with error {error_reason}")
if state is not None:
raise web.HTTPFound(
f"{redirect_uri}?error={error_reason}&state={state}"
)
else:
raise web.HTTPFound(f"{redirect_uri}?error={error_reason}")
async def token_endpoint_handler(self, request: web.Request) -> web.StreamResponse:
"""
Access Token: https://tools.ietf.org/html/rfc6749#section-4.1.3
Refresh Token: https://tools.ietf.org/html/rfc6749#section-6
"""
_LOGGER.debug("POST /oauth/token")
data = await request.post()
# grant_type is REQUIRED
if "grant_type" not in data:
_LOGGER.warning("no grant_type specified!")
data = '{"error":"invalid_request"}'
return web.json_response(json.loads(data))
grant_type = data["grant_type"]
# switch flow based on grant_type
if grant_type == "authorization_code":
return await self._handle_authorization_code_request(data)
elif grant_type == "refresh_token":
return await self._handle_refresh_token_request(request, data)
else:
_LOGGER.warning(f"invalid grant_type! {grant_type}")
data = '{"error":"invalid_request"}'
return web.json_response(json.loads(data))
async def _handle_authorization_code_request(self, data) -> web.StreamResponse:
"""
See Section 4.1.3: https://tools.ietf.org/html/rfc6749#section-4.1.3
"""
# grant_type already checked
# code is REQUIRED
if "code" not in data:
_LOGGER.warning("code param not provided!")
data = {"error": "invalid_request"}
return web.json_response(status=400, data=data)
code = data["code"]
# redirect_uri is REQUIRED
if "redirect_uri" not in data:
_LOGGER.warning("redirect_uri param not provided!")
data = {"error": "invalid_request"}
return web.json_response(status=400, data=data)
redirect_uri = data["redirect_uri"]
# TODO: compare redirect_uri with previous call
_LOGGER.debug(f"TODO: compare redirect_uri {redirect_uri}")
# client_id is REQUIRED
if "client_id" not in data:
data = {"error": "invalid_request"}
return web.json_response(status=400, data=data)
client_id = data["client_id"]
client_code_valid = await self.auth_database.validate_authorization_code(
code, client_id
)
if not client_code_valid:
_LOGGER.error("authorization_code invalid!")
payload = {"error": "invalid_grant"}
return web.json_response(status=400, data=payload)
access_token, refresh_token = await self.auth_database.create_tokens(
code, client_id
)
payload = {
"access_token": access_token,
"token_type": "Bearer",
"expires_in": CONF_TOKEN_LIFETIME,
"refresh_token": refresh_token,
}
return web.json_response(status=200, data=payload)
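    # Example request body this handler accepts (placeholder values taken
    # from RFC 6749 section 4.1.3):
    #
    #   grant_type=authorization_code
    #   code=SplxlOBeZQQYbYS6WxSbIA
    #   redirect_uri=https://client.example.com/cb
    #   client_id=s6BhdRkqt3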
async def _handle_refresh_token_request(
self, request: web.Request, data
) -> web.StreamResponse:
"""
See Section 6: https://tools.ietf.org/html/rfc6749#section-6
"""
# code is REQUIRED
if "refresh_token" not in data:
_LOGGER.warning("refresh token not provided!")
data = {"error": "invalid_request"}
return web.json_response(data)
refresh_token = data["refresh_token"]
# check if client_id and client_secret are provided as request parameters or HTTP Basic auth header
if "client_id" in data and "client_secret" in data:
# handle request parameters
client_id = data["client_id"]
client_secret = data["client_secret"]
elif hdrs.AUTHORIZATION in request.headers:
# handle basic headers
auth_type, auth_val = request.headers.get(hdrs.AUTHORIZATION).split(" ", 1)
if auth_type != "Basic":
return False
# TODO: split auth_val in client_id and client_secret
_LOGGER.error(f"split token into client_id and client_secret: {auth_val}")
client_id = ""
client_secret = ""
registered_auth_client = next(
filter(lambda client: client.client_id == client_id, self.auth_clients),
None,
)
_LOGGER.debug(f"client_id: {client_id}, {registered_auth_client}")
if not registered_auth_client.client_secret == client_secret:
_LOGGER.error("client_id does not match with client_secret")
data = {"error": "invalid_client"}
return web.json_response(data)
access_token, refresh_token = await self.auth_database.renew_tokens(
client_id, refresh_token
)
if access_token is None:
raise web.HTTPForbidden()
payload = {
"access_token": access_token,
"token_type": "Bearer",
"expires_in": CONF_TOKEN_LIFETIME,
"refresh_token": refresh_token,
}
return web.json_response(payload)
def create_client(self):
"""Generate a client_id and client_secret to add new clients."""
client_id = uuid.uuid4()
client_secret = secrets.token_urlsafe(16)
_LOGGER.info(f"generated client_id: {client_id}")
_LOGGER.info(f"generated client_secret: {client_secret}")
async def check_authorized(self, request: web.Request) -> Optional[str]:
"""Check if authorization header and returns user ID if valid"""
if hdrs.AUTHORIZATION in request.headers:
try:
auth_type, auth_val = request.headers.get(hdrs.AUTHORIZATION).split(
" ", 1
)
if not await self.auth_database.validate_access_token(auth_val):
raise web.HTTPForbidden()
return await self.auth_database.user_id_for_token(auth_val)
except ValueError:
# If no space in authorization header
_LOGGER.debug("invalid authorization header!")
raise web.HTTPForbidden()
else:
_LOGGER.debug("missing authorization header!")
raise web.HTTPForbidden()
async def check_permission(self, request: web.Request, scope: str) -> None:
"""Check if given authorization header is valid and user has granted access to given scope."""
# check if user is authorized
await self.check_authorized(request)
# check if required scope is granted
await self.core.authorization.check_scope(scope)
| 39.738703 | 145 | 0.565531 | 2,117 | 20,227 | 5.217761 | 0.135097 | 0.033315 | 0.01883 | 0.030418 | 0.440703 | 0.345012 | 0.333152 | 0.257378 | 0.223339 | 0.205685 | 0 | 0.00743 | 0.341277 | 20,227 | 508 | 146 | 39.816929 | 0.8216 | 0.118999 | 0 | 0.319767 | 0 | 0 | 0.242739 | 0.05773 | 0 | 0 | 0 | 0.005906 | 0 | 1 | 0.008721 | false | 0.005814 | 0.031977 | 0 | 0.113372 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99bc74d2f622c7d0048f1d7a1244333cdfa0d30a | 785 | py | Python | Utils/Image_processing.py | philshams/FC_analysis | cabe2385d5061d206a21b230605bfce9e39ec7f2 | [
"MIT"
] | null | null | null | Utils/Image_processing.py | philshams/FC_analysis | cabe2385d5061d206a21b230605bfce9e39ec7f2 | [
"MIT"
] | null | null | null | Utils/Image_processing.py | philshams/FC_analysis | cabe2385d5061d206a21b230605bfce9e39ec7f2 | [
"MIT"
] | null | null | null | import cv2
def process_background(background, track_options):
""" extract background: first frame of first video of a session
Allow user to specify ROIs on the background image """
print(' ... extracting background')
cv2.startWindowThread()
if len(background.shape) == 3:
gray = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)
else:
gray = background
blur = cv2.blur(gray, (15, 15))
edges = cv2.Canny(blur, 25, 30)
rois = {'Shelter': None, 'Threat': None, 'Task': None}
    if track_options['bg get rois']:  # Get user to define each ROI
for rname in rois.keys():
print('\n\nPlease mark {}'.format(rname))
rois[rname] = cv2.selectROI(gray, fromCenter=False)
return edges, rois
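# Usage sketch (hypothetical video path; uses the first frame as background):
#
#   cap = cv2.VideoCapture('session.avi')
#   ok, frame = cap.read()
#   if ok:
#       edges, rois = process_background(frame, {'bg get rois': True})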
| 29.074074 | 78 | 0.628025 | 99 | 785 | 4.939394 | 0.606061 | 0.04908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02901 | 0.253503 | 785 | 26 | 79 | 30.192308 | 0.805461 | 0.182166 | 0 | 0 | 0 | 0 | 0.120827 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.1875 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99bcdcd5e66866adeb754619341836bf5daa27c7 | 1,553 | py | Python | python/ctci-merge-sort.py | gajubadge11/hackerrank-3 | 132a5019b7ed21507bb95b5063fa66c446b0eff7 | [
"MIT"
] | 21 | 2015-02-09T18:08:38.000Z | 2021-11-08T15:00:48.000Z | python/ctci-merge-sort.py | gajubadge11/hackerrank-3 | 132a5019b7ed21507bb95b5063fa66c446b0eff7 | [
"MIT"
] | 7 | 2020-04-12T23:00:19.000Z | 2021-01-30T23:44:24.000Z | python/ctci-merge-sort.py | gajubadge11/hackerrank-3 | 132a5019b7ed21507bb95b5063fa66c446b0eff7 | [
"MIT"
] | 27 | 2015-07-22T18:08:12.000Z | 2022-02-28T19:50:26.000Z | #!/bin/python3
import math
import os
import random
import re
import sys
# This solution times out on Hackerrank with Python 3
# However, it passes all test cases with Pypy 3
def countInversions(arr):
global COUNT_INVERSIONS
COUNT_INVERSIONS = 0
mergeSort(arr)
return COUNT_INVERSIONS
def mergeSort(arr):
if (len(arr) <= 1):
return arr
# Split the array in two
# Recursively sort both halves
middle = len(arr) // 2
arrLeft = mergeSort(arr[:middle])
arrRight = mergeSort(arr[middle:])
# Merge the two halves
mergedArray = []
leftIndex = 0
rightIndex = 0
global COUNT_INVERSIONS
# Iterate through both lists and append the smaller element
while(leftIndex < len(arrLeft) and rightIndex < len(arrRight)):
if(arrLeft[leftIndex] <= arrRight[rightIndex]):
mergedArray.append(arrLeft[leftIndex])
leftIndex += 1
else:
mergedArray.append(arrRight[rightIndex])
rightIndex += 1
COUNT_INVERSIONS += len(arrLeft) - leftIndex
# Append any left over elements
mergedArray.extend(arrLeft[leftIndex:])
mergedArray.extend(arrRight[rightIndex:])
return mergedArray
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
num_test_cases = int(input())
for _test_case in range(num_test_cases):
_ = int(input())
arr = list(map(int, input().rstrip().split()))
result = countInversions(arr)
fptr.write(str(result) + '\n')
fptr.close()
| 24.650794 | 67 | 0.647135 | 183 | 1,553 | 5.377049 | 0.486339 | 0.07622 | 0.042683 | 0.030488 | 0.04065 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008598 | 0.251127 | 1,553 | 62 | 68 | 25.048387 | 0.837489 | 0.175145 | 0 | 0.05 | 0 | 0 | 0.017282 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99bf91ac2f49e24944ae85032ce5745ad6b70267 | 21,009 | py | Python | ENCODE_publications.py | T2DREAM/pyencoded-tools | 75fa636995bfc9fe181f9af490ce70dde3f6ce21 | [
"MIT"
] | null | null | null | ENCODE_publications.py | T2DREAM/pyencoded-tools | 75fa636995bfc9fe181f9af490ce70dde3f6ce21 | [
"MIT"
] | null | null | null | ENCODE_publications.py | T2DREAM/pyencoded-tools | 75fa636995bfc9fe181f9af490ce70dde3f6ce21 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: latin-1 -*-
from Bio import Entrez
from Bio import Medline
import argparse
import os
import csv
import logging
import encodedcc
EPILOG = '''
Takes in a VERY specific file format to use for updating the publications
Also can update the existing publications using the pubmed database
An EMAIL is required to run this script
This is for the Entrez database
This is a dryrun default script
This script requires the BioPython module
Options:
%(prog)s --consortium Consortium_file.txt
This takes the consortium file
%(prog)s --community Community_file.txt
This takes the community file
%(prog)s --updateonly list.txt
Takes file with single column of publication UUIDs, checks against PubMed \
to ensure data is correct and will update if needed
'''
logger = logging.getLogger(__name__)
def getArgs():
parser = argparse.ArgumentParser(
description=__doc__, epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--consortium',
help="File with consortium publication information")
parser.add_argument('--community',
help="File with community publication information")
parser.add_argument('--outfile',
help="Output file name", default='publication_results.txt')
parser.add_argument('--key',
help="The keypair identifier from the keyfile.",
default='default')
parser.add_argument('--keyfile',
help="The keyfile",
default=os.path.expanduser('~/keypairs.json'))
parser.add_argument('--debug',
help="Debug prints out HTML requests and returned JSON \
objects. Default is off",
action='store_true',
default=False)
parser.add_argument('--update',
help="Run script and PATCH objects as needed. \
Default is off",
action='store_true',
default=False)
parser.add_argument('--create',
help="Run script and POST new objects as needed. \
Default is off",
action='store_true',
default=False)
parser.add_argument('--createonly',
help="Run script and POST new objects as needed,\
only look up as needed. Default is off",
action='store_true',
default=False)
parser.add_argument('--updateonly',
help="File containing publication UUIDS from ENCODE database for\
updating. If the publication does not have PMID the script will\
find it comparing based on title and assuming unique title")
parser.add_argument('email',
help="Email needed to make queries to Entrez process")
args = parser.parse_args()
if args.debug:
logging.basicConfig(filename=args.outfile, filemode="w",
format='%(levelname)s:%(message)s',
level=logging.DEBUG)
else: # use the default logging level
logging.basicConfig(filename=args.outfile, filemode="w",
format='%(levelname)s:%(message)s',
level=logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
return args
class PublicationUpdate:
def __init__(self, arguments):
self.MAPPING = {"abstract": "AB", "authors": "AU", "title": "TI",
"volume": "VI", "journal": "JT", "date_published": "DP",
"page": "PG", "issue": "IP"}
self.entrezDict = {}
self.PATCH_COUNT = 0
self.POST_COUNT = 0
args = arguments
self.UPDATE = args.update
self.CREATE = args.create or args.createonly
self.CREATE_ONLY = args.createonly
self.UPDATE_ONLY = args.updateonly
self.community = args.community
self.consortium = args.consortium
if self.UPDATE:
print("Will PATCH publication objects as needed")
if self.CREATE:
print("POST new pubmeds")
def setup_publication(self):
'''consortium publications file'''
self.consortium_dict = {}
with open(self.consortium, 'r', encoding='ISO-8859-1') as f:
reader = csv.reader(f, delimiter='\t')
for PMID, published_by, categories, catch1, code, catch2, title in reader:
categories = categories.replace(";", ",").rstrip(" ")
published_by = published_by.replace(";", ",").rstrip(" ")
cat = [x.strip(' ').lower() for x in categories.rstrip(',').split(",")]
pub = [x.strip(' ') for x in published_by.rstrip(',').split(",")]
temp = {"published_by": pub, "categories": cat}
self.consortium_dict[PMID] = temp
self.consortium_ids = list(self.consortium_dict.keys())
'''community publications file'''
self.community_dict = {}
with open(self.community, 'r', encoding='ISO-8859-1') as f:
reader = csv.reader(f, delimiter='\t')
for PMID, published_by, categories, data_used, catch1, catch2, title, catch3, catch4, catch5, catch6, catch7, catch8, catch9, catch10, catch11, catch12, catch13, catch14, catch15, catch16, catch17, catch18 in reader:
categories = categories.replace(";", ",").rstrip(" ")
published_by = published_by.replace(";", ",").rstrip(" ")
cat = [x.strip(' ').lower() for x in categories.rstrip(',').split(",")]
pub = [x.strip(' ') for x in published_by.rstrip(',').split(",")]
temp = {"published_by": pub, "categories": cat, "data_used": data_used}
self.community_dict[PMID] = temp
self.community_ids = list(self.community_dict.keys())
def get_entrez(self, idList):
'''gets the values from Entrez
'''
handle = Entrez.efetch(db="pubmed", id=idList,
rettype="medline", retmode="text")
# records is an iterator, so you can iterate through the records only once
records = Medline.parse(handle)
# save the records, you can convert them to a list
records = list(records)
for record in records:
tempDict = {}
for key in self.MAPPING.keys():
if key == "authors":
auth = ", ".join(str(x) for x in record.get("AU", []))
tempDict["authors"] = auth
else:
tempDict[key] = record.get(self.MAPPING.get(key), "")
self.entrezDict[record.get("PMID")] = tempDict
def check_ENCODE(self, idList, connection, otherIdList=[], bothDicts={}):
for pmid in idList:
extraData = bothDicts.get(pmid)
ENCODEvalue = encodedcc.get_ENCODE("/search/?type=publication&searchTerm=PMID:" + pmid, connection)
if ENCODEvalue.get("@graph"):
log = "PMID " + pmid + " is listed in ENCODE"
logger.info('%s' % log)
uuid = ENCODEvalue.get("@graph")[0].get("uuid")
if not self.CREATE_ONLY:
self.compare_entrez_ENCODE(uuid, pmid, connection, extraData)
else:
if self.CREATE_ONLY:
self.get_entrez([pmid])
titleEntrez = self.entrezDict[pmid].get("title")
found = False
for otherID in otherIdList:
titleENCODE = encodedcc.get_ENCODE("/search/?type=publication&searchTerm=" + otherID, connection)
if titleENCODE.get("title") == titleEntrez:
log = pmid + " is in ENCODE by a different name " + titleENCODE.get("uuid")
logger.warning('%s' % log)
self.compare_entrez_ENCODE(titleENCODE.get("uuid"), pmid, connection, extraData)
if self.UPDATE:
newIdent = titleENCODE.get("identifiers")
newIdent.append("PMID:" + pmid)
patch_dict = {"identifiers": newIdent}
encodedcc.patch_ENCODE(titleENCODE.get("uuid"), connection, patch_dict)
found = True
if found is False:
log = "This publication is not listed in ENCODE " + pmid
logger.warning('%s' % log)
if self.CREATE:
self.POST_COUNT += 1
pmidData = self.entrezDict[pmid]
log = "POSTing the new object: " + pmid
logger.info('%s' % log)
post_dict = {
"title": pmidData.get("title"),
"abstract": pmidData.get("abstract"),
"submitted_by": "/users/8b1f8780-b5d6-4fb7-a5a2-ddcec9054288/",
"lab": "/labs/encode-consortium/",
"award": "/awards/ENCODE/",
"categories": extraData.get("categories"),
"published_by": extraData.get("published_by"),
"date_published": pmidData.get("date_published"),
"authors": pmidData.get("authors"),
"identifiers": ["PMID:" + pmid],
"journal": pmidData.get("journal"),
"volume": pmidData.get("volume"),
"issue": pmidData.get("issue"),
"page": pmidData.get("page"),
"status": "published"
}
if extraData.get("data_used"):
post_dict["data_used"] = extraData.get("data_used")
encodedcc.new_ENCODE(connection, "publications", post_dict)
def compare_entrez_ENCODE(self, uuid, pmid, connection, extraData={}):
'''compares value in ENCODE database to results from Entrez
'''
encode = encodedcc.get_ENCODE(uuid, connection)
entrez = self.entrezDict.get(pmid)
patch = False
if not entrez:
log = "PMID " + pmid + " was not found in Entrez database!!"
logger.warning('%s' % log)
else:
log = "PMID " + pmid
logger.info('%s' % log)
for key in entrez.keys():
if key in encode.keys():
if entrez[key] == encode[key]:
log = "entrez key \"" + key + "\" matches encode key"
logger.info('%s' % log)
else:
log = "\"" + key + "\" value in encode database does not match value in entrez database"
logger.warning('%s' % log)
log = "\tENTREZ: " + entrez[key] + "\n\tENCODE: " + encode[key]
logger.warning('%s' % log)
if self.UPDATE or self.UPDATE_ONLY:
log = "PATCH in the new value for \"" + key + "\""
logger.info('%s' % log)
patch_dict = {key: entrez[key]}
encodedcc.patch_ENCODE(uuid, connection, patch_dict)
patch = True
else:
log = "ENCODE missing \"" + key + "\" from Entrez. New key and value must be added"
logger.warning('%s' % log)
if self.UPDATE or self.UPDATE_ONLY:
log = "PATCHing in new key \"" + key + "\""
logger.info('%s' % log)
patch_dict = {key: entrez[key]}
encodedcc.patch_ENCODE(uuid, connection, patch_dict)
patch = True
if not self.UPDATE_ONLY:
for key in extraData.keys():
if type(extraData.get(key)) is list:
if set(encode.get(key, [])) == set(extraData.get(key, [])):
log = "encode \"" + key + "\" matches data in file"
logger.info('%s' % log)
else:
log = "encode \"" + key + "\" value" + str(encode.get(key, [])) + "does not match file"
logger.warning('%s' % log)
if self.UPDATE:
if any(extraData[key]):
patch_dict = {key: extraData[key]}
encodedcc.patch_ENCODE(uuid, connection, patch_dict)
patch = True
else:
log = "No value in file to input for \"" + key + "\""
logger.warning('%s' % log)
if type(extraData.get(key)) is str:
if encode.get(key, "") == extraData.get(key, ""):
log = "encode \"" + key + "\" matches data in file"
logger.info('%s' % log)
else:
log = "encode \"" + key + "\" value" + str(encode.get(key, "")) + "does not match file"
logger.warning('%s' % log)
if self.UPDATE:
patch_dict = {key: extraData[key]}
encodedcc.patch_ENCODE(uuid, connection, patch_dict)
patch = True
if encode.get("status", "") != "published" and (self.UPDATE or self.UPDATE_ONLY):
log = "Setting status to published"
logger.info('%s' % log)
encodedcc.patch_ENCODE(uuid, connection, {"status": "published"})
patch = True
if patch is True:
self.PATCH_COUNT += 1
def find_ENCODE_extras(self, communityList, consortiumList, connection):
'''finds any publications in the ENCODE database
that are not in the files provided
'''
community_url = "/search/?type=publication&status=published\
&published_by=community&field=identifiers&limit=all"
consortium_url = "/search/?type=publication&status=published\
&published_by!=community&field=identifiers&limit=all"
communityResult = encodedcc.get_ENCODE(community_url, connection).get("@graph")
consortiumResult = encodedcc.get_ENCODE(consortium_url, connection).get("@graph")
communityPMIDfromENCODE = [] # list of PMID from ENCODE site
communityOtherID = [] # list of non-PMID ids from ENCODE site
for pub in communityResult:
temp = pub.get("identifiers", [])
for idNum in temp:
if "PMID:" in idNum:
communityPMIDfromENCODE.append(idNum)
# this is something that has a pubmed ID
elif "PMCID:PMC" in idNum:
pass
# this is an alternate PMID
else:
uuid = pub.get("@id")
communityOtherID.append(uuid)
# this is something that does not have a PMID yet, find it and PATCH it in
community_ENCODE_Only = list(set(communityPMIDfromENCODE) - set(communityList))
consortiumPMIDfromENCODE = [] # list of PMID from ENCODE site
consortiumOtherID = [] # list of non-PMID ids from ENCODE site
for pub in consortiumResult:
temp = pub.get("identifiers", [])
for idNum in temp:
if "PMID:" in idNum:
consortiumPMIDfromENCODE.append(idNum)
# this is something that has a pubmed ID
elif "PMCID:PMC" in idNum:
pass
# this is an alternate PMID
else:
uuid = pub.get("@id")
consortiumOtherID.append(uuid)
# this is something that does not have a PMID yet, find it and PATCH it in
consortium_ENCODE_Only = list(set(consortiumPMIDfromENCODE) - set(consortiumList))
return community_ENCODE_Only, communityOtherID, consortium_ENCODE_Only, consortiumOtherID
def main():
args = getArgs()
outfile = args.outfile
CREATE_ONLY = args.createonly
UPDATE_ONLY = args.updateonly
Entrez.email = args.email
key = encodedcc.ENC_Key(args.keyfile, args.key)
connection = encodedcc.ENC_Connection(key)
if args.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
print("Running on ", connection.server)
publication = PublicationUpdate(args)
if not UPDATE_ONLY:
publication.setup_publication()
pmidList = publication.consortium_ids + publication.community_ids
mergeDicts = publication.consortium_dict.copy()
mergeDicts.update(publication.community_dict) # holds published_by, categories, and data_used
if not CREATE_ONLY:
publication.get_entrez(pmidList)
community_ENCODE_Only, communityOtherID, consortium_ENCODE_Only, consortiumOtherID = publication.find_ENCODE_extras(publication.community_ids, publication.consortium_ids, connection)
total_ENCODE_only = len(community_ENCODE_Only) + len(consortium_ENCODE_Only)
allOtherIDs = communityOtherID + consortiumOtherID
publication.check_ENCODE(pmidList, connection, allOtherIDs, mergeDicts)
log = str(total_ENCODE_only) + " items in ENCODE but not in files"
logger.info('%s' % log)
log = str(publication.PATCH_COUNT) + " publication files PATCHed"
logger.info('%s' % log)
log = str(publication.POST_COUNT) + " publication files POSTed"
logger.info('%s' % log)
print("Results printed to", outfile)
else:
infile = UPDATE_ONLY
with open(infile, 'r') as readfile:
uuidList = [x.rstrip('\n') for x in readfile]
# check each publication to see if it has a PMID, if it does add it to the PMIDlist
# if it does not have one look it up on Entrez
pmid_uuid_dict = {}
for uuid in uuidList:
pub = encodedcc.get_ENCODE(uuid, connection)
title = pub.get("title", "")
identifiers = pub.get("identifiers", [])
found = False
for i in identifiers:
if "PMID:" in i:
p = i.split(":")[1]
found = True
if found:
pmid_uuid_dict[p] = uuid
else:
# search Entrez for publication by title
handle = Entrez.esearch(db="pubmed", term=title)
record = Entrez.read(handle)
idlist = record["IdList"]
if len(idlist) > 1:
log = "More than one possible PMID found for " + uuid
logger.error('%s' % log)
log = str(idlist) + " are possible PMIDs"
logger.error('%s' % log)
elif len(idlist) == 0:
log = "No possible PMID found for " + uuid
logger.error('%s' % log)
else:
handle = Entrez.efetch(db="pubmed", id=idlist, rettype="medline", retmode="text")
records = Medline.parse(handle)
# save the records, you can convert them to a list
records = list(records)
for record in records:
pm = record.get("PMID")
ti = record.get("TI")
log = "Publication " + uuid + " with title \"" + title + "\" matches PMID:" + pm + " with title \"" + ti + "\""
logger.info('%s' % log)
identifiers.append("PMID:" + pm)
encodedcc.patch_ENCODE(uuid, connection, {"identifiers": identifiers})
pmid_uuid_dict[pm] = uuid
pmidList = list(pmid_uuid_dict.keys())
publication.get_entrez(pmidList)
with open("pub_update.txt", "w") as f:
for pmid in pmid_uuid_dict.keys():
publication.compare_entrez_ENCODE(pmid_uuid_dict[pmid], pmid, connection)
f.write(str(len(pmid_uuid_dict.keys())) + " publications checked " + str(publication.PATCH_COUNT) + " publications PATCHed")
if __name__ == '__main__':
main()
| 48.85814 | 228 | 0.524775 | 2,113 | 21,009 | 5.128727 | 0.17416 | 0.009228 | 0.013196 | 0.016794 | 0.341884 | 0.310418 | 0.295562 | 0.272031 | 0.258743 | 0.245917 | 0 | 0.005132 | 0.369318 | 21,009 | 429 | 229 | 48.972028 | 0.812755 | 0.051549 | 0 | 0.302949 | 0 | 0 | 0.153105 | 0.011091 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021448 | false | 0.005362 | 0.018767 | 0 | 0.048257 | 0.013405 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99c11294cdaedfbcacc68dc5da7e71c76a3c5c19 | 579 | py | Python | medium/remove_nth_node_from_end_of_list.py | Jswig/leetcode | ca9ca182ab7824d642aa5ebbe5974669d2a6221c | [
"MIT"
] | null | null | null | medium/remove_nth_node_from_end_of_list.py | Jswig/leetcode | ca9ca182ab7824d642aa5ebbe5974669d2a6221c | [
"MIT"
] | null | null | null | medium/remove_nth_node_from_end_of_list.py | Jswig/leetcode | ca9ca182ab7824d642aa5ebbe5974669d2a6221c | [
"MIT"
] | null | null | null | # Anders Poirel
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
fast = head
slow = head
for i in range(n-1):
fast = fast.next
while fast.next is not None:
fast = fast.next
prev = slow
slow = slow.next
if slow == head:
head = slow.next
else:
prev.next = slow.next
return head
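# Usage sketch: removing the 2nd node from the end of 1 -> 2 -> 3 leaves 1 -> 3
#
#   head = ListNode(1); head.next = ListNode(2); head.next.next = ListNode(3)
#   head = Solution().removeNthFromEnd(head, 2)   # head now: 1 -> 3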
| 21.444444 | 67 | 0.462867 | 64 | 579 | 4.125 | 0.46875 | 0.090909 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003205 | 0.46114 | 579 | 27 | 68 | 21.444444 | 0.842949 | 0.022453 | 0 | 0.105263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99c2e61e389f9dd7c525b31829b0d447fc52f4de | 1,674 | py | Python | tests/community/test_pushbullet.py | soasme/runflow | b23086c2487a157b8c2d40f6225a1bcd9e8b6c60 | [
"Apache-2.0"
] | 6 | 2021-06-07T01:26:19.000Z | 2021-07-02T12:57:07.000Z | tests/community/test_pushbullet.py | soasme/runflow | b23086c2487a157b8c2d40f6225a1bcd9e8b6c60 | [
"Apache-2.0"
] | 2 | 2021-06-06T02:56:37.000Z | 2021-06-07T04:06:23.000Z | tests/community/test_pushbullet.py | soasme/runflow | b23086c2487a157b8c2d40f6225a1bcd9e8b6c60 | [
"Apache-2.0"
] | null | null | null | import pytest
from runflow import runflow
# these tests patch pushbullet.Pushbullet, so skip when pushbullet is missing
pytest.importorskip('pushbullet')
def test_pushbullet_push_note(mocker):
pb = mocker.MagicMock()
mocker.patch('pushbullet.Pushbullet', pb)
runflow(path="examples/pushbullet_push_note.hcl", vars={
'pushbullet_api_key': 'any'
})
pb.return_value.push_note.assert_called_with(
title='This is the title',
body='This is the note',
email='',
channel=None,
)
def test_pushbullet_push_link(mocker):
pb = mocker.MagicMock()
mocker.patch('pushbullet.Pushbullet', pb)
runflow(path="examples/pushbullet_push_link.hcl", vars={
'pushbullet_api_key': 'any'
})
pb.return_value.push_link.assert_called_with(
title='This is the title',
url='https://runflow.org',
body='',
email='',
channel=None,
)
def test_pushbullet_push_file(mocker):
pb = mocker.MagicMock()
mocker.patch('pushbullet.Pushbullet', pb)
runflow(path="examples/pushbullet_push_file.hcl", vars={
'pushbullet_api_key': 'any',
})
pb.return_value.push_file.assert_called_with(
title='This is the title',
body='This is the body',
file_type='image/jpeg',
file_name='cat.jpg',
file_url='https://i.imgur.com/IAYZ20i.jpg',
email='',
channel=None,
)
def test_pushbullet_invalid_client(mocker, capsys):
pb = mocker.MagicMock()
mocker.patch('pushbullet.Pushbullet', pb)
runflow(source="""
flow "invalid_client" {
task "pushbullet_push" "this" {
client = {
}
}
}
""")
out, err = capsys.readouterr()
assert 'set api_key' in err
| 23.914286 | 60 | 0.635006 | 202 | 1,674 | 5.049505 | 0.306931 | 0.096078 | 0.044118 | 0.090196 | 0.677451 | 0.677451 | 0.645098 | 0.572549 | 0.538235 | 0.482353 | 0 | 0.001552 | 0.229988 | 1,674 | 69 | 61 | 24.26087 | 0.78976 | 0 | 0 | 0.392857 | 0 | 0 | 0.30227 | 0.109319 | 0 | 0 | 0 | 0 | 0.071429 | 1 | 0.071429 | false | 0 | 0.053571 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99c393d65fd469c4d0c7d4d4470fc5c12bb10a44 | 3,269 | py | Python | ch02/dateandtime.py | ibiscum/Learn-Python-Programming-Third-Edition | c8e0061e97b16c9b55250cc720a8bc7613cb6cca | [
"MIT"
] | null | null | null | ch02/dateandtime.py | ibiscum/Learn-Python-Programming-Third-Edition | c8e0061e97b16c9b55250cc720a8bc7613cb6cca | [
"MIT"
] | null | null | null | ch02/dateandtime.py | ibiscum/Learn-Python-Programming-Third-Edition | c8e0061e97b16c9b55250cc720a8bc7613cb6cca | [
"MIT"
] | null | null | null | """Date and time"""
# imports
import arrow
from datetime import date, datetime, timedelta, timezone
import time
import calendar as cal
from zoneinfo import ZoneInfo

# date
today = date.today()
print(today)
# datetime.date(2021, 3, 28)
print(today.ctime())
print(today.isoformat())
print(today.weekday())
print(cal.day_name[today.weekday()])
print(today.day, today.month, today.year)
print(today.timetuple())
# print(time.struct_time(tm_year=2021, tm_mon=3, tm_mday=28, tm_hour=0, tm_min=0, tm_sec=0,
# tm_wday=6, tm_yday=87, tm_isdst=-1))
# time
time.ctime()
print(time.daylight)
time.gmtime()
# time.struct_time(
# tm_year=2021, tm_mon=3, tm_mday=28,
# tm_hour=14, tm_min=23, tm_sec=34,
# tm_wday=6, tm_yday=87, tm_isdst=0
# )
time.gmtime(0)
# time.struct_time(
# tm_year=1970, tm_mon=1, tm_mday=1,
# tm_hour=0, tm_min=0, tm_sec=0,
# tm_wday=3, tm_yday=1, tm_isdst=0
# )
time.localtime()
# time.struct_time(
# tm_year=2021, tm_mon=3, tm_mday=28,
# tm_hour=15, tm_min=23, tm_sec=50,
# tm_wday=6, tm_yday=87, tm_isdst=1
# )
time.time()
# datetime, timezones and timedelta
now = datetime.now()
utcnow = datetime.utcnow()
print(now)
# datetime.datetime(2021, 3, 28, 15, 25, 16, 258274)
print(utcnow)
# datetime.datetime(2021, 3, 28, 14, 25, 22, 918195)
print(now.date())
# datetime.date(2021, 3, 28)
print(now.day, now.month, now.year)
var = now.date() == date.today()
print(now.time())
# datetime.time(15, 25, 16, 258274)
# now.hour, now.minute, now.second, now.microsecond
now.ctime()
# 'Sun Mar 28 15:25:16 2021'
now.isoformat()
# '2021-03-28T15:25:16.258274'
now.timetuple()
# time.struct_time(
# tm_year=2021, tm_mon=3, tm_mday=28,
# tm_hour=15, tm_min=25, tm_sec=16,
# tm_wday=6, tm_yday=87, tm_isdst=-1
# )
print(now.tzinfo)
print(utcnow.tzinfo)
now.weekday()
# 6
f_bday = datetime(
    1975, 12, 29, 12, 50, tzinfo=ZoneInfo('Europe/Rome')
)
h_bday = datetime(
    1981, 10, 7, 15, 30, 50, tzinfo=timezone(timedelta(hours=2))
)
diff = h_bday - f_bday
type(diff)
# <class 'datetime.timedelta'>
print(diff.days)
# 2109
diff.total_seconds()
# 182223650.0
today + timedelta(days=49)
# datetime.date(2021, 5, 16)
now + timedelta(weeks=7)
# datetime.datetime(2021, 5, 16, 15, 25, 16, 258274)
# parsing (stdlib)
datetime.fromisoformat('1977-11-24T19:30:13+01:00')
# datetime.datetime(
# 1977, 11, 24, 19, 30, 13,
# tzinfo=datetime.timezone(datetime.timedelta(seconds=3600))
# )
datetime.fromtimestamp(time.time())
# datetime.datetime(2021, 3, 28, 15, 42, 2, 142696)
datetime.now()
# datetime.datetime(2021, 3, 28, 15, 42, 1, 120094)
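# parsing with an explicit format string (illustrative values)
datetime.strptime('24/11/1977 19:30', '%d/%m/%Y %H:%M')
# datetime.datetime(1977, 11, 24, 19, 30)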

# arrow small demo
arrow.utcnow()
# <Arrow [2021-03-28T14:43:20.017213+00:00]>
arrow.now()
# <Arrow [2021-03-28T15:43:39.370099+01:00]>
local = arrow.now('Europe/Rome')
print(local)
# <Arrow [2021-03-28T16:59:14.093960+02:00]>
local.to('utc')
# <Arrow [2021-03-28T14:59:14.093960+00:00]>
local.to('Europe/Moscow')
# <Arrow [2021-03-28T17:59:14.093960+03:00]>
local.to('Asia/Tokyo')
# <Arrow [2021-03-28T23:59:14.093960+09:00]>
print(local.datetime)
# datetime.datetime(
# 2021, 3, 28, 16, 59, 14, 93960,
# tzinfo=tzfile('/usr/share/zoneinfo/Europe/Rome')
# )
local.isoformat()
# '2021-03-28T16:59:14.093960+02:00'
| 20.955128 | 91 | 0.67513 | 547 | 3,269 | 3.932358 | 0.243144 | 0.059507 | 0.02278 | 0.037192 | 0.280335 | 0.240818 | 0.218503 | 0.178987 | 0.145514 | 0.134821 | 0 | 0.17903 | 0.142245 | 3,269 | 155 | 92 | 21.090323 | 0.588088 | 0.559498 | 0 | 0 | 0 | 0 | 0.053052 | 0.018169 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.087719 | 0 | 0.087719 | 0.315789 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99c5edc4f3f5114316b3f4fae670cf161eac7a79 | 15,176 | py | Python | utils.py | Soft8Soft/verge3d-blender-addon | 20a7ca153285e4744a7079bc015584271a50a252 | [
"Apache-2.0"
] | 86 | 2018-08-14T17:08:27.000Z | 2022-03-22T10:35:28.000Z | utils.py | Soft8Soft/verge3d-blender-addon | 20a7ca153285e4744a7079bc015584271a50a252 | [
"Apache-2.0"
] | 3 | 2018-08-16T16:32:26.000Z | 2021-01-31T11:09:01.000Z | utils.py | Soft8Soft/verge3d-blender-addon | 20a7ca153285e4744a7079bc015584271a50a252 | [
"Apache-2.0"
] | 18 | 2018-08-15T10:32:19.000Z | 2022-02-28T16:41:51.000Z | # Copyright (c) 2017-2019 Soft8Soft LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import math

import bpy
import numpy as np
import mathutils

import pyosl.glslgen

ORTHO_EPS = 1e-5
DEFAULT_MAT_NAME = 'v3d_default_material'

selectedObject = None
selectedObjectsSave = []
prevActiveObject = None


def clamp(val, minval, maxval):
    return max(minval, min(maxval, val))


def integerToBlSuffix(val):
    suf = str(val)
    for i in range(0, 3 - len(suf)):
        suf = '0' + suf
    return suf
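# e.g. integerToBlSuffix(7) -> '007' -- the zero-padded numeric suffix Blender
# appends to duplicated datablock names; equivalent to str(val).zfill(3) for
# 0 <= val <= 999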
def getLightCyclesStrength(bl_light):
    return bl_light.energy


def getLightCyclesColor(bl_light):
    col = bl_light.color
    return [col[0], col[1], col[2]]


def setSelectedObject(bl_obj):
    """
    Select object for NLA baking
    """
    global prevActiveObject
    global selectedObject, selectedObjectsSave

    selectedObject = bl_obj
    selectedObjectsSave = bpy.context.selected_objects.copy()

    # NOTE: seems like we need both selection and setting active object
    for o in selectedObjectsSave:
        o.select_set(False)

    prevActiveObject = bpy.context.view_layer.objects.active
    bpy.context.view_layer.objects.active = bl_obj
    bl_obj.select_set(True)


def restoreSelectedObjects():
    global prevActiveObject
    global selectedObject, selectedObjectsSave

    selectedObject.select_set(False)
    for o in selectedObjectsSave:
        o.select_set(True)

    bpy.context.view_layer.objects.active = prevActiveObject
    prevActiveObject = None
    selectedObject = None
    selectedObjectsSave = []


def getSceneByObject(obj):
    for scene in bpy.data.scenes:
        index = scene.objects.find(obj.name)
        if index > -1 and scene.objects[index] == obj:
            return scene
    return None


def getTexImage(bl_tex):
    """
    Get texture image from a texture, avoiding AttributeError for textures
    without an image (e.g. a texture of type 'NONE').
    """
    return getattr(bl_tex, 'image', None)


def getTextureName(bl_texture):
    if (isinstance(bl_texture, (bpy.types.ShaderNodeTexImage,
                                bpy.types.ShaderNodeTexEnvironment))):
        tex_name = bl_texture.image.name
    else:
        tex_name = bl_texture.name
    return tex_name


def mat4IsIdentity(mat4):
    return mat4 == mathutils.Matrix.Identity(4)


def mat4IsTRSDecomposable(mat4):
    # don't use mathutils.Matrix.is_orthogonal_axis_vectors property, because
    # it doesn't normalize vectors before checking
    mat = mat4.to_3x3().transposed()

    v0 = mat[0].normalized()
    v1 = mat[1].normalized()
    v2 = mat[2].normalized()

    return (abs(v0.dot(v1)) < ORTHO_EPS
            and abs(v0.dot(v2)) < ORTHO_EPS
            and abs(v1.dot(v2)) < ORTHO_EPS)
def mat4SvdDecomposeToMatrs(mat4):
    """
    Decompose the given matrix into a couple of TRS-decomposable matrices.
    Returns None in case of an error.
    """
    try:
        u, s, vh = np.linalg.svd(mat4.to_3x3())

        mat_u = mathutils.Matrix(u)
        mat_s = mathutils.Matrix([[s[0], 0, 0], [0, s[1], 0], [0, 0, s[2]]])
        mat_vh = mathutils.Matrix(vh)

        # NOTE: a potential reflection part in U and VH matrices isn't considered
        mat_trans = mathutils.Matrix.Translation(mat4.to_translation())
        mat_left = mat_trans @ (mat_u @ mat_s).to_4x4()

        return (mat_left, mat_vh.to_4x4())
    except np.linalg.LinAlgError:
        # numpy failed to decompose the matrix
        return None
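# A standalone sketch of the SVD identity relied on above (numpy only; the
# 3x3 matrix is an arbitrary example, not Blender data):
#
#   import numpy as np
#   m = np.array([[2.0, 1.0, 0.0],
#                 [0.0, 3.0, 0.5],
#                 [0.0, 0.0, 1.0]])
#   u, s, vh = np.linalg.svd(m)
#   assert np.allclose(u @ np.diag(s) @ vh, m)  # M == U @ S @ Vh
#   # U @ S combines a rotation with per-axis scale and Vh is orthogonal (a
#   # rotation up to reflection), so each factor is TRS-decomposable even
#   # when M itself is not.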
def findArmature(obj):
    for mod in obj.modifiers:
        if mod.type == 'ARMATURE' and mod.object is not None:
            return mod.object

    # use obj.find_armature as a last resort, because it doesn't work with
    # many armature modifiers
    return obj.find_armature()


def matHasBlendBackside(bl_mat):
    return (matIsBlend(bl_mat) and
            (hasattr(bl_mat, 'show_transparent_back') and bl_mat.show_transparent_back))


def matIsBlend(bl_mat):
    return bl_mat.blend_method in ['BLEND', 'MULTIPLY', 'ADD']


def updateOrbitCameraView(cam_obj, scene):
    target_obj = cam_obj.data.v3d.orbit_target_object

    eye = cam_obj.matrix_world.to_translation()
    target = (cam_obj.data.v3d.orbit_target if target_obj is None
              else target_obj.matrix_world.to_translation())

    quat = getLookAtAlignedUpMatrix(eye, target).to_quaternion()
    quat.rotate(cam_obj.matrix_world.inverted())
    quat.rotate(cam_obj.matrix_basis)

    rot_mode = cam_obj.rotation_mode
    cam_obj.rotation_mode = 'QUATERNION'
    cam_obj.rotation_quaternion = quat
    cam_obj.rotation_mode = rot_mode

    # need to update the camera state (i.e. world matrix) immediately in case
    # of several consecutive UI updates
    bpy.context.view_layer.update()


def getLookAtAlignedUpMatrix(eye, target):
    """
    This method uses camera axes for building the matrix.
    """
    axis_z = (eye - target).normalized()

    if axis_z.length == 0:
        axis_z = mathutils.Vector((0, -1, 0))

    axis_x = mathutils.Vector((0, 0, 1)).cross(axis_z)

    if axis_x.length == 0:
        axis_x = mathutils.Vector((1, 0, 0))

    axis_y = axis_z.cross(axis_x)

    return mathutils.Matrix([
        axis_x,
        axis_y,
        axis_z,
    ]).transposed()


def objDataUsesLineRendering(bl_obj_data):
    line_settings = getattr(getattr(bl_obj_data, 'v3d', None), 'line_rendering_settings', None)
    return bool(line_settings and line_settings.enable)


def getObjectAllCollections(blObj):
    return [coll for coll in bpy.data.collections if blObj in coll.all_objects[:]]


def getBlurPixelRadius(context, blLight):
    if blLight.type == 'SUN':
        relativeRadius = (blLight.shadow_buffer_soft / 100
                          * int(context.scene.eevee.shadow_cascade_size))
        # blur strength doesn't increase after a certain point
        return min(max(relativeRadius, 0), 100)
    else:
        blurGrade = math.floor(blLight.shadow_buffer_soft
                               * int(context.scene.eevee.shadow_cube_size) / 1000)
        blurGrade = min(blurGrade, 9)

        # some approximation of Blender blur radius
        if blurGrade > 2:
            return 4.22 * (blurGrade - 1.5)
        else:
            return blurGrade
def objHasExportedModifiers(obj):
    """
    Check if an object has any modifiers that should be applied before export.
    """
    return any([modifierNeedsExport(mod) for mod in obj.modifiers])


def obj_del_not_exported_modifiers(obj):
    """
    Remove modifiers that shouldn't be applied before export from an object.
    """
    for mod in obj.modifiers:
        if not modifierNeedsExport(mod):
            obj.modifiers.remove(mod)


def objAddTriModifier(obj):
    mod = obj.modifiers.new('Temporary_Triangulation', 'TRIANGULATE')
    mod.quad_method = 'FIXED'
    mod.keep_custom_normals = True


def objApplyModifiers(obj):
    """
    Creates a new mesh from applying modifiers to the mesh of the given object.
    Assigns the newly created mesh to the given object. The old mesh's user
    count will be decreased by 1.
    """
    dg = bpy.context.evaluated_depsgraph_get()

    need_linking = dg.scene.collection.objects.find(obj.name) == -1
    need_showing = obj.hide_viewport

    # NOTE: link the object if it's not in the 'Master Collection' and update
    # the view layer to make the depsgraph able to apply modifiers to the object
    if need_linking:
        dg.scene.collection.objects.link(obj)
        obj.update_tag()

    # a hidden object doesn't get its modifiers applied, need to make it
    # visible before updating the view layer
    if need_showing:
        obj.hide_viewport = False

    bpy.context.view_layer.update()

    # NOTE: some modifiers can remove UV layers from an object after applying
    # (e.g. Skin), which is a consistent behavior regarding uv usage in the
    # viewport (e.g. degenerate tangent space in the Normal Map node)
    obj_eval = obj.evaluated_get(dg)
    obj.data = bpy.data.meshes.new_from_object(obj_eval,
                                               preserve_all_data_layers=True, depsgraph=dg)
    obj.modifiers.clear()

    if need_linking:
        dg.scene.collection.objects.unlink(obj)
    if need_showing:
        obj.hide_viewport = True
def objTransferShapeKeys(obj_from, obj_to, depsgraph):
    """
    Transfer shape keys from one object to another if it's possible:
        - obj_from should be in the current view layer to be evaluated by depsgraph
        - obj_to should not have shape keys
        - obj_from (after evaluating) and obj_to should have the same amount of vertices
    Returns a boolean flag indicating successful transfer.
    """
    if obj_from.data.shape_keys is None:
        return True

    key_blocks_from = obj_from.data.shape_keys.key_blocks
    keys_from = [key for key in key_blocks_from if key != key.relative_key
                 and key != obj_from.data.shape_keys.reference_key]
    key_names = [key.name for key in keys_from]
    key_values = [key.value for key in keys_from]

    key_positions = []
    for key in keys_from:
        key.value = 0

    same_vertex_count = True
    for key in keys_from:
        key.value = 1

        obj_from.update_tag()
        bpy.context.view_layer.update()

        verts = obj_from.evaluated_get(depsgraph).data.vertices
        if len(verts) != len(obj_to.data.vertices):
            same_vertex_count = False
            break

        key_pos = [0] * 3 * len(verts)
        verts.foreach_get('co', key_pos)
        key_positions.append(key_pos)

        key.value = 0

    if same_vertex_count:
        # basis shape key
        obj_to.shape_key_add(name=obj_from.data.shape_keys.reference_key.name)

        vert_co = [0] * 3 * len(obj_to.data.vertices)
        for i in range(len(key_names)):
            key_block = obj_to.shape_key_add(name=key_names[i])
            key_block.value = key_values[i]
            key_block.data.foreach_set('co', key_positions[i])
    else:
        # don't create anything if vertex count isn't constant
        pass

    for i in range(len(keys_from)):
        keys_from[i].value = key_values[i]

    return same_vertex_count
def meshNeedTangentsForExport(mesh, optimize_tangents):
    """
    Check if it's needed to export tangents for the given mesh.
    """
    return (meshHasUvLayers(mesh) and (meshMaterialsUseTangents(mesh)
            or not optimize_tangents))


def meshHasUvLayers(mesh):
    return bool(mesh.uv_layers.active and len(mesh.uv_layers) > 0)


def meshMaterialsUseTangents(mesh):
    for mat in mesh.materials:
        if mat and mat.use_nodes and mat.node_tree != None:
            node_trees = extractMaterialNodeTrees(mat.node_tree)
            for node_tree in node_trees:
                for bl_node in node_tree.nodes:
                    if matNodeUseTangents(bl_node):
                        return True
        # HACK: in most cases this one indicates that object linking is used
        # disable tangent optimizations for such cases
        elif mat == None:
            return True

    return False


def matNodeUseTangents(bl_node):
    if isinstance(bl_node, bpy.types.ShaderNodeNormalMap):
        return True

    if (isinstance(bl_node, bpy.types.ShaderNodeTangent)
            and bl_node.direction_type == 'UV_MAP'):
        return True

    if isinstance(bl_node, bpy.types.ShaderNodeNewGeometry):
        for out in bl_node.outputs:
            if out.identifier == 'Tangent' and out.is_linked:
                return True

    return False


def extractMaterialNodeTrees(node_tree):
    """NOTE: located here since it's needed for meshMaterialsUseTangents()"""
    out = [node_tree]

    for bl_node in node_tree.nodes:
        if isinstance(bl_node, bpy.types.ShaderNodeGroup):
            out += extractMaterialNodeTrees(bl_node.node_tree)

    return out


def meshHasNgons(mesh):
    for poly in mesh.polygons:
        if poly.loop_total > 4:
            return True

    return False


def modifierNeedsExport(mod):
    """
    Modifiers that are applied before export shouldn't be:
        - hidden during render (a way to disable export of a modifier)
        - ARMATURE modifiers (used separately via skinning)
    """
    return mod.show_render and mod.type != 'ARMATURE'


def getSocketDefvalCompat(socket, RGBAToRGB=False, isOSL=False):
    """
    Get the default value of input/output sockets in some compatible form.
    Vector types such as bpy_prop_array, Vector, Euler, etc... are converted
    to lists, primitive types are converted to int/float.
    """
    if socket.type == 'VALUE' or socket.type == 'INT':
        return socket.default_value
    elif socket.type == 'BOOLEAN':
        return int(socket.default_value)
    elif socket.type == 'VECTOR':
        return [i for i in socket.default_value]
    elif socket.type == 'RGBA':
        val = [i for i in socket.default_value]
        if RGBAToRGB:
            val = val[0:3]
        return val
    elif socket.type == 'SHADER':
        # shader sockets have no default value
        return [0, 0, 0, 0]
    elif socket.type == 'STRING' and isOSL:
        # for now used for OSL only
        return pyosl.glslgen.string_to_osl_const(socket.default_value)
    elif socket.type == 'CUSTOM':
        # not supported
        return 0
    else:
        return 0
def createCustomProperty(bl_element):
    """
    Filters and creates a custom property, which is stored in the glTF extra field.
    """
    if not bl_element:
        return None

    props = {}

    # Custom properties, which are in most cases present and should not be exported.
    black_list = ['cycles', 'cycles_visibility', 'cycles_curves', '_RNA_UI', 'v3d']

    count = 0
    for custom_property in bl_element.keys():
        if custom_property in black_list:
            continue

        value = bl_element[custom_property]

        add_value = False
        if isinstance(value, str):
            add_value = True
        if isinstance(value, (int, float)):
            add_value = True
        if hasattr(value, "to_list"):
            value = value.to_list()
            add_value = True

        if add_value:
            props[custom_property] = value
            count += 1

    if count == 0:
        return None

    return props


def calcLightThresholdDist(bl_light, threshold):
    """Calculate the light attenuation distance from the given threshold.
    The light power at this distance equals the threshold value.
    """
    return math.sqrt(max(1e-16,
                     max(bl_light.color.r, bl_light.color.g, bl_light.color.b)
                     * max(1, bl_light.specular_factor)
                     * abs(bl_light.energy / 100)
                     / max(threshold, 1e-16)
                     ))
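# Derivation sketch (assumptions spelled out): with inverse-square falloff the
# received power at distance d is I / d**2, where I is the combined intensity
# max(color) * max(1, specular_factor) * |energy| / 100 built above. Setting
# threshold = I / d**2 and solving for d gives d = sqrt(I / threshold), which
# is exactly the expression returned; the 1e-16 clamps only guard against
# division by zero and a negative sqrt argument.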
| 29.525292 | 95 | 0.669412 | 2,053 | 15,176 | 4.806624 | 0.253288 | 0.006688 | 0.008512 | 0.011552 | 0.178354 | 0.116842 | 0.043575 | 0.012566 | 0 | 0 | 0 | 0.010884 | 0.243213 | 15,176 | 513 | 96 | 29.582846 | 0.848324 | 0.244201 | 0 | 0.1777 | 0 | 0 | 0.024144 | 0.005991 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121951 | false | 0.003484 | 0.017422 | 0.02439 | 0.313589 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99c8bc3429d14ce3a02ee120f70154934756c968 | 37,815 | py | Python | VESIcal/vplot.py | kaylai/VESIcal | 3ea18b0ce30b30fb55786346c37ef8f428ee5034 | [
"MIT"
] | 16 | 2020-06-22T09:07:32.000Z | 2022-01-12T13:42:12.000Z | VESIcal/vplot.py | kaylai/VESIcal | 3ea18b0ce30b30fb55786346c37ef8f428ee5034 | [
"MIT"
] | 136 | 2020-05-22T21:43:23.000Z | 2022-03-07T22:06:33.000Z | build/lib/VESIcal/vplot.py | kaylai/VESIcal | 3ea18b0ce30b30fb55786346c37ef8f428ee5034 | [
"MIT"
] | 3 | 2021-05-18T08:21:02.000Z | 2022-03-25T01:08:10.000Z | from VESIcal import core
from VESIcal import calibrations
from VESIcal.tasplot import add_LeMaitre_fields
import pandas as pd
import numpy as np
import warnings as w
import matplotlib as mpl
import matplotlib.pyplot as plt
# ---------- DEFINE CUSTOM PLOTTING FORMATTING ------------ #
style = "seaborn-colorblind"
plt.style.use(style)
plt.rcParams["mathtext.default"] = "regular"
plt.rcParams["mathtext.fontset"] = "dejavusans"
mpl.rcParams['patch.linewidth'] = 1
mpl.rcParams['axes.linewidth'] = 1
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
plt.rcParams['legend.fontsize'] = 14
mpl.rcParams['lines.markersize'] = 10
# Define color cycler based on plot style set here
# get style formatting set by plt.style.use():
the_rc = plt.style.library[style]
# list of colors by hex code:
color_list = the_rc['axes.prop_cycle'].by_key()['color'] * 10
color_cyler = the_rc['axes.prop_cycle'] # get the cycler
# ----------- MAGMASAT PLOTTING FUNCTIONS ----------- #
def smooth_isobars_and_isopleths(isobars=None, isopleths=None):
    """
    Takes in a dataframe with calculated isobar and isopleth information
    (e.g., output from calculate_isobars_and_isopleths) and smooths the data
    for plotting.

    Parameters
    ----------
    isobars: pandas DataFrame
        OPTIONAL. DataFrame object containing isobar information as calculated
        by calculate_isobars_and_isopleths.
    isopleths: pandas DataFrame
        OPTIONAL. DataFrame object containing isopleth information as
        calculated by calculate_isobars_and_isopleths.

    Returns
    -------
    pandas DataFrame
        DataFrame with x and y values for all isobars and all isopleths.
        Useful if a user wishes to do custom plotting with isobar and isopleth
        data rather than using the built-in `plot_isobars_and_isopleths()`
        function.
    """
    np.seterr(divide='ignore', invalid='ignore')  # turn off numpy warning
    w.filterwarnings("ignore", message="Polyfit may be poorly conditioned")

    if isobars is not None:
        P_vals = isobars.Pressure.unique()
        isobars_lists = isobars.values.tolist()
        # add zero values to volatiles list
        isobars_lists.append([0.0, 0.0, 0.0, 0.0])

        isobars_pressure = []
        isobars_H2O_liq = []
        isobars_CO2_liq = []

        # do some data smoothing
        for pressure in P_vals:
            Pxs = [item[1] for item in isobars_lists if item[0] == pressure]
            Pys = [item[2] for item in isobars_lists if item[0] == pressure]

            try:
                # calculate polynomial
                Pz = np.polyfit(Pxs, Pys, 3)
                Pf = np.poly1d(Pz)

                # calculate new x's and y's
                Px_new = np.linspace(Pxs[0], Pxs[-1], 50)
                Py_new = Pf(Px_new)

                # Save x's and y's
                Px_new_list = list(Px_new)
                isobars_H2O_liq += Px_new_list

                Py_new_list = list(Py_new)
                isobars_CO2_liq += Py_new_list

                pressure_vals_for_list = [pressure] * len(Px_new)
                isobars_pressure += pressure_vals_for_list
            except Exception:
                Px_list = list(Pxs)
                isobars_H2O_liq += Px_list

                Py_list = list(Pys)
                isobars_CO2_liq += Py_list

                pressure_vals_for_list = [pressure] * len(Pxs)
                isobars_pressure += pressure_vals_for_list

        isobar_df = pd.DataFrame({"Pressure": isobars_pressure,
                                  "H2O_liq": isobars_H2O_liq,
                                  "CO2_liq": isobars_CO2_liq})

    if isopleths is not None:
        XH2O_vals = isopleths.XH2O_fl.unique()
        isopleths_lists = isopleths.values.tolist()

        isopleths_XH2O_fl = []
        isopleths_H2O_liq = []
        isopleths_CO2_liq = []

        for Xfl in XH2O_vals:
            Xxs = [item[1] for item in isopleths_lists if item[0] == Xfl]
            Xys = [item[2] for item in isopleths_lists if item[0] == Xfl]

            try:
                # calculate polynomial
                Xz = np.polyfit(Xxs, Xys, 2)
                Xf = np.poly1d(Xz)

                # calculate new x's and y's
                Xx_new = np.linspace(Xxs[0], Xxs[-1], 50)
                Xy_new = Xf(Xx_new)

                # Save x's and y's
                Xx_new_list = list(Xx_new)
                isopleths_H2O_liq += Xx_new_list

                Xy_new_list = list(Xy_new)
                isopleths_CO2_liq += Xy_new_list

                XH2Ofl_vals_for_list = [Xfl] * len(Xx_new)
                isopleths_XH2O_fl += XH2Ofl_vals_for_list
            except Exception:
                Xx_list = list(Xxs)
                isopleths_H2O_liq += Xx_list

                Xy_list = list(Xys)
                isopleths_CO2_liq += Xy_list

                XH2Ofl_vals_for_list = [Xfl] * len(Xxs)
                isopleths_XH2O_fl += XH2Ofl_vals_for_list

        isopleth_df = pd.DataFrame({"XH2O_fl": isopleths_XH2O_fl,
                                    "H2O_liq": isopleths_H2O_liq,
                                    "CO2_liq": isopleths_CO2_liq})

    np.seterr(divide='warn', invalid='warn')  # turn numpy warning back on
    w.filterwarnings("always", message="Polyfit may be poorly conditioned")

    if isobars is not None:
        if isopleths is not None:
            return isobar_df, isopleth_df
        else:
            return isobar_df
    else:
        if isopleths is not None:
            return isopleth_df
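# A minimal usage sketch (hypothetical isobar values; the Pressure/H2O_liq/
# CO2_liq column layout mirrors calculate_isobars_and_isopleths output):
#
#   isobars = pd.DataFrame({"Pressure": [1000.0] * 3 + [2000.0] * 3,
#                           "H2O_liq": [0.5, 2.0, 4.0, 0.5, 2.5, 5.0],
#                           "CO2_liq": [0.30, 0.20, 0.05, 0.60, 0.45, 0.10]})
#   smoothed = smooth_isobars_and_isopleths(isobars=isobars)
#   # 'smoothed' holds 50 interpolated (H2O_liq, CO2_liq) points per pressure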
def plot(isobars=None, isopleths=None, degassing_paths=None, custom_H2O=None,
         custom_CO2=None, isobar_labels=None, isopleth_labels=None,
         degassing_path_labels=None, custom_labels=None,
         custom_colors="VESIcal", custom_symbols=None, markersize=10,
         figsize=(12, 8), save_fig=False, extend_isobars_to_zero=True,
         smooth_isobars=False, smooth_isopleths=False, **kwargs):
    """
    Custom automatic plotting of model calculations in VESIcal.
    Isobars, isopleths, and degassing paths can be plotted. Labels can be
    specified for each. Any combination of isobars, isopleths, and degassing
    paths can be plotted.

    Parameters
    ----------
    isobars: pandas DataFrame or list
        OPTIONAL. DataFrame object containing isobar information as calculated
        by calculate_isobars_and_isopleths. Or a list of DataFrame objects.
    isopleths: pandas DataFrame or list
        OPTIONAL. DataFrame object containing isopleth information as
        calculated by calculate_isobars_and_isopleths. Or a list of DataFrame
        objects.
    degassing_paths: list
        OPTIONAL. List of DataFrames with degassing information as generated
        by calculate_degassing_path().
    custom_H2O: list
        OPTIONAL. List of groups of H2O values to plot as points. For example
        myfile.data['H2O'] is one group of H2O values. Must be passed with
        custom_CO2 and must be same length as custom_CO2.
    custom_CO2: list
        OPTIONAL. List of groups of CO2 values to plot as points. For example
        myfile.data['CO2'] is one group of CO2 values. Must be passed with
        custom_H2O and must be same length as custom_H2O.
    isobar_labels: list
        OPTIONAL. Labels for the plot legend. Default is None, in which case
        each plotted line will be given the generic legend name of
        "Isobars n", with n referring to the nth isobars passed. Isobar
        pressure is given in parentheses. The user can pass their own labels
        as a list of strings. If more than one set of isobars is passed, the
        labels should refer to each set of isobars, not each pressure.
    isopleth_labels: list
        OPTIONAL. Labels for the plot legend. Default is None, in which case
        each plotted isopleth will be given the generic legend name of
        "Isopleth n", with n referring to the nth isopleths passed. Isopleth
        XH2O values are given in parentheses. The user can pass their own
        labels as a list of strings. If more than one set of isopleths is
        passed, the labels should refer to each set of isopleths, not each
        XH2O value.
    degassing_path_labels: list
        OPTIONAL. Labels for the plot legend. Default is None, in which case
        each plotted line will be given the generic legend name of "Pathn",
        with n referring to the nth degassing path passed. The user can pass
        their own labels as a list of strings.
    custom_labels: list
        OPTIONAL. Labels for the plot legend. Default is None, in which case
        each group of custom points will be given the generic legend name of
        "Customn", with n referring to the nth degassing path passed. The user
        can pass their own labels as a list of strings.
    custom_colors: list
        OPTIONAL. Default value is "VESIcal", which uses VESIcal's color ramp.
        A list of color values readable by matplotlib can be passed here if
        custom symbol colors are desired. The length of this list must match
        that of custom_H2O and custom_CO2.
    custom_symbols: list
        OPTIONAL. Default value is None, in which case data are plotted as
        filled circles. A list of symbol styles readable by matplotlib can be
        passed here if custom symbol types are desired. The length of this
        list must match that of custom_H2O and custom_CO2.
    markersize: int
        OPTIONAL. Default value is 10. Same as markersize kwarg in matplotlib.
        Any numeric value passed here will set the marker size for
        (custom_H2O, custom_CO2) points.
    figsize: tuple
        OPTIONAL. Default value is (12, 8). Sets the matplotlib.pyplot figsize
        value as (x_dimension, y_dimension).
    save_fig: False or str
        OPTIONAL. Default value is False, in which case the figure will not be
        saved. If a string is passed, the figure will be saved with the string
        as the filename. The string must include the file extension.
    extend_isobars_to_zero: bool
        OPTIONAL. If True (default), isobars will be extended to zero, even if
        there is a finite solubility at zero partial pressure.
    smooth_isobars: bool
        OPTIONAL. Default is False. If set to True, isobar data will be fit to
        a polynomial and plotted. If False, the raw input data will be plotted.
    smooth_isopleths: bool
        OPTIONAL. Default is False. If set to True, isopleth data will be fit
        to a polynomial and plotted. If False, the raw input data will be
        plotted.

    Returns
    -------
    fig, axes Matplotlib objects
        fig and axes matplotlib objects defining a plot with x-axis as H2O wt%
        in the melt and y-axis as CO2 wt% in the melt. Isobars, or lines of
        constant pressure at which the sample magma composition is saturated,
        and isopleths, or lines of constant fluid composition at which the
        sample magma composition is saturated, are plotted if passed.
        Degassing paths, or the concentration of dissolved H2O and CO2 in a
        melt equilibrated along a path of decreasing pressure, is plotted if
        passed.
    """
    # Turn off warnings:
    np.seterr(divide='ignore', invalid='ignore')  # turn off numpy warning
    w.filterwarnings("ignore", message="Polyfit may be poorly conditioned")

    def check_inputs(custom_H2O, custom_CO2):
        if custom_H2O is not None:
            if custom_CO2 is None:
                raise core.InputError("If x data is passed, y data must also "
                                      "be passed.")
            else:
                if len(custom_H2O) == len(custom_CO2):
                    pass
                else:
                    raise core.InputError("x and y data must be same length")
        if custom_CO2 is not None:
            if custom_H2O is None:
                raise core.InputError("If y data is passed, x data must also "
                                      "be passed.")

    def check_colors(custom_colors):
        if custom_colors == "VESIcal":
            use_colors = color_list
        elif isinstance(custom_colors, list):
            use_colors = custom_colors
        else:
            raise core.InputError("Argument custom_colors must be type list. "
                                  "Just passing one item? Try putting square "
                                  "brackets, [], around it.")
        return use_colors

    def calc_extend_isobars_to_zero(Pxs, Pys):
        """
        Calculates new end-points for plotting isobars when
        extend_isobars_to_zero option is set to True.

        Parameters
        ----------
        Pxs, Pys: list
            List of x and y values corresponding to isobars.
        """
        if Pxs[0] * Pys[0] != 0.0:
            if Pxs[0] > Pys[0]:
                # create new array of length n+1 if n is the length of the
                # original array:
                Px_new = np.zeros(np.shape(Pxs)[0] + 1)
                # set the first x value in the new array equal to 0:
                Px_new[0] = 0
                # fill the rest of the new array with the original array
                # values:
                Px_new[1:] = Pxs
                # overwrite the original array with the new one:
                Pxs = Px_new

                Py_new = np.zeros(np.shape(Pys)[0] + 1)
                Py_new[0] = Pys[0]
                Py_new[1:] = Pys
                Pys = Py_new
            else:
                Px_new = np.zeros(np.shape(Pxs)[0] + 1)
                Px_new[0] = Pxs[0]
                Px_new[1:] = Pxs
                Pxs = Px_new

                Py_new = np.zeros(np.shape(Pys)[0] + 1)
                Py_new[0] = 0
                Py_new[1:] = Pys
                Pys = Py_new

        if Pxs[-1] * Pys[-1] != 0.0:
            if Pxs[-1] < Pys[-1]:
                Px_new = np.zeros(np.shape(Pxs)[0] + 1)
                Px_new[-1] = 0
                Px_new[:-1] = Pxs
                Pxs = Px_new

                Py_new = np.zeros(np.shape(Pys)[0] + 1)
                Py_new[-1] = Pys[-1]
                Py_new[:-1] = Pys
                Pys = Py_new
            else:
                Px_new = np.zeros(np.shape(Pxs)[0] + 1)
                Px_new[-1] = Pxs[-1]
                Px_new[:-1] = Pxs
                Pxs = Px_new

                Py_new = np.zeros(np.shape(Pys)[0] + 1)
                Py_new[-1] = 0
                Py_new[:-1] = Pys
                Pys = Py_new

        return Pxs, Pys
    # -------- HANDLE USER INPUT ERRORS, SET COLORS, SMOOTH LINES -------- ##
    check_inputs(custom_H2O=custom_H2O, custom_CO2=custom_CO2)
    use_colors = check_colors(custom_colors=custom_colors)

    if smooth_isobars:
        isobars = smooth_isobars_and_isopleths(isobars=isobars)
    if smooth_isopleths:
        isopleths = smooth_isobars_and_isopleths(isopleths=isopleths)

    # -------- CREATE FIGURE -------- ##
    fig, ax = plt.subplots(figsize=figsize)

    if 'custom_x' in kwargs:
        ax.set(xlabel=kwargs['xlabel'], ylabel=kwargs['ylabel'])
    else:
        ax.set(xlabel='H$_2$O wt%', ylabel='CO$_2$ wt%')

    labels = []

    # -------- PLOT ISOBARS -------- ##
    if isobars is not None:
        if isinstance(isobars, pd.DataFrame):
            isobars = [isobars]

        for i in range(len(isobars)):
            P_vals = isobars[i].Pressure.unique()
            isobars_lists = isobars[i].values.tolist()

            # add zero values to volatiles list
            isobars_lists.append([0.0, 0.0, 0.0, 0.0])

            P_iter = 0
            for pressure in P_vals:
                P_iter += 1
                Pxs = [item[1] for item in isobars_lists
                       if item[0] == pressure]
                Pys = [item[2] for item in isobars_lists
                       if item[0] == pressure]

                if extend_isobars_to_zero:
                    try:
                        Pxs, Pys = calc_extend_isobars_to_zero(Pxs, Pys)
                    except Exception:
                        pass
                else:
                    print(extend_isobars_to_zero)

                if len(isobars) > 1:
                    if P_iter == 1:
                        P_list = [int(i) for i in P_vals]
                        if isinstance(isobar_labels, list):
                            labels.append(str(isobar_labels[i]) + ' (' +
                                          ', '.join(map(str, P_list)) +
                                          " bars)")
                        else:
                            labels.append('Isobars ' + str(i + 1) + ' (' +
                                          ', '.join(map(str, P_list)) +
                                          " bars)")
                    else:
                        labels.append('_nolegend_')

                if len(isobars) > 1:
                    ax.plot(Pxs, Pys, color=color_list[i])
                else:
                    ax.plot(Pxs, Pys)

        if len(isobars) == 1:
            labels += [str(P_val) + " bars" for P_val in P_vals]
    # -------- PLOT ISOPLETHS -------- ##
    if isopleths is not None:
        if isinstance(isopleths, pd.DataFrame):
            isopleths = [isopleths]

        for i in range(len(isopleths)):
            XH2O_vals = isopleths[i].XH2O_fl.unique()
            isopleths_lists = isopleths[i].values.tolist()

            H_iter = 0
            for Xfl in XH2O_vals:
                H_iter += 1
                Xxs = [item[1] for item in isopleths_lists if item[0] == Xfl]
                Xys = [item[2] for item in isopleths_lists if item[0] == Xfl]

                if len(isopleths) > 1:
                    if H_iter == 1:
                        H_list = [i for i in XH2O_vals]
                        if isinstance(isopleth_labels, list):
                            labels.append(str(isopleth_labels[i]) + ' (' +
                                          ', '.join(map(str, H_list)) +
                                          " XH2Ofluid)")
                        else:
                            labels.append('Isopleths ' + str(i + 1) + ' (' +
                                          ', '.join(map(str, H_list)) +
                                          " XH2Ofluid)")
                    else:
                        labels.append('_nolegend_')
                    ax.plot(Xxs, Xys, ls='dashed', color=color_list[i])

                if len(isopleths) == 1:
                    H_list = [i for i in XH2O_vals]
                    if H_iter == 1:
                        labels.append('Isopleths (' +
                                      ', '.join(map(str, H_list)) +
                                      " XH2Ofluid)")
                    else:
                        labels.append('_nolegend_')
                    ax.plot(Xxs, Xys, ls='dashed', color='k')

    # -------- PLOT DEGASSING PATHS -------- ##
    if degassing_paths is not None:
        if isinstance(degassing_paths, pd.DataFrame):
            degassing_paths = [degassing_paths]
        degassing_colors = color_list.copy()

        iterno = 0
        for i in range(len(degassing_paths)):
            if degassing_path_labels is None:
                iterno += 1
                labels.append('Path%s' % iterno)
                ax.plot(degassing_paths[i]["H2O_liq"],
                        degassing_paths[i]["CO2_liq"], ls='dotted',
                        color=degassing_colors[i])
            else:
                labels.append(degassing_path_labels[iterno])
                ax.plot(degassing_paths[i]["H2O_liq"],
                        degassing_paths[i]["CO2_liq"], ls='dotted',
                        color=degassing_colors[i])
                iterno += 1

        for i in range(len(degassing_paths)):
            ax.plot(degassing_paths[i]["H2O_liq"].max(),
                    degassing_paths[i]["CO2_liq"].max(), 'o',
                    color=degassing_colors[i])
            labels.append('_nolegend_')
    # -------- PLOT CUSTOM H2O-CO2 -------- ##
    if custom_H2O is not None and custom_CO2 is not None:
        if isinstance(custom_H2O, pd.DataFrame):
            custom_H2O = [custom_H2O]
        if isinstance(custom_CO2, pd.DataFrame):
            custom_CO2 = [custom_CO2]

        if custom_symbols is None:
            use_marker = ['o'] * len(custom_H2O)
        else:
            use_marker = custom_symbols

        iterno = 0
        for i in range(len(custom_H2O)):
            if custom_labels is None:
                iterno += 1
                labels.append('Custom%s' % iterno)
                ax.plot(custom_H2O[i], custom_CO2[i], use_marker[i],
                        color=use_colors[i], markersize=markersize)
            else:
                labels.append(custom_labels[iterno])
                ax.plot(custom_H2O[i], custom_CO2[i], use_marker[i],
                        color=use_colors[i], markersize=markersize)
                iterno += 1

    # -------- PLOT CUSTOM X-Y -------- ##
    if 'custom_x' in kwargs:
        custom_x = kwargs['custom_x']
        custom_y = kwargs['custom_y']

        if isinstance(custom_x, pd.core.series.Series):
            custom_x = [list(custom_x.values)]
        if isinstance(custom_y, pd.core.series.Series):
            custom_y = [list(custom_y.values)]

        if custom_symbols is None:
            use_marker = ['o'] * len(custom_x)
        else:
            use_marker = custom_symbols

        iterno = 0
        for i in range(len(custom_x)):
            if custom_labels is None:
                iterno += 1
                labels.append('Custom%s' % iterno)
                ax.plot(custom_x[i], custom_y[i], use_marker[i],
                        color=use_colors[i], markersize=markersize)
            else:
                labels.append(custom_labels[iterno])
                ax.plot(custom_x[i], custom_y[i], use_marker[i],
                        color=use_colors[i], markersize=markersize)
                iterno += 1

    # -------- PLOT LEGEND -------- ##
    ax.legend(labels, bbox_to_anchor=(1.01, 1), loc='upper left')

    if 'custom_x' not in kwargs:
        ax.set_xlim(left=0)
        ax.set_ylim(bottom=0)

    np.seterr(divide='warn', invalid='warn')  # turn numpy warning back on
    w.filterwarnings("always", message="Polyfit may be poorly conditioned")

    # -------- SAVE FIGURE IF DESIRED -------- ##
    if save_fig is not False:
        fig.savefig(save_fig)

    return fig, ax
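# A minimal usage sketch (hypothetical data; continues the 'isobars' frame
# sketched after smooth_isobars_and_isopleths above):
#
#   fig, ax = plot(isobars=isobars, smooth_isobars=True,
#                  custom_H2O=[[1.0, 2.5]], custom_CO2=[[0.10, 0.25]],
#                  custom_labels=['My melt inclusions'])
#   show()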
def scatterplot(custom_x, custom_y, xlabel=None, ylabel=None, **kwargs):
    """
    Custom x-y plotting using VESIcal's built-in plot() function, built on
    Matplotlib's plot and scatter functions.

    Parameters
    ----------
    custom_x: list
        List of groups of x-values to plot as points or lines
    custom_y: list
        List of groups of y-values to plot as points or lines
    xlabel: str
        OPTIONAL. What to display along the x-axis.
    ylabel: str
        OPTIONAL. What to display along the y-axis.
    kwargs:
        Can take in any keyword arguments that can be passed to `plot()`.

    Returns
    -------
    fig, ax matplotlib objects
        X-y plot with custom x and y axis values and labels.
    """
    if isinstance(custom_x, list) and isinstance(custom_y, list):
        if len(custom_x) != len(custom_y):
            raise core.InputError("X and y lists must be same length")

    if xlabel is not None:
        if isinstance(xlabel, str):
            pass
        else:
            raise core.InputError("xlabel must be string")

    if ylabel is not None:
        if isinstance(ylabel, str):
            pass
        else:
            raise core.InputError("ylabel must be string")

    return plot(custom_x=custom_x, custom_y=custom_y, xlabel=xlabel,
                ylabel=ylabel, **kwargs)
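# e.g. (hypothetical values; each inner list is one group of points):
#
#   fig, ax = scatterplot(custom_x=[[0.5, 1.0, 2.0]],
#                         custom_y=[[0.05, 0.10, 0.30]],
#                         xlabel='H$_2$O wt%', ylabel='CO$_2$ wt%')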
# ------- Define custom plotting tools for checking calibrations ------- #
def calib_plot(user_data=None, model='all', plot_type='TAS', zoom=None,
               figsize=(17, 8), legend=True, save_fig=False, **kwargs):
    """
    Plots user data and calibration set of any or all models on any x-y plot
    or a total alkalis vs silica (TAS) diagram. TAS diagram boundaries
    provided by tasplot python module, copyright John A Stevenson.

    Parameters
    ----------
    user_data: BatchFile object, Sample object, pandas DataFrame, pandas
        Series, or dict.
        OPTIONAL. Default value is None, in which case only the model
        calibration set is plotted. User provided sample data describing the
        oxide composition of one or more samples. Multiple samples can be
        passed as a BatchFile object or pandas DataFrame. A single sample can
        be passed as a pandas Series.
    model: str or list
        OPTIONAL. Default value is 'all', in which case all model calibration
        datasets will be plotted. 'Mixed' can be used to plot all mixed fluid
        models. String of the name of the model calibration dataset to plot
        (e.g., 'Shishkina'). Multiple models can be plotted by passing them as
        strings within a list (e.g., ['Shishkina', 'Dixon']).
    plot_type: str
        OPTIONAL. Default value is 'TAS', which returns a total alkali vs
        silica (TAS) diagram. Any two oxides can be plotted as an x-y plot by
        setting plot_type='xy' and specifying x- and y-axis oxides, e.g.,
        x='SiO2', y='Al2O3'.
    zoom: str or list
        OPTIONAL. Default value is None in which case axes will be set to the
        default of 35<x<100 wt% and 0<y<25 wt% for TAS type plots and the best
        values to show the data for xy type plots. Can pass "user_data" to
        plot the figure where the x and y axes are scaled down to zoom in and
        only show the region surrounding the user_data. A list of tuples may
        be passed to manually specify x and y limits. Pass in data as
        [(x_min, x_max), (y_min, y_max)]. For example, the default limits here
        would be passed in as [(35, 100), (0, 25)].
    figsize: tuple
        OPTIONAL. Default value is (17, 8). Sets the matplotlib.pyplot figsize
        value as (x_dimension, y_dimension).
    legend: bool
        OPTIONAL. Default value is True. Can be set to False in which case the
        legend will not be displayed.
    save_fig: False or str
        OPTIONAL. Default value is False, in which case the figure will not be
        saved. If a string is passed, the figure will be saved with the string
        as the filename. The string must include the file extension.

    Returns
    -------
    matplotlib object
    """
    # Get x and y axis limits, if user passed them
    if zoom is None:
        user_xmin = 35
        user_xmax = 100
        user_ymin = 0
        user_ymax = 25
    elif zoom == 'user_data':
        if isinstance(user_data, pd.DataFrame):
            print("'user_data' type zoom for more than one sample is not "
                  "implemented yet.")
            user_xmin = 35
            user_xmax = 100
            user_ymin = 0
            user_ymax = 25
        elif (isinstance(user_data, pd.core.series.Series) or
              isinstance(user_data, dict)):
            user_xmin = user_data['SiO2'] - 5
            user_xmax = user_data['SiO2'] + 5
            user_ymin = user_data['Na2O'] + user_data['K2O'] - 2
            if user_ymin < 0:
                user_ymin = 0
            user_ymax = user_data['Na2O'] + user_data['K2O'] + 2
    elif isinstance(zoom, list):
        user_xmin, user_xmax = zoom[0]
        user_ymin, user_ymax = zoom[1]
    else:
        raise core.InputError('Trying to pass zoom coords? Pass as ' +
                              '[(x, x), (y, y)]')

    # Create the figure
    fig, ax1 = plt.subplots(figsize=figsize)
    font = {'family': 'sans-serif',
            'color': 'black',
            'weight': 'normal',
            'size': 20,
            }

    # TAS figure
    if plot_type == 'TAS':
        # adjust x limits here if you want to focus on a specific part of
        # compositional space:
        ax1.set_xlim([user_xmin, user_xmax])
        # adjust y limits here
        ax1.set_ylim([user_ymin, user_ymax])

        plt.xlabel('SiO$_2$, wt%', fontdict=font, labelpad=15)
        plt.ylabel('Na$_2$O+K$_2$O, wt%', fontdict=font, labelpad=15)

        # add LeMaitre fields
        if zoom is None:
            add_LeMaitre_fields(ax1)
    elif plot_type == 'xy':
        if 'x' in kwargs and 'y' in kwargs:
            x = kwargs['x']
            y = kwargs['y']
            if zoom is not None:
                ax1.set_xlim([user_xmin, user_xmax])
                ax1.set_ylim([user_ymin, user_ymax])
            plt.xlabel(str(x) + ", wt%", fontdict=font, labelpad=15)
            plt.ylabel(str(y) + ", wt%", fontdict=font, labelpad=15)
        else:
            raise core.InputError("If plot_type is 'xy', then x and y "
                                  "values must be passed as strings. For "
                                  "example, x='SiO2', y='Al2O3'.")
    # Plot Calibration Data
    if model == 'all':
        model = ['MagmaSat',
                 'Shishkina',
                 'Dixon',
                 'IaconoMarziano',
                 'Liu',
                 'AllisonCarbon',
                 'MooreWater']
    if model == 'mixed':
        model = ['MagmaSat',
                 'Shishkina',
                 'Dixon',
                 'IaconoMarziano',
                 'Liu']

    if isinstance(model, str):
        model = [model]

    if isinstance(model, list):
        # set legends to false
        h2o_legend = False
        co2_h2oco2_legend = False

        # check which legends to turn to True
        for modelname in model:
            model_type = calibrations.return_calibration_type(modelname)
            if model_type['H2O']:
                h2o_legend = True
            if model_type['CO2'] or model_type['Mixed']:
                co2_h2oco2_legend = True

        if h2o_legend:
            plt.scatter([], [], marker='', label=r"$\bf{Pure \ H_2O:}$")
            for modelname in model:
                calibdata = calibrations.return_calibration(modelname)
                model_type = calibrations.return_calibration_type(modelname)
                if isinstance(calibdata, str):
                    w.warn(calibdata)
                else:
                    if model_type['H2O']:
                        if plot_type == 'TAS':
                            try:
                                plt.scatter(calibdata['H2O']['SiO2'],
                                            (calibdata['H2O']['Na2O'] +
                                             calibdata['H2O']['K2O']),
                                            marker='s', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                            except Exception:
                                plt.scatter(calibdata['H2O']['SiO2'],
                                            calibdata['H2O']['Na2O+K2O'],
                                            marker='s', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                        if plot_type == 'xy':
                            try:
                                plt.scatter(calibdata['H2O'][x],
                                            calibdata['H2O'][y],
                                            marker='s', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                            except Exception:
                                w.warn("The requested oxides were not found "
                                       "in the calibration dataset for " +
                                       str(modelname) + ".")
            if co2_h2oco2_legend:
                plt.scatter([], [], marker='', label=r"${\ }$")

        if co2_h2oco2_legend:
            plt.scatter([], [], marker='',
                        label=r"$\bf{\ CO_2 \ and \ H_2O\!-\!CO_2:}$")
            for modelname in model:
                calibdata = calibrations.return_calibration(modelname)
                model_type = calibrations.return_calibration_type(modelname)
                if isinstance(calibdata, str):
                    w.warn(calibdata)
                else:
                    if model_type['CO2'] and model_type['Mixed']:
                        frames = [calibdata['CO2'], calibdata['Mixed']]
                        co2_and_mixed = pd.concat(frames)
                        if plot_type == 'TAS':
                            try:
                                plt.scatter(co2_and_mixed['SiO2'],
                                            (co2_and_mixed['Na2O'] +
                                             co2_and_mixed['K2O']),
                                            marker='d', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                            except Exception:
                                plt.scatter(co2_and_mixed['SiO2'],
                                            co2_and_mixed['Na2O+K2O'],
                                            marker='d', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                        if plot_type == 'xy':
                            try:
                                plt.scatter(co2_and_mixed[x], co2_and_mixed[y],
                                            marker='d', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                            except Exception:
                                w.warn("The requested oxides were not found "
                                       "in the calibration dataset for " +
                                       str(modelname) + ".")
                    elif model_type['CO2'] or model_type['Mixed']:
                        if model_type['CO2']:
                            thistype = 'CO2'
                        if model_type['Mixed']:
                            thistype = 'Mixed'
                        if plot_type == 'TAS':
                            try:
                                plt.scatter(calibdata[thistype]['SiO2'],
                                            (calibdata[thistype]['Na2O'] +
                                             calibdata[thistype]['K2O']),
                                            marker='d', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                            except Exception:
                                plt.scatter(calibdata[thistype]['SiO2'],
                                            calibdata[thistype]['Na2O+K2O'],
                                            marker='d', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                        if plot_type == 'xy':
                            try:
                                plt.scatter(calibdata[thistype][x],
                                            calibdata[thistype][y],
                                            marker='d', edgecolors='k',
                                            facecolors=calibdata['facecolor'],
                                            label=str(modelname))
                            except Exception:
                                w.warn("The requested oxides were not found "
                                       "in the calibration dataset for "
                                       + str(modelname) + ".")
    else:
        raise core.InputError("model must be of type str or list")
    # Plot user data
    if user_data is None:
        pass
    else:
        if ((user_data.__class__.__module__, user_data.__class__.__name__) ==
                ('VESIcal', 'BatchFile')):
            user_data = user_data.get_data()
            # batchfile and VESIcal (__init__) are not imported to avoid
            # circular imports
            # use above notation to interrogate datatype
        if ((user_data.__class__.__module__, user_data.__class__.__name__) ==
                ('VESIcal', 'Sample')):
            user_data = user_data.get_composition()
            # batchfile and VESIcal (__init__) are not imported to avoid
            # circular imports
            # use above notation to interrogate datatype
        if plot_type == 'TAS':
            _sample = user_data.copy()
            try:
                _sample["TotalAlkalis"] = _sample["Na2O"] + _sample["K2O"]
            except Exception:
                raise core.InputError("Na2O and K2O data must be in user_data")
            plt.scatter(_sample['SiO2'], _sample['TotalAlkalis'],
                        s=150, edgecolors='w', facecolors='red', marker='P',
                        label='User Data')
        if plot_type == 'xy':
            _sample = user_data.copy()
            plt.scatter(_sample[x], _sample[y],
                        s=150, edgecolors='w', facecolors='red', marker='P',
                        label='User Data')

    if legend:
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")

    fig.tight_layout()

    if isinstance(save_fig, str):
        fig.savefig(save_fig)

    return fig, ax1
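# A minimal usage sketch (hypothetical oxide values in wt%; 'Dixon' is one of
# the model names listed above):
#
#   my_sample = {'SiO2': 52.0, 'Na2O': 3.1, 'K2O': 1.2}
#   fig, ax = calib_plot(user_data=my_sample, model='Dixon', zoom='user_data')
#   show()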
def show():
    """
    Local implementation of pyplot.show(). For displaying created plots.
    """
    plt.show()
| 40.100742 | 80 | 0.532276 | 4,458 | 37,815 | 4.373935 | 0.114401 | 0.012719 | 0.006923 | 0.013539 | 0.522745 | 0.469101 | 0.42915 | 0.376789 | 0.346582 | 0.320016 | 0 | 0.017262 | 0.374957 | 37,815 | 942 | 81 | 40.143312 | 0.807709 | 0.2801 | 0 | 0.467972 | 0 | 0 | 0.08569 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014235 | false | 0.021352 | 0.014235 | 0 | 0.042705 | 0.003559 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99c9d0224c8f3e8cd2f2000caf10456ad5cb58d3 | 824 | py | Python | test/test_tools.py | translationalneurosurgery/tool-dimep | d2e3f8847d8a43d75b9ffc2dfaf65e5d1662c8ae | [
"MIT"
] | 2 | 2021-11-01T11:48:59.000Z | 2021-11-03T19:07:16.000Z | test/test_tools.py | agricolab/tool-dimep | d2e3f8847d8a43d75b9ffc2dfaf65e5d1662c8ae | [
"MIT"
] | 2 | 2021-03-31T10:47:46.000Z | 2021-03-31T11:31:51.000Z | test/test_tools.py | neuromti/tool-dimep | 36f9f404c99a53e85c9c492c4b0a281f832e07ba | [
"MIT"
] | null | null | null | from dimep.tools import *
import numpy as np
import pytest
@pytest.mark.parametrize("binsize", np.arange(1.0, 20.0, 1.0))
def test_downbin(binsize):
    x = np.arange(0.0, 100.0, 1)
    if binsize == 1.0:
        xhat = x
    else:
        xhat = np.arange((binsize - 1) / 2, 100.0 - binsize / 2, binsize)
    assert np.allclose(down_bin(x, int(binsize)), xhat)


def test_bwboundaries():
    assert np.allclose(bw_boundaries([0, 1, 1, 0]), [0, 1, 1, 0])
    assert np.allclose(bw_boundaries([0, 1, 1, 0, 1]), [0, 1, 1, 0, 2])
    assert np.allclose(bw_boundaries([1, 1, 1, 0, 1]), [1, 1, 1, 0, 2])
    assert np.allclose(bw_boundaries([1, 1, 1, 1, 1]), [1, 1, 1, 1, 1])
    assert np.allclose(bw_boundaries([1, 1, 0, 0, 1]), [1, 1, 0, 0, 2])
    assert np.allclose(bw_boundaries([1, 1, 0, 1, 0, 1]), [1, 1, 0, 2, 0, 3])
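# The cases above imply that bw_boundaries labels each connected run of ones
# with an increasing integer (1, 2, 3, ...) while zeros stay zero -- a 1-D
# analogue of connected-component labelling (inferred from the asserts alone).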
| 34.333333 | 77 | 0.583738 | 154 | 824 | 3.064935 | 0.214286 | 0.101695 | 0.082627 | 0.067797 | 0.476695 | 0.459746 | 0.447034 | 0.434322 | 0.366525 | 0.148305 | 0 | 0.127889 | 0.212379 | 824 | 23 | 78 | 35.826087 | 0.599384 | 0 | 0 | 0 | 0 | 0 | 0.008495 | 0 | 0 | 0 | 0 | 0 | 0.388889 | 1 | 0.111111 | false | 0 | 0.166667 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99cba028d63bd0b3002ecf1a8511685b7a12c347 | 6,825 | py | Python | crypto_comparison.py | tomsch1/defichain-arbitrage-telegram-bot | b9f5291eaa60771c0ae28f832467393611de3da4 | [
"MIT"
] | 1 | 2022-03-01T14:19:49.000Z | 2022-03-01T14:19:49.000Z | crypto_comparison.py | tomsch1/defichain-arbitrage-telegram-bot | b9f5291eaa60771c0ae28f832467393611de3da4 | [
"MIT"
] | null | null | null | crypto_comparison.py | tomsch1/defichain-arbitrage-telegram-bot | b9f5291eaa60771c0ae28f832467393611de3da4 | [
"MIT"
] | null | null | null | import cexdatacollect
import dfi_dex_prices
from symbols import CryptoExchangeSymbols
class SymbolPrice():
def __init__(self, exchange_name:str, symbol:str, price:float):
self.exchange_name: str = exchange_name
self.symbol: str = symbol
self.price: float = price
class IndirectComparison():
def __init__(self, ex_name: str, cex_dfi_pair: SymbolPrice, dex_dfi_pair: SymbolPrice, intermediate_pair: SymbolPrice=None, percentage_minus_one :bool = True):
self.ex_name: str = ex_name
self.dex_dfi_pair = dex_dfi_pair
self.cex_dfi_pair = cex_dfi_pair
self.intermediate_pair = intermediate_pair
if intermediate_pair is None:
perc = (dex_dfi_pair.price/cex_dfi_pair.price)
if percentage_minus_one:
perc = perc -1
self.percentage: float = perc
else:
print(f"{dex_dfi_pair.symbol} = {dex_dfi_pair.price}, {intermediate_pair.symbol} = {intermediate_pair.price}, {cex_dfi_pair.symbol} = {cex_dfi_pair.price}")
perc = (dex_dfi_pair.price/(cex_dfi_pair.price/intermediate_pair.price))
if percentage_minus_one:
perc = perc -1
self.percentage: float = perc
class AggregatedComparison():
def __init__(self, cex_name: str, symbol, cex_price, dex_price):
self.cex_name: str = cex_name
self.symbol: CryptoExchangeSymbols = symbol
self.cex_price: float = cex_price
self.dex_price: float = dex_price
self.percentage: float = (dex_price/cex_price)-1
class CryptoComparison():
all_pairs = []
def __init__(self, dex_data: dfi_dex_prices.DfiDexPrices, cex_data: cexdatacollect.CexPriceFetch):
self.dex_data = dex_data
self.cex_data = cex_data
def get_indirect_comparison(self, dfi_symbol: CryptoExchangeSymbols, intermediate_symbol: CryptoExchangeSymbols = None, inverse_intermediate_symbol: bool = False, inverse_cex_dfi_price: bool = True):
exchange_name = 'KuCoin'
dex_dfi_price = float(self.dex_data.dex_crypto_state_map[dfi_symbol.d_token()].data.price_ratio.ba)
if intermediate_symbol is not None:
intermediate_price = 1/float(self.cex_data.cex_price_state[exchange_name][intermediate_symbol.value]['last'])
if inverse_intermediate_symbol:
new_cex_symbol = f"DFI/{intermediate_symbol.value.split('/')[0]}"
inverse_intermediate_symbol = f"{intermediate_symbol.value.split('/')[1]}/{intermediate_symbol.value.split('/')[0]}"
intermediate_pair = SymbolPrice(exchange_name, inverse_intermediate_symbol, intermediate_price)
else:
new_cex_symbol = f"DFI/{intermediate_symbol.value.split('/')[1]}"
intermediate_pair = SymbolPrice(exchange_name, intermediate_symbol.value, intermediate_price)
cex_dfi_price = float(self.cex_data.cex_price_state[exchange_name][new_cex_symbol]['last'])
if inverse_cex_dfi_price:
cex_dfi_price = 1 / cex_dfi_price
cex_dfi_pair = SymbolPrice(exchange_name, new_cex_symbol, cex_dfi_price)
else:
intermediate_pair = None
cex_dfi_price = float(self.cex_data.cex_price_state[exchange_name][dfi_symbol.value]['last'])
if inverse_cex_dfi_price:
cex_dfi_price = 1 / cex_dfi_price
cex_dfi_pair = SymbolPrice(exchange_name, dfi_symbol.value, cex_dfi_price)
return IndirectComparison(
ex_name=exchange_name,
cex_dfi_pair=cex_dfi_pair,
dex_dfi_pair=SymbolPrice('dex', dfi_symbol.value, dex_dfi_price),
intermediate_pair=intermediate_pair,
percentage_minus_one=inverse_cex_dfi_price
)
def get_all_comparisons(self) -> [SymbolPrice]:
return [
self.get_indirect_comparison(CryptoExchangeSymbols.DFIBTC, None),
self.get_indirect_comparison(CryptoExchangeSymbols.DFIUSDT, None),
self.get_maximum_percentage(CryptoExchangeSymbols.DFIETH, [CryptoExchangeSymbols.ETHBTC, CryptoExchangeSymbols.ETHUSDT]),
self.get_maximum_percentage(CryptoExchangeSymbols.DFILTC, [CryptoExchangeSymbols.LTCBTC, CryptoExchangeSymbols.LTCUSDT]),
#Something is wrong here!
#self.get_indirect_comparison(ExchangeSymbol.DFIUSDC, ExchangeSymbol.BTCUSDC, True, False),
self.get_indirect_comparison(CryptoExchangeSymbols.DFIUSDC, CryptoExchangeSymbols.USDTUSDC, True),
self.get_maximum_percentage(CryptoExchangeSymbols.DFIBCH, [CryptoExchangeSymbols.BCHBTC, CryptoExchangeSymbols.BCHUSDT]),
self.get_maximum_percentage(CryptoExchangeSymbols.DFIDOGE, [CryptoExchangeSymbols.DOGEBTC, CryptoExchangeSymbols.DOGEUSDT]),
]
def get_maximum_percentage(self, dfi_symbol: CryptoExchangeSymbols, intermediate_pairs: [CryptoExchangeSymbols]):
all_paths = []
percentages = []
for pair in intermediate_pairs:
path = self.get_indirect_comparison(dfi_symbol, pair)
all_paths.append(path)
percentages.append(abs(path.percentage))
index_of_highest_percentage = percentages.index(max(percentages))
return all_paths[index_of_highest_percentage]
def update_pairs(self):
self.all_pairs = self.get_all_comparisons()
def get_overview(self):
pairs: [SymbolPrice] = self.get_all_comparisons()
pair_text = []
for pair in sorted(pairs, key=lambda x: abs(x.percentage), reverse=True):
if pair.intermediate_pair is None:
text = f"""
DFI -> {pair.dex_dfi_pair.symbol.split('/')[1]}:
DEX:\t{round(pair.dex_dfi_pair.price, 3)} DFI
{pair.ex_name}:\t{round(pair.cex_dfi_pair.price, 3)} DFI
\t{round(pair.percentage*100, 2)} %
"""
else:
text = f"""
DFI -> {pair.dex_dfi_pair.symbol.split('/')[1]}:
DEX:\t{round(pair.dex_dfi_pair.price, 3)} DFI
{pair.ex_name} via {pair.intermediate_pair.symbol.split('/')[1]}:\t{round(pair.cex_dfi_pair.price / pair.intermediate_pair.price, 3)} DFI
\t{round(pair.percentage * 100, 2)} %
"""
pair_text.append(text)
single_pair_text_string = '\n'.join(pair_text)
return f" Current premium overview:\n{single_pair_text_string}"
def evaluate_alarm(self, symbol_name, threshold: float):
for pair in self.all_pairs:
symbol = CryptoExchangeSymbols.from_string(symbol_name)
if pair.dex_dfi_pair.symbol == symbol.value:
if abs(pair.percentage) >= abs(threshold):
return pair
| 46.114865 | 203 | 0.670183 | 796 | 6,825 | 5.409548 | 0.15201 | 0.048769 | 0.03019 | 0.022759 | 0.391779 | 0.217139 | 0.195309 | 0.185787 | 0.185787 | 0.142127 | 0 | 0.004784 | 0.234286 | 6,825 | 147 | 204 | 46.428571 | 0.819173 | 0.016703 | 0 | 0.210526 | 0 | 0.017544 | 0.151551 | 0.102625 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087719 | false | 0 | 0.026316 | 0.008772 | 0.201754 | 0.008772 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99cf64985f998c65d2cacfaacd8edb39224b3b22 | 558 | py | Python | swig/openql/__init__.py | mmoelle1/OpenQL | 7d9e084cd3ecd237c9ea280801529d96cf67369a | [
"Apache-2.0"
] | null | null | null | swig/openql/__init__.py | mmoelle1/OpenQL | 7d9e084cd3ecd237c9ea280801529d96cf67369a | [
"Apache-2.0"
] | null | null | null | swig/openql/__init__.py | mmoelle1/OpenQL | 7d9e084cd3ecd237c9ea280801529d96cf67369a | [
"Apache-2.0"
] | 1 | 2022-01-04T20:51:43.000Z | 2022-01-04T20:51:43.000Z | # Author Imran Ashraf
# The import syntax changes slightly between python 2 and 3, so we
# need to detect which version is being used:
from sys import version_info
if version_info[0] == 3:
PY3 = True
elif version_info[0] == 2:
PY3 = False
else:
raise EnvironmentError("sys.version_info refers to a version of "
"Python neither 2 nor 3. This is not permitted. "
"sys.version_info = {}".format(version_info))
if PY3:
from .openql import Program, Kernel
else:
from openql import *
# __all__ = [ init, schedule, compile ]
| 25.363636 | 69 | 0.689964 | 83 | 558 | 4.518072 | 0.614458 | 0.176 | 0.069333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025463 | 0.225806 | 558 | 21 | 70 | 26.571429 | 0.842593 | 0.297491 | 0 | 0.153846 | 0 | 0 | 0.27907 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99d362a541a18ec8d12caecd6f148f337e916cdc | 1,391 | py | Python | src/external_script_runner.py | salvatorenovelli/python-automation-runner | 7d18d02f3e60980d18e97991e7991ba3aa43e79a | [
"Apache-2.0"
] | 1 | 2019-06-05T00:56:13.000Z | 2019-06-05T00:56:13.000Z | src/external_script_runner.py | salvatorenovelli/python-automation-runner | 7d18d02f3e60980d18e97991e7991ba3aa43e79a | [
"Apache-2.0"
] | null | null | null | src/external_script_runner.py | salvatorenovelli/python-automation-runner | 7d18d02f3e60980d18e97991e7991ba3aa43e79a | [
"Apache-2.0"
] | null | null | null | import logging
import os
import signal
import time
from multiprocessing import Process
class ExternalScript:
    def __init__(self, name, path):
        self.name = name
        self.path = path
        self.process = None

    def start_main_loop(self):
        if self.process is None:
            self.process = Process(target=run_external_script_main_loop,
                                   args=(self.name, self.path))
            self.process.start()
            logging.info("External script started. PID: %d", self.process.pid)

    def stop_main_loop(self):
        if self.process is not None:
            logging.info("Killing external script with PID: %d", self.process.pid)
            os.kill(self.process.pid, signal.SIGTERM)
            self.process = None


def run_external_script_main_loop(name, path):
    try:
        script = import_source(name, path)
        logging.info("Script loaded")
        while True:
            start = time.time()
            try:
                script.main_loop()
            except Exception as e:
                logging.error("Error in script '%s': %s", path, str(e))
            # Throttle scripts whose main_loop returns almost immediately,
            # so a trivial loop body does not spin the CPU.
            if (time.time() - start) < .05:
                time.sleep(1)
    except Exception as e1:
        logging.error("Unable to run main_loop: %s", str(e1))


def import_source(name, path):
    # NOTE: SourceFileLoader.load_module() is deprecated on modern Python
    # (3.4+); kept to preserve this module's original behaviour.
    from importlib.machinery import SourceFileLoader
    return SourceFileLoader(name, path).load_module()
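
if __name__ == "__main__":
    # Hypothetical smoke test (assumption — the script path below is
    # illustrative and not part of the original module; the target script
    # is expected to define a main_loop() function).
    logging.basicConfig(level=logging.INFO)
    runner = ExternalScript("demo", "/tmp/demo_script.py")
    runner.start_main_loop()
    time.sleep(5)
    runner.stop_main_loop()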
| 30.23913 | 101 | 0.615385 | 175 | 1,391 | 4.771429 | 0.342857 | 0.118563 | 0.050299 | 0.043114 | 0.167665 | 0.064671 | 0.064671 | 0 | 0 | 0 | 0 | 0.00603 | 0.284687 | 1,391 | 45 | 102 | 30.911111 | 0.833166 | 0 | 0 | 0.108108 | 0 | 0 | 0.093458 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0.216216 | 0 | 0.405405 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99d6b69bacc16386a2d51e2e47e08c035278f928 | 1,029 | py | Python | drone/remote_control/start.py | dpm76/eaglebone | 46403d03359a780f385ccb1f05b462869eddff89 | [
"ISC"
] | null | null | null | drone/remote_control/start.py | dpm76/eaglebone | 46403d03359a780f385ccb1f05b462869eddff89 | [
"ISC"
] | 18 | 2016-03-30T08:43:45.000Z | 2017-03-27T11:14:17.000Z | drone/remote_control/start.py | dpm76/eaglebone | 46403d03359a780f385ccb1f05b462869eddff89 | [
"ISC"
] | 2 | 2016-03-06T20:38:06.000Z | 2019-09-10T14:46:35.000Z | # -*- coding: utf-8 -*-
'''
Created on 15/06/2015

@author: david
'''
import sys

if sys.version_info.major < 3:
    from SocketServer import TCPServer
else:
    from socketserver import TCPServer

import datetime
import logging

from remote_control.dispatching import Dispatcher


def main():
    # NOTE: the root logger level is ERROR, so the logging.info() calls
    # below are recorded only if the level is lowered.
    logging.basicConfig(
        filename="remote_control_{0}.log".format(
            datetime.datetime.now().strftime("%Y%m%d_%H%M%S")),
        format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',
        datefmt='%d/%m/%y %H:%M:%S',
        level=logging.ERROR)

    logging.info("**** [Starting server...] ****")
    server = TCPServer(("0.0.0.0", 2121), Dispatcher)
    message = "Waiting for remote control..."
    logging.info(message)
    print(message)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print("[CTRL+C] -> Stop")
    finally:
        print("Goodbye!")
        logging.info("**** [Server finished] ****")


if __name__ == '__main__':
    main()
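# Hypothetical smoke test (assumption — not part of the original file): with
# the server running, confirm the TCP endpoint accepts connections.
#
#     import socket
#     with socket.create_connection(("127.0.0.1", 2121), timeout=2) as s:
#         print("remote control port is up")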
| 21.4375 | 118 | 0.594752 | 119 | 1,029 | 5.02521 | 0.571429 | 0.065217 | 0.073579 | 0.103679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027954 | 0.23518 | 1,029 | 47 | 119 | 21.893617 | 0.731893 | 0.058309 | 0 | 0 | 0 | 0 | 0.235172 | 0.046826 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.230769 | 0 | 0.269231 | 0.115385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99d6c24393f666e309f1685a7bc70884ed6cd31a | 2,390 | py | Python | src/threadpool.py | forumulator/pySlader | b2300da6c10f46f0fa9933be722462a42a98bdbc | [
"MIT"
] | 1 | 2018-10-28T23:33:57.000Z | 2018-10-28T23:33:57.000Z | src/threadpool.py | forumulator/pySlader | b2300da6c10f46f0fa9933be722462a42a98bdbc | [
"MIT"
] | null | null | null | src/threadpool.py | forumulator/pySlader | b2300da6c10f46f0fa9933be722462a42a98bdbc | [
"MIT"
] | null | null | null | from queue import Queue, Empty
import threading
from threading import Thread
class Worker(Thread):
    """Thread executing tasks from a given tasks queue. The thread can be
    signalled to exit.
    """
    _TIMEOUT = 2

    def __init__(self, tasks, th_num):
        Thread.__init__(self)
        self.tasks = tasks
        self.daemon, self.th_num = True, th_num
        self.done = threading.Event()
        self.start()

    def run(self):
        while not self.done.is_set():
            try:
                func, args, kwargs = self.tasks.get(block=True,
                                                    timeout=self._TIMEOUT)
                try:
                    func(*args, **kwargs)
                except Exception as e:
                    print(e)
                finally:
                    self.tasks.task_done()
            except Empty:
                # Timed out waiting for work; loop around and re-check the
                # exit flag.
                pass
        return

    def signal_exit(self):
        """Signal to the thread to exit."""
        self.done.set()


class ThreadPool:
    """Pool of threads consuming tasks from a queue."""
    def __init__(self, num_threads, tasks=None):
        # None (rather than a mutable default list) stands in for "no
        # initial tasks".
        self.tasks = Queue(num_threads)
        self.workers = []
        self.done = False
        self._init_workers(num_threads)
        for task in (tasks or ()):
            self.tasks.put(task)

    def _init_workers(self, num_threads):
        for i in range(num_threads):
            self.workers.append(Worker(self.tasks, i))

    def add_task(self, func, *args, **kwargs):
        """Add a task to the queue."""
        self.tasks.put((func, args, kwargs))

    def _close_all_threads(self):
        """Signal all threads to exit and lose the references to them."""
        for worker in self.workers:
            worker.signal_exit()
        self.workers = []

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue."""
        self.tasks.join()

    def __del__(self):
        self._close_all_threads()


def create_task(func, *args, **kwargs):
    return (func, args, kwargs)


if __name__ == '__main__':
    from random import randrange
    from time import sleep

    delays = [randrange(1, 10) for i in range(30)]

    def wait_delay(d):
        print('sleeping for (%d)sec' % d)
        sleep(d)

    pool = ThreadPool(20)
    for i, d in enumerate(delays):
        pool.add_task(wait_delay, d)
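    # Alternative construction (assumption — an extra demo, not in the
    # original): tuples built with create_task() can seed a second pool via
    # the constructor's `tasks` argument.
    pool2 = ThreadPool(20, tasks=[create_task(wait_delay, d) for d in delays[:5]])
    pool2.wait_completion()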
pool.wait_completion() | 27.471264 | 79 | 0.562762 | 296 | 2,390 | 4.358108 | 0.310811 | 0.062791 | 0.065116 | 0.026357 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005031 | 0.334728 | 2,390 | 87 | 80 | 27.471264 | 0.806289 | 0.085356 | 0 | 0.065574 | 0 | 0 | 0.013592 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.180328 | false | 0.016393 | 0.081967 | 0.016393 | 0.344262 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99d9a48b5d99ac3508e56112ec75780dfbe5858e | 8,893 | py | Python | rnaindel/rnaindel_lib/indel_annotator.py | adamdingliang/RNAIndel | bc154a25a459ca0dd5c1f2ce064944e979105d23 | [
"Apache-2.0"
] | 1 | 2019-01-07T21:21:28.000Z | 2019-01-07T21:21:28.000Z | rnaindel/rnaindel_lib/indel_annotator.py | adamdingliang/RNAIndel | bc154a25a459ca0dd5c1f2ce064944e979105d23 | [
"Apache-2.0"
] | 2 | 2019-01-05T16:39:41.000Z | 2019-01-14T16:00:43.000Z | rnaindel/rnaindel_lib/indel_annotator.py | adamdingliang/RNAIndel | bc154a25a459ca0dd5c1f2ce064944e979105d23 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""2nd step of the analysis
Checks if indels are coding or non-coding and annotates
coding indels with variant effect
indel_annotator is the main routine of this module
"""
import sys
import pysam
import logging
import pandas as pd
from functools import partial
from .indel_curator import curate_indel_in_genome
from .indel_sequence import CodingSequenceWithIndel
logger = logging.getLogger(__name__)
def indel_annotator(df, refgene, fasta, chr_prefixed):
    """Screens for coding indels and annotates them with variant effect

    Args:
        df (pandas.DataFrame): with a header: 'chr', 'pos', 'ref', 'alt'
        refgene (str): path to refCodingExon.bed.gz
        fasta (str): path to fasta
    Returns:
        df (pandas.DataFrame): with indels annotated
    """
    df["is_ins"] = df.apply(is_insertion, axis=1)
    df["indel_seq"] = df.apply(get_indel_seq, axis=1)

    # performs annotation
    exon_data = pysam.TabixFile(refgene)
    anno = partial(
        annotate_indels, exon_data=exon_data, fasta=fasta, chr_prefixed=chr_prefixed
    )
    df["annotation"] = df.apply(anno, axis=1)

    # removes unannotated calls (non-coding indels)
    df = df[df["annotation"] != "-"]
    if len(df) == 0:
        logger.warning("No indels annotated in coding region. Analysis done.")
        sys.exit(0)

    # gene symbols
    df["gene_symbol"] = df.apply(get_gene_symbol, axis=1)

    # formats the header
    df = df[
        [
            "chr",
            "pos",
            "ref",
            "alt",
            "rescued",
            "indel_seq",
            "annotation",
            "gene_symbol",
            "is_ins",
        ]
    ]

    return df

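# Input sketch (assumption — the coordinates and file paths below are
# illustrative, not taken from a real dataset):
#
#     calls = pd.DataFrame({"chr": ["chr1"], "pos": [1000000],
#                           "ref": ["-"], "alt": ["A"], "rescued": ["-"]})
#     annotated = indel_annotator(calls, "refCodingExon.bed.gz",
#                                 "genome.fa", chr_prefixed=True)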
def is_insertion(row):
    """Encodes if the indel is an insertion or deletion.

    Args:
        row (pandas.Series): reference seq (str) at index 'ref'
    Returns:
        is_insertion (int): 1 if insertion, 0 if deletion
    """
    is_insertion = 0
    if row["ref"] == "-":
        is_insertion = 1

    return is_insertion

def get_indel_seq(row):
    """Gets indel sequence

    Args:
        row (pandas.Series): a Series with 'ref' and 'alt' indices
    Returns:
        indel_seq (str): inserted or deleted sequence
    """
    if row["ref"] == "-":
        indel_seq = row["alt"]
    else:
        indel_seq = row["ref"]

    return indel_seq

def annotate_indels(row, exon_data, fasta, chr_prefixed, postprocess=False):
    """Annotates indels for all RefSeq isoforms

    Args:
        row (pandas.Series): a Series with indices
                             'chr', 'pos', 'is_ins', 'indel_seq'
        exon_data (pysam.TabixFile): coding exon database
        fasta (str): path to fasta file
        chr_prefixed (bool): True if chromosome names in BAM are "chr"-prefixed
        postprocess (bool): True if used in indel_postprocessor. Default to False
    Returns:
        annotation (str): Each token represents an annotation for one
                          of the isoforms and is formatted as:

                          GeneSymbol|RefSeqAccession|CodonPosition|Effect|IsInsensitive

                          GeneSymbol: RefSeq gene name
                          RefSeqAccession: RefSeq mRNA accession number
                          CodonPosition: the position of the codon (not the
                                         amino acid) affected in the isoform
                                         specified in RefSeqAccession
                          Effect: consequences of the indel.
                                  See CodingSequenceWithIndel for detail
                          IsInsensitive: 1 if the indel is
                                         nonsense-mediated-decay insensitive,
                                         0 otherwise

                          '-' for non-coding indels
    """
    # 'chrom' rather than 'chr' to avoid shadowing the builtin chr()
    chrom = row["chr"]
    pos = row["pos"]
    idl_type = row["is_ins"]
    idl_seq = row["indel_seq"]

    # generates CodingSequenceWithIndel instances
    idls = generate_coding_indels(
        chrom, pos, idl_type, idl_seq, exon_data, fasta, chr_prefixed
    )

    # annotates for all RefSeq isoforms
    annots = []
    if idls != []:
        for idl in idls:
            gene = idl.gene_symbol
            refseq_acc = idl.accession
            codon_pos, effect = idl.effect()
            is_insensitive = idl.is_nmd_insensitive()
            if not postprocess:
                anno = (
                    gene
                    + "|"
                    + refseq_acc
                    + "|"
                    + str(codon_pos)
                    + "|"
                    + effect
                    + "|"
                    + str(is_insensitive)
                )
            else:
                anno = gene + "|" + refseq_acc + "|" + str(codon_pos) + "|" + effect

            annots.append(anno)

    if len(annots) == 0:
        annotation = "-"
    else:
        annotation = ",".join(annots)

    return annotation

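# Format sketch (assumption — the gene, accession and effect strings are made
# up for illustration): an indel hitting two isoforms of one gene comes back
# as comma-joined, pipe-delimited tokens, e.g.
#
#     "GENE1|NM_000001|42|frameshift|0,GENE1|NM_000002|42|frameshift|1"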
def generate_coding_indels(chrom, pos, idl_type, idl_seq, exon_data, fasta, chr_prefixed):
    """Generates coding indel objects

    Args:
        chrom (str): chr1-22, chrX or chrY. Note "chr"-prefixed.
        pos (int): 1-based genomic position
        idl_type (int): 1 for insertion, 0 for deletion
        idl_seq (str): inserted or deleted sequence
        exon_data (pysam.TabixFile): coding exon database
        fasta (str): path to fasta file
        chr_prefixed (bool): True if chromosome names in BAM or FASTA are "chr"-prefixed
    Returns:
        coding_idl_lst (list): a list of CodingSequenceWithIndel obj
                               empty list if non-coding indel
    """
    coding_idl_lst = []

    try:
        candidate_genes = exon_data.fetch(chrom, pos - 11, pos + 11)
    except ValueError:
        # e.g., the contig is absent from the tabix index
        candidate_genes = None

    # check for UTR
    if candidate_genes:
        for line in candidate_genes:
            lst = line.split("\t")

            # parsing exon info
            info = lst[3].split("|")
            exon = int(info[2])
            last_exon = int(info[3])

            # exon start and end
            exon_start, exon_end = int(lst[1]), int(lst[2])

            # strand
            strand = lst[4]

            # 5'UTR on positive strand (insertion)
            if strand == "+" and exon == 1 and idl_type == 1 and exon_start >= pos:
                pass
            # 5'UTR on positive strand (deletion)
            elif strand == "+" and exon == 1 and idl_type == 0 and exon_start > pos:
                pass
            # 3'UTR on positive strand
            elif strand == "+" and exon == last_exon and pos > exon_end:
                pass
            # 5'UTR on negative strand
            elif strand == "-" and exon == 1 and pos > exon_end:
                pass
            # 3'UTR on negative strand (insertion)
            elif (
                strand == "-"
                and exon == last_exon
                and idl_type == 1
                and exon_start >= pos
            ):
                pass
            # 3'UTR on negative strand (deletion)
            elif (
                strand == "-"
                and exon == last_exon
                and idl_type == 0
                and exon_start > pos
            ):
                pass
            else:
                indel_in_reference_genome = curate_indel_in_genome(
                    fasta, chrom, pos, idl_type, idl_seq, chr_prefixed
                )
                lt_seq = indel_in_reference_genome.lt_seq
                rt_seq = indel_in_reference_genome.rt_seq

                accession = info[0]
                gene_symbol = info[1]
                cds_start = int(info[4])

                prev_exon = lst[5].split("|")
                prev_exon_start, prev_exon_end = int(prev_exon[0]), int(prev_exon[1])

                next_exon = lst[6].split("|")
                next_exon_start, next_exon_end = int(next_exon[0]), int(next_exon[1])

                indel = CodingSequenceWithIndel(
                    chrom,
                    pos,
                    idl_type,
                    lt_seq,
                    idl_seq,
                    rt_seq,
                    strand,
                    accession,
                    gene_symbol,
                    exon,
                    exon_start,
                    exon_end,
                    last_exon,
                    cds_start,
                    prev_exon_start,
                    prev_exon_end,
                    next_exon_start,
                    next_exon_end,
                )

                coding_idl_lst.append(indel)

    return coding_idl_lst

def get_gene_symbol(row):
    """Extracts gene name from annotation

    Args:
        row (pandas.Series): annotation info (str) at 'annotation' index
    Returns:
        gene_symbol (str): gene name(s)
    """
    pd.options.mode.chained_assignment = None

    lst = row["annotation"].split(",")
    genes = [token.split("|")[0] for token in lst]
    gene_symbol = ",".join(set(genes))

    return gene_symbol
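
if __name__ == "__main__":
    # Minimal smoke test of the pure-pandas helper (assumption — the
    # annotation string is illustrative, not from a real run):
    demo = pd.Series(
        {"annotation": "GENE1|NM_000001|42|frameshift|0,GENE1|NM_000002|42|frameshift|1"}
    )
    print(get_gene_symbol(demo))  # -> GENE1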
| 30.145763 | 88 | 0.534353 | 1,002 | 8,893 | 4.576846 | 0.220559 | 0.028783 | 0.017008 | 0.018535 | 0.257959 | 0.209769 | 0.152638 | 0.127344 | 0.1099 | 0.07283 | 0 | 0.009728 | 0.375801 | 8,893 | 294 | 89 | 30.248299 | 0.816429 | 0.342179 | 0 | 0.151899 | 0 | 0 | 0.040022 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037975 | false | 0.037975 | 0.044304 | 0 | 0.120253 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99dd827cc75c97313b16be59de14d6bad416a8e2 | 1,242 | py | Python | rugosa/emulation/call_hooks/win_api/shlwapi.py | Defense-Cyber-Crime-Center/rugosa | 70f5b1db7e3f02ecccb0495fe1c0c77930769276 | [
"MIT"
] | 1 | 2022-03-13T03:03:31.000Z | 2022-03-13T03:03:31.000Z | rugosa/emulation/call_hooks/win_api/shlwapi.py | Defense-Cyber-Crime-Center/rugosa | 70f5b1db7e3f02ecccb0495fe1c0c77930769276 | [
"MIT"
] | null | null | null | rugosa/emulation/call_hooks/win_api/shlwapi.py | Defense-Cyber-Crime-Center/rugosa | 70f5b1db7e3f02ecccb0495fe1c0c77930769276 | [
"MIT"
] | null | null | null | """
Functions found in shlwapi.dll
Shell Lightweight Utility Functions
"""
import logging
import ntpath
from ...call_hooks import builtin_func
logger = logging.getLogger(__name__)
@builtin_func("PathAppendA")
@builtin_func("PathAppendW")
#typedef(BOOL PathAppendA(LPSTR pszPath,LPCSTR pszMore);)
def pathappend(cpu_context, func_name, func_args):
"""
Appends one path to the end of another
"""
wide = func_name.endswith(u"W")
path_ptr, more_ptr = func_args
curr_path = cpu_context.memory.read_string(path_ptr, wide=wide)
more_path = cpu_context.memory.read_string(more_ptr, wide=wide)
full_path = ntpath.join(curr_path, more_path)
cpu_context.memory.write_string(path_ptr, full_path, wide=wide)
return True
@builtin_func("PathAddBackslashA")
@builtin_func("PathAddBackslashW")
#typedef(LPWSTR PathAddBackslashW(LPWSTR pszPath));)
def pathaddbackslash(cpu_context, func_name, func_args):
"""
Appends a backslash to the path
"""
wide = func_name.endswith(u"W")
path_ptr = func_args[0]
curr_path = cpu_context.memory.read_string(path_ptr, wide=wide)
full_path = curr_path + "\\"
cpu_context.memory.write_string(path_ptr, full_path, wide=wide)
return True
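# Quick sanity check of the pure-Python half of pathappend (ntpath is the
# standard library's Windows-path module, so this runs on any platform):
#
#     import ntpath
#     ntpath.join("C:\\Windows", "System32")   # -> 'C:\\Windows\\System32'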
| 25.346939 | 67 | 0.738325 | 171 | 1,242 | 5.081871 | 0.362573 | 0.080552 | 0.080552 | 0.115075 | 0.482163 | 0.437284 | 0.402762 | 0.326812 | 0.260069 | 0.260069 | 0 | 0.000952 | 0.154589 | 1,242 | 48 | 68 | 25.875 | 0.826667 | 0.198873 | 0 | 0.347826 | 0 | 0 | 0.062827 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99ddb51556072c9fe39d711ea4b8ad9ad506d836 | 1,057 | py | Python | massage/urls.py | Johncs2d/accounting-system | 46d660044b198afadbf6f90b72376f6a69166c5f | [
"MIT"
] | null | null | null | massage/urls.py | Johncs2d/accounting-system | 46d660044b198afadbf6f90b72376f6a69166c5f | [
"MIT"
] | 2 | 2021-03-19T00:39:18.000Z | 2021-03-30T12:48:58.000Z | massage/urls.py | Johncs2d/accounting-system | 46d660044b198afadbf6f90b72376f6a69166c5f | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
    path('', views.index, name='index'),
    path('info', views.info, name='info'),
    path('chart', views.charts, name='chart'),
    path('trialbalance', views.trialbalance, name='trialbalance'),
    path('ledger', views.ledger, name='ledger'),
    path('balancesheet', views.balancesheet, name='balancesheet'),
    path('incomestatement', views.incomestatement, name='incomestatement'),
    path('journalize', views.journalize, name='journalize'),
    path('sign_up/', views.sign_up, name='sign_up'),
    path('insertaccount', views.insertaccount, name='insertaccount'),
    path('inserjournal', views.inserjournal, name='inserjournal'),
    path('register', views.signupform, name='register'),
    path('login', views.loginForm, name='login'),
    path('logout', views.logusout, name='logout'),
    path('log_in', views.log_me_in, name='log_in'),
    path('journal_list', views.journalList, name='journal_list'),
    path('journal_control', views.journalControls, name='journalControls'),
] | 48.045455 | 75 | 0.69631 | 120 | 1,057 | 6.05 | 0.291667 | 0.024793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.122044 | 1,057 | 22 | 76 | 48.045455 | 0.782328 | 0 | 0 | 0 | 0 | 0 | 0.285444 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.095238 | 0 | 0.095238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99e2c1f902345ae0e7cb8b3c3a60e5be6b0705b4 | 2,680 | py | Python | QLM_utils.py | qiuchili/PyQLM | ccd5f64f86cf88f618e30808d598f3785f5e6483 | [
"MIT"
] | null | null | null | QLM_utils.py | qiuchili/PyQLM | ccd5f64f86cf88f618e30808d598f3785f5e6483 | [
"MIT"
] | null | null | null | QLM_utils.py | qiuchili/PyQLM | ccd5f64f86cf88f618e30808d598f3785f5e6483 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
from __future__ import division
import numpy as np
import os
import math
import pickle as Pickle
import pynlpir
import random
from math import log
from numpy import linalg as LA
def F(rhoM, proDict):
    # print rhoM
    res = 0
    for pm in proDict:
        P = np.trace(np.dot(proDict[pm][1], rhoM))
        res += proDict[pm][0] * log(P)
    # print('value of target F function = {}'.format(res))
    return res


def Grad_F(rhoM, proDict, dim):
    res = np.zeros((dim, dim))
    for pm in proDict:
        trace_val = np.trace(np.dot(proDict[pm][1], rhoM))
        res += (proDict[pm][0] * proDict[pm][1] / trace_val)
    return res


def rho_bar(rhoM, proDict, dim):
    grad_f = Grad_F(rhoM, proDict, dim)
    res = (np.dot(grad_f, rhoM) + np.dot(rhoM, grad_f)) / 2
    return res


def rho_tilde(rhoM, proDict, dim):
    grad_f = Grad_F(rhoM, proDict, dim)
    grad_rho_grad = np.dot(np.dot(grad_f, rhoM), grad_f)
    res = grad_rho_grad / np.trace(grad_rho_grad)
    return res


def D_bar(rhoM, proDict, dim):
    return rho_bar(rhoM, proDict, dim) - rhoM


def D_tilde(rhoM, proDict, dim):
    return rho_tilde(rhoM, proDict, dim) - rhoM


def q_t(t, rhoM, proDict, dim):
    grad_f = Grad_F(rhoM, proDict, dim)
    grad_rho_grad = np.dot(np.dot(grad_f, rhoM), grad_f)
    res = 1 + 2 * t + t * t * np.trace(grad_rho_grad)
    return res


def D(t, rhoM, proDict, dim):  # Equation (19)
    grad_f = Grad_F(rhoM, proDict, dim)
    grad_rho_grad = np.dot(np.dot(grad_f, rhoM), grad_f)
    d_bar = D_bar(rhoM, proDict, dim)
    d_tilde = D_tilde(rhoM, proDict, dim)
    q = q_t(t, rhoM, proDict, dim)
    res = (2 / q) * d_bar + (t * np.trace(grad_rho_grad) / q) * d_tilde
    return res


def set_t(t):
    return max(1, t)


def judgement(rhoM, proDict, f_old, dim, threshold_values=(1e-7, 1e-7, 1e-7)):
    grad_f = Grad_F(rhoM, proDict, dim)
    grad_rho_grad = np.dot(np.dot(grad_f, rhoM), grad_f)
    grad_rho = np.dot(grad_f, rhoM)
    diff = f_old - F(rhoM, proDict)
    if (LA.norm(rhoM - grad_rho_grad) < threshold_values[0]
            and LA.norm(rhoM - grad_rho) < threshold_values[1]
            and abs(diff) < threshold_values[2]):
        return False
    else:
        return True


def judge_t(t, d, rhoM, proDict, dim, iter_r):
    # print 'please see here:'
    f_new = F(rhoM + t * d, proDict)
    f_old = F(rhoM, proDict)
    diff = iter_r * t * np.trace(np.dot(Grad_F(rhoM, proDict, dim), d))
    if f_new == f_old:
        return False
    # print(f_new - f_old)
    return f_new <= f_old + diff

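# Smoke-test sketch (assumption — a tiny random instance, not from the
# original repository): with a maximally mixed 4x4 state and a projector
# dictionary of the form {name: [weight, projector]}, F() returns the
# weighted log-likelihood.
#
#     rho = np.eye(4) / 4
#     proj = np.outer(v, v) / np.inner(v, v)   # v: any nonzero 1x4 vector
#     print(F(rho, {'word_0': [1.0, proj]}))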
def test_set_generator(proj_num, vector_dim):
    dictionary = {}
    for i in range(proj_num):
        weight = np.random.random()
        vector = np.random.rand(1, vector_dim)
        vector = vector / (math.sqrt(np.dot(vector, np.transpose(vector))))
        projector = np.outer(vector, vector) / np.inner(vector, vector)
        dictionary['word_' + str(i)] = [weight, projector]
return dictionary | 26.8 | 140 | 0.691791 | 494 | 2,680 | 3.574899 | 0.184211 | 0.143262 | 0.150623 | 0.06342 | 0.463194 | 0.292752 | 0.270102 | 0.253681 | 0.253681 | 0.218573 | 0 | 0.010582 | 0.153731 | 2,680 | 100 | 141 | 26.8 | 0.768078 | 0.050746 | 0 | 0.256757 | 0 | 0 | 0.00197 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.162162 | false | 0 | 0.121622 | 0.040541 | 0.432432 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99e491fbbf30f0808a4fe6456d18b36c7d57a986 | 1,647 | py | Python | src/server.py | vishwajithsandaru/party-qr-generator-flask | 10c44ec8aa03e561e9e6415b681a0e200184b120 | [
"MIT"
] | null | null | null | src/server.py | vishwajithsandaru/party-qr-generator-flask | 10c44ec8aa03e561e9e6415b681a0e200184b120 | [
"MIT"
] | null | null | null | src/server.py | vishwajithsandaru/party-qr-generator-flask | 10c44ec8aa03e561e9e6415b681a0e200184b120 | [
"MIT"
] | null | null | null | from flask import Flask, request, jsonify
import base64
import os
from Crypto.Cipher import AES
from urllib.parse import unquote

app = Flask(__name__)

secret = 'TESTTESTTESTTEST'
p_char = '%'


def unpad_str(msg):
    msg = msg.decode('utf-8')
    msg = msg.rstrip('%')
    return msg


def decrypt_message(msg, key):
    decoded_encrypted_msg = base64.b64decode(msg)
    # AES in ECB mode with a bytes key (ECB was the implicit default of the
    # legacy PyCrypto API; pycryptodome requires the mode to be explicit).
    cipher = AES.new(key.encode(), AES.MODE_ECB)
    try:
        decrypted_msg = cipher.decrypt(decoded_encrypted_msg)
    except Exception:
        raise Exception('Error Decrypting')
    else:
        unpadded_private_msg = unpad_str(decrypted_msg)
        return unpadded_private_msg


def decode_str(msg):
    msg = msg.rstrip(p_char)
    splitted = msg.split(':')
    splitted_bev = splitted[4].split(';')
    resp_dect = {}
    resp_dect['id'] = int(float(splitted[0]))
    resp_dect['name'] = splitted[1]
    resp_dect['email'] = splitted[2]
    resp_dect['emp_no'] = 'N/A' if (splitted[3] == '0.0') else splitted[3]
    resp_dect['beverages'] = splitted_bev
    resp_dect['food_preference'] = splitted[5]
    return resp_dect


@app.route("/decrypt", methods=['GET'])
def decrypt():
    args = request.args
    encrypted_string = args.get('enc_str')
    dec_str = ''
    try:
        dec_str = decrypt_message(encrypted_string, secret)
    except Exception:
        return jsonify({'error': 'Invalid User!!'}), 400, {'ContentType': 'application/json'}
    else:
        processed_res = decode_str(dec_str)
        return jsonify({'data': processed_res}), 200, {'ContentType': 'application/json'}


if __name__ == "__main__":
    app.run(host="0.0.0.0", debug=True)
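# Request sketch (assumption — the ciphertext value is illustrative): the
# endpoint expects a base64-encoded AES payload in the `enc_str` query
# parameter.
#
#     curl "http://localhost:5000/decrypt?enc_str=<base64-ciphertext>"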
| 25.734375 | 93 | 0.659381 | 217 | 1,647 | 4.774194 | 0.428571 | 0.061776 | 0.017375 | 0.023166 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019757 | 0.200971 | 1,647 | 63 | 94 | 26.142857 | 0.767477 | 0 | 0 | 0.12 | 0 | 0 | 0.120219 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.14 | 0 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99e565c6c04330549af61e5e41e6e64599dd5a85 | 404 | py | Python | Day 02 - Beginner - Understanding Data Types and How to Manipulate Strings/04_PROJECT_tip_calculator.py | not-lucky/100_Days_of_Code_-_The_Complete_Python_Pro_Bootcamp_for_2022 | 2c21c190ab756176bd7b577b3f8a0370b75c3828 | [
"MIT"
] | null | null | null | Day 02 - Beginner - Understanding Data Types and How to Manipulate Strings/04_PROJECT_tip_calculator.py | not-lucky/100_Days_of_Code_-_The_Complete_Python_Pro_Bootcamp_for_2022 | 2c21c190ab756176bd7b577b3f8a0370b75c3828 | [
"MIT"
] | null | null | null | Day 02 - Beginner - Understanding Data Types and How to Manipulate Strings/04_PROJECT_tip_calculator.py | not-lucky/100_Days_of_Code_-_The_Complete_Python_Pro_Bootcamp_for_2022 | 2c21c190ab756176bd7b577b3f8a0370b75c3828 | [
"MIT"
] | null | null | null | print("wewcome to the tip cawcuwatow. >_<")
bill = float(input("What is the total bill?\n$"))
tip_percentage = float(input("What percentage tip would you like to give?\n"))
split_among = int(input("How many people will split the bill?\n"))
total_bill_with_tip = bill + (bill * tip_percentage / 100)
each_pay = round(total_bill_with_tip / split_among, 2)
print(f"Each person should pay: ${each_pay:.2f}")
| 36.727273 | 77 | 0.725248 | 68 | 404 | 4.117647 | 0.5 | 0.096429 | 0.1 | 0.114286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014327 | 0.136139 | 404 | 10 | 78 | 40.4 | 0.787966 | 0 | 0 | 0 | 0 | 0 | 0.443069 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99ea05eb7467ddb1c7614fa510b122a7f9c67b31 | 4,619 | py | Python | lib/consistencyCheck.py | pnlbwh/multi-shell-dMRIharmonization | 46e207c5d2b7fd68ddbbd1b305c33cfd45351ee8 | [
"CNRI-Python",
"Info-ZIP"
] | 4 | 2020-04-28T15:31:07.000Z | 2022-02-15T07:04:21.000Z | lib/consistencyCheck.py | pnlbwh/multi-shell-dMRIharmonization | 46e207c5d2b7fd68ddbbd1b305c33cfd45351ee8 | [
"CNRI-Python",
"Info-ZIP"
] | 30 | 2019-11-10T20:51:34.000Z | 2021-04-19T19:08:14.000Z | lib/consistencyCheck.py | pnlbwh/multi-shell-dMRIharmonization | 46e207c5d2b7fd68ddbbd1b305c33cfd45351ee8 | [
"CNRI-Python",
"Info-ZIP"
] | 3 | 2019-10-18T16:06:03.000Z | 2021-02-21T01:10:45.000Z | #!/usr/bin/env python
# ===============================================================================
# dMRIharmonization (2018) pipeline is written by-
#
# TASHRIF BILLAH
# Brigham and Women's Hospital/Harvard Medical School
# tbillah@bwh.harvard.edu, tashrifbillah@gmail.com
#
# ===============================================================================
# See details at https://github.com/pnlbwh/dMRIharmonization
# Submit issues at https://github.com/pnlbwh/dMRIharmonization/issues
# View LICENSE at https://github.com/pnlbwh/dMRIharmonization/blob/master/LICENSE
# ===============================================================================
from conversion import read_bvals, read_imgs, read_imgs_masks
import numpy as np
from warnings import warn
from plumbum import local
from util import abspath, load, isfile, getpid
from findBshells import findBShells
import sys
def check_bshells(ref_imgs, ref_bvals):
    unmatched = []
    for imgPath in ref_imgs:
        imgPath = local.path(imgPath)
        if not imgPath.exists():
            raise FileNotFoundError(imgPath)

        inPrefix = abspath(imgPath).split('.nii')[0]
        bvals = findBShells(inPrefix + '.bval')

        if (bvals == ref_bvals).all():
            print('b-shells matched for', imgPath.name)
        else:
            print(f'\nUnmatched b-shells for {imgPath.name}')
            print(bvals)
            print(f'ref_bvals {ref_bvals}\n')
            unmatched.append(imgPath._path)

    print('')
    if len(unmatched):
        print('Unmatched cases:')
        print(unmatched)
        raise ValueError('Leave out the unmatched cases or change the reference case '
                         'for determining b-shells to run multi-shell-dMRIharmonization')
    else:
        print('All cases have the same b-shells. Data is good for running multi-shell-dMRIharmonization')

    print('')

def check_resolution(ref_imgs, ref_res):
    unmatched = []
    for imgPath in ref_imgs:
        imgPath = local.path(imgPath)
        if not imgPath.exists():
            raise FileNotFoundError(imgPath)

        res = load(imgPath._path).header['pixdim'][1:4]

        if (res - ref_res).sum() <= 10e-6:
            print('spatial resolution matched for', imgPath.name)
        else:
            print(f'\nUnmatched spatial resolution for {imgPath.name}')
            print(res)
            print(f'ref_res {ref_res}\n')
            unmatched.append(imgPath._path)

    print('')
    if len(unmatched):
        print('Unmatched cases:')
        print(unmatched)
        raise ValueError('Leave out the unmatched cases or change the reference case '
                         'for determining spatial resolution to run multi-shell-dMRIharmonization')
    else:
        print('All cases have the same spatial resolution. Data is good for running multi-shell-dMRIharmonization')

    print('')

def consistencyCheck(ref_csv, outputBshellFile=None, outPutResolutionFile=None):
    try:
        ref_imgs, _ = read_imgs_masks(ref_csv)
    except Exception:
        ref_imgs = read_imgs(ref_csv)

    if isfile(outputBshellFile) and isfile(outPutResolutionFile):
        ref_bvals = read_bvals(outputBshellFile)
        ref_res = np.load(outPutResolutionFile)
    else:
        ref_bshell_img = ref_imgs[0]
        print(f'Using {ref_bshell_img} to determine b-shells')

        inPrefix = abspath(ref_bshell_img).split('.nii')[0]
        ref_bvals = findBShells(inPrefix + '.bval', outputBshellFile)

        ref_res = load(ref_bshell_img).header['pixdim'][1:4]
        np.save(outPutResolutionFile, ref_res)

    print('b-shells are', ref_bvals)
    print('\nSite', ref_csv, '\n')
    print('Checking consistency of b-shells among subjects')
    check_bshells(ref_imgs, ref_bvals)

    print('spatial resolution is', ref_res)
    print('Checking consistency of spatial resolution among subjects')
    check_resolution(ref_imgs, ref_res)

if __name__ == '__main__':

    if len(sys.argv) == 1 or sys.argv[1] == '-h' or sys.argv[1] == '--help':
        print('''Check consistency of b-shells and spatial resolution among subjects

Usage:
consistencyCheck list.csv/txt ref_bshell_bvalues.txt ref_res_file.npy

Provide a csv/txt file with the first column for dwi and the 2nd column for mask: dwi1,mask1\\ndwi2,mask2\\n...
or just one column for dwi1\\ndwi2\\n...

In addition, provide ref_bshell_bvalues and ref_res_file.''')
        exit()

    ref_csv = abspath(sys.argv[1])
    outputBshellFile = abspath(sys.argv[2])
    outPutResolutionFile = abspath(sys.argv[3])

    if isfile(ref_csv):
        consistencyCheck(ref_csv, outputBshellFile, outPutResolutionFile)
    else:
        raise FileNotFoundError(f'{ref_csv} does not exist.')
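# Input sketch (assumption — the paths are illustrative):
#
#     # list.csv — one dwi,mask pair per line
#     /data/sub01/dwi.nii.gz,/data/sub01/mask.nii.gz
#     /data/sub02/dwi.nii.gz,/data/sub02/mask.nii.gz
#
#     consistencyCheck list.csv ref_bshell_bvalues.txt ref_res_file.npy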
| 32.076389 | 158 | 0.646677 | 563 | 4,619 | 5.174068 | 0.296625 | 0.022657 | 0.013732 | 0.016478 | 0.354617 | 0.354617 | 0.276691 | 0.276691 | 0.248541 | 0.248541 | 0 | 0.007311 | 0.200476 | 4,619 | 143 | 159 | 32.300699 | 0.781478 | 0.136826 | 0 | 0.311111 | 0 | 0.033333 | 0.31412 | 0.042034 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.077778 | 0 | 0.111111 | 0.277778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99ebaf6f0294d590f15abaa22804c5ca352ff36f | 1,283 | py | Python | tests/test_cross_corr.py | halomod/halomod | f4f207ac70ed32a7f7e16139698c85eda1f0b6a9 | [
"MIT"
] | 4 | 2021-02-23T13:28:59.000Z | 2022-02-11T15:53:57.000Z | tests/test_cross_corr.py | halomod/halomod | f4f207ac70ed32a7f7e16139698c85eda1f0b6a9 | [
"MIT"
] | 20 | 2021-02-02T15:08:28.000Z | 2021-09-20T18:26:49.000Z | tests/test_cross_corr.py | halomod/halomod | f4f207ac70ed32a7f7e16139698c85eda1f0b6a9 | [
"MIT"
] | 3 | 2021-03-07T15:28:34.000Z | 2021-08-21T21:41:44.000Z | import numpy as np
from halomod.cross_correlations import ConstantCorr, CrossCorrelations
def test_cross_same():
    """Test that using two identical components gives the same result as an auto-correlation."""
    cross = CrossCorrelations(
        cross_hod_model=ConstantCorr,
        halo_model_1_params={
            "exclusion_model": "NoExclusion",
            "sd_bias_model": None,
            "transfer_model": "EH",
            "force_1halo_turnover": False,
        },
        halo_model_2_params={
            "exclusion_model": "NoExclusion",
            "sd_bias_model": None,
            "transfer_model": "EH",
            "force_1halo_turnover": False,
        },
    )

    assert np.allclose(cross.power_2h_cross, cross.halo_model_1.power_2h_auto_tracer)
    assert np.allclose(cross.corr_2h_cross, cross.halo_model_1.corr_2h_auto_tracer)

    # This is only close-ish, because cross-pairs are actually different from
    # auto-pairs, since the latter can count self-correlations.
    assert np.allclose(
        cross.corr_1h_cross,
        cross.halo_model_1.corr_1h_auto_tracer,
        atol=1e-5,
        rtol=1e-1,
    )
    assert np.allclose(
        cross.power_1h_cross,
        cross.halo_model_1.power_1h_auto_tracer,
        atol=1e-6,
        rtol=1e-1,
    )
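# Run sketch (assumption — pytest is the usual runner for a tests/ module):
#
#     pytest tests/test_cross_corr.py -k cross_same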
| 29.837209 | 89 | 0.643804 | 165 | 1,283 | 4.69697 | 0.406061 | 0.069677 | 0.064516 | 0.108387 | 0.531613 | 0.353548 | 0.216774 | 0.216774 | 0.216774 | 0.216774 | 0 | 0.025478 | 0.265783 | 1,283 | 42 | 90 | 30.547619 | 0.79724 | 0.157443 | 0 | 0.375 | 0 | 0 | 0.139665 | 0 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.03125 | false | 0 | 0.0625 | 0 | 0.09375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99efb291ee7023242cf3315eacce5ac12e340e87 | 5,703 | py | Python | arduino_kernel/kernel.py | jpsaenzmo/jupyter-lab-kernelino | 57b20c9cecf74d6533243729f47ab9cc74e0ef7d | [
"BSD-3-Clause"
] | null | null | null | arduino_kernel/kernel.py | jpsaenzmo/jupyter-lab-kernelino | 57b20c9cecf74d6533243729f47ab9cc74e0ef7d | [
"BSD-3-Clause"
] | null | null | null | arduino_kernel/kernel.py | jpsaenzmo/jupyter-lab-kernelino | 57b20c9cecf74d6533243729f47ab9cc74e0ef7d | [
"BSD-3-Clause"
] | null | null | null | from ipykernel.kernelbase import Kernel
import json
import os
import subprocess
import sys
import urllib
from urllib.request import urlopen
from requests.compat import urljoin
from notebook.notebookapp import list_running_servers
from .board import Board, BoardError
SKETCH_FOLDER = ".arduino/sketch"
class ArduinoKernel(Kernel):
    implementation = "Arduino"
    implementation_version = "1.0"
    language = "no-op"
    language_version = "0.1"
    language_info = {
        "name": "Any text",
        "mimetype": "text/plain",
        "file_extension": ".ino",
    }
    banner = "Arduino kernel"

    def __init__(self, **kwargs):
        Kernel.__init__(self, **kwargs)
        self._start_bash()

    def _start_bash(self):
        # NOTE: despite the name, no Bash REPL is started here; the replwrap
        # import is unused and only the sketch folder is created.
        from pexpect import replwrap
        import signal

        sig = signal.signal(signal.SIGINT, signal.SIG_DFL)
        try:
            os.makedirs(SKETCH_FOLDER)
        except FileExistsError:
            pass
    def do_execute(
        self, code, silent, store_history=True, user_expressions=None, allow_stdin=False
    ):
        from pexpect import EOF

        # Empty cell
        if not code.strip():
            return {
                "status": "ok",  # lowercase, per the Jupyter messaging spec
                "execution_count": self.execution_count,
                "payload": [],
                "user_expressions": {},
            }

        # Non-empty cell
        interrupted = False
        try:
            try:
                os.makedirs(SKETCH_FOLDER)
            except FileExistsError:
                pass

            if code == "arduino-cli board list":
                try:
                    sp = subprocess.check_output(
                        "arduino-cli board list", stderr=subprocess.STDOUT, shell=False
                    )
                except subprocess.CalledProcessError as e:
                    raise RuntimeError(
                        "command '{}' returned with error (code {}): {}".format(
                            e.cmd, e.returncode, e.output
                        )
                    )
                output = sp.decode(sys.stdout.encoding)
            elif code.startswith("arduino-cli lib install"):
                try:
                    sp = subprocess.check_output(
                        code,
                        stderr=subprocess.STDOUT,
                        shell=True,
                    )
                except subprocess.CalledProcessError as e:
                    errorTxt = "Command '{}' returned with error (code {}): {}".format(
                        e.cmd, e.returncode, e.output
                    )
                    stream_content = {"name": "stdout", "text": errorTxt}
                    self.send_response(self.iopub_socket, "stream", stream_content)
                    return {"status": "abort", "execution_count": self.execution_count}
                output = sp.decode(sys.stdout.encoding)
            else:
                oper = code.split("\n")[0]
                command = ""
                codes = ""
                if oper.split("%")[0] == "port":
                    port = oper.split("%")[1]
                    fqbn = code.split("\n")[1]
                    fqbn = fqbn.split("%")[1]
                    codes = code.split("\n", 2)[2]
                    command = (
                        "arduino-cli upload -p "
                        + port
                        + " --fqbn "
                        + fqbn
                        + " "
                        + SKETCH_FOLDER
                    )
                elif oper.split("%")[0] == "board":
                    fqbn = code.split("\n")[0]
                    fqbn = fqbn.split("%")[1]
                    codes = code.split("\n", 1)[1]
                    command = "arduino-cli compile -b " + fqbn + " " + SKETCH_FOLDER

                f = open(SKETCH_FOLDER + "/sketch.ino", "w+")
                f.write(codes.rstrip())
                f.close()

                try:
                    sp = subprocess.check_output(
                        command,
                        stderr=subprocess.STDOUT,
                        shell=True,
                    )
                except subprocess.CalledProcessError as e:
                    errorTxt = "Command '{}' returned with error (code {}): {}".format(
                        e.cmd, e.returncode, e.output
                    )
                    stream_content = {"name": "stdout", "text": errorTxt}
                    self.send_response(self.iopub_socket, "stream", stream_content)
                    return {"status": "abort", "execution_count": self.execution_count}
                output = sp.decode(sys.stdout.encoding)
        except KeyboardInterrupt:
            interrupted = True
            clean_sketches()
        # Restarting Bash
        except EOF:
            # NOTE: bash_wrapper is never assigned anywhere in this kernel,
            # so reaching this branch would raise AttributeError.
            output = self.bash_wrapper.child.before + "Restarting Bash"

        # If expecting output
        if not silent:
            stream_content = {"name": "stdout", "text": output}
            self.send_response(self.iopub_socket, "stream", stream_content)

        # If interrupted
        if interrupted:
            clean_sketches()
            return {"status": "abort", "execution_count": self.execution_count}
        # If everything is OK
        else:
            return {
                "status": "ok",
                "execution_count": self.execution_count,
                "payload": [],
                "user_expressions": {},
            }
def clean_sketches():
    if os.path.isfile("./" + SKETCH_FOLDER + "/sketch.ino"):
        # remove everything inside the sketch folder
        sketch_dir = "./" + SKETCH_FOLDER
        filelist = os.listdir(sketch_dir)
        for f in filelist:
            os.remove(os.path.join(sketch_dir, f))
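# Cell-format sketch (assumption — the FQBN and port are illustrative): a
# cell starting with a "board%<fqbn>" line is compiled; adding a leading
# "port%<device>" line makes the kernel upload instead.
#
#     port%/dev/ttyACM0
#     board%arduino:avr:uno
#     void setup() {}
#     void loop() {}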
| 35.867925 | 88 | 0.476065 | 505 | 5,703 | 5.253465 | 0.310891 | 0.05277 | 0.033924 | 0.050886 | 0.456087 | 0.402563 | 0.390878 | 0.390878 | 0.312853 | 0.294007 | 0 | 0.004815 | 0.417324 | 5,703 | 158 | 89 | 36.094937 | 0.79356 | 0.016833 | 0 | 0.374101 | 0 | 0 | 0.115357 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028777 | false | 0.014388 | 0.093525 | 0 | 0.208633 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99f2a8cd466b198120000a799808ca4ceb565c8a | 465 | py | Python | ping.py | TuxStory/Python3 | 4c1b2291d1613b32aa36b62b0b881ea40b423cce | [
"MIT"
] | null | null | null | ping.py | TuxStory/Python3 | 4c1b2291d1613b32aa36b62b0b881ea40b423cce | [
"MIT"
] | null | null | null | ping.py | TuxStory/Python3 | 4c1b2291d1613b32aa36b62b0b881ea40b423cce | [
"MIT"
] | null | null | null | import os
def ping(plage):
    for ip in range(255):
        test = os.system("ping -c 1 " + plage + str(ip) + " >/dev/null")
        if test == 0:
            print(plage + str(ip), "is active on the network.")


def main():
    os.system("clear")
    print("PingPy".center(25, "-"))
    print("\nExample: 192.168.1.")
    print("Example: 192.168.0.")
    print("-" * 25)
    Plage = input("Network prefix to scan: ")
    ping(Plage)


if __name__ == "__main__":
    main()
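# Example session (assumption — the output is illustrative):
#
#     $ python ping.py
#     ---------PingPy----------
#     Example: 192.168.1.
#     Network prefix to scan: 192.168.1.
#     192.168.1.1 is active on the network.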
| 23.25 | 67 | 0.556989 | 66 | 465 | 3.80303 | 0.590909 | 0.071713 | 0.079681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.065527 | 0.245161 | 465 | 19 | 68 | 24.473684 | 0.649573 | 0 | 0 | 0 | 0 | 0 | 0.290323 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.0625 | 0 | 0.1875 | 0.3125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99f3591924ec7be6b6dc2dbe39d731a3c039c201 | 971 | py | Python | MathUtilities.py | paulmontecot/PythonDFTMotionP3 | 741e544e0885e97a5469649ae2a90eadbec914c5 | [
"MIT"
] | null | null | null | MathUtilities.py | paulmontecot/PythonDFTMotionP3 | 741e544e0885e97a5469649ae2a90eadbec914c5 | [
"MIT"
] | null | null | null | MathUtilities.py | paulmontecot/PythonDFTMotionP3 | 741e544e0885e97a5469649ae2a90eadbec914c5 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
def integral(df, data):
    # cumulative trapezoidal integration over df.time
    integral = [0]
    for i in range(len(df.index) - 1):
        dt_ = df.time[i + 1] - df.time[i]
        integral_ = data[i] * dt_
        integral_ += (data[i + 1] - data[i]) * dt_ / 2.0
        integral.append(integral[i] + integral_)
    return integral


def derivData(df, data):
    derivData = [0]
    for i in range(len(df.index) - 1):
        if i == 0:
            derivData.append((data[i + 1] - data[i]) / (df.time[i + 1] - df.time[i]))
        elif i == len(df.index) - 1:
            # NOTE: this branch is unreachable — the loop index stops at
            # len(df.index) - 2.
            derivData.append((data[i] - data[i - 1]) / (df.time[i] - df.time[i - 1]))
        else:
            derivData.append((data[i + 1] - data[i - 1]) / (df.time[i + 1] - df.time[i - 1]))
    return derivData


def angle(df, X, Z):
    angle = np.arctan(X / Z)
    return angle


def norme(df):
    norme = np.sqrt((df['accX'] ** 2) + (df['accY'] ** 2) + (df['accZ'] ** 2))
    return norme
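# Worked check (assumption — toy data, not from the original project): for
# f(t) = t sampled at t = 0, 1, 2, the trapezoidal rule above gives
# cumulative areas [0, 0.5, 2.0].
#
#     df = pd.DataFrame({"time": [0.0, 1.0, 2.0]})
#     integral(df, [0.0, 1.0, 2.0])   # -> [0, 0.5, 2.0]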
| 27.742857 | 93 | 0.510814 | 151 | 971 | 3.245033 | 0.238411 | 0.040816 | 0.114286 | 0.081633 | 0.361224 | 0.322449 | 0.291837 | 0.093878 | 0.093878 | 0 | 0 | 0.031838 | 0.288363 | 971 | 34 | 94 | 28.558824 | 0.677279 | 0 | 0 | 0.074074 | 0 | 0 | 0.012384 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0.074074 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99f3fc94d7ce6e669de010cfe49cc2e8d70b8888 | 3,461 | py | Python | attention.py | lil-lab/atis | 08a17a7be8cd7b40d2f35e089947df4d543b3321 | [
"MIT"
] | 21 | 2019-06-03T14:21:00.000Z | 2022-01-14T13:43:42.000Z | attention.py | clic-lab/atis | 08a17a7be8cd7b40d2f35e089947df4d543b3321 | [
"MIT"
] | 3 | 2018-07-05T13:42:31.000Z | 2019-04-02T12:01:18.000Z | attention.py | clic-lab/atis | 08a17a7be8cd7b40d2f35e089947df4d543b3321 | [
"MIT"
] | 12 | 2018-06-13T00:11:33.000Z | 2019-04-15T22:12:08.000Z | """Contains classes for computing and keeping track of attention distributions.
"""
from collections import namedtuple
import dynet as dy
import dynet_utils as du
class AttentionResult(namedtuple('AttentionResult',
                                 ('scores',
                                  'distribution',
                                  'vector'))):
    """Stores the result of an attention calculation."""
    __slots__ = ()


class Attention():
    """Attention mechanism class. Stores parameters for and computes attention.

    Attributes:
        key_size (int): The size of the key vectors.
        value_size (int): The size of the value vectors.
        query_weights (dy.Parameters): Weights for transforming the query
            before computing attention.
    """
    def __init__(self,
                 model,
                 query_size,
                 key_size,
                 value_size):
        self.key_size = key_size
        self.value_size = value_size

        self.query_weights = du.add_params(
            model, (query_size, self.key_size), "weights-attention-q")

    def transform_arguments(self, query, keys, values):
        """Transforms the query/key/value inputs before attention calculations.

        Arguments:
            query (dy.Expression): Vector representing the query (e.g., hidden state.)
            keys (list of dy.Expression): List of vectors representing the key
                values.
            values (list of dy.Expression): List of vectors representing the values.

        Returns:
            triple of dy.Expression, where the first represents the (transformed)
            query, the second represents the (transformed and concatenated)
            keys, and the third represents the (transformed and concatenated)
            values.
        """
        assert len(keys) == len(values)

        all_keys = dy.concatenate(keys, d=1)
        all_values = dy.concatenate(values, d=1)

        assert all_keys.dim()[0][0] == self.key_size, "Expected key size of " + \
            str(self.key_size) + " but got " + str(all_keys.dim()[0][0])
        assert all_values.dim()[0][0] == self.value_size

        query = du.linear_transform(query, self.query_weights)
        if du.is_vector(query):
            query = du.add_dim(query)

        return query, all_keys, all_values

    def __call__(self, query, keys, values=None):
        if not values:
            values = keys

        query_t, keys_t, values_t = self.transform_arguments(query,
                                                             keys,
                                                             values)

        scores = dy.transpose(query_t * keys_t)
        distribution = dy.softmax(scores)
        context_vector = values_t * distribution

        return AttentionResult(scores, distribution, context_vector)
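# Usage sketch (assumption — the sizes are illustrative; `query` and `keys`
# are dy.Expression objects built in the current computation graph):
#
#     model = dy.ParameterCollection()
#     attn = Attention(model, query_size=64, key_size=32, value_size=32)
#     result = attn(query, keys)      # keys double as values here
#     context = result.vector         # weighted sum over the keys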
| 39.781609 | 86 | 0.608206 | 397 | 3,461 | 5.161209 | 0.27204 | 0.02733 | 0.021474 | 0.023426 | 0.321132 | 0.271352 | 0.252806 | 0.144461 | 0.144461 | 0.099561 | 0 | 0.003377 | 0.315516 | 3,461 | 86 | 87 | 40.244186 | 0.861545 | 0.445247 | 0 | 0 | 0 | 0 | 0.050257 | 0 | 0 | 0 | 0 | 0 | 0.076923 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.282051 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99f5c968619741d599aec34b3b7698daeeb82d4a | 2,655 | py | Python | v0/hack1.py | dhvakr/Farm-Hub---Hackverse-2021 | 3444c16dc792a3d533a071f41d53b7b9efbf0a78 | [
"MIT"
] | 2 | 2021-04-21T07:26:01.000Z | 2021-04-21T07:26:17.000Z | v0/hack1.py | dhvakr/Farm-Hub---Hackverse-2021 | 3444c16dc792a3d533a071f41d53b7b9efbf0a78 | [
"MIT"
] | null | null | null | v0/hack1.py | dhvakr/Farm-Hub---Hackverse-2021 | 3444c16dc792a3d533a071f41d53b7b9efbf0a78 | [
"MIT"
] | 2 | 2021-05-21T08:44:41.000Z | 2021-07-16T14:59:24.000Z | # -*- coding: utf-8 -*-
"""
Created on Saturday - 2021
@author: DIVAKARAN
"""
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from keras.models import Sequential
from keras.layers import Activation
from keras.optimizers import SGD
from keras.layers import Dense
df = pd.read_csv("final.csv")
le = preprocessing.LabelEncoder()
l1 = df["Soil"]
le.fit(l1)
newsoil = le.transform(l1)
df["Soil"]=newsoil
l2 = df["Month"]
le.fit(l2)
df["Month"]=le.transform(l2)
l3 = df["State"]
le.fit(l3)
df["State"]=le.transform(l3)
#df=df.iloc[:,1:]
df = pd.DataFrame(data = df.iloc[:,1:].values, columns=["Soil","Month","State","Rice","Wheat","Cotton","Sugarcane","Tea","Coffee","Cashew","Rubber","Coconut","Oilseed","Ragi","Maize","Groundnut","Millet","Barley"])
#print(df)
feat = pd.DataFrame({"Soil": df["Soil"], "Month" : df["Month"], "State": df["State"]})
labels = pd.DataFrame(data=df.iloc[:,3:],columns=["Rice","Wheat","Cotton","Sugarcane","Tea", "Coffee","Cashew","Rubber","Coconut","Oilseed","Ragi","Maize","Groundnut","Millet","Barley"])
#print(df)
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
(trainData, testData, trainLabels, testLabels) = train_test_split(feat, labels, test_size=0.25, random_state=42)
print(trainData.values)
model = Sequential()
# NOTE: `init=` is the legacy Keras 1 spelling of this argument; Keras 2
# renamed it to `kernel_initializer`.
model.add(Dense(15, input_dim=3, init="uniform", activation="sigmoid"))
"""
model.add(Dense(10, input_dim=3, init="uniform",activation="relu"))
print(model.output)
model.add(Dense(15, init="uniform", activation="relu"))
print(model.output)
model.add(Activation("sigmoid"))
print(model.output)
print(model.summary())
"""
#trainLabels = trainLabels.reshape((-1, 1))
print(trainData.shape, testData.shape, trainLabels.shape, testLabels.shape)
sgd = SGD(lr=0.01)
model.compile(loss="binary_crossentropy", optimizer=sgd, metrics=["accuracy"])
model.fit(trainData.values, trainLabels.values, epochs=500, batch_size=10, verbose=1)
(loss, accuracy) = model.evaluate(testData.values, testLabels.values, batch_size=40, verbose=1)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100))
pred = model.predict_proba(testData.values)
df = pd.DataFrame(pred, columns=["Rice","Wheat","Cotton","Sugarcane","Tea", "Coffee","Cashew","Rubber","Coconut","Oilseed","Ragi","Maize","Groundnut","Millet","Barley"])
print(df)
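# Label-decoding sketch (assumption — illustrative, not in the original):
# each row of `pred` holds independent per-crop probabilities, so a simple
# cutoff turns a row into a recommendation list, e.g.
#
#     recommended = df.columns[pred[0] > 0.5].tolist()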
#df['image_name'] = test_id
"""
newhh=df[['image_name','Type_1','Type_2','Type_3']]
newhh.to_csv('submission.csv', index=False)
""" | 34.480519 | 215 | 0.698682 | 364 | 2,655 | 5.024725 | 0.35989 | 0.024604 | 0.024604 | 0.039366 | 0.296884 | 0.240022 | 0.218699 | 0.218699 | 0.218699 | 0.165118 | 0 | 0.021821 | 0.102448 | 2,655 | 77 | 216 | 34.480519 | 0.745699 | 0.064407 | 0 | 0 | 0 | 0 | 0.207234 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.282051 | 0 | 0.282051 | 0.102564 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99f5e77d5e735a8f18f4fdcee74e3f414905ae28 | 1,978 | py | Python | app/base/management/commands/send_reminder.py | Sovol2018/sovolo | 54250e42b4af3391d2f99690f45b93ab240563c2 | [
"MIT"
] | 2 | 2017-06-06T11:34:49.000Z | 2017-10-24T13:09:50.000Z | app/base/management/commands/send_reminder.py | Sovol2018/sovolo | 54250e42b4af3391d2f99690f45b93ab240563c2 | [
"MIT"
] | 346 | 2016-08-09T20:50:57.000Z | 2018-08-28T06:52:17.000Z | app/base/management/commands/send_reminder.py | hejob/sovolo | 8b73253d7bf0427c7ae0ebb6d8e3d70e118e8427 | [
"MIT"
] | 3 | 2017-11-27T14:07:57.000Z | 2018-08-13T15:51:01.000Z | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.template.loader import get_template
from event.models import Event, Frame
from base.utils import send_template_mail
from django.utils import timezone
import datetime
import sys
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
class Command(BaseCommand):
    help = """
    Does the following; intended to be run once a day at 9 a.m.:
    - sends a reminder to volunteer participants whose event takes place,
      or whose registration closes, the next day
    """

    from_address = "reminder@sovol.moe"

    def handle(self, *args, **options):
        self.stdout.write("running...")
        today = datetime.datetime.combine(
            datetime.date.today(),
            datetime.time(0, 0, tzinfo=timezone.LocalTimezone())
        )

        reminder_template = get_template("email/reminder.txt")
        reminder_events = Event.objects.filter(
            start_time__gte=today + datetime.timedelta(days=1),
            start_time__lt=today + datetime.timedelta(days=2),
        )
        for event in reminder_events:
            for user in event.participant.all():
                send_template_mail(
                    reminder_template,
                    {'user': user, 'event': event},
                    self.from_address,
                    [user.email]
                )

        deadline_template = get_template("email/deadline.txt")
        deadline_frames = Frame.objects.filter(
            deadline__gte=today + datetime.timedelta(days=1),
            deadline__lt=today + datetime.timedelta(days=2),
        )
        for frame in deadline_frames:
            if frame.event not in reminder_events:
                for user in frame.participant.all():
                    send_template_mail(
                        deadline_template,
                        {'user': user, 'event': frame.event},
                        self.from_address,
                        [user.email]
                    )

        self.stdout.write("success...!")
| 34.103448 | 66 | 0.58999 | 202 | 1,978 | 5.623762 | 0.361386 | 0.068662 | 0.077465 | 0.091549 | 0.257042 | 0.204225 | 0.056338 | 0 | 0 | 0 | 0 | 0.006598 | 0.310415 | 1,978 | 57 | 67 | 34.701754 | 0.826246 | 0.010617 | 0 | 0.122449 | 0 | 0 | 0.093095 | 0.034271 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020408 | false | 0 | 0.163265 | 0 | 0.244898 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99ff21128f28ade08f135547e7c3ca6883ffbc73 | 5,675 | py | Python | human_feedback.py | XushengLuo/SocialRobotHumanFeedback | db609bc76c11528ea611dcbd21982e0ac6d50b71 | [
"BSD-2-Clause"
] | null | null | null | human_feedback.py | XushengLuo/SocialRobotHumanFeedback | db609bc76c11528ea611dcbd21982e0ac6d50b71 | [
"BSD-2-Clause"
] | null | null | null | human_feedback.py | XushengLuo/SocialRobotHumanFeedback | db609bc76c11528ea611dcbd21982e0ac6d50b71 | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
from shapely.geometry import Point, LineString, Polygon
from smallest_enclosing_circle import make_circle
from itertools import groupby
from operator import itemgetter
# def human_feedback(x, human_cluster, point_cluster, obstacle):
# # human are inside the polygon
# score = 0
# index = set()
# nx = np.shape(x)[0]//2
#
# for num, polygon in human_cluster.items():
# point = []
# cluster = Polygon(polygon)
# for i in range(nx - 1):
# # whether the line segment crosses the cluster(polygon)
# if LineString([Point((x[i], x[i+nx])), Point(x[i+1], x[i+1+nx])]).intersects(cluster):
# point.append([(x[i], x[i+nx]), (x[i+1], x[i+1+nx])])
# index.add(i)
# index.add(i+1)
# if point:
# score += get_score_from_human(point, point_cluster[num])
# # obstacle avoidance
# for num, obs in obstacle.items():
# for i in range(nx - 1):
# # whether the line segment crosses the (obstacle)
# if LineString([Point((x[i], x[i + nx])), Point(x[i + 1], x[i + 1 + nx])]).intersects(obs):
# score += 1
# index.add(i)
# index.add(i+1)
# # the length of the trajectory
# dist = np.sum([np.linalg.norm([x[i] - x[i + 1], x[i + nx] - x[i + 1 + nx]]) for i in range(nx - 1)])
# score += dist
#
# # index
# index_group = []
# index = list(index)
# index.sort()
# for k, g in groupby(enumerate(index), lambda ix: ix[0] - ix[1]):
# index_group.append(list(map(itemgetter(1), g)))
# expand_index = set([j for i in index_group for j in i])
# for group in index_group:
# num = np.random.randint(0, 3)
# extra = [group[0]-k for k in range(1, num+1) if group[0]-k > 0] + \
# [group[-1]+k for k in range(1, num+1) if group[-1]+k < nx]
# expand_index.update(set(extra))
# return score, dist, list(expand_index)
#
#
# def get_score(point, polygon):
# """
# the distance of the center of polygon to the line segment of a trajectory
# :param point:
# :param polygon:
# :return:
# """
# rho = 1
# cx, cy, r = make_circle(polygon)
# score = 0
# for p in point:
# d = np.abs((p[1][1]-p[0][1])*cx - (p[1][0]-p[0][0])*cy + p[1][0]*p[0][1] - p[1][1]*p[0][0]) / \
# np.sqrt((p[1][1]-p[0][1])**2 + (p[1][0]-p[0][0])**2)
# score += rho/d[0]
# return score
#
#
# def get_score_from_human(point, point_cluster):
# score = 0
# radius = 0.5
# for human in point_cluster:
# cx = human[0]
# cy = human[1]
# for p in point:
# d = np.abs((p[1][1] - p[0][1]) * cx - (p[1][0] - p[0][0]) * cy + p[1][0] * p[0][1] - p[1][1] * p[0][0]) / \
# np.sqrt((p[1][1] - p[0][1]) ** 2 + (p[1][0] - p[0][0]) ** 2)
# if d <= radius:
# score += 1
# return score
def human_feedback1(x0, x, human, obstacle, human_scale):
    # humans stand at random positions
    score = 0
    index = set()
    nx = np.shape(x)[0] // 2

    # complaint
    for i in range(nx - 1):
        p = [(x[i], x[i + nx]), (x[i + 1], x[i + 1 + nx])]
        for ind, h in enumerate(human):
            cx = h[0]
            cy = h[1]
            # decide the shortest distance of a point to a line segment
            # https://math.stackexchange.com/questions/2248617/shortest-distance-between-a-point-and-a-line-segment
            t = - ((p[0][0] - cx) * (p[1][0] - p[0][0]) + (p[0][1] - cy) * (p[1][1] - p[0][1])) / \
                ((p[1][0] - p[0][0]) ** 2 + (p[1][1] - p[0][1]) ** 2)
            if 0 <= t <= 1:
                d = np.abs((p[1][1] - p[0][1]) * cx - (p[1][0] - p[0][0]) * cy + p[1][0] * p[0][1] - p[1][1] * p[0][0]) / \
                    np.sqrt((p[1][1] - p[0][1]) ** 2 + (p[1][0] - p[0][0]) ** 2)
            else:
                d1 = (p[0][0] - cx) ** 2 + (p[0][1] - cy) ** 2
                d2 = (p[1][0] - cx) ** 2 + (p[1][1] - cy) ** 2
                d = np.sqrt(d1) if d1 <= d2 else np.sqrt(d2)
            if d <= human_scale[ind]:
                score += 1
                index.add(i)
                index.add(i + 1)

    # obstacle avoidance
    # for num, poly in obstacle.items():
    #     obs = Polygon(poly)
    #     for i in range(nx - 1):
    #         # whether the line segment crosses the (obstacle)
    #         if LineString([Point((x[i], x[i + nx])), Point(x[i + 1], x[i + 1 + nx])]).intersects(obs):
    #             score += 1
    #             index.add(i)
    #             index.add(i + 1)

    # complaints include human complaints and obstacles
    complaint = score

    # the length of the trajectory
    dist = 0  # np.sum([np.linalg.norm([x[i] - x[i + 1], x[i + nx] - x[i + 1 + nx]]) for i in range(nx - 1)])
    # diff = x - x0
    # dist = dist + np.sum([np.linalg.norm([(diff[i], diff[i + nx])]) for i in range(nx)])
    dist = dist + np.linalg.norm(x - x0)
    score = score * 10 + dist

    # indices of waypoints that need to be perturbed
    index_group = []
    index = list(index)
    index.sort()
    for k, g in groupby(enumerate(index), lambda ix: ix[0] - ix[1]):
        index_group.append(list(map(itemgetter(1), g)))
    expand_index = set([j for i in index_group for j in i])
    for group in index_group:
        num = np.random.randint(0, 1)
        extra = [group[0] - k for k in range(1, num + 1) if group[0] - k > 0] + \
                [group[-1] + k for k in range(1, num + 1) if group[-1] + k < nx]
        expand_index.update(set(extra))
    return score, complaint, dist, list(expand_index)
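# Call sketch (assumption — shapes are illustrative): x stacks all waypoint
# x-coordinates followed by all y-coordinates, so a 10-waypoint path is a
# length-20 vector.
#
#     score, complaint, dist, idx = human_feedback1(
#         x0, x, human=[(1.0, 2.0)], obstacle={}, human_scale=[0.5])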
| 39.685315 | 123 | 0.492863 | 904 | 5,675 | 3.05531 | 0.137168 | 0.020275 | 0.015206 | 0.01593 | 0.556843 | 0.555757 | 0.515206 | 0.475742 | 0.475742 | 0.466691 | 0 | 0.053106 | 0.316476 | 5,675 | 142 | 124 | 39.964789 | 0.658933 | 0.615507 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.113636 | 0 | 0.159091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8201bf3c0e643a911851cd3ab506967ec106559e | 8,286 | py | Python | MI/classify.py | n778509775/NWCQ | 72851d26f470465e9e13b219a12d52daa5e1ceed | [
"MIT"
] | 1 | 2021-10-17T02:13:27.000Z | 2021-10-17T02:13:27.000Z | MI/classify.py | n778509775/NWCQ | 72851d26f470465e9e13b219a12d52daa5e1ceed | [
"MIT"
] | null | null | null | MI/classify.py | n778509775/NWCQ | 72851d26f470465e9e13b219a12d52daa5e1ceed | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import torch.utils.data
import numpy as np
import random
import time
import matplotlib.pyplot as plt
from tkinter import _flatten
from function import plot_clas_loss, pre_processing
from sklearn.metrics import roc_auc_score
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import network as models
import math
import argparse
import pylib
# Set random seed
seed = 0
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(seed)
# CUDA
device_id = 0 # ID of GPU to use
cuda = torch.cuda.is_available()
if cuda:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
plt.ioff()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_folder', type=str, default='data/')
parser.add_argument('-l', '--dataset_file_list', nargs='+', help='<Required> Set flag', required=True, type=str)
parser.add_argument('--train_num', type=int, default=2)
parser.add_argument('--code_save', type=str, default='code_list.pkl')
parser.add_argument('--take_log', type=bool, default=False)
parser.add_argument('--standardization', type=bool, default=False)
parser.add_argument('--scaling', type=bool, default=False)
parser.add_argument('--plots_dir', type=str, default='plots/')
parser.add_argument('--code_dim', type=int, default=25)
parser.add_argument('--batch_size', type=int, default=128, help='mini-batch size')
parser.add_argument('--num_epochs', type=int, default=100, help='number of total iterations for training')
parser.add_argument('--lr_step', type=int, default=10000, help='step decay of learning rates')
parser.add_argument('--base_lr', type=float, default=1e-4, help='learning rate for network')
parser.add_argument('--l2_decay', type=float, default=5e-5)
parser.add_argument('--log_interval', type=int, default=100)
config = parser.parse_args()
#print(config)
data_folder = config.data_folder
code_save_file = data_folder + config.code_save
dataset_file_list = [data_folder+f for f in config.dataset_file_list]
data_num = len(dataset_file_list)
train_num = config.train_num
plots_dir = config.plots_dir
# read data
pre_process_paras = {'take_log': config.take_log, 'standardization': config.standardization, 'scaling': config.scaling}
dataset_list = pre_processing(dataset_file_list, pre_process_paras)
# training
batch_size = config.batch_size
num_epochs = config.num_epochs
num_inputs = len(dataset_list[0]['feature'])
code_dim = config.code_dim
# construct a DataLoader for each batch
batch_loader_dict = {}
for i in range(len(dataset_list)):
gene_exp = dataset_list[i]['mz_exp'].transpose()
labels = dataset_list[i]['labels']
# construct DataLoader list
if cuda:
torch_dataset = torch.utils.data.TensorDataset(
torch.FloatTensor(gene_exp).cuda(), torch.LongTensor(labels).cuda())
else:
torch_dataset = torch.utils.data.TensorDataset(
torch.FloatTensor(gene_exp), torch.LongTensor(labels))
data_loader = torch.utils.data.DataLoader(torch_dataset, batch_size=batch_size,
shuffle=True, drop_last=True)
batch_loader_dict[i+1] = data_loader
# create model
discriminator = models.Discriminator(num_inputs=num_inputs)
if cuda:
discriminator.cuda()
log_interval = config.log_interval
base_lr = config.base_lr
lr_step = config.lr_step
num_epochs = config.num_epochs
l2_decay = config.l2_decay
# training
criterion = nn.CrossEntropyLoss()
loss_classifier_list = []
for epoch in range(1, num_epochs + 1):
# step decay of learning rate
#learning_rate = base_lr / math.pow(2, math.floor(epoch / lr_step))
learning_rate = base_lr * math.pow(0.9, epoch / lr_step)
# regularization parameter between two losses
gamma_rate = 2 / (1 + math.exp(-10 * (epoch) / num_epochs)) - 1
if epoch % log_interval == 0:
print('{:}, Epoch {}, learning rate {:.3E}'.format(time.asctime(time.localtime()), epoch, learning_rate))
optimizer = torch.optim.Adam([
{'params': discriminator.parameters()},
], lr=learning_rate, weight_decay=l2_decay)
discriminator.train()
iter_data_dict = {}
for cls in batch_loader_dict:
iter_data = iter(batch_loader_dict[cls])
iter_data_dict[cls] = iter_data
# use the largest dataset to define an epoch
num_iter = 0
for cls in batch_loader_dict:
num_iter = max(num_iter, len(batch_loader_dict[cls]))
total_clas_loss = 0
num_batches = 0
for it in range(0, num_iter):
data_dict = {}
label_dict = {}
code_dict = {}
reconstruct_dict = {}
Disc_dict = {}
for cls in iter_data_dict:
                data, labels = next(iter_data_dict[cls])  # Python 3: use next(), not .next()
data_dict[cls] = data
label_dict[cls] = labels
if it % len(batch_loader_dict[cls]) == 0:
iter_data_dict[cls] = iter(batch_loader_dict[cls])
data_dict[cls] = Variable(data_dict[cls])
label_dict[cls] = Variable(label_dict[cls])
for cls in range(1,train_num+1):
Disc_dict[cls] = discriminator(data_dict[cls])
optimizer.zero_grad()
#Loss
# classifier loss for dignosis
loss_classification = torch.FloatTensor([0])
if cuda:
loss_classification = loss_classification.cuda()
for cat in range(1,train_num+1):
for cls in range(len(label_dict[cat])):
loss_classification += F.binary_cross_entropy(torch.squeeze(Disc_dict[cat])[cls], label_dict[cat][cls].float())
#loss_classification = criterion(Disc_dict[cat], label_dict[cat])
loss = loss_classification
loss.backward()
optimizer.step()
# update total loss
num_batches += 1
total_clas_loss += loss_classification.data.item()
avg_clas_loss = total_clas_loss / num_batches
if epoch % log_interval == 0:
print('Avg_classify_loss {:.3E}'.format(avg_clas_loss))
loss_classifier_list.append(avg_clas_loss)
#scheduler.step()
plot_clas_loss(loss_classifier_list, plots_dir+'clas_loss.png')
# testing: extract codes
discriminator.eval()
#F_score
def matric(cluster, labels):
TP, TN, FP, FN = 0, 0, 0, 0
n = len(labels)
for i in range(n):
if cluster[i]:
if labels[i]:
TP += 1
else:
FP += 1
elif labels[i]:
FN += 1
else:
TN += 1
return TP, TN, FP, FN
#Accuracy
for pre in range(train_num,len(dataset_list)):
test_data = torch.from_numpy(dataset_list[pre]['mz_exp'].transpose())
test_label = torch.from_numpy((np.array(dataset_list[pre]['labels']))).cuda()
Disc = discriminator(test_data.float().cuda())
pred = torch.from_numpy(np.array([1 if i > 0.5 else 0 for i in Disc])).cuda()
#pred = torch.max(F.softmax(Disc), 1)[1]
num_correct = 0
num_correct += torch.eq(pred, test_label).sum().float().item()
Acc = num_correct/len(test_label)
print("Accuracy is ", Acc)
TP, TN, FP, FN = matric(pred, test_label)
precision = TP / (TP + FP)
recall = TP / (TP + FN)
f_score = 2 * precision * recall / (precision + recall)
print("F_score is ",f_score)
#AUC
print("AUC is ",roc_auc_score(test_label.cpu(), pred.cpu()))
#MCC
MCC = (TP * TN - FP * FN) / math.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
print("MCC is ",MCC)
| 35.715517 | 131 | 0.621289 | 1,080 | 8,286 | 4.540741 | 0.221296 | 0.027529 | 0.051998 | 0.014682 | 0.137643 | 0.084217 | 0.04792 | 0.025285 | 0.025285 | 0.025285 | 0 | 0.01147 | 0.263456 | 8,286 | 231 | 132 | 35.87013 | 0.792069 | 0.068911 | 0 | 0.090909 | 0 | 0 | 0.066857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006061 | false | 0 | 0.09697 | 0 | 0.109091 | 0.036364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
82021110f6f9c2d0fb36aa62a5093fee91e387ae | 3,842 | py | Python | idgames-extract/src/doom/doomimage.py | GitExl/DoomIdgamesArchive | 6f7124de5c2bab256bb2255b309f50a958780b9d | [
"BSD-2-Clause"
] | 2 | 2019-05-05T02:04:45.000Z | 2019-07-10T18:49:31.000Z | idgames-extract/src/doom/doomimage.py | GitExl/DoomIdgamesArchive | 6f7124de5c2bab256bb2255b309f50a958780b9d | [
"BSD-2-Clause"
] | null | null | null | idgames-extract/src/doom/doomimage.py | GitExl/DoomIdgamesArchive | 6f7124de5c2bab256bb2255b309f50a958780b9d | [
"BSD-2-Clause"
] | 3 | 2016-11-08T21:03:28.000Z | 2019-05-12T21:45:37.000Z | from struct import Struct
from typing import Optional
from PIL import Image
from doom.palette import Palette
class DoomImage(object):
S_HEADER: Struct = Struct('<HHhh')
def __init__(self, width: int, height: int, left: int, top: int):
self.width: int = width
self.height: int = height
self.left: int = left
self.top: int = top
self.pixels: Optional[bytes] = None
@classmethod
def from_data(cls, data: bytes, palette: Palette):
"""
Creates a DoomImage with doom graphics data rendered to an internal buffer.
:param data:
:param palette:
:return:
"""
width, height, left, top = DoomImage.S_HEADER.unpack_from(data)
data_len = len(data)
# Attempt to detect invalid data.
if width > 2048 or height > 2048 or top > 2048 or left > 2048:
return None
if width <= 0 or height <= 0:
return None
image = cls(width, height, left, top)
# Initialize an empty bitmap.
pixels = bytearray([0, 0, 0] * width * height)
pixels_len = len(pixels)
# Read column offsets.
offset_struct = Struct('<' + ('I' * width))
offsets = offset_struct.unpack_from(data[8:8 + (width * 4)])
# Read columns.
column_index = 0
while column_index < width:
offset = offsets[column_index]
# Attempt to detect invalid data.
if offset >= data_len:
return None
prev_delta = 0
while True:
column_top = data[offset]
# Column end.
if column_top == 255:
break
# Tall columns are extended.
if column_top <= prev_delta:
column_top += prev_delta
prev_delta = column_top
pixel_count = data[offset + 1]
offset += 3
pixel_index = 0
while pixel_index < pixel_count:
if offset + pixel_index >= data_len:
break
pixel = data[offset + pixel_index]
destination = ((pixel_index + column_top) * width + column_index) * 3
if destination + 2 < pixels_len:
pixels[destination + 0] = palette.colors[pixel].r
pixels[destination + 1] = palette.colors[pixel].g
pixels[destination + 2] = palette.colors[pixel].b
pixel_index += 1
offset += pixel_count + 1
if offset >= data_len:
break
column_index += 1
image.pixels = bytes(pixels)
return image
@staticmethod
def is_valid(data: bytes) -> bool:
"""
Determine if some data is likely to be a valid Doom type image.
:param data:
:return:
"""
if len(data) < 16:
return False
# Verify if the header values are sane.
width, height, left, top = DoomImage.S_HEADER.unpack_from(data)
if width > 2048 or height > 2048 or top > 2048 or left > 2048:
return False
if width <= 0 or height <= 0:
return False
# Verify that offsets are in range of the data.
offset_struct = Struct('<' + ('I' * width))
offsets = offset_struct.unpack_from(data[8:8 + (width * 4)])
for offset in offsets:
if offset >= len(data):
return False
return True
    def get_pillow_image(self) -> Image.Image:
"""
Returns a Pillow image from this graphic's image data.
:return:
"""
return Image.frombytes('RGB', (self.width, self.height), self.pixels)
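
# Usage sketch (hypothetical lump bytes and palette, for illustration only):
#   if DoomImage.is_valid(lump_data):
#       img = DoomImage.from_data(lump_data, palette)
#       if img is not None:
#           img.get_pillow_image().save('picture.png')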
| 29.106061 | 89 | 0.525508 | 434 | 3,842 | 4.539171 | 0.251152 | 0.018274 | 0.028426 | 0.027411 | 0.214213 | 0.214213 | 0.188832 | 0.165482 | 0.165482 | 0.165482 | 0 | 0.026854 | 0.389381 | 3,842 | 131 | 90 | 29.328244 | 0.812873 | 0.134565 | 0 | 0.297297 | 0 | 0 | 0.003751 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.054054 | 0 | 0.27027 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
82027cf84d56a19e89654387f3e3771d2c030696 | 4,327 | py | Python | bdd_mtl/tools/eval_seg.py | XDong18/bdd-mtl | 90459c090a2bc4a89a929740e5cf5d37c1b34a4b | [
"BSD-3-Clause"
] | null | null | null | bdd_mtl/tools/eval_seg.py | XDong18/bdd-mtl | 90459c090a2bc4a89a929740e5cf5d37c1b34a4b | [
"BSD-3-Clause"
] | null | null | null | bdd_mtl/tools/eval_seg.py | XDong18/bdd-mtl | 90459c090a2bc4a89a929740e5cf5d37c1b34a4b | [
"BSD-3-Clause"
] | null | null | null | import argparse
import boundary_utils as bu
import numpy as np
import os
import sys
import time
from utils import *
from multiprocessing import Pool
import pickle as pk
from PIL import Image
def parse_args():
parser = argparse.ArgumentParser(
description='Evaluate drivable area and semantic segmentation predictions')
parser.add_argument('-d', '--data-dir', default=None)
parser.add_argument('-p', '--pred-dir', default=None)
args = parser.parse_args()
return args
def _eval_drivable(infos):
global task
gt_fn, pred_fn = infos
gt = np.array(Image.open(gt_fn))
pred = np.load(pred_fn)
drivable_hist = fast_hist(pred.flatten(), gt.flatten(), 3)
return [drivable_hist]
def _eval_sem_seg(infos):
global task
gt_fn, pred_fn = infos
gt = np.array(Image.open(gt_fn))
pred = np.load(pred_fn).squeeze(0).astype(np.uint8)
# semantic segmentation
hist = fast_hist(pred.flatten(), gt.flatten(), 19)
return hist
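

# For reference, conventional implementations of the helpers imported from utils
# (a sketch; the actual utils module may differ):
def _fast_hist_sketch(pred, gt, n):
    # n x n confusion matrix, ignoring labels outside [0, n)
    k = (gt >= 0) & (gt < n)
    return np.bincount(n * gt[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n)


def _per_class_iu_sketch(hist):
    # per-class IoU: diagonal / (row sum + column sum - diagonal)
    return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))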
def main():
args = parse_args()
tasks = os.listdir(args.pred_dir)
# segmentation
if 'sem_seg' in tasks:
print('Evaluating semantic segmentation...')
sem_seg_base = os.path.join(args.data_dir, 'images', '10k', 'val')
gt_fns = [os.path.join(args.data_dir, 'labels', 'sem_seg', 'sem_seg_val', fn[:-4] + '_train_id.png') for fn in os.listdir(sem_seg_base)]
sem_seg_fns = [os.path.join(args.pred_dir, 'sem_seg', '{}.npy'.format(fn[:-4])) for fn in os.listdir(sem_seg_base)]
pool = Pool(5)
o = pool.imap_unordered(_eval_sem_seg, zip(gt_fns, sem_seg_fns))
tic = time.time()
while len(o._items) < len(gt_fns):
toc = time.time()
finished = len(o._items)
if finished > 0:
print('{}/{} ETA: {}s Elapsed: {}s'.format(finished, len(gt_fns), (len(gt_fns) - finished) / finished * (toc - tic), toc - tic), end='\r')
time.sleep(10)
pool.close()
pool.join()
evals = [i[1] for i in o._items]
hist = np.sum(evals, axis=0)
ious = per_class_iu(hist).tolist()
classes = ['road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle', 'TOTAL']
ious.append(np.nanmean(ious))
print('[SEMANTIC]')
[print(a, '\t\t', b) for a, b in zip(classes, ious)]
print(','.join([str(i) for i in ious]))
# drivable area
if 'drivable' in tasks:
print('Evaluating drivable...')
drivable_base = os.path.join(args.data_dir, 'labels', 'drivable', 'drivable_val')
gt_fns = sorted([os.path.join(drivable_base, d) for d in os.listdir(drivable_base)])
pred_drivable_base = os.path.join(args.pred_dir, 'drivable')
drivable_fns = [os.path.join(pred_drivable_base, '{}.npy'.format(n.split('.')[0].split('/')[-1])) for n in gt_fns]
pool = Pool(10)
print(len(gt_fns), len(drivable_fns))
o = pool.imap_unordered(_eval_drivable, zip(gt_fns, drivable_fns))
tic = time.time()
while len(o._items) < len(gt_fns):
toc = time.time()
finished = len(o._items)
if finished > 0:
print('{}/{} ETA: {}s Elapsed: {}s'.format(finished, len(gt_fns), (len(gt_fns) - finished) / finished * (toc - tic), toc - tic), end='\r')
time.sleep(10)
pool.close()
pool.join()
drivable_evals = [i[1] for i in o._items]
# if len(drivable_evals[0][0]) == 9:
# lane_evals = np.mean([i[0] for i in drivable_evals], axis=0)
# print('[LANE]\n[thresh=10] {} {} {}\n[thresh=5] {} {} {}\n[thresh=1] {} {} {}'.format(*lane_evals))
# for e in lane_evals:
# print(e)
drivable_hist = np.sum(drivable_evals, axis=0)
drivable_ious = per_class_iu(drivable_hist[0]).tolist()
drivable_ious.append(sum(drivable_ious[1:])/2)
print('[DRIVABLE]\n[direct] {} [alt] {} [overall] {}'.format(*drivable_ious[1:]))
for d in drivable_ious:
print(d)
if __name__ == '__main__':
main()
| 39.336364 | 226 | 0.580772 | 593 | 4,327 | 4.055649 | 0.256324 | 0.024948 | 0.029106 | 0.029106 | 0.353015 | 0.332225 | 0.306445 | 0.245322 | 0.2079 | 0.2079 | 0 | 0.011523 | 0.257915 | 4,327 | 109 | 227 | 39.697248 | 0.737465 | 0.067945 | 0 | 0.282353 | 0 | 0 | 0.131545 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047059 | false | 0 | 0.117647 | 0 | 0.2 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
82063e18e59a0cf3835ff63d9115c425615a4eb0 | 877 | py | Python | admin/aws/list_instances.py | biosimulations/hsds | bd5b659c4fcc60c4a2791b2f39750faaa9098d09 | [
"Apache-2.0"
] | 76 | 2019-01-03T18:14:01.000Z | 2022-03-28T20:09:42.000Z | admin/aws/list_instances.py | biosimulations/hsds | bd5b659c4fcc60c4a2791b2f39750faaa9098d09 | [
"Apache-2.0"
] | 104 | 2019-01-01T17:09:52.000Z | 2022-03-31T17:53:48.000Z | admin/aws/list_instances.py | biosimulations/hsds | bd5b659c4fcc60c4a2791b2f39750faaa9098d09 | [
"Apache-2.0"
] | 41 | 2019-01-30T13:58:16.000Z | 2022-02-23T13:13:01.000Z | import boto.ec2
import config
region = config.get("aws_region")
conn = boto.ec2.connect_to_region(region)
reservations = conn.get_all_instances()
fields = ("id", "public ip", "private ip", "name", "subnet", "state")
format_str = "{:<20} {:<16} {:<16} {:<16} {:<16} {:<12}"
print(format_str.format(*fields))
sep = ('-'*12,) * 6
print(format_str.format(*sep))
for res in reservations:
for inst in res.instances:
name = '<none>'
if 'Name' in inst.tags:
name = inst.tags["Name"]
if inst.ip_address is None:
inst.ip_address = '<none>'
if inst.private_ip_address is None:
inst.private_ip_address = '<none>'
if inst.subnet_id is None:
inst.subnet_id = "<none>"
print(format_str.format(inst.id, inst.ip_address, inst.private_ip_address, name, inst.subnet_id, inst.state))
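
# Equivalent sketch with boto3, for reference only (this script uses legacy boto 2):
#   import boto3
#   ec2 = boto3.resource('ec2', region_name=region)
#   for inst in ec2.instances.all():
#       print(inst.id, inst.public_ip_address, inst.private_ip_address,
#             inst.subnet_id, inst.state['Name'])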
| 32.481481 | 117 | 0.615735 | 123 | 877 | 4.219512 | 0.308943 | 0.104046 | 0.080925 | 0.115607 | 0.146435 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024963 | 0.223489 | 877 | 26 | 118 | 33.730769 | 0.737151 | 0 | 0 | 0 | 0 | 0 | 0.136986 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.136364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
82073259142e93c2fdba09723427d60b8a6ec446 | 5,795 | py | Python | src/regression.py | rahlk/Rosie | 60dc9d6a5590cdfbafbcbb0a7285db4e496384cc | [
"MIT"
] | null | null | null | src/regression.py | rahlk/Rosie | 60dc9d6a5590cdfbafbcbb0a7285db4e496384cc | [
"MIT"
] | null | null | null | src/regression.py | rahlk/Rosie | 60dc9d6a5590cdfbafbcbb0a7285db4e496384cc | [
"MIT"
] | null | null | null | '''
Created on Feb 8, 2017
This is the regression script the langlib project.
It is meant to be run at the top level directory of the repository.
'''
import copy,json
from os import walk
from os.path import exists,splitext
from subprocess import Popen, PIPE
from string import digits
import difflib
import sys
#This is the location of the testfiles and manifest relative to the root directory.
testfiles = "./testfiles/"
manifest_file = "./MANIFEST"
#This dict defines the translation from directory names in the "testfiles" directory to actual rosie pattern
#i.e. csharp -> "cs.<pattern>"
langs = {
"java" : "java",
"c" : "c",
"cpp" : "cpp",
"csharp" : "cs",
"go" : "go",
"javascript" : "js",
"ruby" : "rb",
"r" : "r",
"bash" : "b",
"vb" : "vb",
"python" : "py",
}
#This array defines the actuals expected to be ran by the script. These are also the expected directory names
#for associated tests under the testfiles/language i.e. each "comments" -> "./testfiles/<language>/comments/
tests = [
"comments",
"dependencies",
"functions",
"classes",
"structs",
"strings"
]
class HtmlPrinter:
'''
This is a simple html printer used to write the various results table generated
by run_tests to an html file.
'''
def __init__(self,id):
'''
Initializes the printer
id : Numeric id of the test execution (test id).
'''
self.ts = id
self.file=open("./result" + str(self.ts) + ".html", 'w')
def add_table(self,test,html):
self.file.write("<h1>" + test + "</h1>")
self.file.write(html)
def close(self):
self.file.close()
def run_tests():
'''
This function iterates through all directories found under ./testfiles/ and executes tests if possible.
The process is as follows:
1. Find directory in ./testfiles/, and verify if it maps to a value in the langs.
Continue to step 2 if it does not or move to new directory.
2. Find a directory in the langs directory found in step 1, and verify if it maps to a test in the tests array.
Continue to step 3 if it does not or move to a new directory.
3. Find a file in the test directory found in step 2.
    If the file is correctly named, i.e. <pattern name><numeric id>.<valid_extension>, then strip the
    numeric id to recover the pattern name.
4. Verify that the input file has a corresponding json output file in ./testfiles/<lang>/output/<test>/.
If it does continue to step 5 otherwise move to a new test file.
5. Execute the input file and compare the results to the output file. Fail the test if a difference is found,
and print the diff using HTMLPrinter.
6. Move to new test file as appropriate and continue.
'''
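    # Illustrative layout (hypothetical file names) that satisfies steps 1-4 above:
    #   ./testfiles/python/input/comments/comments1.py   -> runs pattern "py.comments"
    #   ./testfiles/python/output/comments/comments1.json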
failures = 0
testCount = 0
printer = HtmlPrinter(sys.argv[1])
for test in tests:
for lang,alias in langs.items():
base_path = testfiles + lang + "/input/" + test + "/"
for (dirpath, dirnames, test_files) in walk(base_path):
for test_file in test_files:
resolved_input = dirpath + test_file
resolved_output = splitext(resolved_input)[0].replace("input","output") + ".json"
if not exists(resolved_input): continue
if not exists(resolved_output): continue
with open(resolved_output, 'rU') as vOut:
test_file_name = splitext(test_file)[0]
pattern = copy.copy(test_file_name)
pattern = pattern.translate(None,digits)
proc = Popen('rosie -manifest ' + manifest_file + ' -wholefile -encode json ' + alias + "." + pattern + " " + resolved_input, stdout=PIPE, stderr=PIPE,shell=True)
stdout = ''
stderr = ''
for line in proc.stdout: stdout += line
for line in proc.stderr: stderr += line
if(stderr != ''): print(stderr)
try:
json1 = json.loads(vOut.read())
json2 = json.loads(stdout)
jsonOut1 = json.dumps(json1,indent=2, sort_keys=True)
jsonOut2 = json.dumps(json2,indent=2, sort_keys=True)
if jsonOut1 != jsonOut2:
differ = difflib.HtmlDiff()
printer.add_table(lang + " : " + test_file_name, ''.join(differ.make_file(jsonOut1.splitlines(True),jsonOut2.splitlines(True))))
failures += 1
print("-------------------------------------------------")
print (test_file_name + " test failed for " + lang)
except ValueError:
failures += 1
print("-------------------------------------------------")
print (test_file_name + " test failed for " + lang)
testCount += 1
print("-------------------------------------------------")
if(testCount == 1):
print(str(testCount) + " test ran")
else:
print(str(testCount) + " tests ran")
if(failures == 1):
print(str(failures) + " test failed")
else:
print(str(failures) + " tests failed")
print("-------------------------------------------------")
printer.close()
if(failures > 0): exit(1)
if __name__ == '__main__':
run_tests()
| 41.099291 | 186 | 0.534599 | 672 | 5,795 | 4.540179 | 0.327381 | 0.026221 | 0.019666 | 0.008522 | 0.069485 | 0.05703 | 0.05703 | 0.031465 | 0.031465 | 0.031465 | 0 | 0.010909 | 0.335634 | 5,795 | 140 | 187 | 41.392857 | 0.781558 | 0.30906 | 0 | 0.108696 | 0 | 0 | 0.133195 | 0.05079 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.076087 | 0 | 0.130435 | 0.152174 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
82075f1bc0f847438e1a50eb1984f6a6e589570a | 4,727 | py | Python | personal/sudoku_solver/sudoku_solver.py | jyodroid/python_training | 7b17145faed6a005d44a08c2f72a60644705f1e1 | [
"Unlicense"
] | null | null | null | personal/sudoku_solver/sudoku_solver.py | jyodroid/python_training | 7b17145faed6a005d44a08c2f72a60644705f1e1 | [
"Unlicense"
] | null | null | null | personal/sudoku_solver/sudoku_solver.py | jyodroid/python_training | 7b17145faed6a005d44a08c2f72a60644705f1e1 | [
"Unlicense"
] | null | null | null | # Class https://docs.python.org/3/tutorial/classes.html
class SudokuSolver:
def __init__(self, boxes, unitlist):
self.boxes = boxes
self.unitlist = unitlist
# My solution
    def set_boxes_values(self, values):
board = {}
for index in range(len(self.boxes)):
board[self.boxes[index]] = values[index]
return board
#Better solution
def grid_values(self, values):
assert len(values) == 81, "Input grid must be a string of length 81 (9x9)"
return dict(zip(self.boxes, values))
# The elimination technique https://youtu.be/6rFOX2jHB2g
#Adding Grid values with elimination technique so we can add possible values to grid
def grid_all_posibilities(self, values):
assert len(values) == 81, "Input grid must be a string of length 81 (9x9)"
board = {}
for index in range(len(self.boxes)):
value = values[index]
if value == ".":
board[self.boxes[index]] = "123456789"
else:
board[self.boxes[index]] = value
return board
# Udacity solution
    def another_grid_all_posibilities(self, grid):
        values = []
        all_digits = '123456789'
        for c in grid:
if c == '.':
values.append(all_digits)
elif c in all_digits:
values.append(c)
assert len(values) == 81
return dict(zip(self.boxes, values))
# Find board places with one digit element and discard from peers other options
def __eliminate(self, values):
units = dict((s, [u for u in self.unitlist if s in u]) for s in self.boxes)
peers = dict((s, set(sum(units[s],[]))-set([s])) for s in self.boxes)
solved_values = [box for box in values.keys() if len(values[box]) == 1]
for box in solved_values:
digit = values[box]
for peer in peers[box]:
values[peer] = values[peer].replace(digit, '')
return values
    # Only choice technique: https://youtu.be/sSjYn-Kex1A
def __only_choise(self, values):
for unit in self.unitlist:
for digit in '123456789':
dplaces = [box for box in unit if digit in values[box]]
if len(dplaces) == 1:
values[dplaces[0]] = digit
return values
# Constraints propagation on solving puzzle
def reduce_puzzle(self, values):
stalled = False
while not stalled:
# Check how many boxes have a determined value
solve_values_before = len([box for box in values.keys() if len(values[box]) == 1])
#Use eliminate strategy
self.__eliminate(values)
#Use Only choise strategy
self.__only_choise(values)
#Check how many boxes have a determined value to compare
solve_values_after = len([box for box in values.keys() if len(values[box]) == 1 ])
#If no new values were added, stop the loop.
stalled = solve_values_before == solve_values_after
#Sanity check: return false if there is a box with zero available values:
if len([box for box in values.keys() if len(values[box]) == 0]):
return False
return values
# search strategy https://youtu.be/omveZu2gRLs
def search(self, values):
# "Using depth-first search and propagation, create a search tree and solve the sudoku."
# First, reduce the puzzle using the previous function
values = self.reduce_puzzle(values)
if values is False:
return False #Error propagation
if all(len(values[s]) == 1 for s in self.boxes):
return values # Solved
# Choose one of the unfilled squares with the fewest possibilities
n, s = min((len(values[s]), s) for s in self.boxes if len(values[s]) > 1)
# Now use recursion to solve each one of the resulting sudokus, and if one returns a value (not False), return that answer!
for value in values[s]:
new_sudoku = values.copy()
new_sudoku[s] = value
attempt = self.search(new_sudoku)
if attempt:
return attempt
def display(self, values, rows, columns):
"""
Display the values as a 2-D grid.
Input: The sudoku in dictionary form
Output: None
"""
width = 1+max(len(values[s]) for s in self.boxes)
line = '+'.join(['-'*(width*3)]*3)
for r in rows:
print(''.join(values[r+c].center(width)+('|' if c in '36' else '')
for c in columns))
if r in 'CF': print(line)
return
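

# Usage sketch (standard 9x9 labelling; cross() is a helper defined here for
# illustration, and the commented grid string is a sample puzzle):
def cross(a, b):
    return [s + t for s in a for t in b]


_rows, _cols = 'ABCDEFGHI', '123456789'
_boxes = cross(_rows, _cols)
_unitlist = ([cross(r, _cols) for r in _rows] +
             [cross(_rows, c) for c in _cols] +
             [cross(rs, cs) for rs in ('ABC', 'DEF', 'GHI') for cs in ('123', '456', '789')])
# solver = SudokuSolver(_boxes, _unitlist)
# values = solver.grid_all_posibilities('..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3..')
# solver.display(solver.search(values), _rows, _cols)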
| 37.220472 | 131 | 0.584303 | 623 | 4,727 | 4.369181 | 0.277689 | 0.046289 | 0.017634 | 0.018369 | 0.201323 | 0.190301 | 0.152094 | 0.152094 | 0.101396 | 0.101396 | 0 | 0.018938 | 0.318595 | 4,727 | 126 | 132 | 37.515873 | 0.826141 | 0.243072 | 0 | 0.2 | 0 | 0 | 0.036395 | 0 | 0 | 0 | 0 | 0 | 0.0375 | 1 | 0.125 | false | 0 | 0 | 0 | 0.2875 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
820790042011984b310814afa864c57777d5d354 | 10,956 | py | Python | crystal_toolkit/components/transformations/grainboundary.py | mkhorton/mp-dash-components | b9af1b59f0120a90897631d9a7f8d9f0ae561de9 | [
"MIT"
] | null | null | null | crystal_toolkit/components/transformations/grainboundary.py | mkhorton/mp-dash-components | b9af1b59f0120a90897631d9a7f8d9f0ae561de9 | [
"MIT"
] | 5 | 2018-10-18T19:52:12.000Z | 2018-11-17T19:02:49.000Z | crystal_toolkit/components/transformations/grainboundary.py | mkhorton/mp-dash-components | b9af1b59f0120a90897631d9a7f8d9f0ae561de9 | [
"MIT"
] | null | null | null | import dash
from dash import dcc
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.transformations.advanced_transformations import (
GrainBoundaryGenerator,
GrainBoundaryTransformation,
)
from crystal_toolkit.components.transformations.core import TransformationComponent
from crystal_toolkit.helpers.layouts import add_label_help
class GrainBoundaryTransformationComponent(TransformationComponent):
@property
def title(self):
return "Make a grain boundary"
@property
def description(self):
return """Create a grain boundary within a periodic supercell. This transformation
requires sensible inputs, and will be slow to run in certain cases.
When using this transformation a new site property is added which can be used
to colour-code the top and bottom grains."""
@property
def transformation(self):
return GrainBoundaryTransformation
def options_layouts(self, state=None, structure=None):
state = state or {
"rotation_axis": [0, 0, 1],
"rotation_angle": None,
"expand_times": 2,
"vacuum_thickness": 0,
"ab_shift": [0, 0],
"normal": False,
"ratio": None,
"plane": None,
"max_search": 20,
"tol_coi": 1e-8,
"rm_ratio": 0.7,
"quick_gen": False,
}
rotation_axis = self.get_numerical_input(
label="Rotation axis",
kwarg_label="rotation_axis",
state=state,
help_str="""Maximum number of atoms allowed in the supercell.""",
shape=(3,),
)
# sigma isn't a direct input into the transformation, but has
# to be calculated from the rotation_axis and structure
_, sigma_options, _ = self._get_sigmas_options_and_ratio(
structure, state.get("rotation_axis")
)
sigma = dcc.Dropdown(
id=self.id("sigma"),
style={"width": "5rem"},
options=sigma_options,
value=sigma_options[0]["value"] if sigma_options else None,
)
sigma = add_label_help(
sigma,
"Sigma",
"The unit cell volume of the coincidence site lattice relative to "
"input unit cell is denoted by sigma.",
)
# likewise, rotation_angle is then a function of sigma, so
# best determined using sigma to provide a default value:
# this is initialized via a callback
rotation_angle = self.get_choice_input(
label="Rotation angle",
kwarg_label="rotation_angle",
state=state, # starts as None
help_str="""Rotation angle to generate grain boundary. Options determined by
your choice of Σ.""",
style={"width": "15rem"},
)
expand_times = self.get_numerical_input(
label="Expand times",
kwarg_label="expand_times",
state=state,
help_str="""The multiple number of times to expand one unit grain into a larger grain. This is
useful to avoid self-interaction issues when using the grain boundary as an input to further simulations.""",
is_int=True,
shape=(),
min=1,
max=6,
)
vacuum_thickness = self.get_numerical_input(
label="Vacuum thickness /Å",
kwarg_label="vacuum_thickness",
state=state,
help_str="""The thickness of vacuum that you want to insert between the two grains.""",
shape=(),
)
ab_shift = self.get_numerical_input(
label="In-plane shift",
kwarg_label="ab_shift",
state=state,
help_str="""In-plane shift of the two grains given in units of the **a**
and **b** vectors of the grain boundary.""",
shape=(2,),
)
normal = self.get_bool_input(
label="Set normal direction",
kwarg_label="normal",
state=state,
help_str="Enable to require the **c** axis of the top grain to be perpendicular to the surface.",
)
plane = self.get_numerical_input(
label="Grain boundary plane",
kwarg_label="plane",
state=state,
help_str="""Grain boundary plane in the form of a list of integers.
If not set, grain boundary will be a twist grain boundary.
The plane will be perpendicular to the rotation axis.""",
shape=(3,),
)
tol_coi = self.get_numerical_input(
label="Coincidence Site Tolerance",
kwarg_label="tol_coi",
state=state,
help_str="""Tolerance to find the coincidence sites. To check the number of coincidence
sites are correct or not, you can compare the generated grain boundary's sigma with
expected number.""",
shape=(),
)
rm_ratio = self.get_numerical_input(
label="Site Merging Tolerance",
kwarg_label="rm_ratio",
state=state,
help_str="""The criteria to remove the atoms which are too close with each other relative to
the bond length in the bulk system.""",
shape=(),
)
return [
rotation_axis,
sigma,
rotation_angle,
expand_times,
vacuum_thickness,
ab_shift,
normal,
plane,
tol_coi,
rm_ratio,
]
@staticmethod
def _get_sigmas_options_and_ratio(structure, rotation_axis):
rotation_axis = [int(i) for i in rotation_axis]
lat_type = (
"c" # assume cubic if no structure specified, just to set initial choices
)
ratio = None
if structure:
sga = SpacegroupAnalyzer(structure)
lat_type = sga.get_lattice_type()[0] # this should be fixed in pymatgen
try:
ratio = GrainBoundaryGenerator(structure).get_ratio()
except Exception:
ratio = None
cutoff = 10
if lat_type.lower() == "c":
sigmas = GrainBoundaryGenerator.enum_sigma_cubic(
cutoff=cutoff, r_axis=rotation_axis
)
elif lat_type.lower() == "t":
sigmas = GrainBoundaryGenerator.enum_sigma_tet(
cutoff=cutoff, r_axis=rotation_axis, c2_a2_ratio=ratio
)
elif lat_type.lower() == "o":
sigmas = GrainBoundaryGenerator.enum_sigma_ort(
cutoff=cutoff, r_axis=rotation_axis, c2_b2_a2_ratio=ratio
)
elif lat_type.lower() == "h":
sigmas = GrainBoundaryGenerator.enum_sigma_hex(
cutoff=cutoff, r_axis=rotation_axis, c2_a2_ratio=ratio
)
elif lat_type.lower() == "r":
sigmas = GrainBoundaryGenerator.enum_sigma_rho(
cutoff=cutoff, r_axis=rotation_axis, ratio_alpha=ratio
)
else:
return [], None, ratio
options = []
subscript_unicode_map = {
0: "₀",
1: "₁",
2: "₂",
3: "₃",
4: "₄",
5: "₅",
6: "₆",
7: "₇",
8: "₈",
9: "₉",
}
for sigma in sorted(sigmas.keys()):
sigma_label = "Σ{}".format(sigma)
for k, v in subscript_unicode_map.items():
sigma_label = sigma_label.replace(str(k), v)
options.append({"label": sigma_label, "value": sigma})
return sigmas, options, ratio
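    # For reference, the underlying pymatgen calls this method wraps (a sketch;
    # assumes a cubic input structure):
    #   ratio = GrainBoundaryGenerator(structure).get_ratio()
    #   sigmas = GrainBoundaryGenerator.enum_sigma_cubic(cutoff=10, r_axis=[0, 0, 1])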
def generate_callbacks(self, app, cache):
super().generate_callbacks(app, cache)
@app.callback(
Output(self.id("sigma"), "options"),
[Input(self.get_kwarg_id("rotation_axis"), "value")],
[State(self.id("input_structure"), "data")],
)
def update_sigma_options(rotation_axis, structure):
rotation_axis = self.reconstruct_kwarg_from_state(
dash.callback_context.inputs, "rotation_axis"
)
if (rotation_axis is None) or (not structure):
raise PreventUpdate
structure = self.from_data(structure)
_, sigma_options, _ = self._get_sigmas_options_and_ratio(
structure=structure, rotation_axis=rotation_axis
)
# TODO: add some sort of error handling here when sigmas is empty
return sigma_options
@app.callback(
Output(self.id("rotation_angle", is_kwarg=True, hint="literal"), "options"),
[
Input(self.id("sigma"), "value"),
Input(self.get_kwarg_id("rotation_axis"), "value"),
],
[State(self.id("input_structure"), "data")],
)
def update_rotation_angle_options(sigma, rotation_axis, structure):
if not sigma:
raise PreventUpdate
rotation_axis = self.reconstruct_kwarg_from_state(
dash.callback_context.inputs, "rotation_axis"
)
if (rotation_axis is None) or (not structure):
raise PreventUpdate
structure = self.from_data(structure)
sigmas, _, _ = self._get_sigmas_options_and_ratio(
structure=structure, rotation_axis=rotation_axis
)
rotation_angles = sigmas[sigma]
options = []
for rotation_angle in sorted(rotation_angles):
options.append(
{"label": "{:.2f}º".format(rotation_angle), "value": rotation_angle}
)
return options
# TODO: make client-side callback
@app.callback(
[Output(self.id("sigma"), "value"), Output(self.id("sigma"), "disabled")],
[
Input(self.id("sigma"), "options"),
Input(self.id("enable_transformation"), "on"),
],
)
def update_default_value(options, enabled):
if not options:
raise PreventUpdate
return options[0]["value"], enabled
# TODO: make client-side callback, or just combine all callbacks here
@app.callback(
Output(self.id("rotation_angle", is_kwarg=True, hint="literal"), "value"),
[
Input(
self.id("rotation_angle", is_kwarg=True, hint="literal"), "options"
)
],
)
        def update_default_rotation_angle(options):
if not options:
raise PreventUpdate
return options[0]["value"]
| 34.670886 | 121 | 0.562705 | 1,179 | 10,956 | 5.049194 | 0.248516 | 0.060474 | 0.018814 | 0.022846 | 0.282043 | 0.219217 | 0.188812 | 0.178901 | 0.178901 | 0.162439 | 0 | 0.007671 | 0.345564 | 10,956 | 315 | 122 | 34.780952 | 0.822594 | 0.049379 | 0 | 0.205323 | 0 | 0 | 0.217918 | 0.002019 | 0 | 0 | 0 | 0.003175 | 0 | 1 | 0.038023 | false | 0 | 0.030418 | 0.011407 | 0.110266 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8209433689df2d1a57e76c64f9c9052b8c8371b0 | 2,871 | py | Python | main.py | marcomaiermm/fishit | 5c4992d91365ec9b841eaaa81848d4ad8a9af9ed | [
"MIT"
] | null | null | null | main.py | marcomaiermm/fishit | 5c4992d91365ec9b841eaaa81848d4ad8a9af9ed | [
"MIT"
] | 1 | 2021-06-08T21:07:25.000Z | 2021-06-08T21:07:25.000Z | main.py | marcomaiermm/fishit | 5c4992d91365ec9b841eaaa81848d4ad8a9af9ed | [
"MIT"
] | null | null | null | import imagesearch
import gui
import subprocess
import time
#from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import traceback,sys
class Thread(QThread):
    # pyqtSignal must be declared as a class attribute, not created inside __init__
    signal = pyqtSignal(object)

    def __init__(self, fn, f=False):
        super(Thread, self).__init__()
        self.f = f
        self.fn = fn
# run method gets called when we start the thread
def run(self):
print("starting thread")
if self.f:
result = self.fn(w).CheckArea()
else:
result = self.fn()
#w.gui.catched_edit.setText(str(process.fish_count))
#w.gui.status_edit.setText(str(process.status))
def stop(self):
print("thread ended")
self.terminate()
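
# Sketch of the intended wiring (an assumption, based on the commented-out lines
# below): with `signal` as a class attribute, a consumer can call
# `thread.signal.connect(slot)` and run() can call `self.signal.emit(result)`.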
class Timer:
def __init__(self,duration):
super(Timer, self).__init__()
self.dur = duration
self.elapsed_s = 0
self.elapsed_m = 0
self.elapsed_h = 0
self.total=0
def timer(self):
while (self.total<=(self.dur*60)) and (w.gui.stoppedLabel.text()==""):
if self.elapsed_m<=59:
if self.elapsed_s<=59:
self.elapsed_s+=1
else:
self.elapsed_s=0
self.elapsed_m+=1
else:
self.elapsed_h+=1
self.elapsed_m=0
time_text=str(self.elapsed_h) + ":" + str(self.elapsed_m) + ":" + str(self.elapsed_s)
w.gui.time_edit.setText(time_text)
self.total+=1
time.sleep(1)
w.gui.stoppedLabel.setText("Stopped")
w.Clear()
class AppWindow(QMainWindow):
def __init__(self):
super(AppWindow, self).__init__()
self.gui = gui.Ui_Window()
self.gui.setupUi(self)
self.fish_thread = Thread(imagesearch.Fishit, f=True)
self.gui.fish_button.clicked.connect(self.FishButton)
#self.fish_thread.signal.connect(self.gui.status_label.setText)
#self.fish_thread.signal.connect(self.finished)
self.gui.stop_button.clicked.connect(self.Stop)
def FishButton(self):
duration = int(self.gui.duration.currentText())
self.gui.fish_button.setEnabled(False)
self.gui.stoppedLabel.setText("")
self.fish_thread.start()
self.timer = Thread(Timer(duration).timer)
self.timer.start()
def Stop(self):
self.gui.stoppedLabel.setText("Stopped")
self.timer.stop()
self.Clear()
self.time=0
def Clear(self):
self.gui.time_edit.setText("0:0:0")
self.gui.catched_edit.setText("")
self.gui.status_edit.setText("")
self.gui.fish_button.setEnabled(True)
if __name__ == "__main__":
app=QApplication(sys.argv)
w=AppWindow()
w.show()
app.exec_()
| 29.295918 | 98 | 0.593173 | 359 | 2,871 | 4.562674 | 0.259053 | 0.087302 | 0.03663 | 0.031136 | 0.101343 | 0.068376 | 0.030525 | 0 | 0 | 0 | 0 | 0.012148 | 0.283177 | 2,871 | 98 | 98 | 29.295918 | 0.783771 | 0.1031 | 0 | 0.089744 | 0 | 0 | 0.02179 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.102564 | 0 | 0.25641 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
820ca3e67bffef370d9d41988b414444b2ad1bf8 | 2,968 | py | Python | examples/Mentor/05.6.TransformOrdering.py | cclauss/pivy | 55de2ba4dd32f62ce2d4e33ca28459cf3ea5167a | [
"ISC"
] | 29 | 2019-12-28T10:37:16.000Z | 2022-02-09T10:48:04.000Z | examples/Mentor/05.6.TransformOrdering.py | cclauss/pivy | 55de2ba4dd32f62ce2d4e33ca28459cf3ea5167a | [
"ISC"
] | 29 | 2019-12-26T13:46:11.000Z | 2022-03-29T18:14:33.000Z | examples/Mentor/05.6.TransformOrdering.py | cclauss/pivy | 55de2ba4dd32f62ce2d4e33ca28459cf3ea5167a | [
"ISC"
] | 17 | 2019-12-29T11:49:32.000Z | 2022-02-23T00:28:18.000Z | #!/usr/bin/env python
###
# Copyright (c) 2002-2007 Systems in Motion
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
###
# This is an example from the Inventor Mentor,
# chapter 5, example 6.
#
# This example shows the effect of different order of
# operation of transforms. The left object is first
# scaled, then rotated, and finally translated to the left.
# The right object is first rotated, then scaled, and finally
# translated to the right.
#
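# Put differently, each transform node applies to everything after it in the same
# separator, so the left grain's points end up as translate(rotate(scale(p))) and
# the right grain's as translate(scale(rotate(p))).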
import sys
from pivy.coin import *
from pivy.sogui import *
def main():
# Initialize Inventor and Qt
myWindow = SoGui.init(sys.argv[0])
if myWindow == None: sys.exit(1)
root = SoSeparator()
# Create two separators, for left and right objects.
leftSep = SoSeparator()
rightSep = SoSeparator()
root.addChild(leftSep)
root.addChild(rightSep)
# Create the transformation nodes
leftTranslation = SoTranslation()
rightTranslation = SoTranslation()
myRotation = SoRotationXYZ()
myScale = SoScale()
# Fill in the values
leftTranslation.translation = (-1.0, 0.0, 0.0)
rightTranslation.translation = (1.0, 0.0, 0.0)
myRotation.angle = M_PI/2 # 90 degrees
myRotation.axis = SoRotationXYZ.X
myScale.scaleFactor = (2., 1., 3.)
# Add transforms to the scene.
leftSep.addChild(leftTranslation) # left graph
leftSep.addChild(myRotation) # then rotated
leftSep.addChild(myScale) # first scaled
rightSep.addChild(rightTranslation) # right graph
rightSep.addChild(myScale) # then scaled
rightSep.addChild(myRotation) # first rotated
# Read an object from file. (as in example 4.2.Lights)
myInput = SoInput()
if not myInput.openFile("temple.iv"):
sys.exit(1)
fileContents = SoDB.readAll(myInput)
if fileContents == None:
sys.exit(1)
# Add an instance of the object under each separator.
leftSep.addChild(fileContents)
rightSep.addChild(fileContents)
# Construct a renderArea and display the scene.
myViewer = SoGuiExaminerViewer(myWindow)
myViewer.setSceneGraph(root)
myViewer.setTitle("Transform Ordering")
myViewer.show()
myViewer.viewAll()
SoGui.show(myWindow)
SoGui.mainLoop()
if __name__ == "__main__":
main()
| 31.242105 | 74 | 0.705863 | 382 | 2,968 | 5.460733 | 0.465969 | 0.00767 | 0.008629 | 0.00767 | 0.040268 | 0.016299 | 0.016299 | 0 | 0 | 0 | 0 | 0.014524 | 0.211253 | 2,968 | 94 | 75 | 31.574468 | 0.876548 | 0.493598 | 0 | 0.046512 | 0 | 0 | 0.024005 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.069767 | 0 | 0.093023 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
820ceb9164fe0c747e5f68b273d1654107a1cd1d | 1,796 | py | Python | python-object-storage/test_object_storage.py | henriqueccapozzi/pocs | 9c1e3b22b3f026b22fe01b365bddd95b5afc9264 | [
"MIT"
] | null | null | null | python-object-storage/test_object_storage.py | henriqueccapozzi/pocs | 9c1e3b22b3f026b22fe01b365bddd95b5afc9264 | [
"MIT"
] | null | null | null | python-object-storage/test_object_storage.py | henriqueccapozzi/pocs | 9c1e3b22b3f026b22fe01b365bddd95b5afc9264 | [
"MIT"
] | null | null | null | import os
import json
import pytest
from object_storage import DB
DB_FILE_NAME = "db.json"
def _create_test_db():
return DB("test", file_name=DB_FILE_NAME)
def _create_mock_obj():
return {
"programing_languages_features": {
"python": ["simple", "easy setup"],
"javascript": ["widespread usage", "powerfull"],
}
}
class TestAtributes(object):
def test_db_have_name(self):
new_db = _create_test_db()
assert new_db.name == "test"
def test_insert_retrieve_objects(self):
new_db = _create_test_db()
new_db.insert(key="topic", value="registration")
assert new_db.get("topic") == "registration"
color_list = ["blue", "red", "green"]
new_db.insert("color_list", color_list)
assert new_db.get("color_list") == color_list
new_db.insert("number", 9)
assert new_db.get("number") == 9
obj = _create_mock_obj()
new_db.insert("languages", obj)
assert new_db.get("languages") == obj
def test_db_saves_to_file(self):
new_db = _create_test_db()
obj = _create_mock_obj()
new_db.insert("languages", obj)
new_db.save()
assert os.stat("db.json") is not None
def test_db_saves_proper_data_to_file(self):
new_db = _create_test_db()
obj = _create_mock_obj()
new_db.insert("languages", obj)
new_db.save()
with open(DB_FILE_NAME, "r") as f:
loaded_obj = json.load(f)
assert loaded_obj == new_db.objects
def test_db_loads_from_file(self):
new_db = _create_test_db()
new_db.insert("number", 1)
new_db.save()
loaded_db = DB("test", DB_FILE_NAME, True)
assert loaded_db.objects == {"number": 1} | 26.80597 | 60 | 0.614143 | 244 | 1,796 | 4.143443 | 0.27459 | 0.103858 | 0.076162 | 0.074184 | 0.274975 | 0.274975 | 0.254204 | 0.250247 | 0.250247 | 0.148368 | 0 | 0.003019 | 0.262249 | 1,796 | 67 | 61 | 26.80597 | 0.76 | 0 | 0 | 0.28 | 0 | 0 | 0.132999 | 0.016138 | 0 | 0 | 0 | 0 | 0.16 | 1 | 0.14 | false | 0 | 0.08 | 0.04 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
820f6292ce69f024eb8b287f0493082d26ff31c3 | 3,898 | py | Python | 10-Flask/72-Library.py | ericson14/Small_project | dd88b9a5619d38fb8d236c932ffa8429d24b28ae | [
"MIT"
] | null | null | null | 10-Flask/72-Library.py | ericson14/Small_project | dd88b9a5619d38fb8d236c932ffa8429d24b28ae | [
"MIT"
] | null | null | null | 10-Flask/72-Library.py | ericson14/Small_project | dd88b9a5619d38fb8d236c932ffa8429d24b28ae | [
"MIT"
] | null | null | null | from flask import Flask, flash, render_template, request, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
class Config(object):
SQLALCHEMY_DATABASE_URI = "mysql://root:chuanzhi@127.0.0.1:3306/library"
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = "a13uo1ccl"
class Register(FlaskForm):
author = StringField("作者", render_kw={"placeholder": "添加作者"})
book = StringField("书名", render_kw={"placeholder": "添加书名"})
submit = SubmitField("添加")
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
class Author(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(15), nullable=False)
    books = db.relationship("Book", backref="author")
def __repr__(self):
return "Author: {} {}".format(self.name, self.id)
class Book(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30), nullable=False)
author_id = db.Column(db.Integer, db.ForeignKey(Author.id))
def __repr__(self):
return "Book: {} {}".format(self.name, self.id)
@app.route('/', methods=['GET', 'POST'])
def index():
form = Register()
if request.method == "POST":
if form.validate_on_submit():
author_name = request.form.get("author")
book_name = request.form.get("book")
author = Author.query.filter(Author.name == author_name).first()
if author:
                # author already exists: just add the book
book = Book.query.filter(Book.name == book_name).first()
if book:
flash("已经有此书了,请勿重复添加")
else:
new_book = Book(name=book_name, author_id=author.id)
db.session.add(new_book)
db.session.commit()
else:
                # no such author: create the author first, then add the book
new_author = Author(name=author_name)
db.session.add(new_author)
db.session.commit()
new_book = Book(name=book_name, author_id=new_author.id)
db.session.add(new_book)
db.session.commit()
else:
flash("参数错误")
authors = Author.query.all()
return render_template("temp4_72.html", form=form, authors=authors)
@app.route('/del_book/<book_id>')
def del_book(book_id):
delbook = Book.query.get(book_id)
if delbook:
try:
db.session.delete(delbook)
except Exception as e:
flash(e)
db.session.rollback()
finally:
db.session.commit()
else:
flash("书名不存在。。。")
return redirect(url_for("index"))
@app.route('/del_author/<author_id>')
def del_author(author_id):
delauthor = Author.query.get(author_id)
if delauthor:
        # deleting an author first requires deleting all of their books
books = Book.query.filter(author_id == Book.author_id)
try:
for book in books:
db.session.delete(book)
db.session.delete(delauthor)
except Exception as e:
flash(e)
db.session.rollback()
finally:
db.session.commit()
else:
flash("作者不存在。。。")
return redirect(url_for("index"))
if __name__ == "__main__":
db.drop_all()
    # create all tables
db.create_all()
    # seed data
au1 = Author(name='老王')
au2 = Author(name='老尹')
au3 = Author(name='老刘')
    # add the records to the session
db.session.add_all([au1, au2, au3])
db.session.commit()
bk1 = Book(name='老王回忆录', author_id=au1.id)
bk2 = Book(name='我读书少,你别骗我', author_id=au1.id)
bk3 = Book(name='如何才能让自己更骚', author_id=au2.id)
bk4 = Book(name='怎样征服美丽少女', author_id=au3.id)
bk5 = Book(name='如何征服英俊少男', author_id=au3.id)
    # add the records to the session
db.session.add_all([bk1, bk2, bk3, bk4, bk5])
    # commit the session
db.session.commit()
app.run(debug=True)
| 29.755725 | 76 | 0.596203 | 484 | 3,898 | 4.640496 | 0.285124 | 0.068121 | 0.04675 | 0.033838 | 0.259127 | 0.186109 | 0.186109 | 0.186109 | 0.158504 | 0.158504 | 0 | 0.014371 | 0.268086 | 3,898 | 130 | 77 | 29.984615 | 0.772871 | 0.02001 | 0 | 0.29703 | 0 | 0 | 0.07767 | 0.017581 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049505 | false | 0 | 0.039604 | 0.019802 | 0.29703 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8211444c1809530cc0c344f0e30b5c1c9a1ff78e | 24,908 | py | Python | totall/api.py | dacosta2213/totall | c64420bb8d35ccf423fe8ea66321b34431f79660 | [
"MIT"
] | null | null | null | totall/api.py | dacosta2213/totall | c64420bb8d35ccf423fe8ea66321b34431f79660 | [
"MIT"
] | null | null | null | totall/api.py | dacosta2213/totall | c64420bb8d35ccf423fe8ea66321b34431f79660 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.utils import get_fullname, get_link_to_form, get_url_to_form
from datetime import date,datetime,timedelta
import jwt
import time
@frappe.whitelist()
def inventario(item_code):
inventario = frappe.get_all('Bin', filters={'item_code': item_code}, fields=['warehouse', 'actual_qty'] )
frappe.errprint(inventario)
return(inventario)
@frappe.whitelist()
def pings():
return 'pong'
def pingo():
return 'pongo'
# RG - Update the atrasado (overdue) and factura fields on each customer
# 1 - When a Payment Entry is finalized, every customer is refreshed... we start by computing the date in update_atrasado
# 2 - Freeze (congelado = 1, credit_limit = 1) anyone more than 40 days overdue and unfreeze (congelado = 0, credit_limit = ) under 40
# 3 - If no SINV is "Unpaid" or "Overdue", set atrasado = 0
@frappe.whitelist()
def update_atrasado():
clientes = frappe.db.get_list('Customer',fields=['name'])
for c in clientes:
facturas = frappe.db.get_list('Sales Invoice', filters={ 'outstanding_amount': ['>', 1],'customer': c.name,'clave':['like', '%%CC%%'] },
fields=['name', 'outstanding_amount','posting_date'],
order_by='posting_date asc',
page_length=1,
as_list=True
)
if facturas:
today = date.today()
someday = facturas[0][2]
diff = today - someday
#frappe.errprint(diff.days)
#frappe.errprint(facturas[0][0])
#frappe.errprint(c.name)
if diff.days > 40:
frappe.db.sql("UPDATE tabCustomer set congelado = 1 ,credit_limit = 1 WHERE name = %s", (c.name))
frappe.errprint(c.name)
frappe.db.sql("UPDATE `tabSales Invoice` a left join tabCustomer b on a.customer = b.customer_name set congelado = 1, credit_limit = 1, b.atrasado = %s, b.factura = %s WHERE b.name = %s", (diff.days,facturas[0][0],c.name))
frappe.db.commit()
#else:
# frappe.db.sql("UPDATE tabCustomer SET congelado = 0 ,credit_limit = 0 WHERE name = %s", (c.name))
# frappe.db.commit()
return
# @ frappe.whitelist ()
# def ubicacion ():
# items = frappe.db.get_list ('Item', fields =['name', 'rack', 'ubicacion'])
# for c in items:
# ajustes = frappe.db.get_list ('Stock Reconciliation Item', fields =['name','item_code', 'anaquel', 'ubicacion'], order_by = 'creation desc')
# for a in ajustes:
# if c.ubicacion != a.rack:
# #frappe.errprint(c.name)
# frappe.errprint(a.anaquel)
# frappe.db.commit()
# #else /* : */
# # frappe.db.sql("UPDATE tabCustomer SET congelado = 0 ,credit_limit = 0 WHERE name = %s", (c.name))
# # frappe.db.commit()
# return
#
# @frappe.whitelist()
# def ruta(login_manager):
# ruta = frappe.db.get_value("User", login_manager.user,"ruta_login")
# frappe.errprint(ruta)
# frappe.local.response["home_page"] = ruta
# {"type":"Feature","properties":{},"geometry":{"type":"LineString","coordinates":[ [-118.383197,32.649782],[-115.382042,32.650772],[-115.380479,32.649689] ] }}]}"
@frappe.whitelist(allow_guest=True)
# loads the route records, filters by user and date, and sends a FeatureCollection to the front end
def get_rutas(user,date):
rutas = frappe.get_all('Ruta', fields=['cliente','nombre_prospecto','lat','lng','creation','comentario'], filters = {'usuario': user, 'date': date } , order_by='creation' )
frappe.errprint(rutas)
feature = """ { "type": "FeatureCollection" , "features":[ { "type" : "Feature","properties":{},"geometry":{"type":"LineString","coordinates": [ """
for i in rutas:
feature += """ [ """ + str(i.lng) + """,""" + str(i.lat) + """ ] """
if i == rutas[-1]:
feature += """ ]}}]}"""
else:
feature += """ , """
return feature
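
# Sketch of the same FeatureCollection built with json.dumps instead of string
# concatenation (it also covers the empty-route case, which the loop above leaves
# as malformed JSON):
#   coords = [[r.lng, r.lat] for r in rutas]
#   return json.dumps({"type": "FeatureCollection", "features": [{
#       "type": "Feature", "properties": {}, "geometry": {
#           "type": "LineString", "coordinates": coords}}]})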
@frappe.whitelist(allow_guest=True)
# returns the route rows for the tables
def get_tabla(user,date):
rutas = frappe.get_all('Ruta', fields=['cliente','nombre_prospecto','lat','lng','time','comentario'], filters = {'usuario': user, 'date': date } , order_by='creation' )
return rutas
@frappe.whitelist(allow_guest=True)
# returns the users with the Vendedor Role Profile to start capture in the app
def get_usuarios():
# u = frappe.db.sql("SELECT name from tabUser")
# return u
usuarios = frappe.get_all('User', fields=['name','full_name'], filters = {'role_profile_name': 'Vendedor'} , order_by='name' )
if usuarios:
return usuarios
else:
return('No encontrado')
@frappe.whitelist(allow_guest=True)
def generar_lead(owner,lead_name,email_id,numero,lead_owner,source,campaign_name,informacion_adicional,lead_type):
doc = frappe.get_doc({
"doctype": "Lead",
"user": "Administrator",
"owner": owner,
"lead_name": lead_name,
"email_id": email_id,
"numero": numero,
"lead_owner": lead_owner,
"source": source,
"campaign_name": campaign_name,
"informacion_adicional": informacion_adicional,
"lead_type": lead_type
})
doc.insert(ignore_permissions=True)
frappe.db.commit()
# return('Nuevo Lead: ' + str(doc.name))
# frappe.db.sql("UPDATE tabLead SET status='Asignado' WHERE source like '%Publicidad%'")
# frappe.db.commit()
frappe.sendmail(['egarcia@totall.mx',"{0}".format(doc.lead_owner)], \
subject=doc.name , \
content="Felicidades usted tiene un nuevo prospecto, de click en la liga para darle seguimiento. ¡Exito! "+frappe.utils.get_url_to_form(doc.doctype, doc.name),delayed=False)
@frappe.whitelist(allow_guest=True)
def recorrido(user,lat,lng):
frappe.log_error(title="Error latitud", message=lat + user)
doc = frappe.get_doc({
"doctype": "Recorrido",
"user": user.strip('"'),
"lng": lng,
"lat": lat,
"phone": user
})
doc.insert(ignore_permissions=True)
frappe.db.commit()
return('Nueva-Lectura Insertada: ' + str(doc.name))
@frappe.whitelist(allow_guest=True)
# loads the registered stations and builds the file for the map
def get_estaciones():
estaciones = frappe.get_all('Estacion', fields=['nombre','lat','lng'])
feature = """ { "type": "FeatureCollection" , "features":[ """
for i in estaciones:
feature += """ { "type" : "Feature","properties":{"name": " """ + i.nombre + """ "},"geometry":{"type":"Point","coordinates":[""" + str(i.lat) + """,""" + str(i.lng) + """]}}"""
if i == estaciones[-1]:
feature += """ ]} """
else:
feature += """ , """
return feature
@frappe.whitelist(allow_guest=True)
def estaciones(estacion, lat, lng):
    est = frappe.db.get_value("Estacion", estacion, "name")
    if est:
        frappe.db.sql("UPDATE tabEstacion SET lat=%s , lng= %s WHERE nombre = %s", (lat, lng, estacion))
        frappe.db.commit()
        return 'Estacion Actualizada.'
    else:
        return 'no se encontro la estacion'
    # est = frappe.db.get_value("Estacion", estacion, "name")
    # if est:
    #     frappe.db.set_value("Estacion", estacion, 'lat', lat)
    #     frappe.db.set_value("Estacion", estacion, 'lng', lng)
    #     return 'Estacion Actualizada.'
    # else:
    #     return 'no se encontro la estacion'
# RG - Update the current quantity on Item
# TODO: still need a method so that every Item has stock_maximo; get_all stock_maximo < 0 (change it to int)
# Another option - build the method from Bin and compute the reorder point from there (it may be easier)
# RG - In hooks -> events we have this method for "Bin": "on_update": "totall.api.update_actual"
# RG - What it does is update the reorder point (actual - stock maximo) so it shows in the Max report of the Item Doctype
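# A minimal sketch of the hooks.py wiring described above (assumed shape,
# not copied from this repo's actual hooks.py):
#
#     doc_events = {
#         "Bin": {"on_update": "totall.api.update_actual"},
#         "Item": {"on_update": "totall.api.update_actual_item"},
#     }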
@frappe.whitelist()
def update_actual(self, method):
    # m = str(self.name) + "item: " + str(self.item_code)
    # frappe.log_error(title="New Update Actual Qty", message=m)
    doc = frappe.get_doc("Item", self.item_code)
    if self.warehouse == 'GENERAL - SAT':
        doc.actual = self.actual_qty
        doc.reorder = float(self.actual_qty) - float(self.stock_maximo)
        doc.save()
        frappe.errprint('Items actualizado')
# RG - In hooks -> events we have this method for "Item": "on_update": "totall.api.update_actual"
# RG - What it does is update the reorder point (actual - stock maximo) so it shows in the Max report of the Item Doctype
@frappe.whitelist()
def update_actual_item(self, method):
    # doc = frappe.get_doc("Item", self.name)
    frappe.errprint('asas')
    reorder = float(self.actual) - float(self.stock_maximo)
    frappe.db.sql("UPDATE tabItem set reorder=%s WHERE name = %s", (str(reorder), self.name))
    # self.save()
    # frappe.msgprint('El articulo ha sido actualizado.')
# RG - This is a method we ran manually to update the Actual and Reorder quantities on the Item (taking stock maximo into account)
# RG - TODO - put a similar method in hooks > Events > Item > on_update so it recalculates
@frappe.whitelist()
def actual():
    # doc = frappe.get_doc("Item", 'GUM78-ARE')
    # doc.save()
    items = frappe.get_all('Bin', filters={'warehouse': 'GENERAL - SAT'}, fields=['name', 'actual_qty', 'item_code'])
    for i in items:
        if 0 < i.actual_qty < 100:
            doc = frappe.get_doc("Item", i.item_code)
            doc.actual = i.actual_qty
            doc.reorder = float(i.actual_qty) - float(doc.stock_maximo)
            doc.save()
            frappe.errprint(i.item_code + ' actual: ' + str(i.actual_qty) + ' max: ' + str(doc.stock_maximo))
@frappe.whitelist()
def actualizar():
    # to update all the items, I ran the queries from the console
    # items = frappe.get_all('Bin', filters={'actual_qty': '0'}, fields=['item_code'])
    items = frappe.db.sql("SELECT name from tabItem WHERE actual IS NULL")
    for i in items:
        item = frappe.db.sql("SELECT actual from tabItem WHERE name = %s", i)
        # item = frappe.db.sql("UPDATE tabItem set actual=0 WHERE name = %s", i)
        frappe.errprint(item)
    # r = frappe.get_doc("Item", items[1]['item_code'])
    # frappe.errprint(items)
    # set to 0 ALL the items that have null - you can frappe.db.sql('UPDATE')
    # update item.reorder = float(i.actual) - float(doc.stock_maximo) for every Item where actual == 0
@frappe.whitelist()
def borrar():
    return frappe.db.sql("DELETE from `tabBin` where warehouse != 'GENERAL - SAT' and actual_qty = 0")


@frappe.whitelist()
def atrasado():
    frappe.db.sql("UPDATE `tabSales Invoice` set outstanding_amount = 0, status = 'Paid' where outstanding_amount like '0.%' or outstanding_amount like '-%%'")
    frappe.db.sql("UPDATE `tabPurchase Invoice` set outstanding_amount = 0, status = 'Paid' where outstanding_amount like '0.%' or outstanding_amount like '-%%'")
    frappe.db.sql("UPDATE `tabSales Invoice` a left join `tabCustomer` b on a.customer = b.customer_name set b.atrasado = DATEDIFF(CURDATE(), a.posting_date), b.factura = a.name where a.status = 'Overdue' or a.status = 'Unpaid'")
    frappe.db.sql("UPDATE `tabCustomer` set congelado = 1, credit_limit = 1 where atrasado >= 40 and clave like '%%CC%%'")
    frappe.db.sql("UPDATE `tabCustomer` set congelado = 0, credit_limit = 0 where atrasado < 40 and clave like '%%CC%%'")
    frappe.db.sql("UPDATE `tabSales Invoice` a left join `tabCustomer` b on a.customer = b.customer_name set b.atrasado = 0 where a.customer not in (Select customer from `tabSales Invoice` where status = 'Overdue' or status = 'Unpaid')")
    frappe.db.sql("update `tabPayment Entry` INNER JOIN (SELECT party, MAX(creation) AS 'tranc_date' FROM `tabPayment Entry` where party_type = 'customer' AND `tabPayment Entry`.name not like 'AJUSTE%' GROUP BY party) as max_creation ON `tabPayment Entry`.party = max_creation.party AND `tabPayment Entry`.creation = max_creation.tranc_date left JOIN `tabCustomer` b on max_creation.party = b.customer_name set b.latest_payment = `tabPayment Entry`.name, b.date_latest_payment = `tabPayment Entry`.creation ")
    frappe.db.sql("update `tabPayment Entry` INNER JOIN (SELECT party, MAX(creation) AS 'tranc_date' FROM `tabPayment Entry` where party_type = 'supplier' AND `tabPayment Entry`.name not like 'AJUSTE%' GROUP BY party) as max_creation ON `tabPayment Entry`.party = max_creation.party AND `tabPayment Entry`.creation = max_creation.tranc_date left JOIN `tabSupplier` b on max_creation.party = b.supplier_name set b.latest_payment = `tabPayment Entry`.name, b.date_latest_payment = `tabPayment Entry`.creation ")
@frappe.whitelist()
def factura_global():
    frappe.db.sql("UPDATE `tabCFDI` set grand_total = total")


@frappe.whitelist()
def sin_timbrar():
    frappe.db.sql("UPDATE `tabSales Invoice` set sin_timbrar = DATEDIFF(CURDATE(), creation) where cfdi_status='Sin Timbrar'")


@frappe.whitelist()
def nuevas_facturas():
    anteriores = frappe.db.sql("""SELECT name,creation,date_sub(NOW(),INTERVAL 5 HOUR),TIMESTAMPDIFF(HOUR,creation,date_sub(NOW(),INTERVAL 5 HOUR)) FROM `tabSales Invoice` WHERE docstatus = %s AND TIMESTAMPDIFF(HOUR,creation,date_sub(NOW(),INTERVAL 5 HOUR)) > 24""", (0), as_dict=1)
    for a in anteriores:
        frappe.db.sql("""UPDATE `tabSales Invoice` SET docstatus = 2, observaciones ='Factura cancelada por tiempo de espera excedido' WHERE name =%s""", (a.name), as_dict=1)
@frappe.whitelist()
def genera_cotizacion(name='PUR-SQTN-2021-00001'):
    si = frappe.get_doc('Supplier Quotation', name)
    articulosproveedor = si.items
    articulosventa = {}
    margen = si.margen
    # return
    frappe.errprint(margen)
    doc = frappe.new_doc('Quotation')
    articulosventa = doc.items
    for a in articulosproveedor:
        a.amount = a.amount * margen
        a.valuation_rate = a.rate
        for b in articulosventa:
            b.item_code = a.item_code
            b.amount = a.amount * margen
    doc.save()
    frappe.msgprint('Cotizacion Generada')
    # doc.append("items", {
    #     "item_code": si.items[0].item_code,
    #     "qty": 1,
    #     "precio_de_venta": si.monto / 1.16,
    #     "monto": si.monto / 1.16,
    #     "precio_unitario_neto": si.monto / 1.16,
    #     "precio_neto": si.monto / 1.16,
    #     "tax": 16,
    #     "impuestos_totales": (si.monto / 1.16) * 0.16
    # })
    #
    # doc.append("si_sustitucion", {
    #     "tipo_documento": "Sales Invoice",
    #     "sales_invoice": si.name,
    #     "uuid": si.uuid,
    #     "valor": si.monto
    # })
@frappe.whitelist()
def boton(name):
    pos = frappe.get_doc('POS Invoice', name)
    doc = frappe.new_doc('Sales Invoice')
    doc.items = pos.items
    doc.customer = pos.customer
    doc.perfil_facturacion = 'TICKET'
    doc.is_pos = 1
    doc.update_stock = 1
    doc.naming_series = 'TICKET'
    doc.append("payments", {
        "mode_of_payment": 'Efectivo',
        "account": '101.01 - Caja y efectivo - SAT',
        "amount": pos.grand_total,
        "type": 'Cash',
        "base_amount": pos.base_grand_total
    })
    doc.save()
    doc.submit()
    frappe.errprint(doc.name)
@frappe.whitelist()
def update_payment_entry(name):
    doc = frappe.get_doc('Payment Entry', name)
    references = doc.references
    gran_total_original = 0
    for r in references:
        si = frappe.get_doc('Sales Invoice', r.reference_name)
        gran_total_original += round(si.monto_pendiente, 2)
        frappe.db.set_value("Payment Entry Reference", r.name, 'monto_pendiente', si.monto_pendiente)
    # frappe.db.set_value("Payment Entry", doc.name, 'total_original', gran_total_original)
    frappe.errprint(gran_total_original)
@frappe.whitelist()
def saldos_cero():
    frappe.db.sql("""update `tabGL Entry` a LEFT JOIN `tabPayment Entry` b ON a.voucher_no = b.name SET a.credit_in_account_currency = 0 , a.credit = 0 where b.unallocated_amount = a.credit_in_account_currency AND b.unallocated_amount = a.credit AND b.unallocated_amount > 0 AND b.docstatus = 1""")
    frappe.db.sql("""update `tabSales Invoice` SET outstanding_amount = 0 WHERE outstanding_amount < 0""")
@frappe.whitelist()
def crear_pago(name):
    doc = frappe.get_doc('CFDI Nota de Credito', name)
    cliente = frappe.get_doc('Customer', doc.customer)
    today = date.today()
    frappe.errprint(doc.name)
    frappe.errprint(doc.conversion_rate)
    pii = frappe.new_doc("Payment Entry")
    pii.mode_of_payment = 'Transferencia bancaria'
    # pii.payment_type = 'Pay'
    pii.party_type = 'Customer'
    pii.party = doc.customer
    pii.posting_date = today.strftime("%Y-%m-%d")  # Daniel Acosta: a Fiscal Year error was showing up when generating the payment entry
    # if doc.forma_de_pago != '01':
    #     pii.paid_from = company.default_cash_account
    # else:
    #     pii.paid_from = company.default_bank_account
    # pii.paid_to = company.default_receivable_account  # frappe.get_value("Company", doc.company, 'default_receivable_account')
    # pii.paid_to_account_currency = doc.currency
    # pii.paid_to = doc.paid_to
    pii.reference_no = doc.name
    pii.naming_series = 'NC-'
    # RG - Customers with currency != MXN can only make transactions in their native currency (e.g. USD)
    # RG - Customers without a default_currency, or with MXN, can transact in any currency
    # RG - Payment entries derived from the automatic discounts can NOT be stamped (CFDI timbrado).
    pii.paid_amount = float(doc.total) * float(doc.conversion_rate)
    pii.source_exchange_rate = 1
    pii.target_exchange_rate = 1
    pii.received_amount = float(doc.total) * float(doc.conversion_rate)
    company = frappe.get_doc('Company', pii.company)
    pii.paid_to = '102.01 - Bancos nacionales - ' + company.abbr
    # frappe.errprint(float(doc.total) * float(doc.conversion_rate))
    for i in doc.si_sustitucion:
        pii.append('references', {
            'reference_doctype': 'Sales Invoice',
            'reference_name': i.sales_invoice,
            'allocated_amount': float(i.valor) * float(doc.conversion_rate),
            'pagado': float(i.valor) * float(doc.conversion_rate),  # change made by Santiago
        })
    pii.flags.ignore_permissions = True
    pii.flags.ignore_mandatory = True
    frappe.errprint(pii.party)
    frappe.errprint(pii.paid_to)
    # pii.flags.ignore_validate = True
    pii.submit()
    frappe.db.set_value("CFDI Nota de Credito", name, 'pago', pii.name)
    doc.pago = pii.name
    frappe.msgprint('Devolucion monetaria generada : ' + '<a href="#Form/Payment Entry/' + pii.name + '" target="_blank">' + pii.name + '</a>')
    doc.reload()
@frappe.whitelist()
def create_stock_entry(name):
    doc = frappe.get_doc('CFDI Nota de Credito', name)
    if doc.tipo_de_factura == "Devolucion":
        pii = frappe.new_doc("Stock Entry")
        pii.stock_entry_type = "Material Receipt"
        pii.naming_series = "STE-"
        for i in doc.items:
            pii.append('items', {
                'item_code': i.item_code,
                'qty': i.qty,
                'uom': i.stock_uom,
                't_warehouse': i.warehouse,
            })
        pii.flags.ignore_permissions = True
        pii.submit()
        frappe.msgprint('Devolucion de Inventario generada : ' + '<a href="#Form/Stock Entry/' + pii.name + '" target="_blank">' + pii.name + '</a>')
        frappe.errprint('HECHO')
# RG - Create Payment
@frappe.whitelist()
def cancelar_pago(name):
    doc = frappe.get_doc('CFDI Nota de Credito', name)
    pii = frappe.get_doc('Payment Entry', doc.pago)
    pii.cancel()
@frappe.whitelist()
def quitar_tags_item_description():
    frappe.db.sql("""update `tabSales Order Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""")
    frappe.db.sql("""update `tabSales Order Item` set description = (replace(description,'</p></div>',''))""")
    frappe.db.sql("""update `tabSales Order Item` set description = (replace(description,'</p><p>',' '))""")
    frappe.db.sql("""update `tabSales Invoice Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""")
    frappe.db.sql("""update `tabSales Invoice Item` set description = (replace(description,'</p></div>',''))""")
    frappe.db.sql("""update `tabSales Invoice Item` set description = (replace(description,'<div><p>',''))""")
    frappe.db.sql("""update `tabSales Invoice Item` set description = (replace(description,'</p><p>',' '))""")
    frappe.db.sql("""update `tabSales Invoice Item` set description = (replace(description,'<br>',''))""")
    frappe.db.sql("""update `tabDelivery Note Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""")
    frappe.db.sql("""update `tabDelivery Note Item` set description = (replace(description,'</p></div>',''))""")
    frappe.db.sql("""update `tabDelivery Note Item` set description = (replace(description,'</p><p>',' '))""")
    frappe.errprint('Tags Eliminados')


@frappe.whitelist()
def quitar_tags_item():
    frappe.db.sql("""update `tabQuotation Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""")
    frappe.db.sql("""update `tabQuotation Item` set description = (replace(description,'</p></div>',''))""")
    frappe.db.sql("""update `tabQuotation Item` set description = (replace(description,'</p><p>',' '))""")
    frappe.db.sql("""update `tabQuotation Item` set description = (replace(description,'<div><p>',' '))""")
    frappe.db.sql("""update `tabQuotation Item` set description = (replace(description,'<br>',' '))""")
    frappe.db.sql("""update `tabQuotation Item` set description = (replace(description,'<strong>',' '))""")
    frappe.db.sql("""update `tabQuotation Item` set description = (replace(description,'</strong>',' '))""")
    frappe.db.sql("""update `tabSales Order Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""")
    frappe.db.sql("""update `tabSales Order Item` set description = (replace(description,'</p></div>',''))""")
    frappe.db.sql("""update `tabSales Order Item` set description = (replace(description,'</p><p>',' '))""")


@frappe.whitelist()
def quitar_tags():
    frappe.db.sql("""update `tabPurchase Order Item` set description = (replace(description,'<div class="ql-editor read-mode"><p>',''))""")
    frappe.db.sql("""update `tabPurchase Order Item` set description = (replace(description,'</p></div>',''))""")
    frappe.db.sql("""update `tabPurchase Order Item` set description = (replace(description,'</p><p>',' '))""")
    frappe.db.sql("""update `tabPurchase Order Item` set description = (replace(description,'<strong>',' '))""")
    frappe.db.sql("""update `tabPurchase Order Item` set description = (replace(description,'</strong><strong>',' '))""")
@frappe.whitelist()
def get_chart_data():
    query = """SELECT str_to_date(concat(date_format(`tabSales Invoice`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d') AS `posting_date`, sum(`tabSales Invoice`.`base_grand_total`) AS `sum`
               FROM `tabSales Invoice`
               WHERE (`tabSales Invoice`.`docstatus` = 1
               AND `tabSales Invoice`.`metodo_pago` = 'PPD' AND DATE(`tabSales Invoice`.`posting_date`) >= '2021-04-01')
               GROUP BY str_to_date(concat(date_format(`tabSales Invoice`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d')
               ORDER BY str_to_date(concat(date_format(`tabSales Invoice`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d') ASC
            """
    data = frappe.db.sql(query, as_list=1)
    datasets = []
    labels = []
    for d in data:
        labels.append(d[0])
        datasets.append(d[1])
    query2 = """SELECT str_to_date(concat(date_format(`tabPayment Entry`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d') AS `creation`, sum(`tabPayment Entry`.`paid_amount`) AS `sum`
                FROM `tabPayment Entry`
                WHERE `tabPayment Entry`.`docstatus` = 1 AND `tabPayment Entry`.`payment_type` = 'Receive'
                GROUP BY str_to_date(concat(date_format(`tabPayment Entry`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d')
                ORDER BY str_to_date(concat(date_format(`tabPayment Entry`.`posting_date`, '%Y-%m'), '-01'), '%Y-%m-%d') ASC
             """
    data2 = frappe.db.sql(query2, as_list=1)
    datapoints = []
    labels2 = []
    for d in data2:
        labels2.append(d[0])
        datapoints.append(d[1])
    return {
        "labels": labels,
        "datasets": [{
            "name": _("Ventas a Credito"),
            "values": datasets,
            "chartType": 'bar'
        },
        {
            "name": _("Pagos"),
            "values": datapoints,
            "chartType": 'line'
        }],
        "type": "axis-mixed"
    }
@frappe.whitelist()
def pi_monto_pendiente(name):
    pi = frappe.get_doc('Purchase Invoice', name)
    frappe.db.set_value("Purchase Invoice", name, 'monto_pendiente', pi.grand_total)


@frappe.whitelist()
def pago_proveedor_usd(name):
    pe = frappe.get_doc('Payment Entry', name)
    if pe.company == 'Sillas and Chairs':
        per = frappe.get_list('Payment Entry Reference', filters={'parent': pe.name})
        for r in per:
            frappe.db.set_value("Purchase Invoice", r, 'monto_pendiente', pe.paid_amount)
@frappe.whitelist()
def restore_monto_pendiente(name):
    doc = frappe.get_doc('Payment Entry', name)
    for i in doc.references:
        frappe.db.set_value('Sales Invoice', i.reference_name, 'monto_pendiente', i.monto_pendiente)
        frappe.errprint(i.reference_name)
        frappe.errprint(i.monto_pendiente)


@frappe.whitelist()
def clave(name):
    numero = frappe.db.sql("""SELECT max(clave) + 1 as "clave" from `tabCustomer` ORDER BY creation desc """)
    c = frappe.get_doc('Customer', name)
    if c.clave is None:
        # frappe.db.sql returns a list of row tuples, so unwrap the scalar
        frappe.db.set_value("Customer", c.name, 'clave', numero[0][0])
    # frappe.db.sql("""UPDATE `tabCustomer` set cuenta_sat = CONCAT('110410', clave) WHERE cuenta_sat IS null """)
| 44.718133 | 510 | 0.681829 | 3,425 | 24,908 | 4.845255 | 0.15854 | 0.039048 | 0.037782 | 0.050196 | 0.445134 | 0.408677 | 0.353299 | 0.333896 | 0.315637 | 0.297077 | 0 | 0.010188 | 0.152762 | 24,908 | 556 | 511 | 44.798561 | 0.776146 | 0.220853 | 0 | 0.217507 | 0 | 0.068966 | 0.464258 | 0.093619 | 0 | 0 | 0 | 0.001799 | 0 | 1 | 0.092838 | false | 0 | 0.02122 | 0.007958 | 0.137931 | 0.055703 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8215508f627bf3d1b4d448dabf1e8742f974b661 | 2,701 | py | Python | update.py | cccs-rs/apiscout | 27aa5cb6991a39df03695576b351a815146fd270 | [
"BSD-2-Clause"
] | 169 | 2017-04-10T14:43:54.000Z | 2022-03-20T09:35:26.000Z | update.py | cccs-rs/apiscout | 27aa5cb6991a39df03695576b351a815146fd270 | [
"BSD-2-Clause"
] | 25 | 2017-05-19T08:53:58.000Z | 2021-07-31T14:09:37.000Z | update.py | cccs-rs/apiscout | 27aa5cb6991a39df03695576b351a815146fd270 | [
"BSD-2-Clause"
] | 44 | 2017-04-10T16:22:55.000Z | 2021-11-09T13:45:08.000Z | import re
import os
import sys

import requests

try:
    import config
except ImportError:
    print("create a config.py based on template.config.py and set your Malpedia API token!")
    sys.exit()


def delete_existing_dbs():
    """ delete potentially existing old apivector db files """
    for filename in os.listdir("dbs"):
        if re.search(r"\d{4}-\d\d-\d\d-apivectors-v\d+\.csv", filename):
            os.remove("dbs" + os.sep + filename)


def get_newest_db_version():
    """ find ApiVector DB files and return newest version number found """
    max_version = 0
    for filename in os.listdir("dbs"):
        version = re.search(r"\d{4}-\d\d-\d\d-apivectors-v(?P<version_number>\d+)\.csv", filename)
        if version:
            max_version = max(max_version, int(version.group("version_number")))
    return max_version
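

# Illustration of the filename convention the regexes above expect (the
# sample name is invented): in a dbs directory containing
# "2017-06-30-apivectors-v7.csv", get_newest_db_version() returns 7.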


def download_apivector_db():
    result = {
        "filename": "",
        "content": "",
        "version": 0
    }
    response = requests.get(
        'https://malpedia.caad.fkie.fraunhofer.de/api/list/apiscout/csv',
        headers={'Authorization': 'apitoken ' + config.APITOKEN},
    )
    if response.status_code == 200:
        result["filename"] = response.headers['Content-Disposition'].split("=")[1].strip()
        result["content"] = response.text
        version = re.search(r"\d{4}-\d\d-\d\d-apivectors-v(?P<version_number>\d+)\.csv", result["filename"])
        if version:
            # store the numeric version, consistent with get_newest_db_version()
            result["version"] = int(version.group("version_number"))
    else:
        print("Failed to download ApiVector DB, response code: ", response.status_code)
    return result


def check_malpedia_version():
    remote_version = 0
    response = requests.get(
        'https://malpedia.caad.fkie.fraunhofer.de/api/get/version'
    )
    if response.status_code == 200:
        response_json = response.json()
        remote_version = response_json["version"]
    else:
        print("Failed to check Malpedia version, response code: ", response.status_code)
    return remote_version


def main():
    db_version = get_newest_db_version()
    malpedia_version = check_malpedia_version()
    if db_version < malpedia_version:
        apivector_update = download_apivector_db()
        if apivector_update["version"]:
            delete_existing_dbs()
            update_db_path = "dbs" + os.sep + apivector_update["filename"]
            with open(update_db_path, "w") as fout:
                fout.write(apivector_update["content"])
            print("Downloaded and stored ApiVector DB file: ", update_db_path)
        else:
            print("ApiVector update download failed.")
    else:
        print("Your ApiVector DB is the most recent ({})".format(malpedia_version))


if __name__ == "__main__":
    sys.exit(main())
| 32.939024 | 108 | 0.640874 | 340 | 2,701 | 4.923529 | 0.302941 | 0.010753 | 0.010753 | 0.017921 | 0.280765 | 0.224612 | 0.151732 | 0.151732 | 0.151732 | 0.151732 | 0 | 0.006217 | 0.225842 | 2,701 | 81 | 109 | 33.345679 | 0.794357 | 0.042207 | 0 | 0.151515 | 0 | 0.030303 | 0.277886 | 0.05752 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075758 | false | 0 | 0.075758 | 0 | 0.19697 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
82175e8b7a22ac93f4e8ed01903f37b29cf95227 | 1,242 | py | Python | fate/prompt.py | Mattias1/fate | 10266406336bc4c683ff5b23af32ac3447f7f054 | [
"MIT"
] | null | null | null | fate/prompt.py | Mattias1/fate | 10266406336bc4c683ff5b23af32ac3447f7f054 | [
"MIT"
] | null | null | null | fate/prompt.py | Mattias1/fate | 10266406336bc4c683ff5b23af32ac3447f7f054 | [
"MIT"
] | null | null | null | from .mode import Mode
from .document import Document

Document.promptinput = ''


class Prompt(Mode):
    def __init__(self, document, callback=None):
        Mode.__init__(self, document, callback)
        self.inputstring = ''
        self.start(document)

    def processinput(self, document, userinput):
        if isinstance(userinput, str):
            key = userinput
            if key == 'Cancel':
                self.stop(document)
            elif key == '\n':
                document.promptinput = self.inputstring
                self.stop(document)
            elif len(key) > 1:
                # key not supported
                pass
            else:
                self.inputstring += key
        else:
            raise NotImplementedError('To be done.')

    def start(self, doc):
        Mode.start(self, doc)
        doc.OnPrompt.fire(doc)

    def stop(self, doc):
        Mode.stop(self, doc)
        doc.OnPrompt.fire(doc)


def prompt(promptstring='>'):
    """Constructor for the prompt mode."""
    class PromptWithString(Prompt):
        def __init__(self, document, callback=None):
            Prompt.__init__(self, document, callback)
            self.promptstring = promptstring
    return PromptWithString
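

# Hedged usage sketch (the mode/document plumbing comes from the rest of
# the fate codebase and is assumed here, not shown):
#
#     SearchPrompt = prompt('/')    # Prompt subclass with promptstring '/'
#     SearchPrompt(doc)             # enter prompt mode on a document
#     # ...each key is fed to processinput(); Enter ('\n') commits...
#     query = doc.promptinput       # the committed input string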
| 28.883721 | 55 | 0.573269 | 125 | 1,242 | 5.568 | 0.352 | 0.086207 | 0.091954 | 0.137931 | 0.25 | 0.16954 | 0.08046 | 0 | 0 | 0 | 0 | 0.001193 | 0.325282 | 1,242 | 42 | 56 | 29.571429 | 0.829356 | 0.041063 | 0 | 0.235294 | 0 | 0 | 0.016878 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0.029412 | 0.058824 | 0 | 0.323529 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
82186e13305a89984ea44c868e824fbefce44ba3 | 1,677 | py | Python | tf2jax/_src/numpy_compat_test.py | deepmind/tf2jax | fb5388656344a7e5bb8e5635e82a3d93c7bc9d8c | [
"Apache-2.0"
] | 6 | 2022-03-18T12:09:11.000Z | 2022-03-26T14:16:35.000Z | tf2jax/_src/numpy_compat_test.py | deepmind/tf2jax | fb5388656344a7e5bb8e5635e82a3d93c7bc9d8c | [
"Apache-2.0"
] | null | null | null | tf2jax/_src/numpy_compat_test.py | deepmind/tf2jax | fb5388656344a7e5bb8e5635e82a3d93c7bc9d8c | [
"Apache-2.0"
] | 1 | 2022-03-18T12:09:23.000Z | 2022-03-18T12:09:23.000Z | # Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf2jax."""
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
from tf2jax._src import numpy_compat
_dtypes = [
tf.bool, tf.uint8, tf.uint16, tf.uint32, tf.uint64, tf.int8, tf.int16,
tf.int32, tf.int64, tf.bfloat16, tf.float16, tf.float32, tf.float64,
tf.complex64, tf.complex128
]
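

# Orientation note (hedged; the authoritative mapping lives in numpy_compat):
# both maps under test are keyed by the tf dtypes above, e.g.
# tf_to_np_dtypes[tf.float32] is expected to be np.float32, while
# tf.bfloat16 maps to jnp.bfloat16 under either backend, as the test below
# asserts.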


class NumpyCompatTest(parameterized.TestCase):

  @parameterized.named_parameters(
      ("np", np, numpy_compat.tf_to_np_dtypes),
      ("jnp", jnp, numpy_compat.tf_to_jnp_dtypes),
  )
  def test_dtype_conversion(self, np_module, dtype_map):
    self.assertEqual(len(_dtypes), len(dtype_map))
    for src in _dtypes:
      dst = "bool_" if src.name == "bool" else src.name
      if src.name == "bfloat16":
        self.assertIs(dtype_map[src], jnp.bfloat16)
      else:
        self.assertIs(dtype_map[src], getattr(np_module, dst))


if __name__ == "__main__":
  absltest.main()
| 33.54 | 80 | 0.692308 | 235 | 1,677 | 4.808511 | 0.510638 | 0.053097 | 0.023009 | 0.028319 | 0.040708 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029286 | 0.165176 | 1,677 | 49 | 81 | 34.22449 | 0.777857 | 0.409064 | 0 | 0 | 0 | 0 | 0.030864 | 0 | 0 | 0 | 0 | 0 | 0.115385 | 1 | 0.038462 | false | 0 | 0.230769 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
821a8e57cdc8b71c46b5b924928e6ffbc5170020 | 329 | py | Python | delphi/apps/rest_api/__init__.py | mwdchang/delphi | c6177f2d614118883eaaa7f5300f3e46f10ddc7e | [
"Apache-2.0"
] | null | null | null | delphi/apps/rest_api/__init__.py | mwdchang/delphi | c6177f2d614118883eaaa7f5300f3e46f10ddc7e | [
"Apache-2.0"
] | null | null | null | delphi/apps/rest_api/__init__.py | mwdchang/delphi | c6177f2d614118883eaaa7f5300f3e46f10ddc7e | [
"Apache-2.0"
] | 1 | 2019-07-18T19:13:13.000Z | 2019-07-18T19:13:13.000Z | from flask import Flask
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()


def create_app(debug=False):
    # deferred import (commonly done to avoid circular imports with the blueprint module)
    from delphi.apps.rest_api.api import bp

    app = Flask(__name__)
    app.config.from_object("delphi.apps.rest_api.config")
    app.debug = debug
    db.init_app(app)
    app.register_blueprint(bp)
    return app
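

# Hedged usage sketch (the runner below is illustrative, not part of this
# package):
#
#     from delphi.apps.rest_api import create_app
#
#     app = create_app(debug=True)
#     app.run()  # serves the blueprint registered in create_app()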
| 21.933333 | 57 | 0.735562 | 49 | 329 | 4.714286 | 0.44898 | 0.077922 | 0.121212 | 0.147186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173252 | 329 | 14 | 58 | 23.5 | 0.849265 | 0 | 0 | 0 | 0 | 0 | 0.082067 | 0.082067 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.454545 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
821e77dee58f0bcdb435a5eecbf66592783bf963 | 4,524 | py | Python | rob/robocopy.py | dan-osull/rob | 25f2781cc5124570a04a48b56ec7d7f802b0650b | [
"MIT"
] | 3 | 2022-02-08T20:10:21.000Z | 2022-02-08T20:18:54.000Z | rob/robocopy.py | dan-osull/rob | 25f2781cc5124570a04a48b56ec7d7f802b0650b | [
"MIT"
] | null | null | null | rob/robocopy.py | dan-osull/rob | 25f2781cc5124570a04a48b56ec7d7f802b0650b | [
"MIT"
] | null | null | null | import os
import subprocess
from dataclasses import dataclass
from pathlib import WindowsPath
from time import sleep
from typing import Optional, Sequence

from click import ClickException
from rich.progress import Progress

import rob.console as con
import rob.filesystem


@dataclass
class RobocopyResults:
    options: list[str]
    # Using Sequence because "list" and other mutable container types are
    # considered "invariant", so the contained type needs to match exactly.
    # https://github.com/microsoft/pyright/issues/130
    errors: Sequence[Optional[str]]
    stats: Sequence[Optional[str]]


def parse_robocopy_output(
    output: str,
) -> RobocopyResults:
    output_list = output.split("\n")
    output_list = [line for line in output_list if line]
    divider_idx = []
    for index, line in enumerate(output_list):
        # 50 chars long. Finds dividers in output, which are 78/79 chars.
        if "--------------------------------------------------" in line:
            divider_idx.append(index)
    options = output_list[divider_idx[1] + 1 : divider_idx[2]]
    if len(divider_idx) == 3:
        errors = output_list[divider_idx[2] + 1 :]
        stats = []
    else:
        errors = output_list[divider_idx[2] + 1 : divider_idx[3]]
        stats = output_list[divider_idx[3] + 1 :]
    return RobocopyResults(
        options=options,
        errors=errors,
        stats=stats,
    )
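

# Parsing contract illustrated (hedged; robocopy's real banner is longer):
# the output is split on the >=50-dash divider lines. The slice between the
# 2nd and 3rd dividers becomes .options; with exactly three dividers,
# everything after the 3rd is .errors and .stats is empty; with four, the
# 3rd..4th span is .errors and the tail after the 4th is .stats.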


def run_robocopy(
    source: WindowsPath,
    target: WindowsPath,
    dir_size_bytes: Optional[int] = None,
    dry_run: bool = False,
    copy_permissions: bool = False,
    quiet: bool = False,
) -> None:
    msg = f"Copying data from {con.style_path(source)} to {con.style_path(target)}"
    if not dir_size_bytes:
        dir_size_bytes = rob.filesystem.get_dir_size(source)
    if target.exists():
        con.print_(msg)
        raise ClickException(f"{target} already exists")
    if dry_run:
        con.print_(msg, end="")
        con.print_skipped()
        return
    if not quiet:
        con.print_(msg)
    robocopy_exe = (
        WindowsPath(os.environ["SystemRoot"])
        .joinpath("system32/robocopy.exe")
        .resolve()
    )
    robocopy_args = [
        str(robocopy_exe),
        str(source),
        str(target),
        "/E",    # copy subdirectories, including Empty ones.
        "/MT",   # Do multi-threaded copies with n threads (default 8).
        "/R:0",  # number of Retries on failed copies: default 1 million.
        "/NDL",  # No Directory List - don't log directory names.
        "/NFL",  # No File List - don't log file names.
        "/NP",   # No Progress - don't display percentage copied.
    ]
    if copy_permissions:
        robocopy_args.append(
            # /COPY flags: D=Data, A=Attributes, T=Timestamps, X=Skip alt data streams,
            # S=Security=NTFS ACLs, O=Owner info, U=aUditing info
            "/COPY:DATSO"
        )
    proc = subprocess.Popen(
        args=robocopy_args,
        stdout=subprocess.PIPE,
        # stderr included for completeness, robocopy doesn't seem to use it
        stderr=subprocess.STDOUT,
        text=True,
    )
    while proc.poll() is None:
        # "is None" so that returncode 0 breaks loop
        # 0: No errors occurred, and no copying was done.
        #    The source and destination directory trees are completely synchronized.
        # 1: One or more files were copied successfully (that is, new files have arrived).
        # https://ss64.com/nt/robocopy-exit.html
        if not quiet:
            with Progress(auto_refresh=False, transient=True) as progress:
                task_id = progress.add_task(
                    "[green]Copying data...[/green]", total=dir_size_bytes
                )
                progress.update(task_id, completed=rob.filesystem.get_dir_size(target))
                progress.refresh()
        sleep(2)
    output = proc.stdout.read()  # type: ignore
    # Exit code cannot be trusted as, for example, this error:
    #   ERROR 5 (0x00000005) Copying NTFS Security to Destination Directory
    # ...can be present despite returncode 0, so let's look for errors ourselves
    robocopy_results = parse_robocopy_output(output)
    if robocopy_results.errors:
        raise ClickException(f"Robocopy: {str(robocopy_results.errors)}")
    if dir_size_bytes != rob.filesystem.get_dir_size(target):
        raise ClickException("Source and target folder sizes do not match. Aborting.")
    if not quiet:
        con.print_("[green]Data copy complete[/green]")
| 34.8 | 90 | 0.6355 | 566 | 4,524 | 4.971731 | 0.431095 | 0.031983 | 0.021322 | 0.028429 | 0.070007 | 0.057214 | 0.044776 | 0.024876 | 0 | 0 | 0 | 0.012496 | 0.257073 | 4,524 | 129 | 91 | 35.069767 | 0.824755 | 0.270115 | 0 | 0.050505 | 0 | 0 | 0.111009 | 0.045441 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020202 | false | 0 | 0.10101 | 0 | 0.181818 | 0.050505 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
821f4a58cdad24897457c7d61f286692df08d3ee | 31,892 | py | Python | code/resumesearch.py | outreachy/creative-works-and-scripts | 8cac43a649aefaa793b91ac34ef73e20194ca22a | [
"CC-BY-4.0"
] | 27 | 2019-07-18T04:44:29.000Z | 2022-02-15T21:15:09.000Z | code/resumesearch.py | outreachy/creative-works-and-scripts | 8cac43a649aefaa793b91ac34ef73e20194ca22a | [
"CC-BY-4.0"
] | 5 | 2019-08-08T21:15:39.000Z | 2022-02-22T01:38:55.000Z | code/resumesearch.py | outreachy/creative-works-and-scripts | 8cac43a649aefaa793b91ac34ef73e20194ca22a | [
"CC-BY-4.0"
] | 13 | 2020-01-13T11:38:45.000Z | 2022-01-25T17:48:29.000Z | #!/usr/bin/env python3
#
# Copyright 2017 Sarah Sharp <sharp@otter.technology>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script attempts to match skillset keywords in resumes with
# Outreachy projects. The skillset keyword lists are based on the
# Outreachy project list at:
# https://wiki.gnome.org/Outreachy/2017/MayAugust
#
# This program expects you to have created a directory with identically
# named PDF and text resume files. You can translate PDF files to text with:
# $ for i in `ls *.pdf`; do pdftotext $i; done
import argparse
import csv
import os
import re
import textwrap
#from fuzzywuzzy import fuzz
from enum import Enum
from collections import Counter
from shutil import copyfile
class outreachyProject:
    """Outreachy project name, description, keywords, and matching resume storage."""
    def __init__(self, name, short, description, keywords, printskip):
        self.name = name
        self.description = description
        self.keywords = keywords
        self.strongResumeMatches = []
        self.weakResumeMatches = []
        self.short = short
        self.printskip = printskip


class resumeFile:
    """Information relating to a text and pdf resume pair."""
    def __init__(self, path, textFileName, contents):
        self.path = path
        self.textFileName = textFileName
        self.pdfFileName = os.path.splitext(textFileName)[0] + '.pdf'
        self.contents = contents
        self.emails = re.findall(r'[\w.+-]+@[\w.-]+', contents)
        self.strongProjectMatches = []
        self.weakProjectMatches = []
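

# Examples for the email pattern above (addresses invented):
#   "jane.doe+cv@example.org"  -> extracted
#   "name at example dot com"  -> not extracted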


def readResumeFiles(directory):
    resumeFiles = []
    for f in [l for l in os.listdir(directory) if l.endswith('.txt') and
              not l.endswith('-email.txt') and
              not l.endswith('-email-tam.txt')]:
        with open(os.path.join(directory, f), 'r') as resume:
            contents = resume.read()
        resumeFiles.append(resumeFile(directory, f, contents))
    #print("Found", len(resumeFiles), "resume files")
    for r in resumeFiles:
        if len(r.emails) == 0:
            continue
        # build a one-line summary per resume (the print for it was removed)
        line = r.pdfFileName
        for email in r.emails:
            line = line + ' ' + email
        line = line + ' ' + str(len(r.contents))
    # The first email is usually the actual email.
    emails = [resume.emails[0] for resume in resumeFiles if resume.emails]
    edups = [item for item, count in Counter(emails).items() if count > 1]
    for email in edups:
        pdfs = [resume.pdfFileName for resume in resumeFiles if resume.emails and resume.emails[0] == email]
        print('Email duplicate:', email, ' '.join(pdfs))
    return resumeFiles


def searchForEmail(csvFile, resumeFiles):
    with open(csvFile, 'r') as csvFile:
        freader = csv.DictReader(csvFile, delimiter=',', quotechar='"')
        boothstops = []
        for row in freader:
            # Create a list of potential matches for the email we have.
            # Search through the list of emails in each PDF.
            # Use fuzzywuzzy to do a fuzzy search in case we misread an email.
            # No difference between pure match when I tried this, and going down to 80 only added false positives
            #m = [r for r in resumeFiles if len([email for email in r.emails if fuzz.ratio(row['Email'], email) > 90]) != 0]
            m = [r for r in resumeFiles if row['Email'] in r.emails]
            if len(m) == 0:
                continue
            files = set()
            for resume in m:
                files.add(resume.pdfFileName)
            boothstops.append((row['Email'], list(files)))
    return boothstops


projectsMay2017 = [
    #outreachyProject('Outreachy',
    #                 ['open source', 'free software', 'Linux', 'Unix', 'Solaris']),
    outreachyProject('Cadasta', 'a property rights tool',
                     'enhance user settings and create a user dashboard',
                     ['django'], []),
    outreachyProject('Cadasta', 'a property rights tool',
                     'add new login options',
                     ['django|oauth'], []),
    outreachyProject('Cadasta', 'a property rights tool',
                     'improve automated test coverage',
                     ['selenium'], []),
    outreachyProject('Ceph', 'a network filesystem',
                     'create a root cause analysis tool for Linux distributed systems',
                     ['linux', 'distributed systems'], ['linux', 'distributed systems']),
    outreachyProject('Ceph', 'a network filesystem',
                     'evaluate the performance of new reweight algorithms for balancing storage utilization',
                     ['statistics', 'storage', 'linux'], ['statistics', 'storage', 'linux']),
    outreachyProject('Ceph', 'a network filesystem',
                     'design a status dashboard to visualize Ceph cluster statistics',
                     ['python', 'linux', 'javascript', 'html5', 'css3'], []),
    outreachyProject('Ceph', 'a network filesystem',
                     'identify performance degradation in nodes and automate cluster response',
                     ['Linux', 'python', 'distributed systems'], []),
    outreachyProject('Ceph', 'a network filesystem',
                     'design a simplified database backend for the Ceph Object Gateway',
                     ['database', 'Linux', 'C\+\+'], ['database']),
    outreachyProject('Ceph', 'a network filesystem',
                     'port tests written in multiple languages to test the Amazon S3 storage protocol and Openstack Swift storage',
                     ['python', 'linux', 'storage'], ['storage']),
    outreachyProject('Debian', 'a Linux distribution',
                     'benchmark scientific packages for general and architecture specific builds',
                     ['linux', 'gcc'], ['linux']),
    outreachyProject('Debian', 'a Linux distribution',
                     'improve the Debian test database and website',
                     ['linux', 'python', 'sql', 'shell|bash|command-line'], ['linux', 'command-line']),
    outreachyProject('Debian', 'a Linux distribution',
                     'enhance the Debian test website',
                     ['html', 'css', 'linux', 'graphic'], ['linux', 'graphic']),
    outreachyProject('Debian', 'a Linux distribution',
                     'Add secure mail server support to FreedomBox (a web server for small machines)',
                     ['python', 'django', 'shell|bash|command-line'], ['command-line']),
    outreachyProject('Discourse', 'chat forum software',
                     'enhance their forum and chat web services',
                     ['rails', 'javascript|ember.js'], []),
    outreachyProject('Fedora', 'a Linux distribution',
                     'create a coloring book to explain technical concepts',
                     ['inkscape|scribus|storyboard|storyboarding|graphic design'], ['graphic design', 'storyboard', 'storyboarding']),
    outreachyProject('Fedora', 'a Linux distribution',
                     'improve Bodhi, the web-system that publishes updates for Fedora',
                     ['python', 'javascript|html|css|linux|fedora'], []),
    outreachyProject('GNOME', None,
                     'improve the recipes or maps applications',
                     ['gtk'], []),
    outreachyProject('Lagom', 'a microservices platform',
                     "create an online auction sample app to showcase Lagom's microservices",
                     ['java', 'scala|react|reactive'], ['react', 'reactive']),
    outreachyProject('Linux kernel', None,
                     'analyze memory resource release operators and fix Linux kernel memory bugs',
                     ['linux', 'operating systems', 'memory'], ['linux', 'operating systems', 'memory']),
    outreachyProject('Linux kernel', None,
                     'improve process ID allocation',
                     ['linux', 'operating systems', 'kernel'], ['linux', 'operating systems', 'kernel']),
    outreachyProject('Linux kernel', None,
                     'improve nftables (an in-kernel network filtration tool)',
                     ['linux', 'operating systems', 'networking'], ['linux', 'operating systems', 'networking']),
    outreachyProject('Linux kernel', None,
                     'write a driver for a sensor using the Industrial I/O interface',
                     ['linux', 'operating systems|robotics|embedded', 'C\+\+|C(?!\+\+)'],
                     ['linux', 'operating systems', 'robotics', 'embedded', 'c++']),
    outreachyProject('Linux kernel', None,
                     'improve documentation build system and translate docs into ReStructured Text format',
                     ['perl', 'python', 'operating systems'], ['operating systems']),
    outreachyProject('Mozilla', None,
                     None,
                     ['mozilla|firefox'], ['mozilla', 'firefox']),
    outreachyProject('OpenStack', 'software for cloud deployment and management',
                     'add continuous integration for OpenStack Identity Service (keystone) LDAP support',
                     ['python', 'shell|bash|command-line'], ['command-line']),
    outreachyProject('oVirt', 'virtualization management software',
                     'implement oVirt integration tests using Lago and the oVirt REST API',
                     ['python', 'rest'], ['rest']),
    outreachyProject('oVirt', 'virtualization management software',
                     'design an oVirt log analyzer for distributed systems',
                     ['python', 'linux', 'distributed systems'], ['distributed systems']),
    outreachyProject('oVirt', 'virtualization management software',
                     'rewrite oVirt UI dialogs in modern JavaScript technologies',
                     ['es6|react|redux'], []),
    outreachyProject('QEMU', 'hardware virtualization software',
                     'rework the QEMU audio backend',
                     ['C(?!\+\+)', 'audio'], ['audio']),
    outreachyProject('QEMU', 'hardware virtualization software',
                     'create a full and incremental disk backup tool',
                     ['C(?!\+\+)', 'python', 'storage'], ['storage']),
    outreachyProject('QEMU', 'hardware virtualization software',
                     "refactor the block layer's I/O throttling and write notifiers",
                     ['C(?!\+\+)', 'storage'], ['storage']),
    outreachyProject('QEMU', 'hardware virtualization software',
                     "code an emulated PCIe-to-PCI bridge",
                     ['pci|pcie'], ['pci', 'pcie']),
    outreachyProject('QEMU', 'hardware virtualization software',
                     "add x86 virtualization support on macOS using Hypervisor.framework",
                     ['C(?!\+\+)', 'mac', 'virtualization'], ['mac', 'virtualization']),
    outreachyProject('QEMU', 'hardware virtualization software',
                     "extend the current vhost-pci based inter-VM communication",
                     ['C(?!\+\+)', 'pci'], ['pci']),
    outreachyProject('Sugar Labs', 'a software-development and learning community',
                     'improve Music Blocks, an application for exploring fundamental musical concepts',
                     ['javascript|JS', 'music'], ['music']),
    outreachyProject('Wikimedia', 'a non-profit known for Wikipedia',
                     'write a Zotero translator and document the process',
                     ['javascript', 'documentation'], ['documentation']),
    outreachyProject('Wikimedia', 'a non-profit known for Wikipedia',
                     'improve and fix bugs in the quiz extension',
                     ['php', 'documentation'], ['documentation']),
    outreachyProject('Wikimedia', 'a non-profit known for Wikipedia',
                     'create user guides to help with translation outreach',
                     ['translation|localization'], ['translation', 'localization']),
    outreachyProject('Wikimedia', 'a non-profit known for Wikipedia',
                     'implement automatic edits on wikis connected to the Programs & Events Dashboard',
                     ['rails'], []),
    outreachyProject('Wikimedia', 'a non-profit known for Wikipedia',
                     'implement an automatic article feedback feature for the Programs & Events Dashboard',
                     ['rails'], []),
    outreachyProject('Wine', 'a tool to run Windows programs on Linux or BSD',
                     'implement a resource editor and dialog editor',
                     ['C(?!\+\+)', 'Windows', 'UI|UX'], ['windows', 'ui', 'ux']),
    outreachyProject('Wine', 'a tool to run Windows programs on Linux or BSD',
                     'implement missing D3DX9 APIs',
                     ['C(?!\+\+)', 'computer graphics'], []),
    outreachyProject('Wine', 'a tool to run Windows programs on Linux or BSD',
                     'implement Direct3D microbenchmarks',
                     ['C(?!\+\+)', 'opengl'], []),
    outreachyProject('Wine', 'a tool to run Windows programs on Linux or BSD',
                     'create automated game benchmarks',
                     ['C(?!\+\+)', 'game engine'], ['game engine']),
    outreachyProject('Wine', 'a tool to run Windows programs on Linux or BSD',
                     'port WineLib to a new architecture (such as PPC64, Sparc64, RISC-V, or x32)',
                     ['PPC|PowerPC|Sparc|Sparc64|RISC-V'], ['ppc', 'powerpc', 'sparc', 'sparc64', 'risc-v']),
    outreachyProject('Wine', 'a tool to run Windows programs on Linux or BSD',
                     'improve the AppDB website, which lists Wine support for Windows programs',
                     ['php', 'html', 'mysql'], []),
    outreachyProject('Xen Project', 'a virtualization platform',
                     'create golang bindings for libxl on the Xen hypervisor',
                     ['go', 'C(?!\+\+)'], []),
    outreachyProject('Xen Project', 'a virtualization platform',
                     'create rust bindings for libxl on the Xen hypervisor',
                     ['rust'], ['rust']),
    outreachyProject('Xen Project', 'a virtualization platform',
                     'enhance the KDD (Windows Debugger Stub) for the Xen hypervisor',
                     ['C(?!\+\+)', 'windows', 'kernel|debugger'], ['windows', 'debugger']),
    outreachyProject('Xen Project', 'a virtualization platform',
                     'fuzz test the Xen hypercall interface',
                     ['C(?!\+\+)', 'assembly', 'gcc'], []),
    outreachyProject('Xen Project', 'a virtualization platform',
                     'improve Mirage OS, a unikernel that runs on top of Xen',
                     ['ocaml'], []),
    outreachyProject('Xen Project', 'a virtualization platform',
                     'create a Xen code review dashboard',
                     ['sql', 'javascript', 'html5', 'java'], []),
    #outreachyProject('Xen Project', 'a virtualization platform',
    #                 'implement tools for code standards checking using clang-format',
    #                 ['clang']),
    outreachyProject('Xen Project', 'a virtualization platform',
                     'add more FreeBSD testing to osstest',
                     ['freebsd|bsd|openbsd|netbsd|dragonfly'], ['freebsd', 'bsd', 'openbsd', 'netbsd', 'dragonfly']),
    outreachyProject('Yocto', 'a tool for creating embedded Linux distributions',
                     'improve and document the Yocto autobuilder',
                     ['C(?!\+\+)', 'python', 'distro|linux|yocto|openembedded', 'embedded|robotics|beaglebone|beagle bone|minnow|minnowboard|arduino'],
                     ['distro', 'linux', 'yocto', 'embedded', 'robotics', 'beaglebone', 'beagle bone', 'minnow', 'minnowboard', 'arduino']),
]
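

# Hedged note on the keyword patterns above: each entry is a regex
# alternation, so 'shell|bash|command-line' matches any one of those words,
# and 'C(?!\+\+)' uses a negative lookahead to match "C" without also
# matching the "C" in "C++".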


# We have two types of resumes:
# 1. They matched *some* but not all of the important keywords for a project.
# 2. They matched all of the keywords we need.
def matchResumes(resumeFiles):
    for resume in resumeFiles:
        for project in projectsMay2017:
            matches = [set(re.findall(r'\b(?:' + keyword + r')\b', resume.contents, flags=re.IGNORECASE)) for keyword in project.keywords]
            # New syntax for me!
            # * takes a list and expands it to arguments to a function.
            # ** takes a dictionary and expands it to key-value arguments to a function.
            # union combines the list of sets and removes duplicates.
            keywords = set.union(*matches)
            if all(matches):
                resume.strongProjectMatches.append((project, keywords))
                project.strongResumeMatches.append(resume)
            elif any(matches):
                resume.weakProjectMatches.append((project, keywords))
                project.weakResumeMatches.append(resume)
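

# Worked example of the rule above (sample resume text invented): with
# keywords ['python', 'linux|fedora'], a resume mentioning Python and Fedora
# yields matches == [{'Python'}, {'Fedora'}] -- all() truthy, a strong
# match. A resume mentioning only Python yields [{'Python'}, set()] --
# any() but not all(), a weak match.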


def matchWithProjects(resumeFiles):
    goldresumes = []
    matchResumes(resumeFiles)
    #for project in projectsMay2017:
    #    print(len(project.strongResumeMatches), '\t', project.name, '\t', project.description)
    #print('Resumes to review:', len([resume for resume in resumeFiles if len(resume.strongProjectMatches) > 0]))
    #print('Resumes with strong matches:')
    #for i in range(1, 9):
    #    resumeCount = [resume for resume in resumeFiles if len(resume.strongProjectMatches) == i]
    #    if resumeCount:
    #        print(len(resumeCount), 'with', i, 'strong matches')
    #resumeCount = [resume for resume in resumeFiles if len(resume.strongProjectMatches) > 9]
    #if resumeCount:
    #    print(len(resumeCount), 'with > 10 matches')
    #print('Resumes with weak matches:')
    #for i in range(1, 9):
    #    resumeCount = [resume for resume in resumeFiles
    #                   if not resume.strongProjectMatches and len(resume.weakProjectMatches) == i]
    #    if resumeCount:
    #        print(len(resumeCount), 'with', i, 'weak matches')
    #resumeCount = [resume for resume in resumeFiles
    #               if not resume.strongProjectMatches and len(resume.weakProjectMatches) > 9]
    #if resumeCount:
    #    print(len(resumeCount), 'with > 10 matches')
header1 = '''From: Sarah Sharp <saharabeara@gmail.com>
'''
header3 = '''Reply-to: outreachy-admins@gnome.org
Subject: Internship opportunities
'''
noBooth = '''Greetings!

I'm Sarah Sharp, and we both attended the Tapia conference last
September. I'd like to invite you to apply to two programs that
provide paid internships in open source. Interns will work
remotely with experienced mentors.

'''
# offer to host Outreachy session if they signed up at the booth or mention open
# source in their resume?
# What about the students at universities hosting introductory sessions?
atBooth = '''Greetings!

I'm Sarah Sharp, and we met when you stopped by the Outreachy booth
at the Tapia conference last September. I'd like to invite you to
apply to two programs that provide paid internships.
Interns will work remotely with experienced mentors.

'''
generalInfo = '''Google Summer of Code is open to all university students:
https://developers.google.com/open-source/gsoc/

Outreachy is open internationally to women (both cis & trans),
trans men, and genderqueer folks. It is also open to U.S. residents
and nationals of any gender who are Black/African American,
Hispanic/Latin@, American Indian, Alaska Native, Native Hawaiian, or
Pacific Islander.
https://wiki.gnome.org/Outreachy/

Both programs offer internships from May 30 to August 30.
Google Summer of Code application process runs from Feb 28 to Apr 3,
while Outreachy's application process runs from Feb 16 to Mar 30.

Google Summer of Code application only requires a project proposal.
Outreachy also requires applicants to make project contributions.

'''
moreInfo = '''The full list of Outreachy internship projects is available at:
https://wiki.gnome.org/Outreachy/2017/MayAugust

Please let me know if you have any questions about the Outreachy
program. Outreachy coordinators (Marina, Karen, Cindy, Tony, and I)
can all be reached at outreachy-admins@gnome.org You can contact all
organization mentors by emailing outreachy-list@gnome.org

I hope you'll apply!

Thanks,
Sarah Sharp
'''
# TODO:
# 1. Remove the generic description when we have a good resume match; it's more personal.
LINEWRAP = 68
def writeInitialInvitation(emaildir, resume, boothlist, matches):
    project, keywords = matches[0]
    para = ("Based on your resume, if you're eligible for Outreachy, it looks like you might be a good fit for an internship with " +
            project.name)
    if project.short:
        para = para + ' (' + project.short + ')'
    if not project.description:
        return textwrap.fill(para + '.', LINEWRAP, replace_whitespace=False) + '\n\n'
    para = para + ' which is offering an internship to ' + project.description
    keywords = [k for k in keywords if k.lower() not in project.printskip]
    if keywords:
        para = para + ' that involves working with '
        k = list(set(keywords))
        if len(k) == 1:
            para = para + k[0]
        elif len(k) == 2:
            para = para + ' and '.join(k)
        else:
            para = para + ', '.join(k[:-1]) + ' and ' + k[-1]
    return para
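

# Example of a sentence this can build (project drawn from the list above,
# wording per the concatenation here): "Based on your resume, if you're
# eligible for Outreachy, it looks like you might be a good fit for an
# internship with Ceph (a network filesystem) which is offering an
# internship to design a status dashboard to visualize Ceph cluster
# statistics that involves working with python and javascript."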


def writeStrongInvitation(emaildir, resume, boothlist):
    matches = sorted(resume.strongProjectMatches, key=lambda match: len(match[1]))
    project, keywords = matches[0]
    para = writeInitialInvitation(emaildir, resume, boothlist, matches)
    if len(resume.strongProjectMatches) > 1:
        para = (para + '. You may also be interested in the ' +
                project.name + ' internship')
        if len(resume.strongProjectMatches) > 2:
            para = para + 's to '
        else:
            para = para + ' to '
        descriptions = []
        for project, keywords in matches[1:-1]:
            para = para + project.description + ' or the internship to '
        para = para + matches[-1][0].description
    return textwrap.fill(para + '.', LINEWRAP) + '\n\n'


def writeMultipleStrongInvitation(emaildir, resume, boothlist):
    matches = sorted(resume.strongProjectMatches, key=lambda match: len(match[1]))
    project, keywords = matches[0]
    para = writeInitialInvitation(emaildir, resume, boothlist, matches)
    doneProjects = set()
    for project, keywords in matches[1:]:
        projmatches = [(p, k) for p, k in matches[1:]
                       if p not in doneProjects and
                       p.name == project.name
                       ]
        if not projmatches:
            continue
        doneProjects.add(project)
        for p, k in projmatches:
            para = (para + '. You may also be interested in the ' +
                    p.name + ' internship')
            if not p.description:
                break
            if len(projmatches) > 2:
                para = para + 's to '
            else:
                para = para + ' to '
            descriptions = []
            for p2, k2 in projmatches[1:-1]:
                para = para + p2.description + ' or the internship to '
            para = para + projmatches[-1][0].description
    return textwrap.fill(para + '.', LINEWRAP) + '\n\n'


class emailType(Enum):
    strong = 1
    mixed = 2
    weak = 3


def craftEmail(emaildir, resume, boothlist, strength):
    email = header1 + 'To: ' + ', '.join(resume.emails) + '\n' + header3
    if resume.pdfFileName in boothlist:
        email = email + atBooth
    else:
        email = email + noBooth
    if strength is emailType.strong:
        email = (email + generalInfo +
                 writeStrongInvitation(emaildir, resume, boothlist) +
                 moreInfo)
    elif strength is emailType.mixed:
        email = (email + generalInfo +
                 writeMultipleStrongInvitation(emaildir, resume, boothlist) +
                 moreInfo)
    ext = '-email.txt'
    with open(os.path.join(emaildir, os.path.splitext(resume.textFileName)[0] + ext), 'w') as f:
        f.write(email)


def createFormEmails(directory, resumeFiles, boothlist):
    # For all resumes with one strong match or multiple strong matches with the same organization:
    # Create a directory with the organization name (lowercase, with spaces replaced with dashes)
    # Copy pdf resume into that directory, create basename-email.txt
    oneStrong = [resume for resume in resumeFiles if len(resume.strongProjectMatches) == 1]
    print('Resumes with exactly one match:', len(oneStrong))
    left = [resume for resume in resumeFiles if resume not in oneStrong]
    for resume in left:
        if not resume.strongProjectMatches:
            continue
        firstMatch = resume.strongProjectMatches[0][0].name
        for match in resume.strongProjectMatches[1:]:
            if match[0].name != firstMatch:
                firstMatch = ''
                break
        if firstMatch:
            oneStrong.append(resume)
    left = [resume for resume in resumeFiles if resume not in oneStrong]
    print('Resumes with exactly one match or multiple matches with same org:', len(oneStrong))
    print('Other resumes:', len(left))
    for project in projectsMay2017:
        matches = [resume for resume in oneStrong if resume.strongProjectMatches[0][0].name == project.name]
        if not matches:
            continue
        dirpath = os.path.join(directory, 'emails-' + re.sub(r'\s+', '-', project.name.lower()))
        if not os.path.exists(dirpath):
            os.makedirs(dirpath)
        for resume in matches:
            try:
                if not os.path.exists(os.path.join(dirpath, resume.pdfFileName)):
                    copyfile(os.path.join(directory, resume.pdfFileName),
                             os.path.join(dirpath, resume.pdfFileName))
            except OSError:
                print('Could not find pdf file for', resume.textFileName)
                continue
            craftEmail(dirpath, resume, boothlist, emailType.strong)
    # For all resumes with strong matches with multiple orgs (but less than 4 orgs):
    # Create a directory called strong-mixed.
    # Copy pdf resume into that directory, create basename-email.txt
    #
    # "Based on your resume, it looks like you might be interested in an
    # internship with $PROJECT that involves $KEYWORDS which is offering an internship for
    # $DESCRIPTION.
    #
    # Additionally, you might be interested in $PROJECT that involves $KEYWORDS which
    # is offering an internship for $DESCRIPTION."
    mixed = [resume for resume in resumeFiles if resume not in oneStrong and resume.strongProjectMatches]
    dirpath = os.path.join(directory, 'mixed')
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)
    for resume in mixed:
        try:
            if not os.path.exists(os.path.join(dirpath, resume.pdfFileName)):
                copyfile(os.path.join(directory, resume.pdfFileName),
                         os.path.join(dirpath, resume.pdfFileName))
        except OSError:
            print('Could not find pdf file for', resume.textFileName)
            continue
        craftEmail(dirpath, resume, boothlist, emailType.mixed)
    # For all resumes with strong matches with 4 or more orgs:
    # Create a directory called strong-scattered.
    # Copy pdf resume into that directory, create basename-email.txt
    # For all weakly matched resumes - figure out top keywords that matched weak resumes.
    hitcount = Counter()
    for resume in [resume for resume in resumeFiles if not resume.strongProjectMatches]:
        allkeywords = set()
        for project, keywords in resume.weakProjectMatches:
            allkeywords.update(keywords)
            for keyword in keywords:
                allkeywords.add(keyword)
        hitcount.update(allkeywords)
    # Take the top N keywords that weakly matched, find all projects that matched those keywords.
    # "Based on your resume, it looks like you might be interested in Outreachy
    # projects involving $KEYWORD like $MATCHES"


def craftGenericEmail(emaildir, resume):
    if not resume.emails:
        address = ''
    else:
        address = resume.emails[0]
    email = header1 + 'To: ' + address + '\n' + header3
    email = email + noBooth
    email = email + generalInfo + moreInfo
    ext = '-email.txt'
    with open(os.path.join(emaildir, os.path.splitext(resume.textFileName)[0] + ext), 'w') as f:
        f.write(email)

def main():
    parser = argparse.ArgumentParser(description='Search text resume files for skillset matches.')
    parser.add_argument('dir', help='Directory with .txt resume files')
    parser.add_argument('--csv', help='CSV file with name <email>,matching resume file of people who stopped by the booth')
    parser.add_argument('--notus', help='Directory with .txt resume files that may be non-U.S. residents')
    parser.add_argument('--done', help='Directory with .txt resume files that have been contacted')
    parser.add_argument('--generic', help='Simply create generic emails and ignore project matches',
                        action='store_true')
    #parser.add_argument('matches', help='file to write potential matches to')
    args = parser.parse_args()

    resumeFiles = readResumeFiles(args.dir)
    # Check to see if we have resumes to process that we've already
    # sent email to.
    doneResumes = []
    notusResumes = []
    if args.done:
        doneResumes = readResumeFiles(args.done)
        emails = [resume.emails[0] for resume in doneResumes if resume.emails]
        for email in emails:
            pdfs = [resume.pdfFileName for resume in resumeFiles if resume.emails and resume.emails[0] == email]
            matches = [resume.pdfFileName for resume in doneResumes if resume.emails and resume.emails[0] == email]
            if pdfs:
                print('Already contacted:', email, ' '.join(pdfs), 'matches done resume', ' '.join(matches))
    if args.notus:
        notusResumes = readResumeFiles(args.notus)
    if args.generic:
        genericdir = os.path.join(args.dir, 'generic-todo')
        if not os.path.exists(genericdir):
            os.makedirs(genericdir)
        for resume in resumeFiles:
            craftGenericEmail(genericdir, resume)
        return

    boothstops = (searchForEmail(args.csv, resumeFiles) +
                  searchForEmail(args.csv, doneResumes) +
                  searchForEmail(args.csv, notusResumes))
    boothlist = set()
    for email, filelist in boothstops:
        boothlist.update(filelist)
    print('Booth stop list', boothstops)
    print('Booth stop pdfs', boothlist)
    print('Done resumes', [resume.pdfFileName for resume in doneResumes])

    matchWithProjects(resumeFiles)
    boothandresume = len([resume for resume in resumeFiles
                          if resume.pdfFileName in boothlist
                          and len(resume.strongProjectMatches)])
    print('People who stopped by the booth who have a resume and need an email:', boothandresume)
    print('People who stopped by the booth who have a resume and have been sent email:',
          len([resume for resume in doneResumes
               if resume.pdfFileName in boothlist]))
    print('People who stopped by the booth who have a resume and may be non-U.S. citizens:',
          len([resume for resume in notusResumes
               if resume.pdfFileName in boothlist]))
    createFormEmails(args.dir, resumeFiles, boothlist)

if __name__ == "__main__":
    main()
| 48.839204 | 271 | 0.62304 | 3,565 | 31,892 | 5.56662 | 0.223562 | 0.013152 | 0.014966 | 0.017737 | 0.408869 | 0.323558 | 0.261124 | 0.204837 | 0.185739 | 0.162509 | 0 | 0.005901 | 0.266744 | 31,892 | 652 | 272 | 48.91411 | 0.84272 | 0.157657 | 0 | 0.258873 | 0 | 0.004175 | 0.363789 | 0.018087 | 0 | 0 | 0 | 0.001534 | 0 | 1 | 0.02714 | false | 0 | 0.016701 | 0 | 0.070981 | 0.033403 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
82216f99b184a3762b1bf3acde24777396628147 | 5,053 | py | Python | liquid_node/jobs/__init__.py | admariner/node | 611795e17aa52c50795cb81c1ffebd641dcab51b | [
"MIT"
] | null | null | null | liquid_node/jobs/__init__.py | admariner/node | 611795e17aa52c50795cb81c1ffebd641dcab51b | [
"MIT"
] | 3 | 2021-11-10T11:08:14.000Z | 2021-11-26T13:12:20.000Z | liquid_node/jobs/__init__.py | admariner/node | 611795e17aa52c50795cb81c1ffebd641dcab51b | [
"MIT"
] | null | null | null | import subprocess
import logging
import os
from pathlib import Path

from ..docker import docker
import jinja2

log = logging.getLogger(__name__)

TEMPLATES = Path(__file__).parent.parent.parent.resolve() / 'templates'

def set_volumes_paths(substitutions=None):
    """Set the volume paths in the job options.

    :param substitutions: dictionary containing the job options
    :returns: the job options
    :rtype: dict
    """
    # Use None as the default to avoid sharing one mutable dict between calls.
    if substitutions is None:
        substitutions = {}

    from ..configuration import config

    substitutions['config'] = config
    substitutions['liquid_domain'] = config.liquid_domain
    substitutions['liquid_volumes'] = config.liquid_volumes
    substitutions['liquid_collections'] = config.liquid_collections
    substitutions['liquid_http_port'] = config.liquid_http_port
    substitutions['liquid_2fa'] = config.liquid_2fa
    substitutions['check_interval'] = config.check_interval
    substitutions['check_timeout'] = config.check_timeout
    substitutions['consul_url'] = config.consul_url

    substitutions['exec_command'] = docker.exec_command_str

    substitutions['https_enabled'] = config.https_enabled
    if config.https_enabled:
        substitutions['liquid_https_port'] = config.liquid_https_port
        substitutions['acme_email'] = config.https_acme_email
        substitutions['acme_caServer'] = config.https_acme_caServer

    repos = {
        'snoop2': {
            'org': 'hoover',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'hoover-snoop2'),
            'target': '/opt/hoover/snoop'
        },
        'search': {
            'org': 'hoover',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'hoover-search'),
            'target': '/opt/hoover/search'
        },
        'ui_src': {
            'org': 'hoover',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'hoover-ui/src'),
            'target': '/opt/hoover/ui/src'
        },
        'ui_pages': {
            'org': 'hoover',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'hoover-ui/pages'),
            'target': '/opt/hoover/ui/pages'
        },
        'ui_styles': {
            'org': 'hoover',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'hoover-ui/styles'),
            'target': '/opt/hoover/ui/styles'
        },
        'core': {
            'org': 'liquidinvestigations',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'core'),
            'target': '/app'
        },
        'authproxy': {
            'org': 'liquidinvestigations',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'authproxy'),
            'target': '/app'
        },
        'codimd_server': {
            'org': 'liquidinvestigations',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'codimd-server'),
            'target': '/app',
        },
        'dokuwiki': {
            'org': 'liquidinvestigations',
            'local': os.path.join(config.liquidinvestigations_repos_path, 'liquid-dokuwiki'),
            'target': '/liquid',
        },
    }

    for repo, repo_config in repos.items():
        key_repo = f"{repo_config['org']}_{repo}_repo"
        key_git = f"{repo_config['org']}_{repo}_git"
        substitutions[key_repo] = ''
        substitutions[key_git] = ''
        if config.mount_local_repos:
            if Path(repo_config['local']).is_dir():
                substitutions[key_repo] = f"\"{repo_config['local']}:{repo_config['target']}\",\n"
                # Tag + md5 of the working-tree diff identify the exact local state.
                tag = subprocess.check_output(
                    f"git -C {repo_config['local']} describe --tags --dirty --broken",
                    shell=True,
                ).decode('utf-8').strip()
                md5sum = subprocess.check_output(
                    f"git -C {repo_config['local']} diff HEAD | md5sum",
                    shell=True,
                ).decode('utf-8').strip()
                substitutions[key_git] = tag + md5sum
            else:
                log.warning(f'Invalid repo path "{repo_config["local"]}"')

    return substitutions

def render(template, subs):
    from ..configuration import config
    env = jinja2.Environment(
        variable_start_string="${",
        variable_end_string="}",
        loader=jinja2.FileSystemLoader(str(config.templates)),
    )
    env.globals['int'] = int
    env.globals['max'] = max
    return env.from_string(template).render(subs)
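
# A minimal sketch (for orientation only, not part of the original module) of
# why the jinja2 delimiters above are re-keyed: Nomad HCL job files already
# interpolate ${var}, so the environment is configured to match that syntax.
# Standalone behaviour, assuming only jinja2 is installed:
#
#     env = jinja2.Environment(variable_start_string="${", variable_end_string="}")
#     env.from_string("port = ${liquid_http_port}").render({"liquid_http_port": 80})
#     # -> 'port = 80'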

def get_job(hcl_path, substitutions=None):
    """Return the job description generated from the given template.

    :param hcl_path: the path to the hcl template file
    :param substitutions: dictionary containing the job options
    :returns: the job description
    :rtype: str
    """
    with hcl_path.open() as job_file:
        template = job_file.read()
    output = render(template, set_volumes_paths(substitutions))
    return output

class Job:
    vault_secret_keys = ()
    core_oauth_apps = ()
    stage = 2
    generate_oauth2_proxy_cookie = False
    extra_secret_fn = None
| 34.141892 | 98 | 0.610133 | 527 | 5,053 | 5.637571 | 0.275142 | 0.021205 | 0.033322 | 0.045439 | 0.317065 | 0.296533 | 0.280377 | 0.280377 | 0.280377 | 0.252777 | 0 | 0.003739 | 0.259054 | 5,053 | 147 | 99 | 34.37415 | 0.789797 | 0.070453 | 0 | 0.150442 | 0 | 0 | 0.209122 | 0.032702 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026549 | false | 0 | 0.070796 | 0 | 0.176991 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8222def448064d7e5190de17a9fd652126451dc7 | 1,035 | py | Python | v8webconsole/webconsole/urls.py | zeerayne/1cv8-webconsole | 3f562b5458ed6e6e221c7d78886a49f89b07e4f9 | [
"MIT"
] | null | null | null | v8webconsole/webconsole/urls.py | zeerayne/1cv8-webconsole | 3f562b5458ed6e6e221c7d78886a49f89b07e4f9 | [
"MIT"
] | null | null | null | v8webconsole/webconsole/urls.py | zeerayne/1cv8-webconsole | 3f562b5458ed6e6e221c7d78886a49f89b07e4f9 | [
"MIT"
] | null | null | null | from django.urls import include
from django.conf.urls import url
from rest_framework.routers import SimpleRouter
from rest_framework_nested.routers import NestedSimpleRouter
from .views import (
    HostViewSet,
    HostAdminViewSet,
    ClusterViewSet,
    InfobaseViewSet,
)
host_router = SimpleRouter()
host_router.register(r'hosts', HostViewSet, basename='host')
host_admin_router = NestedSimpleRouter(host_router, r'hosts', lookup='host')
host_admin_router.register(r'admins', HostAdminViewSet, basename='host-admin')
cluster_router = NestedSimpleRouter(host_router, r'hosts', lookup='host')
cluster_router.register(r'clusters', ClusterViewSet, basename='cluster')
infobase_router = NestedSimpleRouter(cluster_router, r'clusters', lookup='cluster')
infobase_router.register(r'infobases', InfobaseViewSet, basename='infobase')
urlpatterns = [
    url(r'^', include(host_router.urls)),
    url(r'^', include(host_admin_router.urls)),
    url(r'^', include(cluster_router.urls)),
    url(r'^', include(infobase_router.urls)),
]
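
# The nested routers above generate URL patterns of roughly this shape (a
# sketch for orientation; the exact regexes come from rest_framework_nested,
# and the {host_pk}/{cluster_pk} kwargs follow from the lookup= arguments):
#
#   hosts/                                            -> HostViewSet
#   hosts/{host_pk}/admins/                           -> HostAdminViewSet
#   hosts/{host_pk}/clusters/                         -> ClusterViewSet
#   hosts/{host_pk}/clusters/{cluster_pk}/infobases/  -> InfobaseViewSet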
| 34.5 | 83 | 0.774879 | 122 | 1,035 | 6.401639 | 0.262295 | 0.06402 | 0.076825 | 0.053777 | 0.208707 | 0.128041 | 0.128041 | 0.128041 | 0 | 0 | 0 | 0 | 0.100483 | 1,035 | 29 | 84 | 35.689655 | 0.838883 | 0 | 0 | 0 | 0 | 0 | 0.090821 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.208333 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
82240b4e497af879de2b8332d733c22fc5035c5f | 6,292 | py | Python | gifploter.py | Lupin1998/inv-ML | 9f3db461911748292dff18024587538eb66d44bf | [
"MIT"
] | 1 | 2021-12-14T09:16:17.000Z | 2021-12-14T09:16:17.000Z | gifploter.py | Lupin1998/inv-ML | 9f3db461911748292dff18024587538eb66d44bf | [
"MIT"
] | null | null | null | gifploter.py | Lupin1998/inv-ML | 9f3db461911748292dff18024587538eb66d44bf | [
"MIT"
] | 2 | 2021-12-14T09:10:00.000Z | 2022-01-21T16:57:44.000Z | import matplotlib.pyplot as plt
import imageio
import os
import numpy as np

class GIFPloter():
    def __init__(self, args, model):
        self.plot_method = 'Li'
        self.gif_axlist = []
        self.clist = ['r', 'g', 'b', 'y', 'm', 'c', 'k',
                      'pink', 'lightblue', 'lightgreen', 'grey']
        self.fig, self.ax = plt.subplots()
        self.his_loss = None
        self.NetworkStructure = args['NetworkStructure']
        self.current_subfig_index = 2
        self.plot_every_epoch = args['PlotForloop']
        self.infor_index_list = model.plot_index_list
        self.name_list = model.name_list
        self.num_subfig = len(model.plot_index_list)
        self.layer_num = len(args['NetworkStructure']) - 1

        if self.plot_method == 'Zang':
            self.num_fig_every_row = int(np.sqrt(self.num_subfig)) + 1
            self.num_row = int(1 + (self.num_subfig - 0.5) //
                               self.num_fig_every_row)
            self.sub_position_list = [i + 1 for i in range(self.num_subfig)]
        if self.plot_method == 'Li':
            self.num_fig_every_row = 2
            self.num_row = int(1 + (self.num_subfig - 0.5) //
                               self.num_fig_every_row)
            self.sub_position_list = [i*2 + 1 for i in range(self.num_subfig//2)] + \
                [self.num_subfig] + \
                list(reversed([i*2 + 2 for i in range(self.num_subfig//2)]))
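            # Worked example (illustrative assumption: num_subfig = 7): the left
            # column fills top-down with positions [1, 3, 5], the final subplot
            # takes position 7, and the right column fills bottom-up with
            # [6, 4, 2], giving sub_position_list = [1, 3, 5, 7, 6, 4, 2].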

    def PlotOtherLayer(self, fig,
                       data, label,
                       title='',
                       fig_position0=1,
                       fig_position1=1,
                       fig_position2=1,
                       s=8):
        from sklearn.decomposition import PCA

        color_list = []
        for i in range(label.shape[0]):
            color_list.append(int(label[i]))

        if data.shape[1] > 3:
            pca = PCA(n_components=2)
            try:
                data_em = pca.fit_transform(data)
            except Exception:
                print("Error in plot latent space: PCA.")
                # Clip extreme values and rescale, then retry the projection.
                data_max = np.max(data) if np.max(data) < 1e30 else 1e30
                data_min = np.min(data) if np.min(data) > 1e-5 else 1e-2
                data -= data_min
                data /= data_max
                data_em = pca.fit_transform(data)
        else:
            data_em = data

        data_em = data_em - data_em.mean(axis=0)

        if data_em.shape[1] == 3:
            ax = fig.add_subplot(fig_position0, fig_position1,
                                 fig_position2, projection='3d')
            ax.scatter(
                data_em[:, 0], data_em[:, 1], data_em[:, 2],
                c=color_list, s=s, cmap='rainbow')
        if data_em.shape[1] == 2:
            ax = fig.add_subplot(fig_position0, fig_position1, fig_position2)
            ax.scatter(
                data_em[:, 0], data_em[:, 1], c=label, s=s, cmap='rainbow')

        plt.axis('equal')
        plt.title(title)

        self.current_subfig_index = self.current_subfig_index + 1

    def update_loss(self, loss=None):
        """Append the per-term losses of the current step to the history."""
        if self.his_loss is None and loss is not None:
            self.his_loss = [[] for i in range(len(loss))]
        elif loss is not None:
            for i, loss_item in enumerate(loss):
                self.his_loss[i].append(loss_item)

    def AddNewFig(self, output_info, label_point, loss=None, title_='', save=True):
        self.update_loss(loss)
        self.current_subfig_index = 1

        fig = plt.figure(figsize=(5*self.num_fig_every_row, 5*self.num_row))
        for i, index in enumerate(self.infor_index_list):
            self.PlotOtherLayer(
                fig, output_info[index],
                label_point, title=self.name_list[index],
                fig_position0=self.num_row,
                fig_position1=self.num_fig_every_row,
                fig_position2=int(self.sub_position_list[i]))

        if loss is not None:
            loss_interval = 200
            loss_sum = []
            for i in range(len(self.his_loss[1])):
                tmp = 0
                for j in range(len(self.his_loss)):
                    try:
                        tmp += self.his_loss[j][i]
                    except IndexError:
                        # Some loss histories may be shorter; skip missing entries.
                        pass
                loss_sum.append(tmp)
            # Add a new subplot for the loss history.
            ax = fig.add_subplot(self.num_row, self.num_fig_every_row,
                                 int(max(self.sub_position_list)) + 1)
            l1, = ax.plot(
                [i*loss_interval for i in range(len(self.his_loss[0]))],
                self.his_loss[0], 'bo-')
            l2, = ax.plot(
                [i*loss_interval for i in range(len(self.his_loss[0]))],
                self.his_loss[1], 'ko-')
            l3, = ax.plot(
                [i*loss_interval for i in range(len(self.his_loss[0]))],
                self.his_loss[2], 'yo-')
            l4, = ax.plot(
                [i*loss_interval for i in range(len(self.his_loss[0]))],
                self.his_loss[3], 'ro-')
            l5, = ax.plot(
                [i*loss_interval for i in range(len(self.his_loss[0]))],
                self.his_loss[4], 'mo-')
            l6, = ax.plot(
                [i*loss_interval for i in range(len(self.his_loss[0]))],
                self.his_loss[5], 'go-')
            l7, = ax.plot(
                [i*loss_interval for i in range(len(self.his_loss[0]))],
                loss_sum, 'co-')
            ax.legend((l1, l2, l3, l4, l5, l6, l7),
                      ('dis', 'push', 'ang', 'orth', 'pad', 'ae', 'sum'))
            plt.title('loss history')

        plt.tight_layout()
        if save:
            plt.savefig(title_ + '.png', dpi=300)
        plt.close()

    def SaveGIF(self, path):
        gif_images_path = os.listdir(path + '/')
        gif_images_path.sort()
        print(gif_images_path)
        gif_images = []
        for _, path_ in enumerate(gif_images_path):
            print(path_)
            if '.png' in path_:
                gif_images.append(imageio.imread(path + '/' + path_))
        imageio.mimsave(path + '/' + "latent.gif", gif_images, fps=10)
 | 37.230769 | 84 | 0.508423 | 808 | 6,292 | 3.748762 | 0.215347 | 0.04622 | 0.072631 | 0.04721 | 0.369759 | 0.295807 | 0.252228 | 0.252228 | 0.203698 | 0.203698 | 0 | 0.026131 | 0.367451 | 6,292 | 168 | 85 | 37.452381 | 0.734925 | 0.013827 | 0 | 0.138686 | 0 | 0 | 0.034544 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036496 | false | 0.007299 | 0.036496 | 0 | 0.080292 | 0.021898 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8226023cb3139084f438cf9dfe3adae050279971 | 1,336 | py | Python | violet.py | MasayukiTanaka0412/violet | 9bc5bfab83902e6798e3b9ef679757f6cd58f900 | [
"MIT"
] | null | null | null | violet.py | MasayukiTanaka0412/violet | 9bc5bfab83902e6798e3b9ef679757f6cd58f900 | [
"MIT"
] | null | null | null | violet.py | MasayukiTanaka0412/violet | 9bc5bfab83902e6798e3b9ef679757f6cd58f900 | [
"MIT"
] | null | null | null | import logging
import os
import win32com.client
import pandas as pd
logging.basicConfig(level=logging.INFO)
templateName = "Template.msg"
recipientsFile = "Recipients.xlsx"
logging.info('Violet App Start')
path = os.getcwd()
logging.info('Current Directory {}'.format(path))
outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
mail = outlook.OpenSharedItem(os.path.join(path,templateName))
logging.info("件名: {}".format(mail.subject))
logging.info("本文: {}".format(mail.HTMLBody))
originalBody = mail.HTMLBody
df = pd.read_excel(os.path.join(path,recipientsFile), sheet_name='Recipients')
logging.info(df)
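
# Expected layout of Recipients.xlsx (an assumption inferred from the loop
# below; the original does not document it): a sheet named 'Recipients' whose
# 'TO' column holds the address, while every other column header is treated as
# a placeholder string to be replaced in the template body, e.g.:
#
#   TO                | $NAME | $COMPANY
#   alice@example.com | Alice | Acme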
outputDir = os.path.join(path,'output')
if not os.path.isdir(outputDir):
    os.mkdir(outputDir)

for index, row in df.iterrows():
    replacedBody = originalBody
    recipient = ""
    for indexName in row.index:
        logging.info("indexName {}".format(indexName))
        if indexName == "TO":
            mail.Recipients.Add(row[indexName])
            recipient = row[indexName]
        else:
            replacedBody = replacedBody.replace(indexName, row[indexName])
    mail.HTMLBody = replacedBody
    mail.SaveAs(os.path.join(outputDir, "{}.msg".format(recipient.replace("@", "_"))))
    # Drop the recipient we just added so the template is clean for the next row.
    mail.Recipients.Remove(1)
logging.info('Violet App End') | 29.688889 | 85 | 0.68488 | 156 | 1,336 | 5.846154 | 0.416667 | 0.096491 | 0.04386 | 0.046053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004517 | 0.171407 | 1,336 | 45 | 86 | 29.688889 | 0.819332 | 0 | 0 | 0 | 0 | 0 | 0.116009 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8227caa8afc429ee4cb7a62a33c9415eba7a7d1a | 4,718 | py | Python | tests/test_vertex_color.py | aferrall/redner | be52e4105140f575f153d640ba889eb6e6015616 | [
"MIT"
] | 1,146 | 2018-11-11T01:47:18.000Z | 2022-03-31T14:11:03.000Z | tests/test_vertex_color.py | Awcrr/redner | b4f57037af26b720d916bbaf26103a3499101a9f | [
"MIT"
] | 177 | 2018-11-13T22:48:25.000Z | 2022-03-30T07:19:29.000Z | tests/test_vertex_color.py | Awcrr/redner | b4f57037af26b720d916bbaf26103a3499101a9f | [
"MIT"
] | 127 | 2018-11-11T02:32:17.000Z | 2022-03-31T07:24:03.000Z | import pyredner
import redner
import numpy as np
import torch
import math
# Example of optimizing vertex color of a sphere.
# Use GPU if available
pyredner.set_use_gpu(torch.cuda.is_available())
cam = pyredner.Camera(position = torch.tensor([0.0, 0.0, -5.0]),
                      look_at = torch.tensor([0.0, 0.0, 0.0]),
                      up = torch.tensor([0.0, 1.0, 0.0]),
                      fov = torch.tensor([45.0]), # in degrees
                      clip_near = 1e-2, # needs to be > 0
                      resolution = (256, 256))
# Set "use_vertex_color = True" to use vertex color
mat_vertex_color = pyredner.Material(use_vertex_color = True)
materials = [mat_vertex_color]
vertices, indices, uvs, normals = pyredner.generate_sphere(128, 64)
# For the target we randomize the vertex color.
vertex_color = torch.zeros_like(vertices).uniform_(0.0, 1.0)
shape_sphere = pyredner.Shape(
    vertices = vertices,
    indices = indices,
    uvs = uvs,
    normals = normals,
    colors = vertex_color, # use the 'colors' field in Shape to store the color
    material_id = 0)
shapes = [shape_sphere]
envmap = pyredner.imread('sunsky.exr')
if pyredner.get_use_gpu():
    envmap = envmap.cuda(device = pyredner.get_device())
envmap = pyredner.EnvironmentMap(envmap)
scene = pyredner.Scene(cam, shapes, materials, [], envmap)
scene_args = pyredner.RenderFunction.serialize_scene(
    scene = scene,
    num_samples = 256,
    max_bounces = 1,
    channels = [redner.channels.radiance, redner.channels.vertex_color])
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
img_radiance = img[:, :, :3]
img_vertex_color = img[:, :, 3:]
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/target.exr')
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/target.png')
pyredner.imwrite(img_vertex_color.cpu(), 'results/test_vertex_color/target_color.png')
target_radiance = pyredner.imread('results/test_vertex_color/target.exr')
if pyredner.get_use_gpu():
    target_radiance = target_radiance.cuda()
# Initial guess. Set to 0.5 for all vertices.
shape_sphere.colors = \
    torch.zeros_like(vertices, device = pyredner.get_device()) + 0.5
shape_sphere.colors.requires_grad = True
# We render both the radiance and the vertex color here.
# The vertex color is only for visualization.
scene_args = pyredner.RenderFunction.serialize_scene(
    scene = scene,
    num_samples = 256,
    max_bounces = 1,
    channels = [redner.channels.radiance, redner.channels.vertex_color])
img = render(1, *scene_args)
img_radiance = img[:, :, :3]
img_vertex_color = img[:, :, 3:]
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/init.png')
pyredner.imwrite(img_vertex_color.cpu(), 'results/test_vertex_color/init_color.png')
diff = torch.abs(target_radiance - img_radiance)
pyredner.imwrite(diff.cpu(), 'results/test_vertex_color/init_diff.png')
optimizer = torch.optim.Adam([shape_sphere.colors], lr=1e-2)
for t in range(100):
    print('iteration:', t)
    optimizer.zero_grad()
    scene_args = pyredner.RenderFunction.serialize_scene(
        scene = scene,
        num_samples = 4,
        max_bounces = 1,
        channels = [redner.channels.radiance, redner.channels.vertex_color])
    img = render(t + 1, *scene_args)
    img_radiance = img[:, :, :3]
    img_vertex_color = img[:, :, 3:]
    pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/iter_{}.png'.format(t))
    pyredner.imwrite(img_vertex_color.cpu(), 'results/test_vertex_color/iter_color_{}.png'.format(t))
    loss = torch.pow(img_radiance - target_radiance, 2).sum()
    print('loss:', loss.item())
    loss.backward()
    optimizer.step()
    # Clamp the data to the valid range.
    shape_sphere.colors.data.clamp_(0.0, 1.0)

scene_args = pyredner.RenderFunction.serialize_scene(
    scene = scene,
    num_samples = 256,
    max_bounces = 1,
    channels = [redner.channels.radiance, redner.channels.vertex_color])
img = render(102, *scene_args)
img_radiance = img[:, :, :3]
img_vertex_color = img[:, :, 3:]
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/final.exr')
pyredner.imwrite(img_radiance.cpu(), 'results/test_vertex_color/final.png')
pyredner.imwrite(img_vertex_color.cpu(), 'results/test_vertex_color/final_color.png')
pyredner.imwrite(torch.abs(target_radiance - img_radiance).cpu(), 'results/test_vertex_color/final_diff.png')
from subprocess import call
call(["ffmpeg", "-framerate", "24", "-i",
"results/test_vertex_color/iter_%d.png", "-vb", "20M",
"results/test_vertex_color/out.mp4"])
call(["ffmpeg", "-framerate", "24", "-i",
"results/test_vertex_color/iter_color_%d.png", "-vb", "20M",
"results/test_vertex_color/out_color.mp4"]) | 39.983051 | 109 | 0.708563 | 658 | 4,718 | 4.867781 | 0.218845 | 0.137371 | 0.090228 | 0.116766 | 0.536684 | 0.53606 | 0.466438 | 0.466438 | 0.453637 | 0.432407 | 0 | 0.022989 | 0.151759 | 4,718 | 118 | 110 | 39.983051 | 0.777361 | 0.087325 | 0 | 0.319588 | 0 | 0 | 0.168335 | 0.150407 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.061856 | 0 | 0.061856 | 0.020619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8227d244a3f5042b87acfe74836b3be3d5794b53 | 10,066 | py | Python | starterlite/simulation/GRF.py | gjsun/starterlite | 4838c0b9837e0012157596984f9e39ed52f9c86c | [
"MIT"
] | null | null | null | starterlite/simulation/GRF.py | gjsun/starterlite | 4838c0b9837e0012157596984f9e39ed52f9c86c | [
"MIT"
] | null | null | null | starterlite/simulation/GRF.py | gjsun/starterlite | 4838c0b9837e0012157596984f9e39ed52f9c86c | [
"MIT"
] | null | null | null | import numpy as np
import os

from ..analysis import Sensitivity
from ..physics import Cosmology
from ..physics.Constants import c, cm_per_mpc
from ..util.ParameterFile import ParameterFile
from .FourierSpace import FourierSpace

"""
------------
Instructions
------------
The GRF module allows the user to generate realizations of a Gaussian random
field with an input power spectrum, and to compute the power spectrum of a
given map of fluctuations in real space.

This module has benefited greatly from the imapper2 package developed by Tony Li.
"""

class GaussianRandomField(FourierSpace):
    def __init__(self, **kwargs):
        FourierSpace.__init__(self, **kwargs)
        self.pf = ParameterFile(**kwargs)
        # Get the redshift of the signal of interest
        self._z = self.pf.grf_params['grf_z_signal']
        # Specify the survey geometry
        self._survey_goemetry = np.array([self.pf.grf_params['grf_geom_x'],
                                          self.pf.grf_params['grf_geom_y'],
                                          self.pf.grf_params['grf_geom_z']])
        # Get the wavelength [cm] of the signal of interest
        self.wv_signal = self.pf.grf_params['grf_lambda_signal']
        # Get the assumed aperture size (diameter) of the dish
        self.d_ap = self.pf.grf_params['grf_d_ap']
        self._powerspectrum_in = self.pf.grf_params['grf_ps_in']
    @property
    def cosm(self):
        if not hasattr(self, '_cosm'):
            self._cosm = Cosmology(
                omega_m_0=self.pf.cosmo_params['omega_m_0'],
                omega_l_0=self.pf.cosmo_params['omega_l_0'],
                omega_b_0=self.pf.cosmo_params['omega_b_0'],
                hubble_0=self.pf.cosmo_params['hubble_0'],
                helium_by_number=self.pf.cosmo_params['helium_by_number'],
                cmb_temp_0=self.pf.cosmo_params['cmb_temp_0'],
                approx_highz=self.pf.cosmo_params['approx_highz'],
                sigma_8=self.pf.cosmo_params['sigma_8'],
                primordial_index=self.pf.cosmo_params['primordial_index'])
        return self._cosm

    @property
    def sens(self, **kwargs):
        if not hasattr(self, '_sens'):
            self._sens = Sensitivity(**kwargs)
        return self._sens

    @property
    def z(self):
        if not hasattr(self, '_z'):
            raise ValueError('must specify a redshift for which the fluctuations of the target signal will be simulated!')
        return self._z

    @z.setter
    def z(self, value):
        if value in [6.0]:
            self._z = value
        else:
            raise ValueError('invalid signal redshift!')

    @property
    def survey_goemetry(self):
        if not hasattr(self, '_survey_goemetry'):
            raise ValueError('must specify a survey geometry for the simulation!')
        return self._survey_goemetry

    @survey_goemetry.setter
    def survey_goemetry(self, value):
        if np.alltrue(value > 0) and np.size(value) == 3:
            self._survey_goemetry = value
            #print('updated default geometry to %s!' % self._survey_goemetry)
        else:
            raise ValueError('input survey geometry invalid!')

    @property
    def PowerSpectrum(self):
        if not hasattr(self, '_PowerSpectrum'):
            raise ValueError('To simulate a GRF, must supply an input PS!')
        return self._PowerSpectrum

    @PowerSpectrum.setter
    def PowerSpectrum(self, value):
        if callable(value):
            self._PowerSpectrum = value
        else:
            raise ValueError('Input power spectrum must be a callable function of k!')

    @property
    def n_ch_x(self):
        return self.survey_goemetry[0]

    @property
    def n_ch_y(self):
        return self.survey_goemetry[1]

    @property
    def n_ch_z(self):
        return self.survey_goemetry[2]

    @property
    def n_beam(self):
        return self.survey_goemetry[0] * self.survey_goemetry[1]

    @property
    def n_channel(self):
        return self.survey_goemetry[-1]
    def SetGrid(self, L_x, L_y, L_z):
        """
        Set up the x (real-space) and k (Fourier-space) grids
        ----------------------------------------
        :param L_x: length of survey volume along 1st dimension; {scalar}
        :param L_y: length of survey volume along 2nd dimension; {scalar}
        :param L_z: length of survey volume along 3rd (LOS) dimension; {scalar}
        :return:
        """
        _lslab_x = L_x
        _lslab_y = L_y
        _lslab_z = L_z

        # Define the large simulation box within which the survey volume is embedded
        _lsim_x = _lsim_y = _lsim_z = _lslab_z   # Mpc h^-1
        _dx = _lslab_x / self.n_ch_x   # Mpc h^-1
        _dy = _lslab_y / self.n_ch_y   # Mpc h^-1
        _dz = _lslab_z / self.n_ch_z   # Mpc h^-1

        self.nx_sim = int(np.round(_lsim_x / _dx))
        self.ny_sim = int(np.round(_lsim_y / _dy))
        self.nz_sim = int(np.round(_lsim_z / _dz))

        self.xs = np.linspace(-self.nx_sim//2 + self.nx_sim%2, self.nx_sim//2 - 1 + self.nx_sim%2, self.nx_sim) * _dx
        self.ys = np.linspace(-self.ny_sim//2 + self.ny_sim%2, self.ny_sim//2 - 1 + self.ny_sim%2, self.ny_sim) * _dy
        self.zs = np.linspace(-self.nz_sim//2 + self.nz_sim%2, self.nz_sim//2 - 1 + self.nz_sim%2, self.nz_sim) * _dz

        self.r = np.sqrt(self.xs[:,np.newaxis,np.newaxis]**2 + self.ys[np.newaxis,:,np.newaxis]**2 + self.zs[np.newaxis,np.newaxis,:]**2)

        sim = np.zeros((self.nx_sim, self.ny_sim, self.nz_sim), float)
        self.npix_cen = self.nx_sim // 2 - 1
        if self.n_ch_y == 1:
            # real-space weighting function
            sim[int(self.npix_cen - (self.n_beam // 2)):int(self.npix_cen + (self.n_beam // 2)), self.npix_cen, 0:] = 1.0
        else:
            raise NotImplementedError('help!')

        _kx = 2*np.pi * np.fft.fftfreq(self.nx_sim, _dx)
        _ky = 2*np.pi * np.fft.fftfreq(self.ny_sim, _dy)
        _kz = 2*np.pi * np.fft.fftfreq(self.nz_sim, _dz)
        _dkx = abs(_kx[1] - _kx[0])
        _dky = abs(_ky[1] - _ky[0])
        _dkz = abs(_kz[1] - _kz[0])

        self.k = np.sqrt(_kx[:,np.newaxis,np.newaxis]**2 + _ky[np.newaxis,:,np.newaxis]**2 + _kz[np.newaxis,np.newaxis,:]**2)

        _box_vol = _lsim_x * _lsim_y * _lsim_z
        _pix_vol = _box_vol / (self.nx_sim * self.ny_sim * self.nz_sim)
        self.scale_factor = np.sqrt(_pix_vol**2 / _box_vol)
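        # Normalization note (added for clarity): scale_factor = sqrt(V_pix^2 / V_box)
        # is the factor that, under the FFT conventions used in GenerateGRF below,
        # converts the unit-variance Fourier modes into physical units, so that the
        # realized field's measured power spectrum recovers the input P(k).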

    def GenerateGRF(self, L_x, L_y, L_z, n_samples=1):
        """
        Generate Gaussian random fields according to the provided geometry and power spectrum
        ----------------------------------------
        :param L_x: length of survey volume along 1st dimension; {scalar}
        :param L_y: length of survey volume along 2nd dimension; {scalar}
        :param L_z: length of survey volume along 3rd (LOS) dimension; {scalar}
        :param n_samples: number of GRF realizations to generate
        :return:
        """
        self.fn = 'grf_samples_x%dy%dz%d_N%d' % (self.n_ch_x, self.n_ch_y, self.n_ch_z, n_samples)

        if not callable(self.PowerSpectrum):
            raise TypeError('Input power spectrum must be a callable function of k!')

        self.survey_maps = np.zeros((self.n_ch_x, self.n_ch_y, self.n_ch_z, n_samples))

        print('\nGenerating x (real space) and k (fourier space) grids...')
        self.SetGrid(L_x=L_x, L_y=L_y, L_z=L_z)

        print('\nReading in power spectrum...')
        Pk = self.PowerSpectrum(self.k)
        if Pk[Pk >= 0.0].size != Pk.size:
            raise ValueError('Input power spectrum must be non-negative at all k!')

        print('\nGenerating GRF realizations...')
        if self.n_ch_y == 1:
            for i in range(n_samples):
                # Generate real and imaginary parts
                rand = np.random.RandomState(seed=(42 + i))
                realspace_vec_r = rand.normal(loc=0.0, scale=1.0, size=self.r.shape)
                realspace_vec_i = rand.normal(loc=0.0, scale=1.0, size=self.r.shape)
                realspace_map = (realspace_vec_r + realspace_vec_i * 1.0j)
                fourierspace_map = np.fft.fftn(realspace_map) / np.sqrt(self.nx_sim * self.ny_sim * self.nz_sim)
                ft_map = np.sqrt(Pk) * fourierspace_map / self.scale_factor
                ft_map[0, 0, 0] = 0.0
                full_map = np.fft.ifftn(ft_map)
                full_map = np.real(full_map)
                survey_map = full_map[int(self.npix_cen-(self.n_ch_x//2)):int(self.npix_cen+(self.n_ch_x//2)), self.npix_cen, :]
                self.survey_maps[:, 0, :, i] = survey_map
                print('%d out of %d realizations completed!' % (i+1, n_samples))
            self.survey_map_coords = [self.xs[int(self.npix_cen-(self.n_ch_x//2)):int(self.npix_cen+(self.n_ch_x//2))], None, self.zs]
        else:
            raise NotImplementedError('help!')

        self.save()
        print('\n--- DONE ---\n')

    def save(self, format='npz'):
        """
        Save derived window functions to file
        ----------------------------------------
        :param format: format of output file; {str}
        """
        _path = os.getenv('STARTERLITE') + '/output/grf/%s.%s' % (self.fn, format)
        _wf_dict = {'grf': self.survey_maps, 'coords': self.survey_map_coords}
        np.savez(_path, **_wf_dict)

    def GetObsPS2D_NoAvg(self, ps3d, T_matrix_path):
        """
        Obtain the observed 2D PS for a given true 3D PS and a projection (WF) matrix
        ----------------------------------------
        :param ps3d: true spatial power spectrum; {callable function}
        :param T_matrix_path: path to transfer matrix; {str}
        :return: observed power spectrum and bin edges
        """
        T_matrix_data = np.load(T_matrix_path)
        T_matrix = T_matrix_data['T_matrix']     # of size (NKx * NKz, Nkbins)
        k3d_bins = T_matrix_data['k3d_bins']
        K2D_bins = T_matrix_data['K2D_matrix']   # of size (NKx * NKz, 2)
        PS2D_from_mat = np.matmul(T_matrix, ps3d(k3d_bins))
        return K2D_bins, PS2D_from_mat
 | 36.078853 | 137 | 0.597457 | 1,441 | 10,066 | 3.932686 | 0.195697 | 0.009529 | 0.018528 | 0.026998 | 0.344627 | 0.256397 | 0.198165 | 0.16252 | 0.127228 | 0.112935 | 0 | 0.015481 | 0.268428 | 10,066 | 279 | 138 | 36.078853 | 0.754074 | 0.154878 | 0 | 0.119497 | 0 | 0 | 0.104833 | 0.003146 | 0 | 0 | 0 | 0 | 0.006289 | 1 | 0.113208 | false | 0 | 0.044025 | 0.031447 | 0.232704 | 0.031447 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8229dbbb6f0052b556f0fa37fd3095bf537d9605 | 5,180 | py | Python | fine_grid.py | daveweij/fine_mouse_grid_talon | d13d942f977d06d8f17d9a14988811bae69ac2cd | [
"MIT"
] | null | null | null | fine_grid.py | daveweij/fine_mouse_grid_talon | d13d942f977d06d8f17d9a14988811bae69ac2cd | [
"MIT"
] | null | null | null | fine_grid.py | daveweij/fine_mouse_grid_talon | d13d942f977d06d8f17d9a14988811bae69ac2cd | [
"MIT"
] | null | null | null | import typing
import os
import json
from talon import Module, Context, canvas, screen, ui, ctrl, settings
from talon.skia import Paint, Rect
from talon.types.point import Point2d
mod = Module()
ctx = Context()
mod.tag('fine_grid_enabled', desc='Tag enables fine grid commands')

class FineMouseGrid:
    ZOOM_RATIO = 0.6

    def __init__(self):
        self.screen = None
        self.mcanvas = None
        self.rect = None
        self.active = False
        letters = [chr(97 + i) for i in range(26)]
        numbers = [str(i) for i in range(10)]
        self.columns = letters + numbers
        self.rows = letters + numbers

    def setup(self, *, screen_num: int = None):
        screens = ui.screens()
        # each if block here might set the rect to None to indicate failure
        if screen_num is not None:
            screen = screens[screen_num % len(screens)]
        else:
            screen = screens[0]
        if not self.rect:
            rect = screen.rect
            self.rect = rect.copy()
        self.screen = screen
        if self.mcanvas is not None:
            self.mcanvas.close()
        self.mcanvas = canvas.Canvas.from_screen(screen)
        self.mcanvas.register("draw", self.draw)
        self.mcanvas.freeze()

    def draw(self, canvas):
        def draw_text(offset_x, offset_y, width, height):
            row_height = height / len(self.rows)
            column_width = width / len(self.columns)
            canvas.paint.text_align = canvas.paint.TextAlign.CENTER
            canvas.paint.textsize = 16
            for row, row_char in enumerate(self.rows):
                for col, col_char in enumerate(self.columns):
                    coordinate_x = offset_x + column_width * (col + 0.5)
                    coordinate_y = offset_y + row_height * (row + 0.5)
                    text_string = f"{row_char}{col_char}"
                    text_rect = canvas.paint.measure_text(text_string)[1]
                    background_rect = text_rect.copy()
                    background_rect.center = Point2d(
                        coordinate_x,
                        coordinate_y,
                    )
                    background_rect = background_rect.inset(-4)
                    canvas.paint.color = "9999994f"
                    canvas.paint.style = Paint.Style.FILL
                    canvas.draw_rect(background_rect)
                    canvas.paint.color = "00ff008f"
                    canvas.draw_text(
                        text_string,
                        coordinate_x,
                        coordinate_y + text_rect.height / 2,
                    )

        draw_text(self.rect.x, self.rect.y, self.rect.width, self.rect.height)
        self.active = True

    def close(self):
        self.mcanvas.unregister("draw", self.draw)
        self.mcanvas.close()
        self.mcanvas = None
        self.active = False

    def reset(self):
        self.rect = None
        self.redraw()

    def redraw(self):
        self.close()
        self.setup()
        self.draw(self.mcanvas)

    def get_coordinate(self, row: str, column: str):
        column_index = self.columns.index(column)
        row_index = self.rows.index(row)
        x = self.rect.x + self.rect.width * (column_index + 0.5) / len(self.columns)
        y = self.rect.y + self.rect.height * (row_index + 0.5) / len(self.rows)
        return x, y

    def go_coordinate(self, row: str, column: str):
        ctrl.mouse_move(*self.get_coordinate(row, column))

    def zoom(self, row: str, column: str):
        # Shrink the rect to ZOOM_RATIO of its current size, centred on the
        # chosen cell but clamped so the new rect never leaves the old one.
        x, y = self.get_coordinate(row, column)

        xnew_min = self.rect.x
        xnew_max = self.rect.x + (1 - self.ZOOM_RATIO) * self.rect.width
        xnew = x - 0.5 * self.ZOOM_RATIO * self.rect.width
        self.rect.x = max(min(xnew, xnew_max), xnew_min)

        ynew_min = self.rect.y
        ynew_max = self.rect.y + (1 - self.ZOOM_RATIO) * self.rect.height
        ynew = y - 0.5 * self.ZOOM_RATIO * self.rect.height
        self.rect.y = max(min(ynew, ynew_max), ynew_min)

        self.rect.width = self.ZOOM_RATIO * self.rect.width
        self.rect.height = self.ZOOM_RATIO * self.rect.height
        self.redraw()
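
    # Worked zoom example (illustrative numbers, not from the original): with
    # ZOOM_RATIO = 0.6 and a 1000-px-wide rect starting at x = 0, zooming on a
    # cell at x = 100 asks for a 600-px rect at xnew = 100 - 300 = -200; the
    # clamp to [xnew_min, xnew_max] = [0, 400] snaps it back to x = 0.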

grid = FineMouseGrid()


@mod.capture(rule="(<user.letter> | <user.number_key>) (<user.letter> | <user.number_key>)")
def coordinate(m) -> str:
    "row character followed by column character"
    return ','.join(m)


@mod.action_class
class GridActions:
    def fine_grid_activate():
        """Activate the fine mouse grid"""
        ctx.tags = ['user.fine_grid_enabled']
        grid.rect = None
        if not grid.mcanvas:
            grid.setup()
        grid.draw(grid.mcanvas)

    def fine_grid_close():
        """Close the fine mouse grid"""
        print(ctx.tags)
        grid.close()
        ctx.tags = []

    def go_coordinate(coordinate: str):
        """Move the mouse to the given grid coordinate"""
        print(coordinate)
        row, column = coordinate.split(',')
        grid.go_coordinate(row, column)

    def zoom(coordinate: str):
        """Zoom the grid in on the given coordinate"""
        row, column = coordinate.split(',')
        grid.zoom(row, column)

    def fine_grid_reset():
        """Reset the grid to its original state"""
        grid.reset()
| 31.585366 | 92 | 0.570463 | 647 | 5,180 | 4.440495 | 0.224111 | 0.072398 | 0.027149 | 0.035503 | 0.237731 | 0.098851 | 0.05952 | 0.023669 | 0 | 0 | 0 | 0.011861 | 0.316409 | 5,180 | 163 | 93 | 31.779141 | 0.799492 | 0.035521 | 0 | 0.113821 | 0 | 0.00813 | 0.04205 | 0.004405 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130081 | false | 0 | 0.04878 | 0 | 0.219512 | 0.01626 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |