**Schema** (113 columns; one row per source file):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
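The `qsc_*_quality_signal` columns are per-file heuristics (duplication, whitespace, comment fraction, Python AST checks), and the matching `qsc_*` columns without the suffix carry 0/1 filter flags. A minimal sketch of screening rows like the ones below on a few of these fields, assuming the dump is available as JSON Lines (the `sample.jsonl` path and thresholds are hypothetical):

```python
import json

def keep(row: dict) -> bool:
    """Screen a row on a few of the schema's quality signals."""
    return (
        row["qsc_codepython_cate_ast_quality_signal"] == 1       # parses as Python
        and row["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5
        and row["qsc_code_num_lines_quality_signal"] >= 10
    )

with open("sample.jsonl") as fh:
    rows = [json.loads(line) for line in fh]
print(sum(keep(r) for r in rows), "of", len(rows), "rows kept")
```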
---

**hexsha:** `b0f8d8e1a99343fc9166ec575752cd7be6e45cee` · **size:** 827 · **ext:** py · **lang:** Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | Pytorch/5-CNN/nn_Sequential.py | pengchenyu111/PaperCodeReplication | 7b8681654e25b7d707f4b4d7ebcfb85ffc0fd52a | ["Apache-2.0"] | null | null | null |
| max_issues | Pytorch/5-CNN/nn_Sequential.py | pengchenyu111/PaperCodeReplication | 7b8681654e25b7d707f4b4d7ebcfb85ffc0fd52a | ["Apache-2.0"] | null | null | null |
| max_forks | Pytorch/5-CNN/nn_Sequential.py | pengchenyu111/PaperCodeReplication | 7b8681654e25b7d707f4b4d7ebcfb85ffc0fd52a | ["Apache-2.0"] | null | null | null |

**content:**

```python
import torch
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.tensorboard import SummaryWriter
class Tudui(nn.Module):
def __init__(self):
super(Tudui, self).__init__()
self.model1 = Sequential(
Conv2d(3, 32, 5, padding=2),
MaxPool2d(2),
Conv2d(32, 32, 5, padding=2),
MaxPool2d(2),
Conv2d(32, 64, 5, padding=2),
MaxPool2d(2),
Flatten(),
Linear(1024, 64),
Linear(64, 10)
)
def forward(self, x):
x = self.model1(x)
return x
tudui = Tudui()
print(tudui)
input = torch.ones((64, 3, 32, 32))
output = tudui(input)
print(output.shape)
writer = SummaryWriter("./logs")
writer.add_graph(tudui, input)
writer.close()
```

**avg_line_length:** 22.972222 · **max_line_length:** 67 · **alphanum_fraction:** 0.584039

**Quality signals** (`qsc_code_*_quality_signal`): num_words 103 · num_chars 827 · mean_word_length 4.601942 · frac_words_unique 0.398058 · frac_chars_top_2grams 0.056962 · frac_chars_top_3grams 0.056962 · frac_chars_top_4grams 0.113924 · frac_chars_dupe_5grams 0.162447 · frac_chars_dupe_6grams 0.122363 · frac_chars_dupe_7grams 0.122363 · frac_chars_dupe_8grams 0.122363 · frac_chars_dupe_9grams 0 · frac_chars_dupe_10grams 0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.079796 · frac_chars_whitespace 0.287787 · size_file_byte 827 · num_lines 35 · num_chars_line_max 68 · num_chars_line_mean 23.628571 · frac_chars_alphabet 0.724958 · frac_chars_comments 0 · cate_xml_start 0 · frac_lines_dupe_lines 0.103448 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.007255 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0

**Python signals** (`qsc_codepython_*_quality_signal`): cate_ast 1 · frac_lines_func_ratio 0.068966 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.137931 · frac_lines_simplefunc 0 · score_lines_no_logic 0.275862 · frac_lines_print 0.068966

**Per-signal flags** (`qsc_*` without the `_quality_signal` suffix): all 0; `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null.

**effective:** 1 · **hits:** 0
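The `Linear(1024, 64)` input size in the sample above follows from the shapes: with a 32×32 input (as in `torch.ones((64, 3, 32, 32))`), `Conv2d(..., 5, padding=2)` preserves spatial size and each `MaxPool2d(2)` halves it, so 32 → 16 → 8 → 4, and `Flatten()` emits 64 · 4 · 4 = 1024 features. A quick standalone check:

```python
import torch
from torch import nn

# Feature extractor from the sample; Flatten() should emit 64 * 4 * 4 = 1024 values.
features = nn.Sequential(
    nn.Conv2d(3, 32, 5, padding=2), nn.MaxPool2d(2),
    nn.Conv2d(32, 32, 5, padding=2), nn.MaxPool2d(2),
    nn.Conv2d(32, 64, 5, padding=2), nn.MaxPool2d(2),
    nn.Flatten(),
)
print(features(torch.ones(1, 3, 32, 32)).shape)  # torch.Size([1, 1024])
```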
---

**hexsha:** `b0fb6f704b8d712fad268b480c2d25cc8bc409f5` · **size:** 1,232 · **ext:** py · **lang:** Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | src/brewlog/utils/query.py | zgoda/brewlog | 13a930b328f81d01a2be9aca07d3b14703b80faa | ["BSD-3-Clause"] | 3 | 2019-03-11T04:30:06.000Z | 2020-01-26T03:21:52.000Z |
| max_issues | src/brewlog/utils/query.py | zgoda/brewlog | 13a930b328f81d01a2be9aca07d3b14703b80faa | ["BSD-3-Clause"] | 23 | 2019-02-06T20:37:37.000Z | 2020-06-01T07:08:35.000Z |
| max_forks | src/brewlog/utils/query.py | zgoda/brewlog | 13a930b328f81d01a2be9aca07d3b14703b80faa | ["BSD-3-Clause"] | null | null | null |

**content:**

```python
from typing import Optional
from flask import url_for
from flask_sqlalchemy import BaseQuery
from ..ext import db
from ..models import Brew, BrewerProfile
def public_or_owner(query: BaseQuery, user: Optional[BrewerProfile]) -> BaseQuery:
"""Filter Brew query of all non-accessible objects.
:param query: query over Brew objects
:type query: BaseQuery
:param user: actor object, may be None
:type user: Optional[BrewerProfile]
:return: filtered query
:rtype: BaseQuery
"""
if user is not None:
query = query.filter(
db.or_(
BrewerProfile.id == user.id,
db.and_(BrewerProfile.is_public.is_(True), Brew.is_public.is_(True))
)
)
else:
query = query.filter(
BrewerProfile.is_public.is_(True), Brew.is_public.is_(True)
)
return query
def search_result(query, endpoint, attr_name):
res = []
id_col = query.column_descriptions[0]['entity'].id
name_col = query.column_descriptions[0]['entity'].name
for obj_id, obj_name in query.values(id_col, name_col):
kw = {attr_name: obj_id}
res.append({'name': obj_name, 'url': url_for(endpoint, **kw)})
return res
```

**avg_line_length:** 29.333333 · **max_line_length:** 84 · **alphanum_fraction:** 0.649351

**Quality signals** (`qsc_code_*_quality_signal`): num_words 161 · num_chars 1,232 · mean_word_length 4.795031 · frac_words_unique 0.372671 · frac_chars_top_2grams 0.041451 · frac_chars_top_3grams 0.051813 · frac_chars_top_4grams 0.072539 · frac_chars_dupe_5grams 0.202073 · frac_chars_dupe_6grams 0.202073 · frac_chars_dupe_7grams 0.11658 · frac_chars_dupe_8grams 0.11658 · frac_chars_dupe_9grams 0.11658 · frac_chars_dupe_10grams 0.11658 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.002151 · frac_chars_whitespace 0.24513 · size_file_byte 1,232 · num_lines 41 · num_chars_line_max 85 · num_chars_line_mean 30.04878 · frac_chars_alphabet 0.827957 · frac_chars_comments 0.184253 · cate_xml_start 0 · frac_lines_dupe_lines 0.076923 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.019588 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0

**Python signals** (`qsc_codepython_*_quality_signal`): cate_ast 1 · frac_lines_func_ratio 0.076923 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.192308 · frac_lines_simplefunc 0 · score_lines_no_logic 0.346154 · frac_lines_print 0

**Per-signal flags:** all 0; `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null.

**effective:** 1 · **hits:** 0
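`public_or_owner` assumes the caller has already joined `Brew` to `BrewerProfile`. A hedged sketch of a call site inside brewlog's app context (the `brewer_id` foreign key name is an assumption, not taken from the repository):

```python
# Hypothetical usage: list brews an anonymous visitor may see (public-only branch).
query = Brew.query.join(BrewerProfile, Brew.brewer_id == BrewerProfile.id)
visible = public_or_owner(query, user=None)
```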
---

**hexsha:** `b0feb9c47583568b91cc55ff1a17eb9a915a0f41` · **size:** 1,626 · **ext:** py · **lang:** Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | framework/repository/info.py | jarret/bitcoin_helpers | 4b6155ea3b004ad58a717b36cd58138d058281b1 | ["MIT"] | null | null | null |
| max_issues | framework/repository/info.py | jarret/bitcoin_helpers | 4b6155ea3b004ad58a717b36cd58138d058281b1 | ["MIT"] | null | null | null |
| max_forks | framework/repository/info.py | jarret/bitcoin_helpers | 4b6155ea3b004ad58a717b36cd58138d058281b1 | ["MIT"] | null | null | null |

**content:**

```python
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import json
import os
import sys
from framework.path.path import Path
from framework.file.io import read_file
from framework.git.path import GitPath
REPO_INFO_FILENAME = ".bitcoin-maintainer-tools.json"
FAILBACK_REPO_INFO_FILENAME = ".fallback-bitcoin-maintainer-tools.json"
class RepositoryInfo(dict):
"""
Dictionary that is sourced from a json file in the target repository.
If the file doesn't exist, a fallback file is used for the settings.
"""
def __init__(self, repository_base):
super().__init__()
json_file = os.path.join(repository_base, REPO_INFO_FILENAME)
path = Path(json_file)
if not path.exists():
# If there is no .json file in the repo, it might be an old version
# checked out. We can still do best-effort with a default file
# that is located in this repo.
json_file = self._failback_file()
path = Path(json_file)
if not path.exists():
sys.exit("Could not find a .json repository info file to use.")
path.assert_is_file()
path.assert_mode(os.R_OK)
content = read_file(json_file)
self.update(json.loads(content))
def _failback_file(self):
gp = GitPath(os.path.abspath(os.path.realpath(__file__)))
return os.path.join(str(gp.repository_base()),
FAILBACK_REPO_INFO_FILENAME)
```

**avg_line_length:** 36.954545 · **max_line_length:** 79 · **alphanum_fraction:** 0.674662

**Quality signals** (`qsc_code_*_quality_signal`): num_words 229 · num_chars 1,626 · mean_word_length 4.615721 · frac_words_unique 0.471616 · frac_chars_top_2grams 0.05298 · frac_chars_top_3grams 0.060549 · frac_chars_top_4grams 0.049196 · frac_chars_dupe_5grams 0.058657 · frac_chars_dupe_6grams 0.058657 · frac_chars_dupe_7grams 0.058657 · frac_chars_dupe_8grams 0.058657 · frac_chars_dupe_9grams 0 · frac_chars_dupe_10grams 0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.004032 · frac_chars_whitespace 0.237392 · size_file_byte 1,626 · num_lines 43 · num_chars_line_max 80 · num_chars_line_mean 37.813953 · frac_chars_alphabet 0.848387 · frac_chars_comments 0.305658 · cate_xml_start 0 · frac_lines_dupe_lines 0.153846 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.108794 · frac_chars_long_word_length 0.062557 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0.076923

**Python signals** (`qsc_codepython_*_quality_signal`): cate_ast 1 · frac_lines_func_ratio 0.076923 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.230769 · frac_lines_simplefunc 0 · score_lines_no_logic 0.384615 · frac_lines_print 0

**Per-signal flags:** all 0; `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null.

**effective:** 1 · **hits:** 0
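Note that the constant is spelled `FAILBACK_REPO_INFO_FILENAME` while the comments, the docstring, and the filename string itself all say "fallback"; only the identifier carries the inconsistency. A minimal usage sketch (the repository path and the `subtrees` key are hypothetical; the constructor exits if neither JSON file exists):

```python
# RepositoryInfo behaves as a plain dict sourced from the repo's JSON info file.
info = RepositoryInfo("/path/to/bitcoin")
print(info.get("subtrees", []))  # hypothetical key, for illustration only
```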
---

**hexsha:** `b0ffaa7976be25e795d5abe04edf6fd2fd0631eb` · **size:** 1,540 · **ext:** py · **lang:** Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | cookie_manager/security_decorator.py | ScholarPack/cookie-manager | 342eaf19d4ebbe83319306e4a3afcc3988f61d3d | ["MIT"] | 10 | 2020-02-26T14:13:05.000Z | 2021-07-30T02:16:47.000Z |
| max_issues | cookie_manager/security_decorator.py | ScholarPack/cookie-manager | 342eaf19d4ebbe83319306e4a3afcc3988f61d3d | ["MIT"] | 13 | 2020-02-26T10:42:09.000Z | 2021-09-30T13:26:23.000Z |
| max_forks | cookie_manager/security_decorator.py | ScholarPack/cookie-manager | 342eaf19d4ebbe83319306e4a3afcc3988f61d3d | ["MIT"] | 3 | 2020-03-29T00:49:23.000Z | 2020-07-24T16:26:20.000Z |

**content:**

```python
from functools import wraps
from typing import List, Any
from cookie_manager import CookieManager
class CookieSecurityDecorator:
_cookie_manager = None
_request = None
_cookie_name = None
def init_app(self, request: Any, cookie_manager: CookieManager, cookie_name: str):
"""
Initialise the security decorators
:param request: An object with the attribute `cookies`
:param cookie_manager: The instance of the cookie manager to be used for the decorator
:param cookie_name: The name of the cookie to read from the request
"""
self._request = request
self._cookie_manager = cookie_manager
self._cookie_name = cookie_name
def keys_required(self, keys: List = []):
"""
:param keys: A list of cookie signing keys that are allowed to use a decorated endpoint.
:raises Unauthorized: When the route is accessed without a valid key id
:return: wrapper
"""
def route_wrapper(f):
@wraps(f)
def wrapper(*args, **kwds):
verfied_cookie = self._cookie_manager.verify(
signed_cookie=self._request.cookies.get(self._cookie_name)
)
key_id = verfied_cookie.get("key_id")
if len(keys) == 0 or key_id in keys:
return f(*args, **kwds)
else:
raise self._cookie_manager._exceptions.Unauthorized()
return wrapper
return route_wrapper
```

**avg_line_length:** 34.222222 · **max_line_length:** 96 · **alphanum_fraction:** 0.619481

**Quality signals** (`qsc_code_*_quality_signal`): num_words 182 · num_chars 1,540 · mean_word_length 5.049451 · frac_words_unique 0.412088 · frac_chars_top_2grams 0.127312 · frac_chars_top_3grams 0.055495 · frac_chars_top_4grams 0 · frac_chars_dupe_5grams 0 · frac_chars_dupe_6grams 0 · frac_chars_dupe_7grams 0 · frac_chars_dupe_8grams 0 · frac_chars_dupe_9grams 0 · frac_chars_dupe_10grams 0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.000946 · frac_chars_whitespace 0.313636 · size_file_byte 1,540 · num_lines 44 · num_chars_line_max 97 · num_chars_line_mean 35 · frac_chars_alphabet 0.868496 · frac_chars_comments 0.274026 · cate_xml_start 0 · frac_lines_dupe_lines 0 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.00582 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0

**Python signals** (`qsc_codepython_*_quality_signal`): cate_ast 1 · frac_lines_func_ratio 0.16 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.12 · frac_lines_simplefunc 0 · score_lines_no_logic 0.56 · frac_lines_print 0

**Per-signal flags:** all 0; `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null.

**effective:** 1 · **hits:** 0
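A hedged sketch of wiring the decorator into a Flask app (the import path for `CookieSecurityDecorator`, the route, and the key id are assumptions; `CookieManager` construction is not shown). Because the wrapper reads `self._request.cookies` lazily at request time, passing Flask's `request` proxy at init time works:

```python
from flask import Flask, request
from cookie_manager import CookieManager, CookieSecurityDecorator  # assumed import path

app = Flask(__name__)
cookie_manager = ...  # a configured CookieManager instance (construction not shown)

security = CookieSecurityDecorator()
security.init_app(request=request, cookie_manager=cookie_manager, cookie_name="session")

@app.route("/admin")
@security.keys_required(keys=["trusted-key-id"])
def admin():
    return "ok"
```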
---

**hexsha:** `7c0015d18f7709ad1f58810e4e4fbcf0c3ae1358` · **size:** 6,394 · **ext:** py · **lang:** Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | mutagen/aiff.py | lucienimmink/scanner.py | cecaa0a570ba8058321dea1c8efa9f77868effb3 | ["MIT"] | 2 | 2022-03-14T15:34:14.000Z | 2022-03-23T17:05:42.000Z |
| max_issues | mutagen/aiff.py | lucienimmink/scanner.py | cecaa0a570ba8058321dea1c8efa9f77868effb3 | ["MIT"] | null | null | null |
| max_forks | mutagen/aiff.py | lucienimmink/scanner.py | cecaa0a570ba8058321dea1c8efa9f77868effb3 | ["MIT"] | 2 | 2020-09-17T08:27:12.000Z | 2021-08-23T11:13:52.000Z |

**content:**

```python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Evan Purkhiser
# 2014 Ben Ockmore
# 2019-2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""AIFF audio stream information and tags."""
import struct
from struct import pack
from mutagen import StreamInfo, FileType
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
from mutagen._iff import (
IffChunk,
IffContainerChunkMixin,
IffFile,
IffID3,
InvalidChunk,
error as IffError,
)
from mutagen._util import (
convert_error,
loadfile,
endswith,
)
__all__ = ["AIFF", "Open", "delete"]
class error(IffError):
pass
# based on stdlib's aifc
_HUGE_VAL = 1.79769313486231e+308
def read_float(data):
"""Raises OverflowError"""
assert len(data) == 10
expon, himant, lomant = struct.unpack('>hLL', data)
sign = 1
if expon < 0:
sign = -1
expon = expon + 0x8000
if expon == himant == lomant == 0:
f = 0.0
elif expon == 0x7FFF:
raise OverflowError("inf and nan not supported")
else:
expon = expon - 16383
# this can raise OverflowError too
f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63)
return sign * f
class AIFFChunk(IffChunk):
"""Representation of a single IFF chunk"""
@classmethod
def parse_header(cls, header):
return struct.unpack('>4sI', header)
@classmethod
def get_class(cls, id):
if id == 'FORM':
return AIFFFormChunk
else:
return cls
def write_new_header(self, id_, size):
self._fileobj.write(pack('>4sI', id_, size))
def write_size(self):
self._fileobj.write(pack('>I', self.data_size))
class AIFFFormChunk(AIFFChunk, IffContainerChunkMixin):
"""The AIFF root chunk."""
def parse_next_subchunk(self):
return AIFFChunk.parse(self._fileobj, self)
def __init__(self, fileobj, id, data_size, parent_chunk):
if id != u'FORM':
raise InvalidChunk('Expected FORM chunk, got %s' % id)
AIFFChunk.__init__(self, fileobj, id, data_size, parent_chunk)
self.init_container()
class AIFFFile(IffFile):
"""Representation of a AIFF file"""
def __init__(self, fileobj):
# AIFF Files always start with the FORM chunk which contains a 4 byte
# ID before the start of other chunks
super().__init__(AIFFChunk, fileobj)
if self.root.id != u'FORM':
raise InvalidChunk("Root chunk must be a FORM chunk, got %s"
% self.root.id)
def __contains__(self, id_):
if id_ == 'FORM': # For backwards compatibility
return True
return super().__contains__(id_)
def __getitem__(self, id_):
if id_ == 'FORM': # For backwards compatibility
return self.root
return super().__getitem__(id_)
class AIFFInfo(StreamInfo):
"""AIFFInfo()
AIFF audio stream information.
Information is parsed from the COMM chunk of the AIFF file
Attributes:
length (`float`): audio length, in seconds
bitrate (`int`): audio bitrate, in bits per second
channels (`int`): The number of audio channels
sample_rate (`int`): audio sample rate, in Hz
bits_per_sample (`int`): The audio sample size
"""
length = 0
bitrate = 0
channels = 0
sample_rate = 0
@convert_error(IOError, error)
def __init__(self, fileobj):
"""Raises error"""
iff = AIFFFile(fileobj)
try:
common_chunk = iff[u'COMM']
except KeyError as e:
raise error(str(e))
data = common_chunk.read()
if len(data) < 18:
raise error
info = struct.unpack('>hLh10s', data[:18])
channels, frame_count, sample_size, sample_rate = info
try:
self.sample_rate = int(read_float(sample_rate))
except OverflowError:
raise error("Invalid sample rate")
if self.sample_rate < 0:
raise error("Invalid sample rate")
if self.sample_rate != 0:
self.length = frame_count / float(self.sample_rate)
self.bits_per_sample = sample_size
self.sample_size = sample_size # For backward compatibility
self.channels = channels
self.bitrate = channels * sample_size * self.sample_rate
def pprint(self):
return u"%d channel AIFF @ %d bps, %s Hz, %.2f seconds" % (
self.channels, self.bitrate, self.sample_rate, self.length)
class _IFFID3(IffID3):
"""A AIFF file with ID3v2 tags"""
def _load_file(self, fileobj):
return AIFFFile(fileobj)
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
"""Completely removes the ID3 chunk from the AIFF file"""
try:
del AIFFFile(filething.fileobj)[u'ID3']
except KeyError:
pass
class AIFF(FileType):
"""AIFF(filething)
An AIFF audio file.
Arguments:
filething (filething)
Attributes:
tags (`mutagen.id3.ID3`)
info (`AIFFInfo`)
"""
_mimes = ["audio/aiff", "audio/x-aiff"]
@staticmethod
def score(filename, fileobj, header):
filename = filename.lower()
return (header.startswith(b"FORM") * 2 + endswith(filename, b".aif") +
endswith(filename, b".aiff") + endswith(filename, b".aifc"))
def add_tags(self):
"""Add an empty ID3 tag to the file."""
if self.tags is None:
self.tags = _IFFID3()
else:
raise error("an ID3 tag already exists")
@convert_error(IOError, error)
@loadfile()
def load(self, filething, **kwargs):
"""Load stream and tag information from a file."""
fileobj = filething.fileobj
try:
self.tags = _IFFID3(fileobj, **kwargs)
except ID3NoHeaderError:
self.tags = None
except ID3Error as e:
raise error(e)
else:
self.tags.filename = self.filename
fileobj.seek(0, 0)
self.info = AIFFInfo(fileobj)
Open = AIFF
```

**avg_line_length:** 25.99187 · **max_line_length:** 78 · **alphanum_fraction:** 0.610416

**Quality signals** (`qsc_code_*_quality_signal`): num_words 775 · num_chars 6,394 · mean_word_length 4.900645 · frac_words_unique 0.304516 · frac_chars_top_2grams 0.034229 · frac_chars_top_3grams 0.022117 · frac_chars_top_4grams 0.007899 · frac_chars_dupe_5grams 0.095313 · frac_chars_dupe_6grams 0.065824 · frac_chars_dupe_7grams 0.065824 · frac_chars_dupe_8grams 0.065824 · frac_chars_dupe_9grams 0.046867 · frac_chars_dupe_10grams 0.02317 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.023434 · frac_chars_whitespace 0.285893 · size_file_byte 6,394 · num_lines 245 · num_chars_line_max 79 · num_chars_line_mean 26.097959 · frac_chars_alphabet 0.808366 · frac_chars_comments 0.22568 · cate_xml_start 0 · frac_lines_dupe_lines 0.148936 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.062591 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0.004783 · frac_lines_prompt_comments 0 · frac_lines_assert 0.007092

**Python signals** (`qsc_codepython_*_quality_signal`): cate_ast 1 · frac_lines_func_ratio 0.120567 · cate_var_zero false · frac_lines_pass 0.014184 · frac_lines_import 0.042553 · frac_lines_simplefunc 0.028369 · score_lines_no_logic 0.333333 · frac_lines_print 0.007092

**Per-signal flags:** all 0; `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null.

**effective:** 1 · **hits:** 0
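`read_float` above decodes the 80-bit IEEE 754 extended-precision sample rate stored in the COMM chunk. A worked check, with the sample's `read_float` in scope: 44100 Hz encodes as sign 0, exponent 16383 + 15 = 0x400E, and mantissa 0xAC44 << 48 (since 44100 = 0xAC44):

```python
import struct

# 80-bit extended float for 44100.0, in the '>hLL' layout read_float() unpacks.
data = struct.pack('>hLL', 0x400E, 0xAC440000, 0)
assert len(data) == 10
assert read_float(data) == 44100.0  # (0xAC44 << 48) * 2**(15 - 63) == 44100
```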
---

**hexsha:** `7c037d89e3a9bf24f7302ca98b6d3dd08cac776e` · **size:** 11,062 · **ext:** py · **lang:** Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | ML/meanvel.py | lewisfish/Triton-dolphin | bc7256485e1bd943e0b9b3017c214c82e26905f3 | ["MIT"] | null | null | null |
| max_issues | ML/meanvel.py | lewisfish/Triton-dolphin | bc7256485e1bd943e0b9b3017c214c82e26905f3 | ["MIT"] | null | null | null |
| max_forks | ML/meanvel.py | lewisfish/Triton-dolphin | bc7256485e1bd943e0b9b3017c214c82e26905f3 | ["MIT"] | null | null | null |

**content:**

```python
from concurrent import futures
from itertools import repeat
import pathlib
from pathlib import Path
import pickle
import time
from typing import List, Tuple, Union
import cv2 as cv2
import hdbscan
import numpy as np
import pims
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from gps import getAltitude
from ocr import getMagnification
def _getFullFileName(files: List[pathlib.PosixPath], target: str) -> pathlib.PosixPath:
'''Match a file to list of full filenames
Parameters
----------
files : List[pathlib.PosixPath]
List of file name to match to
target : str
File to be matched with full path
Returns
-------
file : pathlib.PosixPath
Full filename
'''
for file in files:
if target in str(file):
return file
def getFrames(file: str, position: int, offset: int) -> Tuple[List[pims.frame.Frame], List[int], float]:
"""Get 3 frames for optical flow analysis. Frames are serperated by +/- offset.
Central frame is at position.
Parameters
----------
file : str
Video file to get frames from
position : int
Position of central frame
offset : int
offset to get other frames for optical flow analysis
Returns
-------
Tuple[List[pims.frame.Frame], List[int], float]
Frames at position, +/- offset, list of frame positions, fps of video
"""
assert position > offset
video = pims.PyAVVideoReader(file)
frame0 = video[position - offset]
frame1 = video[position]
frame2 = video[position + offset]
return [frame0, frame1, frame2], [position - offset, position, position + offset], float(video._frame_rate)
def getFramesCV2(file: str, position: int, offset: int):
"""Get 3 frames for optical flow analysis using cv2. Frames are serperated by +/- offset.
Central frame is at position.
Parameters
----------
file : str
Video file to get frames from
position : int
Position of central frame
offset : int
offset to get other frames for optical flow analysis
Returns
-------
Tuple[List[np.ndarray], List[int], float]
Frames at position, +/- offset, list of frame positions, fps of video
"""
assert position >= offset
cap = cv2.VideoCapture(str(file))
fps = cap.get(cv2.CAP_PROP_FPS)
cap.set(cv2.CAP_PROP_POS_FRAMES, position-offset)
_, frame = cap.read()
frame0 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
cap.set(cv2.CAP_PROP_POS_FRAMES, position)
_, frame = cap.read()
frame1 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
cap.set(cv2.CAP_PROP_POS_FRAMES, position+offset)
_, frame = cap.read()
frame2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
return [frame0, frame1, frame2], [position - offset, position, position + offset], fps
def cropFrames(frames: List[pims.frame.Frame], box: List[int]) -> List[pims.frame.Frame]:
"""Crop frames.
Parameters
----------
frames : List[pims.frame.Frame]
List of frames to be cropped
box : List[int]
Dimensions, and location to crop: format [y0, x0, y1, x1]
Returns
-------
List[pims.frame.Frame]
List of cropped frames
"""
croppedFrames = []
xi = box[1]
xii = box[3]
yi = box[0]
yii = box[2]
for frame in frames:
croppedFrames.append(frame[yi:yii, xi:xii])
return croppedFrames
def preprocessFrame(frame: pims.frame.Frame, fg) -> Tuple[np.ndarray]:
"""Preprocess frame. Converts to grayscale and removes noise.
Parameters
----------
frame : pims.frame.Frame
Frame to be preprocessed
fg : TYPE
Foreground remover?
Returns
-------
Tuple[np.ndarray]
Dilated image binary image, and grayscale image
"""
gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
fgmask = fg.apply(gray)
blur = cv2.GaussianBlur(fgmask, (5, 5), 0)
_, thesh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thesh, None, iterations=3)
return dilated, gray
def processContours(contours: List[float], contourpoints: List[List[float]], frame: pims.frame.Frame, debug=False) -> Tuple[List[List[float]], pims.frame.Frame]:
"""Get bounding boxes for each contour.
Parameters
----------
contours : List[float]
List of contours to find bounding boxes for.
contourpoints : List[List[float]]
List of bounding boxes. Does this need passed in?
frame : pims.frame.Frame
Frame from which the contours are from
debug : bool, optional
If true then draw bounding boxes
Returns
-------
Tuple[List[List[float]], pims.frame.Frame]
List of bounding boxes, and frame
"""
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
cx = x + (w / 2)
cy = y + (h / 2)
contourpoints.append([cx, cy])
if debug:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
return contourpoints, frame
def processFile(file: str) -> List[Union[str, int, List[float], int]]:
"""Open, read and process data from file.
Parameters
----------
file : str
File to read.
Returns
-------
List[Union[str, int, List[float], int]]
List of videoname, framenumber, bounding box, and label
"""
info = []
f = open(file, "r")
lines = f.readlines()[1:]
for line in lines:
name, framenum, *box, label, vel, kmeans, hdbscan = line.split(",")
framenum = int(framenum)
label = int(label)
box = [int(x) for x in box]
item = [name, framenum, box, label]
info.append(item)
return info
def trainParallel(workers=8):
"""Wrapper function for training HDBSCAN in parallel.
Parameters
----------
workers : int, optional
Number of workers to use in parallel, default=2
"""
data = processFile("../data/train.csv")
with futures.ProcessPoolExecutor(workers) as executor:
res = list(tqdm(executor.map(train, data), total=len(data)))
velocities = []
for i in res:
velocities.extend(i)
# with open('velocities.npy', 'wb') as f:
# np.save(f, velocities)
# model = hdbscan.HDBSCAN(min_cluster_size=1000, cluster_selection_epsilon=0.2, min_samples=5, leaf_size=100, prediction_data=True).fit(np.array(velocities).reshape(-1, 1))
# import pickle
# with open('model.pickle', 'wb') as f:
# pickle.dump(model, f)
def train(info: Tuple[str, List[float], int], root="/data/lm959/data/", crop=False):
"""Training function for HDBSCAN. Actually does the optical flow and
returns the data needed for training.
Parameters
----------
info : Tuple[str, List[float], int]
Tuple of video filename, framenumber, bounding box of object, and label of object.
root : str, optional
Root of file system location where videos are stored.
crop : bool, optional
If true then crop frames to bounding box of object.
Returns
-------
velocitymeterPerSecond : np.ndarray
List of velocities in m/s
"""
lk_params = dict(winSize=(15, 15),
maxLevel=2,
criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
fgbg = cv2.createBackgroundSubtractorMOG2()
root = Path(root)
videoFiles = list(root.glob("**/*.mp4"))
vidname, fnumber, box, label = info
fullname = _getFullFileName(videoFiles, vidname)
frames, framenums, fps = getFramesCV2(fullname, fnumber, offset=15)
contourpoints = []
fpsPerFrame = 1. / fps
alt = getAltitude(fullname, framenums[1], gpsdataPath="../data/gps/")
magn = getMagnification(frames[1])
dolphLength = 1714 * (magn / alt) + 16.5
dolphPixelPerSecond = dolphLength / 2.
if crop:
frames = cropFrames(frames, box)
frame = frames[0]
for i in range(0, 2):
dilated, gray1 = preprocessFrame(frame, fgbg)
contours, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contourpoints, frame = processContours(contours, contourpoints, frame)
p0 = np.array(contourpoints, np.float32)
gray2 = cv2.cvtColor(frames[i + 1], cv2.COLOR_RGB2GRAY)
try:
p1, _, _ = cv2.calcOpticalFlowPyrLK(gray1, gray2, p0, None, **lk_params)
diff = np.array(p1) - np.array(p0)
velocity = diff / fpsPerFrame
velocity = [np.sqrt(item[0]**2 + item[1]**2) for item in velocity]
frame = frames[1].copy()
contourpoints = []
except:
# velocity = np.array([0.])
# if not crop:
continue
velocitymeterPerSecond = velocity / dolphPixelPerSecond
return velocitymeterPerSecond
def calcLabels():
"""Summary
"""
from sklearn.cluster import KMeans
data = processFile("../data/train.csv")
data = data
workers = 8
with futures.ProcessPoolExecutor(workers) as executor:
res = list(tqdm(executor.map(train, data, repeat("/data/lm959/data/"), repeat(True)), total=len(data)))
with open('velocities.npy', "rb") as f:
arrays = np.load(f)
# model = hdbscan.HDBSCAN(min_cluster_size=1000, min_samples=5, leaf_size=100, prediction_data=True).fit(arrays.reshape(-1, 1))
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
TotalVelocity = scaler.fit_transform(arrays.reshape(-1, 1))
model = KMeans(n_clusters=6, random_state=0, max_iter=300, precompute_distances=True, algorithm='full').fit(np.array(TotalVelocity).reshape(-1, 1))
outshizz = []
for i, item in enumerate(res):
vels = np.mean(item)
test_labels = model.predict(vels.reshape(-1, 1))
tmp = [data[i][0], data[i][1], data[i][2][0], data[i][2][1], data[i][2][2], data[i][2][3], data[i][3], vels, test_labels[0]]
outshizz.append(tmp)
with open('train-data-kmeans.npy', 'wb') as f:
np.save(f, np.array(outshizz))
# def infer(vels, tmper):
# import pickle
# with open('model.pickle', "rb") as f:
# loaded_obj = pickle.load(f)
# test_labels, strengths = hdbscan.approximate_predict(loaded_obj, np.array(vels).reshape(-1, 1))
def elbowPlot():
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
with open('velocities.npy', "rb") as f:
arrays = np.load(f)
scaler = StandardScaler()
TotalVelocity = scaler.fit_transform(arrays.reshape(-1, 1))
inertia = []
for i in range(1, 15):
print(i)
km = KMeans(n_clusters=i, random_state=0, max_iter=300, precompute_distances=True, algorithm='full')
km.fit(np.array(TotalVelocity).reshape(-1, 1))
inertia.append(km.inertia_)
# results = trainParallel(workers=6)
# calcLabels()
# elbowPlot()
```

**avg_line_length:** 28.29156 · **max_line_length:** 176 · **alphanum_fraction:** 0.626831

**Quality signals** (`qsc_code_*_quality_signal`): num_words 1,396 · num_chars 11,062 · mean_word_length 4.919771 · frac_words_unique 0.235673 · frac_chars_top_2grams 0.020384 · frac_chars_top_3grams 0.024461 · frac_chars_top_4grams 0.015725 · frac_chars_dupe_5grams 0.365609 · frac_chars_dupe_6grams 0.32484 · frac_chars_dupe_7grams 0.289458 · frac_chars_dupe_8grams 0.238497 · frac_chars_dupe_9grams 0.231363 · frac_chars_dupe_10grams 0.213891 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.023091 · frac_chars_whitespace 0.248328 · size_file_byte 11,062 · num_lines 390 · num_chars_line_max 177 · num_chars_line_mean 28.364103 · frac_chars_alphabet 0.802886 · frac_chars_comments 0.334207 · cate_xml_start 0 · frac_lines_dupe_lines 0.148387 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.02228 · frac_chars_long_word_length 0.003058 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0.012903

**Python signals** (`qsc_codepython_*_quality_signal`): cate_ast 1 · frac_lines_func_ratio 0.070968 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.129032 · frac_lines_simplefunc 0 · score_lines_no_logic 0.251613 · frac_lines_print 0.006452

**Per-signal flags:** all 0; `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null.

**effective:** 1 · **hits:** 0
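One thing worth flagging in `train` above: `velocity` is rebuilt by a list comprehension, so it is a plain Python list, and `velocity / dolphPixelPerSecond` raises `TypeError` (or `NameError` if the bare `except:` fired on the first iteration). A standalone sketch of the scaling step with an explicit array conversion, which is an assumption about intent rather than a change taken from the repository:

```python
import numpy as np

velocity = [3.2, 4.1, 5.0]      # per-point speeds in pixels/second (illustrative values)
dolphPixelPerSecond = 857.0     # pixels per (m/s), from dolphLength / 2 (illustrative)

# Convert to an ndarray before dividing, so elementwise scaling is well defined.
velocity_mps = np.asarray(velocity, dtype=float) / dolphPixelPerSecond
print(velocity_mps)
```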
---

**hexsha:** `7c094dc9f6789987096d646f2920ee372eb6f1b8` · **size:** 3,409 · **ext:** py · **lang:** Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | zero_paper/models.py | PLsergent/zero-paper | 4663e0e9976447419b5da5cdd32e57dccfc32125 | ["MIT"] | null | null | null |
| max_issues | zero_paper/models.py | PLsergent/zero-paper | 4663e0e9976447419b5da5cdd32e57dccfc32125 | ["MIT"] | null | null | null |
| max_forks | zero_paper/models.py | PLsergent/zero-paper | 4663e0e9976447419b5da5cdd32e57dccfc32125 | ["MIT"] | null | null | null |

**content:**

```python
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from django.dispatch import receiver
import os
import shutil
class Folder(models.Model):
name = models.CharField(max_length=128)
parent_folder = models.ForeignKey("self", null=True, blank=True, on_delete=models.CASCADE)
def __str__(self):
return f'{self.name}'
class Tag(models.Model):
name = models.CharField(max_length=128)
def __str__(self):
return f'{self.name}'
def upload_to(instance, filename):
upload_to = ""
if instance.folder:
upload_to = instance.folder.name + "/"
instance = instance.folder
while True:
if not instance.parent_folder:
break
upload_to = instance.parent_folder.name + "/" + upload_to
instance = instance.parent_folder
return "files/" + upload_to + filename
class Document(models.Model):
title = models.CharField(max_length=128)
docfile = models.FileField(upload_to=upload_to)
TYPES = (
('PDF', 'Pdf'),
('IMG', 'Image'),
('XLS', 'Excel'),
('DOCX', 'Docx'),
('TXT', 'Text')
)
doctype = models.CharField(max_length=128, choices=TYPES)
description = models.TextField(max_length=250, blank=True)
created = models.DateTimeField(editable=False)
updated = models.DateTimeField(editable=False)
folder = models.ForeignKey(Folder, null=True, blank=True, on_delete=models.CASCADE)
tags = models.ManyToManyField(Tag)
def save(self, *args, **kwargs):
''' On save, update timestamps '''
if not self.id:
self.created = timezone.now()
self.updated = timezone.now()
return super(Document, self).save(*args, **kwargs)
def __str__(self):
return f'{self.title}'
@receiver(models.signals.post_delete, sender=Document)
def auto_delete_file_on_delete(sender, instance, **kwargs):
"""
Deletes file from filesystem
when corresponding `Document` object is deleted.
"""
if instance.docfile:
if os.path.isfile(instance.docfile.path):
os.remove(instance.docfile.path)
@receiver(models.signals.pre_save, sender=Document)
def auto_delete_file_on_change(sender, instance, **kwargs):
"""
Deletes old file from filesystem
when corresponding `Document` object is updated
with new file.
"""
if not instance.pk:
return False
try:
old_file = Document.objects.get(pk=instance.pk).docfile
except Document.DoesNotExist:
return False
new_file = instance.docfile
if not old_file == new_file:
if os.path.isfile(old_file.path):
os.remove(old_file.path)
@receiver(models.signals.pre_save, sender=Document)
def auto_move_file_on_update(sender, instance, **kwargs):
"""
Move old file from filesystem
when corresponding `Document` object is updated
with new folder.
"""
if not instance.pk:
return False
try:
old_file = Document.objects.get(pk=instance.pk).docfile
old_folder = Document.objects.get(pk=instance.pk).folder
except Document.DoesNotExist:
return False
new_folder = instance.folder
if not old_folder == new_folder:
new_path = upload_to(instance, os.path.basename(old_file.path))
shutil.move(old_file.path, new_path)
```

**avg_line_length:** 29.903509 · **max_line_length:** 94 · **alphanum_fraction:** 0.661484

**Quality signals** (`qsc_code_*_quality_signal`): num_words 423 · num_chars 3,409 · mean_word_length 5.184397 · frac_words_unique 0.250591 · frac_chars_top_2grams 0.032832 · frac_chars_top_3grams 0.03648 · frac_chars_top_4grams 0.043776 · frac_chars_dupe_5grams 0.400821 · frac_chars_dupe_6grams 0.376197 · frac_chars_dupe_7grams 0.316461 · frac_chars_dupe_8grams 0.273142 · frac_chars_dupe_9grams 0.176927 · frac_chars_dupe_10grams 0.176927 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.005693 · frac_chars_whitespace 0.227046 · size_file_byte 3,409 · num_lines 113 · num_chars_line_max 95 · num_chars_line_mean 30.168142 · frac_chars_alphabet 0.826565 · frac_chars_comments 0.086536 · cate_xml_start 0 · frac_lines_dupe_lines 0.265823 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.027294 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0

**Python signals** (`qsc_codepython_*_quality_signal`): cate_ast 1 · frac_lines_func_ratio 0.101266 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.075949 · frac_lines_simplefunc 0.037975 · score_lines_no_logic 0.481013 · frac_lines_print 0

**Per-signal flags:** all 0; `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null.

**effective:** 1 · **hits:** 0
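`upload_to` walks the `parent_folder` links upward to rebuild the storage path. A quick sketch with unsaved model instances, which are enough for this pure function (run inside a configured Django project, e.g. `manage.py shell`; the folder names are illustrative):

```python
# root -> "2024" -> "invoices"; the document lands under files/2024/invoices/.
root = Folder(name="2024")
child = Folder(name="invoices", parent_folder=root)
doc = Document(folder=child)
print(upload_to(doc, "scan.pdf"))  # files/2024/invoices/scan.pdf
```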
---

**hexsha:** `9fd494efb313bbd651da7b57c11d4c1658a52fc3` · **size:** 1,610 · **ext:** py · **lang:** Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | example/torch_classifier.py | KevinEloff/deep-chain-apps | 0952845e93f0c0592f04275fe99b122ff831901f | ["Apache-1.1"] | null | null | null |
| max_issues | example/torch_classifier.py | KevinEloff/deep-chain-apps | 0952845e93f0c0592f04275fe99b122ff831901f | ["Apache-1.1"] | null | null | null |
| max_forks | example/torch_classifier.py | KevinEloff/deep-chain-apps | 0952845e93f0c0592f04275fe99b122ff831901f | ["Apache-1.1"] | null | null | null |

**content:**

```python
"""
Module that provide a classifier template to train a model on embeddings.
With use the pathogen vs human dataset as an example. The embedding of 100k proteins come
from the protBert model.
The model is built with pytorch_ligthning, a wrapper on top of
pytorch (similar to keras with tensorflow)
Feel feel to build you own model if you want to build a more complex one
"""
from deepchain.dataset import load_pathogen_dataset
from deepchain.models import MLP
from deepchain.models.utils import (
confusion_matrix_plot,
dataloader_from_numpy,
model_evaluation_accuracy,
)
from sklearn.model_selection import train_test_split
# Load embedding and target dataset
X, y = load_pathogen_dataset()
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.3)
train_dataloader = dataloader_from_numpy(X_train, y_train, batch_size=32)
test_dataloader = dataloader_from_numpy(X_val, y_val, batch_size=32)
# Build a multi-layer-perceptron on top of embedding
# The fit method can handle all the arguments available in the
# 'trainer' class of pytorch lightening :
# https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html
# Example arguments:
# * specifies all GPUs regardless of its availability :
# Trainer(gpus=-1, auto_select_gpus=False, max_epochs=20)
mlp = MLP(input_shape=X_train.shape[1])
mlp.fit(train_dataloader, epochs=5)
mlp.save_model("model.pt")
# Model evaluation
prediction, truth = model_evaluation_accuracy(test_dataloader, mlp)
# Plot confusion matrix
confusion_matrix_plot(truth, (prediction > 0.5).astype(int), ["0", "1"])
```

**avg_line_length:** 36.590909 · **max_line_length:** 90 · **alphanum_fraction:** 0.775776

**Quality signals** (`qsc_code_*_quality_signal`): num_words 248 · num_chars 1,610 · mean_word_length 4.854839 · frac_words_unique 0.467742 · frac_chars_top_2grams 0.032392 · frac_chars_top_3grams 0.047342 · frac_chars_top_4grams 0.048173 · frac_chars_dupe_5grams 0.049834 · frac_chars_dupe_6grams 0 · frac_chars_dupe_7grams 0 · frac_chars_dupe_8grams 0 · frac_chars_dupe_9grams 0 · frac_chars_dupe_10grams 0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.013091 · frac_chars_whitespace 0.145963 · size_file_byte 1,610 · num_lines 43 · num_chars_line_max 91 · num_chars_line_mean 37.44186 · frac_chars_alphabet 0.862545 · frac_chars_comments 0.51118 · cate_xml_start 0 · frac_lines_dupe_lines 0 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.012987 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0

**Python signals** (`qsc_codepython_*_quality_signal`): cate_ast 1 · frac_lines_func_ratio 0 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.235294 · frac_lines_simplefunc 0 · score_lines_no_logic 0.235294 · frac_lines_print 0

**Per-signal flags:** all 0; `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null.

**effective:** 1 · **hits:** 0
---

**hexsha:** `9fd54ac2fec1187dcd8824c5cc8a001bad343192` · **size:** 5,589 · **ext:** py · **lang:** Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | towavfile.py | streamsoftdev/audiomods | 0e3d27fcd9af0a0f6a9de512112425e093f82dda | ["Apache-2.0"] | null | null | null |
| max_issues | towavfile.py | streamsoftdev/audiomods | 0e3d27fcd9af0a0f6a9de512112425e093f82dda | ["Apache-2.0"] | null | null | null |
| max_forks | towavfile.py | streamsoftdev/audiomods | 0e3d27fcd9af0a0f6a9de512112425e093f82dda | ["Apache-2.0"] | null | null | null |

**content:**

```python
#Copyright 2022 Nathan Harwood
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import numpy as np
from pathlib import Path
import struct
from audiomodule.audiomodule import AM_CONTINUE, AM_CYCLIC_UNDERRUN, AM_ERROR, AM_INPUT_REQUIRED, MODULE_ERROR, \
AudioModule, sw_dtype, audiomod, nice_channels, nice_frequency_str, hms2_str
@audiomod
class ToWavFile(AudioModule):
name = "To WAV File"
category = "Signal out"
description = "Write the signal to a WAV file. Supports incremental writing to the WAV file."
def __init__(self, file: str = "",
num_channels: int = 2,
sample_width: int = 2,
**kwargs):
super().__init__(num_outputs=0, in_chs=[num_channels], **kwargs)
self.file = file
self.num_channels = num_channels
self.ready = True
self.wf = None
self.sample_width=sample_width
self.bytes_written=0
self.frames_written=0
self.requires_data = False
self.set_write_dtype()
def set_write_dtype(self):
if self.sample_width == 1:
self.write_dtype = np.int8
self.out_max = (2**7)-1
elif self.sample_width == 2:
self.write_dtype = np.int16
self.out_max = (2**15)-1
elif self.sample_width == 4:
self.out_max = 1.0
self.write_dtype = np.float32
async def next_chunk(self):
return await self.process_next_chunk()
def process_next_chunk(self):
if not self.wf:
return AM_ERROR
underrun,cyclic = self.input_underrun()
if (not self.input_pending()) or underrun:
if cyclic:
return AM_CYCLIC_UNDERRUN
else:
return AM_INPUT_REQUIRED
chunk = self.get_input_chunk().buffer
self.write_chunk(chunk)
return AM_CONTINUE
def write_chunk(self,chunk):
chunk *= self.out_max
interleaved = chunk.flatten()
out_data = interleaved.astype(self.write_dtype).tobytes()
self.frames_written+=len(chunk)
self.bytes_written+=len(chunk)*self.sample_width*self.num_channels
self.wf.write(out_data)
self.update_wav_header()
def update_wav_header(self):
current_pos = self.wf.tell()
self.wf.seek(0)
WAVE_FORMAT_PCM = 0x0001
bytes_to_add = b'RIFF'
_datalength = self.frames_written * self.num_channels * self.sample_width
bytes_to_add += struct.pack('<L4s4sLHHLLHH4s',
36 + _datalength, b'WAVE', b'fmt ', 16,
WAVE_FORMAT_PCM, self.num_channels, int(self.sample_rate),
self.num_channels * int(self.sample_rate) * self.sample_width,
self.num_channels * self.sample_width,
self.sample_width * 8, b'data')
bytes_to_add += struct.pack('<L', _datalength)
self.wf.write(bytes_to_add)
self.wf.seek(current_pos)
def open(self):
super().open()
if self.file != "" and self.file != None:
try:
self.wf = open(self.file, 'wb')
self.update_wav_header()
self.ready = True
except Exception as e:
self.observer.put(
(MODULE_ERROR, (self.mod_id, f"{self.file} could not be written to. {e}."), None))
self.ready = False
def close(self):
if self.get_in_buf(0).size()>0 and self.wf:
chunk = self.get_in_buf(0).get_all()
self.write_chunk(chunk)
super().close()
if self.wf:
self.wf.close()
self.wf = None
def reset(self):
super().reset()
if self.wf:
self.wf.seek(0)
self.bytes_written=0
self.frames_written=0
self.update_wav_header()
def get_widget_params(self):
return super().get_widget_params() | {
'meta_order': ['filename'],
'filename': {
'name': 'Filename',
'value': self.file,
'type': 'write-filename',
'filetypes': [('WAV files','*.wav *.WAV'),("All files","*.*")],
'defaultextension': '.wav'
}
}
def set_widget_params(self, params):
super().set_widget_params(params)
if 'filename' in params:
self.close()
self.file = params['filename']['value']
self.open()
def get_status(self):
if self.wf:
ch = nice_channels(self.num_channels)
status = {
'status':'ready',
'topleft':Path(self.file).name,
'topright':hms2_str(self.frames_written/self.sample_rate),
'bottom':nice_frequency_str(self.sample_rate),
'bottomleft':f"{self.sample_width*8} bit",
'bottomright':ch,
}
else:
status = {
'status':'error',
'bottom':"Valid filename required."
}
return status
```

**avg_line_length:** 33.668675 · **max_line_length:** 113 · **alphanum_fraction:** 0.580247

**Quality signals** (`qsc_code_*_quality_signal`): num_words 687 · num_chars 5,589 · mean_word_length 4.524017 · frac_words_unique 0.294032 · frac_chars_top_2grams 0.028958 · frac_chars_top_3grams 0.048263 · frac_chars_top_4grams 0.015444 · frac_chars_dupe_5grams 0.136744 · frac_chars_dupe_6grams 0.080759 · frac_chars_dupe_7grams 0.067568 · frac_chars_dupe_8grams 0.046976 · frac_chars_dupe_9grams 0.025097 · frac_chars_dupe_10grams 0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.013813 · frac_chars_whitespace 0.313473 · size_file_byte 5,589 · num_lines 166 · num_chars_line_max 114 · num_chars_line_mean 33.668675 · frac_chars_alphabet 0.796195 · frac_chars_comments 0.096976 · cate_xml_start 0 · frac_lines_dupe_lines 0.167939 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.086594 · frac_chars_long_word_length 0.004171 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0.001192 · frac_lines_prompt_comments 0 · frac_lines_assert 0

**Python signals** (`qsc_codepython_*_quality_signal`): cate_ast 1 · frac_lines_func_ratio 0.083969 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.030534 · frac_lines_simplefunc 0.007634 · score_lines_no_logic 0.198473 · frac_lines_print 0

**Per-signal flags:** all 0; `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null.

**effective:** 1 · **hits:** 0
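`update_wav_header` above rewrites the canonical 44-byte PCM WAV header in place, seeking to position 0 and then restoring the write cursor, which is what keeps the file a valid WAV after every incremental write. The same layout shown standalone, with illustrative numbers:

```python
import struct

# 1000 stereo 16-bit frames at 44.1 kHz -> 4000 data bytes.
frames, channels, rate, width = 1000, 2, 44100, 2
datalen = frames * channels * width
header = b'RIFF' + struct.pack(
    '<L4s4sLHHLLHH4s',
    36 + datalen, b'WAVE', b'fmt ', 16,
    0x0001,                    # WAVE_FORMAT_PCM
    channels, rate,
    channels * rate * width,   # byte rate
    channels * width,          # block align
    width * 8, b'data',
) + struct.pack('<L', datalen)
assert len(header) == 44       # canonical PCM WAV header size
```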
---

**hexsha:** `9fd5c810c4ce10644e6de28ce2355f1bd5e61c6a` · **size:** 6,857 · **ext:** py · **lang:** Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | test/blast/sample_data.py | UdoGi/dark-matter | 3d49e89fa5e81f83144119f6216c5774176d203b | ["MIT"] | 10 | 2016-03-09T09:43:14.000Z | 2021-04-03T21:46:12.000Z |
| max_issues | test/blast/sample_data.py | terrycojones/dark-matter | 67d16f870db6b4239e17e542bc6e3f072dc29c75 | ["MIT"] | 332 | 2015-01-07T12:37:30.000Z | 2022-01-20T15:48:11.000Z |
| max_forks | test/blast/sample_data.py | terrycojones/dark-matter | 67d16f870db6b4239e17e542bc6e3f072dc29c75 | ["MIT"] | 4 | 2016-03-08T14:56:39.000Z | 2021-01-27T08:11:27.000Z |

**content:**

```python
# Sample BLAST parameters.
PARAMS = {
'application': 'BLASTN',
'blast_cutoff': [
None,
None
],
'database': 'manx-shearwater',
'database_length': 17465129,
'database_letters': None,
'database_name': [],
'database_sequences': 70016,
'date': '',
'dropoff_1st_pass': [
None,
None
],
'effective_database_length': None,
'effective_hsp_length': 22,
'effective_query_length': None,
'effective_search_space': 382194648.0,
'effective_search_space_used': None,
'frameshift': [
None,
None
],
'gap_penalties': [
5,
2
],
'gap_trigger': [
None,
None
],
'gap_x_dropoff': [
None,
None
],
'gap_x_dropoff_final': [
None,
None
],
'gapped': 0,
'hsps_gapped': None,
'hsps_no_gap': None,
'hsps_prelim_gapped': None,
'hsps_prelim_gapped_attemped': None,
'ka_params': [
0.625,
0.41,
0.78
],
'ka_params_gap': [
None,
None,
None
],
'matrix': '',
'num_good_extends': None,
'num_hits': None,
'num_letters_in_database': 17465129,
'num_seqs_better_e': None,
'num_sequences': None,
'num_sequences_in_database': 70016,
'posted_date': [],
'query': 'GZG3DGY01ASHXW',
'query_id': 'Query_1',
'query_length': 46,
'query_letters': 46,
'reference': 'Stephen F. Altschul, Thomas L. Madden, ...',
'sc_match': 2,
'sc_mismatch': -3,
'threshold': None,
'version': '2.2.28+',
'window_size': None
}
RECORD0 = {
'query': 'id0',
'alignments': [
{
'length': 37000,
'hsps': [
{
'bits': 20,
'sbjct_end': 15400,
'expect': 1e-11,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 15362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
},
{
'length': 38000,
'hsps': [
{
'bits': 25,
'sbjct_end': 12400,
'expect': 1e-10,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 12362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Squirrelpox virus 55',
}
]
}
RECORD1 = {
'query': 'id1',
'alignments': [
{
'length': 35000,
'hsps': [
{
'bits': 20,
'sbjct_end': 11400,
'expect': 1e-8,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 11362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Monkeypox virus 456',
},
{
'length': 35000,
'hsps': [
{
'bits': 20,
'sbjct_end': 10400,
'expect': 1e-7,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 10362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.',
}
]
}
RECORD2 = {
'query': 'id2',
'alignments': [
{
'length': 30000,
'hsps': [
{
'bits': 20,
'sbjct_end': 1400,
'expect': 1e-6,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 1362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Cowpox virus 15',
}
]
}
# Identical to RECORD2, apart from e-value.
RECORD3 = {
'query': 'id3',
'alignments': [
{
'length': 30000,
'hsps': [
{
'bits': 20,
'sbjct_end': 1400,
'expect': 1e-5,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 1362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Cowpox virus 15',
}
]
}
RECORD4 = {
'query': 'id4',
'alignments': [
{
'length': 30000,
'hsps': [
{
'bits': 10,
'sbjct_end': 1400,
'expect': 1e-3,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 1362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
},
{
'bits': 5,
'sbjct_end': 1400,
'expect': 1e-2,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 1362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
},
{
'bits': 3,
'sbjct_end': 1400,
'expect': 0.0,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 1362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Cowpox virus 15',
}
]
}
```

**avg_line_length:** 27.538153 · **max_line_length:** 70 · **alphanum_fraction:** 0.392737

**Quality signals** (`qsc_code_*_quality_signal`): num_words 518 · num_chars 6,857 · mean_word_length 5.019305 · frac_words_unique 0.279923 · frac_chars_top_2grams 0.023077 · frac_chars_top_3grams 0.065769 · frac_chars_top_4grams 0.114231 · frac_chars_dupe_5grams 0.577308 · frac_chars_dupe_6grams 0.529231 · frac_chars_dupe_7grams 0.463077 · frac_chars_dupe_8grams 0.440769 · frac_chars_dupe_9grams 0.440769 · frac_chars_dupe_10grams 0.440769 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.105351 · frac_chars_whitespace 0.473968 · size_file_byte 6,857 · num_lines 248 · num_chars_line_max 71 · num_chars_line_mean 27.649194 · frac_chars_alphabet 0.61547 · frac_chars_comments 0.009625 · cate_xml_start 0 · frac_lines_dupe_lines 0.460581 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.340454 · frac_chars_long_word_length 0.107395 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0

**Python signals** (`qsc_codepython_*_quality_signal`): cate_ast 1 · frac_lines_func_ratio 0 · cate_var_zero false · frac_lines_pass 0.004149 · frac_lines_import 0 · frac_lines_simplefunc 0 · score_lines_no_logic 0 · frac_lines_print 0

**Per-signal flags:** all 0; `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null.

**effective:** 1 · **hits:** 0
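These fixtures mirror the parsed-BLAST shape the dark-matter tests consume: each record holds `alignments`, and each alignment holds its `hsps`. For instance, picking the best (lowest e-value) HSP in `RECORD0` works directly on the dicts above:

```python
best = min(
    (hsp for aln in RECORD0['alignments'] for hsp in aln['hsps']),
    key=lambda hsp: hsp['expect'],
)
print(best['expect'])  # 1e-11, from the first alignment
```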
---

**hexsha:** `9fd91ec0b1f55cd338e90af15b11bcbae0dc4915` · **size:** 1,471 · **ext:** py · **lang:** Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | neurgoo/misc/plot_utils.py | NISH1001/neurgoo | 83b2f4928d362b2b3c2f80ff6afe4c4768d6cc74 | ["MIT"] | 2 | 2022-03-02T11:59:19.000Z | 2022-03-18T17:59:28.000Z |
| max_issues | neurgoo/misc/plot_utils.py | NISH1001/neurgoo | 83b2f4928d362b2b3c2f80ff6afe4c4768d6cc74 | ["MIT"] | 1 | 2022-03-03T14:07:19.000Z | 2022-03-03T14:07:19.000Z |
| max_forks | neurgoo/misc/plot_utils.py | NISH1001/neurgoo | 83b2f4928d362b2b3c2f80ff6afe4c4768d6cc74 | ["MIT"] | null | null | null |

**content:**

```python
#!/usr/bin/env python3
from typing import Dict, List
try:
import matplotlib.pyplot as plt
MATPLOTLIB = True
except:
MATPLOTLIB = False
from loguru import logger
from .eval import EvalData
def plot_losses(losses):
if not MATPLOTLIB:
logger.error("Maplotlib not installed. Halting the plot process!")
return
plt.plot(losses)
plt.show()
def plot_history(
history: Dict[str, List[EvalData]], plot_type="loss", figure_size=(20, 7)
) -> None:
"""
This function plots train/val metrics in the same figure.
"""
if not MATPLOTLIB:
logger.error("Maplotlib not installed. Halting the plot process!")
return
train = history.get("train", [])
val = history.get("val", [])
# get epoch data common to both
t_epochs = list(map(lambda e: e.epoch, train))
v_epochs = list(map(lambda e: e.epoch, val))
epochs = set(t_epochs).intersection(v_epochs)
train = filter(lambda e: e.epoch in epochs, train)
train = sorted(train, key=lambda e: e.epoch)
val = filter(lambda e: e.epoch in epochs, val)
val = sorted(val, key=lambda e: e.epoch)
plt.figure(figsize=figure_size)
plt.plot([getattr(data, plot_type) for data in train])
plt.plot([getattr(data, plot_type) for data in val])
plt.legend([f"Train {plot_type}", f"Val {plot_type}"])
plt.xlabel("epoch")
plt.ylabel(f"{plot_type}")
def main():
pass
if __name__ == "__main__":
main()
```

**avg_line_length:** 23.349206 · **max_line_length:** 77 · **alphanum_fraction:** 0.650578

**Quality signals** (`qsc_code_*_quality_signal`): num_words 212 · num_chars 1,471 · mean_word_length 4.410377 · frac_words_unique 0.367925 · frac_chars_top_2grams 0.051337 · frac_chars_top_3grams 0.051337 · frac_chars_top_4grams 0.083422 · frac_chars_dupe_5grams 0.387166 · frac_chars_dupe_6grams 0.346524 · frac_chars_dupe_7grams 0.346524 · frac_chars_dupe_8grams 0.233155 · frac_chars_dupe_9grams 0.233155 · frac_chars_dupe_10grams 0.158289 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.003493 · frac_chars_whitespace 0.221618 · size_file_byte 1,471 · num_lines 62 · num_chars_line_max 78 · num_chars_line_mean 23.725806 · frac_chars_alphabet 0.8131 · frac_chars_comments 0.074099 · cate_xml_start 0 · frac_lines_dupe_lines 0.153846 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.124907 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0

**Python signals** (`qsc_codepython_*_quality_signal`): cate_ast 1 · frac_lines_func_ratio 0.076923 · cate_var_zero false · frac_lines_pass 0.025641 · frac_lines_import 0.102564 · frac_lines_simplefunc 0 · score_lines_no_logic 0.230769 · frac_lines_print 0

**Per-signal flags:** all 0; `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null.

**effective:** 1 · **hits:** 0
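The module guards its matplotlib import so the rest of the package keeps working headless; the bare `except:` there also swallows things like `KeyboardInterrupt`, so a narrower guard is the usual pattern (a sketch, not a change to the sample above):

```python
try:
    import matplotlib.pyplot as plt
    MATPLOTLIB = True
except ImportError:
    # Only the missing-dependency case should disable plotting.
    MATPLOTLIB = False
```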
---

**hexsha:** `9fdabb9fe3ddfe1efb0455a952de16d9ff31f05a` · **size:** 3,277 · **ext:** py · **lang:** Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | LPSelectiveSearch.py | ksdsouza/license-plate-detector | 900a032768d9c623b7ecb1ec7abd07651fda2b16 | ["MIT"] | null | null | null |
| max_issues | LPSelectiveSearch.py | ksdsouza/license-plate-detector | 900a032768d9c623b7ecb1ec7abd07651fda2b16 | ["MIT"] | null | null | null |
| max_forks | LPSelectiveSearch.py | ksdsouza/license-plate-detector | 900a032768d9c623b7ecb1ec7abd07651fda2b16 | ["MIT"] | null | null | null |

**content:**

```python
import itertools
import os
import sys
import cv2
import numpy as np
from SelectiveSearch import SelectiveSearch
images_path = "Images"
annotations = "Annotations"
cv2.setUseOptimized(True)
selective_search = SelectiveSearch()
train_images = []
train_labels = []
SS_IMG_SIZE = (224, 224)
# chunk = int(sys.argv[1])
def get_annotation(file: str) -> (dict, object):
f = open(file)
[filename, x1, y1, dx, dy] = f.readline().split('\t')[0:5]
f.close()
img_filename = os.path.join(images_path, filename)
boundary_box = {
'x1': int(x1),
'y1': int(y1),
'x2': int(x1) + int(dx),
'y2': int(y1) + int(dy)
}
return boundary_box, img_filename
def get_iou(bb1: dict, bb2: dict) -> float:
x_left = max(bb1['x1'], bb2['x1'])
y_top = max(bb1['y1'], bb2['y1'])
x_right = min(bb1['x2'], bb2['x2'])
y_bottom = min(bb1['y2'], bb2['y2'])
if x_right < x_left or y_bottom < y_top:
return 0.0
intersection_area = (x_right - x_left) * (y_bottom - y_top)
bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
assert iou >= 0.0
assert iou <= 1.0
return iou
# filenames = list(enumerate(
# f for f in os.listdir(annotations)
# if f.startswith('wts')
# ))[chunk * 20:(chunk + 1) * 20]
#
# for index, file in filenames:
# try:
# print(f"{index}\t{file}")
# boundary_box, img_filename = get_annotation(os.path.join(annotations, file))
# ss_results, img_out = selective_search.process_image(img_filename)
# lp_counter = 0
# bg_counter = 0
# fflag = False
# bflag = False
#
# for result in itertools.islice(ss_results, 2000):
# x1, y1, dx, dy = result
# if file.startswith('wts_textonly'):
# iou = 0
# else:
# iou = get_iou(boundary_box, {
# 'x1': x1,
# 'y1': y1,
# 'x2': x1 + dx,
# 'y2': y1 + dy
# })
# if bg_counter < 30:
# if iou < 0.3:
# test_image = img_out[y1: y1 + dy, x1:x1 + dx]
# resized = cv2.resize(test_image, SS_IMG_SIZE, interpolation=cv2.INTER_AREA)
# train_images.append(resized)
# train_labels.append(0)
# bg_counter += 1
# else:
# bflag = True
# if lp_counter < 30:
# if iou > 0.85:
# test_image = img_out[y1: y1 + dy, x1:x1 + dx]
# resized = cv2.resize(test_image, SS_IMG_SIZE, interpolation=cv2.INTER_AREA)
# train_images.append(resized)
# train_labels.append(1)
# lp_counter += 1
# else:
# fflag = True
# if fflag and bflag:
# break
# except Exception as e:
# print(e)
# print(f"Error occurred in {file}")
#
# np.save(f'train_images_chunk{chunk}', train_images)
# np.save(f'train_labels_chunk{chunk}', train_labels)
```

**avg_line_length:** 30.06422 · **max_line_length:** 97 · **alphanum_fraction:** 0.523039

**Quality signals** (`qsc_code_*_quality_signal`): num_words 414 · num_chars 3,277 · mean_word_length 3.966184 · frac_words_unique 0.275362 · frac_chars_top_2grams 0.033496 · frac_chars_top_3grams 0.016443 · frac_chars_top_4grams 0.009744 · frac_chars_dupe_5grams 0.17296 · frac_chars_dupe_6grams 0.154689 · frac_chars_dupe_7grams 0.154689 · frac_chars_dupe_8grams 0.154689 · frac_chars_dupe_9grams 0.154689 · frac_chars_dupe_10grams 0.154689 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.051494 · frac_chars_whitespace 0.336283 · size_file_byte 3,277 · num_lines 108 · num_chars_line_max 98 · num_chars_line_mean 30.342593 · frac_chars_alphabet 0.703448 · frac_chars_comments 0.578273 · cate_xml_start 0 · frac_lines_dupe_lines 0 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.044428 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0.051282

**Python signals** (`qsc_codepython_*_quality_signal`): cate_ast 1 · frac_lines_func_ratio 0.051282 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.153846 · frac_lines_simplefunc 0 · score_lines_no_logic 0.282051 · frac_lines_print 0

**Per-signal flags:** all 0; `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null.

**effective:** 1 · **hits:** 0
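`get_iou` above is the standard intersection-over-union on `{x1, y1, x2, y2}` boxes. A worked check with two 10×10 boxes overlapping in a 5×5 region, using the sample's function:

```python
a = {'x1': 0, 'y1': 0, 'x2': 10, 'y2': 10}
b = {'x1': 5, 'y1': 5, 'x2': 15, 'y2': 15}
print(get_iou(a, b))  # 25 / (100 + 100 - 25) ≈ 0.1428571
```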
---

**hexsha:** `9fde09067036bce742f20360db8b7e67c431adac` · **size:** 3,636 · **ext:** py · **lang:** Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | core/tests/test_office.py | cjmash/art-backend | fb1dfd69cca9cda1d8714bd7066c3920d1a97312 | ["MIT"] | null | null | null |
| max_issues | core/tests/test_office.py | cjmash/art-backend | fb1dfd69cca9cda1d8714bd7066c3920d1a97312 | ["MIT"] | null | null | null |
| max_forks | core/tests/test_office.py | cjmash/art-backend | fb1dfd69cca9cda1d8714bd7066c3920d1a97312 | ["MIT"] | null | null | null |

**content:**

```python
from django.contrib.auth import get_user_model
from django.db import transaction
from rest_framework.exceptions import ValidationError
from ..models import OfficeBlock, OfficeFloor, OfficeFloorSection
from core.tests import CoreBaseTestCase
User = get_user_model()
class OfficeBlockModelTest(CoreBaseTestCase):
"""Tests for the Office Block Models"""
def setUp(self):
super(OfficeBlockModelTest, self).setUp()
self.admin = User.objects.create_superuser(
email='testuser@gmail.com', cohort=19,
slack_handle='tester', password='qwerty123'
)
self.office_block = OfficeBlock.objects.create(
name="Block A"
)
self.office_floor = OfficeFloor.objects.create(
number=5,
block=self.office_block
)
self.office_floor_section = OfficeFloorSection.objects.create(
name="Right Wing",
floor=self.office_floor
)
self.all_office_blocks = OfficeBlock.objects.all()
self.all_office_floors = OfficeFloor.objects.all()
self.all_office_floor_sections = OfficeFloorSection.objects.all()
self.token_user = 'testtoken'
def test_add_new_office_block(self):
"""Test add new office block"""
self.assertEqual(self.all_office_blocks.count(), 1)
new_office_block = OfficeBlock(name="Block B")
new_office_block.save()
self.assertEqual(self.all_office_blocks.count(), 2)
def test_add_new_office_floor(self):
"""Test add new office floor"""
self.assertEqual(self.all_office_floors.count(), 1)
new_office_floor = OfficeFloor(
number=10,
block=self.office_block)
new_office_floor.save()
self.assertEqual(self.all_office_floors.count(), 2)
def test_add_new_office_floor_section(self):
"""Test add new Office Floor Section"""
self.assertEqual(self.all_office_floor_sections.count(), 1)
new_office_floor_section = OfficeFloorSection(
name="Left Wing",
floor=self.office_floor)
new_office_floor_section.save()
self.assertEqual(self.all_office_floor_sections.count(), 2)
def test_cannot_add_existing_office_block(self):
"""Test cannot add existing office_block name"""
self.assertEqual(self.all_office_blocks.count(), 1)
with transaction.atomic():
with self.assertRaises(ValidationError):
new_office_block = OfficeBlock.objects.create(
name="Block A"
)
new_office_block.save()
self.assertEqual(self.all_office_blocks.count(), 1)
def test_cannot_add_existing_office_floor_section(self):
"""Test cannot add existing office floor section name"""
self.assertEqual(self.all_office_floor_sections.count(), 1)
with transaction.atomic():
with self.assertRaises(ValidationError):
new_office_floor_section = OfficeFloorSection(
name="Right Wing",
floor=self.office_floor
)
new_office_floor_section.save()
self.assertEqual(self.all_office_floor_sections.count(), 1)
def test_office_block_model_string_representation(self):
self.assertEqual(str(self.office_block), "Block A")
def test_office_floor_model_string_representation(self):
self.assertEqual(self.office_floor.number, 5)
def test_office_floor_section_model_string_representation(self):
self.assertEqual(str(self.office_floor_section), "Right Wing")
```

**avg_line_length:** 37.484536 · **max_line_length:** 73 · **alphanum_fraction:** 0.668592

**Quality signals** (`qsc_code_*_quality_signal`): num_words 417 · num_chars 3,636 · mean_word_length 5.549161 · frac_words_unique 0.189448 · frac_chars_top_2grams 0.123596 · frac_chars_top_3grams 0.073034 · frac_chars_top_4grams 0.095073 · frac_chars_dupe_5grams 0.626621 · frac_chars_dupe_6grams 0.593345 · frac_chars_dupe_7grams 0.473207 · frac_chars_dupe_8grams 0.372947 · frac_chars_dupe_9grams 0.280467 · frac_chars_dupe_10grams 0.19274 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.006877 · frac_chars_whitespace 0.240099 · size_file_byte 3,636 · num_lines 96 · num_chars_line_max 74 · num_chars_line_mean 37.875 · frac_chars_alphabet 0.830619 · frac_chars_comments 0.058581 · cate_xml_start 0 · frac_lines_dupe_lines 0.305556 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.032134 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0.208333

**Python signals** (`qsc_codepython_*_quality_signal`): cate_ast 1 · frac_lines_func_ratio 0.125 · cate_var_zero false · frac_lines_pass 0.013889 · frac_lines_import 0.069444 · frac_lines_simplefunc 0 · score_lines_no_logic 0.208333 · frac_lines_print 0

**Per-signal flags:** all 0; `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null.

**effective:** 1 · **hits:** 0
9fdf686d8f768389a99935e8b0188491d6cb098b
| 2,481
|
py
|
Python
|
tests/functional/collection/test_collection_show.py
|
sirosen/temp-cli-test
|
416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6
|
[
"Apache-2.0"
] | 47
|
2016-04-21T19:51:17.000Z
|
2022-02-25T14:13:30.000Z
|
tests/functional/collection/test_collection_show.py
|
sirosen/temp-cli-test
|
416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6
|
[
"Apache-2.0"
] | 421
|
2016-04-20T18:45:24.000Z
|
2022-03-14T14:50:41.000Z
|
tests/functional/collection/test_collection_show.py
|
sirosen/temp-cli-test
|
416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6
|
[
"Apache-2.0"
] | 20
|
2016-09-10T20:25:27.000Z
|
2021-10-06T16:02:47.000Z
|
import pytest
def test_collection_show(run_line, load_api_fixtures, add_gcs_login):
data = load_api_fixtures("collection_operations.yaml")
cid = data["metadata"]["mapped_collection_id"]
username = data["metadata"]["username"]
epid = data["metadata"]["endpoint_id"]
add_gcs_login(epid)
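# run_line executes the CLI command; with matcher=True it also returns a
# matcher used to verify the output line by line against the regexes below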
_result, matcher = run_line(f"globus collection show {cid}", matcher=True)
matcher.check(r"^Display Name:\s+(.*)$", groups=["Happy Fun Collection Name"])
matcher.check(r"^Owner:\s+(.*)$", groups=[username])
matcher.check(r"^ID:\s+(.*)$", groups=[cid])
matcher.check(r"^Collection Type:\s+(.*)$", groups=["mapped"])
matcher.check(r"^Connector:\s+(.*)$", groups=["POSIX"])
def test_collection_show_private_policies(run_line, load_api_fixtures, add_gcs_login):
data = load_api_fixtures("collection_show_private_policies.yaml")
cid = data["metadata"]["collection_id"]
username = data["metadata"]["username"]
epid = data["metadata"]["endpoint_id"]
add_gcs_login(epid)
_result, matcher = run_line(
f"globus collection show --include-private-policies {cid}", matcher=True
)
matcher.check(r"^Display Name:\s+(.*)$", groups=["Happy Fun Collection Name"])
matcher.check(r"^Owner:\s+(.*)$", groups=[username])
matcher.check(r"^ID:\s+(.*)$", groups=[cid])
matcher.check(r"^Collection Type:\s+(.*)$", groups=["mapped"])
matcher.check(r"^Connector:\s+(.*)$", groups=["POSIX"])
matcher.check(r"Root Path:\s+(.*)$", groups=["/"])
matcher.check(
r"^Sharing Path Restrictions:\s+(.*)$",
groups=[
'{"DATA_TYPE": "path_restrictions#1.0.0", "none": ["/"], "read": ["/projects"], "read_write": ["$HOME"]}', # noqa: E501
],
)
@pytest.mark.parametrize(
"epid_key, ep_type",
[
("gcp_endpoint_id", "Globus Connect Personal"),
("endpoint_id", "Globus Connect Server v5 Endpoint"),
],
)
def test_collection_show_on_non_collection(
run_line, load_api_fixtures, epid_key, ep_type
):
data = load_api_fixtures("collection_operations.yaml")
epid = data["metadata"][epid_key]
result = run_line(f"globus collection show {epid}", assert_exit_code=3)
assert (
f"Expected {epid} to be a collection ID.\n"
f"Instead, found it was of type '{ep_type}'."
) in result.stderr
assert (
"Please run the following command instead:\n\n"
f" globus endpoint show {epid}"
) in result.stderr
| 36.485294
| 132
| 0.639258
| 315
| 2,481
| 4.834921
| 0.279365
| 0.09455
| 0.102429
| 0.041366
| 0.573211
| 0.558766
| 0.540381
| 0.502955
| 0.502955
| 0.502955
| 0
| 0.003906
| 0.174526
| 2,481
| 67
| 133
| 37.029851
| 0.739746
| 0.004031
| 0
| 0.436364
| 0
| 0.018182
| 0.405022
| 0.065614
| 0
| 0
| 0
| 0
| 0.054545
| 1
| 0.054545
| false
| 0
| 0.018182
| 0
| 0.072727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9fdfc62d17a4273e27c2f11b9b40558a4ec8fe41
| 2,044
|
py
|
Python
|
job.py
|
mapledyne/ihunttools
|
28d4f7dbf61b6e3f34c9e1cdfdac2e9afec177d8
|
[
"MIT"
] | null | null | null |
job.py
|
mapledyne/ihunttools
|
28d4f7dbf61b6e3f34c9e1cdfdac2e9afec177d8
|
[
"MIT"
] | 2
|
2021-09-08T02:16:00.000Z
|
2022-01-13T02:57:26.000Z
|
job.py
|
mapledyne/ihunttools
|
28d4f7dbf61b6e3f34c9e1cdfdac2e9afec177d8
|
[
"MIT"
] | null | null | null |
import argparse
import random
import ihuntapp
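# Example invocation (hypothetical values; --image is the only required flag):
#   python job.py --name "Ghost hunt" --image ghost.png --price 2500 --output ghost_job.png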
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Build an #iHunt job app page.')
parser.add_argument('--name', "-n",
default="Unnamed job", help='Name of the job')
parser.add_argument('--description', "-d",
default="No description available", help='Description of the job')
parser.add_argument('--image', "-i",
type=str, default="", help='Job image', required=True)
parser.add_argument('--price', "-p",
default=(random.randint(1, 18) * 500), help='Price the job pays')
parser.add_argument('--stars', "-s",
type=float, default=(random.randint(3, 8) * 0.5), help='Star level of job')
parser.add_argument('--currency', default="$", help='Currency symbol')
parser.add_argument('--distance',
type=float, default=random.uniform(5, 25), help='Distance of job')
parser.add_argument('--distanceunit', default="miles", help='distance unit')
parser.add_argument('--time', "-t",
type=float, default=random.randint(1, 3), help='Time of post')
parser.add_argument('--timeunit', default="days", help='Time unit')
parser.add_argument('--remaining', "-r",
type=float, default=random.randint(3, 8), help='Time remaining')
parser.add_argument('--remainingunit', default="days", help='Time unit')
parser.add_argument('--output', '-o', default='job.png', help='Filename to save screenshot')
args = parser.parse_args()
phone = ihuntapp.iHuntApp(args.name, args.description, args.image)
phone.price = args.price
phone.stars = args.stars
phone.currency = args.currency
phone.distance = args.distance
phone.distanceunit = args.distanceunit
phone.time = args.time
phone.timeunit = args.timeunit
phone.remaining = args.remaining
phone.remainingunit = args.remainingunit
phone.screenshot(args.output)
| 44.434783
| 99
| 0.626223
| 242
| 2,044
| 5.198347
| 0.309917
| 0.093005
| 0.175676
| 0.063593
| 0.259936
| 0.201908
| 0.162162
| 0.063593
| 0
| 0
| 0
| 0.010652
| 0.219178
| 2,044
| 45
| 100
| 45.422222
| 0.777569
| 0
| 0
| 0
| 0
| 0
| 0.208415
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.078947
| 0
| 0.078947
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9fe1fc1239cc234d72923d2663fd719390f1395d
| 454
|
py
|
Python
|
epochbot/utils.py
|
jaloo555/solana-easy-py
|
8e28b8de52fbe4ee0b8e94a0f9c728114fc91728
|
[
"MIT"
] | 4
|
2021-09-10T19:20:42.000Z
|
2022-02-12T00:27:40.000Z
|
epochbot/utils.py
|
jaloo555/solana-easy-py
|
8e28b8de52fbe4ee0b8e94a0f9c728114fc91728
|
[
"MIT"
] | null | null | null |
epochbot/utils.py
|
jaloo555/solana-easy-py
|
8e28b8de52fbe4ee0b8e94a0f9c728114fc91728
|
[
"MIT"
] | 1
|
2021-11-08T15:32:46.000Z
|
2021-11-08T15:32:46.000Z
|
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
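# e.g. enum('MAIN', 'DEV', TEST=5) builds a class with MAIN == 0, DEV == 1, TEST == 5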
ENDPOINT_URLS_ENUM = enum(
MAIN='https://api.mainnet-beta.solana.com',
DEV='https://api.devnet.solana.com',
TEST='https://api.testnet.solana.com',
)
ENDPOINT_URLS = {
"MAIN":'https://api.mainnet-beta.solana.com',
"DEV":'https://api.devnet.solana.com',
"TEST":'https://api.testnet.solana.com',
}
| 30.266667
| 66
| 0.645374
| 60
| 454
| 4.833333
| 0.4
| 0.165517
| 0.082759
| 0.131034
| 0.593103
| 0.593103
| 0.593103
| 0.593103
| 0.593103
| 0.593103
| 0
| 0
| 0.129956
| 454
| 15
| 67
| 30.266667
| 0.734177
| 0
| 0
| 0
| 0
| 0
| 0.446154
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9fe207c6dcc8198ff87d2e16175a87d21e6112ea
| 812
|
py
|
Python
|
FrontEnd/mapa_pontos.py
|
JessicaIsri/WebBot
|
e9ed911c0306f5e362b577e244e50073336480ea
|
[
"bzip2-1.0.6"
] | null | null | null |
FrontEnd/mapa_pontos.py
|
JessicaIsri/WebBot
|
e9ed911c0306f5e362b577e244e50073336480ea
|
[
"bzip2-1.0.6"
] | 1
|
2021-11-13T10:12:49.000Z
|
2021-11-16T12:17:01.000Z
|
FrontEnd/mapa_pontos.py
|
JessicaIsri/WebBot
|
e9ed911c0306f5e362b577e244e50073336480ea
|
[
"bzip2-1.0.6"
] | null | null | null |
import pymongo
import folium
from pymongo import MongoClient
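# NOTE: the connection string below embeds credentials in plain text; in
# practice these would normally come from an environment variable or a config file.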
db = MongoClient('mongodb+srv://admin:admin@cluster0-vuh1j.azure.mongodb.net/test?retryWrites=true&w=majority')
db = db.get_database('BD_EMPRESAS')
collection = db.empresas
cnpj = db.get_collection('empresas').distinct("cnpj")
latitude = db.get_collection('empresas').distinct("latitude")
qtd_range = len(latitude)
longitude = db.get_collection('empresas').distinct("longitude")
endereco = db.get_collection('empresas').distinct("endereco")
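# Caution: distinct() de-duplicates each field independently, so the i-th
# latitude is not guaranteed to belong to the i-th cnpj; iterating over
# collection.find() would keep each document's fields aligned.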
mapa = folium.Map(location=[-23.4795233,-46.2698754],zoom_start=9)
for i in range(qtd_range):
folium.Marker([latitude[i], longitude[i]], popup='CNPJ: '+cnpj[i]+'\n Endereco: '+endereco[i]).add_to(mapa)
mapa.save("index.html")
| 24.606061
| 111
| 0.730296
| 109
| 812
| 5.33945
| 0.477064
| 0.042955
| 0.103093
| 0.158076
| 0.213058
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0.094828
| 812
| 33
| 112
| 24.606061
| 0.763265
| 0
| 0
| 0
| 0
| 0.05
| 0.236162
| 0.111931
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.15
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9fe3746b0ca2a17a4da60916603ceccce096325e
| 6,994
|
py
|
Python
|
hypernets/tests/searchers/evolution_test.py
|
Enpen/Hypernets
|
5fbf01412ffaef310855d98f52f8cc169e96246b
|
[
"Apache-2.0"
] | 1,080
|
2020-06-22T07:44:22.000Z
|
2022-03-22T07:46:48.000Z
|
hypernets/tests/searchers/evolution_test.py
|
Enpen/Hypernets
|
5fbf01412ffaef310855d98f52f8cc169e96246b
|
[
"Apache-2.0"
] | 24
|
2020-08-06T02:06:37.000Z
|
2022-03-31T03:34:35.000Z
|
hypernets/tests/searchers/evolution_test.py
|
Enpen/Hypernets
|
5fbf01412ffaef310855d98f52f8cc169e96246b
|
[
"Apache-2.0"
] | 170
|
2020-08-14T08:39:18.000Z
|
2022-03-23T12:58:17.000Z
|
# -*- coding:utf-8 -*-
"""
"""
import numpy as np
from hypernets.core.ops import Identity
from hypernets.core.search_space import HyperSpace, Int, Real, Choice, Bool
from hypernets.core.searcher import OptimizeDirection
from hypernets.searchers.evolution_searcher import Population, EvolutionSearcher
def get_space():
space = HyperSpace()
with space.as_default():
p1 = Int(1, 100)
p2 = Choice(['a', 'b'])
p3 = Bool()
p4 = Real(0.0, 1.0)
id1 = Identity(p1=p1)
id2 = Identity(p2=p2)(id1)
id3 = Identity(p3=p3)(id2)
id4 = Identity(p4=p4)(id3)
return space
class Test_Evolution():
def test_population(self):
population = Population(optimize_direction=OptimizeDirection.Maximize, random_state=np.random.RandomState(9527))
population.append('a', 0)
population.append('b', 1)
population.append('c', 2)
population.append('d', 3)
population.append('e', 4)
population.append('f', 5)
population.append('g', 6)
population.append('h', 7)
population.append('i', 8)
population.append('i', 9)
b1 = population.sample_best(25)
assert b1.reward == 8
population = Population(optimize_direction=OptimizeDirection.Minimize, random_state=np.random.RandomState(9527))
population.append('a', 0)
population.append('b', 1)
population.append('c', 2)
population.append('d', 3)
population.append('e', 4)
population.append('f', 5)
population.append('g', 6)
population.append('h', 7)
population.append('i', 8)
population.append('i', 9)
b2 = population.sample_best(25)
assert b2.reward == 0
def test_eliminate(self):
population = Population(optimize_direction=OptimizeDirection.Maximize)
population.append('a', 4)
population.append('b', 3)
population.append('c', 2)
population.append('d', 1)
population.append('e', 0)
population.append('f', 5)
population.append('g', 6)
population.append('h', 7)
population.append('i', 8)
population.append('j', 9)
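# regularized evolution eliminates the oldest individuals ('a' and 'b'),
# while the non-regularized variant eliminates the lowest-reward ones ('e' and 'd')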
eliminates = population.eliminate(2, regularized=True)
assert eliminates[0].space_sample == 'a' and eliminates[1].space_sample == 'b'
eliminates = population.eliminate(2, regularized=False)
assert eliminates[0].space_sample == 'e' and eliminates[1].space_sample == 'd'
def test_mutate(self):
def get_space():
space = HyperSpace()
with space.as_default():
id1 = Identity(p1=Int(0, 10), p2=Choice(['a', 'b']))
id2 = Identity(p3=Real(0., 1.), p4=Bool())(id1)
return space
# population = Population(optimize_direction=OptimizeDirection.Maximize)
population = Population(optimize_direction='max')
space1 = get_space()
space1.random_sample()
assert space1.all_assigned
space2 = get_space()
assert not space2.all_assigned
new_space = population.mutate(space1, space2)
pv1 = list(space1.get_assigned_param_values().values())
pv2 = list(space2.get_assigned_param_values().values())
assert space2.all_assigned
assert new_space.all_assigned
assert np.sum([v1 != v2 for v1, v2 in zip(pv1, pv2)]) == 1
def test_set_random_state(self):
from hypernets.core import set_random_state
set_random_state(9527)
searcher = EvolutionSearcher(get_space, 5, 3, regularized=False, optimize_direction=OptimizeDirection.Maximize)
vectors = []
for i in range(1, 10):
vectors.append(searcher.sample().vectors)
assert vectors == [[98, 0, 0, 0.96], [9, 0, 0, 0.93], [60, 0, 1, 0.24], [54, 0, 1, 0.7],
[25, 0, 1, 0.73], [67, 1, 1, 0.43], [57, 1, 1, 0.05], [49, 0, 0, 0.71], [71, 1, 1, 0.49]]
set_random_state(None)
searcher = EvolutionSearcher(get_space, 5, 3, regularized=False, optimize_direction=OptimizeDirection.Maximize)
vectors = []
for i in range(1, 10):
vectors.append(searcher.sample().vectors)
assert vectors != [[98, 0, 0, 0.96], [9, 0, 0, 0.93], [60, 0, 1, 0.24], [54, 0, 1, 0.7],
[25, 0, 1, 0.73], [67, 1, 1, 0.43], [57, 1, 1, 0.05], [49, 0, 0, 0.71], [71, 1, 1, 0.49]]
set_random_state(9527)
searcher = EvolutionSearcher(get_space, 5, 3, regularized=False, optimize_direction=OptimizeDirection.Maximize)
vectors = []
for i in range(1, 10):
vectors.append(searcher.sample().vectors)
assert vectors == [[98, 0, 0, 0.96], [9, 0, 0, 0.93], [60, 0, 1, 0.24], [54, 0, 1, 0.7],
[25, 0, 1, 0.73], [67, 1, 1, 0.43], [57, 1, 1, 0.05], [49, 0, 0, 0.71], [71, 1, 1, 0.49]]
set_random_state(1)
searcher = EvolutionSearcher(get_space, 5, 3, regularized=False, optimize_direction=OptimizeDirection.Maximize)
vectors = []
for i in range(1, 10):
vectors.append(searcher.sample().vectors)
assert vectors == [[38, 1, 0, 0.93], [10, 1, 1, 0.15], [17, 1, 0, 0.39], [7, 1, 0, 0.85], [19, 0, 1, 0.44],
[29, 1, 0, 0.67], [88, 1, 1, 0.43], [95, 0, 0, 0.8], [10, 1, 1, 0.09]]
set_random_state(None)
# def test_searcher_with_hp(self):
# def get_space():
# space = HyperSpace()
# with space.as_default():
# in1 = Input(shape=(10,))
# in2 = Input(shape=(20,))
# in3 = Input(shape=(1,))
# concat = Concatenate()([in1, in2, in3])
# dense1 = Dense(10, activation=Choice(['relu', 'tanh', None]), use_bias=Bool())(concat)
# bn1 = BatchNormalization()(dense1)
# dropout1 = Dropout(Choice([0.3, 0.4, 0.5]))(bn1)
# output = Dense(2, activation='softmax', use_bias=True)(dropout1)
# return space
#
# rs = EvolutionSearcher(get_space, 5, 3, regularized=False, optimize_direction=OptimizeDirection.Maximize)
# hk = HyperKeras(rs, optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'],
# callbacks=[SummaryCallback()])
#
# x1 = np.random.randint(0, 10000, size=(100, 10))
# x2 = np.random.randint(0, 100, size=(100, 20))
# x3 = np.random.normal(1.0, 100.0, size=(100))
# y = np.random.randint(0, 2, size=(100), dtype='int')
# x = [x1, x2, x3]
#
# hk.search(x, y, x, y, max_trials=10)
# assert hk.get_best_trial()
# best_trial = hk.get_best_trial()
#
# estimator = hk.final_train(best_trial.space_sample, x, y)
# score = estimator.predict(x)
# result = estimator.evaluate(x, y)
# assert len(score) == 100
# assert result
| 39.965714
| 120
| 0.570489
| 903
| 6,994
| 4.328904
| 0.20598
| 0.122794
| 0.00921
| 0.085955
| 0.571757
| 0.481197
| 0.481197
| 0.417498
| 0.417498
| 0.406242
| 0
| 0.090497
| 0.274807
| 6,994
| 174
| 121
| 40.195402
| 0.680205
| 0.20875
| 0
| 0.522523
| 0
| 0
| 0.007468
| 0
| 0
| 0
| 0
| 0
| 0.117117
| 1
| 0.054054
| false
| 0
| 0.054054
| 0
| 0.135135
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9fe62e0a759bb38c6001a97c2d2f6695ebbb34cb
| 22,748
|
py
|
Python
|
tests/filesystem_tests.py
|
d-kiss/fakeos
|
88dff667830efe10841df8b3a5f33a581bd94b69
|
[
"MIT"
] | 1
|
2017-10-09T10:59:43.000Z
|
2017-10-09T10:59:43.000Z
|
tests/filesystem_tests.py
|
d-kiss/fakeos
|
88dff667830efe10841df8b3a5f33a581bd94b69
|
[
"MIT"
] | 5
|
2017-10-06T17:33:37.000Z
|
2017-10-13T16:31:34.000Z
|
tests/filesystem_tests.py
|
rinslow/fakeos
|
88dff667830efe10841df8b3a5f33a581bd94b69
|
[
"MIT"
] | null | null | null |
import operator
import os as _os
from pathlib import Path
from string import ascii_letters
from itertools import chain, permutations
from functools import reduce
from fakeos import FakeOS
from hypothesis import given, assume, example
from hypothesis.strategies import text, sets, integers, lists, just
from filesystem import FakeDirectory, FakeFile, FakeFilesystem, \
FakeFilesystemWithPermissions
from fakeuser import FakeUser, Root
from unittest import TestCase
from operating_system import FakeWindows, FakeUnix
ILLEGAL_NAMES = ("", ".", "..")
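# path components that can never be valid file or directory names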
class DirectoryCase(TestCase):
@given(text())
def test_mkdir_when_directory_already_exists(self, directory: str):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
os.mkdir("/" + directory)
with self.assertRaises(FileExistsError):
os.mkdir("/" + directory)
@given(text())
def test_mkdir_when_parent_directory_doesnt_exist(self, directory: str):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
with self.assertRaises(FileNotFoundError):
os.mkdir("/hello/" + directory)
@given(text(), text())
def test_mkdir_and_directory_exists_afterwards(self, directory: str, _file: str):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
assume("/" not in _file and _file not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
os.mkdir("/" + directory)
os.mkdir("/" + directory + "/" + _file)
assert os.filesystem.has(Path("/" + directory + "/" + _file))
@given(text())
def test_mkdir_works(self, directory):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
os.mkdir("/" + directory)
@given(text())
def test_creating_root_directory(self, directory):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(directory)
assert os.filesystem.has_directory(Path(directory))
@given(text(), sets(text()))
@example("0", set())
def test_listdir_with_subdirectories_only(self, directory, subdirectories):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
for subdirectory in subdirectories:
assume(subdirectory not in ILLEGAL_NAMES)
assume("/" not in subdirectory)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
os.mkdir("/" + directory)
for subdirectory in subdirectories:
os.mkdir("/" + directory + "/" + subdirectory)
assert sorted(subdirectories) == sorted(os.listdir("/" + directory))
@given(text())
def test_listdir_empty_directory(self, directory):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
os.mkdir("/" + directory)
assert os.listdir("/" + directory) == []
@given(text(), text())
def test_listdir_with_a_file_inside(self, directory, filename):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
assume("/" not in filename and filename not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))],
files=[FakeFile(Path("/" +
directory +
"/" +
filename))]
))
os.mkdir("/" + directory)
assert os.listdir("/" + directory) == [filename]
@given(text(), text(), text())
def test_listdir_with_a_file_and_a_directory_inside(self, directory,
filename, subdirectory):
assume(subdirectory != filename)
assume("/" not in directory and directory not in ILLEGAL_NAMES)
assume("/" not in filename and filename not in ILLEGAL_NAMES)
assume("/" not in subdirectory and subdirectory not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))],
files=[FakeFile(Path("/" +
directory +
"/" +
filename))]
))
os.mkdir("/" + directory)
os.mkdir("/" + directory + "/" + subdirectory)
assert sorted(os.listdir("/" + directory)) == sorted([filename, subdirectory])
@given(text())
def test_makedirs_one_file_path(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path(path))]))
with self.assertRaises(OSError):
os.makedirs(path)
try:
os.makedirs(path, exist_ok=True)
except OSError:
self.fail()
@given(text())
@example("/")
@example("/0")
def test_makedirs_multiple_file_path(self, path: str):
assume("/" in path and not path.startswith("."))
os = FakeOS()
os.makedirs(path)
with self.assertRaises(OSError):
os.makedirs(path)
@given(text())
def test_makedirs_when_part_of_the_path_exists_as_and_is_a_file(self, path: str):
assume("/" in path)
os = FakeOS(filesystem=FakeFilesystem(files=[FakeFile(Path(path))]))
dirname = Path(path).joinpath("dirname")
with self.assertRaises(FileExistsError):
os.makedirs(dirname)
@given(text())
@example("0")
def test_rmdir(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
fullpath = "/" + path
os.makedirs(fullpath)
assert path in os.listdir("/")
os.rmdir(fullpath)
assert path not in os.listdir("/")
with self.assertRaises(FileNotFoundError):
os.rmdir(fullpath)
os.makedirs(fullpath + "/hello")
with self.assertRaises(OSError):
os.rmdir(fullpath)
os = FakeOS(filesystem=FakeFilesystemWithPermissions(FakeFilesystem(
files=[FakeFile(Path(path))])))
with self.assertRaises(NotADirectoryError):
os.rmdir(path)
class ChownCase(TestCase):
@given(text(), integers(), integers())
def test_chown_to_a_directory(self, path: str, uid: int, gid: int):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(path)
os.chown(path, uid=uid, gid=gid)
assert os.filesystem[path].uid == uid
assert os.filesystem[path].gid == gid
@given(text(), integers(), integers())
def test_chown_to_a_file(self, path: str, uid: int, gid: int):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS(filesystem=FakeFilesystem(files=[FakeFile(Path(path))]))
os.chown(path, gid=gid, uid=uid)
assert os.filesystem[path].uid == uid
assert os.filesystem[path].gid == gid
@given(text(), integers(), integers())
def test_chown_to_a_nonexisting_fileobject(self, path: str, uid: int,
gid: int):
os = FakeOS()
with self.assertRaises(FileNotFoundError):
os.chown(path, gid=gid, uid=uid)
@given(text(), integers(), integers())
def test_chown_not_changing_already_set_attributes(self, path: str,
uid: int, gid: int):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(path)
os.chown(path, uid=uid, gid=gid)
os.chown(path, uid=-1, gid=-1)
assert os.filesystem[path].gid == gid
assert os.filesystem[path].uid == uid
class ChmodCase(TestCase):
@given(text(), integers())
def test_chmod(self, path, mode):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(path)
os.chmod(path, mode)
assert os.filesystem[path].mode == mode
class FileCase(TestCase):
@given(text())
@example("0")
def test_remove_a_file(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS(filesystem=FakeFilesystemWithPermissions(FakeFilesystem(
files=[FakeFile(Path("hello/" + path))])))
os.mkdir("hello")
assert os.listdir("hello") == [path]
os.remove("hello/" + path)
assert os.listdir("hello") == []
@given(text())
def test_remove_a_directory(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(path)
with self.assertRaises(IsADirectoryError):
os.remove(path)
@given(text())
def test_remove_a_non_existent_file(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
with self.assertRaises(FileNotFoundError):
os.remove(path)
class CurrentDirectoryCase(TestCase):
@given(text())
def test_chdir(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(path)
os.chdir(path)
assert os.getcwd() == str(Path(path).absolute())
@given(text())
def test_chdir_directory_does_not_exist(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
with self.assertRaises(FileNotFoundError):
os.chdir(path)
@given(text())
def test_chdir_directory_path_is_a_file(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS(filesystem=FakeFilesystem(files=[FakeFile(Path(path))]))
with self.assertRaises(NotADirectoryError):
os.chdir(path)
class DeviceCase(TestCase):
@given(integers(), integers())
def test_makedev(self, major, minor):
assume(-1 < major < 2 ** 31 and -1 < minor < 2 ** 31)
os = FakeOS()
assert os.makedev(major, minor) == _os.makedev(major, minor)
@given(integers())
def test_major(self, device):
assume(-1 < device < 2 ** 64)
os = FakeOS()
assert os.major(device) == _os.major(device)
@given(integers())
def test_minor(self, device):
assume(-1 < device < 2 ** 64)
os = FakeOS()
assert os.minor(device) == _os.minor(device)
class RenameCase(TestCase):
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_root_directory(self, old, new):
assume(old != new)
os = FakeOS()
os.mkdir(old)
os.rename(old, new)
with self.assertRaises(FileNotFoundError):
old_file = os.filesystem[Path(old)]
try:
new_file = os.filesystem[Path(new)]
except FileNotFoundError:
self.fail("Filke was not renamed.")
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_non_root_directory(self, root, old, new):
os = FakeOS()
os.mkdir(root)
os.mkdir(root + "/" + old)
os.rename(root + "/" + old, root + "/" + new)
assert os.listdir(root) == [new]
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_root_non_leaf_folder(self, old, new, inside):
os = FakeOS()
os.mkdir(old)
os.mkdir(old + "/" + inside)
os.rename(old, new)
assert os.listdir(new) == [inside]
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_non_root_non_leaf_folder(self, old, new, inside, root):
os = FakeOS()
os.makedirs(root + "/" + old + "/" + inside)
os.rename(root + "/" + old, root + "/" + new)
assert os.listdir(root + "/" + new) == [inside]
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_when_destination_exists_on_windows(self, old, new):
assume(old != new)
os = FakeOS(operating_system=FakeWindows())
os.mkdir(old)
os.mkdir(new)
with self.assertRaises(OSError):
os.rename(old, new)
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_when_destination_exists_on_unix(self, old, new, somefile):
assume(old != new)
os = FakeOS(operating_system=FakeUnix(),
filesystem=FakeFilesystem(files=[FakeFile(Path(old)),
FakeFile(Path(new))],
operating_system=FakeUnix()))
os.rename(old, new)
os.filesystem[Path(new)]
with self.assertRaises(OSError):
fileobject = os.filesystem[Path(old)]
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_a_folder_and_changing_its_hierarchy(self, a, b, c, d, e):
assume(e != b)
os = FakeOS()
os.makedirs(a + "/" + b + "/" + c + "/" + d)
os.rename(a + "/" + b + "/" + c, a + "/" + e)
assert set(os.listdir(a)) == {b, e}
assert os.listdir(a + "/" + e) == [d]
@given(text(alphabet=ascii_letters, min_size=1))
def test_renaming_to_the_same_thing(self, path):
os = FakeOS()
os.mkdir(path)
os.rename(path, path)
class AccessCase(TestCase):
def test_access_when_root(self):
os = FakeOS(user=Root())
os.mkdir("/", mode=0o000)
for access_modifier in (os.X_OK, os.W_OK, os.R_OK, os.F_OK):
assert os.access("/", access_modifier)
def test_access_exist(self):
os = FakeOS()
os.mkdir("/")
assert os.access("/", os.F_OK) and not os.access("other", os.F_OK)
def test_access_effective_ids(self):
os = FakeOS(user=FakeUser(gid=-2, uid=-2, is_sudoer=False))
os.setgid(0)
os.setuid(0)
assert os.getgid() == 0
assert os.getuid() == 0
os.mkdir("/", mode=0o070) # Group only
os.setgid(-7)
os.setuid(-7)
os.seteuid(0)
os.setegid(0)
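# the real uid/gid (-7) no longer match the directory's group bits, but the
# effective ids (0) do, so only the effective_ids=True check succeeds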
assert not os.access("/", mode=os.R_OK)
assert os.access("/", mode=os.R_OK, effective_ids=True)
def test_access_when_owner(self):
os = FakeOS(user=FakeUser(gid=14, uid=42))
os.mkdir("r", mode=0o400)
os.mkdir("w", mode=0o200)
os.mkdir("x", mode=0o100)
os.mkdir("rw", mode=0o600)
os.mkdir("wx", mode=0o300)
os.mkdir("rx", mode=0o500)
os.mkdir("rwx", mode=0o700)
os.filesystem.set_user(FakeUser(gid=18, uid=42))
assert os.access("r", os.R_OK)
assert not os.access("r", os.W_OK)
assert not os.access("r", os.X_OK)
assert os.access("w", os.W_OK)
assert not os.access("w", os.R_OK)
assert not os.access("w", os.X_OK)
assert os.access("x", os.X_OK)
assert not os.access("x", os.R_OK)
assert not os.access("x", os.W_OK)
assert os.access("rw", os.R_OK)
assert os.access("rw", os.W_OK)
assert os.access("rw", os.R_OK | os.W_OK)
assert not os.access("rw", os.X_OK)
assert not os.access("rw", os.X_OK | os.W_OK)
assert os.access("wx", os.X_OK)
assert os.access("wx", os.W_OK)
assert os.access("wx", os.X_OK | os.W_OK)
assert not os.access("wx", os.R_OK)
assert not os.access("wx", os.X_OK | os.R_OK)
assert os.access("rx", os.X_OK)
assert os.access("rx", os.R_OK)
assert os.access("rx", os.X_OK | os.R_OK)
assert not os.access("rx", os.W_OK)
assert not os.access("rx", os.W_OK | os.R_OK)
assert os.access("rwx", os.R_OK)
assert os.access("rwx", os.X_OK)
assert os.access("rwx", os.F_OK)
assert os.access("rwx", os.R_OK | os.X_OK | os.W_OK)
assert os.access("rwx", os.X_OK | os.W_OK)
def test_access_when_everyone(self):
os = FakeOS(user=FakeUser(gid=-1, uid=-1))
os.mkdir("r", mode=0o004)
os.mkdir("w", mode=0o002)
os.mkdir("x", mode=0o001)
os.mkdir("rw", mode=0o006)
os.mkdir("wx", mode=0o003)
os.mkdir("rx", mode=0o005)
os.mkdir("rwx", mode=0o007)
os.filesystem.set_user(FakeUser(gid=0, uid=0))
assert os.access("r", os.R_OK)
assert not os.access("r", os.W_OK)
assert not os.access("r", os.X_OK)
assert os.access("w", os.W_OK)
assert not os.access("w", os.R_OK)
assert not os.access("w", os.X_OK)
assert os.access("x", os.X_OK)
assert not os.access("x", os.R_OK)
assert not os.access("x", os.W_OK)
assert os.access("rw", os.R_OK)
assert os.access("rw", os.W_OK)
assert os.access("rw", os.R_OK | os.W_OK)
assert not os.access("rw", os.X_OK)
assert not os.access("rw", os.X_OK | os.W_OK)
assert os.access("wx", os.X_OK)
assert os.access("wx", os.W_OK)
assert os.access("wx", os.X_OK | os.W_OK)
assert not os.access("wx", os.R_OK)
assert not os.access("wx", os.X_OK | os.R_OK)
assert os.access("rx", os.X_OK)
assert os.access("rx", os.R_OK)
assert os.access("rx", os.X_OK | os.R_OK)
assert not os.access("rx", os.W_OK)
assert not os.access("rx", os.W_OK | os.R_OK)
assert os.access("rwx", os.R_OK)
assert os.access("rwx", os.X_OK)
assert os.access("rwx", os.F_OK)
assert os.access("rwx", os.R_OK | os.X_OK | os.W_OK)
assert os.access("rwx", os.X_OK | os.W_OK)
def test_access_when_group(self):
os = FakeOS(user=FakeUser(gid=14, uid=42))
os.mkdir("r", mode=0o040)
os.mkdir("w", mode=0o020)
os.mkdir("x", mode=0o010)
os.mkdir("rw", mode=0o060)
os.mkdir("wx", mode=0o030)
os.mkdir("rx", mode=0o050)
os.mkdir("rwx", mode=0o070)
os.filesystem.set_user(FakeUser(gid=14, uid=56))
assert os.access("r", os.R_OK)
assert not os.access("r", os.W_OK)
assert not os.access("r", os.X_OK)
assert os.access("w", os.W_OK)
assert not os.access("w", os.R_OK)
assert not os.access("w", os.X_OK)
assert os.access("x", os.X_OK)
assert not os.access("x", os.R_OK)
assert not os.access("x", os.W_OK)
assert os.access("rw", os.R_OK)
assert os.access("rw", os.W_OK)
assert os.access("rw", os.R_OK | os.W_OK)
assert not os.access("rw", os.X_OK)
assert not os.access("rw", os.X_OK | os.W_OK)
assert os.access("wx", os.X_OK)
assert os.access("wx", os.W_OK)
assert os.access("wx", os.X_OK | os.W_OK)
assert not os.access("wx", os.R_OK)
assert not os.access("wx", os.X_OK | os.R_OK)
assert os.access("rx", os.X_OK)
assert os.access("rx", os.R_OK)
assert os.access("rx", os.X_OK | os.R_OK)
assert not os.access("rx", os.W_OK)
assert not os.access("rx", os.W_OK | os.R_OK)
assert os.access("rwx", os.R_OK)
assert os.access("rwx", os.X_OK)
assert os.access("rwx", os.F_OK)
assert os.access("rwx", os.R_OK | os.X_OK | os.W_OK)
assert os.access("rwx", os.X_OK | os.W_OK)
class PermissionsCase(TestCase):
def test_rmdir_when_theres_no_permission_to_do_so(self):
os = FakeOS(user=FakeUser(uid=0, gid=0))
os.mkdir("/", mode=0)
with self.assertRaises(PermissionError):
os.rmdir("/")
def test_renaming_when_theres_no_permission_to_do_so(self):
os = FakeOS(user=FakeUser())
os.mkdir("/", mode=0o000)
with self.assertRaises(PermissionError):
os.rename("/", "lol")
def test_chmod_when_theres_no_permission_to_do_so(self):
os = FakeOS(user=FakeUser(gid=2, uid=2, is_sudoer=False))
os.mkdir("/", mode=0o100)
with self.assertRaises(PermissionError):
os.chmod("/", mode=0o666)
def test_chown_when_theres_no_permission_to_do_so(self):
os = FakeOS(user=FakeUser(gid=2, uid=2, is_sudoer=False))
os.mkdir("/", mode=0)
with self.assertRaises(PermissionError):
os.chown('/', uid=3)
def test_mkdir_when_theres_no_permission_to_do_so(self):
os = FakeOS(user=FakeUser(gid=2, uid=2, is_sudoer=False))
os.mkdir("/", mode=0)
with self.assertRaises(PermissionError):
os.mkdir("/hello")
def test_listdir_when_theres_no_permission_to_do_so(self):
os = FakeOS(user=FakeUser(gid=2, uid=2, is_sudoer=False))
os.mkdir("/", mode=0o666) # No execution allowed
with self.assertRaises(PermissionError):
os.listdir("/")
class UserAndGroupCase(TestCase):
@given(integers())
def test_gid(self, gid: int):
os = FakeOS()
os.setgid(gid)
assert os.getgid() == gid
@given(integers())
def test_uid(self, uid: int):
os = FakeOS()
os.setuid(uid)
assert os.getuid() == uid
@given(integers())
def test_egid(self, egid: int):
os = FakeOS()
os.setegid(egid)
assert os.getegid() == egid
@given(integers())
def test_euid(self, euid: int):
os = FakeOS()
os.seteuid(euid)
assert os.geteuid() == euid
| 33.403818
| 86
| 0.583656
| 2,957
| 22,748
| 4.34021
| 0.077782
| 0.057348
| 0.058906
| 0.062334
| 0.685912
| 0.637447
| 0.586723
| 0.563581
| 0.521583
| 0.514649
| 0
| 0.012003
| 0.278486
| 22,748
| 680
| 87
| 33.452941
| 0.769938
| 0.001363
| 0
| 0.586538
| 0
| 0
| 0.017082
| 0
| 0
| 0
| 0
| 0
| 0.278846
| 1
| 0.098077
| false
| 0
| 0.025
| 0
| 0.142308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9fe6a6466ba62d142e4c8d8e39066315eacdcdb4
| 6,129
|
py
|
Python
|
ceed/utils.py
|
matham/ceed
|
5d32a99a33325b36dbe74d8b0a22e63abc92aab7
|
[
"MIT"
] | 1
|
2020-03-02T22:26:44.000Z
|
2020-03-02T22:26:44.000Z
|
ceed/utils.py
|
matham/ceed
|
5d32a99a33325b36dbe74d8b0a22e63abc92aab7
|
[
"MIT"
] | null | null | null |
ceed/utils.py
|
matham/ceed
|
5d32a99a33325b36dbe74d8b0a22e63abc92aab7
|
[
"MIT"
] | 2
|
2020-01-13T19:42:16.000Z
|
2020-01-27T14:58:09.000Z
|
"""Utilities
===================
Various tools used in :mod:`ceed`.
"""
import re
import pathlib
from collections import deque
from typing import List, Tuple, Any, Union
__all__ = (
'fix_name', 'update_key_if_other_key', 'collapse_list_to_counts',
'get_plugin_modules', 'CeedWithID',
)
_name_pat = re.compile('^(.+)-([0-9]+)$')
def fix_name(name, *names):
"""Fixes the name so that it is unique among the names in ``names``.
:Params:
`name`: str
A name of something
`*names`: iterables of strings
Positional arguments, where each is an iterable of strings among
which we ensure that the returned name is unique.
:returns:
A string that is unique among all the ``names``, but is similar to
``name``. We append an integer to make it unique.
E.g.::
>>> fix_name('troll', ['toll', 'foll'], ['bole', 'cole'])
'troll'
>>> fix_name('troll', ['troll', 'toll', 'foll'], ['bole', 'cole'])
'troll-2'
>>> fix_name('troll', ['troll-2', 'toll', 'foll'], ['bole', 'cole'])
'troll'
>>> fix_name('troll', ['troll', 'troll-2', 'toll', 'foll'], \
['bole', 'cole'])
'troll-3'
"""
if not any((name in n for n in names)):
return name
m = re.match(_name_pat, name)
i = 2
if m is not None:
name, i = m.groups()
i = int(i)
new_name = '{}-{}'.format(name, i)
while any((new_name in n for n in names)):
i += 1
new_name = '{}-{}'.format(name, i)
return new_name
def update_key_if_other_key(items, key, value, other_key, key_map):
"""Given a dict, or list/tuple of dicts (recursively), it goes through all
the dicts and updates the keys who match.
Specifically, if a key matches ``key``, its value matches ``value``,
there's another key named ``other_key`` in the dict, and the ``value`` of
``other_key`` is in ``key_map``, then the value of ``other_key`` is
updated to that value from ``key_map``.
"""
for item in items:
if isinstance(item, dict):
if key in item and item[key] == value and other_key in item:
item[other_key] = key_map.get(item[other_key], item[other_key])
update_key_if_other_key(
item.values(), key, value, other_key, key_map)
elif isinstance(item, (list, tuple)):
update_key_if_other_key(item, key, value, other_key, key_map)
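# For example (hypothetical data): with key='cls', value='A', other_key='name'
# and key_map={'old': 'new'}, the nested dict {'cls': 'A', 'name': 'old'}
# is updated in place to {'cls': 'A', 'name': 'new'}.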
def collapse_list_to_counts(values: list) -> List[Tuple[Any, int]]:
"""Converts a sequence of items to tuples of the item and count of
sequential items.
E.g.::
>>> collapse_list_to_counts([1, 1, 2, 3, 1, 1, 1, 3,])
[(1, 2), (2, 1), (3, 1), (1, 3), (3, 1)]
"""
counter = None
last_item = object()
res = []
for value in values:
if value != last_item:
if counter is not None:
res.append((last_item, counter))
last_item = value
counter = 1
else:
counter += 1
if counter is not None:
# we saw some items at least, the last was not added
res.append((last_item, counter))
return res
def get_plugin_modules(
base_package: str, root: Union[str, pathlib.Path]
) -> Tuple[List[str], List[Tuple[Tuple[str], bytes]]]:
"""Takes a package name and it's corresponding root path and returns a list
of the modules recursively within this package, as well as the source files
in bytes.
Only ``*.py`` and ``*.pyo`` files are considered, and although included with
the source bytes, the ``packages`` list skips any files that start with an
underscore (except ``__init__.py`` of course).
"""
packages = []
files = []
fifo = deque([pathlib.Path(root)])
while fifo:
directory = fifo.popleft()
relative_dir = directory.relative_to(root)
directory_mod = '.'.join((base_package,) + relative_dir.parts)
for item in directory.iterdir():
if item.is_dir():
if not item.name == '__pycache__':
fifo.append(item)
continue
if not item.is_file() or not item.name.endswith(('.py', '.pyo')):
continue
# only pick one of pyo/py
if item.suffix == '.pyo' and item.with_suffix('.py').exists():
continue
files.append(
(relative_dir.parts + (item.name, ), item.read_bytes()))
if item.name.startswith('_') and item.name != '__init__.py' \
and item.name != '__init__.pyo':
continue
name = item.name[:-3]
if name == '__init__':
package = directory_mod
else:
package = f'{directory_mod}.{name}'
packages.append(package)
return packages, files
class CeedWithID:
"""Adds :attr:`ceed_id` to the class so that any inheriting class instance
can be associated with a unique integer ID for logging purposes.
The ID is not automatically set for every object, it is manually set
when :meth:`set_ceed_id` is called. See stage/function for when it's called.
"""
ceed_id: int = 0
"""The integer id of the object.
"""
def set_ceed_id(self, min_available: int) -> int:
"""Sets the ID of this and any sub objects, each to a number equal or
greater than ``min_available`` and returns the next minimum number
available to be used.
See :attr:`~ceed.analysis.CeedDataReader.event_data` for more details.
:param min_available: The minimum number available to be used for the
ID so it is unique.
:return: The next minimum available number that can be used. Any number
larger or equal to it is free to be used.
"""
self.ceed_id = min_available
return min_available + 1
| 32.775401
| 81
| 0.565019
| 818
| 6,129
| 4.09291
| 0.273839
| 0.033453
| 0.013142
| 0.019116
| 0.163082
| 0.11589
| 0.044803
| 0.03405
| 0.0227
| 0
| 0
| 0.007374
| 0.314081
| 6,129
| 186
| 82
| 32.951613
| 0.78901
| 0.414586
| 0
| 0.146341
| 0
| 0
| 0.060479
| 0.021992
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060976
| false
| 0
| 0.04878
| 0
| 0.195122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9fe92dc5882b9ab766bfa49539001ca33aa51f84
| 510
|
py
|
Python
|
chapter1/quest1_4.py
|
mag6367/Coding_the_Coding_Interview_Python_Solutions
|
1d97d18d3d9732c25626e20cb3561ce4241b16e8
|
[
"MIT"
] | 1
|
2017-04-28T13:52:13.000Z
|
2017-04-28T13:52:13.000Z
|
chapter1/quest1_4.py
|
mag6367/Cracking_the_Coding_Interview_Python_Solutions
|
1d97d18d3d9732c25626e20cb3561ce4241b16e8
|
[
"MIT"
] | null | null | null |
chapter1/quest1_4.py
|
mag6367/Cracking_the_Coding_Interview_Python_Solutions
|
1d97d18d3d9732c25626e20cb3561ce4241b16e8
|
[
"MIT"
] | null | null | null |
# question 1.4 from Cracking the Coding Interview, 4th ed.
'''
Write a method to decide if two strings are anagrams or not.
'''
# if we sort the two strings, they should be the same
def areAnagram (str1, str2):
# check if the inputs are valid strings
if not isinstance(str1, str) or not isinstance(str2, str):
return False
# first we convert the two strings into lists and sort them
newStr1 = sorted(str1)
newStr2 = sorted(str2)
# if they are anagrams, the lists should be identical
return newStr1 == newStr2
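# Example: areAnagram('listen', 'silent') -> True. The comparison is
# case-sensitive and counts spaces, so areAnagram('Dormitory', 'dirty room') -> False.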
| 24.285714
| 60
| 0.727451
| 83
| 510
| 4.46988
| 0.60241
| 0.053908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03202
| 0.203922
| 510
| 20
| 61
| 25.5
| 0.881773
| 0.594118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9febad62a52cd187ba14dbd7516dbb4c9c77a4fc
| 2,516
|
py
|
Python
|
firmware/measure_magnitude.py
|
mfkiwl/OpenXcvr
|
9bea6efd03cd246f16982f0fadafed684ac5ce1c
|
[
"MIT"
] | 14
|
2020-02-16T15:36:31.000Z
|
2022-03-27T02:24:40.000Z
|
firmware/measure_magnitude.py
|
mfkiwl/OpenXcvr
|
9bea6efd03cd246f16982f0fadafed684ac5ce1c
|
[
"MIT"
] | 1
|
2020-11-23T16:16:33.000Z
|
2020-11-23T16:16:33.000Z
|
firmware/measure_magnitude.py
|
mfkiwl/OpenXcvr
|
9bea6efd03cd246f16982f0fadafed684ac5ce1c
|
[
"MIT"
] | 4
|
2021-03-29T16:55:03.000Z
|
2022-01-23T16:43:59.000Z
|
from baremetal import *
from baremetal.signed import number_of_bits_needed
from settings import Settings
from math import log, pi
from matplotlib import pyplot as plt
import numpy as np
import sys
from math import ceil
from numpy import log10
# settings for 100 kS/s
# hang attack decay
# fast 10000(100ms) 4(1ms) 10(62.5ms)
# med 25000(250ms) 4(1ms) 12(250ms)
# slow 100000(1s) 4(1ms) 13(500ms)
# long 200000(2s) 4(1ms) 15(2s)
def measure_magnitude(clk, audio, audio_stb, agc_speed, reset=0):
attack_factor = 4
max_factor = 15
decay_factor = Signed(5).select(agc_speed, 9, 11, 12, 13)
hang = Unsigned(19).select(agc_speed, 5000, 12500, 50000, 100000)
#use a leaky max hold
audio_bits = audio.subtype.bits
#add extra bits for decay calculation
audio = audio.resize(audio_bits+max_factor) << max_factor
max_hold = audio.subtype.register(clk, init=0, en=audio_stb)
counter = Unsigned(19).register(clk, en=audio_stb, init=0)
#if signal is greater than magnitude
attack = (audio > max_hold)
attack_new_val = max_hold + ((audio - max_hold) >> attack_factor)
decay_new_val = max_hold - (max_hold >> decay_factor)
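# attack moves the peak estimate toward the input by 1/2**attack_factor per
# sample; decay leaks it away by 1/2**decay_factor once the hang count expires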
hold_expired = counter == 0
counter.d(counter.subtype.select(attack, counter.subtype.select(hold_expired, counter - 1, 0), hang-1))
max_hold_new_val = audio.subtype.select(attack, max_hold.subtype.select(hold_expired, max_hold, decay_new_val), attack_new_val)
max_hold.d(audio.subtype.select(reset, max_hold_new_val, 0))
#remove extra bits (except one to allow for addition)
max_hold = (max_hold >> max_factor).resize(audio_bits)
return max_hold
if __name__ == "__main__" and "sim" in sys.argv:
settings = Settings()
settings.agc_frame_size = 100
settings.agc_frames = 4
clk = Clock("clk")
data_in = Signed(16).input("data_in")
stb_in = Boolean().input("stb_in")
magnitude = measure_magnitude(clk, data_in, stb_in, 0, 0)
stimulus = []
for i in range(1000):
stimulus.append(100)
for i in range(20000):
stimulus.append(0)
response = []
#simulate
clk.initialise()
i = 0
for data in stimulus:
data_in.set(data)
for i in range(2):
stb_in.set(i==1)
if i==1:
response.append(magnitude.get())
clk.tick()
i+=1
response = np.array(response)
plt.plot(response)
plt.plot(stimulus)
plt.show()
| 30.313253
| 131
| 0.65779
| 374
| 2,516
| 4.237968
| 0.347594
| 0.066246
| 0.022713
| 0.024606
| 0.023975
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067358
| 0.232909
| 2,516
| 82
| 132
| 30.682927
| 0.753886
| 0.149046
| 0
| 0
| 0
| 0
| 0.012676
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018182
| false
| 0
| 0.163636
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9fec2b87298b7ce1d8fa49d924c18e361c3fbd3b
| 737
|
py
|
Python
|
chpt6/Palindrome.py
|
GDG-Buea/learn-python
|
9dfe8caa4b57489cf4249bf7e64856062a0b93c2
|
[
"Apache-2.0"
] | null | null | null |
chpt6/Palindrome.py
|
GDG-Buea/learn-python
|
9dfe8caa4b57489cf4249bf7e64856062a0b93c2
|
[
"Apache-2.0"
] | 2
|
2018-05-21T09:39:00.000Z
|
2018-05-27T15:59:15.000Z
|
chpt6/Palindrome.py
|
GDG-Buea/learn-python
|
9dfe8caa4b57489cf4249bf7e64856062a0b93c2
|
[
"Apache-2.0"
] | 2
|
2018-05-19T14:59:56.000Z
|
2018-05-19T15:25:48.000Z
|
# This program prompts a user to enter an integer and reports whether the integer is a palindrome or not
# A number is a palindrome if its reversal is the same as itself.
def reverse(number):
# peel off all four digits (main() prompts for a four-digit number)
position1 = number % 10
remainder1 = number // 10
position2 = remainder1 % 10
remainder2 = remainder1 // 10
position3 = remainder2 % 10
position4 = remainder2 // 10
return int(str(position1) + str(position2) + str(position3) + str(position4))
def is_palindrome(number):
value = number
if value == reverse(number):
return 'This is a palindrome'
else:
return 'This is not a palindrome'
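# Example: with the four-digit reverse() above, is_palindrome(1221) returns
# 'This is a palindrome' and is_palindrome(1234) returns 'This is not a palindrome'.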
def main():
number_test = int(input("Enter a four digit number to test if it's a palindrome: "))
print(is_palindrome(number_test))
main()
| 23.03125
| 104
| 0.679783
| 103
| 737
| 4.825243
| 0.446602
| 0.110664
| 0.078471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033929
| 0.240163
| 737
| 32
| 105
| 23.03125
| 0.853571
| 0.226594
| 0
| 0
| 0
| 0
| 0.176367
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0
| 0
| 0.352941
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9ff112f147fc3eea03cddc2ce893a7da503429c2
| 1,045
|
py
|
Python
|
emilia/modules/sql/admin_sql.py
|
masterisira/ELIZA_OF-master
|
02a7dbf48e4a3d4ee0981e6a074529ab1497aafe
|
[
"Unlicense"
] | null | null | null |
emilia/modules/sql/admin_sql.py
|
masterisira/ELIZA_OF-master
|
02a7dbf48e4a3d4ee0981e6a074529ab1497aafe
|
[
"Unlicense"
] | null | null | null |
emilia/modules/sql/admin_sql.py
|
masterisira/ELIZA_OF-master
|
02a7dbf48e4a3d4ee0981e6a074529ab1497aafe
|
[
"Unlicense"
] | null | null | null |
import threading
from typing import Union
from sqlalchemy import Column, Integer, String, Boolean
from emilia.modules.sql import SESSION, BASE
class PermanentPin(BASE):
__tablename__ = "permanent_pin"
chat_id = Column(String(14), primary_key=True)
message_id = Column(Integer)
def __init__(self, chat_id):
self.chat_id = str(chat_id)
def __repr__(self):
return "<Permanent pin for ({})>".format(self.chat_id)
PermanentPin.__table__.create(checkfirst=True)
PERMPIN_LOCK = threading.RLock()
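# the lock serializes set_permapin() so concurrent handlers cannot race on
# the same chat row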
def set_permapin(chat_id, message_id):
with PERMPIN_LOCK:
permpin = SESSION.query(PermanentPin).get(str(chat_id))
if not permpin:
permpin = PermanentPin(chat_id)
permpin.message_id = int(message_id)
SESSION.add(permpin)
SESSION.commit()
def get_permapin(chat_id):
try:
permapin = SESSION.query(PermanentPin).get(str(chat_id))
if permapin:
return permapin.message_id
return 0
finally:
SESSION.close()
| 24.302326
| 64
| 0.677512
| 130
| 1,045
| 5.161538
| 0.423077
| 0.089419
| 0.044709
| 0.080477
| 0.113264
| 0.113264
| 0.113264
| 0.113264
| 0
| 0
| 0
| 0.003708
| 0.225837
| 1,045
| 42
| 65
| 24.880952
| 0.825711
| 0
| 0
| 0
| 0
| 0
| 0.035407
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.133333
| 0.033333
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9ff624252765d2c5657956ad0fdec3d525d53544
| 22,024
|
py
|
Python
|
lcfit_utils.py
|
idekany/lcfit
|
4a0080fca981afe2b8974db8f5d3484c663b6c13
|
[
"MIT"
] | null | null | null |
lcfit_utils.py
|
idekany/lcfit
|
4a0080fca981afe2b8974db8f5d3484c663b6c13
|
[
"MIT"
] | null | null | null |
lcfit_utils.py
|
idekany/lcfit
|
4a0080fca981afe2b8974db8f5d3484c663b6c13
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import os
import numpy as np
import fourier as ff
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before pyplot is imported
import warnings
from matplotlib import pyplot as plt
from os.path import isfile
def warn(*args, **kwargs):
print('WARNING: ', *args, file=sys.stderr, **kwargs)
def fit_validate_model(model, x: np.ndarray, y: np.ndarray, train_index, val_index, weights: np.ndarray = None):
x_t, x_v = x[train_index], x[val_index]
y_t, y_v = y[train_index], y[val_index]
if weights is not None:
weights_t, weights_v = weights[train_index], weights[val_index]
else:
weights_t = None
weights_v = None
# print("y_train:")
# print(y_t)
model.fit(x_t, y_t, weights=weights_t)
yhat_v = model.predict(x_v)
return y_v, yhat_v, weights_v
def get_stratification_labels(data, n_folds):
"""
Create an array of stratification labels from an array of continuous values to be used in a stratified cross-
validation splitter.
:param data: list or numpy.ndarray
The input data array.
:param n_folds: int
The number of cross-validation folds to be used with the output labels.
:return: labels, numpy.ndarray
The array of integer stratification labels.
"""
assert isinstance(data, (np.ndarray, list)), "data must be of type list or numpy.ndarray"
if isinstance(data, list):
data = np.array(data)
ndata = len(data)
isort = np.argsort(data) # Indices of sorted phases
labels = np.empty(ndata)
labels[isort] = np.arange(ndata) # Compute phase order
labels = np.floor(labels / n_folds) # compute phase labels for StratifiedKFold
if np.min(np.bincount(labels.astype(int))) < n_folds:  # if too few elements carry the last label, ...
labels[labels == np.max(labels)] = np.max(labels) - 1  # ... then merge that label into the preceding one
return labels
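# For example, with n_folds=5 the five smallest values get label 0, the next
# five get label 1, and so on, so a stratified splitter can spread each value
# range evenly across the folds.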
def write_results(pars, results: dict):
# check if the file already exists:
newfile = not isfile(os.path.join(pars.rootdir, pars.output_param_file))
with open(os.path.join(pars.rootdir, pars.output_param_file), 'a') as file:
if newfile:
# Write header:
if pars.compute_errors:
file.write('# id Nep period totamp A1 A2 A3 A1_e A2_e A3_e phi1 phi2 phi3 '
'phi1_e phi2_e phi3_e phi21 phi21_e phi31 phi31_e '
'meanmag meanmag_e cost aper phcov phcov2 snr ZPErr Npt order minmax')
else:
file.write('# id Nep period totamp A1 A2 A3 phi1 phi2 phi3 phi21 phi31 meanmag cost '
'aper phcov phcov2 snr ZPErr Npt order minmax')
if pars.feh_model_file is not None:
file.write(' FeH')
if pars.compute_errors:
file.write(' FeH_e')
if pars.pca_model_file is not None:
file.write(' E1 E2 E3 E4 E5 E6')
if pars.compute_errors:
file.write(' E1_e E2_e E3_e E4_e E5_e E6_e')
file.write('\n')
# ------------------------
if pars.compute_errors:
file.write(
"%s %4d %.6f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.4f %.4f %.3f %.3f "
"%.3f %.3f %.4f %d %.3f %.3f %.1f %.4f %4d %2d %.3f" %
(results['objname'], results['nepoch'], results['period'], results['tamp'],
results['A'][0], results['A'][1], results['A'][2],
results['A_std'][0], results['A_std'][1], results['A_std'][2],
results['Pha'][0], results['Pha'][1], results['Pha'][2],
results['Pha_std'][0], results['Pha_std'][1], results['Pha_std'][2],
results['phi21'], results['phi21_std'], results['phi31'], results['phi31_std'],
results['icept'], results['icept_std'], results['cost'], results['dataset'] + 1,
results['phcov'], results['phcov2'], results['snr'], results['totalzperr'],
results['ndata'], results['forder'], results['minmax']))
else:
file.write("%s %4d %.6f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.4f %.4f %.3f "
"%.4f %d %.3f %.3f %.1f %.4f %4d %2d %.3f" %
(results['objname'], results['nepoch'], results['period'], results['tamp'],
results['A'][0], results['A'][1], results['A'][2],
results['Pha'][0], results['Pha'][1], results['Pha'][2],
results['phi21'], results['phi31'],
results['icept'], results['cost'], results['dataset'] + 1,
results['phcov'], results['phcov2'], results['snr'], results['totalzperr'],
results['ndata'], results['forder'], results['minmax']))
if pars.feh_model_file is not None:
file.write(" %.3f" % results['feh'])
if pars.compute_errors:
file.write(" %.3f" % results['feh_std'])
if pars.pca_model_file is not None:
file.write(" %.6f %.6f %.6f %.6f %.6f %.6f" %
(results['pca_feat'][0], results['pca_feat'][1], results['pca_feat'][2],
results['pca_feat'][3], results['pca_feat'][4], results['pca_feat'][5]))
if pars.compute_errors:
file.write(" %.6f %.6f %.6f %.6f %.6f %.6f" %
(results['pca_feat_std'][0], results['pca_feat_std'][1], results['pca_feat_std'][2],
results['pca_feat_std'][3], results['pca_feat_std'][4], results['pca_feat_std'][5]))
file.write("\n")
def write_merged_datafile(pars, results: dict):
# check if the file already exists:
newfile = not isfile(os.path.join(pars.rootdir, pars.merged_output_datafile))
with open(os.path.join(pars.rootdir, pars.merged_output_datafile), 'a') as file:
if newfile:
file.write('# id time mag mag_err ZP_err\n')
outarr = np.rec.fromarrays((np.tile(results['objname'], results['ndata']),
results['otime'] + results['otime0'],
results['mag'], results['magerr'], results['zperr']))
np.savetxt(file, outarr, fmt='%s %.6f %.3f %.3f %.3f')
def write_single_datafile(pars, results: dict, phase_ext_neg=0, phase_ext_pos=1.2):
ophase_sorted, mag_sorted = extend_phases(results['ph'], results['mag'],
phase_ext_neg=phase_ext_neg, phase_ext_pos=phase_ext_pos, sort=True)
outarr = np.rec.fromarrays((ophase_sorted, mag_sorted), names=('phase', 'kmag'))
with open(os.path.join(pars.rootdir, pars.output_data_dir, results['objname'] + '.dat'), 'w') as file:
np.savetxt(file, outarr, fmt='%f %f')
if pars.fold_double_period:
ophase_sorted2, mag_sorted2 = extend_phases(results['ph_2p'], results['mag'],
phase_ext_neg=phase_ext_neg, phase_ext_pos=phase_ext_pos, sort=True)
outarr = np.rec.fromarrays((ophase_sorted2, mag_sorted2), names=('phase', 'kmag'))
with open(os.path.join(pars.rootdir, pars.output_data_dir, results['objname'] + '_2p.dat'), 'w') as file:
np.savetxt(file, outarr, fmt='%f %f')
def write_synthetic_data(pars, results: dict):
if pars.gpr_fit:
outarr = np.rec.fromarrays((results['phase_grid'], results['synmag_gpr'] - results['icept']))
np.savetxt(os.path.join(pars.rootdir, pars.output_syn_dir,
results['objname'] + "_gpr" + pars.syn_suffix + '.dat'),
outarr, fmt='%.4f %.4f')
if pars.n_augment_data is not None:
outarr = np.hstack((results['phase_grid'].reshape(-1, 1), (results['synmag_gpr']).reshape(-1, 1), results['synmag_gpa']))
np.savetxt(os.path.join(pars.rootdir, pars.output_syn_dir,
results['objname'] + "_gpr_aug" + pars.syn_suffix + '.dat'),
outarr, fmt='%7.4f ' * (pars.n_augment_data + 2))
else:
outarr = np.rec.fromarrays((results['phase_grid'], results['syn'] - results['icept']))
np.savetxt(os.path.join(pars.rootdir, pars.output_syn_dir,
results['objname'] + "_dff" + pars.syn_suffix + '.dat'),
outarr, fmt='%.4f %.4f')
def make_figures(pars, results: dict, constrain_yaxis_range=True,
minphase=0, maxphase=1.2, aspect_ratio=0.6, figformat: str = 'png'):
# Create phase diagram:
outfile = os.path.join(pars.rootdir, pars.plot_dir, results['objname'] + pars.plot_suffix + "." + figformat)
plottitle = results['objname']
# plottitle = None
# figtext = '$P = {0:.6f}$ , $N_F = {1}$ , ap = {2}'.format(results['period'],results['forder'],bestap+1)
# figtext = '$P = {0:.6f}$'.format(results['period'])
figtext = '$P = {0:.6f}$ , $S/N = {1:d}$'.format(results['period'], int(results['snr']))
data1 = np.vstack((results['ph_o'], results['mag_o'], results['magerr_o'])).T
data2 = np.vstack((results['ph'], results['mag'], results['magerr'])).T
if pars.fourier_from_gpr:
data3 = np.vstack((results['phase_grid'], results['synmag_gpr'])).T
else:
data3 = np.vstack((results['phase_grid'], results['syn'])).T
# labels = ("orig.", "clipped", "binned", "DFF")
if pars.gpr_fit and pars.plot_gpr:
data4 = np.vstack((results['phase_grid'], results['synmag_gpr'], results['sigma_gpr'])).T
plot_input = (data1, data2, data3, data4)
fillerr_index = (3,)
symbols = ('r.', 'b.', 'r-', 'b-')
else:
plot_input = (data1, data2, data3)
fillerr_index = ()
symbols = ('r.', 'k.', 'r-')
plotlc(plot_input, symbols=symbols, fillerr_index=fillerr_index, figsave=pars.save_figures, outfile=outfile,
xlabel='phase', ylabel='$' + pars.waveband + '$ [mag.]', figtext=figtext, title=plottitle,
constrain_yaxis_range=constrain_yaxis_range, minphase=minphase, maxphase=maxphase,
aspect_ratio=aspect_ratio, figformat=figformat)
if pars.fold_double_period:
# Create phase diagram with double period:
outfile = os.path.join(pars.rootdir, pars.plot_dir, results['objname'] + pars.plot_suffix + "_2p." + figformat)
figtext = '$2P = {0:.6f}$'.format(results['period'] * 2)
data1 = np.vstack(
(results['ph_o_2p'], results['mag_o'], np.sqrt(results['magerr_o'] ** 2 + results['zperr_o'] ** 2))).T
data2 = np.vstack(
(results['ph_2p'], results['mag'], np.sqrt(results['magerr'] ** 2 + results['zperr'] ** 2))).T
labels = ("orig.", "clipped")
plot_input = (data1, data2)
symbols = ('ro', 'ko')
plotlc(plot_input, symbols=symbols, fillerr_index=(), figsave=pars.save_figures, outfile=outfile,
xlabel='phase', ylabel='$' + pars.waveband + '$ [mag.]', figtext=figtext, title=results['objname'],
constrain_yaxis_range=True, figformat=figformat)
def read_input(fname: str, do_gls=False, known_columns=False):
"""
Reads the input list file with columns: object ID, [period, [dataset]]
:param fname: string, the name of the input file
:param do_gls: boolean, whether to perform GLS on the input time series. If False, the second column of the input
file must contain the period.
:param known_columns: boolean; whether the dataset to be used is known. If True, the last column of the input
file must contain the index of the dataset (column) to be used.
:return: ndarray(s) or None(s); 1-d arrays with the object IDs, periods, and datasets
"""
dtypes = ['|S25'] # dtype for first column: identifiers
if do_gls:
if known_columns:
usecols = (0, 1)
dtypes = dtypes + ['i']
else:
usecols = (0,)
else:
if known_columns:
usecols = (0, 1, 2)
dtypes = dtypes + ['f8'] + ['i']
else:
usecols = (0, 1)
dtypes = dtypes + ['f8']
arr = np.genfromtxt(fname, usecols=usecols,
dtype=dtypes, unpack=False, comments='#', filling_values=np.nan, names=True)
object_id = arr['id'].reshape(-1, ).astype(str)
if do_gls:
object_per = None
else:
object_per = arr['period'].reshape(-1, )
if known_columns:
object_ap = arr['ap'].reshape(-1, )
else:
object_ap = None
return object_id, object_per, object_ap
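# Hedged usage sketch (hypothetical file contents; the header row must name the
# columns 'id', 'period' and 'ap' for `names=True` above to resolve them):
#   # id period ap
#   star_001 0.553400 2
#   star_002 0.611800 1
# object_id, object_per, object_ap = read_input('targets.lst', known_columns=True)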
def read_lc(lcfile, n_data_cols: int = 1, is_err_col: bool = False, flag_column: bool = False,
snr_column: bool = False, is_zperr_col: bool = False, missing_values="NaN", invalid_raise=False):
assert n_data_cols > 0, "`n_data_cols` must be a positive integer"
colnames = ['otime']
dtypes = [float]
ncols = 1
for ii in range(n_data_cols):
colnames.append('mag' + str(ii+1))
dtypes.append(float)
ncols += 1
if is_err_col:
# We expect the column following each magnitude column to contain the magnitude uncertainty
colnames.append('magerr' + str(ii + 1))
dtypes.append(float)
ncols += 1
if is_zperr_col:
# The last column is expected to contain the zero-point error:
colnames.append('zperr' + str(ii + 1))
dtypes.append(float)
ncols += 1
if snr_column:
# We expect the next column to contain the S/N
colnames.append('snr' + str(ii + 1))
dtypes.append(float)
ncols += 1
if flag_column:
# We expect the next column to contain the flag
colnames.append('flag' + str(ii + 1))
dtypes.append('|S10')
ncols += 1
used_cols = list(range(ncols))
# Read light curve:
lcdatain = np.genfromtxt(lcfile, unpack=False, comments='#', filling_values=np.nan,
dtype=dtypes, usecols=used_cols, missing_values=missing_values,
names=colnames, invalid_raise=invalid_raise)
print(lcfile + " found.")
lcdatain = lcdatain[~np.isnan(lcdatain['otime'])]
return lcdatain
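# Hedged example of the column layout this reader expects: with n_data_cols=2,
# is_err_col=True and is_zperr_col=True, the columns are assumed to be
#   otime mag1 magerr1 zperr1 mag2 magerr2 zperr2
# lcdata = read_lc('lightcurve.dat', n_data_cols=2, is_err_col=True, is_zperr_col=True)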
def degrade_lc(otime, mag, magerr, zperr, period=1.0, remove_points=True, nkeep=50,
min_otime=None, max_otime=None,
add_noise=False, sigma_noise=0.05,
add_phasegap=False, gap_pos=None, gap_length=0.1,
add_outliers=False, sigma_outliers=0.1, frac_outliers=0.1,
verbose=False):
if min_otime is not None:
mask = (otime > min_otime)
otime, mag, magerr, zperr = otime[mask], mag[mask], magerr[mask], zperr[mask]
if max_otime is not None:
mask = (otime < max_otime)
otime, mag, magerr, zperr = otime[mask], mag[mask], magerr[mask], zperr[mask]
if add_phasegap:
if gap_pos is None:
# Make the phasegap's position random between 0 and 1:
gap_pos = np.random.random()
pha = ff.get_phases(period, otime, epoch=0.0, shift=0.0, all_positive=True)
if gap_pos + gap_length > 1:
not_gap_inds = (pha < gap_pos) & (pha > (gap_pos - 1 + gap_length))
else:
not_gap_inds = (pha < gap_pos) | (pha > (gap_pos + gap_length))
mag = mag[not_gap_inds]
otime = otime[not_gap_inds]
magerr = magerr[not_gap_inds]
zperr = zperr[not_gap_inds]
if verbose:
print("N_data = {} (after phase gap added)".format(len(mag)))
if remove_points:
nremove = len(mag) - nkeep
if nremove > 0:
rem_inds = np.random.choice(range(len(mag)), size=nremove, replace=False)
otime = np.delete(otime, rem_inds)
mag = np.delete(mag, rem_inds)
magerr = np.delete(magerr, rem_inds)
zperr = np.delete(zperr, rem_inds)
if verbose:
print("N_data = {} (after points removed)".format(len(mag)))
out_inds = np.array([])
if add_outliers:
out_inds = np.random.choice(range(len(mag)), size=int(len(mag) * frac_outliers), replace=False)
mag[out_inds] = np.random.normal(mag[out_inds], sigma_outliers)
if verbose:
print("{}% of points made outliers with sigma = {}".format(frac_outliers * 100.0, sigma_outliers))
if add_noise:
mag = np.random.normal(mag, sigma_noise)  # perturb each magnitude by Gaussian noise with std sigma_noise
magerr = magerr + sigma_noise
return otime, mag, magerr, zperr, out_inds
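# Hedged usage sketch (all keyword values below are illustrative assumptions):
# otime2, mag2, magerr2, zperr2, out_idx = degrade_lc(
#     otime, mag, magerr, zperr, period=0.55, nkeep=40,
#     add_phasegap=True, gap_length=0.2, add_outliers=True, frac_outliers=0.05)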
def plotlc(datasets, symbols=(), labels=(), fillerr_index=(), title=None, figtext="",
minphase=-0.05, maxphase=2.05, figsave=False, outfile=None, invert_y_axis=True,
constrain_yaxis_range=False, xlabel='phase', ylabel='magnitude', aspect_ratio=0.6, figformat="png"):
capsize = 1 # size of the error cap
assert type(datasets) is tuple, "Error: expected tuple for 'datasets', got {}".format(type(datasets))
assert type(symbols) is tuple, "Error: expected tuple for 'symbols', got {}".format(type(symbols))
assert (type(labels) is tuple), "Error: expected tuple for 'labels', got {}".format(type(labels))
assert (type(figtext) is str), "Error: expected string for 'figtext', got {}".format(type(figtext))
# Check if there is a title, if yes, adjust plot to make it fit and write it.
fig = plt.figure(figsize=(6, 6 * aspect_ratio))
if title is not None:
if len(labels) > 0:
fig.subplots_adjust(bottom=0.15, top=0.80, hspace=0.3, left=0.12, right=0.98, wspace=0)
else:
fig.subplots_adjust(bottom=0.15, top=0.88, hspace=0.3, left=0.12, right=0.98, wspace=0)
fig.suptitle('%s' % title, fontsize=12, fontweight='bold')
else:
if len(labels) > 0:
fig.subplots_adjust(bottom=0.15, top=0.88, hspace=0.3, left=0.12, right=0.98, wspace=0)
else:
fig.subplots_adjust(bottom=0.15, top=0.95, hspace=0.3, left=0.12, right=0.98, wspace=0)
ax = fig.add_subplot(111, facecolor='#FFFFEC')
nsymbols = len(symbols)
nlabels = len(labels)
# Iterate over the 'datasets' tuple:
for item, dataset in enumerate(datasets):
# assert(type(dataset) is ndarray)
if dataset.shape[0] < 1: # check if dataset is empty
continue
ncols = dataset.shape[1]
assert ncols > 1 # check if there are at least 2 columns
phase = dataset[:, 0]
mag = dataset[:, 1]
if ncols > 2:
magerr = dataset[:, 2]
else:
magerr = None
if nsymbols > item:
symbol = symbols[item]
color = None
else:
symbol = 'o'
color = next(ax._get_lines.prop_cycler)['color']
if nlabels > item:
label = labels[item]
else:
label = None
if item in fillerr_index:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
base, = ax.plot(phase, mag, symbol, label=label, color=color, zorder=48)
# Shade the 95% credible interval around the optimal solution.
ax.fill(np.concatenate([phase.ravel(), phase.ravel()[::-1]]),
np.concatenate([mag.ravel() - 1.9600 * magerr,
(mag.ravel() + 1.9600 * magerr)[::-1]]),
alpha=.4, fc=base.get_color(), ec='None', zorder=70)
else:
ax.errorbar(phase, mag, yerr=magerr, fmt=symbol, label=label, capsize=capsize, color=color)
if maxphase > 1:
ax.errorbar(phase + 1, mag, yerr=magerr, fmt=symbol, capsize=capsize, color=color)
if nlabels > 0:
plt.legend(fontsize=8, loc='upper center', bbox_to_anchor=(0.5, 1.20),
ncol=4, fancybox=True, shadow=False)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if len(figtext) > 0:
ax.text(0.05, 1.02, "%s" % figtext, ha='left', va='top', bbox=dict(boxstyle='round', ec='k', fc='w'),
transform=ax.transAxes)
plt.xlim(minphase, maxphase)
if constrain_yaxis_range:
# The y-axis range will be optimized for the range of datasets[1][:, 1].
minmag = np.min(datasets[1][:, 1])
maxmag = np.max(datasets[1][:, 1])
magrange = maxmag - minmag
ax.set_ylim(minmag - magrange / 5., maxmag + magrange / 5.)
if invert_y_axis:
plt.gca().invert_yaxis()
# plt.tight_layout()
if figsave and (outfile is not None):
fig.savefig(outfile, format=figformat)
plt.close(fig)
else:
fig.show()
return None
def extend_phases(p, y, phase_ext_neg=0.0, phase_ext_pos=0.0, sort=False):
"""
Extend a phase and a corresponding data vector in phase.
"""
# Extend data vectors in phase:
neg_ext_mask = (p - 1 > phase_ext_neg) # select phases in negative direction
pos_ext_mask = (p + 1 < phase_ext_pos) # select phases in positive direction
# Compose new data vectors according to extended phases:
p_ext = np.hstack((p[neg_ext_mask] - 1, p, p[pos_ext_mask] + 1))
y_ext = np.hstack((y[neg_ext_mask], y, y[pos_ext_mask]))
# magerr_ext=np.hstack((results['magerr_binned'][neg_ext_mask], results['magerr_binned'],
# results['magerr_binned'][pos_ext_mask]))
if sort:
# Sort data according to observed phases:
indx = np.argsort(p_ext) # indices of sorted ophase
p_ext_sorted = p_ext[indx]
y_ext_sorted = y_ext[indx]
return p_ext_sorted, y_ext_sorted
else:
return p_ext, y_ext
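# Worked example: with p = [0.1, 0.5, 0.9] and phase_ext_pos=1.2, only the
# point at 0.1 satisfies p + 1 < 1.2, so it is duplicated at 1.1 and
# extend_phases returns p_ext = [0.1, 0.5, 0.9, 1.1] with y duplicated to match.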
def smolec_feh(period, phi31, amp2):
return -6.125 - 4.795 * period + 1.181 * phi31 + 7.876 * amp2
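# Note (assumed attribution): this has the form of an empirical photometric
# [Fe/H] calibration for RR Lyrae stars (cf. Smolec 2005), with the period in
# days, phi31 a Fourier phase difference, and amp2 the A2 Fourier amplitude.
# Toy evaluation (pure arithmetic): smolec_feh(0.31, 3.2, 0.05) ~= -3.44.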
9ff65d9e76edd0a7d15ce5ca32d68a653fd8c1bc | 2,939 | py | Python | facetool/annotator.py | yliess86/FaceTool | f93c511e9868b4555225750efbac2228a00fea00 | ["MIT"] | 4 | 2020-05-03T01:29:23.000Z | 2020-07-15T08:13:05.000Z | facetool/annotator.py | yliess86/FaceTool | f93c511e9868b4555225750efbac2228a00fea00 | ["MIT"] | 3 | 2020-04-30T01:18:02.000Z | 2020-05-01T14:52:11.000Z | facetool/annotator.py | yliess86/FaceCrop | f93c511e9868b4555225750efbac2228a00fea00 | ["MIT"] | 1 | 2020-05-16T21:27:24.000Z | 2020-05-16T21:27:24.000Z |
# -*- coding: utf-8 -*-
"""facetool.annotator
This file provides a Face Annotator in charge of combining the results of the
Face Detector and Face Landmark in a single pandas DataFrame. This Face
Annotator is the API built to be used by the end user.
"""
from facetool.detector import FaceDetector
from facetool.landmarker import FaceLandmarker
from tqdm import tqdm
from typing import Tuple
import numpy as np
import pandas as pd
class FaceAnnotator:
"""Face Annotator
Face Annotator combines the boxes of a Face Detector and the landmarks of a
Face Landmarker in a single DataFrame that can later be used for analysis,
further computation, or visualization.
Arguments:
dbatch_size {int} -- batch size for the detector inference
lbatch_size {int} -- batch size for the landmarker frame loading
size {Tuple[int, int]} -- resize frames for detector
n_process {int} -- number of threads used by the landmarker
device {str} -- device to run the detector on ("cpu" or "cuda")
"""
def __init__(
self, dbatch_size: int, lbatch_size: int, size: Tuple[int, int],
n_process: int, device: str,
) -> None:
self.detector = FaceDetector(device, dbatch_size, size)
self.landmarker = FaceLandmarker(n_process, lbatch_size)
def __call__(self, path: str) -> pd.DataFrame:
"""Call
Combines boxes and landmarks in a single DataFrame.
Arguments:
path {str} -- path to the video to be annotated
Returns:
pd.DataFrame -- dataframe containing boxes and landmarks
information of size [N, 1 + 4 + 68 * 2] where:
* N -> valid frames (frame with face detected)
* 1 -> frame_idx
* 4 -> box_x, box_y, box_w, box_h
* 68 * 2 -> landmark_i_x, landmark_i_y for i in range(68)
"""
boxes = self.detector(path)
landmarks = self.landmarker(path, boxes)
N, B = boxes.shape # Frames x Box Data
N, L, P = landmarks.shape # Frames x Landmark x Coords
# Combine Data
data = np.zeros((N, B + L * P), dtype=int)
pbar = tqdm(enumerate(zip(boxes, landmarks)), desc="Face Annotator")
for i, (box, landmark) in pbar:
data[i, 0:(4 + 1)] = box # t, x, y, w, h -> 5
data[i, 5::2] = landmark[:, 0] # x_0 .... x_68 -> 68
data[i, 6::2] = landmark[:, 1] # y_0 .... y_68 -> 68
# Helpers to Name Landmarks Columns
lpos = lambda k: "x" if k == 0 else "y"
lname = lambda j, k: f"landmark_{j + 1}_{lpos(k)}"
# Landmarks Column Names
names = ["frame_idx", "box_x", "box_y", "box_w", "box_h"]
names += [lname(j, k) for j in range(L) for k in range(P)]
# Create DataFrame
df = pd.DataFrame(data=data, columns=names)
return df
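# Hedged usage sketch (the video path and all constructor values are
# illustrative assumptions, not project defaults):
# annotator = FaceAnnotator(dbatch_size=32, lbatch_size=8, size=(640, 360),
#                           n_process=4, device="cpu")
# df = annotator("video.mp4")
# df.to_csv("annotations.csv", index=False)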
9ff7ddf37d375ebc0e9b1af36cfd6f7f85ab8e18 | 1,338 | py | Python | pygrn/problems/air_quality.py | nico1as/pyGRN | 115d9d42dfbd374fc64393cabefb2a8e245aa6b7 | ["Apache-2.0"] | 7 | 2018-07-18T16:08:51.000Z | 2020-12-09T07:18:35.000Z | pygrn/problems/air_quality.py | nico1as/pyGRN | 115d9d42dfbd374fc64393cabefb2a8e245aa6b7 | ["Apache-2.0"] | 3 | 2018-04-13T11:44:59.000Z | 2018-04-19T13:58:06.000Z | pygrn/problems/air_quality.py | nico1as/pyGRN | 115d9d42dfbd374fc64393cabefb2a8e245aa6b7 | ["Apache-2.0"] | 6 | 2018-07-22T01:54:14.000Z | 2021-08-04T16:01:38.000Z |
from __future__ import print_function
import numpy as np
import os
from datetime import datetime
from pygrn.problems import TimeRegression
class AirQuality(TimeRegression):
def __init__(self, namestr=datetime.now().isoformat(), learn=True,
epochs=1, root_dir='./', lamarckian=False):
data_file = os.path.join(root_dir, 'data/normalized_air_quality.csv')
all_dat = np.genfromtxt(data_file, delimiter=',')
winsize = 5
data = all_dat[:, 1:]
labels = all_dat[winsize:, 0]
windowed = data[:-winsize, :]
for i in range(1, winsize):
windowed = np.concatenate((windowed, data[i:-(winsize-i), :]),
axis=1)
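# With winsize=5, row j of `windowed` is the horizontal concatenation
# data[j], data[j+1], ..., data[j+4], and labels[j] is column 0 at time j+5,
# i.e. the model predicts one step past a five-step window.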
num_train = int(3*np.floor(windowed.shape[0]/4))
self.x_train = windowed[:num_train, :]
self.x_test = windowed[num_train:, :]
self.y_train = labels[:num_train]
self.y_test = labels[num_train:]
self.batch_size = 30
self.epochs = epochs
self.learn = learn
self.generation = 0
self.error = 0.1
self.error_decrease = 0.9
self.lamarckian = lamarckian
self.nin = data.shape[1]
self.nout = 1
self.cacheable = False
self.logfile = os.path.join(root_dir, 'logs/air_' + namestr + '.log')
9ff867269ebc563da12e37b56fdbdcb6807b0b80 | 3,572 | py | Python | vocabulary.py | retrieva/python_stm | 862e63e6f03b326cb036b1136dead280c42b9da8 | ["MIT"] | 11 | 2020-02-07T05:26:08.000Z | 2021-11-27T09:51:24.000Z | vocabulary.py | retrieva/python_stm | 862e63e6f03b326cb036b1136dead280c42b9da8 | ["MIT"] | null | null | null | vocabulary.py | retrieva/python_stm | 862e63e6f03b326cb036b1136dead280c42b9da8 | ["MIT"] | 1 | 2020-02-10T02:44:37.000Z | 2020-02-10T02:44:37.000Z |
# This code is available under the MIT License.
# (c)2010-2011 Nakatani Shuyo / Cybozu Labs Inc.
# (c)2018-2019 Hiroki Iida / Retrieva Inc.
import nltk
import re
import MeCab
stopwords_list = nltk.corpus.stopwords.words('english')
recover_list = {"wa":"was", "ha":"has"}
wl = nltk.WordNetLemmatizer()
def load_corpus(ranges):
"""
load data from corpus
"""
tmp = re.match(r'(\d+):(\d+)$', ranges)
if tmp:
start = int(tmp.group(1))
end = int(tmp.group(2))
from nltk.corpus import brown as corpus
return [corpus.words(fileid) for fileid in corpus.fileids()[start:end]]
def load_dataframe(documents):
corpus = []
for doc in documents:
sentences = re.findall(r'\w+(?:\'\w+)?', doc)
if len(sentences) > 0:
corpus.append(sentences)
return corpus
def load_dataframe_jp(documents):
corpus = []
tagger = MeCab.Tagger('-O wakati')
tagger.parse("")
for doc in documents:
tokens = tagger.parse(doc.strip()).split()
corpus.append(tokens)
return corpus
def load_file(filename):
"""
for one file
one line corresponds to one doc
"""
corpus = []
f = open(filename, 'r')
for line in f:
doc = re.findall(r'\w+(?:\'\w+)?', line)
if len(doc) > 0:
corpus.append(doc)
f.close()
return corpus
def is_stopword(w):
return w in stopwords_list
def lemmatize(w0):
w = wl.lemmatize(w0.lower())
if w in recover_list: return recover_list[w]
return w
class Vocabulary:
def __init__(self, excluds_stopwords=False):
self.vocas = [] # id to word
self.vocas_id = dict() # word to id
self.docfreq = [] # id to document frequency
self.excluds_stopwords = excluds_stopwords
def term_to_id(self, term0):
term = lemmatize(term0)
if self.excluds_stopwords and is_stopword(term):
return None
if term not in self.vocas_id:
voca_id = len(self.vocas)
self.vocas_id[term] = voca_id
self.vocas.append(term)
self.docfreq.append(0)
else:
voca_id = self.vocas_id[term]
return voca_id
def doc_to_ids(self, doc):
ids_list = []
words = dict()
for term in doc:
id = self.term_to_id(term)
if id is not None:
ids_list.append(id)
if id not in words:
words[id] = 1
self.docfreq[id] += 1
if hasattr(doc, "close"):
doc.close()
return ids_list
def cut_low_freq(self, corpus, threshold=1):
new_vocas = []
new_docfreq = []
self.vocas_id = dict()
conv_map = dict()
for id, term in enumerate(self.vocas):
freq = self.docfreq[id]
if freq > threshold:
new_id = len(new_vocas)
self.vocas_id[term] = new_id
new_vocas.append(term)
new_docfreq.append(freq)
conv_map[id] = new_id
self.vocas = new_vocas
self.docfreq = new_docfreq
def conv(doc):
new_doc = []
for id in doc:
if id in conv_map: new_doc.append(conv_map[id])
return new_doc
return [conv(doc) for doc in corpus]
def __getitem__(self, v):
return self.vocas[v]
def size(self):
return len(self.vocas)
def is_stopword_id(self, id):
return self.vocas[id] in stopwords_list
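# Hedged usage sketch (toy documents; assumes the NLTK wordnet and stopwords
# corpora have been downloaded beforehand):
# voca = Vocabulary(excluds_stopwords=True)
# docs = [['cats', 'chase', 'mice'], ['mice', 'hide']]
# ids = [voca.doc_to_ids(d) for d in docs]
# ids = voca.cut_low_freq(ids, threshold=0)  # keep terms with docfreq > 0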
9ffac072e4010a04d6f1b435f72c2103f99a9533 | 7,664 | py | Python | kubb_match/views/rest.py | BartSaelen/kubb_match | 848663bb3db5da73b726a956aa887c3eec30db8b | ["Apache-2.0"] | 2 | 2015-05-03T13:42:27.000Z | 2015-08-07T07:42:29.000Z | kubb_match/views/rest.py | BartSaelen/kubb_match | 848663bb3db5da73b726a956aa887c3eec30db8b | ["Apache-2.0"] | 2 | 2016-09-15T12:38:22.000Z | 2016-09-15T12:41:18.000Z | kubb_match/views/rest.py | BartSaelen/kubb_match | 848663bb3db5da73b726a956aa887c3eec30db8b | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
from pyramid.view import view_defaults, view_config
from kubb_match.data.mappers import map_team, map_game
from kubb_match.data.models import Team
from kubb_match.service.tournament_service import TournamentService
class RestView(object):
def __init__(self, request):
self.request = request
self.data_manager = request.data_managers['data_manager']
def _get_json_body(self):
try:
json_body = self.request.json_body
except AttributeError as e:
raise HTTPBadRequest(detail="Request contains no JSON body. \n%s" % e)
except ValueError as e:
raise HTTPBadRequest(detail="Request contains an invalid JSON body. \n%s" % e)
if 'id' in self.request.matchdict and 'id' not in json_body:
json_body['id'] = self.request.matchdict['id']
return json_body
@view_defaults(renderer='json', accept='application/json')
class TeamView(RestView):
@view_config(route_name='teams',
request_method='GET',
permission='view',
renderer='listjson')
def get_teams(self):
return self.data_manager.get_teams()
@view_config(route_name='team',
request_method='GET',
permission='view',
renderer='itemjson')
def get_team(self):
tid = self.request.matchdict['id']
t = self.data_manager.get_team(tid)
if not t:
return HTTPNotFound()
return t
def edit_team(self, t, json_body):
t = map_team(json_body, t)
t = self.data_manager.save(t)
return t
@view_config(
route_name='teams',
request_method='POST',
permission='admin',
renderer='itemjson'
)
def add_team(self):
team_data = self._get_json_body()
t = Team()
t = self.edit_team(t, team_data)
self.request.response.status = '201'
self.request.response.location = \
self.request.route_path('team', id=t.id)
return t
@view_config(
route_name='team',
request_method='PUT',
permission='admin',
renderer='itemjson'
)
def update_team(self):
tid = self.request.matchdict.get('id')
t = self.data_manager.get_team(tid)
if not t:
return HTTPNotFound()
team_data = self._get_json_body()
if 'id' in self.request.matchdict and 'id' not in team_data:
team_data['id'] = self.request.matchdict['id']
t = self.edit_team(t, team_data)
self.request.response.status = '200'
self.request.response.location = \
self.request.route_path('team', id=t.id)
return t
@view_defaults(renderer='json', accept='application/json')
class RoundView(RestView):
@view_config(route_name='rounds',
request_method='GET',
permission='view',
renderer='listjson')
def get_rounds(self):
return self.data_manager.get_rounds()
@view_config(route_name='round',
request_method='GET',
permission='view',
renderer='itemjson')
def get_round(self):
rid = self.request.matchdict['id']
r = self.data_manager.get_round(rid)
if not r:
return HTTPNotFound()
return r
@view_config(route_name='round_games',
request_method='GET',
permission='view',
renderer='listjson')
def get_games(self):
rid = self.request.matchdict['id']
r = self.data_manager.get_round(rid)
return r.games
@view_config(route_name='round_game',
request_method='GET',
permission='view',
renderer='itemjson')
def get_game(self):
rid = self.request.matchdict['id']
r = self.data_manager.get_round(rid)
if not r:
return HTTPNotFound()
gid = self.request.matchdict['gid']
game = self.data_manager.get_game(gid)
return game
@view_config(route_name='round_game',
request_method='PUT',
permission='view',
renderer='itemjson')
def edit_game(self):
rid = self.request.matchdict['id']
r = self.data_manager.get_round(rid)
if not r:
return HTTPNotFound()
gid = self.request.matchdict['gid']
game = self.data_manager.get_game(gid)
game_data = self._get_json_body()
if 'gid' in self.request.matchdict and 'gid' not in game_data:
game_data['gid'] = self.request.matchdict['gid']
game = map_game(game_data, game)
game = self.data_manager.save(game)
return game
@view_config(route_name='round_positions',
request_method='GET',
permission='view',
renderer='listjson')
def get_positions(self):
rid = self.request.matchdict['id']
r = self.data_manager.get_round(rid)
return r.positions
@view_defaults(renderer='json', accept='application/json')
class TournamentPhaseView(RestView):
def __init__(self, request):
super().__init__(request)
self.tournament_service = TournamentService(self.data_manager)
@view_config(route_name='phases',
request_method='GET',
permission='view',
renderer='listjson')
def get_phases(self):
return self.data_manager.get_phases()
@view_config(route_name='phase',
request_method='GET',
permission='view',
renderer='itemjson')
def get_phase(self):
pid = self.request.matchdict['id']
p = self.data_manager.get_phase(pid)
if not p:
return HTTPNotFound()
return p
@view_config(route_name='phase_status',
request_method='POST',
permission='view',
renderer='itemjson')
def tournament_phase_status(self):
data = self._get_json_body()
pid = self.request.matchdict['id']
if 'status' not in data:
return HTTPBadRequest('status should be present')
else:
status = data['status']
round = None
p = self.data_manager.get_phase(pid)
if status == 'init':
if not p:
return HTTPNotFound()
if p.type == 'battle':
round = self.tournament_service.init_battle_phase(p)
elif p.type == 'ko':
p1 = self.data_manager.get_phase(1)
lr = next((r for r in p1.rounds if not r.played))
round = self.tournament_service.init_ko_phase(p, lr.positions)
round = round['A']
elif status == 'next':
if p.type == 'battle':
round = self.tournament_service.next_battle_round(p)
elif p.type == 'ko':
round = self.tournament_service.next_ko_round(p)
round = round[0]
elif status == 'final':
if p.type == 'battle':
round = self.tournament_service.final_battle_round(p)
elif p.type == 'ko':
round = self.tournament_service.final_ko_round(p)
round = round[0]
else:
return HTTPBadRequest('invalid phase_type')
self.request.response.status = '201'
self.request.response.location = \
self.request.route_path('round', id=round.id)
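# Hedged wiring sketch (route names taken from the @view_config decorators
# above; the URL patterns are assumptions):
# config.add_route('teams', '/teams')
# config.add_route('team', '/teams/{id}')
# config.add_route('phase_status', '/phases/{id}/status')
# config.scan()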
9ffb3711d6a34d1adba73090bd3c202a99a4f456 | 2,651 | py | Python | CTCWordBeamSearch-master/tests/test_word_beam_search.py | brucegrapes/htr | 9f8f07173ccc740dd8a4dfc7e8038abe36664756 | ["MIT"] | 488 | 2018-03-01T11:18:26.000Z | 2022-03-10T09:29:32.000Z | CTCWordBeamSearch-master/tests/test_word_beam_search.py | brucegrapes/htr | 9f8f07173ccc740dd8a4dfc7e8038abe36664756 | ["MIT"] | 60 | 2018-03-10T18:37:51.000Z | 2022-03-30T19:37:18.000Z | CTCWordBeamSearch-master/tests/test_word_beam_search.py | brucegrapes/htr | 9f8f07173ccc740dd8a4dfc7e8038abe36664756 | ["MIT"] | 152 | 2018-03-01T11:18:25.000Z | 2022-03-08T23:37:46.000Z |
import codecs
import numpy as np
from word_beam_search import WordBeamSearch
def apply_word_beam_search(mat, corpus, chars, word_chars):
"""Decode using word beam search. Result is tuple, first entry is label string, second entry is char string."""
T, B, C = mat.shape
# decode using the "Words" mode of word beam search with beam width set to 25 and add-k smoothing to 0.0
assert len(chars) + 1 == C
wbs = WordBeamSearch(25, 'Words', 0.0, corpus.encode('utf8'), chars.encode('utf8'), word_chars.encode('utf8'))
label_str = wbs.compute(mat)
# result is string of labels terminated by blank
char_str = []
for curr_label_str in label_str:
s = ''
for label in curr_label_str:
s += chars[label] # map label to char
char_str.append(s)
return label_str[0], char_str[0]
def load_mat(fn):
"""Load matrix from csv and apply softmax."""
mat = np.genfromtxt(fn, delimiter=';')[:, :-1] # load matrix from file
T = mat.shape[0] # dim0=t, dim1=c
# apply softmax
res = np.zeros(mat.shape)
for t in range(T):
y = mat[t, :]
e = np.exp(y)
s = np.sum(e)
res[t, :] = e / s
# expand to TxBxC
return np.expand_dims(res, 1)
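# Equivalent vectorized form of the softmax loop above (a sketch; same result):
# e = np.exp(mat)
# res = e / e.sum(axis=1, keepdims=True)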
def test_mini_example():
"""Mini example, just to check that everything is working."""
corpus = 'a ba' # two words "a" and "ba", separated by whitespace
chars = 'ab ' # the first three characters which occur in the matrix (in this ordering)
word_chars = 'ab' # whitespace not included which serves as word-separating character
mat = np.array([[[0.9, 0.1, 0.0, 0.0]], [[0.0, 0.0, 0.0, 1.0]],
[[0.6, 0.4, 0.0, 0.0]]]) # 3 time-steps and 4 characters per time step ("a", "b", " ", blank)
res = apply_word_beam_search(mat, corpus, chars, word_chars)
print('')
print('Mini example:')
print('Label string:', res[0])
print('Char string:', '"' + res[1] + '"')
assert res[1] == 'ba'
def test_real_example():
"""Real example using a sample from a HTR dataset."""
data_path = '../data/bentham/'
corpus = codecs.open(data_path + 'corpus.txt', 'r', 'utf8').read()
chars = codecs.open(data_path + 'chars.txt', 'r', 'utf8').read()
word_chars = codecs.open(data_path + 'wordChars.txt', 'r', 'utf8').read()
mat = load_mat(data_path + 'mat_2.csv')
res = apply_word_beam_search(mat, corpus, chars, word_chars)
print('')
print('Real example:')
print('Label string:', res[0])
print('Char string:', '"' + res[1] + '"')
assert res[1] == 'submitt both mental and corporeal, is far beyond any idea'
9ffdc1e59bb26b37e4cdbdb001abd755fccd616d | 859 | py | Python | src/api/migrations/versions/2021-09-25_add_session_type_and_instructor.py | YACS-RCOS/yacs.n | a04f8e79279826914b942e3a8c709c50f08ff149 | ["MIT"] | 20 | 2020-02-29T19:03:31.000Z | 2022-02-18T21:13:12.000Z | src/api/migrations/versions/2021-09-25_add_session_type_and_instructor.py | YACS-RCOS/yacs.n | a04f8e79279826914b942e3a8c709c50f08ff149 | ["MIT"] | 465 | 2020-02-29T19:08:18.000Z | 2022-03-18T22:21:49.000Z | src/api/migrations/versions/2021-09-25_add_session_type_and_instructor.py | YACS-RCOS/yacs.n | a04f8e79279826914b942e3a8c709c50f08ff149 | ["MIT"] | 19 | 2020-02-29T01:22:23.000Z | 2022-02-14T01:47:09.000Z |
"""add session type and instructor
Revision ID: 54df4fb8dfe9
Revises: a3be4710680d
Create Date: 2021-09-25 03:08:18.501929
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '54df4fb8dfe9'
down_revision = 'a3be4710680d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('course_session', sa.Column('instructor', sa.VARCHAR(length=255), nullable=True))
op.add_column('course_session', sa.Column('session_type', sa.VARCHAR(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('course_session', 'session_type')
op.drop_column('course_session', 'instructor')
# ### end Alembic commands ###
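# Standard Alembic invocations for this revision:
#   alembic upgrade head   # applies upgrade()
#   alembic downgrade -1   # reverts via downgrade()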
9ffddf9f2ec970e9ca9b3a8192c022d87d76144d | 1,656 | py | Python | plot_data.py | qzane/kmeans-cuda | f2a0e8dd6859cf735c95e1365342f4623f0a71ff | ["MIT"] | null | null | null | plot_data.py | qzane/kmeans-cuda | f2a0e8dd6859cf735c95e1365342f4623f0a71ff | ["MIT"] | null | null | null | plot_data.py | qzane/kmeans-cuda | f2a0e8dd6859cf735c95e1365342f4623f0a71ff | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 27 22:31:17 2018
@author: qzane
"""
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
def read_points(fname):
points = []
with open(fname) as f:
while True:
tmp = f.readline()
if tmp == '':
break
if ',' in tmp:
f1,f2 = tmp.split(',')[:2]
f1,f2 = float(f1), float(f2)
points.append((f1,f2))
return np.array(points)
def read_classes(fname):
classes = []
with open(fname) as f:
while True:
tmp = f.readline()
if tmp == '':
break
_class = int(tmp)
classes.append(_class)
return np.array(classes)
def plot(points, classes):
assert points.shape[0] == classes.shape[0]
num_classes = classes.max()+1
cmap = plt.get_cmap('jet')
colors = [cmap(i) for i in np.linspace(0, 1, num_classes)]
for i in range(num_classes):
plt.plot(points[classes==i,0], points[classes==i,1], 'x', color=colors[i])
plt.show()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('-p', '--points', action='store', type=str, required=True,
help='points.txt')
parser.add_argument('-c', '--classes', action='store', type=str, required=True,
help='classes.txt')
args = parser.parse_args()
points = read_points(args.points)
classes = read_classes(args.classes)
plot(points, classes)
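# Example invocation (file names follow the argparse help strings above):
#   python3 plot_data.py -p points.txt -c classes.txt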
9fff12642cb00ff3e2ce7ae890c3d2b10cbbe1d1 | 8,936 | py | Python | src/WignerFunctionMeasurement.py | ngchihuan/WignerFunc_Measurement | 9c258180da4c1a1ff87b384f0aaf85dc0f92d667 | ["MIT"] | null | null | null | src/WignerFunctionMeasurement.py | ngchihuan/WignerFunc_Measurement | 9c258180da4c1a1ff87b384f0aaf85dc0f92d667 | ["MIT"] | null | null | null | src/WignerFunctionMeasurement.py | ngchihuan/WignerFunc_Measurement | 9c258180da4c1a1ff87b384f0aaf85dc0f92d667 | ["MIT"] | null | null | null |
import os
from os.path import join, isfile
from shutil import Error
from sys import exec_prefix
import numpy as np
import fit
import simple_read_data
from tabulate import tabulate
import logging
np.seterr(all='raise')
class DataFormatError(Exception):
pass
class WrongPathFormat(Exception):
pass
def check_data_format(data):
'''
check if the input data satisfies the following requirements:
1. it is a dictionary {x: [], y: [], yerr: []}.
2. The array must have same size
if the data format is wrong, raise a Type Error
'''
conf = {'x': [], 'y' : [], 'yerr' : [] }
if not check_structure(data, conf):
raise DataFormatError("Wrong format for the input data")
else:
if (np.min(data['y']) < 0 or np.max(data['y'])>1.0):
raise DataFormatError("y is out of range (0,1)")
def print_debug():
debug_msg = 'debug'
return debug_msg
def check_structure(struct, conf):
if isinstance(struct, dict) and isinstance(conf, dict):
# struct is a dict of types or other dicts
return all(k in conf and check_structure(struct[k], conf[k]) for k in struct)
if isinstance(struct, list) and isinstance(conf, list):
# struct is list in the form [type or dict]
return all(check_structure(struct[0], c) for c in conf)
elif isinstance(struct, type):
# struct is the type of conf
return isinstance(conf, struct)
else:
# struct is neither a dict, nor list, not type
return False
class WignerFunc_Measurement():
def __init__(self,fpath,debug=False) -> None:
self.sb_list={} #dictionary that stores sb measurement
self.set_path(fpath)
self.list_all_files()
self.logger = logging.getLogger('WFM')
self.debug = debug
if debug:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.ERROR)
#add stream handler and formatter
c_handler = logging.StreamHandler()
c_format = logging.Formatter('WFM: %(message)s')
c_handler.setFormatter(c_format)
self.logger.addHandler(c_handler)
def set_path(self,fpath) -> None:
try:
os.listdir(fpath)
self.fpath = fpath
except (NotADirectoryError, FileNotFoundError):
self.logger.error('The given path is not a directory')
raise WrongPathFormat
def list_all_files(self):
print('Scanning the directory')
self.files = [f for f in os.listdir(self.fpath) if isfile(join(self.fpath, f)) and os.path.splitext(join(self.fpath,f))[1] in ['','.dat'] ]
self.fullpath_files = sorted( [join(self.fpath,f) for f in os.listdir(self.fpath) if isfile(join(self.fpath, f)) and os.path.splitext(join(self.fpath,f))[1]=='' ] )
if self.files == []:
self.logger.warning('The directory is empty')
else:
print(f'Discovered {len(self.files)} files in the directory')
return self.files
def setup_sbs(self):
print(f'Validating files')
cnt=0
for fname in self.fullpath_files:
try:
sbs = SideBandMeasurement(fname,raw = False,debug= self.debug)
self.sb_list[str(cnt)] = sbs
cnt += 1
except Exception as err:
pass
else:
sbs.eval_parity()
print(f'Discovered {cnt} valid files with right data format\n')
def get_files(self):
return self.files
def print_report(self):
print('Report summary \n')
t=[[key,sb.folder, sb.short_fname, sb.parity, sb.err_log] for key,sb in self.sb_list.items()]
print(tabulate(t, headers=['id', 'folder','filename', 'parity','Errors']))
def refit(self,id,weights=[],omega=None,gamma=None):
'''
Refit a sideband measurement using new weights, omega and gamma.
'''
if id >= len(self.sb_list):
self.logger.warning('id is out of range')
return
else:
sb_target = self.sb_list[str(id)]
sb_target.reset_log_err()
print(f'Refitting Sideband measurement {sb_target.fname}')
if omega is not None:
sb_target.set_Omega(omega)
if len(weights) != 0:
sb_target.set_weight(weights)
if gamma is not None:
sb_target.set_gamma(gamma)
sb_target.eval_parity()
def show_errors(self):
pass
class SideBandMeasurement():
def __init__(self,fname,raw = False, debug = False ) -> None:
self.fname = fname
self.xy = dict((el,[]) for el in ['x','y','yerr'])
self.plot = None
self.parity = None
self.raw = raw
self.weight = [1, 0, 0]
self.Omega_0 = 0.05
self.gamma = 7e-4
self.offset = 0.0
#internal logging
self.err_log=[]
#logging
self.logger= logging.getLogger(self.fname)
#add stream handler and formatter
c_handler = logging.StreamHandler()
c_format = logging.Formatter('SBM: %(message)s')
c_handler.setFormatter(c_format)
self.logger.addHandler(c_handler)
if debug:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.ERROR)
#extract folder name and fname only
(self.folder, self.short_fname) = self.fname.split("/")[-2:]
#verify if the data file is valid
try:
np.genfromtxt(self.fname)
except IOError as err:
#self.logger.exception('file \'%s\' is not found' %(self.fname) )
raise
try:
self.extract_xy()
except ValueError as err:
#self.logger.exception(err)
raise
def log_err(self,errors):
self.err_log.append(errors)
def reset_log_err(self):
self.err_log=[]
def set_Omega(self,omega):
try:
self.Omega_0 = float(omega)
except ValueError as error:
self.logger.error(f'Rabi frequency must be a number: {error}')
raise
def set_gamma(self,gamma):
try:
self.gamma = float(gamma)
except ValueError as error:
self.logger.error(f'gamma must be a number: {error}')
raise
def set_weight(self,weight) -> None:
self.logger.debug(f'Set weight when fitting sb {self.fname}')
try:
self.weight = [float(i) for i in weight]
except (TypeError,ValueError) as err:
self.logger.error(f'Failed to set weights: {err}')
raise
def extract_xy(self):
'''
Extract xy data from the data files
'''
if self.raw:
try:
(self.xy['x'], self.xy['y'], self.xy['yerr'],_,_) = simple_read_data.get_x_y(self.fname)
except Exception as err:
raise
else:
try:
self.xy['x'], self.xy['y'], self.xy['yerr'] = tuple(np.genfromtxt(self.fname))
except ValueError as err:
raise ValueError(f'{self.short_fname} has wrong data format')
def extract_pop(self):
try:
self.fit_res = fit.fit_sum_multi_sine_offset(self.xy['x'], self.xy['y'], self.xy['yerr'], self.weight, self.Omega_0, self.gamma, offset = self.offset, rsb=False\
,gamma_fixed=False,customized_bound_population=None,debug=False)
except FloatingPointError as err:
#self.logger.warning('There is a measurement with zero uncertainty')
self.log_err('zero sigma')
except Exception as err:
self.log_err('unexpected error in fitting')
#raise RuntimeError('Could not fit')
else:
redchi = self.fit_res['reduced_chi square']
if (redchi>10 or redchi<0):
#self.logger.warning(f'Could not fit well')
self.log_err(f'Could not fit well, redchi = {round(redchi,2)}')
return self.fit_res
def eval_parity(self):
self.logger.debug(f'Evaluate parity of {self.fname}')
res = self.extract_pop()
if res is not None:
self.weight_fit = res['weight fit']
self.parity = 0
for i,j in enumerate(self.weight_fit):
if i%2 == 0:
self.parity += j*1
else:
self.parity += j*(-1)
return self.parity
#use map and filter to do it in a better way???
def plotxy(self):
self.plot = None
if __name__ == '__main__':
fpath ='../tests/test_data'
wfm1 = WignerFunc_Measurement(fpath)
wfm1.setup_sbs()
wfm1.print_report()
b000e8e09627008c8e1b4d9bdfd0f7e449d23a7e | 1,729 | py | Python | falmer/content/models/scheme.py | sussexstudent/services-api | ae735bd9d6177002c3d986e5c19a78102233308f | ["MIT"] | 2 | 2017-04-27T19:35:59.000Z | 2017-06-13T16:19:33.000Z | falmer/content/models/scheme.py | sussexstudent/falmer | ae735bd9d6177002c3d986e5c19a78102233308f | ["MIT"] | 975 | 2017-04-13T11:31:07.000Z | 2022-02-10T07:46:18.000Z | falmer/content/models/scheme.py | sussexstudent/services-api | ae735bd9d6177002c3d986e5c19a78102233308f | ["MIT"] | 3 | 2018-05-09T06:42:25.000Z | 2020-12-10T18:29:30.000Z |
from django.db import models
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel, MultiFieldPanel
from wagtail.core.blocks import StreamBlock
from wagtail.core.fields import StreamField
from wagtail.images.edit_handlers import ImageChooserPanel
from falmer.content import components
from falmer.content.components.structures import sidebar_card
from falmer.content.models.mixins import SocialMediaMixin
from falmer.matte.models import MatteImage
from .core import Page
class SchemePage(Page, SocialMediaMixin):
subpage_types = []
parent_page_types = ('content.SchemeIndexPage', )
main = StreamField(
StreamBlock([
components.text.to_pair(),
]), verbose_name='Main Content',
null=True, blank=True
)
hero_image = models.ForeignKey(MatteImage, null=False, blank=False, on_delete=models.PROTECT)
sidebar_cards = StreamField([
sidebar_card.to_pair()
], blank=True)
content_panels = Page.content_panels + [
StreamFieldPanel('main'),
ImageChooserPanel('hero_image'),
StreamFieldPanel('sidebar_cards'),
MultiFieldPanel((
FieldPanel('social_facebook_url'),
FieldPanel('social_twitter_handle'),
FieldPanel('social_snapchat_handle'),
FieldPanel('social_instagram_handle'),
FieldPanel('social_email_address'),
)),
]
api_fields = [
'hero_image',
]
class SchemeIndexPage(Page):
subpage_types = (SchemePage, )
preamble = StreamField([
components.text.to_pair(),
])
content_panels = Page.content_panels + [
StreamFieldPanel('preamble'),
]
api_fields = [
'preamble',
]
b00272462aa831ed8359bfb1b05ac3991b3aef99 | 956 | py | Python | src/marion/marion/tests/test_fields.py | openfun/marion | bf06b64bf78bca16685e62ff14b66897c1dbe80c | ["MIT"] | 7 | 2021-04-06T20:33:31.000Z | 2021-09-30T23:29:24.000Z | src/marion/marion/tests/test_fields.py | openfun/marion | bf06b64bf78bca16685e62ff14b66897c1dbe80c | ["MIT"] | 23 | 2020-09-09T15:01:50.000Z | 2022-01-03T08:58:36.000Z | src/marion/marion/tests/test_fields.py | openfun/marion | bf06b64bf78bca16685e62ff14b66897c1dbe80c | ["MIT"] | 2 | 2020-12-14T10:07:07.000Z | 2021-06-29T00:20:43.000Z |
"""Tests for the marion application fields"""
from marion.defaults import DocumentIssuerChoices
from ..fields import IssuerLazyChoiceField, LazyChoiceField
def test_fields_lazy_choice_field():
"""
LazyChoiceField class.
Choices instance attribute should not be customizable.
"""
field = LazyChoiceField(
name="lazy_choice_field",
choices=[("option1", "Option 1"), ("option2", "Option 2")],
max_length=200,
)
errors = field.check()
assert len(errors) == 0
assert field.choices == []
def test_fields_issuer_lazy_choice_field(settings):
"""
IssuerLazyChoiceField class.
Choices attribute relies on DOCUMENT_ISSUER_CHOICES_CLASS setting.
"""
settings.MARION_DOCUMENT_ISSUER_CHOICES_CLASS = (
"marion.defaults.DocumentIssuerChoices"
)
field = IssuerLazyChoiceField(name="issuer_lazy_choice_field")
assert field.choices == DocumentIssuerChoices.choices
b00495771d6a310aa5e5d77c1c05c91690f9a756 | 2,331 | py | Python | ObjectTrackingDrone/colorpickerusingTello.py | udayagopi587/ArealRobotics_AutonomousDrone | 6bc10ee167076086abb3b2eef311ae43f457f21d | ["MIT"] | 1 | 2022-03-12T00:47:24.000Z | 2022-03-12T00:47:24.000Z | ObjectTrackingDrone/colorpickerusingTello.py | udayagopi587/ArealRobotics_AutonomousDrone | 6bc10ee167076086abb3b2eef311ae43f457f21d | ["MIT"] | null | null | null | ObjectTrackingDrone/colorpickerusingTello.py | udayagopi587/ArealRobotics_AutonomousDrone | 6bc10ee167076086abb3b2eef311ae43f457f21d | ["MIT"] | 1 | 2022-03-14T23:42:57.000Z | 2022-03-14T23:42:57.000Z |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 3 12:15:40 2022
@author: udaya
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 27 18:06:29 2022
@author: udaya
"""
import cv2
import numpy as np
from djitellopy import Tello
frameWidth = 640
frameHeight = 480
###############################
# CONNECT TO TELLO
def initializeTello():
myDrone = Tello()
myDrone.connect()
myDrone.for_back_velocity = 0
myDrone.left_right_velocity = 0
myDrone.up_down_velocity = 0
myDrone.yaw_velocity = 0
myDrone.speed = 0
print(myDrone.get_battery())
myDrone.streamoff()  # Turning off the streams, if any previous streams were on
myDrone.streamon()
return myDrone
# cap = cv2.VideoCapture(0)
# cap.set(3, frameWidth)
# cap.set(4, frameHeight)
def telloGetFrame(myDrone, w= 360,h=240):
myFrame = myDrone.get_frame_read()
myFrame = myFrame.frame
img = cv2.resize(myFrame,(w,h))
return img
def empty(a):
pass
myDrone = initializeTello()
cv2.namedWindow("HSV")
cv2.resizeWindow("HSV", 640, 240)
cv2.createTrackbar("HUE Min", "HSV", 0, 179, empty)
cv2.createTrackbar("HUE Max", "HSV", 179, 179, empty)
cv2.createTrackbar("SAT Min", "HSV", 0, 255, empty)
cv2.createTrackbar("SAT Max", "HSV", 255, 255, empty)
cv2.createTrackbar("VALUE Min", "HSV", 0, 255, empty)
cv2.createTrackbar("VALUE Max", "HSV", 255, 255, empty)
while True:
img = telloGetFrame(myDrone, frameWidth, frameHeight)
imgHsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h_min = cv2.getTrackbarPos("HUE Min", "HSV")
h_max = cv2.getTrackbarPos("HUE Max", "HSV")
s_min = cv2.getTrackbarPos("SAT Min", "HSV")
s_max = cv2.getTrackbarPos("SAT Max", "HSV")
v_min = cv2.getTrackbarPos("VALUE Min", "HSV")
v_max = cv2.getTrackbarPos("VALUE Max", "HSV")
print(h_min)
lower = np.array([h_min, s_min, v_min])
upper = np.array([h_max, s_max, v_max])
mask = cv2.inRange(imgHsv, lower, upper)
result = cv2.bitwise_and(img, img, mask=mask)
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
hStack = np.hstack([img, mask, result])
cv2.imshow('Horizontal Stacking', hStack)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
#cap.release()
cv2.destroyAllWindows()
b0050cae1ff0c2350a07478cbaf2f32a1d466c54 | 16,101 | py | Python | climetlab_plugin_tools/create_plugin_cmd.py | ecmwf-lab/climetlab-plugin-tools | 52fc1c6c07958ecfb8a5c946f4851725832b3cd0 | ["Apache-2.0"] | null | null | null | climetlab_plugin_tools/create_plugin_cmd.py | ecmwf-lab/climetlab-plugin-tools | 52fc1c6c07958ecfb8a5c946f4851725832b3cd0 | ["Apache-2.0"] | null | null | null | climetlab_plugin_tools/create_plugin_cmd.py | ecmwf-lab/climetlab-plugin-tools | 52fc1c6c07958ecfb8a5c946f4851725832b3cd0 | ["Apache-2.0"] | null | null | null |
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import configparser
import datetime
import logging
import os
import pathlib
from climetlab.scripts.tools import parse_args
from .str_utils import CamelCase, alphanum, camelCase, dashes, underscores
LOG = logging.getLogger(__name__)
# import climetlab.debug
APACHE_LICENCE = """This software is licensed under the terms of the Apache Licence Version 2.0
which can be obtained at http://www.apache.org/licenses/LICENSE-2.0."""
PREFIX_ECMWF_LICENCE = (
"""(C) Copyright {year} European Centre for Medium-Range Weather Forecasts."""
)
POSTFIX_ECMWF_LICENCE = """In applying this licence, ECMWF does not waive the privileges and immunities
granted to it by virtue of its status as an intergovernmental organisation
nor does it submit to any jurisdiction."""
class PluginContext:
def __init__(self, kind, **kwargs):
self.kind = kind
self._transformers = {}
self.kwargs = kwargs
def fill_form(self):
for t in TRANSFORMERS_CLASSES[self.kind]:
t(self)
@property
def template_dir(self):
here = os.path.dirname(__file__)
return os.path.realpath(os.path.join(here, "templates", self.kind))
@property
def output_dir(self):
return self("climetlab-plugin-name-climetlab-template")
def check_output_dir(self):
if os.path.exists(self.output_dir):
raise Exception(
f"Folder {self.output_dir} already exists. Not overwriting it."
)
def create_plugin(self):
self.check_output_dir()
for path in self.template_files_list():
template = os.path.join(self.template_dir, path)
output = os.path.join(self.output_dir, path)
output = self(output)
LOG.info(f"Creating {output}")
with open(template, "r") as f:
txt = f.read()
txt = self(txt)
os.makedirs(os.path.dirname(output), exist_ok=True)
with open(output, "w") as f:
f.write(txt)
print(f"Plugin built in {self.output_dir}")
print(self.final_help())
def template_files_list(self):
cwd = os.getcwd()
os.chdir(self.template_dir)
lst = [str(f) for f in pathlib.Path(".").glob("**/*") if os.path.isfile(str(f))]
# TODO: find a nicer way to avoid __pycache__ folders.
lst = [f for f in lst if "__pycache__" not in f]
os.chdir(cwd)
return lst
def __call__(self, txt):
if txt is None:
return None
assert isinstance(txt, str), txt
original = txt
for k, transformer in self._transformers.items():
txt = transformer(txt)
if txt != original:
txt = self(txt)
return txt
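# Note: replacements are re-applied until the text stops changing, so one
# transformer's substitution may itself contain another transformer's pattern
# (e.g. a dataset name that embeds the plugin name).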
def get_default_email(self):
try:
return self._gitconfig("email")
except: # noqa:E722
return f'{self._transformers["full_name"].value.replace(" ", ".").lower()}@example.com'
def get_default_full_name(self):
try:
return self._gitconfig("name")
except: # noqa:E722
return "Joe Developer"
def _gitconfig(self, key):
if os.environ.get("CLIMETLAB_PLUGIN_TOOLS_NO_GUESS"):
raise Exception("CLIMETLAB_PLUGIN_TOOLS_NO_GUESS is set.")
config = configparser.ConfigParser()
gitconfig = os.path.expanduser("~/.gitconfig")
config.read(gitconfig)
value = config["user"][key]
LOG.info(f"Found {key} in gitconfig {value}")
return value
def final_help(self):
txt = """
--------------------------------------------------------------------
Climetlab plugin generated successfully. Next steps:
1. Create a repository on github at http://github.com/repo_url_climetlab_template.
2. Push to the repository as instructed by github:
cd climetlab-plugin-name-climetlab-template
git init
git add .
git commit -m'first commit'
git branch -M main
git remote add origin http://github.com/repo_url_climetlab_template
git push --set-upstream origin main
[Optional: See tests running http://github.com/repo_url_climetlab_template/actions]
3. Publish to PyPI (pip) manually:
python -m pip install --upgrade pip
pip install setuptools wheel twine
twine upload dist/*
# Needs a PyPI login/password (create an account at https://pypi.org)
Others can now do `pip install climetlab-plugin-name-climetlab-template`.
4. Publish automatically from Github to pypi. [Optional]
Edit climetlab-plugin-name-climetlab-template/.github/workflows/check-and-publish to point to pypi instead of test.pypi.
Create a token from pypi at https://pypi.org/manage/account/token/
Add the token as a Github secret on the name PYPI_API_TOKEN at https://github.com/repo_url_climetlab_template/settings/secrets/actions/new
You are all set! Push the github repository and release from http://github.com/repo_url_climetlab_template/releases/new.
""" # noqa: E501
return self(txt)
class Transformer:
_help = ""
glob = None
def __init__(
self,
context,
key,
default=None,
pattern=None,
value=None,
force_prefix="",
):
LOG.debug(f"New Transformer({key})")
self._context = context
self.key = key
self.default = self._context(default)
self.force_prefix = self._context(force_prefix)
self.pattern = pattern
self.value = value
self.help = self._context(self._help)
self.fill()
LOG.debug(f"Transformer({key}) created")
def __repr__(self) -> str:
return f"Transformer({self.key}, pattern={self.pattern}, value={self.value})"
def fill(self):
if self.pattern is None:
self.pattern = self.key
if not self.glob:
self.adapts = [lambda x: x]
elif self.glob is True:
self.adapts = [underscores, dashes, CamelCase, camelCase]
else:
self.adapts = self.glob
self.read_value()
self.pattern = self.pattern + "_climetlab_template"
self._context._transformers[self.key] = self
def prompt(self):
return f"Please enter {self.desc} ('?' for help)"
def default_prompt(self):
if self.default:
return f"Hit 'return' to use the default value '{self.force_prefix}{self.default}'"
return ""
def try_reading_from_context(self):
if self._context.kwargs.get(self.key, None):
self.value = self._context.kwargs[self.key]
assert isinstance(self.value, str)
assert isinstance(self.force_prefix, str)
print(f"\n--> Using {self.force_prefix + self.value} (from command line)")
return True
def try_reading_from_user(self):
print()
value = input(">>>> " + self.force_prefix)
if value == "h" or value == "?":
print(f"?\n {self.help}")
if self.default is not None:
print(f" Default value: {self.force_prefix}{self.default}")
return self.try_reading_from_user()
if value:
self.value = value
print(f"\n--> Using {self.force_prefix + self.value}")
return True
def try_reading_from_default(self):
if self.default is not None:
print(f"\n--> Using {self.force_prefix + self.default} (default)")
self.value = self.default
return True
def read_value(self):
print()
print(self.prompt())
print(self.default_prompt())
if self.try_reading_from_context():
return
if self.try_reading_from_user():
return
if self.try_reading_from_default():
return
return self.read_value()
def __call__(self, txt):
for adapt in self.adapts:
p = adapt(self.pattern)
v = adapt(self.value)
if p in txt:
LOG.debug(f'Replacing "{p}" by "{v}"')
LOG.debug(f" k={self.key}")
LOG.debug(f" p: {self.pattern} -> {p}")
LOG.debug(f" v: {self.value} -> {v}")
txt = txt.replace(p, v)
return txt
class NoPromptTransformer(Transformer):
def read_value(self):
LOG.debug(f"{self.key}: not prompt using {self.value}.")
class GlobNoPromptTransformer(NoPromptTransformer):
glob = True
class SourceNameTransformer(GlobNoPromptTransformer):
def __init__(self, context):
name = context._transformers["plugin_name"].value
if name.endswith("-source"):
name = name[:-7]
super().__init__(context, "source_name", value=name)
class DatasetNameTransformer(Transformer):
desc = "the dataset name"
_help = """The dataset name is used as follow:
A climetlab dataset plugin package can provide one or more
datasets. This script creates a plugin with one dataset.
The dataset name will be used by the end users to access
the data through CliMetLab with:
cml.load_dataset("dataset-name", ...)
The convention is to make the dataset name start with
"plugin-name-climetlab-template".
The dataset name can easily be modified afterwards, without
regenerating a new plugin, simply by editing the setup.py."""
glob = True
def __init__(self, context):
super().__init__(
context,
"dataset_name",
default="",
force_prefix="plugin-name-climetlab-template",
)
def fill(self):
super().fill()
self.value = dashes(self.value).lower()
self.value = alphanum(self.value)
if self.value:
while self.value.startswith("-"):
self.value = self.value[1:]
name = "plugin-name-climetlab-template" + "-" + self.value
else:
self.value = "main"
name = "plugin-name-climetlab-template"
name = self._context(name)
GlobNoPromptTransformer(self._context, "dataset_full_name", value=name)
class PluginNameTransformer(Transformer):
desc = "the plugin name"
_help = """The plugin name is used to define:
- The python package name `import climetlab_{plugin_name} `
- The pip package name `pip install climetlab-{plugin-name}`.
It will also be used to suggest an appropriate URL on github.
The plugin_name can be the name of the project you are working on,
but notice that it should be specific enough as only one plugin with
a given name can be installed. Highly generic names (such as "meteo",
"domain", "copernicus", "country-name") are not recommended.
The plugin name cannot be easily modified afterwards.
You would need to generate a new plugin and copy the existing code over.
glob = True
def __init__(self, context):
super().__init__(
context,
"plugin_name",
default="my_plugin",
)
context.check_output_dir()
class EmailTransformer(Transformer):
desc = "your email"
_help = """The email is used in setup.py to define the email maintainer of the pip package."""
def __init__(self, context):
super().__init__(
context,
"email",
default=context.get_default_email(),
)
class GithubUsernameTransformer(Transformer):
desc = "your Github user name"
_help = """The github username (or github space name) is used
to suggest a github repository url.
The username (ecmwf-lab) should be used if you wish to host your
repository on the github space "https://github.com/ecmwf-lab/").
Else, please provide your own github user name."""
def __init__(self, context):
super().__init__(
context,
"github_username",
default="ecmwf-lab",
)
class FullNameTransformer(Transformer):
desc = "your full name"
_help = """The full name is used in setup.py to define the maintainer of the pip package."""
def __init__(self, context):
super().__init__(
context,
"full_name",
default=context.get_default_full_name(),
)
class RepoUrlTransformer(Transformer):
desc = "the repository url"
_help = """The repository url name is used to define:
- The package url in the setup.py, i.e. the url published in Pypi for pip.
- The links in the README file.
If your do not want to host you repository on github,
please edit manually the generated setup.py afterwards."""
def __init__(self, context):
super().__init__(
context,
"repo_url",
default="github_username_climetlab_template/climetlab-plugin-name-climetlab-template",
force_prefix="https://github.com/",
)
class LicenceTransformer(Transformer):
_help = """The APACHE 2.0 licence is used for the plugin code.
Most users should answer "n" to use the standard APACHE 2.0 licence.
ECMWF users should answer "y" to include the ECMWF additions to the licence.
The licence is added in the plugin code:
- In the header of each python file.
- In the LICENSE file.
- In the README.
If you choose another licence, please modify these files manually afterwards."""
desc = "Use the modified APACHE licence with ECMWF additions?"
def __init__(self, context):
super().__init__(context, "licence")
def prompt(self):
return f"{self.desc} ('y' or 'n', '?' for help)"
def fill(self):
self.read_value()
self.value = dict(y=True, n=False)[self.value.lower()]
self.year = str(datetime.datetime.now().year)
licence = APACHE_LICENCE
if self.value:
licence = "\n".join([PREFIX_ECMWF_LICENCE, licence, POSTFIX_ECMWF_LICENCE])
licence = licence.format(year=self.year)
print(f" Using this licence:\n{licence}\n")
NoPromptTransformer(self._context, "year_licence", value=str(self.year))
NoPromptTransformer(self._context, "licence_txt", value=licence)
NoPromptTransformer(self._context, "license_txt", value=licence)
licence_with_sharp = "\n".join(["# " + line for line in licence.split("\n")])
NoPromptTransformer(
self._context,
"licence_header",
pattern="# licence_header",
value=licence_with_sharp,
)
NoPromptTransformer(
self._context,
"license_header",
pattern="# license_header",
value=licence_with_sharp,
)
TRANSFORMERS_CLASSES = {
"dataset": [
PluginNameTransformer,
DatasetNameTransformer,
FullNameTransformer,
EmailTransformer,
GithubUsernameTransformer,
RepoUrlTransformer,
LicenceTransformer,
],
"source": [
PluginNameTransformer,
SourceNameTransformer,
FullNameTransformer,
EmailTransformer,
GithubUsernameTransformer,
RepoUrlTransformer,
LicenceTransformer,
],
}
class CreateDatasetPluginCmd:
@parse_args(
name=dict(help="Plugin name"),
dataset=dict(help="Dataset name"),
)
def do_plugin_create_dataset(self, args):
context = PluginContext(
"dataset", plugin_name=args.name, dataset_name=args.dataset
)
context.fill_form()
context.create_plugin()
class CreateSourcePluginCmd:
@parse_args(name=dict(help="Plugin name"))
def do_plugin_create_source(self, args):
context = PluginContext("source", plugin_name=args.name)
context.fill_form()
context.create_plugin()
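# A hedged usage sketch mirroring CreateDatasetPluginCmd above, not an official
# entry point. fill_form() still prompts interactively for anything not passed
# as a kwarg (e.g. the licence question), so run it from a terminal; the
# "demo"/"rain" names below are placeholders.
if __name__ == "__main__":
    context = PluginContext("dataset", plugin_name="demo", dataset_name="rain")
    context.fill_form()
    context.create_plugin()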
b00bb16d432ae4e7eebbd1a8f438f11ad4838ec1 | 1,141 | py | Python | openCVTutorials/openCVimgChangeColorspaceTutorial.py | nahutch/BasketballAI_P1 | ["Apache-2.0"] | 1 star (2019-01-24)
#following tutorial: https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html#converting-colorspaces
import numpy as np
import cv2
# there are more than 150 color-space conversion methods available in OpenCV
#why so many?
#gets all possible color space conversion flags
flags = [i for i in dir(cv2) if i.startswith("COLOR_")]
#print (flags)
#converts a bgr color to hsv
green = np.uint8([[[0,255,0]]])
hsv_green = cv2.cvtColor(green,cv2.COLOR_BGR2HSV)
print (hsv_green)
#extracts any blue colored object using the built in video camera
#can detect my blue eyes if I get close and widen them
cap = cv2.VideoCapture(0)
while(1):
#take each frame
_, frame = cap.read()
hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
lower_blue = np.array([110,50,50])
upper_blue = np.array([130,255,255])
mask = cv2.inRange(hsv,lower_blue,upper_blue)
res = cv2.bitwise_and(frame,frame,mask=mask)
cv2.imshow("frame",frame)
cv2.imshow("mask",mask)
cv2.imshow("result",res)
    k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cap.release()  # release the camera before closing the windows
cv2.destroyAllWindows()
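# A small follow-on sketch, assuming the same cvtColor trick shown above can be
# reused to derive HSV bounds for another color (green here); the +/-10 hue
# margin is a common heuristic, not a value taken from the tutorial.
import numpy as np
import cv2
green = np.uint8([[[0, 255, 0]]])
hue = int(cv2.cvtColor(green, cv2.COLOR_BGR2HSV)[0][0][0])
lower_green = np.array([max(hue - 10, 0), 50, 50])
upper_green = np.array([min(hue + 10, 179), 255, 255])
print(lower_green, upper_green)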
b00d6bcbdc91daedbc8ff5cedd805b13268a4bca | 7,026 | py | Python | src/model1_predict.py | shubhampachori12110095/FashionAI-Clothing-Attribute-Labels-Classification | ["Apache-2.0"] | 2 stars (2018-12-29 to 2020-08-07)
# -*- coding: UTF-8 -*-
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import json
import cv2
from sklearn.model_selection import train_test_split
import matplotlib
from keras.utils import np_utils
from keras.optimizers import *
from keras.preprocessing.image import ImageDataGenerator
from fashionAI.config import config
from fashionAI.Utils.preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from fashionAI.Utils.preprocessing.simplepreprocessor import SimplePreprocessor
from fashionAI.Utils.preprocessing.meanpreprocessor import MeanPreprocessor
from fashionAI.Utils.preprocessing.patchpreprocessor import PatchPreprocessor
from fashionAI.Utils.preprocessing.croppreprocessor import CropPreprocessor
from fashionAI.callbacks.trainingmonitor import TrainingMonitor
from fashionAI.Utils.io.datagenerator import DataGenerator
from fashionAI.nn.inceptionresnet_v2 import InceptionResnetV2
def predict1(testb_data_root_path = '/data/Attributes/Round2b/', output_csv_root_path = '../'):
df_test = pd.read_csv(testb_data_root_path+'Tests/question.csv', header=None)
df_test.columns = ['image_id', 'class', 'x']
del df_test['x']
##########attributes setting##
classes = ['collar_design_labels', 'lapel_design_labels', 'neck_design_labels', 'neckline_design_labels',
'coat_length_labels', 'pant_length_labels', 'skirt_length_labels','sleeve_length_labels']
design_classes = ['collar_design_labels', 'lapel_design_labels', 'neck_design_labels', 'neckline_design_labels']
design_label_count = {'collar': 5,'lapel': 5,'neck': 5,'neckline': 10}
length_classes = ['coat_length_labels', 'pant_length_labels', 'skirt_length_labels','sleeve_length_labels']
length_label_count = {'coat': 8,'pant': 6,'skirt': 6,'sleeve': 9}
##########model############
incepres1 = InceptionResnetV2(500, 500, design_label_count, weight_decay=0.001)
design_model = incepres1.build_net()
incepres2 = InceptionResnetV2(500, 500, length_label_count, weight_decay=0.001)
length_model = incepres2.build_net()
design_model.load_weights('../models/model1/multitask_design_final.h5')
length_model.load_weights('../models/model1/multitask_length_final.h5')
##########functions#########
    pre_resize = SimplePreprocessor(530, 530)  # use OpenCV to resize to 530*530
    cp = CropPreprocessor(500, 500)  # for the 10-crop step, 530*530 -> 500*500
iap = ImageToArrayPreprocessor() # transform data format
design_means = json.loads(open('./model1_mean/multitask_mean_design.json').read())
length_means = json.loads(open('./model1_mean/multitask_mean_length.json').read())
design_mp = MeanPreprocessor(design_means['R'], design_means['G'], design_means['B'])
length_mp = MeanPreprocessor(length_means['R'], length_means['G'], length_means['B'])
val_aug = ImageDataGenerator(rescale=1./255)
##########predict############
tmp_df = {}
print('model1\'s design_model:predict design')
cnt = 0
for idx in range(4):
print()
cur_class = design_classes[idx]
df_load = df_test[(df_test['class'] == cur_class)].copy()
df_load.reset_index(inplace=True)
del df_load['index']
X_test = [testb_data_root_path + test_img for test_img in df_load['image_id']]
        print('design samples num-{0}:'.format(cur_class), len(X_test))
print('[INFO] predicting on test data (with crops)...')
print()
testGen = DataGenerator((X_test, None), 32, aug=val_aug, preprocessors=[pre_resize, design_mp])
predictions = []
for (i, images) in enumerate(testGen.generator(training=False, passes=1)):
if i % 10 == 0:
print('{}_test_batch_num/epochs:{}/{}'.format(cur_class, i, int(len(X_test) / 32)))
for image in images:
crops = cp.preprocess(image)
crops = np.array([iap.preprocess(c) for c in crops], dtype='float32')
pred = design_model.predict(crops)
predictions.append(pred[idx].mean(axis=0))
result = []
for i in range(len(X_test)):
tmp_list = predictions[i]
tmp_result = ''
for tmp_ret in tmp_list:
tmp_result += '{:.4f};'.format(tmp_ret)
print(X_test[i].split('/')[-1],' predicted: ', tmp_result[:-1])
result.append(tmp_result[:-1])
cnt = cnt +1
df_load['result'] = result
print(len(df_load))
tmp_df[cur_class] = df_load.copy()
    print('Model1-design_model completed the design prediction on {0} samples'.format(cnt))
print('############################################')
print()
    print('next is to predict length using Model1-length_model')
print('model1\'s length_model:predict length')
cnt = 0
for idx in range(4):
print()
cur_class = length_classes[idx]
df_load = df_test[(df_test['class'] == cur_class)].copy()
df_load.reset_index(inplace=True)
del df_load['index']
X_test = [testb_data_root_path + test_img for test_img in df_load['image_id']]
print('length samples num-{0}:'.format(cur_class), len(X_test))
print('[INFO] predicting on test data (with crops)...')
print()
testGen = DataGenerator((X_test, None), 32, aug=val_aug, preprocessors=[pre_resize, length_mp])
predictions = []
for (i, images) in enumerate(testGen.generator(training=False, passes=1)):
            if i % 10 == 0:
print('{}_test_batch_num/epochs:{}/{}'.format(cur_class, i, int(len(X_test) / 32)))
for image in images:
crops = cp.preprocess(image)
crops = np.array([iap.preprocess(c) for c in crops], dtype='float32')
pred = length_model.predict(crops)
predictions.append(pred[idx].mean(axis=0))
result = []
for i in range(len(X_test)):
tmp_list = predictions[i]
tmp_result = ''
for tmp_ret in tmp_list:
tmp_result += '{:.4f};'.format(tmp_ret)
print(X_test[i].split('/')[-1], ' predicted: ', tmp_result[:-1])
result.append(tmp_result[:-1])
cnt = cnt + 1
df_load['result'] = result
print(len(df_load))
tmp_df[cur_class] = df_load.copy()
    print('Model1-length_model completed the length prediction on {0} samples'.format(cnt))
print()
print('Complete!')
###########output csv######
df_result = []
for cur in classes:
tmp = tmp_df[cur]
tmp.reset_index(inplace=True)
del tmp['index']
df_result.append(tmp)
for i in df_result:
i.columns = ['image_id', 'class', 'label']
result = pd.concat(df_result)
result.to_csv('../output/model1_result.csv', index=None, header=None)
print('model1 predicts the {} samples'.format(len(result)))
###result1###
return result
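# Hypothetical invocation sketch: the paths simply mirror the defaults of
# predict1() above and are assumptions about the runtime environment, not
# verified locations.
if __name__ == '__main__':
    result = predict1(testb_data_root_path='/data/Attributes/Round2b/',
                      output_csv_root_path='../')
    print('model1 returned {} rows'.format(len(result)))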
b00f7bd4e39ef2e25f158e39f9604eb34518aa71 | 815 | py | Python | test_parametrized_tests.py | karianjahi/python_pytest_tutorial | ["MIT"]
"""
Organizing tests and parametrizing
"""
# Parametrized tests: Run many tests in one
# pylint: disable=W0622
# pylint: disable=R0201
# pylint: disable=R0903
import pytest
from word_counter import count_words
class TestWordCounterParametrization:
"""
    In this case we want to run many test cases through one test function
"""
Tests = [
("Today is Monday", 3),
("head", 1),
("He jumps", 2),
("He\nis\nnot\nfeeling\nwell", 5),
("Mein Hände", 2),
('<h1>This is a heading</h1>', 4),
('<h1 class="foo">this is a heading</h1>', 5),
("Joseph-Njeri", 2)
]
@pytest.mark.parametrize('input, output', Tests)
def test_all_in_one(self, input, output):
"""
Testing all in one
"""
assert count_words(input) == output
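# For reference, one possible count_words implementation consistent with every
# parametrized case above (splitting on whitespace and hyphens); the real
# word_counter module may well differ.
import re

def count_words(text):
    return len([w for w in re.split(r"[\s-]+", text) if w])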
b0110b071338ec4840e5427dcade83815657e854 | 1,685 | py | Python | src/dep_appearances/cli.py | jdlubrano/dep-appearances | ["MIT"]
from argparse import ArgumentParser
import os
from dep_appearances.appearances_report import AppearancesReport
def main():
parser = ArgumentParser(description='Find dependencies that are unused and underused in your codebase.')
parser.add_argument(
'project_root',
metavar='PATH',
type=str,
nargs='?',
default=os.getcwd(),
help="The path to your project's root (defaults to your current working directory)"
)
parser.add_argument(
'--underused_threshold',
type=int,
default=2,
help='The threshold to set for marking dependencies as underused (default: 2)'
)
args = parser.parse_args()
report = AppearancesReport(project_root=args.project_root).compile()
unused_dependencies = report.unused_dependencies()
underused_dependencies = report.underused_dependencies(usage_threshold=args.underused_threshold)
if len(unused_dependencies) == 0:
print("No unused dependencies found")
else:
print("Unused dependencies:")
for dep in unused_dependencies:
print(f"\t{dep.name}")
print("")
if len(underused_dependencies) == 0:
print("No underused dependencies found")
else:
print(f"Underused dependencies (usage threshold = {args.underused_threshold}):")
for dep in underused_dependencies:
print(f"\t{dep.name}\n\t\timported in:")
for import_statement in dep.import_statements:
print(f"\t\t{os.path.relpath(import_statement.source_file)}:{import_statement.line_number}")
print("")
if __name__ == "__main__":
main()
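# Hedged sketch of driving the argparse CLI above programmatically; patching
# sys.argv is standard argparse behavior, and the project path is a placeholder.
import sys
from dep_appearances.cli import main

sys.argv = ["dep_appearances", "/path/to/project", "--underused_threshold", "3"]
main()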
b0134690af47b5e16baf709ce4dca459913ce34e | 1,175 | py | Python | pyfirmata_tmp36_MQ7_Mysql.py | amy861113/Arduino | ["MIT"]
from pyfirmata import Arduino, util
from time import sleep
import pymysql
def arduino_map(x, in_min, in_max, out_min, out_max):
return(x-in_min) * (out_max-out_min) / (in_max-in_min) + out_min
PORT = "COM4"
uno = Arduino(PORT)
sleep(5)
it = util.Iterator(uno)
it.start()
a4 = uno.get_pin('a:4:i')
a5 = uno.get_pin('a:5:i')
db = pymysql.connect("120.110.114.14", "hanshin", "Hanshin519", "Student", port = 3306)
cursor = db.cursor()
print("Arduino start~")
try:
while True:
gas = a4.read()
tmp = a5.read()
try:
gasValue = round(gas * 1024)
Vout = arduino_map(tmp, 0, 1, 0, 5)
tmpValue = round((((Vout * 1000) - 500) / 10) , 2)
#tmpValue = ((round(tmp * 1024)) * (5.0/1024) -0.5) / 0.01
sleep(5)
        except TypeError:
            # analog pins read None until pyfirmata's Iterator delivers data;
            # skip this pass so gasValue/tmpValue are never used while unset
            continue
print('{0} {1}'.format(gasValue, tmpValue))
sql = "update Student.articles_envdata set tmpValue = {1}, gasValue = {0} where data_id = 1".format(gasValue, tmpValue)
cursor.execute(sql)
db.commit()
print("Update Success~")
sleep(5)
except Exception as e:
db.rollback()
print("Error!:{0}".format(e))
except KeyboardInterrupt:
uno.exit()
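# Standalone sanity check of the arduino_map() arithmetic above (no Arduino
# hardware needed): an analog reading in [0, 1] rescales linearly to [0, 5] V.
assert arduino_map(0.0, 0, 1, 0, 5) == 0.0
assert arduino_map(0.5, 0, 1, 0, 5) == 2.5
assert arduino_map(1.0, 0, 1, 0, 5) == 5.0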
b01504199a00f0b0ea4a2e7806f9a6775f0b35bb | 11,037 | py | Python | BCPNN/backend/_cpu_base_backend.py | KTH-HPC/StreamBrain | ["BSD-2-Clause"] | 4 stars (2020-10-20 to 2022-02-10)
import sys
import numpy as np
from tqdm import tqdm
from contextlib import nullcontext
class DenseLayer:
_update_state = None
_softmax_minicolumns = None
_update_counters = None
_update_weights = None
_update_bias = None
def __init__(
self,
in_features,
hypercolumns,
minicolumns,
taupdt,
initial_counters,
dtype=np.float64):
self.in_features = in_features
self.hypercolumns = hypercolumns
self.minicolumns = minicolumns
self.out_features = hypercolumns * minicolumns
self.taupdt = taupdt
self.dtype = dtype
self.weights = (
0.1 *
np.random.randn(
self.in_features,
self.out_features)).astype(dtype)
self.bias = (0.1 * np.random.rand(self.out_features)).astype(dtype)
self.Ci = initial_counters[0] * np.ones([in_features]).astype(dtype)
self.Cj = initial_counters[1] * \
np.ones([self.out_features]).astype(dtype)
self.Cij = initial_counters[2] * \
np.ones([self.in_features, self.out_features]).astype(dtype)
def compute_activation(self, inputs):
activations = np.zeros(
[inputs.shape[0], self.out_features], dtype=self.dtype)
activations = self._update_state(
activations, self.weights, self.bias, inputs)
activations = self._softmax_minicolumns(
activations, self.hypercolumns, self.minicolumns)
return activations
def convert(self, dtype):
self.dtype = dtype
self.weights = self.weights.astype(dtype)
self.bias = self.bias.astype(dtype)
self.Ci = self.Ci.astype(dtype)
self.Cj = self.Cj.astype(dtype)
self.Cij = self.Cij.astype(dtype)
def train_step(self, inputs, outputs):
self.Ci, self.Cj, self.Cij = self._update_counters(
self.Ci, self.Cj, self.Cij, inputs, outputs, self.taupdt)
def train_finalize(self):
self.weights = self._update_weights(
self.weights, self.Ci, self.Cj, self.Cij, self.taupdt / 2)
self.bias = self._update_bias(self.bias, self.Cj, self.taupdt / 2)
class StructuralPlasticityLayer:
_update_state = None
_softmax_minicolumns = None
_update_counters = None
_update_weights = None
_update_bias = None
_update_mask = None
_apply_mask = None
def __init__(
self,
in_features,
hypercolumns,
minicolumns,
taupdt,
khalf,
pmin,
taubdt,
density,
mask_iterations,
initial_counters,
dtype=np.float64):
self.in_features = in_features
self.hypercolumns = hypercolumns
self.minicolumns = minicolumns
self.out_features = hypercolumns * minicolumns
self.taupdt = taupdt
self.khalf = khalf
self.pmin = pmin
self.taubdt = taubdt
self.density = density
self.mask_iterations = mask_iterations
self.dtype = dtype
self.weights = (
0.1 *
np.random.randn(
self.in_features,
self.out_features)).astype(dtype)
self.bias = (0.1 * np.random.rand(self.out_features)).astype(dtype)
self.Ci = initial_counters[0] * np.ones([in_features]).astype(dtype)
self.Cj = initial_counters[1] * \
np.ones([self.out_features]).astype(dtype)
self.Cij = initial_counters[2] * \
np.ones([self.in_features, self.out_features]).astype(dtype)
self.kbi = np.ones([self.out_features]).astype(dtype)
self.wmask = (
np.random.rand(
self.in_features,
self.hypercolumns) < self.density).astype(
np.uint8)
def compute_activation(self, inputs):
activations = np.zeros(
[inputs.shape[0], self.out_features], dtype=self.dtype)
activations = self._update_state(
activations, self.weights, self.bias, inputs)
activations = self._softmax_minicolumns(
activations, self.hypercolumns, self.minicolumns)
return activations
def convert(self, dtype):
self.dtype = dtype
self.weights = self.weights.astype(dtype)
self.bias = self.bias.astype(dtype)
self.Ci = self.Ci.astype(dtype)
self.Cj = self.Cj.astype(dtype)
self.Cij = self.Cij.astype(dtype)
self.kbi = self.kbi.astype(dtype)
def train_step(self, inputs, outputs, hypercolumn=None):
self.Ci, self.Cj, self.Cij = self._update_counters(
self.Ci, self.Cj, self.Cij, inputs, outputs, self.taupdt)
self.weights = self._update_weights(
self.weights, self.Ci, self.Cj, self.Cij, self.taupdt / 2)
self.bias, self.kbi = self._update_bias(
self.bias, self.kbi, self.Cj, self.taupdt / 2, self.khalf, self.pmin, self.taubdt)
if hypercolumn is not None:
#print("Updating hypercolumn:", hypercolumn)
self.wmask = self._update_mask(
self.wmask,
self.weights,
self.Ci,
self.Cj,
self.Cij,
self.taupdt / 2,
self.hypercolumns,
self.minicolumns,
hypercolumn,
self.mask_iterations)
self.weights = self._apply_mask(
self.weights,
self.wmask,
self.hypercolumns,
self.minicolumns)
def train_finalize(self):
pass
class Network:
def __init__(self, dtype):
self.dtype = dtype
self._layers = []
self.world_rank = 0
self.world_size = 1
def add_layer(self, layer):
if layer.dtype != self.dtype:
layer.convert(self.dtype)
self._layers.append(layer)
def fit(
self,
training_data,
training_labels,
maximal_batch_size,
schedule):
training_data = training_data.astype(self.dtype)
training_labels = training_labels.astype(self.dtype)
for layer, epochs in schedule:
self._train_layer(
layer,
maximal_batch_size,
training_data,
training_labels,
epochs)
def evaluate(self, images, labels, maximal_batch_size):
images = images.astype(self.dtype)
labels = labels.astype(self.dtype)
correct = np.array([0])
total = np.array([0])
number_of_batches = (
images.shape[0] + maximal_batch_size - 1) // maximal_batch_size
if self.world_rank == 0:
cm = tqdm(total=number_of_batches)
else:
cm = nullcontext()
with cm as pbar:
if self.world_rank == 0:
pbar.set_description('Evaluation')
for i in range(number_of_batches):
global_start = i * maximal_batch_size
global_end = global_start + maximal_batch_size if global_start + \
maximal_batch_size <= images.shape[0] else images.shape[0]
local_batch_size = (
global_end - global_start) // self.world_size
start_sample = global_start + self.world_rank * local_batch_size
end_sample = start_sample + local_batch_size
batch_images = images[start_sample:end_sample, :]
batch_labels = labels[start_sample:end_sample, :]
activations = batch_images
for layer in self._layers:
activations = layer.compute_activation(activations)
correct += (np.argmax(activations, axis=1) ==
np.argmax(batch_labels, axis=1)).sum()
total += batch_images.shape[0]
if self.world_rank == 0:
pbar.update(1)
return correct, total
def _train_layer(
self,
layer,
maximal_batch_size,
images,
labels,
epochs):
for epoch in range(epochs):
if self.world_rank == 0:
print('Layer - %d/%d' %
(layer + 1, len(self._layers)), flush=True)
idx = np.random.permutation(range(images.shape[0]))
shuffled_images = images[idx, :]
shuffled_labels = labels[idx, :]
n_hypercolumns = self._layers[layer].hypercolumns
hypercolumns_shuffled = np.random.permutation(
range(n_hypercolumns))
number_of_batches = (
images.shape[0] + maximal_batch_size - 1) // maximal_batch_size
local_batch_size = maximal_batch_size // self.world_size
if self.world_rank == 0:
cm = tqdm(total=number_of_batches)
else:
cm = nullcontext()
with cm as pbar:
if self.world_rank == 0:
pbar.set_description('Epoch %d/%d' % (epoch + 1, epochs))
for i in range(number_of_batches):
global_start = i * maximal_batch_size
global_end = global_start + maximal_batch_size if global_start + \
maximal_batch_size <= images.shape[0] else images.shape[0]
local_batch_size = (
global_end - global_start) // self.world_size
start_sample = global_start + self.world_rank * local_batch_size
end_sample = start_sample + local_batch_size
batch_images = shuffled_images[start_sample:end_sample, :]
batch_labels = shuffled_labels[start_sample:end_sample, :]
prev_activation = None
activation = batch_images
for l in range(layer + 1):
prev_activation = activation
activation = self._layers[l].compute_activation(
prev_activation)
if epoch > 0 and i % (
number_of_batches // (n_hypercolumns + 1)) == 0:
h = i // (number_of_batches // (n_hypercolumns + 1))
h = hypercolumns_shuffled[h] if h < n_hypercolumns else None
else:
h = None
if layer + 1 == len(self._layers):
self._layers[layer].train_step(
prev_activation, batch_labels)
else:
self._layers[layer].train_step(
prev_activation, activation, h)
if self.world_rank == 0:
pbar.update(1)
self._layers[layer].train_finalize()
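# Construction-only sketch: this CPU base backend leaves the kernel hooks
# (_update_state, _update_counters, ...) as None, so a concrete backend must
# bind them before fit()/evaluate() will run; the layer sizes are illustrative.
import numpy as np
net = Network(dtype=np.float32)
net.add_layer(DenseLayer(in_features=784, hypercolumns=15, minicolumns=32,
                         taupdt=0.001, initial_counters=(1.0, 1.0, 1.0)))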
b01639c2289f47ba698eea2092678bb22c032e75 | 6,879 | py | Python | flux_sensors/flux_sensor.py | Flux-Coordinator/flux-sensors | ["MIT"]
from flux_sensors.localizer.localizer import Localizer, Coordinates, LocalizerError, PozyxDeviceError
from flux_sensors.light_sensor.light_sensor import LightSensor
from flux_sensors.config_loader import ConfigLoader
from flux_sensors.flux_server import FluxServer, FluxServerError
from flux_sensors.models import models
import time
import requests
import json
import logging
logger = logging.getLogger(__name__)
class FluxSensorError(Exception):
"""Base class for exceptions in this module."""
class InitializationError(FluxSensorError):
"""Exception raised when the initialization of the sensors failed."""
class FluxSensor:
"""Controlling class for the flux-sensors components"""
def __init__(self, localizer_instance: Localizer, light_sensor_instance: LightSensor, config_loader: ConfigLoader,
flux_server: FluxServer) -> None:
self._localizer = localizer_instance
self._light_sensor = light_sensor_instance
self._config_loader = config_loader
self._flux_server = flux_server
self._timeout = time.time()
def start_when_ready(self) -> None:
logger.info("Flux-sensors in standby. Start polling Flux-server")
while True:
if not self._flux_server.poll_server_urls(self._config_loader.get_server_urls(),
self._config_loader.get_timeout()):
logger.warning("All server URLs failed to respond. Retry started...")
continue
logger.info("Server responding. Start measurement when ready...")
if not self._flux_server.poll_active_measurement():
FluxSensor.handle_retry(3)
continue
logger.info("Success! A flux-server is available and a measurement is active.")
try:
self._flux_server.login_at_server()
response = self._flux_server.get_active_measurement()
self._flux_server.log_server_response(response)
except requests.exceptions.RequestException as err:
logger.error("Request error while loading active measurement from Flux-server")
logger.error(err)
FluxSensor.handle_retry(3)
continue
except FluxServerError as err:
logger.error("Server error while loading active measurement from Flux-server")
logger.error(err)
FluxSensor.handle_retry(3)
continue
try:
self.clear_sensors()
self.initialize_sensors(response.text)
except InitializationError as err:
logger.error(err)
logger.error("Error while initializing the sensors")
FluxSensor.handle_retry(3)
continue
logger.info("Flux-sensors initialized. Start measurement...")
self.start_measurement()
@staticmethod
def handle_retry(seconds: int) -> None:
logger.info("Retry starts in {} seconds...".format(seconds))
time.sleep(seconds)
def initialize_sensors(self, measurement: str) -> None:
self.initialize_localizer(measurement)
self.initialize_light_sensor()
def initialize_localizer(self, measurement: str) -> None:
try:
measurement_json = json.loads(measurement)
for anchorPosition in measurement_json["anchorPositions"]:
self._localizer.add_anchor_to_cache(int(anchorPosition["anchor"]["networkId"], 16),
Coordinates(int(anchorPosition["xposition"]),
int(anchorPosition["yposition"]),
int(anchorPosition["zposition"])))
except(ValueError, KeyError, TypeError):
raise InitializationError("Error while parsing the Pozyx Anchors.")
try:
self._localizer.initialize()
except LocalizerError as err:
logger.error(err)
raise InitializationError("Error while initializing Pozyx.")
def initialize_light_sensor(self) -> None:
self._light_sensor.initialize()
def clear_sensors(self) -> None:
self._localizer.clear()
def _reset_timeout(self) -> None:
self._timeout = time.time() + self._config_loader.get_timeout()
def _is_timeout_exceeded(self) -> bool:
return time.time() > self._timeout
def start_measurement(self) -> None:
readings = []
self._flux_server.initialize_last_response()
self._reset_timeout()
while not self._is_timeout_exceeded():
try:
position = self._localizer.do_positioning()
illuminance = self._light_sensor.do_measurement()
readings.append(models.Reading(illuminance, position))
try:
if self._flux_server.get_last_response() == 200:
if len(readings) >= self._flux_server.MIN_BATCH_SIZE:
self._flux_server.reset_last_response()
json_data = json.dumps(readings, default=lambda o: o.__dict__)
self._flux_server.send_data_to_server(json_data)
del readings[:]
self._reset_timeout()
elif self._flux_server.get_last_response() == 401:
logger.info("Auth token expired. Try new login...")
self._flux_server.login_at_server()
self._flux_server.initialize_last_response()
elif self._flux_server.get_last_response() == 404:
logger.info("The measurement has been stopped by the server.")
return
elif self._flux_server.get_last_response() != self._flux_server.RESPONSE_PENDING:
logger.info("The measurement has been stopped.")
return
except requests.exceptions.RequestException as err:
logger.error("Request error while sending new readings to Flux-server")
logger.error(err)
return
except FluxServerError as err:
logger.error("Server error while sending new readings to Flux-server")
logger.error(err)
return
except PozyxDeviceError as err:
logger.error("Pozyx error while creating new readings")
logger.error(err)
continue
logger.error("Timeout of {}s is exceeded while waiting for Flux-server response".format(
self._config_loader.get_timeout()))
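# Isolated sketch of the deadline pattern behind _reset_timeout() and
# _is_timeout_exceeded() above, runnable without any sensor hardware.
import time

deadline = time.time() + 2.0       # what _reset_timeout() stores
while time.time() <= deadline:     # negation of _is_timeout_exceeded()
    time.sleep(0.5)                # poll, as the measurement loop does
print("timeout exceeded")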
b019647d7984c42bcd98ff6521f630e19b83c858 | 11,288 | py | Python | Network.py | Coldog2333/pytoflow | ["MIT"] | 102 stars (2018-12-29 to 2022-01-13)
import math
import torch
# import torch.utils.serialization  # it was removed in torch v1.0.0 and higher versions.
arguments_strModel = 'sintel-final'
SpyNet_model_dir = './models' # The directory of SpyNet's weights
def normalize(tensorInput):
tensorRed = (tensorInput[:, 0:1, :, :] - 0.485) / 0.229
tensorGreen = (tensorInput[:, 1:2, :, :] - 0.456) / 0.224
tensorBlue = (tensorInput[:, 2:3, :, :] - 0.406) / 0.225
return torch.cat([tensorRed, tensorGreen, tensorBlue], 1)
def denormalize(tensorInput):
tensorRed = (tensorInput[:, 0:1, :, :] * 0.229) + 0.485
tensorGreen = (tensorInput[:, 1:2, :, :] * 0.224) + 0.456
tensorBlue = (tensorInput[:, 2:3, :, :] * 0.225) + 0.406
return torch.cat([tensorRed, tensorGreen, tensorBlue], 1)
Backward_tensorGrid = {}
def Backward(tensorInput, tensorFlow, cuda_flag):
if str(tensorFlow.size()) not in Backward_tensorGrid:
tensorHorizontal = torch.linspace(-1.0, 1.0, tensorFlow.size(3)).view(1, 1, 1, tensorFlow.size(3)).expand(tensorFlow.size(0), -1, tensorFlow.size(2), -1)
tensorVertical = torch.linspace(-1.0, 1.0, tensorFlow.size(2)).view(1, 1, tensorFlow.size(2), 1).expand(tensorFlow.size(0), -1, -1, tensorFlow.size(3))
if cuda_flag:
Backward_tensorGrid[str(tensorFlow.size())] = torch.cat([ tensorHorizontal, tensorVertical ], 1).cuda()
else:
Backward_tensorGrid[str(tensorFlow.size())] = torch.cat([tensorHorizontal, tensorVertical], 1)
# end
tensorFlow = torch.cat([ tensorFlow[:, 0:1, :, :] / ((tensorInput.size(3) - 1.0) / 2.0), tensorFlow[:, 1:2, :, :] / ((tensorInput.size(2) - 1.0) / 2.0) ], 1)
return torch.nn.functional.grid_sample(input=tensorInput, grid=(Backward_tensorGrid[str(tensorFlow.size())] + tensorFlow).permute(0, 2, 3, 1), mode='bilinear', padding_mode='border')
# end
class SpyNet(torch.nn.Module):
def __init__(self, cuda_flag):
super(SpyNet, self).__init__()
self.cuda_flag = cuda_flag
class Basic(torch.nn.Module):
def __init__(self, intLevel):
super(Basic, self).__init__()
self.moduleBasic = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=8, out_channels=32, kernel_size=7, stride=1, padding=3),
torch.nn.ReLU(inplace=False),
torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=7, stride=1, padding=3),
torch.nn.ReLU(inplace=False),
torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=7, stride=1, padding=3),
torch.nn.ReLU(inplace=False),
torch.nn.Conv2d(in_channels=32, out_channels=16, kernel_size=7, stride=1, padding=3),
torch.nn.ReLU(inplace=False),
torch.nn.Conv2d(in_channels=16, out_channels=2, kernel_size=7, stride=1, padding=3)
)
# end
def forward(self, tensorInput):
return self.moduleBasic(tensorInput)
self.moduleBasic = torch.nn.ModuleList([Basic(intLevel) for intLevel in range(4)])
self.load_state_dict(torch.load(SpyNet_model_dir + '/network-' + arguments_strModel + '.pytorch'), strict=False)
def forward(self, tensorFirst, tensorSecond):
tensorFirst = [tensorFirst]
tensorSecond = [tensorSecond]
for intLevel in range(3):
if tensorFirst[0].size(2) > 32 or tensorFirst[0].size(3) > 32:
tensorFirst.insert(0, torch.nn.functional.avg_pool2d(input=tensorFirst[0], kernel_size=2, stride=2))
tensorSecond.insert(0, torch.nn.functional.avg_pool2d(input=tensorSecond[0], kernel_size=2, stride=2))
tensorFlow = tensorFirst[0].new_zeros(tensorFirst[0].size(0), 2,
int(math.floor(tensorFirst[0].size(2) / 2.0)),
int(math.floor(tensorFirst[0].size(3) / 2.0)))
for intLevel in range(len(tensorFirst)):
tensorUpsampled = torch.nn.functional.interpolate(input=tensorFlow, scale_factor=2, mode='bilinear', align_corners=True) * 2.0
# if the sizes of upsampling and downsampling are not the same, apply zero-padding.
if tensorUpsampled.size(2) != tensorFirst[intLevel].size(2):
tensorUpsampled = torch.nn.functional.pad(input=tensorUpsampled, pad=[0, 0, 0, 1], mode='replicate')
if tensorUpsampled.size(3) != tensorFirst[intLevel].size(3):
tensorUpsampled = torch.nn.functional.pad(input=tensorUpsampled, pad=[0, 1, 0, 0], mode='replicate')
# input :[first picture of corresponding level,
# the output of w with input second picture of corresponding level and upsampling flow,
# upsampling flow]
            # then add them up to obtain the final flow at this intLevel.
tensorFlow = self.moduleBasic[intLevel](torch.cat([tensorFirst[intLevel],
Backward(tensorInput=tensorSecond[intLevel],
tensorFlow=tensorUpsampled,
cuda_flag=self.cuda_flag),
tensorUpsampled], 1)) + tensorUpsampled
return tensorFlow
class warp(torch.nn.Module):
def __init__(self, h, w, cuda_flag):
super(warp, self).__init__()
self.height = h
self.width = w
if cuda_flag:
self.addterm = self.init_addterm().cuda()
else:
self.addterm = self.init_addterm()
def init_addterm(self):
n = torch.FloatTensor(list(range(self.width)))
        horizontal_term = n.expand((1, 1, self.height, self.width))  # the first 1 is the batch size
n = torch.FloatTensor(list(range(self.height)))
vertical_term = n.expand((1, 1, self.width, self.height)).permute(0, 1, 3, 2)
addterm = torch.cat((horizontal_term, vertical_term), dim=1)
return addterm
def forward(self, frame, flow):
"""
        :param frame: frame.shape (batch_size=1, n_channels=3, height=256, width=448)
        :param flow: flow.shape (batch_size=1, n_channels=2, height=256, width=448)
:return: reference_frame: warped frame
"""
        flow = flow + self.addterm
        horizontal_flow = flow[0, 0, :, :].expand(1, 1, self.height, self.width)  # the first 0 indexes the (single-image) batch
vertical_flow = flow[0, 1, :, :].expand(1, 1, self.height, self.width)
horizontal_flow = horizontal_flow * 2 / (self.width - 1) - 1
vertical_flow = vertical_flow * 2 / (self.height - 1) - 1
flow = torch.cat((horizontal_flow, vertical_flow), dim=1)
flow = flow.permute(0, 2, 3, 1)
reference_frame = torch.nn.functional.grid_sample(frame, flow)
return reference_frame
class ResNet(torch.nn.Module):
"""
Three-layers ResNet/ResBlock
reference: https://blog.csdn.net/chenyuping333/article/details/82344334
"""
def __init__(self, task):
super(ResNet, self).__init__()
self.task = task
self.conv_3x2_64_9x9 = torch.nn.Conv2d(in_channels=3 * 2, out_channels=64, kernel_size=9, padding=8 // 2)
self.conv_3x7_64_9x9 = torch.nn.Conv2d(in_channels=3 * 7, out_channels=64, kernel_size=9, padding=8 // 2)
self.conv_64_64_9x9 = torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=9, padding=8 // 2)
self.conv_64_64_1x1 = torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=1)
self.conv_64_3_1x1 = torch.nn.Conv2d(in_channels=64, out_channels=3, kernel_size=1)
def ResBlock(self, x, aver):
if self.task == 'interp':
x = torch.nn.functional.relu(self.conv_3x2_64_9x9(x))
x = torch.nn.functional.relu(self.conv_64_64_1x1(x))
elif self.task in ['denoise', 'denoising']:
x = torch.nn.functional.relu(self.conv_3x7_64_9x9(x))
x = torch.nn.functional.relu(self.conv_64_64_1x1(x))
elif self.task in ['sr', 'super-resolution']:
x = torch.nn.functional.relu(self.conv_3x7_64_9x9(x))
x = torch.nn.functional.relu(self.conv_64_64_9x9(x))
x = torch.nn.functional.relu(self.conv_64_64_1x1(x))
else:
raise NameError('Only support: [interp, denoise/denoising, sr/super-resolution]')
x = self.conv_64_3_1x1(x) + aver
return x
def forward(self, frames):
aver = frames.mean(dim=1)
x = frames[:, 0, :, :, :]
for i in range(1, frames.size(1)):
x = torch.cat((x, frames[:, i, :, :, :]), dim=1)
result = self.ResBlock(x, aver)
return result
class TOFlow(torch.nn.Module):
def __init__(self, h, w, task, cuda_flag):
super(TOFlow, self).__init__()
self.height = h
self.width = w
self.task = task
self.cuda_flag = cuda_flag
        self.SpyNet = SpyNet(cuda_flag=self.cuda_flag)  # SpyNet layer
# for param in self.SpyNet.parameters(): # fix
# param.requires_grad = False
self.warp = warp(self.height, self.width, cuda_flag=self.cuda_flag)
self.ResNet = ResNet(task=self.task)
# frames should be TensorFloat
def forward(self, frames):
"""
:param frames: [batch_size=1, img_num, n_channels=3, h, w]
:return:
"""
for i in range(frames.size(1)):
frames[:, i, :, :, :] = normalize(frames[:, i, :, :, :])
if self.cuda_flag:
opticalflows = torch.zeros(frames.size(0), frames.size(1), 2, frames.size(3), frames.size(4)).cuda()
warpframes = torch.empty(frames.size(0), frames.size(1), 3, frames.size(3), frames.size(4)).cuda()
else:
opticalflows = torch.zeros(frames.size(0), frames.size(1), 2, frames.size(3), frames.size(4))
warpframes = torch.empty(frames.size(0), frames.size(1), 3, frames.size(3), frames.size(4))
if self.task == 'interp':
process_index = [0, 1]
opticalflows[:, 1, :, :, :] = self.SpyNet(frames[:, 0, :, :, :], frames[:, 1, :, :, :]) / 2
opticalflows[:, 0, :, :, :] = self.SpyNet(frames[:, 1, :, :, :], frames[:, 0, :, :, :]) / 2
elif self.task in ['denoise', 'denoising', 'sr', 'super-resolution']:
process_index = [0, 1, 2, 4, 5, 6]
for i in process_index:
opticalflows[:, i, :, :, :] = self.SpyNet(frames[:, 3, :, :, :], frames[:, i, :, :, :])
warpframes[:, 3, :, :, :] = frames[:, 3, :, :, :]
else:
raise NameError('Only support: [interp, denoise/denoising, sr/super-resolution]')
for i in process_index:
warpframes[:, i, :, :, :] = self.warp(frames[:, i, :, :, :], opticalflows[:, i, :, :, :])
# warpframes: [batch_size=1, img_num=7, n_channels=3, height=256, width=448]
Img = self.ResNet(warpframes)
# Img: [batch_size=1, n_channels=3, h, w]
Img = denormalize(Img)
return Img
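# Inference sketch, assuming the pretrained SpyNet weights file exists under
# ./models as SpyNet.__init__ above requires; the shapes follow TOFlow.forward's
# docstring, and the random frames are placeholders for real video frames.
import torch
net = TOFlow(h=256, w=448, task='interp', cuda_flag=False)
frames = torch.rand(1, 2, 3, 256, 448)  # [batch, img_num, channels, h, w]
with torch.no_grad():
    interpolated = net(frames)          # [1, 3, 256, 448]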
b01bbd168b9b732e58f788ff84aca342f6b50515 | 2,668 | py | Python | storagetest/pkgs/ltp/acl/acl_test.py | liufeng-elva/storage-test2 | ["MIT"]
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@file : acl_test.py
@Time : 2020/11/9 9:25
@Author: Tao.Xu
@Email : tao.xu2008@outlook.com
"""
import os
import unittest
from storagetest.libs import utils
from storagetest.libs.log import log
from storagetest.libs.exceptions import PlatformError, NoSuchDir, NoSuchBinary
logger = log.get_logger()
cur_dir = os.path.dirname(os.path.realpath(__file__))
bin_path = os.path.join(cur_dir, 'bin')
class AclXattr(object):
"""Test ACL and Extend Attribute on Linux system"""
def __init__(self, top_path):
self.top_path = top_path
def verify(self):
if os.name != "posix":
            raise PlatformError("Only supported on Linux machines!")
if not os.path.isdir(self.top_path):
raise NoSuchDir(self.top_path)
try:
utils.run_cmd("which attr", expected_rc=0)
except Exception as e:
logger.error(e)
            raise NoSuchBinary("attr not found; try installing it (apt-get install -y attr)")
def run(self, test_path):
"""cd <test_path>; ./tacl_xattr.sh """
logger.info(self.run.__doc__)
utils.mkdir_path(test_path)
acl_bin = os.path.join(bin_path, 'tacl_xattr.sh')
test_log = os.path.join(self.top_path, 'tacl_xattr.log')
acl_cmd = "rm -rf {0}/*; cd {0}; {1} | tee {2}".format(test_path, acl_bin, test_log)
try:
os.system('chmod +x {0}/*'.format(bin_path))
rc, output = utils.run_cmd(acl_cmd, expected_rc="ignore")
logger.info(output)
if rc != 0:
raise Exception("tacl_xattr.sh exit with !0")
if "FAILED:" in output:
raise Exception("FAIL: test acl_xattr on {}".format(test_path))
logger.info("PASS: test acl_xattr on {}".format(test_path))
except Exception as e:
logger.info("FAIL: test acl_xattr on {}".format(test_path))
raise e
return True
def sanity(self):
self.verify()
test_path = os.path.join(self.top_path, "acl_attribute")
assert self.run(test_path)
return True
def stress(self):
self.verify()
test_path = os.path.join(self.top_path, "acl_attribute")
assert self.run(test_path)
return True
class UnitTestCase(unittest.TestCase):
def setUp(self) -> None:
self.acl = AclXattr("/mnt/test")
def test_01(self):
self.acl.sanity()
if __name__ == '__main__':
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(UnitTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
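# Hedged sketch of using AclXattr outside unittest; "/mnt/test" is just a
# placeholder mount point and must exist on a Linux box with `attr` installed.
def run_sanity_demo(top_path="/mnt/test"):
    acl = AclXattr(top_path)
    return acl.sanity()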
b0204523055a99ef60f353c69bef13df582957e8 | 15,276 | py | Python | library/modules/encoder_decoders/sequence_to_sequence.py | dangitstam/le-traducteur | ["Apache-2.0"] | 6 stars (2018-10-23 to 2020-08-30)
from typing import Dict, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Seq2SeqEncoder, TextFieldEmbedder
from allennlp.modules.attention import BilinearAttention
from allennlp.modules.token_embedders import Embedding
from allennlp.nn import InitializerApplicator, RegularizerApplicator, util
from overrides import overrides
# This is largely based on AllenNLP's general Seq2Seq encoder-decoder:
# https://github.com/allenai/allennlp/blob/master/allennlp/models/encoder_decoders/simple_seq2seq.py
#
# but offers more flexibility. Maybe I'll subclass this module when they've addressed their TODOs.
# TODO: Add more asserts so people don't do dumb shit
# TODO: Better docstrings.
@Model.register("sequence_to_sequence")
class SequenceToSequence(Model):
"""
Base class for sequence-to-sequence models.
"""
DECODERS = {"rnn": torch.nn.RNN, "lstm": torch.nn.LSTM, "gru": torch.nn.GRU}
def __init__(self,
                 # Vocabulary.
vocab: Vocabulary,
# Embeddings.
source_field_embedder: TextFieldEmbedder,
target_embedding_size: int,
# Encoders and Decoders.
encoder: Seq2SeqEncoder,
decoder_type: str,
output_projection_layer: FeedForward,
source_namespace: str = "source",
target_namespace: str = "target",
                 # Hyperparameters and flags.
decoder_attention_function: BilinearAttention = None,
decoder_is_bidirectional: bool = False,
decoder_num_layers: int = 1,
apply_attention: Optional[bool] = False,
max_decoding_steps: int = 100,
scheduled_sampling_ratio: float = 0.4,
# Logistical.
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super().__init__(vocab, regularizer)
if encoder.get_input_dim() != source_field_embedder.get_output_dim():
raise ConfigurationError("The input dimension of the encoder must match the embedding"
"size of the source_field_embedder. Found {} and {}, respectively."
.format(encoder.get_input_dim(),
source_field_embedder.get_output_dim()))
if output_projection_layer.get_output_dim() != vocab.get_vocab_size(target_namespace):
raise ConfigurationError("The output dimension of the output_projection_layer must match the "
"size of the French vocabulary. Found {} and {}, "
"respectively.".format(output_projection_layer.get_output_dim(),
vocab.get_vocab_size(target_namespace)))
if decoder_type not in SequenceToSequence.DECODERS:
raise ConfigurationError("Unrecognized decoder option '{}'".format(decoder_type))
# For dealing with input.
self.source_vocab_size = vocab.get_vocab_size(source_namespace)
self.target_vocab_size = vocab.get_vocab_size(target_namespace)
self.source_field_embedder = source_field_embedder
self.encoder = encoder
# For dealing with / producing output.
self.target_embedder = Embedding(self.target_vocab_size, target_embedding_size)
# Input size will either be the target embedding size or the target embedding size plus the
# encoder hidden size to attend on the input.
#
# When making a custom attention function that uses neither of those input sizes, you will
# have to define the decoder yourself.
decoder_input_size = target_embedding_size
if apply_attention:
decoder_input_size += encoder.get_output_dim()
# Hidden size of the encoder and decoder should match.
decoder_hidden_size = encoder.get_output_dim()
self.decoder = SequenceToSequence.DECODERS[decoder_type](
decoder_input_size,
decoder_hidden_size,
num_layers=decoder_num_layers,
batch_first=True,
bias=True,
bidirectional=decoder_is_bidirectional
)
self.output_projection_layer = output_projection_layer
self.apply_attention = apply_attention
self.decoder_attention_function = decoder_attention_function or BilinearAttention(
matrix_dim=encoder.get_output_dim(),
vector_dim=encoder.get_output_dim()
)
# Hyperparameters.
self._max_decoding_steps = max_decoding_steps
self._scheduled_sampling_ratio = scheduled_sampling_ratio
# Used for prepping the translation primer (initialization of the target word-level
# encoder's hidden state).
#
# If the decoder is an LSTM, both hidden states and cell states must be initialized.
# Also, hidden states that prime translation via this encoder must be duplicated
# across the number of layers the decoder has.
self._decoder_is_lstm = isinstance(self.decoder, torch.nn.LSTM)
self._decoder_num_layers = decoder_num_layers
self._start_index = vocab.get_token_index(START_SYMBOL, target_namespace)
self._end_index = vocab.get_token_index(END_SYMBOL, target_namespace)
self._source_namespace = source_namespace
self._target_namespace = target_namespace
self._batch_size = None
initializer(self)
@overrides
def forward(self,
source: Dict[str, torch.LongTensor],
target: Dict[str, torch.LongTensor]) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
output_dict: dict = {}
source = self.preprocess_input(source)
# Embed and encode the source sequence.
source_sequence_encoded = self.encode_input(source)
source_mask = util.get_text_field_mask(source)
source_lengths = source_mask.sum(dim=-1)
source_encoded = torch.zeros_like(source_sequence_encoded[:, 0])
for i, length in enumerate(source_lengths):
source_encoded[i] = source_sequence_encoded[i, length - 1]
batch_size = source_encoded.size(0)
# Determine number of decoding steps. If training or computing validation, we decode
# target_seq_len times and compute loss.
if target:
target_tokens = target['tokens']
target_seq_len = target['tokens'].size(1)
num_decoding_steps = target_seq_len - 1
else:
num_decoding_steps = self._max_decoding_steps
# Begin decoding the encoded source, swapping in predictions for ground truth at the
# scheduled sampling rate.
last_predictions = None
step_logits, step_probabilities, step_predictions = [], [], []
decoder_hidden = self.init_decoder_hidden_state(source_encoded)
for timestep in range(num_decoding_steps):
if self.training and torch.rand(1).item() >= self._scheduled_sampling_ratio:
input_choices = target_tokens[:, timestep]
else:
if timestep == 0: # Initialize decoding with the start token.
input_choices = source_mask.new_full((batch_size,),
fill_value=self._start_index)
else:
input_choices = last_predictions
decoder_input = self.prepare_decode_step_input(input_choices, decoder_hidden,
source_sequence_encoded, source_mask)
if len(decoder_input.shape) < 3:
decoder_input = decoder_input.unsqueeze(1)
_, decoder_hidden = self.decoder(decoder_input, decoder_hidden)
# Probability distribution for what the next decoded class should be.
output_projection = self.output_projection_layer(decoder_hidden[0][-1]
if self._decoder_is_lstm
else decoder_hidden[-1])
step_logits.append(output_projection.unsqueeze(1))
# Collect predicted classes and their probabilities.
class_probabilities = F.softmax(output_projection, dim=-1)
_, predicted_classes = torch.max(class_probabilities, 1)
step_probabilities.append(class_probabilities.unsqueeze(1))
step_predictions.append(predicted_classes.unsqueeze(1))
last_predictions = predicted_classes
logits = torch.cat(step_logits, 1)
class_probabilities = torch.cat(step_probabilities, 1)
all_predictions = torch.cat(step_predictions, 1)
output_dict = {"logits": logits,
"class_probabilities": class_probabilities,
"predictions": all_predictions}
if target:
target_mask = util.get_text_field_mask(target)
relevant_targets = target['tokens'][:, 1:].contiguous()
relevant_mask = target_mask[:, 1:].contiguous()
loss = util.sequence_cross_entropy_with_logits(logits, relevant_targets, relevant_mask)
output_dict["loss"] = loss
return output_dict
def preprocess_input(self, source: Dict[str, torch.LongTensor]) -> Dict[str, torch.Tensor]:
"""
Perform any preprocessing on the input text field you like; returns the source unchanged
by default.
"""
# pylint: disable=R0201
return source
def encode_input(self, source: Dict[str, torch.LongTensor]) -> torch.FloatTensor:
"""
Encode the source utterance however you see fit, as long as you return a tensor
of the required shape.
By default, embeds the source utterance and feeds it to the source encoder.
Note that when subclassing this module, the decoder_hidden_size should be the same as
the encoder's hidden size.
Required shapes: (batch_size, sequence_length, decoder_hidden_size)
"""
source_sequence_embedded = self.source_field_embedder(source)
source_sequence_mask = util.get_text_field_mask(source)
encoded_source_sequence = self.encoder(source_sequence_embedded, source_sequence_mask)
return encoded_source_sequence
def init_decoder_hidden_state(self, source_sequence_encoded: torch.FloatTensor) -> torch.FloatTensor:
"""
Prep the hidden state initialization of the word-level Target decoder any way
you like.
By default, uses only the final hidden state of the encoded source.
Required shape: (num_decoder_layers, batch_size, encoder_hidden_size)
"""
decoder_primer = source_sequence_encoded.unsqueeze(0)
decoder_primer = decoder_primer.expand(
self._decoder_num_layers, -1, self.encoder.get_output_dim()
).contiguous()
# If the decoder is an LSTM, we need to initialize a cell state.
if self._decoder_is_lstm:
decoder_primer = (decoder_primer, torch.zeros_like(decoder_primer))
return decoder_primer
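# Illustrative note (added): with batch_size=2, encoder hidden size 4 and
# decoder_num_layers=3, the (2, 4) sentence summary becomes an h0 of shape
# (3, 2, 4); an LSTM decoder additionally gets a zero c0 of the same shape.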
def prepare_decode_step_input(self,
input_indices: torch.LongTensor,
decoder_hidden: torch.LongTensor,
encoder_outputs: torch.LongTensor,
encoder_outputs_mask: torch.LongTensor) -> torch.LongTensor:
"""
Prepares the current timestep input for the decoder.
By default, simply embeds and returns the input. If using attention, the default attention
(BiLinearAttention) is applied to attend on the step input given the encoded source
sequence and the previous hidden state.
Parameters:
-----------
input_indices : torch.LongTensor
Indices of either the gold inputs to the decoder or the predicted labels from the
previous timestep.
decoder_hidden : torch.LongTensor, optional (not needed if no attention)
Output from the decoder at the last time step. Needed only if using attention.
encoder_outputs : torch.LongTensor, optional (not needed if no attention)
Encoder outputs from all time steps. Needed only if using attention.
encoder_outputs_mask : torch.LongTensor, optional (not needed if no attention)
Masks on encoder outputs. Needed only if using attention.
"""
# input_indices : (batch_size,) since we are processing these one timestep at a time.
# (batch_size, target_embedding_dim)
embedded_input = self.target_embedder(input_indices)
if self.apply_attention:
if isinstance(decoder_hidden, tuple):
decoder_hidden = decoder_hidden[0]
# encoder_outputs : (batch_size, input_sequence_length, encoder_output_dim)
# Ensuring mask is also a FloatTensor. Or else the multiplication within attention will
# complain.
encoder_outputs_mask = encoder_outputs_mask.float()
# (batch_size, input_sequence_length)
input_weights = self.decoder_attention_function(decoder_hidden[-1], encoder_outputs,
encoder_outputs_mask)
# (batch_size, encoder_output_dim)
attended_input = util.weighted_sum(encoder_outputs, input_weights)
# (batch_size, encoder_output_dim + target_embedding_dim)
return torch.cat((attended_input, embedded_input), -1)
else:
return embedded_input
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
predicted_indices = output_dict["predictions"]
if not isinstance(predicted_indices, np.ndarray):
predicted_indices = predicted_indices.detach().cpu().numpy()
all_predicted_tokens = []
for indices in predicted_indices:
indices = list(indices)
# Collect indices till the first END_SYMBOL.
if self._end_index in indices:
indices = indices[:indices.index(self._end_index)]
predicted_tokens = [self.vocab.get_token_from_index(x, namespace=self._target_namespace)
for x in indices]
all_predicted_tokens.append(predicted_tokens)
output_dict["predicted_tokens"] = all_predicted_tokens
return output_dict
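# --- Hedged construction sketch (added; not part of the original file) ---
# Assumes an AllenNLP 0.x environment and a `vocab` already populated with
# "source" and "target" namespaces; every size below is illustrative.
# from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper
# from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
# from allennlp.nn import Activation
# source_embedder = BasicTextFieldEmbedder(
#     {"tokens": Embedding(vocab.get_vocab_size("source"), 64)})
# encoder = PytorchSeq2SeqWrapper(torch.nn.LSTM(64, 128, batch_first=True))
# projection = FeedForward(input_dim=128, num_layers=1,
#                          hidden_dims=vocab.get_vocab_size("target"),
#                          activations=Activation.by_name("linear")())
# model = SequenceToSequence(vocab, source_embedder, 64, encoder, "lstm", projection)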
| 48.805112
| 106
| 0.645719
| 1,692
| 15,276
| 5.575059
| 0.208629
| 0.024807
| 0.011449
| 0.009011
| 0.146719
| 0.091911
| 0.078978
| 0.058624
| 0.035832
| 0.035832
| 0
| 0.003859
| 0.28751
| 15,276
| 312
| 107
| 48.961538
| 0.862826
| 0.244043
| 0
| 0.06383
| 0
| 0
| 0.03666
| 0.004014
| 0
| 0
| 0
| 0.003205
| 0
| 1
| 0.037234
| false
| 0
| 0.074468
| 0
| 0.159574
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b023ba4b1780ce639f98fb2247c460ffe792c1f6
| 20,333
|
py
|
Python
|
tests/rewards_tree/test_rewards_flow.py
|
shuklaayush/badger-system
|
1274eadbd0b0f3a02efbf40702719ce1d0a96c44
|
[
"MIT"
] | 99
|
2020-12-02T08:40:48.000Z
|
2022-03-15T05:21:06.000Z
|
tests/rewards_tree/test_rewards_flow.py
|
shuklaayush/badger-system
|
1274eadbd0b0f3a02efbf40702719ce1d0a96c44
|
[
"MIT"
] | 115
|
2020-12-15T07:15:39.000Z
|
2022-03-28T22:21:03.000Z
|
tests/rewards_tree/test_rewards_flow.py
|
shuklaayush/badger-system
|
1274eadbd0b0f3a02efbf40702719ce1d0a96c44
|
[
"MIT"
] | 56
|
2020-12-11T06:50:04.000Z
|
2022-02-21T09:17:38.000Z
|
import json
import secrets
import brownie
from dotmap import DotMap
import pytest
import pprint
from brownie import *
from helpers.constants import *
from helpers.registry import registry
from rich.console import Console
FARM_ADDRESS = "0xa0246c9032bC3A600820415aE600c6388619A14D"
XSUSHI_ADDRESS = "0x8798249c2E607446EfB7Ad49eC89dD1865Ff4272"
SECS_PER_HOUR = 3600
SECS_PER_DAY = 86400
console = Console()
@pytest.fixture(scope="function", autouse="True")
def setup():
from assistant.rewards import rewards_assistant
return rewards_assistant
# @pytest.fixture(scope="function")
# def setup_badger(badger_tree_unit):
# return badger_tree_unit
def random_32_bytes():
return "0x" + secrets.token_hex(32)
# generates merkle root purely off dummy data
def internal_generate_rewards_in_range(
rewards_assistant, currentMerkleData, newRewards, startBlock, endBlock, pastRewards
):
cumulativeRewards = rewards_assistant.process_cumulative_rewards(
pastRewards, newRewards
)
# Take metadata from geyserRewards
console.print("Processing to merkle tree")
merkleTree = rewards_assistant.rewards_to_merkle_tree(
cumulativeRewards, startBlock, endBlock, newRewards
)
# Publish data
rootHash = rewards_assistant.hash(merkleTree["merkleRoot"])
contentFileName = rewards_assistant.content_hash_to_filename(rootHash)
console.log(
{
"merkleRoot": merkleTree["merkleRoot"],
"rootHash": str(rootHash),
"contentFile": contentFileName,
"startBlock": startBlock,
"endBlock": endBlock,
"currentContentHash": currentMerkleData["contentHash"],
}
)
return {
"contentFileName": contentFileName,
"merkleTree": merkleTree,
"rootHash": rootHash,
}
# @pytest.mark.skip()
def test_rewards_flow(setup):
rewards_assistant = setup
badgerTree = rewards_assistant.BadgerTree
guardian = rewards_assistant.guardian
rootUpdater = rewards_assistant.rootUpdater
admin, proposer, validator, user = accounts[:4]
rewardsContract = admin.deploy(badgerTree)
rewardsContract.initialize(admin, proposer, validator)
# Propose root
root = random_32_bytes()
contentHash = random_32_bytes()
startBlock = rewardsContract.lastPublishEndBlock() + 1
# Test variations of invalid data upload and verify revert string
with brownie.reverts("Incorrect cycle"):
rewardsContract.proposeRoot(
root,
contentHash,
rewardsContract.currentCycle(),
startBlock,
startBlock + 1,
{"from": proposer},
)
with brownie.reverts("Incorrect cycle"):
rewardsContract.proposeRoot(
root,
contentHash,
rewardsContract.currentCycle() + 2,
startBlock,
startBlock + 1,
{"from": proposer},
)
with brownie.reverts("Incorrect start block"):
rewardsContract.proposeRoot(
root,
contentHash,
rewardsContract.currentCycle() + 1,
rewardsContract.lastPublishEndBlock() + 2,
startBlock + 1,
{"from": proposer},
)
with brownie.reverts("Incorrect start block"):
rewardsContract.proposeRoot(
root,
contentHash,
rewardsContract.currentCycle() + 1,
rewardsContract.lastPublishEndBlock(),
startBlock + 1,
{"from": proposer},
)
# Ensure event
tx = rewardsContract.proposeRoot(
root,
contentHash,
rewardsContract.currentCycle() + 1,
startBlock,
startBlock + 1,
{"from": proposer},
)
assert "RootProposed" in tx.events.keys()
# Approve root
# Test variations of invalid data upload and verify revert string
with brownie.reverts("Incorrect root"):
rewardsContract.approveRoot(
random_32_bytes(),
contentHash,
rewardsContract.currentCycle(),
startBlock,
startBlock + 1,
{"from": validator},
)
with brownie.reverts("Incorrect content hash"):
rewardsContract.approveRoot(
root,
random_32_bytes(),
rewardsContract.currentCycle(),
startBlock,
startBlock + 1,
{"from": validator},
)
with brownie.reverts("Incorrect cycle"):
rewardsContract.approveRoot(
root,
contentHash,
rewardsContract.currentCycle(),
startBlock,
startBlock + 1,
{"from": validator},
)
with brownie.reverts("Incorrect cycle"):
rewardsContract.approveRoot(
root,
contentHash,
rewardsContract.currentCycle() + 2,
startBlock,
startBlock + 1,
{"from": validator},
)
with brownie.reverts("Incorrect cycle start block"):
rewardsContract.approveRoot(
root,
contentHash,
rewardsContract.pendingCycle(),
startBlock + 1,
startBlock + 1,
{"from": validator},
)
with brownie.reverts("Incorrect cycle start block"):
rewardsContract.approveRoot(
root,
contentHash,
rewardsContract.pendingCycle(),
startBlock - 1,
startBlock + 1,
{"from": validator},
)
with brownie.reverts("Incorrect cycle end block"):
rewardsContract.approveRoot(
root,
contentHash,
rewardsContract.pendingCycle(),
startBlock,
startBlock + 9,
{"from": validator},
)
with brownie.reverts("Incorrect cycle end block"):
rewardsContract.approveRoot(
root,
contentHash,
rewardsContract.pendingCycle(),
startBlock,
startBlock + 11,
{"from": validator},
)
with brownie.reverts("Incorrect cycle end block"):
rewardsContract.approveRoot(
root,
contentHash,
rewardsContract.pendingCycle(),
startBlock,
startBlock,
{"from": validator},
)
# Ensure event
tx = rewardsContract.approveRoot(
root,
contentHash,
rewardsContract.pendingCycle(),
startBlock,
startBlock + 1,
{"from": validator},
)
assert "RootUpdated" in tx.events.keys()
with brownie.reverts("Incorrect start block"):
rewardsContract.proposeRoot(
root,
contentHash,
rewardsContract.currentCycle() + 1,
rewardsContract.lastPublishStartBlock() + 1,
startBlock + 1,
{"from": proposer},
)
# Claim as a user
rewardsContract = admin.deploy(badgerTree)
rewardsContract.initialize(admin, proposer, validator)
startBlock = rewardsContract.lastPublishEndBlock() + 1
endBlock = startBlock + 5
currCycle = rewardsContract.currentCycle()
nextCycle = currCycle + 1
currentRoot = rewardsContract.merkleRoot()
# Update to new root with xSushi and FARM
farmClaim = 100000000000
xSushiClaim = 5555555555
geyserRewards = DotMap(
{
"badger_tree": rewardsContract,
"claims": {
user.address: {FARM_ADDRESS: farmClaim, XSUSHI_ADDRESS: xSushiClaim},
accounts[5].address: {FARM_ADDRESS: 100, XSUSHI_ADDRESS: 100},
accounts[6].address: {FARM_ADDRESS: 100, XSUSHI_ADDRESS: 100},
},
"tokens": [FARM_ADDRESS, XSUSHI_ADDRESS],
"cycle": nextCycle,
}
)
pastRewards = DotMap(
{
"badger_tree": rewardsContract,
"claims": {},
"tokens": [FARM_ADDRESS, XSUSHI_ADDRESS],
"cycle": currCycle,
}
)
rewards_data = internal_generate_rewards_in_range(
rewards_assistant,
{"contentHash": currentRoot},
geyserRewards,
startBlock,
endBlock,
pastRewards,
)
rewardsContract.proposeRoot(
rewards_data["merkleTree"]["merkleRoot"],
rewards_data["rootHash"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["startBlock"],
rewards_data["merkleTree"]["endBlock"],
{"from": proposer},
)
rewardsContract.approveRoot(
rewards_data["merkleTree"]["merkleRoot"],
rewards_data["rootHash"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["startBlock"],
rewards_data["merkleTree"]["endBlock"],
{"from": validator},
)
# Claim as user who has xSushi and FARM
# This revert message means the claim was valid and it tried to transfer rewards
# it can't actually transfer any with this setup
with brownie.reverts("ERC20: transfer amount exceeds balance"):
rewardsContract.claim(
[FARM_ADDRESS, XSUSHI_ADDRESS], # FARM # XSUSHI
[farmClaim, xSushiClaim],
rewards_data["merkleTree"]["claims"][user]["index"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["claims"][user]["proof"],
[farmClaim, xSushiClaim],
{"from": user},
)
# Ensure tokens are as expected
# farmBalance = Contract.at("0xa0246c9032bC3A600820415aE600c6388619A14D").balanceOf(user)
# assert farmClaim == farmBalance
# Claim partial as a user
with brownie.reverts("ERC20: transfer amount exceeds balance"):
rewardsContract.claim(
[FARM_ADDRESS, XSUSHI_ADDRESS],
[farmClaim, xSushiClaim],
rewards_data["merkleTree"]["claims"][user]["index"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["claims"][user]["proof"],
[farmClaim - 100, xSushiClaim - 100],
{"from": user},
)
# Claim with MockToken and confirm new balance
mockToken = rewards_assistant.MockToken
mockContract = admin.deploy(mockToken)
mockContract.initialize([rewardsContract], [100000000])
startBlock = rewardsContract.lastPublishEndBlock() + 1
endBlock = startBlock + 5
currCycle = rewardsContract.currentCycle()
nextCycle = currCycle + 1
currentRoot = rewardsContract.merkleRoot()
geyserRewards = DotMap(
{
"badger_tree": rewardsContract,
"claims": {
user.address: {},
accounts[5].address: {},
accounts[6].address: {},
},
"tokens": [mockContract],
"cycle": nextCycle,
}
)
geyserRewards["claims"][user.address][str(mockContract)] = 100
geyserRewards["claims"][accounts[5].address][str(mockContract)] = 20
geyserRewards["claims"][accounts[6].address][str(mockContract)] = 0
pastRewards = DotMap(
{
"badger_tree": rewardsContract,
"claims": {},
"tokens": [mockContract],
"cycle": currCycle,
}
)
rewards_data = internal_generate_rewards_in_range(
rewards_assistant,
{"contentHash": currentRoot},
geyserRewards,
startBlock,
endBlock,
pastRewards,
)
rewardsContract.proposeRoot(
rewards_data["merkleTree"]["merkleRoot"],
rewards_data["rootHash"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["startBlock"],
rewards_data["merkleTree"]["endBlock"],
{"from": proposer},
)
rewardsContract.approveRoot(
rewards_data["merkleTree"]["merkleRoot"],
rewards_data["rootHash"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["startBlock"],
rewards_data["merkleTree"]["endBlock"],
{"from": validator},
)
rewardsContract.claim(
[mockContract],
[100],
rewards_data["merkleTree"]["claims"][user]["index"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["claims"][user]["proof"],
[100],
{"from": user},
)
assert mockContract.balanceOf(user) == 100
assert mockContract.balanceOf(str(rewardsContract)) == 100000000 - 100
# Try to claim with zero tokens all around, expect failure
rewardsContract = admin.deploy(badgerTree)
rewardsContract.initialize(admin, proposer, validator)
startBlock = rewardsContract.lastPublishEndBlock() + 1
endBlock = startBlock + 5
currCycle = rewardsContract.currentCycle()
nextCycle = currCycle + 1
currentRoot = rewardsContract.merkleRoot()
geyserRewards = DotMap(
{
"badger_tree": rewardsContract,
"claims": {
user.address: {FARM_ADDRESS: 0, XSUSHI_ADDRESS: 0},
accounts[5].address: {FARM_ADDRESS: 0, XSUSHI_ADDRESS: 0},
accounts[6].address: {FARM_ADDRESS: 0, XSUSHI_ADDRESS: 0},
},
"tokens": [FARM_ADDRESS, XSUSHI_ADDRESS], # FARM # XSUSHI
"cycle": nextCycle,
}
)
pastRewards = DotMap(
{
"badger_tree": rewardsContract,
"claims": {},
"tokens": [FARM_ADDRESS, XSUSHI_ADDRESS], # FARM # XSUSHI
"cycle": currCycle,
}
)
rewards_data = internal_generate_rewards_in_range(
rewards_assistant,
{"contentHash": currentRoot},
geyserRewards,
startBlock,
endBlock,
pastRewards,
)
rewardsContract.proposeRoot(
rewards_data["merkleTree"]["merkleRoot"],
rewards_data["rootHash"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["startBlock"],
rewards_data["merkleTree"]["endBlock"],
{"from": proposer},
)
rewardsContract.approveRoot(
rewards_data["merkleTree"]["merkleRoot"],
rewards_data["rootHash"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["startBlock"],
rewards_data["merkleTree"]["endBlock"],
{"from": validator},
)
with brownie.reverts("No tokens to claim"):
rewardsContract.claim(
[FARM_ADDRESS, XSUSHI_ADDRESS], # FARM # XSUSHI
[0, 0],
rewards_data["merkleTree"]["claims"][user]["index"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["claims"][user]["proof"],
[0, 0],
{"from": user},
)
def test_salary(setup):
rewards_assistant = setup
admin, proposer, validator = accounts[:3]
users = accounts[3:]
rewards_contract = admin.deploy(rewards_assistant.BadgerTree)
rewards_contract.initialize(admin, proposer, validator)
def make_salary_entry(recipient, token, total_amount, duration, start_time):
return DotMap(
{
"recipient": recipient,
"token": token,
"totalAmount": total_amount,
"duration": duration,
"startTime": start_time,
"endTime": start_time + duration,
}
)
def update_root(rewards_data):
rewards_contract.proposeRoot(
rewards_data["merkleTree"]["merkleRoot"],
rewards_data["rootHash"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["startBlock"],
rewards_data["merkleTree"]["endBlock"],
{"from": proposer},
)
rewards_contract.approveRoot(
rewards_data["merkleTree"]["merkleRoot"],
rewards_data["rootHash"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["startBlock"],
rewards_data["merkleTree"]["endBlock"],
{"from": validator},
)
def calculate_payment(salary_entry, start_block_time, end_block_time):
print(
f"salary_entry: {salary_entry}\nstart_block_time:\t{start_block_time}\nend_block_time: \t{end_block_time}"
)
if (
salary_entry.startTime <= end_block_time
and salary_entry.endTime > start_block_time
):
start_time = max(salary_entry.startTime, start_block_time)
end_time = min(salary_entry.endTime, end_block_time)
# Pro-rate the total amount by the fraction of the salary duration that
# overlaps the [start_block_time, end_block_time] window.
return (
salary_entry.totalAmount
* (end_time - start_time)
/ salary_entry.duration
)
return 0
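# Worked example (illustrative numbers): totalAmount = 1_000_000, duration = 100
# days, and a 25-day overlap window pays out exactly a quarter of the total:
# 1_000_000 * 25 / 100 = 250_000.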
mock_token = rewards_assistant.MockToken
mock_contract = admin.deploy(mock_token)
mock_contract.initialize([rewards_contract], [10_000_000_000_000_000_000_000_000])
salaries = [
make_salary_entry(
users[0].address,
mock_contract,
1_000_000_000_000_000_000,
SECS_PER_DAY * 360,
chain.time() - SECS_PER_DAY * 30,
),
make_salary_entry(
users[1].address,
mock_contract,
1_000_000_000_000_000_000,
SECS_PER_DAY * 180,
chain.time() - SECS_PER_DAY * 200,
),
make_salary_entry(
users[2].address,
mock_contract,
1_000_000_000_000_000_000,
SECS_PER_DAY * 180,
chain.time() + SECS_PER_DAY * 30,
),
make_salary_entry(
users[3].address,
mock_contract,
1_000_000_000_000_000_000,
SECS_PER_DAY * 180,
chain.time() + SECS_PER_HOUR * 2,
),
]
void_state = DotMap(
{
"badger_tree": rewards_contract,
"claims": {},
"tokens": [mock_contract.address],
"cycle": rewards_contract.currentCycle(),
}
)
initial_state = DotMap(
{
"badger_tree": rewards_contract,
"claims": {users[20].address: {mock_contract.address: 456}},
"tokens": [mock_contract.address],
"cycle": rewards_contract.currentCycle() + 1,
}
)
update_root(
internal_generate_rewards_in_range(
rewards_assistant,
{"contentHash": rewards_contract.merkleRoot()},
initial_state,
rewards_contract.lastPublishEndBlock() + 1,
web3.eth.blockNumber,
void_state,
)
)
sleep_time = SECS_PER_HOUR * 4
chain.sleep(sleep_time)
chain.mine(50)
last_publish_time = rewards_contract.lastPublishTimestamp()
chain_time = chain.time()
claims = {
entry.recipient: {
mock_contract.address: calculate_payment(
entry, rewards_contract.lastPublishTimestamp(), chain.time()
)
}
for entry in salaries
}
assert claims[users[0]][mock_contract.address] > 0
assert claims[users[1]][mock_contract.address] == 0
assert claims[users[2]][mock_contract.address] == 0
assert claims[users[3]][mock_contract.address] > 0
update_state = DotMap(
{
"badger_tree": rewards_contract,
"claims": claims,
"tokens": [mock_contract],
"cycle": rewards_contract.currentCycle() + 1,
}
)
rewards_data = internal_generate_rewards_in_range(
rewards_assistant,
{"contentHash": rewards_contract.merkleRoot()},
update_state,
rewards_contract.lastPublishEndBlock() + 1,
web3.eth.blockNumber,
initial_state,
)
console.log(rewards_data)
update_root(rewards_data)
# TODO: Do something more than just verify that the above change was made
entry1 = salaries[0]
rewards_contract.claim(
[mock_contract],
[claims[entry1.recipient][mock_contract.address]],
rewards_data["merkleTree"]["claims"][entry1.recipient]["index"],
rewards_data["merkleTree"]["cycle"],
rewards_data["merkleTree"]["claims"][entry1.recipient]["proof"],
[calculate_payment(entry1, last_publish_time, chain_time)],
{"from": entry1.recipient},
)
| 31.137825
| 119
| 0.59278
| 1,750
| 20,333
| 6.698857
| 0.138857
| 0.058176
| 0.084193
| 0.017402
| 0.626717
| 0.609059
| 0.601638
| 0.55583
| 0.51352
| 0.497654
| 0
| 0.028435
| 0.299513
| 20,333
| 652
| 120
| 31.185583
| 0.794636
| 0.049722
| 0
| 0.570916
| 0
| 0
| 0.113231
| 0.007984
| 0
| 0
| 0.004355
| 0.001534
| 0.014363
| 1
| 0.014363
| false
| 0
| 0.019749
| 0.003591
| 0.044883
| 0.005386
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0249f5db53b2ce54527df608f97d99c1010a240
| 23,869
|
py
|
Python
|
HCm-uv/HCm-UV_v4.11/HCm-UV_v4.11.py
|
Borja-Perez-Diaz/HII-CHI-Mistry
|
d0dafc753c63246bf14b77807a885ddc7bd4bb99
|
[
"MIT"
] | null | null | null |
HCm-uv/HCm-UV_v4.11/HCm-UV_v4.11.py
|
Borja-Perez-Diaz/HII-CHI-Mistry
|
d0dafc753c63246bf14b77807a885ddc7bd4bb99
|
[
"MIT"
] | null | null | null |
HCm-uv/HCm-UV_v4.11/HCm-UV_v4.11.py
|
Borja-Perez-Diaz/HII-CHI-Mistry
|
d0dafc753c63246bf14b77807a885ddc7bd4bb99
|
[
"MIT"
] | null | null | null |
# Filename: HCm_UV_v4.11.py
import string
import numpy as np
import sys
#sys.stderr = open('errorlog.txt', 'w')
#Function for interpolation of grids
def interpolate(grid,z,zmin,zmax,n):
ncol = 9
vec = []
for col in range(ncol):
inter = 0
no_inter = 0
for row in range(0,len(grid)):
if grid[row,z] < zmin or grid[row,z] > zmax: continue
if z == 2: x = 0; y = 1
if z == 1: x = 0; y = 2
if z == 0: x = 1; y = 2
if row == (len(grid)-1):
vec.append(grid[row,col])
no_inter = no_inter + 1
elif grid[row,x] < grid[row+1,x] or grid[row,y] < grid[row+1,y] :
vec.append(grid[row,col])
no_inter = no_inter + 1
else:
inter = inter + 1
for index in range(0,n):
i = grid[row,col]+(index)*(grid[row+1,col]-grid[row,col])/n
vec.append(i)
out = np.transpose(np.reshape(vec,(-1,n*inter+no_inter)))
return out
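# Hedged usage sketch (added; window bounds are illustrative): refine a grid
# ten-fold along log(U) (z=2), re-sort, then refine along 12+log(O/H) (z=0),
# mirroring the interpolation loop further below.
# igrid = interpolate(grid, 2, -3.0, -2.0, 10)
# igrid = igrid[np.lexsort((igrid[:, 1], igrid[:, 2]))]
# igrid = interpolate(igrid, 0, 7.5, 8.5, 10)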
print (' ---------------------------------------------------------------------')
print (' This is HII-CHI-mistry for UV version 4.11')
print (' See Perez-Montero, & Amorin (2017) for details')
print (' Insert the name of your input text file with some or all of the following columns:')
print (' Lya 1216, CIV 1549, HeII 1640, OIII 1665, CIII 1909, Hb 4861, OIII 5007')
print ('in arbitrary units and reddening corrected. Each column must be given')
print ('with labels and followed by its corresponding flux error.')
print ('---------------------------------------------------------------------')
# Input file reading
if len(sys.argv) == 1:
if int(sys.version[0]) < 3:
input00 = raw_input('Insert input file name:')
else:
input00 = input('Insert input file name:')
else:
input00 = str(sys.argv[1])
try:
input0 = np.genfromtxt(input00,dtype=None,names=True, encoding = 'ascii')
print ('The input file is:'+input00)
except Exception:
print ('Input file error: It does not exist or has wrong format')
sys.exit(1)
print ('')
if input0.size == 1:
input1 = np.stack((input0,input0))
else:
input1 = input0
# Iterations for Montecarlo error derivation
if len(sys.argv) < 3:
n = 25
else:
n = int(sys.argv[2])
print ('The number of iterations for MonteCarlo simulation is: ',n)
print ('')
# Reading of models grids. These can be changed
print ('')
question = True
while question:
print('-------------------------------------------------')
print ('(1) POPSTAR with Chabrier IMF, age = 1 Myr')
print ('(2) BPASS v.2.1 a_IMF = 1.35, Mup = 300, age = 1Myr')
print('-------------------------------------------------')
if int(sys.version[0]) < 3:
sed = raw_input('Choose SED of the models:')
else:
sed = input('Choose SED of the models:')
if sed == '1' or sed == '2' : question = False
print ('')
question = True
while question:
if int(sys.version[0]) < 3:
inter = raw_input('Choose models [0] No interpolated [1] Interpolated: ')
else:
inter = input('Choose models [0] No interpolated [1] Interpolated: ')
if inter == '0' or inter == '1': question = False
print ('')
sed = int(sed)
inter = int(inter)
if sed==1 :
grid1 = np.loadtxt('C17_popstar_uv_v4.0.dat')
grid2 = np.loadtxt('C17_popstar_logU_adapted_emp_uv_v4.0.dat')
grid3 = np.loadtxt('C17_popstar_logU-CO_adapted_emp_uv_v4.0.dat')
if inter == 0:
sed_type = 'POPSTAR, age = 1 Myr, Chabrier IMF. No interpolation'
print ('No interpolation for the POPSTAR models is going to be used.')
print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O')
res_CO = 0.125
elif inter == 1:
sed_type = 'POPSTAR, age = 1 Myr, Chabrier IMF interpolated'
print ('Interpolation for the POPSTAR models is going to be used.')
print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O')
res_CO = 0.125
elif sed==2:
grid1 = np.loadtxt('C17_bpass_uv_v4.1.dat')
grid2 = np.loadtxt('C17_bpass_logU_adapted_emp_uv_v4.1.dat')
grid3 = np.loadtxt('C17_bpass_logU-CO_adapted_emp_uv_v4.1.dat')
if inter == 0:
sed_type = 'BPASS a_IMF = 1.35, M_up = 300, age = 1Myr. No interpolation'
print ('No interpolation for the BPASS models is going to be used.')
print ('The grid has a resolution of 0.1 dex for O/H and 0.125 dex for N/O')
res_CO = 0.125
elif inter == 1:
sed_type = 'BPASS a_IMF = 1.35, M_up = 300, age = 1Myr interpolated'
print ('Interpolation for the BPASS models is going to be used.')
print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for N/O')
res_CO = 0.125
grids = []
OHffs = []
eOHffs = []
COffs = []
eCOffs = []
logUffs = []
elogUffs = []
Label_ID = False
Label_Lya = False
Label_eLya = False
Label_CIV = False
Label_eCIV = False
Label_HeII = False
Label_eHeII = False
Label_OIII_1665 = False
Label_eOIII_1665 = False
Label_CIII = False
Label_eCIII = False
Label_OIII_5007 = False
Label_eOIII_5007 = False
Label_Hbeta = False
Label_eHbeta = False
for col in range(0,len(input1.dtype.names),1):
if input1.dtype.names[col] == 'ID':
Label_ID = True
if input1.dtype.names[col] == 'Lya_1216':
Label_Lya = True
if input1.dtype.names[col] == 'eLya_1216':
Label_eLya = True
if input1.dtype.names[col] == 'CIV_1549':
Label_CIV = True
if input1.dtype.names[col] == 'eCIV_1549':
Label_eCIV = True
if input1.dtype.names[col] == 'HeII_1640':
Label_HeII = True
if input1.dtype.names[col] == 'eHeII_1640':
Label_eHeII = True
if input1.dtype.names[col] == 'OIII_1665':
Label_OIII_1665 = True
if input1.dtype.names[col] == 'eOIII_1665':
Label_eOIII_1665 = True
if input1.dtype.names[col] == 'CIII_1909':
Label_CIII = True
if input1.dtype.names[col] == 'eCIII_1909':
Label_eCIII = True
if input1.dtype.names[col] == 'Hb_4861':
Label_Hbeta = True
if input1.dtype.names[col] == 'eHb_4861':
Label_eHbeta = True
if input1.dtype.names[col] == 'OIII_5007':
Label_OIII_5007 = True
if input1.dtype.names[col] == 'eOIII_5007':
Label_eOIII_5007 = True
if Label_ID == False:
Names = np.arange(1,input1.size+1,1)
else:
Names = input1['ID']
if Label_Lya == False:
Lya_1216 = np.zeros(input1.size)
else:
Lya_1216 = input1['Lya_1216']
if Label_eLya == False:
eLya_1216 = np.zeros(input1.size)
else:
eLya_1216 = input1['eLya_1216']
if Label_CIV == False:
CIV_1549 = np.zeros(input1.size)
else:
CIV_1549 = input1['CIV_1549']
if Label_eCIV == False:
eCIV_1549 = np.zeros(input1.size)
else:
eCIV_1549 = input1['eCIV_1549']
if Label_HeII == False:
HeII_1640 = np.zeros(input1.size)
else:
HeII_1640 = input1['HeII_1640']
if Label_eHeII == False:
eHeII_1640 = np.zeros(input1.size)
else:
eHeII_1640 = input1['eHeII_1640']
if Label_OIII_1665 == False:
OIII_1665 = np.zeros(input1.size)
else:
OIII_1665 = input1['OIII_1665']
if Label_eOIII_1665 == False:
eOIII_1665 = np.zeros(input1.size)
else:
eOIII_1665 = input1['eOIII_1665']
if Label_CIII == False:
CIII_1909 = np.zeros(input1.size)
else:
CIII_1909 = input1['CIII_1909']
if Label_eCIII == False:
eCIII_1909 = np.zeros(input1.size)
else:
eCIII_1909 = input1['eCIII_1909']
if Label_Hbeta == False:
Hb_4861 = np.zeros(len(input1))
else:
Hb_4861 = input1['Hb_4861']
if Label_eHbeta == False:
eHb_4861 = np.zeros(input1.size)
else:
eHb_4861 = input1['eHb_4861']
if Label_OIII_5007 == False:
OIII_5007 = np.zeros(input1.size)
else:
OIII_5007 = input1['OIII_5007']
if Label_eOIII_5007 == False:
eOIII_5007 = np.zeros(input1.size)
else:
eOIII_5007 = input1['eOIII_5007']
output = np.zeros(input1.size, dtype=[('ID', 'U12'), ('Lya_1216', float),('eLya_1216', float),('CIV_1549', float),('eCIV_1549', float),('HeII_1640', float),('eHeII_1640', float),('OIII_1665', float),('eOIII_1665', float),('CIII_1909', float),('eCIII_1909', float),('Hb_4861', float),('eHb_4861', float),('OIII_5007', float),('eOIII_5007', float),('grid', int),('OH', float),('eOH', float),('CO', float),('eCO', float),('logU', float),('elogU', float)] )
output['ID'] = Names
output['Lya_1216'] = Lya_1216
output['eLya_1216'] = eLya_1216
output['CIV_1549'] = CIV_1549
output['eCIV_1549'] = eCIV_1549
output['HeII_1640'] = HeII_1640
output['eHeII_1640'] = eHeII_1640
output['OIII_1665'] = OIII_1665
output['eOIII_1665'] = eOIII_1665
output['CIII_1909'] = CIII_1909
output['eCIII_1909'] = eCIII_1909
output['Hb_4861'] = Hb_4861
output['eHb_4861'] = eHb_4861
output['OIII_5007'] = OIII_5007
output['eOIII_5007'] = eOIII_5007
print ('Reading grids ....')
print ('')
print ('')
print ('----------------------------------------------------------------')
print ('(%) ID Grid 12+log(O/H) log(C/O) log(U)')
print ('-----------------------------------------------------------------')
# Beginning of loop of calculation
count = 0
for tab in range(0,len(input1),1):
count = count + 1
OH_mc = []
CO_mc = []
logU_mc = []
OHe_mc = []
COe_mc = []
logUe_mc = []
for monte in range(0,n,1):
OH_p = 0
logU_p = 0
CO_p = 0
den_OH = 0
den_CO = 0
OH_e = 0
CO_e = 0
logU_e = 0
den_OH_e = 0
den_CO_e = 0
tol_max = 1e2
Lya_1216_obs = 0
if Lya_1216[tab] == 0:
Lya_1216_obs = 0
else:
while Lya_1216_obs <= 0:
Lya_1216_obs = np.random.normal(Lya_1216[tab],eLya_1216[tab]+1e-5)
CIV_1549_obs = 0
if CIV_1549[tab] == 0:
CIV_1549_obs = 0
else:
while CIV_1549_obs <= 0:
CIV_1549_obs = np.random.normal(CIV_1549[tab],eCIV_1549[tab]+1e-5)
HeII_1640_obs = 0
if HeII_1640[tab] == 0:
HeII_1640_obs = 0
else:
while HeII_1640_obs <= 0:
HeII_1640_obs = np.random.normal(HeII_1640[tab],eHeII_1640[tab]+1e-5)
OIII_1665_obs = 0
if OIII_1665[tab] == 0:
OIII_1665_obs = 0
else:
while OIII_1665_obs <= 0:
OIII_1665_obs = np.random.normal(OIII_1665[tab],eOIII_1665[tab]+1e-5)
CIII_1909_obs = 0
if CIII_1909[tab] == 0:
CIII_1909_obs = 0
else:
while CIII_1909_obs <= 0:
CIII_1909_obs = np.random.normal(CIII_1909[tab],eCIII_1909[tab]+1e-5)
Hb_4861_obs = 0
if Hb_4861[tab] == 0:
Hb_4861_obs = 0
else:
while Hb_4861_obs <= 0:
Hb_4861_obs = np.random.normal(Hb_4861[tab],eHb_4861[tab]+1e-5)
OIII_5007_obs = 0
if OIII_5007[tab] == 0:
OIII_5007_obs = 0
else:
while OIII_5007_obs <= 0:
OIII_5007_obs = np.random.normal(OIII_5007[tab],eOIII_5007[tab]+1e-5)
if OIII_1665_obs == 0 or OIII_5007_obs == 0:
ROIII_obs = 0
else:
ROIII_obs = OIII_5007_obs/OIII_1665_obs
if Lya_1216_obs == 0 or CIII_1909_obs == 0:
C34_obs = 0
else:
C34_obs = (CIII_1909_obs + CIV_1549_obs) / (Lya_1216_obs)
if HeII_1640_obs == 0 or CIII_1909_obs == 0:
C34He2_obs = 0
else:
C34He2_obs = (CIII_1909_obs + CIV_1549_obs) / (HeII_1640_obs)
if CIII_1909_obs == 0 or OIII_1665_obs == 0:
C3O3_obs = -10
else:
C3O3_obs = np.log10((CIII_1909_obs) / (OIII_1665_obs))
if CIII_1909_obs == 0 or CIV_1549_obs == 0:
C3C4_obs = 0
else:
C3C4_obs = (CIII_1909_obs/CIV_1549_obs)
if CIII_1909_obs == 0 or Hb_4861_obs == 0:
C34Hb_obs = 0
else:
C34Hb_obs = (CIII_1909_obs + CIV_1549_obs) / Hb_4861_obs
# Selection of grid
if OIII_1665[tab] > 0 and OIII_5007[tab] > 0:
grid = grid1
if monte == n-1: grids.append(1)
grid_type = 1
elif OIII_1665[tab] > 0 and CIII_1909[tab] > 0:
grid = grid2
if monte == n-1: grids.append(2)
grid_type = 2
else:
grid = grid3
if monte == n-1: grids.append(3)
grid_type = 3
# Calculation of C/O
if C3O3_obs == -10:
CO = -10
else:
CHI_ROIII = 0
CHI_C3O3 = 0
CHI_CO = 0
for index in grid:
if ROIII_obs == 0:
CHI_ROIII = 0
elif index[6] == 0 or index[8] == 0:
CHI_ROIII = tol_max
else:
CHI_ROIII = (index[8]/index[6] - ROIII_obs)**2/(index[8]/index[6])
if C3O3_obs == -10:
CHI_C3O3 = 0
elif index[7] == 0 or index[6] == 0:
CHI_C3O3 = tol_max
else:
CHI_C3O3 =(np.log10((index[7])/index[6]) - C3O3_obs)**2/np.log10((index[7])/(index[6]+1e-5))
CHI_CO = (CHI_ROIII**2 + CHI_C3O3**2 )**0.5
if CHI_CO == 0:
CO_p = CO_p
den_CO = den_CO
else:
CO_p = index[1] /np.exp(CHI_CO) + CO_p
den_CO = 1 / np.exp(CHI_CO) + den_CO
CO = CO_p / den_CO
# Calculation of C/O error
if C3O3_obs == -10:
eCO = 0
else:
CHI_ROIII = 0
CHI_C3O3 = 0
CHI_CO = 0
for index in grid:
if ROIII_obs == 0:
CHI_ROIII = 0
elif index[6] == 0 or index[8] == 0:
CHI_ROIII = tol_max
else:
CHI_ROIII = (index[8]/index[6] - ROIII_obs)**2/(index[8]/index[6])
if C3O3_obs == -10:
CHI_C3O3 = 0
elif index[7] == 0 or index[6] == 0:
CHI_C3O3 = tol_max
else:
CHI_C3O3 =(np.log10((index[7])/index[6]) - C3O3_obs)**2/np.log10((index[7])/(index[6]+1e-5))
CHI_CO = (CHI_ROIII**2 + CHI_C3O3**2 )**0.5
if CHI_CO == 0:
CO_e = CO_e
den_CO_e = den_CO_e
else:
CO_e = (index[1] - CO)**2 / np.exp(CHI_CO) + CO_e
den_CO_e = 1 /np.exp(CHI_CO) + den_CO_e
eCO = CO_e / den_CO_e
# Calculation of O/H and log U
if C34_obs == 0 and ROIII_obs == 0 and C34Hb_obs == 0 and C34He2_obs == 0 :
OH = 0
logU = 0
else:
CHI_ROIII = 0
CHI_C3C4 = 0
CHI_C34He2 = 0
CHI_C34 = 0
CHI_C34Hb = 0
CHI_OH = 0
for index in grid:
if CO > -10 and np.abs(index[1] - CO) > np.abs(eCO+0.125):
continue
if CIV_1549_obs > 0 and index[4] == 0:
continue
if HeII_1640_obs > 0 and index[5] == 0:
continue
else:
if ROIII_obs == 0:
CHI_ROIII = 0
elif index[6] == 0 or index[8] == 0:
CHI_ROIII = tol_max
else:
CHI_ROIII = (index[8]/index[6] - ROIII_obs)**2/(index[8]/index[6])
if C34_obs == 0:
CHI_C34 = 0
elif index[3] == 0 or index[7] == 0:
CHI_C34 = tol_max
else:
CHI_C34 = ((index[7]+index[4])/index[3] - C34_obs)**2/((index[7]+index[4])/index[3])
if C34He2_obs == 0:
CHI_C34He2 = 0
elif index[5] == 0 or index[7] == 0:
CHI_C34He2 = tol_max
else:
CHI_C34He2 = ((index[7]+index[4])/index[5] - C34He2_obs)**2/((index[7]+index[4])/index[5])
if C34Hb_obs == 0:
CHI_C34Hb = 0
elif index[7] == 0:
CHI_C34Hb = tol_max
else:
CHI_C34Hb = (index[7]+index[4] - C34Hb_obs)**2/(index[7]+index[4])
if C3C4_obs == 0:
CHI_C3C4 = 0
elif index[4] == 0 or index[7] == 0:
CHI_C3C4 = tol_max
else:
CHI_C3C4 = (index[7]/index[4] - C3C4_obs)**2/(index[7]/index[4])
if C34Hb_obs > 0:
CHI_OH = (CHI_ROIII**2 + CHI_C34Hb**2 + CHI_C3C4**2)**0.5
else:
CHI_OH = (CHI_ROIII**2 + CHI_C34**2 + CHI_C34He2**2 + CHI_C3C4**2 )**0.5
if CHI_OH == 0:
OH_p = OH_p
logU_p = logU_p
den_OH = den_OH
else:
OH_p = index[0] / np.exp(CHI_OH) + OH_p
logU_p = index[2] / np.exp(CHI_OH) + logU_p
den_OH = 1 /np.exp(CHI_OH) + den_OH
OH = OH_p / den_OH
logU = logU_p / den_OH
# Calculation of error of O/H and logU
if C34_obs == 0 and ROIII_obs == 0 and C34Hb_obs == 0 and C34He2_obs == 0:
eOH = 0
elogU = 0
else:
CHI_ROIII = 0
CHI_C3C4 = 0
CHI_C34 = 0
CHI_C34He2 = 0
CHI_C34Hb = 0
CHI_OH = 0
for index in grid:
if CO > -10 and np.abs(index[1] - CO) > np.abs(eCO+res_CO):
continue
if CIV_1549_obs > 0 and index[4] == 0:
continue
if HeII_1640_obs > 0 and index[5] == 0:
continue
else:
if ROIII_obs == 0:
CHI_ROIII = 0
elif index[6] == 0 or index[8] == 0:
CHI_ROIII = tol_max
else:
CHI_ROIII = (index[8]/index[6] - ROIII_obs)**2/(index[8]/index[6])
if C34_obs == 0:
CHI_C34 = 0
elif index[3] == 0 or index[7] == 0:
CHI_C34 = tol_max
else:
CHI_C34 = ((index[7]+index[4])/index[3] - C34_obs)**2/((index[7]+index[4])/index[3])
if C34He2_obs == 0:
CHI_C34He2 = 0
elif index[5] == 0 or index[7] == 0:
CHI_C34He2 = tol_max
else:
CHI_C34He2 = ((index[7]+index[4])/index[5] - C34He2_obs)**2/((index[7]+index[4])/index[5])
if C34Hb_obs == 0:
CHI_C34Hb = 0
elif index[7] == 0:
CHI_C34Hb = tol_max
else:
CHI_C34Hb = (index[7]+index[4] - C34Hb_obs)**2/(index[7]+index[4])
if C3C4_obs == 0:
CHI_C3C4 = 0
elif index[4] == 0 or index[7] == 0:
CHI_C3C4 = tol_max
else:
CHI_C3C4 = (index[7]/index[4] - C3C4_obs)**2/(index[7]/index[4])
if C34Hb_obs > 0:
CHI_OH = (CHI_ROIII**2 + CHI_C34Hb**2 + CHI_C3C4**2)**0.5
else:
CHI_OH = (CHI_ROIII**2 + CHI_C34**2 + CHI_C34He2**2 + CHI_C3C4**2 )**0.5
if CHI_OH == 0:
OH_e = OH_e
logU_e = logU_e
den_OH_e = den_OH_e
else:
OH_e = (index[0] - OH)**2 /np.exp(CHI_OH) + OH_e
logU_e = (index[2] - logU)**2 /np.exp(CHI_OH) + logU_e
den_OH_e = 1 /np.exp(CHI_OH) + den_OH_e
eOH = OH_e / den_OH_e
elogU = logU_e / den_OH_e
# Iterations for interpolated models
if inter == 0 or (OH == 0 and CO == -10):
COf = CO
OHf = OH
logUf = logU
elif inter == 1:
if OH == 0:
igrid = grid
else:
igrid = interpolate(grid,2,logU-elogU-0.25,logU+elogU+0.25,10)
igrid = igrid[np.lexsort((igrid[:,1],igrid[:,2]))]
igrid = interpolate(igrid,0,OH-eOH-0.1,OH+eOH+0.1,10)
if CO == -10:
igrid = igrid
else:
igrid = igrid[np.lexsort((igrid[:,0],igrid[:,2]))]
igrid = interpolate(igrid,1,CO-eCO-0.125,CO+eCO+0.125,10)
CHI_ROIII = 0
CHI_C3O3 = 0
CHI_C3C4 = 0
CHI_C34He2 = 0
CHI_C34 = 0
CHI_C34Hb = 0
CHI_OH = 0
CHI_CO = 0
for index in igrid:
if ROIII_obs == 0:
CHI_ROIII = 0
elif index[6] == 0 or index[8] == 0:
CHI_ROIII = tol_max
else:
CHI_ROIII = (index[8]/index[6] - ROIII_obs)**2/(index[8]/index[6])
if C3O3_obs == -10:
CHI_C3O3 = 0
elif index[7] == 0 or index[6] == 0:
CHI_C3O3 = tol_max
else:
CHI_C3O3 =(np.log10((index[7])/index[6]) - C3O3_obs)**2/np.log10((index[7])/(index[6]+1e-5))
if C34_obs == 0:
CHI_C34 = 0
elif index[4] == 0:
CHI_C34 = tol_max
else:
CHI_C34 = ((index[6]+index[7])/index[3] - C34_obs)**2/((index[6]+index[7])/index[3])
if C34Hb_obs == 0:
CHI_C34Hb = 0
elif index[4] == 0:
CHI_C34Hb = tol_max
else:
CHI_C34Hb = (index[6]+index[7] - C34Hb_obs)**2/(index[6]+index[7])
if C3C4_obs == 0:
CHI_C3C4 = 0
elif index[7] == 0 or index[6] == 0:
CHI_C3C4 = tol_max
else:
CHI_C3C4 = (index[6]/index[7] - C3C4_obs)**2/(index[6]/index[7])
if C34Hb_obs > 0:
CHI_OH = (CHI_ROIII**2 + CHI_C34Hb**2 + CHI_C3C4**2)**0.5
else:
CHI_OH = (CHI_ROIII**2 + CHI_C34**2 + CHI_C34He2**2 + CHI_C3C4**2 )**0.5
if CHI_OH == 0:
OH_p = OH_p
logU_p = logU_p
den_OH = den_OH
else:
OH_p = index[0] /np.exp(CHI_OH) + OH_p
logU_p = index[2] /np.exp(CHI_OH) + logU_p
den_OH = 1 /np.exp(CHI_OH) + den_OH
CHI_CO = (CHI_ROIII**2 + CHI_C3O3**2 )**0.5
if CHI_CO == 0:
CO_p = CO_p
den_CO = den_CO
else:
CO_p = index[1] /np.exp(CHI_CO)**2 + CO_p
den_CO = 1 /np.exp(CHI_CO)**2 + den_CO
if CO == -10:
COf = -10
else:
COf = CO_p / den_CO
if OH == 0:
OHf = 0
logUf = 0
else:
OHf = OH_p / den_OH
logUf = logU_p / den_OH
OH_mc.append(OHf)
CO_mc.append(COf)
logU_mc.append(logUf)
OHe_mc.append(eOH)
COe_mc.append(eCO)
logUe_mc.append(elogU)
OHff = np.mean(OH_mc)
eOHff = (np.std(OH_mc)**2+np.mean(OHe_mc)**2)**0.5
COff = np.mean(CO_mc)
eCOff = (np.std(CO_mc)**2+np.mean(COe_mc)**2)**0.5
logUff = np.mean(logU_mc)
elogUff = (np.std(logU_mc)**2+np.mean(logUe_mc)**2)**0.5
OHffs.append(OHff)
eOHffs.append(eOHff)
COffs.append(COff)
eCOffs.append(eCOff)
logUffs.append(logUff)
elogUffs.append(elogUff)
if input0.size == 1 and tab==0: continue
print (round(100*(count)/float(input1.size),1),'%',Names[tab],grid_type,'', round(OHff,2), round(eOHff,2),'',round(COff,2), round(eCOff,2), '',round(logUff,2), round(elogUff,2))
output['grid'] = grids
output['OH'] = OHffs
output['eOH'] = eOHffs
output['CO'] = COffs
output['eCO'] = eCOffs
output['logU'] = logUffs
output['elogU'] = elogUffs
if input0.size == 1: output = np.delete(output,obj=1,axis=0)
lineas_header = [' HII-CHI-mistry_UV v.4.11 output file', 'Input file:'+input00,'Iterations for MonteCarlo: '+str(n),'Used models: '+sed_type,'','ID. Lya eLya 1549 e1549 1640 e1640 1665 e1665 1909 e1909 Hbeta eHbeta 5007 e5007 i O/H eO/H C/O eC/O logU elogU']
header = '\n'.join(lineas_header)
np.savetxt(input00+'_hcm-uv-output.dat',output,fmt=' '.join(['%s']*1+['%.3f']*14+['%i']+['%.2f']*6),header=header)
print ('________________________________')
print ('Results are stored in '+input00+'_hcm-uv-output.dat')
| 30.759021
| 453
| 0.526541
| 3,539
| 23,869
| 3.342187
| 0.083639
| 0.023334
| 0.02232
| 0.020883
| 0.498647
| 0.440227
| 0.36422
| 0.319412
| 0.303855
| 0.284156
| 0
| 0.118509
| 0.325485
| 23,869
| 775
| 454
| 30.79871
| 0.616149
| 0.016884
| 0
| 0.458529
| 0
| 0.010955
| 0.129669
| 0.025712
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001565
| false
| 0.01252
| 0.004695
| 0
| 0.007825
| 0.057903
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b02d1a840f2e9ca574098b991b8f37e1b954c866
| 979
|
py
|
Python
|
excel2.py
|
darkless456/Python
|
1ba37d028e4a818ccfffc18682c1bac15554e3ac
|
[
"MIT"
] | null | null | null |
excel2.py
|
darkless456/Python
|
1ba37d028e4a818ccfffc18682c1bac15554e3ac
|
[
"MIT"
] | null | null | null |
excel2.py
|
darkless456/Python
|
1ba37d028e4a818ccfffc18682c1bac15554e3ac
|
[
"MIT"
] | null | null | null |
# excel2.py
import xlrd
def print_xls(path):
xlsFile = xlrd.open_workbook(path)
try:
mySheet = xlsFile.sheets()[0] # access the first sheet (index 0); xlsFile.sheet_by_name('sheetName') selects a sheet by name
except Exception:
print('no such sheet in file')
return
print('%d rows, %d cols' % (mySheet.nrows, mySheet.ncols)) # print how many rows (nrows) and columns (ncols) the sheet has
for row in range(0, mySheet.nrows):
temp = ''
for col in range(0, mySheet.ncols):
if mySheet.cell(row, col).value is not None:
temp += str(mySheet.cell(row, col).value) + '\t'
print(temp)
if __name__ == '__main__':
print_xls('D:\\python_path\\sample_ex.xls')
'''
Modules are objects, and every module has a built-in attribute __name__. A module's
__name__ value depends on how the module is used. If you import a module, __name__ is
usually the module's file name, without the path or extension. But you can also run a
module directly, like a standard program, in which case __name__ takes the special
default value "__main__".
Running the .py file directly from the command line sets __name__ to '__main__';
after importing the same .py file, __name__ is no longer '__main__';
so `if __name__ == '__main__'` tests whether the .py file is being run directly.
'''
| 27.194444
| 96
| 0.670072
| 126
| 979
| 4.738095
| 0.579365
| 0.026801
| 0.026801
| 0.050251
| 0.073702
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007634
| 0.19714
| 979
| 35
| 97
| 27.971429
| 0.751908
| 0.091931
| 0
| 0
| 0
| 0
| 0.13438
| 0.052356
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.176471
| 0.294118
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b02f9eadae5afd900218c21f9e3251e4c4f3cf07
| 1,162
|
py
|
Python
|
reth_buffer/reth_buffer/__init__.py
|
sosp2021/Reth
|
10c032f44a25049355ebdd97a2cb3299e8c3fb82
|
[
"MIT"
] | null | null | null |
reth_buffer/reth_buffer/__init__.py
|
sosp2021/Reth
|
10c032f44a25049355ebdd97a2cb3299e8c3fb82
|
[
"MIT"
] | 1
|
2021-08-10T02:58:58.000Z
|
2021-08-10T02:58:58.000Z
|
reth_buffer/reth_buffer/__init__.py
|
sosp2021/reth
|
10c032f44a25049355ebdd97a2cb3299e8c3fb82
|
[
"MIT"
] | null | null | null |
import multiprocessing as mp
import portpicker
from .client import Client, NumpyLoader, TorchCudaLoader
from .sampler import PERSampler
from .server.main_loop import main_loop
from .utils import get_local_ip
def start_server(
capacity, batch_size, host=None, port=None, samplers=None, cache_policy=None
):
if host is None:
host = get_local_ip()
if port is None:
port = portpicker.pick_unused_port()
meta_addr = f"tcp://{host}:{port}"
ctx = mp.get_context("spawn")
proc = ctx.Process(
target=main_loop,
args=(capacity, batch_size, meta_addr, samplers, cache_policy),
)
proc.start()
return proc, meta_addr
def start_per(
capacity,
batch_size,
alpha=0.6,
beta=0.4,
sample_start=1000,
num_sampler_procs=1,
host=None,
port=None,
cache_policy=None,
):
samplers = [
{
"sampler_cls": PERSampler,
"num_procs": num_sampler_procs,
"sample_start": sample_start,
"kwargs": {"alpha": alpha, "beta": beta},
}
]
return start_server(capacity, batch_size, host, port, samplers, cache_policy)
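# Hedged usage sketch (added, not part of the original module): launch a
# prioritized replay server and tear it down; capacity and batch size are
# illustrative values.
# if __name__ == "__main__":
#     proc, meta_addr = start_per(capacity=100_000, batch_size=64)
#     print("reth_buffer serving at", meta_addr)
#     proc.terminate()
#     proc.join()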
| 23.24
| 81
| 0.645439
| 148
| 1,162
| 4.844595
| 0.391892
| 0.072524
| 0.09484
| 0.066946
| 0.089261
| 0.089261
| 0
| 0
| 0
| 0
| 0
| 0.010333
| 0.25043
| 1,162
| 49
| 82
| 23.714286
| 0.812859
| 0
| 0
| 0.04878
| 0
| 0
| 0.061102
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.146341
| 0
| 0.243902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b02fad481b4d3cb3263f98acf09c40e1f2669bfa
| 7,171
|
py
|
Python
|
agent.py
|
FlowerForAlgernon/rainbow
|
78492ba572e2f8b4b2228d2ca625af94a09ee696
|
[
"Apache-2.0"
] | 1
|
2022-03-23T02:02:10.000Z
|
2022-03-23T02:02:10.000Z
|
agent.py
|
FlowerForAlgernon/rainbow
|
78492ba572e2f8b4b2228d2ca625af94a09ee696
|
[
"Apache-2.0"
] | null | null | null |
agent.py
|
FlowerForAlgernon/rainbow
|
78492ba572e2f8b4b2228d2ca625af94a09ee696
|
[
"Apache-2.0"
] | null | null | null |
import random
import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
from memory import Transition, ReplayMemory, PrioritizedReplayMemory, NStepMemory
from DQN import DQN, DuelingDQN, NoisyDQN, DistributionalDQN
class Agent:
def __init__(self, config):
# Distributional DQN
self.support = torch.linspace(config.v_min, config.v_max, config.atom_size).to(config.device)
self.policy_net = DistributionalDQN(config.c, config.h, config.w, config.n_actions, config.atom_size, self.support).to(config.device)
self.target_net = DistributionalDQN(config.c, config.h, config.w, config.n_actions, config.atom_size, self.support).to(config.device)
self.target_net.load_state_dict(self.policy_net.state_dict())
self.target_net.eval()
#self.memory = ReplayMemory(config.memory_size)
self.memory = PrioritizedReplayMemory(config.memory_size, config.alpha)
self.memory_n = NStepMemory(config.memory_size, config.alpha, config.gamma, config.n_step)
self.optimizer = optim.RMSprop(self.policy_net.parameters(), lr=config.learning_rate, eps=0.001, alpha=0.95)
@staticmethod
def get_state(obs, config):
state = np.array(obs)[14:77,:,:]
state = np.ascontiguousarray(state.transpose((2, 0, 1)), dtype=np.float)
state = torch.from_numpy(state / 255)
return state.unsqueeze(0).to(config.device)
def select_action(self, state, epsilon, config):
if random.random() > epsilon:
with torch.no_grad():
return self.policy_net(state).max(1)[1].view(1,1).to(config.device)
else:
return torch.tensor([[random.randrange(4)]], device=config.device, dtype=torch.long)
def transition_to_tensor(self, transitions, config):
# Move each sampled transition's fields onto the configured device;
# `config` must be passed in explicitly (it was previously an undefined global).
for i in range(len(transitions)):
transitions[i][0] = torch.tensor(transitions[i][0]).to(config.device)
transitions[i][1] = torch.tensor(transitions[i][1]).to(config.device)
transitions[i][2] = torch.tensor(transitions[i][2]).to(config.device)
transitions[i][3] = torch.tensor(transitions[i][3]).to(config.device)
return transitions
def optimize_model(self, config):
#transitions = self.memory.sample(config.batch_size)
# PrioritizedReplayMemory
transitions, weights, indices = self.memory.sample(config.batch_size, config.beta)
transitions = self.transition_to_tensor(transitions, config)
batch = Transition(*zip(*transitions))
loss, weights_loss = self.get_loss(batch, config, weights, config.gamma)
# N Step
transitions_n, _, _ = self.memory_n.sample_from_indices(config.batch_size, config.beta, indices)
transitions_n = self.transition_to_tensor(transitions_n, config)
batch_n = Transition(*zip(*transitions_n))
gamma_n = config.gamma ** config.n_step
loss_n, weights_loss_n = self.get_loss(batch_n, config, weights, gamma_n)
weights_loss += weights_loss_n
self.optimizer.zero_grad()
#loss.backward()
# PrioritizedReplayMemory
weights_loss.backward()
for param in self.policy_net.parameters():
param.grad.data.clamp_(-1, 1)
self.optimizer.step()
# PrioritizedReplayMemory
loss_for_prior = loss.detach().cpu().numpy()
new_priorities = loss_for_prior + config.prior_eps
self.memory.update_priorities(indices, new_priorities)
# N Step
self.memory_n.update_priorities(indices, new_priorities)
# Noisy Net
self.policy_net.reset_noise()
self.target_net.reset_noise()
def get_loss(self, batch, config, weights, gamma):
non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)), device=config.device, dtype=torch.bool)
non_final_next_states = torch.cat([s for s in batch.next_state if s is not None])
state_batch = torch.cat(batch.state)
action_batch = torch.cat(batch.action)
reward_batch = torch.cat(batch.reward).unsqueeze(1)
state_action_values = self.policy_net(state_batch).gather(1, action_batch)
next_state_action_values = torch.zeros(config.batch_size, device=config.device).unsqueeze(1)
next_state_action_values[non_final_mask] = self.target_net(non_final_next_states).gather(
1, self.policy_net(non_final_next_states).detach().argmax(dim=1, keepdim=True)
).detach()
expected_state_action_values = reward_batch + gamma * next_state_action_values
#loss = F.smooth_l1_loss(state_action_values, expected_state_action_values)
# PrioritizedReplayMemory
loss = F.smooth_l1_loss(state_action_values, expected_state_action_values, reduction="none")
weights = torch.FloatTensor(np.array(weights).reshape(-1, 1)).to(config.device)
weights_loss = torch.mean(weights * loss)
return loss, weights_loss
def get_DistributionalDQN_loss(self, batch, config, weights, gamma):
state_batch = torch.cat(batch.state)
next_state_batch = torch.cat(batch.next_state)
action_batch = torch.cat(batch.action)
reward_batch = torch.cat(batch.reward).unsqueeze(1)
done_batch = torch.tensor([1.0 if s is None else 0.0 for s in batch.next_state], device=config.device).unsqueeze(1) # done=1 marks terminal transitions; torch.cat cannot join raw Python numbers
delta_z = float(config.v_max - config.v_min) / (config.atom_size - 1)
with torch.no_grad():
next_action = self.policy_net(next_state_batch).argmax(1)
next_dist = self.target_net.dist(next_state_batch)
next_dist = next_dist[range(config.batch_size), next_action]
t_z = reward_batch + (1 - done_batch) * gamma * self.support
t_z = t_z.clamp(min=config.v_min, max=config.v_max)
b = (t_z - config.v_min) / delta_z
l = b.floor().long()
u = b.ceil().long()
offset = (
torch.linspace(
0, (config.batch_size - 1) * config.atom_size, config.batch_size
).long()
.unsqueeze(1)
.expand(config.batch_size, config.atom_size)
.to(config.device)
)
proj_dist = torch.zeros(next_dist.size(), device=config.device)
proj_dist.view(-1).index_add_(
0, (l + offset).view(-1), (next_dist * (u.float() - b)).view(-1)
)
proj_dist.view(-1).index_add_(
0, (u + offset).view(-1), (next_dist * (b - l.float())).view(-1)
)
dist = self.policy_net.dist(state_batch)
log_p = torch.log(dist[range(config.batch_size), action_batch])
elementwise_loss = -(proj_dist * log_p).sum(1)
# PrioritizedReplayMemory
weights = torch.FloatTensor(np.array(weights).reshape(-1, 1)).to(config.device)
weights_loss = torch.mean(weights * elementwise_loss)
return elementwise_loss, weights_loss
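# Illustrative note (added): the projection in get_DistributionalDQN_loss splits
# each target atom's mass between its two nearest support atoms. With support
# [0, 5, 10] (delta_z = 5), a target value t_z = 7 gives b = 1.4, l = 1, u = 2,
# so 60% of the mass goes to atom 5 and 40% to atom 10.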
| 48.452703
| 142
| 0.647748
| 923
| 7,171
| 4.812568
| 0.183099
| 0.043224
| 0.037821
| 0.028366
| 0.352094
| 0.21792
| 0.156911
| 0.147006
| 0.147006
| 0.147006
| 0
| 0.011534
| 0.238321
| 7,171
| 148
| 143
| 48.452703
| 0.801721
| 0.048529
| 0
| 0.107143
| 0
| 0
| 0.0006
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.071429
| 0
| 0.196429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0370f00352f25c209bf62c39330309ded5b5b35
| 413
|
py
|
Python
|
xslt/apply.py
|
carlosduarteroa/smap
|
5760631dfaf3e85da26ce68bf542bf254bb92c80
|
[
"BSD-2-Clause"
] | 21
|
2015-02-06T21:55:59.000Z
|
2021-04-29T11:23:18.000Z
|
xslt/apply.py
|
carlosduarteroa/smap
|
5760631dfaf3e85da26ce68bf542bf254bb92c80
|
[
"BSD-2-Clause"
] | 9
|
2015-02-03T10:41:35.000Z
|
2020-02-18T12:46:10.000Z
|
xslt/apply.py
|
carlosduarteroa/smap
|
5760631dfaf3e85da26ce68bf542bf254bb92c80
|
[
"BSD-2-Clause"
] | 20
|
2015-02-06T00:09:19.000Z
|
2020-01-10T13:27:06.000Z
|
"""Apply a stylesheet to an XML file"""
import sys
from lxml import etree
# The usage string advertises one or more XML documents, so require at least
# two arguments rather than exactly two (and use Python 3 print syntax).
if len(sys.argv) < 3:
    print("Usage: %s <stylesheet> <xml doc> ..." % sys.argv[0], file=sys.stderr)
    sys.exit(1)
with open(sys.argv[1], "r") as stylesheet:
    transform = etree.XSLT(etree.XML(stylesheet.read()))
for xmlfile in sys.argv[2:]:
with open(xmlfile, "r") as fp:
doc = etree.parse(fp)
        # tostring() returns bytes; decode for clean text output on Python 3.
        print(etree.tostring(transform(doc), pretty_print=True).decode())
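# Hedged usage sketch (editor addition; the file names below are hypothetical):
#
#   python apply.py style.xsl doc1.xml doc2.xml
#
# applies style.xsl to each listed XML document and prints the pretty-printed
# result of the transformation to stdout.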
| 27.533333
| 76
| 0.639225
| 67
| 413
| 3.925373
| 0.597015
| 0.106464
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014706
| 0.176755
| 413
| 14
| 77
| 29.5
| 0.758824
| 0.079903
| 0
| 0
| 0
| 0
| 0.101604
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b037c4f526f6d6afd8598b5e5a8cb64d9cc7462a
| 7,122
|
py
|
Python
|
docs/conf.py
|
vlukes/io3d
|
34d048b7f737a5e56610879f6ab103128e8f0750
|
[
"MIT"
] | 8
|
2016-09-26T01:35:15.000Z
|
2022-02-23T04:05:23.000Z
|
docs/conf.py
|
vlukes/io3d
|
34d048b7f737a5e56610879f6ab103128e8f0750
|
[
"MIT"
] | 4
|
2016-05-18T11:04:56.000Z
|
2018-10-24T11:03:03.000Z
|
docs/conf.py
|
vlukes/io3d
|
34d048b7f737a5e56610879f6ab103128e8f0750
|
[
"MIT"
] | 6
|
2017-03-24T20:43:21.000Z
|
2021-08-23T06:05:34.000Z
|
# -*- coding: utf-8 -*-
#
# io3d documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 27 12:01:57 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../"))
# mock
import mock
MOCK_MODULES = [
"numpy",
"scipy",
"matplotlib",
"matplotlib.pyplot",
"matplotlib.widgets",
"scipy.io",
"yaml",
"pydicom",
# 'scipy.interpolate', 'scipy.ndimage', 'pycut', 'io3d', 'sed3', 'pysegbase',
# 'pysegbase.pycut', 'sklearn', 'skimage', 'dicom', 'vtk', 'vtk.util',
# 'larcc', 'larcc.VIEW', 'larcc.MKPOL', 'larcc.AA', 'larcc.INTERVALS',
# 'larcc.MAP',
"PyQt5",
"PyQt5.QtCore",
"PyQt5.QtGui", #'web', 'lar2psm',
# 'scipy.ndimage.measurements', 'lar', 'extern.lar', 'splines',
# 'scipy.sparse', 'skimage.filter', 'mapper', 'skelet3d', 'numpy.core',
# 'skimage.filters', 'skimage.restoration','skimage.io',
# 'gzip', 'cPickle',
# 'lbpLibrary', 'skimage.exposure', 'PyQt4.QVTKRenderWindowInteractor',
# 'matplotlib.backends', 'matplotlib.backends.backend_qt4agg', 'numpy.linalg',
# 'PyQt4.Qt', 'matplotlib.figure', 'skimage.morphology', 'gtk',
# 'pysegbase.seed_editor_qt', 'vtk.qt4', 'vtk.qt4.QVTKRenderWindowInteractor',
# 'seg2fem', 'skimage.segmentation', 'skimage.transform', 'matplotlib.patches', 'skimage.feature',
# 'scipy.ndimage.morphology', 'mpl_toolkits', 'mpl_toolkits.mplot3d',
# 'scipy.ndimage.measurement', 'scipy.ndimage.interpolation',
# 'matplotlib.backends.backend_gtkagg', 'cv2', 'skimage.measure', 'dicom2fem',
# 'morphsnakes', 'scipy.ndimage.filters', 'scipy.signal', 'pandas',
# 'scipy.stats', 'io3d.misc', 'lisa.extern.lar', 'scipy.cluster',
# 'scipy.cluster.vq', 'scipy.cluster.vq',
# 'ipdb', 'multipolyfit', 'PIL', 'yaml',
"SimpleITK",
# 'six', 'nearpy', 'SimpleITK', 'lar', 'pandas'
"ruamel.yaml.YAML",
]
#
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
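# Hedged illustration (editor addition): with the Mock objects registered in
# sys.modules above, importing any listed module succeeds without the real
# dependency installed, which is what lets autodoc import io3d on a bare
# docs-build machine. For example:
#
#   import numpy            # resolves to the Mock registered above
#   numpy.zeros((2, 2))     # returns another Mock instead of an ndarray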
# import sklearn
# sklearn.__version__ = '0.0'
# import scipy
# scipy.__version__ = '0.0'
# import pysegbase.pycut
# pysegbase.pycut.methods = ['graphcut']
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx.ext.coverage",
"sphinx.ext.imgmath",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# source_suffix = '.rst'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"io3d"
copyright = u"2017, Miroslav Jirik"
author = u"Miroslav Jirik"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u"1.2.3"
# The full version, including alpha/beta/rc tags.
release = u"1.2.3"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
"**": [
"relations.html", # needs 'show_related': True theme option to display
"searchbox.html",
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "io3ddoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "io3d.tex", u"io3d Documentation", u"Miroslav Jirik", "manual")
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "io3d", u"io3d Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"io3d",
u"io3d Documentation",
author,
"io3d",
"One line description of project.",
"Miscellaneous",
)
]
| 32.226244
| 102
| 0.664139
| 888
| 7,122
| 5.269144
| 0.427928
| 0.010259
| 0.005984
| 0.006412
| 0.089549
| 0.04424
| 0.03847
| 0.022654
| 0.022654
| 0.022654
| 0
| 0.011389
| 0.173968
| 7,122
| 220
| 103
| 32.372727
| 0.783954
| 0.739118
| 0
| 0.030303
| 0
| 0
| 0.294355
| 0
| 0
| 0
| 0
| 0.004545
| 0
| 1
| 0
| false
| 0
| 0.045455
| 0
| 0.045455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b03a815221b3f33cdcf33d82406be159b843f64d
| 2,096
|
py
|
Python
|
School-Management-System/teachers/views.py
|
GisaKaze/Python-Quarantine-Projects
|
29fabcb7e4046e6f3e9a19403e6d2490fe4b9fc4
|
[
"MIT"
] | null | null | null |
School-Management-System/teachers/views.py
|
GisaKaze/Python-Quarantine-Projects
|
29fabcb7e4046e6f3e9a19403e6d2490fe4b9fc4
|
[
"MIT"
] | null | null | null |
School-Management-System/teachers/views.py
|
GisaKaze/Python-Quarantine-Projects
|
29fabcb7e4046e6f3e9a19403e6d2490fe4b9fc4
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, get_object_or_404, redirect
from .models import TeacherInfo
from .forms import CreateTeacher
from django.contrib import messages
from django.core.paginator import Paginator
# Create your views here.
def teacher_list(request):
teachers = TeacherInfo.objects.all()
paginator = Paginator(teachers, 1)
page = request.GET.get('page')
paged_teachers = paginator.get_page(page)
context = {
"teachers": paged_teachers
}
return render(request, "teachers/teacher_list.html", context)
def single_teacher(request, teacher_id):
single_teacher = get_object_or_404(TeacherInfo, pk=teacher_id)
context = {
"single_teacher": single_teacher
}
return render(request, "teachers/single_teacher.html", context)
def create_teacher(request):
if request.method == "POST":
forms = CreateTeacher(request.POST, request.FILES or None)
if forms.is_valid():
forms.save()
messages.success(request, "Teacher Registration Successfully!")
return redirect("teacher_list")
else:
forms = CreateTeacher()
context = {
"forms": forms
}
return render(request, "teachers/create_teacher.html", context)
def edit_teacher(request, pk):
teacher_edit = TeacherInfo.objects.get(id=pk)
edit_teacher_forms = CreateTeacher(instance=teacher_edit)
if request.method == "POST":
edit_teacher_forms = CreateTeacher(request.POST, request.FILES or None, instance=teacher_edit)
if edit_teacher_forms.is_valid():
edit_teacher_forms.save()
messages.success(request, "Edit Teacher Info Successfully!")
return redirect("teacher_list")
context = {
"edit_teacher_forms": edit_teacher_forms
}
return render(request, "teachers/edit_teacher.html", context)
def delete_teacher(request, teacher_id):
teacher_delete = TeacherInfo.objects.get(id=teacher_id)
teacher_delete.delete()
messages.success(request, "Delete Teacher Info Successfully")
return redirect("teacher_list")
| 30.376812
| 102
| 0.705153
| 243
| 2,096
| 5.888889
| 0.222222
| 0.069182
| 0.067086
| 0.075472
| 0.246681
| 0.132774
| 0.132774
| 0.065688
| 0
| 0
| 0
| 0.004162
| 0.197519
| 2,096
| 68
| 103
| 30.823529
| 0.846611
| 0.010973
| 0
| 0.176471
| 0
| 0
| 0.143961
| 0.052174
| 0
| 0
| 0
| 0
| 0
| 1
| 0.098039
| false
| 0
| 0.098039
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b043e0116441bcee9ae6a5419079e591b49e7c1e
| 3,267
|
py
|
Python
|
tests/service/test_integer_converter_service.py
|
NeolithEra/WavesGatewayFramework
|
e7ba892427e1d0444f2bfdc2922c45ff5f4c4add
|
[
"MIT"
] | 25
|
2018-03-04T07:49:21.000Z
|
2022-03-28T05:20:50.000Z
|
tests/service/test_integer_converter_service.py
|
NeolithEra/WavesGatewayFramework
|
e7ba892427e1d0444f2bfdc2922c45ff5f4c4add
|
[
"MIT"
] | 22
|
2018-03-25T13:19:45.000Z
|
2020-11-28T17:21:08.000Z
|
tests/service/test_integer_converter_service.py
|
NeolithEra/WavesGatewayFramework
|
e7ba892427e1d0444f2bfdc2922c45ff5f4c4add
|
[
"MIT"
] | 31
|
2018-03-25T09:45:13.000Z
|
2022-03-24T05:32:18.000Z
|
import unittest
from unittest.mock import patch
from waves_gateway.model import Transaction, TransactionReceiver
from waves_gateway.service import IntegerConverterService
class IntegerConverterServiceSpec(unittest.TestCase):
@patch.multiple( # type: ignore
IntegerConverterService, __abstractmethods__=set())
def setUp(self):
self._integer_converter = IntegerConverterService()
def test_revert_amount_conversion(self):
res = self._integer_converter.revert_amount_conversion(40)
self.assertEqual(res, 40)
def test_convert_amount_to_int(self):
res = self._integer_converter.convert_amount_to_int(40.33)
self.assertEqual(res, 40.33)
def test_safely_convert_to_int_success(self):
with patch.object(self._integer_converter, 'convert_amount_to_int'):
self._integer_converter.convert_amount_to_int.return_value = 40
res = self._integer_converter.safely_convert_to_int(0.40)
self.assertEqual(res, 40)
def test_safely_convert_to_int_throws(self):
with patch.object(self._integer_converter, 'convert_amount_to_int'):
self._integer_converter.convert_amount_to_int.return_value = 0.40
with self.assertRaises(TypeError):
self._integer_converter.safely_convert_to_int(0.40)
def test_convert_transaction_to_int(self):
transaction = Transaction(
tx="79283647",
receivers=[
TransactionReceiver(address="9782364", amount=0.40),
TransactionReceiver(address="9782364", amount=0.30)
])
expected_result = Transaction(
tx="79283647",
receivers=[
TransactionReceiver(address="9782364", amount=40),
TransactionReceiver(address="9782364", amount=30)
])
with patch.object(self._integer_converter, 'safely_convert_to_int'):
def stub(amount: float):
return int(amount * 100)
self._integer_converter.safely_convert_to_int.side_effect = stub
actual_result = self._integer_converter.convert_transaction_to_int(transaction)
self.assertEqual(actual_result, expected_result)
self.assertEqual(self._integer_converter.safely_convert_to_int.call_count, 2)
def test_revert_transaction_conversion(self):
expected_result = Transaction(
tx="79283647",
receivers=[
TransactionReceiver(address="9782364", amount=0.40),
TransactionReceiver(address="9782364", amount=0.30)
])
transaction = Transaction(
tx="79283647",
receivers=[
TransactionReceiver(address="9782364", amount=40),
TransactionReceiver(address="9782364", amount=30)
])
with patch.object(self._integer_converter, 'revert_amount_conversion'):
def stub(amount: float):
return float(amount / 100)
self._integer_converter.revert_amount_conversion.side_effect = stub
actual_result = self._integer_converter.revert_transaction_conversion(transaction)
self.assertEqual(actual_result, expected_result)
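# Hedged usage note (editor addition): based on the file path above, the suite
# can be run from the repository root with standard unittest invocations, e.g.
#
#   python -m unittest tests.service.test_integer_converter_service
#   python -m unittest discover tests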
| 37.551724
| 94
| 0.67034
| 338
| 3,267
| 6.136095
| 0.186391
| 0.08486
| 0.154291
| 0.150434
| 0.721794
| 0.674542
| 0.6027
| 0.461909
| 0.417551
| 0.367406
| 0
| 0.056818
| 0.245791
| 3,267
| 86
| 95
| 37.988372
| 0.784903
| 0.003673
| 0
| 0.5
| 0
| 0
| 0.053797
| 0.026745
| 0
| 0
| 0
| 0
| 0.109375
| 1
| 0.140625
| false
| 0
| 0.0625
| 0.03125
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b044475c3b8a25898a8527a87ed6dc1d9dadbb1d
| 6,670
|
py
|
Python
|
live_demo.py
|
GerryZhang7/ASL-Translator-
|
3963311d8dd1f010ee5a19b3760b451bc287ab1e
|
[
"MIT"
] | null | null | null |
live_demo.py
|
GerryZhang7/ASL-Translator-
|
3963311d8dd1f010ee5a19b3760b451bc287ab1e
|
[
"MIT"
] | null | null | null |
live_demo.py
|
GerryZhang7/ASL-Translator-
|
3963311d8dd1f010ee5a19b3760b451bc287ab1e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
LIVE DEMO
This script loads a pre-trained model (for best results use pre-trained weights for classification block)
and classifies American Sign Language finger spelling frame-by-frame in real-time
"""
import string
import cv2
import time
from processing import square_pad, preprocess_for_vgg
from model import create_model
import argparse
import numpy as np
ap = argparse.ArgumentParser()
ap.add_argument("-w", "--weights", default=None,
help="path to the model weights")
required_ap = ap.add_argument_group('required arguments')
required_ap.add_argument("-m", "--model",
type=str, default="resnet", required=True,
help="name of pre-trained network to use")
args = vars(ap.parse_args())
# ====== Create model for real-time classification ======
# =======================================================
# Valid pre-trained network names (MODELS is a list, not a dictionary)
MODELS = ["resnet", "vgg16", "inception", "xception", "mobilenet"]
if args["model"] not in MODELS:
    raise AssertionError("The --model command line argument must be one of: " + ", ".join(MODELS))
# Create pre-trained model + classification block, with or without pre-trained weights
my_model = create_model(model=args["model"],
model_weights_path=args["weights"])
# Dictionary to convert numerical classes to alphabet
label_dict = {pos: letter
for pos, letter in enumerate(string.ascii_uppercase)}
# ====================== Live loop ======================
# =======================================================
video_capture = cv2.VideoCapture(0)
#if not video_capture.isOpened():
# raise Exception("Could not open video device")
# Set properties. Each set() call returns True on success (i.e. correct resolution)
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 5000)
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 5000)
path = "C:/Users/Desktop/splash.jpg"
img = cv2.imread(path)
imgWrite = np.zeros((512, 512, 3), np.uint8)
flag1 = 0
flag2 = 0
flag3 = 0
fps = 0
i = 0
timer = 0
start = time.time()
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
fps += 1
timer += 1
# Draw rectangle around face
x = 313
y = 82
w = 451
h = 568
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 3)
# Crop + process captured frame
hand = frame[83:650, 314:764]
#hand = frame[0:1000, 0:1000]
hand = square_pad(hand)
hand = preprocess_for_vgg(hand)
# Make prediction
my_predict = my_model.predict(hand,
batch_size=1,
verbose=0)
# Predict letter
top_prd = np.argmax(my_predict)
if (flag1 == 1):
cv2.putText(frame, text="hi ",
org=(50, (560 + 240)),
fontFace=cv2.FONT_HERSHEY_PLAIN,
fontScale=6, color=(0, 0, 255),
thickness=6, lineType=cv2.LINE_AA)
if (flag2 == 1):
cv2.putText(frame, text="im ",
org=(185, (560 + 240)),
fontFace=cv2.FONT_HERSHEY_PLAIN,
fontScale=6, color=(0, 0, 255),
thickness=6, lineType=cv2.LINE_AA)
if (flag3 == 1):
cv2.putText(frame, text="good",
org=(300, (560 + 240)),
fontFace=cv2.FONT_HERSHEY_PLAIN,
fontScale=6, color=(0, 0, 255),
thickness=6, lineType=cv2.LINE_AA)
timer = -50
# Only display predictions with probabilities greater than 0.5
#if np.max(my_predict) >= 0.50:
#if timer >= 15:
if np.max(my_predict) >= 0.9925 and timer >= 12:
        timer = 0
prediction_result = "hi im good"
#prediction_result = label_dict[top_prd]
preds_list = np.argsort(my_predict)[0]
#pred_2 = label_dict[preds_list[-2]]
#pred_3 = label_dict[preds_list[-3]]
width = int(video_capture.get(3) + 0.5)
height = int(video_capture.get(4) + 0.5)
# Annotate image with most probable prediction
if i != 2 and i != 5 and i != 10:
cv2.putText(frame, text=prediction_result[i],
org=(width // 2 + 230, height // 2 + 75),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=17, color=(255, 255, 0),
thickness=15, lineType=cv2.LINE_AA)
elif i == 2:
cv2.putText(frame, text="[space]",
org=(width // 2 + 230, height // 2 + 75),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=5, color=(255, 255, 0),
thickness=15, lineType=cv2.LINE_AA)
flag1 = 1
#cv2.imshow("img", img)
#cv2.imwrite("splash.jpg", img)
#cv2.waitKey(0)
elif i == 5:
cv2.putText(frame, text="[space]",
org=(width // 2 + 230, height // 2 + 75),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=5, color=(255, 255, 0),
thickness=15, lineType=cv2.LINE_AA)
flag2 = 1
cv2.imwrite(path, frame)
elif i == 10:
cv2.putText(frame, text="[space]",
org=(width // 2 + 230, height // 2 + 75),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=5, color=(255, 255, 0),
thickness=15, lineType=cv2.LINE_AA)
flag3 = 1
i = (i+1) % (len(prediction_result)+1)
# Annotate image with second most probable prediction (displayed on bottom left)
'''cv2.putText(frame, text=pred_2,
org=(width // 2 + width // 5 + 40, (360 + 240)),
fontFace=cv2.FONT_HERSHEY_PLAIN,
fontScale=6, color=(0, 0, 255),
thickness=6, lineType=cv2.LINE_AA)
# Annotate image with third probable prediction (displayed on bottom right)
cv2.putText(frame, text=pred_3,
org=(width // 2 + width // 3 + 5, (360 + 240)),
fontFace=cv2.FONT_HERSHEY_PLAIN,
fontScale=6, color=(0, 0, 255),
thickness=6, lineType=cv2.LINE_AA)'''
# Display the resulting frame
cv2.imshow('Video', frame)
# Press 'q' to exit live loop
if cv2.waitKey(10) & 0xFF == ord('q'):
break
# Calculate frames per second
end = time.time()
FPS = fps/(end-start)
print("[INFO] approx. FPS: {:.2f}".format(FPS))
# Release the capture
video_capture.release()
cv2.destroyAllWindows()
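# Hedged usage sketch (editor addition; the weights file name is hypothetical):
#
#   python live_demo.py -m resnet -w my_weights.h5
#
# -m/--model must be one of the names in MODELS; -w/--weights optionally points
# at pre-trained classification-block weights, as described in the docstring.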
| 33.517588
| 105
| 0.553373
| 831
| 6,670
| 4.340554
| 0.318893
| 0.024951
| 0.037427
| 0.047408
| 0.324369
| 0.275575
| 0.259218
| 0.242584
| 0.242584
| 0.242584
| 0
| 0.068587
| 0.302699
| 6,670
| 198
| 106
| 33.686869
| 0.706945
| 0.214843
| 0
| 0.238938
| 0
| 0
| 0.072739
| 0.005898
| 0
| 0
| 0.000874
| 0
| 0.00885
| 1
| 0
| false
| 0
| 0.061947
| 0
| 0.061947
| 0.00885
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b044b434998843e21fedc472b72d6aa6d023641a
| 8,770
|
py
|
Python
|
prob2020/python/gene_sequence.py
|
KarchinLab/probabilistic2020
|
8e0b1b9578bd8189b1690dd2f17476c3305b98dc
|
[
"Apache-2.0"
] | 8
|
2016-04-30T03:26:40.000Z
|
2021-09-17T04:47:08.000Z
|
prob2020/python/gene_sequence.py
|
KarchinLab/probabilistic2020
|
8e0b1b9578bd8189b1690dd2f17476c3305b98dc
|
[
"Apache-2.0"
] | 9
|
2016-08-18T15:19:04.000Z
|
2019-07-17T18:16:52.000Z
|
prob2020/python/gene_sequence.py
|
KarchinLab/probabilistic2020
|
8e0b1b9578bd8189b1690dd2f17476c3305b98dc
|
[
"Apache-2.0"
] | 7
|
2016-10-19T03:43:42.000Z
|
2021-07-31T02:40:20.000Z
|
"""Fetches gene sequence from gene fasta created by extract_genes.py"""
import prob2020.python.utils as utils
class GeneSequence(object):
def __init__(self, fasta_obj,
nuc_context=1.5):
self.fasta = fasta_obj
self.nuc_context = nuc_context
def set_gene(self, bed_line):
"""Updates gene sequence for a new gene (bed line).
Parameters
----------
bed_line : BedLine
BedLine object representing a single gene in a BED file
"""
self.bed = bed_line # gene that was specified as BED
self._reset_seq() # fetch sequence for bed line
def _reset_seq(self):
"""Updates attributes for gene represented in the self.bed attribute.
Sequences are always upper case.
"""
exon_seq_list, five_ss_seq_list, three_ss_seq_list = self._fetch_seq()
self.exon_seq = ''.join(exon_seq_list)
self.three_prime_seq = three_ss_seq_list
self.five_prime_seq = five_ss_seq_list
self._to_upper() # make sure all sequences are in upper case
def add_germline_variants(self, germline_nucs, coding_pos):
"""Add potential germline variants into the nucleotide sequence.
Sequenced individuals may potentially have a SNP at a somatic mutation position.
Therefore they may differ from the reference genome. This method updates the gene
germline gene sequence to match the actual individual.
Parameters
----------
germline_nucs : list of str
list of DNA nucleotides containing the germline letter
coding_pos : int
0-based nucleotide position in coding sequence
NOTE: the self.exon_seq attribute is updated, no return value
"""
if len(germline_nucs) != len(coding_pos):
raise ValueError('Each germline nucleotide should have a coding position')
es = list(self.exon_seq)
for i in range(len(germline_nucs)):
gl_nuc, cpos = germline_nucs[i].upper(), coding_pos[i]
if not utils.is_valid_nuc(gl_nuc):
raise ValueError('{0} is not a valid nucleotide'.format(gl_nuc))
if cpos >= 0:
es[cpos] = gl_nuc
self.exon_seq = ''.join(es)
def _to_upper(self):
"""Convert sequences to upper case."""
self.exon_seq = self.exon_seq.upper()
self.three_prime_seq = [s.upper() for s in self.three_prime_seq]
self.five_prime_seq = [s.upper() for s in self.five_prime_seq]
def _fetch_seq(self):
"""Fetches gene sequence from PySAM fasta object.
Returns
-------
exons : list of str
list of exon nucleotide sequences
five_prime_ss : list of str
list of 5' splice site sequences
three_prime_ss : list of str
list of 3' splice site sequences
"""
exons = []
three_prime_ss = []
five_prime_ss = []
num_exons = self.bed.get_num_exons()
for i in range(num_exons):
# add exon sequence
tmp_id = '{0};exon{1}'.format(self.bed.gene_name, i)
tmp_exon = self.fasta.fetch(reference=tmp_id)
exons.append(tmp_exon)
# add splice site sequence
tmp_id_3ss = '{0};3SS'.format(tmp_id)
tmp_id_5ss = '{0};5SS'.format(tmp_id)
if num_exons == 1:
pass
elif i == 0:
tmp_5ss = self.fasta.fetch(tmp_id_5ss)
five_prime_ss.append(tmp_5ss)
elif i == (num_exons - 1):
tmp_3ss = self.fasta.fetch(tmp_id_3ss)
three_prime_ss.append(tmp_3ss)
else:
tmp_3ss = self.fasta.fetch(tmp_id_3ss)
tmp_5ss = self.fasta.fetch(tmp_id_5ss)
three_prime_ss.append(tmp_3ss)
five_prime_ss.append(tmp_5ss)
return exons, five_prime_ss, three_prime_ss
def _fetch_5ss_fasta(fasta, gene_name, exon_num,
chrom, strand, start, end):
"""Retreives the 5' SS sequence flanking the specified exon.
Returns a string in fasta format with the first line containing
a ">" and the second line contains the two base pairs of 5' SS.
Parameters
----------
fasta : pysam.Fastafile
fasta object from pysam
gene_name : str
gene name used for fasta seq id
exon_num : int
the `exon_num` exon, used for seq id
chrom : str
        chromosome
strand : str
strand, {'+', '-'}
start : int
0-based start position
end : int
0-based end position
Returns
-------
ss_fasta : str
string in fasta format with first line being seq id
"""
if strand == '+':
ss_seq = fasta.fetch(reference=chrom,
start=end-1,
end=end+3)
elif strand == '-':
ss_seq = fasta.fetch(reference=chrom,
start=start-3,
end=start+1)
ss_seq = utils.rev_comp(ss_seq)
ss_fasta = '>{0};exon{1};5SS\n{2}\n'.format(gene_name,
exon_num,
ss_seq.upper())
return ss_fasta
def _fetch_3ss_fasta(fasta, gene_name, exon_num,
chrom, strand, start, end):
"""Retreives the 3' SS sequence flanking the specified exon.
Returns a string in fasta format with the first line containing
a ">" and the second line contains the two base pairs of 3' SS.
Parameters
----------
fasta : pysam.Fastafile
fasta object from pysam
gene_name : str
gene name used for fasta seq id
exon_num : int
the `exon_num` exon, used for seq id
chrom : str
        chromosome
strand : str
strand, {'+', '-'}
start : int
0-based start position
end : int
0-based end position
Returns
-------
ss_fasta : str
string in fasta format with first line being seq id
"""
if strand == '-':
ss_seq = fasta.fetch(reference=chrom,
start=end-1,
end=end+3)
ss_seq = utils.rev_comp(ss_seq)
elif strand == '+':
ss_seq = fasta.fetch(reference=chrom,
start=start-3,
end=start+1)
ss_fasta = '>{0};exon{1};3SS\n{2}\n'.format(gene_name,
exon_num,
ss_seq.upper())
return ss_fasta
def fetch_gene_fasta(gene_bed, fasta_obj):
"""Retreive gene sequences in FASTA format.
Parameters
----------
gene_bed : BedLine
BedLine object representing a single gene
fasta_obj : pysam.Fastafile
fasta object for index retreival of sequence
Returns
-------
gene_fasta : str
sequence of gene in FASTA format
"""
gene_fasta = ''
strand = gene_bed.strand
exons = gene_bed.get_exons()
if strand == '-':
exons.reverse() # order exons 5' to 3', so reverse if '-' strand
# iterate over exons
for i, exon in enumerate(exons):
exon_seq = fasta_obj.fetch(reference=gene_bed.chrom,
start=exon[0],
end=exon[1]).upper()
if strand == '-':
exon_seq = utils.rev_comp(exon_seq)
exon_fasta = '>{0};exon{1}\n{2}\n'.format(gene_bed.gene_name,
i, exon_seq)
# get splice site sequence
if len(exons) == 1:
# splice sites don't matter if there is no splicing
ss_fasta = ''
elif i == 0:
            # first exon only, get 5' SS
ss_fasta = _fetch_5ss_fasta(fasta_obj, gene_bed.gene_name, i,
gene_bed.chrom, strand, exon[0], exon[1])
elif i == (len(exons) - 1):
            # last exon only, get 3' SS
ss_fasta = _fetch_3ss_fasta(fasta_obj, gene_bed.gene_name, i,
gene_bed.chrom, strand, exon[0], exon[1])
else:
            # middle exon, get both 5' and 3' SS
fasta_3ss = _fetch_3ss_fasta(fasta_obj, gene_bed.gene_name, i,
gene_bed.chrom, strand, exon[0], exon[1])
fasta_5ss = _fetch_5ss_fasta(fasta_obj, gene_bed.gene_name, i,
gene_bed.chrom, strand, exon[0], exon[1])
ss_fasta = fasta_5ss + fasta_3ss
gene_fasta += exon_fasta + ss_fasta
return gene_fasta
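# --- Hedged example (editor addition, not part of the original file) --------
# A minimal sketch of the intended call sequence, assuming a FASTA index
# produced by extract_genes.py and BedLine objects parsed elsewhere in the
# package. The paths and the `bed_lines` iterable are hypothetical.
#
# import pysam
#
# fasta = pysam.Fastafile('genes.fa')
# gene_seq = GeneSequence(fasta)
# for bed_line in bed_lines:          # BedLine objects from the BED parser
#     gene_seq.set_gene(bed_line)     # fetches exon + splice-site sequences
#     print(bed_line.gene_name, len(gene_seq.exon_seq))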
| 34.801587
| 89
| 0.55382
| 1,125
| 8,770
| 4.113778
| 0.163556
| 0.022688
| 0.010372
| 0.015557
| 0.440147
| 0.415298
| 0.394987
| 0.357822
| 0.32325
| 0.32325
| 0
| 0.015946
| 0.356442
| 8,770
| 251
| 90
| 34.940239
| 0.80404
| 0.337856
| 0
| 0.356522
| 0
| 0
| 0.03378
| 0.008681
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078261
| false
| 0.008696
| 0.008696
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b04538316ec8e7dec6961b4c00010c7027a8e97d
| 1,118
|
py
|
Python
|
src/main/python/request/http_request.py
|
photowey/pytest-dynamic-framework
|
4e7b6d74594191006b50831d42e7aae21e154d56
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/request/http_request.py
|
photowey/pytest-dynamic-framework
|
4e7b6d74594191006b50831d42e7aae21e154d56
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/request/http_request.py
|
photowey/pytest-dynamic-framework
|
4e7b6d74594191006b50831d42e7aae21e154d56
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# ---------------------------------------------
# @file http_request
# @description http_request
# @author WcJun
# @date 2021/07/19
# ---------------------------------------------
from src.main.python.request.options import RequestOptions
class HttpRequest:
"""
Http Request
"""
def __init__(self, options: RequestOptions):
self.url = options.url
self.method = options.method
self.body = options.body
self.headers = options.headers
self.parameters = options.parameters
self.ssl = options.ssl
self.mock_enabled = options.mock_enabled
self.mock_response = options.mock_response
def populateUrlParameters(self) -> str:
        # `list` is the proper annotation; a bare `[]` is a value, not a type.
        param_chain: list = ['?']
        if isinstance(self.parameters, dict) and len(self.parameters) > 0:
            for parameter_key in self.parameters.keys():
                single_param: list = [parameter_key, '=', self.parameters[parameter_key], '&']
param_chain.append(''.join(single_param))
chain_str: str = ''.join(param_chain)
return chain_str[:-1]
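# --- Hedged example (editor addition, not part of the original file) --------
# A sketch of building the query string. The RequestOptions keyword arguments
# below assume a constructor mirroring the attributes read in __init__; the
# real signature lives in request/options.py.
#
# options = RequestOptions(url='https://example.com/api', method='GET',
#                          body=None, headers={}, parameters={'a': '1', 'b': '2'},
#                          ssl=True, mock_enabled=False, mock_response=None)
# request = HttpRequest(options)
# print(request.populateUrlParameters())  # -> '?a=1&b=2'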
| 29.421053
| 92
| 0.573345
| 115
| 1,118
| 5.4
| 0.469565
| 0.112721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012821
| 0.232558
| 1,118
| 37
| 93
| 30.216216
| 0.710956
| 0.18068
| 0
| 0
| 0
| 0
| 0.003356
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b04682256b68f1be1d146f950d4cf5cacbc05399
| 5,728
|
py
|
Python
|
bot/helper/mirror_utils/download_utils/aria2_download.py
|
vincreator/Eunha
|
85a702a5b5f30ccea1798122c261d4ff07fe0c0c
|
[
"Apache-2.0"
] | null | null | null |
bot/helper/mirror_utils/download_utils/aria2_download.py
|
vincreator/Eunha
|
85a702a5b5f30ccea1798122c261d4ff07fe0c0c
|
[
"Apache-2.0"
] | null | null | null |
bot/helper/mirror_utils/download_utils/aria2_download.py
|
vincreator/Eunha
|
85a702a5b5f30ccea1798122c261d4ff07fe0c0c
|
[
"Apache-2.0"
] | null | null | null |
from time import sleep
from threading import Thread
from bot import aria2, download_dict_lock, download_dict, STOP_DUPLICATE, TORRENT_DIRECT_LIMIT, ZIP_UNZIP_LIMIT, LOGGER, STORAGE_THRESHOLD
from bot.helper.mirror_utils.upload_utils.gdriveTools import GoogleDriveHelper
from bot.helper.ext_utils.bot_utils import is_magnet, getDownloadByGid, new_thread, get_readable_file_size
from bot.helper.mirror_utils.status_utils.aria_download_status import AriaDownloadStatus
from bot.helper.telegram_helper.message_utils import sendMarkup, sendStatusMessage, sendMessage
from bot.helper.ext_utils.fs_utils import get_base_name, check_storage_threshold
@new_thread
def __onDownloadStarted(api, gid):
try:
if any([STOP_DUPLICATE, TORRENT_DIRECT_LIMIT, ZIP_UNZIP_LIMIT, STORAGE_THRESHOLD]):
sleep(1.5)
dl = getDownloadByGid(gid)
if not dl:
return
download = api.get_download(gid)
if STOP_DUPLICATE and not dl.getListener().isLeech:
LOGGER.info('Checking File/Folder if already in Drive...')
sname = download.name
if dl.getListener().isZip:
sname = sname + ".zip"
elif dl.getListener().extract:
try:
sname = get_base_name(sname)
except:
sname = None
if sname is not None:
smsg, button = GoogleDriveHelper().drive_list(sname, True)
if smsg:
dl.getListener().onDownloadError('File/Folder already available in Drive.\n\n')
api.remove([download], force=True, files=True)
return sendMarkup("Here are the search results:", dl.getListener().bot, dl.getListener().message, button)
if any([ZIP_UNZIP_LIMIT, TORRENT_DIRECT_LIMIT, STORAGE_THRESHOLD]):
sleep(1)
limit = None
size = api.get_download(gid).total_length
arch = any([dl.getListener().isZip, dl.getListener().extract])
if STORAGE_THRESHOLD is not None:
acpt = check_storage_threshold(size, arch, True)
# True if files allocated, if allocation disabled remove True arg
if not acpt:
msg = f'You must leave {STORAGE_THRESHOLD}GB free storage.'
msg += f'\nYour File/Folder size is {get_readable_file_size(size)}'
dl.getListener().onDownloadError(msg)
return api.remove([download], force=True, files=True)
if ZIP_UNZIP_LIMIT is not None and arch:
mssg = f'Zip/Unzip limit is {ZIP_UNZIP_LIMIT}GB'
limit = ZIP_UNZIP_LIMIT
elif TORRENT_DIRECT_LIMIT is not None:
mssg = f'Torrent/Direct limit is {TORRENT_DIRECT_LIMIT}GB'
limit = TORRENT_DIRECT_LIMIT
if limit is not None:
LOGGER.info('Checking File/Folder Size...')
if size > limit * 1024**3:
dl.getListener().onDownloadError(f'{mssg}.\nYour File/Folder size is {get_readable_file_size(size)}')
return api.remove([download], force=True, files=True)
except Exception as e:
LOGGER.error(f"{e} onDownloadStart: {gid} stop duplicate and size check didn't pass")
@new_thread
def __onDownloadComplete(api, gid):
LOGGER.info(f"onDownloadComplete: {gid}")
dl = getDownloadByGid(gid)
download = api.get_download(gid)
if download.followed_by_ids:
new_gid = download.followed_by_ids[0]
new_download = api.get_download(new_gid)
if not dl:
dl = getDownloadByGid(new_gid)
with download_dict_lock:
download_dict[dl.uid()] = AriaDownloadStatus(new_gid, dl.getListener())
LOGGER.info(f'Changed gid from {gid} to {new_gid}')
elif dl:
Thread(target=dl.getListener().onDownloadComplete).start()
@new_thread
def __onDownloadStopped(api, gid):
sleep(4)
dl = getDownloadByGid(gid)
if dl:
dl.getListener().onDownloadError('Dead torrent!')
@new_thread
def __onDownloadError(api, gid):
LOGGER.info(f"onDownloadError: {gid}")
sleep(0.5)
dl = getDownloadByGid(gid)
    # Give `error` a default so the message below is defined even if
    # api.get_download() raises before it is assigned.
    error = "Download failed with an unknown error"
    try:
download = api.get_download(gid)
error = download.error_message
LOGGER.info(f"Download Error: {error}")
except:
pass
if dl:
dl.getListener().onDownloadError(error)
def start_listener():
aria2.listen_to_notifications(threaded=True, on_download_start=__onDownloadStarted,
on_download_error=__onDownloadError,
on_download_stop=__onDownloadStopped,
on_download_complete=__onDownloadComplete,
timeout=20)
def add_aria2c_download(link: str, path, listener, filename):
if is_magnet(link):
download = aria2.add_magnet(link, {'dir': path, 'out': filename})
else:
download = aria2.add_uris([link], {'dir': path, 'out': filename})
if download.error_message:
error = str(download.error_message).replace('<', ' ').replace('>', ' ')
LOGGER.info(f"Download Error: {error}")
return sendMessage(error, listener.bot, listener.message)
with download_dict_lock:
download_dict[listener.uid] = AriaDownloadStatus(download.gid, listener)
LOGGER.info(f"Started: {download.gid} DIR: {download.dir} ")
sendStatusMessage(listener.message, listener.bot)
start_listener()
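# Hedged usage note (editor addition): a download is typically started from a
# mirror command handler elsewhere in the bot, roughly as
#
#   add_aria2c_download(link, path='/path/to/downloads', listener=listener,
#                       filename=None)
#
# where `listener` supplies the uid, bot, message and onDownloadError /
# onDownloadComplete callbacks used above. The listener class and path here
# are assumptions, not taken from this file.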
| 46.569106
| 138
| 0.618191
| 648
| 5,728
| 5.265432
| 0.233025
| 0.053341
| 0.036928
| 0.025791
| 0.251759
| 0.137456
| 0.085873
| 0.075615
| 0.025791
| 0.025791
| 0
| 0.004666
| 0.289106
| 5,728
| 122
| 139
| 46.95082
| 0.833251
| 0.010999
| 0
| 0.232143
| 0
| 0
| 0.118665
| 0.018541
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053571
| false
| 0.017857
| 0.071429
| 0
| 0.169643
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b047b2781fee7bef3205107d3cc7277c6707a880
| 3,407
|
py
|
Python
|
gol.py
|
AjayMT/game-of-life
|
681bb92e1d7c0644645af7b77f0106ba2d4c9c20
|
[
"MIT"
] | null | null | null |
gol.py
|
AjayMT/game-of-life
|
681bb92e1d7c0644645af7b77f0106ba2d4c9c20
|
[
"MIT"
] | null | null | null |
gol.py
|
AjayMT/game-of-life
|
681bb92e1d7c0644645af7b77f0106ba2d4c9c20
|
[
"MIT"
] | null | null | null |
import pygame
from pygame.locals import *
from pygamehelper import *
from vec2d import *
from random import randrange
class Matrix:
def __init__(self, w, h):
self.w, self.h = w, h
self._data = []
for i in range(self.w * self.h):
self._data.append(None)
def __getitem__(self, i):
return self._data[i]
def _index(self, x, y):
return x + (y * self.w)
def get(self, x, y):
return self._data[self._index(x, y)]
def set(self, x, y, v):
self._data[self._index(x, y)] = v
class Cell:
def __init__(self, x, y, w):
self.x, self.y, self.w = x, y, w
self.alive = False
def draw(self, screen):
color = (255, 255, 0) if self.alive else (100, 100, 100)
xywh = (self.x * self.w, self.y * self.w, self.w, self.w)
pygame.draw.rect(screen, color, xywh, 0)
pygame.draw.rect(screen, (100, 0, 0), xywh, 1)
class GameOfLife(PygameHelper):
def __init__(self):
self.w, self.h = 800, 600
PygameHelper.__init__(self, size=(self.w, self.h))
        # Python 3: raw_input() was renamed input(), and the old input() no
        # longer evaluates its argument, so the cell width is cast explicitly.
        self.begin = input('Begin: ') or [3]
        self.begin = [int(x) for x in self.begin]
        self.stay = input('Stay: ') or [3, 2]
        self.stay = [int(x) for x in self.stay]
        self.paused = True
        self.cellw = int(input('Cell width: '))
        # Floor division keeps the grid dimensions integral.
        self.cells = Matrix(self.w // self.cellw, self.h // self.cellw)
        random = input('Random arrangement of live cells? (y/n) ') == 'y'
for i in range(self.cells.w):
for j in range(self.cells.h):
c = Cell(i, j, self.cellw)
if random: c.alive = (randrange(2) == 1)
self.cells.set(i, j, c)
def neighbours(self, c):
n = []
x, y = c.x, c.y
for i in [1, -1, 0]:
for j in [1, -1, 0]:
if i == 0 and j == 0: continue
if (x + i) < 0: i += self.cells.w
if (x + i) >= self.cells.w: i -= self.cells.w
if (y + j) < 0: j += self.cells.h
if (y + j) >= self.cells.h: j -= self.cells.h
n.append(self.cells.get(x + i, y + j))
return n
def mouseUp(self, pos):
if not self.paused: return
        x = (pos[0] - (pos[0] % self.cellw)) // self.cellw
        y = (pos[1] - (pos[1] % self.cellw)) // self.cellw
c = self.cells.get(x, y)
c.alive = not c.alive
def keyDown(self, key):
if key == 275 and self.paused:
self.paused = False
self.update()
self.draw()
self.paused = True
else:
self.paused = not self.paused
def update(self):
if self.paused: return
changed = []
for c in self.cells:
neighbours = self.neighbours(c)
liveneighbours = [n for n in neighbours if n.alive]
if c.alive:
if len(liveneighbours) not in self.stay:
changed.append(c)
if not c.alive:
if len(liveneighbours) in self.begin:
changed.append(c)
for c in changed:
c.alive = not c.alive
def draw(self):
self.screen.fill((0, 0, 0))
for c in self.cells:
c.draw(self.screen)
pygame.display.update()
g = GameOfLife()
g.mainLoop(60)
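# Hedged usage note (editor addition), derived from the handlers above: the
# board starts paused; click a cell to toggle it, press the right-arrow key
# (keycode 275) to step a single generation, and press any other key to
# toggle pause/run. Rules are entered as digit strings, e.g. Begin: 3 and
# Stay: 23 give Conway's classic B3/S23 behaviour.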
| 27.039683
| 70
| 0.502495
| 494
| 3,407
| 3.402834
| 0.172065
| 0.074955
| 0.042832
| 0.023795
| 0.15586
| 0.060678
| 0
| 0
| 0
| 0
| 0
| 0.026147
| 0.360141
| 3,407
| 125
| 71
| 27.256
| 0.744954
| 0
| 0
| 0.086022
| 0
| 0
| 0.019378
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.139785
| false
| 0
| 0.053763
| 0.032258
| 0.268817
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b04cbd151462272c28fb0ccf978f4c3ccbb776cd
| 11,913
|
py
|
Python
|
frontend/alexa/alexa.py
|
jjanetzki/HackHPI-2017
|
5345a4b385b92dff8b665818127e85eb1e14b31f
|
[
"MIT"
] | 1
|
2017-06-17T18:18:55.000Z
|
2017-06-17T18:18:55.000Z
|
frontend/alexa/alexa.py
|
janetzki/Productivity-Bot
|
5345a4b385b92dff8b665818127e85eb1e14b31f
|
[
"MIT"
] | null | null | null |
frontend/alexa/alexa.py
|
janetzki/Productivity-Bot
|
5345a4b385b92dff8b665818127e85eb1e14b31f
|
[
"MIT"
] | null | null | null |
"""
This code sample is a part of a simple demo to show beginners how to create a skill (app) for the Amazon Echo using AWS Lambda and the Alexa Skills Kit.
For the full code sample visit https://github.com/pmckinney8/Alexa_Dojo_Skill.git
"""
from __future__ import print_function
import requests
import json
alcohol_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/alcohol/add"
caffeine_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/caffeine/add"
profile_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/alcohol/setprofile"
caffeine_recommendation_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/caffeine/recommendation"
alcohol_recommendation_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/alcohol/recommendation"
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
# if (event['session']['application']['applicationId'] !=
# "amzn1.echo-sdk-ams.app.[unique-value-here]"):
# raise ValueError("Invalid Application ID")
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'])
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session'])
def on_session_started(session_started_request, session):
""" Called when the session starts """
print("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'] +
", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == "DrinkIntend":
return get_drink_response(intent_request)
elif intent_name == "DrinkFinishedIntend":
return get_finished_drink(intent_request)
elif intent_name == "CaffeineIntend":
return get_caffeine(intent_request)
elif intent_name == "AlcoholIntend":
return get_alcohol(intent_request)
elif intent_name == "CaffeineRecommendationIntend":
return get_caffeine_recommendation()
elif intent_name == "AlcoholRecommendationIntend":
return get_alcohol_recommendation()
elif intent_name == "CaffeineLevelIntend":
return get_caffeine_level()
elif intent_name == "AlcoholLevelIntend":
return get_alcohol_level()
elif intent_name == "SexIntend":
return set_sex(intent_request)
elif intent_name == "BodyweightIntend":
return set_bodyweight(intent_request)
elif intent_name == "AgeIntend":
return set_age(intent_request)
elif intent_name == "AMAZON.HelpIntent":
return get_help_response()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
else:
raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# add cleanup logic here
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
session_attributes = {}
card_title = "Welcome"
speech_output = "Welcome to the Productivity Bot. I will help you stay in your Ballmer Peak."
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with the same text.
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def get_help_response():
session_attributes = {}
card_title = "Help"
speech_output = "Welcome to the help section for the Productivity Bot. A couple of examples of phrases that I can except are... What shall I drink... or, how much alcohol does a drink contain. Lets get started now by trying one of these."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_drink_response(intent_request):
session_attributes = {}
card_title = "Drink response"
drink = intent_request["intent"]["slots"]["Drink"]["value"]
requests.post(caffeine_url, json={"drink": drink}) # todo: specify serving (ml)
requests.post(alcohol_url, json={"drink": drink}) # todo: specify serving (ml)
speech_output = f"Enjoy your {drink}."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_finished_drink(intent_request):
session_attributes = {}
card_title = "Finished drink response"
drink = intent_request["intent"]["slots"]["Drink"]["value"]
# requests.post("https://hpi.de/naumann/sites/ingestion/hackhpi/", json={"drink finished": drink})
speech_output = f"I hope your {drink} was tasty."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_caffeine_recommendation():
session_attributes = {}
card_title = "Caffeine recommendation response"
json_answer = requests.get(caffeine_recommendation_url).text
speech_output = json.loads(json_answer)["results"]
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_alcohol_recommendation():
session_attributes = {}
card_title = "Alcohol recommendation response"
json_answer = requests.get(alcohol_recommendation_url).text
speech_output = json.loads(json_answer)["results"]
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_caffeine(intent_request):
session_attributes = {}
card_title = "Caffeine response"
drink = intent_request["intent"]["slots"]["Drink"]["value"]
speech_output = f"{drink} contains a lot of caffeine."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_alcohol(intent_request):
session_attributes = {}
card_title = "Alcohol response"
drink = intent_request["intent"]["slots"]["Drink"]["value"]
speech_output = f"{drink} contains a lot of alcohol."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_caffeine_level():
session_attributes = {}
card_title = "Caffeine level response"
speech_output = "Your caffeine level is over 9000."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_alcohol_level():
session_attributes = {}
card_title = "Alcohol level response"
speech_output = "Your alcohol level is over 9000."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def set_sex(intent_request):
session_attributes = {}
card_title = "Sex response"
sex = intent_request["intent"]["slots"]["Sex"]["value"]
requests.post(profile_url, json={"sex": sex})
speech_output = f"Yes, you are so {sex}."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def set_bodyweight(intent_request):
session_attributes = {}
card_title = "Bodyweight response"
weight = intent_request["intent"]["slots"]["Number"]["value"]
requests.post(profile_url, json={"bodyweight": weight})
speech_output = f"A bodyweight of {weight} is just perfect!"
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def set_age(intent_request):
session_attributes = {}
card_title = "Age response"
age = intent_request["intent"]["slots"]["Number"]["value"]
requests.post(profile_url, json={"age": age})
speech_output = f"I am less than {age} years old."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Thank you for using the Productivity bot! I hope you were productive."
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': 'SessionSpeechlet - ' + title,
'content': 'SessionSpeechlet - ' + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
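# --- Hedged illustration (editor addition, not part of the original file) ---
# A minimal local harness using the event shape lambda_handler reads above.
# The IDs and slot value are made up; note that get_drink_response will issue
# real HTTP POSTs to the ingestion URLs when this intent is dispatched.
if __name__ == "__main__":
    test_event = {
        'session': {
            'new': True,
            'sessionId': 'session-123',
            'application': {'applicationId': 'amzn1.ask.skill.test'},
        },
        'request': {
            'requestId': 'request-123',
            'type': 'IntentRequest',
            'intent': {
                'name': 'DrinkIntend',
                'slots': {'Drink': {'value': 'coffee'}},
            },
        },
    }
    print(lambda_handler(test_event, None))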
| 39.44702
| 242
| 0.697138
| 1,390
| 11,913
| 5.704317
| 0.182014
| 0.062051
| 0.062555
| 0.05297
| 0.565015
| 0.464371
| 0.399168
| 0.388952
| 0.368521
| 0.334468
| 0
| 0.001259
| 0.200201
| 11,913
| 301
| 243
| 39.578073
| 0.830919
| 0.118526
| 0
| 0.366667
| 0
| 0.004762
| 0.218771
| 0.009278
| 0
| 0
| 0
| 0.003322
| 0
| 1
| 0.1
| false
| 0
| 0.014286
| 0.009524
| 0.271429
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b04d338c3d1c16a12edd8387b7d2185efd9aed7b
| 474
|
py
|
Python
|
day1.py
|
kdrag0n/aoc2021
|
469bd861a7d7c0add14412a705ec4cb1e1b5a10f
|
[
"MIT"
] | 2
|
2021-12-04T21:15:14.000Z
|
2021-12-12T09:28:28.000Z
|
day1.py
|
kdrag0n/aoc2021
|
469bd861a7d7c0add14412a705ec4cb1e1b5a10f
|
[
"MIT"
] | null | null | null |
day1.py
|
kdrag0n/aoc2021
|
469bd861a7d7c0add14412a705ec4cb1e1b5a10f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
def ints(itr):
return [int(i) for i in itr]
with open(sys.argv[1], "r") as f:
lines = [l for l in f.read().split("\n") if l]
total = 0
result = 0
other = 0
last = -1
# Count measurements that are larger than the previous one (part 1).
for l in lines:
    val = int(l.split()[0])
    if last != -1 and val > last:
        total += 1
    last = val
print(f"Total: {total}")
print(f"Result: {result}")
print(f"Other: {other}")
| 12.810811
| 50
| 0.529536
| 79
| 474
| 3.177215
| 0.506329
| 0.071713
| 0.047809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026946
| 0.295359
| 474
| 36
| 51
| 13.166667
| 0.724551
| 0.044304
| 0
| 0
| 0
| 0
| 0.103982
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.047619
| 0.047619
| 0.142857
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b04f12eb656c69facb8b7d0c196d013597b90eb0
| 11,920
|
py
|
Python
|
esst/utils/historygraph.py
|
etcher-be/esst
|
ac41cd0c07af8ca8532997f533756c529c9609a4
|
[
"MIT"
] | 4
|
2018-06-24T14:03:44.000Z
|
2019-01-21T01:20:02.000Z
|
esst/utils/historygraph.py
|
etcher-be/esst
|
ac41cd0c07af8ca8532997f533756c529c9609a4
|
[
"MIT"
] | 106
|
2018-06-24T13:59:52.000Z
|
2019-11-26T09:05:14.000Z
|
esst/utils/historygraph.py
|
theendsofinvention/esst
|
ac41cd0c07af8ca8532997f533756c529c9609a4
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
Creates graphic of perfs
"""
import datetime
import typing
from collections import namedtuple
from tempfile import mktemp
import humanize
from esst.core import CTX
PLT = GRID_SPEC = TICKER = None
# https://stackoverflow.com/questions/4931376/generating-matplotlib-graphs-without-a-running-x-server/4935945#4935945
# noinspection SpellCheckingInspection
def _init_mpl():
"""
This is a very stupid hack to go around Matplotlib being stupid about Tkinter.
My linters don't like import statements mixed within the code, so this will do.
"""
global PLT, GRID_SPEC, TICKER # pylint: disable=global-statement
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt_
from matplotlib import gridspec as grd_, ticker as tick_
PLT = plt_
GRID_SPEC = grd_
TICKER = tick_
_init_mpl()
GraphValues = namedtuple('GraphValues', ['server_cpu_history',
'server_mem_history',
'server_bytes_sent_history',
'server_bytes_recv_history',
'dcs_cpu_history',
'dcs_mem_history',
'players_history', ])
PlotLine = namedtuple('PlotLine',
[
'values',
'label',
'style',
])
def process_values(values_to_process: GraphValues, time_delta: float) -> GraphValues:
"""
Converts raw values for plotting
Args:
values_to_process: values in set from CTX
time_delta: how far behind?
Returns: processed values
"""
def _process(values):
return [data for data in values if data[0] >= time_delta] or [(time_delta, 0)]
server_cpu_history = _process(values_to_process.server_cpu_history)
server_mem_history = _process(values_to_process.server_mem_history)
server_bytes_sent_history = _process(values_to_process.server_bytes_sent_history)
server_bytes_recv_history = _process(values_to_process.server_bytes_recv_history)
dcs_cpu_history = _process(values_to_process.dcs_cpu_history)
dcs_mem_history = _process(values_to_process.dcs_mem_history)
players_history = _process(values_to_process.players_history)
return GraphValues(
server_cpu_history=zip(*server_cpu_history),
server_mem_history=zip(*server_mem_history),
server_bytes_sent_history=zip(*server_bytes_sent_history),
server_bytes_recv_history=zip(*server_bytes_recv_history),
dcs_cpu_history=zip(*dcs_cpu_history),
dcs_mem_history=zip(*dcs_mem_history),
players_history=tuple(zip(*players_history)),
)
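# Hedged illustration (editor addition): each history list holds
# (timestamp, value) pairs, so a toy call looks like
#
#   pair = [(0.0, 10), (5.0, 20)]
#   raw = GraphValues(*[pair] * 7)        # same toy series for every field
#   processed = process_values(raw, time_delta=1.0)
#   # processed.players_history == ((5.0,), (20,)) -- only samples at or
#   # after time_delta survive, unzipped into (timestamps, values) tuples.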
def _make_delta(now, days, hours, minutes):
delta = datetime.timedelta(
days=days, hours=hours, minutes=minutes).total_seconds()
if delta == 0:
delta = datetime.timedelta(hours=2).total_seconds()
return now - delta
def _x_format_func(val, _):
val = datetime.datetime.fromtimestamp(val)
return str(val).replace(' ', '\n')
def _y_format_func_percent(val, _):
return str(int(val)) + '%'
def _y_format_func_bytes(val, _):
return humanize.naturalsize(val)
def _plot_axis(grid_spec, grid_pos, # pylint: disable=too-many-arguments
values_to_plot: typing.Set[PlotLine],
title,
y_label_text,
values,
now,
y_format_func,
visible_x_labels=False,
share_x=None):
axis = PLT.subplot(grid_spec[grid_pos], sharex=share_x) # type: ignore
axis.set_title(title)
PLT.setp(axis.get_xticklabels(), visible=visible_x_labels) # type: ignore
axis.set_ylabel(y_label_text)
for line in values_to_plot:
line_, = axis.plot(*line.values, line.style)
PLT.setp(line_, label=line.label) # type: ignore
_add_players_count_to_axis(axis, values.players_history)
axis.xaxis.set_major_formatter(TICKER.FuncFormatter(_x_format_func)) # type: ignore
axis.yaxis.set_major_formatter(TICKER.FuncFormatter(y_format_func)) # type: ignore
axis.grid(True)
axis.set_xlim(right=now)
return axis
# pylint: disable=too-many-arguments,too-many-locals
def _get_axis(
grid_spec,
now,
values,
grid_pos,
values_list: typing.List[typing.Any],
labels_list: typing.List[str],
title: str,
y_label: str,
visible_x: bool,
y_format_func: typing.Callable,
share_x=None,
):
lines_to_plot = set()
styles = ['r', 'b']
for _values, _label in zip(values_list, labels_list):
lines_to_plot.add(
PlotLine(
values=_values,
style=styles.pop(),
label=_label
)
)
axis = _plot_axis(grid_spec,
now=now,
values_to_plot=lines_to_plot,
grid_pos=grid_pos,
title=title,
y_label_text=y_label,
values=values,
visible_x_labels=visible_x,
share_x=share_x,
y_format_func=y_format_func)
return axis
def _plot_server(grid_spec, values, now):
axis = _get_axis(
grid_spec=grid_spec,
now=now,
values=values,
grid_pos=0,
values_list=[values.server_cpu_history, values.server_mem_history],
labels_list=['CPU', 'Memory'],
title='Server stats',
y_label='Percentage used',
visible_x=False,
y_format_func=_y_format_func_percent,
)
axis.set_ylim([0, 100])
return axis
def _plot_dcs(grid_spec, values, now, share_x=None):
axis = _get_axis(
grid_spec=grid_spec,
now=now,
values=values,
grid_pos=1,
values_list=[values.dcs_cpu_history, values.dcs_mem_history],
labels_list=['CPU', 'Memory'],
title='DCS stats',
y_label='Percentage used',
visible_x=False,
y_format_func=_y_format_func_percent,
share_x=share_x
)
axis.set_ylim([0, 100])
return axis
def _plot_bandwidth(grid_spec, values, now, share_x=None):
axis = _get_axis(
grid_spec=grid_spec,
now=now,
values=values,
grid_pos=2,
values_list=[values.server_bytes_sent_history, values.server_bytes_recv_history],
labels_list=['Bytes sent', 'Bytes received'],
title='Bandwidth',
y_label='Bytes',
visible_x=True,
y_format_func=_y_format_func_bytes,
share_x=share_x
)
return axis
def _add_players_count_to_axis(axis, players_history):
ax_players = axis.twinx()
max_player_count = max(max(players_history[1]), 10)
ax_players.set_ylim([0, max_player_count + (max_player_count / 4)])
ax_players.yaxis.set_major_locator(TICKER.MaxNLocator(integer=True))
ax_players.set_ylabel('Connected players')
players_history, = ax_players.plot(*players_history, 'k.', )
PLT.setp(players_history, label='Players count')
lines, labels = axis.get_legend_handles_labels()
lines2, labels2 = ax_players.get_legend_handles_labels()
axis.legend(lines + lines2, labels + labels2)
def _make_history_graph( # pylint: disable=too-many-arguments
values_to_process,
days=0,
hours=0,
minutes=0,
show: bool = False,
save_path=None):
"""
Creates a performance history graph
Args:
values_to_process: raw GraphValues gathered from CTX
days, hours, minutes: how far back in time to plot
show: show the figure and exit instead of saving
save_path: path to save to (defaults to a temp file)
"""
# noinspection PyTypeChecker
now = datetime.datetime.now().timestamp()
time_delta = _make_delta(now, days, hours, minutes)
values = process_values(values_to_process, time_delta)
figure = PLT.figure(figsize=(18, 12)) # type: ignore
grid_spec = GRID_SPEC.GridSpec(3, 1, height_ratios=[1, 1, 1]) # type: ignore
ax_server = _plot_server(grid_spec, values, now)
_plot_dcs(grid_spec, values, now, share_x=ax_server)
_plot_bandwidth(grid_spec, values, now, share_x=ax_server)
PLT.tight_layout() # type: ignore
figure.tight_layout()
if show:
PLT.show() # type: ignore
PLT.close() # type: ignore
return None
if not save_path:
save_path = mktemp('.png') # nosec
PLT.savefig(save_path) # type: ignore
PLT.close() # type: ignore
return save_path
# pylint: disable=too-many-arguments
def make_history_graph(callback=None, days=0, hours=0, minutes=0, show: bool = False, save_path=None):
"""
Creates a graph of perfs
Args:
minutes: number of minutes to go back
hours: number of hours to go back
days: number of days to go back
callback: optional callable invoked with the resulting graph path
show: show and exit
save_path: specify path to save to (default to temp path)
"""
values_to_process = GraphValues(
dcs_cpu_history=CTX.dcs_cpu_history,
dcs_mem_history=CTX.dcs_mem_history,
server_cpu_history=CTX.server_cpu_history,
server_mem_history=CTX.server_mem_history,
server_bytes_recv_history=CTX.server_bytes_recv_history,
server_bytes_sent_history=CTX.server_bytes_sent_history,
players_history=CTX.players_history,
)
graph = _make_history_graph(values_to_process, days, hours, minutes, show, save_path)
if callback:
callback(graph)
# process_pool = futures.ProcessPoolExecutor(max_workers=1)
# values_to_process = GraphValues(
# dcs_cpu_history=CTX.dcs_cpu_history,
# dcs_mem_history=CTX.dcs_mem_history,
# server_cpu_history=CTX.server_cpu_history,
# server_mem_history=CTX.server_mem_history,
# server_bytes_recv_history=CTX.server_bytes_recv_history,
# server_bytes_sent_history=CTX.server_bytes_sent_history,
# players_history=CTX.players_history,
# )
# future = process_pool.submit(
# _make_history_graph, values_to_process, days, hours, minutes, show, save_path
# )
# if callback:
# future.add_done_callback(callback)
if __name__ == '__main__':
# Debug code
import random
TIME_DELTA = datetime.timedelta(hours=5)
TOTAL_SECONDS = int(TIME_DELTA.total_seconds())
NOW = datetime.datetime.now().timestamp()
PLAYER_COUNT = 0
CTX.players_history.append((NOW - TOTAL_SECONDS, 0))
SKIP = 0
for time_stamp in range(TOTAL_SECONDS, 0, -10):
CTX.server_mem_history.append(
(NOW - time_stamp, random.randint(60, 70))) # nosec
CTX.dcs_cpu_history.append((NOW - time_stamp, random.randint(20, 30))) # nosec
CTX.dcs_mem_history.append((NOW - time_stamp, random.randint(60, 70))) # nosec
SKIP += 1
if SKIP > 20:
SKIP = 0
CTX.server_bytes_recv_history.append(
(NOW - time_stamp, random.randint(0, 50000000))) # nosec
CTX.server_bytes_sent_history.append(
(NOW - time_stamp, random.randint(0, 50000000))) # nosec
if time_stamp <= int(TOTAL_SECONDS / 2):
CTX.server_cpu_history.append(
(NOW - time_stamp, random.randint(20, 30))) # nosec
if random.randint(0, 100) > 99: # nosec
PLAYER_COUNT += random.choice([-1, 1]) # nosec
if PLAYER_COUNT < 0:
PLAYER_COUNT = 0
continue
CTX.players_history.append((NOW - time_stamp, PLAYER_COUNT))
TIME_DELTA = datetime.datetime.now() - TIME_DELTA # type: ignore
TIME_DELTA = TIME_DELTA.timestamp() # type: ignore
make_history_graph(hours=5, save_path='./test.png')
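# A minimal usage sketch of the public entry point (assumes the esst package
# and its CTX metric histories are importable; the callback name is illustrative):
from esst.utils.historygraph import make_history_graph

def on_graph_ready(path):
    # Called with the PNG path once rendering is done (None if only shown).
    print(f"History graph saved to: {path}")

make_history_graph(callback=on_graph_ready, hours=6)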
| 32.747253
| 117
| 0.640017
| 1,508
| 11,920
| 4.723475
| 0.169761
| 0.030886
| 0.031588
| 0.033974
| 0.458655
| 0.38846
| 0.325284
| 0.265057
| 0.233329
| 0.224344
| 0
| 0.013602
| 0.266023
| 11,920
| 363
| 118
| 32.837466
| 0.800549
| 0.163926
| 0
| 0.174089
| 0
| 0
| 0.035455
| 0.005109
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060729
| false
| 0
| 0.040486
| 0.012146
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b04f60f28cbb6155e0266d15a62d61ce814d26c3
| 1,267
|
py
|
Python
|
20.valid-parentheses.py
|
Qianli-Ma/LeetCode
|
ebda421c3d652adffca5e547a22937bf1726a532
|
[
"MIT"
] | null | null | null |
20.valid-parentheses.py
|
Qianli-Ma/LeetCode
|
ebda421c3d652adffca5e547a22937bf1726a532
|
[
"MIT"
] | null | null | null |
20.valid-parentheses.py
|
Qianli-Ma/LeetCode
|
ebda421c3d652adffca5e547a22937bf1726a532
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=20 lang=python3
#
# [20] Valid Parentheses
#
# https://leetcode.com/problems/valid-parentheses/description/
#
# algorithms
# Easy (36.20%)
# Total Accepted: 554.4K
# Total Submissions: 1.5M
# Testcase Example: '"()"'
#
# Given a string containing just the characters '(', ')', '{', '}', '[' and
# ']', determine if the input string is valid.
#
# An input string is valid if:
#
#
# Open brackets must be closed by the same type of brackets.
# Open brackets must be closed in the correct order.
#
#
# Note that an empty string is also considered valid.
#
# Example 1:
#
#
# Input: "()"
# Output: true
#
#
# Example 2:
#
#
# Input: "()[]{}"
# Output: true
#
#
# Example 3:
#
#
# Input: "(]"
# Output: false
#
#
# Example 4:
#
#
# Input: "([)]"
# Output: false
#
#
# Example 5:
#
#
# Input: "{[]}"
# Output: true
#
#
#
class Solution:
def isValid(self, s: str) -> bool:
stack = []
dict = {"]": "[", "}": "{", ")": "("}
for char in s:
if char in dict.values():
stack.append(char)
elif char in dict.keys():
if stack == [] or dict[char] != stack.pop():
return False
else:
return False
return stack == []
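# A few sanity checks mirroring the examples in the header above
# (a hypothetical driver, not part of the LeetCode submission):
if __name__ == '__main__':
    solution = Solution()
    assert solution.isValid("()")
    assert solution.isValid("()[]{}")
    assert not solution.isValid("(]")
    assert not solution.isValid("([)]")
    assert solution.isValid("{[]}")
    assert solution.isValid("")  # the empty string counts as valid
    print("all examples pass")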
| 16.454545
| 75
| 0.534333
| 147
| 1,267
| 4.605442
| 0.557823
| 0.081241
| 0.06647
| 0.053176
| 0.070901
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022099
| 0.285714
| 1,267
| 76
| 76
| 16.671053
| 0.725967
| 0.568272
| 0
| 0.153846
| 0
| 0
| 0.012371
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b05bf40e3728937480f8f42cb9c975d60036475f
| 6,911
|
py
|
Python
|
neptune-python-utils/neptune_python_utils/glue_gremlin_client.py
|
Alfian878787/amazon-neptune-tools
|
a447da238e99612a290babc66878fe772727a19e
|
[
"Apache-2.0"
] | null | null | null |
neptune-python-utils/neptune_python_utils/glue_gremlin_client.py
|
Alfian878787/amazon-neptune-tools
|
a447da238e99612a290babc66878fe772727a19e
|
[
"Apache-2.0"
] | null | null | null |
neptune-python-utils/neptune_python_utils/glue_gremlin_client.py
|
Alfian878787/amazon-neptune-tools
|
a447da238e99612a290babc66878fe772727a19e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Amazon.com, Inc. or its affiliates.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import sys
from pyspark.sql.functions import lit
from pyspark.sql.functions import format_string
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from gremlin_python.driver.protocol import GremlinServerError
from gremlin_python.process.traversal import *
from neptune_python_utils.gremlin_utils import GremlinUtils
from neptune_python_utils.endpoints import Endpoints
class GlueGremlinClient:
def __init__(self, endpoints):
self.gremlin_utils = GremlinUtils(endpoints)
GremlinUtils.init_statics(globals())
def add_vertices(self, label):
"""Adds a vertex with the supplied label for each row in a DataFrame partition.
If the DataFrame contains an '~id' column, the values in this column will be treated as user-supplied IDs for the new vertices.
If the DataFrame does not have an '~id' column, Neptune will autogenerate a UUID for each vertex.
Example:
>>> dynamicframe.toDF().foreachPartition(neptune.add_vertices('Product'))
"""
def add_vertices_for_label(rows):
try:
conn = self.gremlin_utils.remote_connection()
g = self.gremlin_utils.traversal_source(connection=conn)
for row in rows:
entries = row.asDict()
traversal = g.addV(label)
for key, value in entries.items():
key = key.split(':')[0]
if key == '~id':
traversal.property(id, value)
elif key == '~label':
pass
else:
traversal.property(key, value)
traversal.next()
conn.close()
except GremlinServerError as err:
print("Neptune error: {0}".format(err))
except Exception:
print("Unexpected error:", sys.exc_info()[0])
return add_vertices_for_label
def upsert_vertices(self, label):
"""Conditionally adds vertices for the rows in a DataFrame partition using the Gremlin coalesce() idiom.
The DataFrame must contain an '~id' column.
Example:
>>> dynamicframe.toDF().foreachPartition(neptune.upsert_vertices('Product'))
"""
def upsert_vertices_for_label(rows):
try:
conn = self.gremlin_utils.remote_connection()
g = self.gremlin_utils.traversal_source(connection=conn)
for row in rows:
entries = row.asDict()
create_traversal = __.addV(label)
for key, value in entries.items():
key = key.split(':')[0]
if key == '~id':
create_traversal.property(id, value)
elif key == '~label':
pass
else:
create_traversal.property(key, value)
g.V(entries['~id']).fold().coalesce(__.unfold(), create_traversal).next()
conn.close()
except GremlinServerError as err:
print("Neptune error: {0}".format(err))
except Exception:
print("Unexpected error:", sys.exc_info()[0])
return upsert_vertices_for_label
def add_edges(self, label):
"""Adds an edge with the supplied label for each row in a DataFrame partition.
If the DataFrame contains an '~id' column, the values in this column will be treated as user-supplied IDs for the new edges.
If the DataFrame does not have an '~id' column, Neptune will autogenerate a UUID for each edge.
Example:
>>> dynamicframe.toDF().foreachPartition(neptune.add_edges('ORDER_DETAIL'))
"""
def add_edges_for_label(rows):
try:
conn = self.gremlin_utils.remote_connection()
g = self.gremlin_utils.traversal_source(connection=conn)
for row in rows:
entries = row.asDict()
traversal = g.V(row['~from']).addE(label).to(V(row['~to'])).property(id, row['~id'])
for key, value in entries.items():
key = key.split(':')[0]
if key not in ['~id', '~from', '~to', '~label']:
traversal.property(key, value)
traversal.next()
conn.close()
except GremlinServerError as err:
print("Neptune error: {0}".format(err))
except Exception:
print("Unexpected error:", sys.exc_info()[0])
return add_edges_for_label
def upsert_edges(self, label):
"""Conditionally adds edges for the rows in a DataFrame partition using the Gremlin coalesce() idiom.
The DataFrame must contain '~id', '~from', '~to' and '~label' columns.
Example:
>>> dynamicframe.toDF().foreachPartition(neptune.upsert_edges('ORDER_DETAIL'))
"""
def add_edges_for_label(rows):
try:
conn = self.gremlin_utils.remote_connection()
g = self.gremlin_utils.traversal_source(connection=conn)
for row in rows:
entries = row.asDict()
create_traversal = __.V(row['~from']).addE(label).to(V(row['~to'])).property(id, row['~id'])
for key, value in entries.items():
key = key.split(':')[0]
if key not in ['~id', '~from', '~to', '~label']:
create_traversal.property(key, value)
g.E(entries['~id']).fold().coalesce(__.unfold(), create_traversal).next()
conn.close()
except GremlinServerError as err:
print("Neptune error: {0}".format(err))
except Exception:
print("Unexpected error:", sys.exc_info()[0])
return add_edges_for_label
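# A minimal usage sketch based on the docstring examples. The Neptune endpoint
# value and the Glue `dynamicframe` are assumptions, and the Endpoints
# constructor arguments may differ in your version of neptune_python_utils:
from neptune_python_utils.endpoints import Endpoints

endpoints = Endpoints(neptune_endpoint='my-cluster.cluster-abc.us-east-1.neptune.amazonaws.com')
neptune = GlueGremlinClient(endpoints)
# Upsert one 'Product' vertex per row of each DataFrame partition.
dynamicframe.toDF().foreachPartition(neptune.upsert_vertices('Product'))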
| 46.073333
| 135
| 0.570395
| 763
| 6,911
| 5.044561
| 0.218873
| 0.031177
| 0.037412
| 0.021824
| 0.665887
| 0.650818
| 0.58171
| 0.58171
| 0.58171
| 0.558846
| 0
| 0.004337
| 0.332658
| 6,911
| 150
| 136
| 46.073333
| 0.830226
| 0.261323
| 0
| 0.68
| 0
| 0
| 0.04578
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09
| false
| 0.02
| 0.12
| 0
| 0.26
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b05fe1389ad39d5ec1240e047aa523f2264c0d97
| 343
|
py
|
Python
|
floyd_warshall/messages/rate_request.py
|
hrs231/sample-code
|
91c2972d1a414397d3505d3b4df9ee80b67bcac0
|
[
"MIT"
] | null | null | null |
floyd_warshall/messages/rate_request.py
|
hrs231/sample-code
|
91c2972d1a414397d3505d3b4df9ee80b67bcac0
|
[
"MIT"
] | null | null | null |
floyd_warshall/messages/rate_request.py
|
hrs231/sample-code
|
91c2972d1a414397d3505d3b4df9ee80b67bcac0
|
[
"MIT"
] | null | null | null |
class RateRequest(object):
"""" Used by Price Engine Clients to query the Price Engine """
def __init__(self, exch_1, curr_1, exch_2, curr_2):
self.exch_1 = exch_1
self.curr_1 = curr_1
self.exch_2 = exch_2
self.curr_2 = curr_2
self.rate = 0
self.path = []
self.error_msg = None
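# A hypothetical example of constructing a request for a cross-exchange rate:
request = RateRequest('NYSE', 'USD', 'LSE', 'GBP')
print(request.rate)       # 0 until the Price Engine fills it in
print(request.path)       # populated with the conversion path, if any
print(request.error_msg)  # None unless the engine reports a failure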
| 28.583333
| 67
| 0.594752
| 53
| 343
| 3.528302
| 0.45283
| 0.128342
| 0.096257
| 0.106952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054852
| 0.309038
| 343
| 12
| 68
| 28.583333
| 0.734177
| 0.163265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0618e2deaae21564649c946c7681a44ee75680f
| 2,613
|
py
|
Python
|
backend/app/api/api_v1/router/file/excel_tool.py
|
PY-GZKY/fastapi-crawl-admin
|
6535054994d11e3c31b4caeae65e8fa0f495d2b7
|
[
"MIT"
] | 13
|
2021-07-25T15:26:04.000Z
|
2022-03-02T12:12:02.000Z
|
backend/app/api/api_v1/router/file/excel_tool.py
|
PY-GZKY/fastapi-crawl-admin
|
6535054994d11e3c31b4caeae65e8fa0f495d2b7
|
[
"MIT"
] | 1
|
2021-07-26T03:26:09.000Z
|
2021-07-26T09:05:38.000Z
|
backend/app/api/api_v1/router/file/excel_tool.py
|
PY-GZKY/fastapi-crawl-admin
|
6535054994d11e3c31b4caeae65e8fa0f495d2b7
|
[
"MIT"
] | 3
|
2021-07-26T01:44:24.000Z
|
2021-07-31T14:31:49.000Z
|
# -*- coding: utf-8 -*
# @Time : 2020/12/22 15:58
from fastapi import Depends
from motor.motor_asyncio import AsyncIOMotorClient
from app.api.db.mongoDB import get_database
import pandas as pd
import numpy as np
from io import BytesIO
class ExcelTools:
def __init__(self, columns_map=None, order=None):
'''
:param columns_map: column-name mapping => {"name": "Name", "score": "Score", "sex": "Sex"}
:param order: column ordering list => ["name", "sex", "score"]
'''
self.columns_map = columns_map
self.order = order
def get_excel_header(self, excel):
df = pd.read_excel(excel, skiprows=None)
print(df.columns)
return df.columns
# Convert a spreadsheet to a list of dicts
def excel_to_dict(self, excel, skiprows=None):
'''
Convert an Excel file to a Python list of dicts
:param excel: bytes
:param skiprows: rows to skip before the header (passed through to pandas)
:return:
'''
if not excel:
return []
df = pd.read_excel(excel, skiprows=skiprows)
df = df.replace(np.nan, '', regex=True)
# Strip whitespace from every string cell
stripstr = lambda x: x.strip() if isinstance(x, str) else x
df = df.applymap(stripstr)
# Map column names
if self.columns_map:
columns_map = dict(zip(self.columns_map.values(), self.columns_map.keys()))
df = df.rename(columns=columns_map)
result = df.to_dict(orient='records')
return result
# Convert a list of dicts to a spreadsheet
def dict_to_excel(self, datas):
"""
:param datas: dataset => [{"name": "Zhang San", "score": 90, "sex": "male"}]
:return:
"""
# Initialise the in-memory buffer
output = BytesIO()
# Convert the list of dicts to a DataFrame
pf = pd.DataFrame(datas)
# Order the columns
if self.order:
pf = pf[self.order]
# Rename columns using the display-name mapping
if self.columns_map:
pf.rename(columns=self.columns_map, inplace=True)
# Create the Excel writer over the buffer
writer = pd.ExcelWriter(output, engine='xlsxwriter')
# Replace empty cells
pf.fillna(' ', inplace=True)
# Write the sheet (the deprecated `encoding` argument is dropped)
pf.to_excel(writer, sheet_name='sheet1', index=False)
# Format the Excel sheet
workbook = writer.book
worksheet = writer.sheets['sheet1']
wrap_format = workbook.add_format({'text_wrap': True})  # avoid shadowing built-in `format`
# Set column widths
for i, col in enumerate(pf.columns):
# find and set length of column
column_len = pf[col].astype(str).str.len().max()
column_len = max(column_len, len(col)) + 2
# set column length
worksheet.set_column(i, i, column_len)
# Flush and rewind the buffer
writer.close()
output.seek(0)
return output
if __name__ == '__main__':
pass
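# A minimal round-trip sketch (the column names are hypothetical; writing
# requires the xlsxwriter package):
tools = ExcelTools(columns_map={"name": "Name", "score": "Score"},
                   order=["name", "score"])
buffer = tools.dict_to_excel([{"name": "Alice", "score": 90}])
with open("students.xlsx", "wb") as f:
    f.write(buffer.read())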
| 25.871287
| 87
| 0.564485
| 313
| 2,613
| 4.571885
| 0.450479
| 0.083857
| 0.068484
| 0.02935
| 0.075472
| 0.041929
| 0.041929
| 0
| 0
| 0
| 0
| 0.01168
| 0.311902
| 2,613
| 101
| 88
| 25.871287
| 0.784205
| 0.168006
| 0
| 0.083333
| 0
| 0
| 0.025292
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0.020833
| 0.125
| 0
| 0.3125
| 0.020833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0619b37fbd880320070eeeb51552bb149486090
| 1,164
|
py
|
Python
|
Lab8/1 + 2 (Simple socket server)/simple_client.py
|
marianfx/python-labs
|
7066db410ad19cababb7b66745641e65a28ccd98
|
[
"MIT"
] | null | null | null |
Lab8/1 + 2 (Simple socket server)/simple_client.py
|
marianfx/python-labs
|
7066db410ad19cababb7b66745641e65a28ccd98
|
[
"MIT"
] | null | null | null |
Lab8/1 + 2 (Simple socket server)/simple_client.py
|
marianfx/python-labs
|
7066db410ad19cababb7b66745641e65a28ccd98
|
[
"MIT"
] | null | null | null |
"""Simple socket client for the simple socket client."""
import sys
import socket
import time
SOCKET_ADDRESS = "127.0.0.1"
SOCKET_PORT = 6996
def build_client_tcp(address: str, port: int):
"""Builds the TCP client."""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((address, port))
time.sleep(1)
sock.close()
except OSError:
print("Cannot connect to the target server.")
def build_client_udp(address: str, port: int, message: str):
"""Builds the UDP client."""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(message.encode(), (address, port))
if __name__ == "__main__":
if len(sys.argv) < 5:
print("You must give as args the mode, server address, the port and the message to send.")
sys.exit(1)
MODE = sys.argv[1]
SOCKET_ADDRESS = sys.argv[2]
SOCKET_PORT = int(sys.argv[3])
MESSAGE = sys.argv[4]
if MODE == "TCP":
build_client_tcp(SOCKET_ADDRESS, SOCKET_PORT)
elif MODE == "UDP":
build_client_udp(SOCKET_ADDRESS, SOCKET_PORT, MESSAGE)
else:
print("Unable to determine what you want.")
| 28.390244
| 98
| 0.649485
| 165
| 1,164
| 4.412121
| 0.381818
| 0.048077
| 0.049451
| 0.046703
| 0.104396
| 0.104396
| 0.104396
| 0.104396
| 0
| 0
| 0
| 0.017758
| 0.225945
| 1,164
| 40
| 99
| 29.1
| 0.790233
| 0.082474
| 0
| 0
| 0
| 0
| 0.165399
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.1
| 0
| 0.166667
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b062b0f29115369104d664570dbb03f1de934fe3
| 2,689
|
py
|
Python
|
009/app.py
|
ilos-vigil/random-script
|
bf8d45196d4faa6912dc0469a86b8370f43ce7ac
|
[
"MIT"
] | null | null | null |
009/app.py
|
ilos-vigil/random-script
|
bf8d45196d4faa6912dc0469a86b8370f43ce7ac
|
[
"MIT"
] | null | null | null |
009/app.py
|
ilos-vigil/random-script
|
bf8d45196d4faa6912dc0469a86b8370f43ce7ac
|
[
"MIT"
] | null | null | null |
import bs4
import nltk
import json
import re
import requests
with open('./acronym_abbreviation_id.json', 'r') as f:
data = f.read()
list_acronym_abbreviation = json.loads(data)
from_wikipedia = False
if from_wikipedia:
# Take text with Indonesian language from Wikipedia randomly
html = requests.get('https://id.wikipedia.org/wiki/Istimewa:Halaman_sembarang').text
soup = bs4.BeautifulSoup(html, 'html.parser')
text = ''
for p in soup.find('div', class_='mw-parser-output').find_all('p'):
text = f'{text}{p.get_text()}'
text = re.sub(r'\n', '', text)
text = re.sub(r'\[\d*\]', '', text)
else:
text = '''
Linux (atau GNU/Linux, lihat kontroversi penamaannya) adalah nama yang diberikan kepada kumpulan sistem operasi Mirip Unix yang menggunakan Kernel Linux sebagai kernelnya. Linux merupakan proyek perangkat lunak bebas dan sumber terbuka terbesar di dunia. Seperti perangkat lunak bebas dan sumber terbuka lainnya pada umumnya, kode sumber Linux dapat dimodifikasi, digunakan dan didistribusikan kembali secara bebas oleh siapa saja
'''
text = re.sub(r'\n', '', text)
print(f'Input : {text}')
# split into sentences
# step 1
boundary = '•'
rule = {
r'\.': f'.{boundary}',
r'\?': f'?{boundary}',
'!': f'!{boundary}',
';': f';{boundary}',
':': f':{boundary}'
}
for old, new in rule.items():
text = re.sub(old, new, text)
# step 2
for word in re.finditer(r'"(.+)"', text):
start_position, end_position = word.regs[0][0], word.regs[0][1]
quoted_sentence = text[start_position:end_position]
quoted_sentence = re.sub('•', '', quoted_sentence) # remove boundary
if text[end_position] == '.': # move boundary if character after " is .
text = text[:start_position] + quoted_sentence + text[end_position:]
else:
text = text[:start_position] + quoted_sentence + '•' + text[end_position:]
# step 3
for word in re.finditer(r'([\w]*)(\.|\?|!|;|:)•', text): # [word][sign]•
# regs[0] -> whole match, regs[1] -> [word] group, regs[2] -> [sign] group
# each position value is a (start, end + 1) pair
word_start_position, word_end_position, boundary_position = word.regs[1][0], word.regs[2][1], word.regs[0][1]
if text[word_start_position:word_end_position] in list_acronym_abbreviation:
text = text[:word_end_position] + text[boundary_position:] # remove boundary
# step 4
for word in re.finditer(r'([\w]+) ?(!|\?)(•) ?[a-z]', text): #[word](optional space)[sign][•](optional space)[lowercase char]
boundary_position = word.regs[2][1]
text = text[:boundary_position] + text[boundary_position + 1:]  # drop the spurious boundary
# step 5
sentences = text.split('•')
print('Output:')
[print(s.lstrip(' ').rstrip(' ')) for s in sentences]
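# A quick illustration of the step-1 boundary insertion on a toy string,
# reusing the rule table defined above:
sample = 'Halo dunia. Apa kabar?'
for old, new in rule.items():
    sample = re.sub(old, new, sample)
print(sample)  # -> Halo dunia.• Apa kabar?•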
| 38.414286
| 430
| 0.661584
| 386
| 2,689
| 4.53886
| 0.367876
| 0.050228
| 0.020548
| 0.017123
| 0.214612
| 0.174658
| 0.025114
| 0.025114
| 0
| 0
| 0
| 0.010314
| 0.170695
| 2,689
| 69
| 431
| 38.971014
| 0.769058
| 0.144663
| 0
| 0.08
| 0
| 0.02
| 0.297155
| 0.022319
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0.06
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b062c54e4119bba9afb9e6fce3e62bb1a445400e
| 2,295
|
py
|
Python
|
graphs/page_rank.py
|
tg12/Python
|
398d1dbf4b780d1725aeae9a91b4c79f4410e2f0
|
[
"MIT"
] | null | null | null |
graphs/page_rank.py
|
tg12/Python
|
398d1dbf4b780d1725aeae9a91b4c79f4410e2f0
|
[
"MIT"
] | null | null | null |
graphs/page_rank.py
|
tg12/Python
|
398d1dbf4b780d1725aeae9a91b4c79f4410e2f0
|
[
"MIT"
] | 1
|
2020-06-26T09:46:17.000Z
|
2020-06-26T09:46:17.000Z
|
'''THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE
DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY,
WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
"""
Author: https://github.com/bhushan-borole
"""
"""
The input graph for the algorithm is:
A B C
A 0 1 1
B 0 0 1
C 1 0 0
"""
graph = [[0, 1, 1], [0, 0, 1], [1, 0, 0]]
class Node:
def __init__(self, name):
self.name = name
self.inbound = []
self.outbound = []
def add_inbound(self, node):
self.inbound.append(node)
def add_outbound(self, node):
self.outbound.append(node)
def __repr__(self):
return "Node {}: Inbound: {} ; Outbound: {}".format(
self.name, self.inbound, self.outbound
)
def page_rank(nodes, limit=3, d=0.85):
ranks = {}
for node in nodes:
ranks[node.name] = 1
outbounds = {}
for node in nodes:
outbounds[node.name] = len(node.outbound)
for i in range(limit):
print("======= Iteration {} =======".format(i + 1))
for node in nodes:
ranks[node.name] = (1 - d) + d * sum(
[ranks[ib] / outbounds[ib] for ib in node.inbound]
)
print(ranks)
def main():
names = list(input("Enter Names of the Nodes: ").split())
nodes = [Node(name) for name in names]
for ri, row in enumerate(graph):
for ci, col in enumerate(row):
if col == 1:
nodes[ci].add_inbound(names[ri])
nodes[ri].add_outbound(names[ci])
print("======= Nodes =======")
for node in nodes:
print(node)
page_rank(nodes)
if __name__ == "__main__":
main()
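# A worked example for the 3x3 adjacency matrix above. Because ranks are
# updated in place within an iteration, later nodes already see fresh values:
#   Enter Names of the Nodes: A B C
# With the default d=0.85, iteration 1 prints:
#   {'A': 1.0, 'B': 0.575, 'C': 1.06375}
# (A's only inbound is C with rank 1; B sees A's fresh 1.0 split over A's two
#  outbound links; C sums the contributions from A and B.)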
| 25.21978
| 74
| 0.616122
| 301
| 2,295
| 4.624585
| 0.408638
| 0.031609
| 0.006466
| 0.030172
| 0.076868
| 0.043103
| 0
| 0
| 0
| 0
| 0
| 0.042966
| 0.259695
| 2,295
| 90
| 75
| 25.5
| 0.776339
| 0.326797
| 0
| 0.071429
| 0
| 0
| 0.083866
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0.02381
| 0.190476
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b064ac81a6a14605eca93bb63e07f0834ed4309a
| 1,147
|
py
|
Python
|
lairgpt/utils/assets.py
|
lightonai/lairgpt
|
7580e1339a39662b2ff636d158c36195eb7fe3fb
|
[
"MIT"
] | 19
|
2021-05-04T13:54:45.000Z
|
2022-01-05T15:45:12.000Z
|
lairgpt/utils/assets.py
|
lightonai/lairgpt
|
7580e1339a39662b2ff636d158c36195eb7fe3fb
|
[
"MIT"
] | null | null | null |
lairgpt/utils/assets.py
|
lightonai/lairgpt
|
7580e1339a39662b2ff636d158c36195eb7fe3fb
|
[
"MIT"
] | 1
|
2021-05-28T15:25:12.000Z
|
2021-05-28T15:25:12.000Z
|
from enum import Enum
from os.path import expanduser
from lairgpt.utils.remote import local_dir
class Config(Enum):
"""Settings for preconfigured models instances
"""
SMALL = {
"d_model": 768,
"n_heads": 12,
"n_layers": 12,
"vocab_size": 50262,
"max_seq_len": 1024
}
MEDIUM = {
"d_model": 1024,
"n_heads": 16,
"n_layers": 24,
"vocab_size": 50262,
"max_seq_len": 1024
}
LARGE = {
"d_model": 1280,
"n_heads": 20,
"n_layers": 36,
"vocab_size": 50262,
"max_seq_len": 1024
}
XLARGE = {
"d_model": 1280,
"n_heads": 20,
"n_layers": 36,
"vocab_size": 50262,
"max_seq_len": 1024
}
class Snapshot(Enum):
"""Snapshots for preconfigured models state dictionaries
"""
SMALL = local_dir + "small.pt"
MEDIUM = local_dir + "medium.pt"
LARGE = local_dir + "large.pt"
XLARGE = local_dir + "xlarge.pt"
class Tokenizer(Enum):
"""Tokenizers for preconfigured models inference
"""
CCNET = local_dir + "tokenizer_ccnet.json"
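# A minimal sketch of consuming these enums (paths depend on local_dir):
cfg = Config.MEDIUM.value
print(cfg["n_layers"], cfg["d_model"])  # 24 1024
weights_path = Snapshot.MEDIUM.value    # <local_dir>/medium.pt
tokenizer_path = Tokenizer.CCNET.value  # <local_dir>/tokenizer_ccnet.json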
| 23.408163
| 60
| 0.558849
| 135
| 1,147
| 4.518519
| 0.377778
| 0.078689
| 0.091803
| 0.111475
| 0.265574
| 0.265574
| 0.265574
| 0.177049
| 0.177049
| 0.177049
| 0
| 0.085459
| 0.316478
| 1,147
| 48
| 61
| 23.895833
| 0.692602
| 0.133391
| 0
| 0.358974
| 0
| 0
| 0.232271
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0651029340e768b51b715881e03f9826ce6837f
| 1,546
|
py
|
Python
|
smart_open/__init__.py
|
DataTron-io/smart_open
|
3565eff8f0ffe19d7fd31063753384e0084fb1e0
|
[
"MIT"
] | 1
|
2020-09-28T06:47:58.000Z
|
2020-09-28T06:47:58.000Z
|
smart_open/__init__.py
|
DataTron-io/smart_open
|
3565eff8f0ffe19d7fd31063753384e0084fb1e0
|
[
"MIT"
] | null | null | null |
smart_open/__init__.py
|
DataTron-io/smart_open
|
3565eff8f0ffe19d7fd31063753384e0084fb1e0
|
[
"MIT"
] | null | null | null |
import logging
import os
import shutil
from .smart_open_lib import *
DEFAULT_CHUNKSIZE = 16 * 1024 * 1024  # 16 MB
def copy_file(src, dest, close_src=True, close_dest=True, make_path=False):
"""
Copies file from src to dest. Supports s3 and webhdfs (does not include kerberos support)
If src does not exist, a FileNotFoundError is raised.
:param src: file-like object or path
:param dest: file-like object or path
:param close_src: boolean (optional). if True, src file is closed after use.
:param close_dest: boolean (optional). if True, dest file is closed after use.
:param make_path: str (optional, default False). if True, destination parent directories are created if missing. Only if path is local
"""
logging.info("Copy file from {} to {}".format(src, dest))
if make_path:
dir_path, _ = os.path.split(dest)
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
in_file = smart_open(src, 'rb')
out_file = smart_open(dest, 'wb')
try:
shutil.copyfileobj(in_file, out_file, DEFAULT_CHUNKSIZE)
except NotImplementedError as e:
logging.info("Error encountered copying file. Falling back to looping over input file. {}".format(e))
for line in in_file:
out_file.write(line)
try:
out_file.flush()
except Exception as e:
logging.info("Unable to flush out_file")
if in_file and not in_file.closed and close_src:
in_file.close()
if out_file and not out_file.closed and close_dest:
out_file.close()
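# A minimal usage sketch (the S3 key and local path are hypothetical):
copy_file('s3://my-bucket/raw/data.csv',
          '/tmp/exports/data.csv',
          make_path=True)  # creates /tmp/exports if it is missing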
| 34.355556
| 138
| 0.679172
| 233
| 1,546
| 4.360515
| 0.377682
| 0.055118
| 0.027559
| 0.031496
| 0.098425
| 0.098425
| 0
| 0
| 0
| 0
| 0
| 0.010943
| 0.231565
| 1,546
| 44
| 139
| 35.136364
| 0.844276
| 0.335058
| 0
| 0.08
| 0
| 0
| 0.12753
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.08
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b068470f8ca662453890dee9ded5d2a25fb6fcdd
| 4,706
|
py
|
Python
|
guacozy_server/backend/api/utils.py
|
yinm8315/guacozy-django-react
|
99a8270cb660052d3b4868b7959a5750968d9cc3
|
[
"MIT"
] | 121
|
2019-10-28T09:23:05.000Z
|
2022-03-19T00:30:36.000Z
|
guacozy_server/backend/api/utils.py
|
peppelinux/guacozy
|
ff4ca3fae8b9a5cb379a7a73d39f0d0ea8b6521c
|
[
"MIT"
] | 43
|
2019-10-28T09:22:59.000Z
|
2022-03-18T23:01:25.000Z
|
guacozy_server/backend/api/utils.py
|
peppelinux/guacozy
|
ff4ca3fae8b9a5cb379a7a73d39f0d0ea8b6521c
|
[
"MIT"
] | 44
|
2019-11-05T01:58:05.000Z
|
2022-03-30T08:05:18.000Z
|
import rules
from backend.models import Folder
def add_folder_to_tree_dictionary(folder, resulting_set, include_ancestors=False):
"""
Adds folder, folder's ancestors and folder's descendants
Ancestors are needed to build the traverse path in tree view
Descendants are needed because user has permission to see them
:type folder: Folder
:type resulting_set: set
:type include_ancestors: bool
"""
# Include all ancestors, which we get from django-mptt's get_ancestors()
# it's a "cheap" query
if include_ancestors and folder.parent is not None:
for ancestor in folder.parent.get_ancestors(ascending=False, include_self=True):
resulting_set.add(ancestor)
# add this folder
resulting_set.add(folder)
# add all of the folder's children
for child in folder.children.all():
add_folder_to_tree_dictionary(child, resulting_set, include_ancestors=False)
def check_folder_permissions(folder, resulting_set, user, require_view_permission=False):
"""
Recursively check folders and adds it to resulting_set if user has direct permission on folder
If require_view_permission is set to True, it returns only folders with direct permission and all child folders
If require_view_permission is set to False, it also returns all ancestor folders
:type folder: backend.Folder
:type user: users.User
:type resulting_set: set
:type require_view_permission: bool
"""
if rules.test_rule('has_direct_permission', user, folder):
add_folder_to_tree_dictionary(folder, resulting_set, include_ancestors=not require_view_permission)
else:
for child in folder.children.all():
check_folder_permissions(child, resulting_set, user, require_view_permission)
def folder_to_object(folder, user, allowed_to_list=None, allowed_to_view=None, include_objects=True):
"""
Given folder converts it and it's children and objects to a tree format, which is used in API
:type folder: Folder
:type user: users.User
:type allowed_to_list: set
:type allowed_to_view: set
:type include_objects: bool
"""
if allowed_to_list is None:
allowed_to_list = user_allowed_folders_ids(user, require_view_permission=False)
if allowed_to_view is None:
allowed_to_view = user_allowed_folders_ids(user, require_view_permission=True)
result = {'id': folder.id, 'text': folder.name, 'isFolder': True}
result_children = []
# For every child, check whether it is included in the allowed folders
# (a precalculated list of the allowed folders and
# their ancestors, which is needed to reach this folder)
for child in folder.children.all():
if child in allowed_to_list:
result_children += [folder_to_object(
folder=child,
user=user,
allowed_to_list=allowed_to_list,
allowed_to_view=allowed_to_view,
include_objects=include_objects
)
]
# If we are asked (include_objects) and folder is in allowed_to_view list
# include all objects (currently only connections)
if include_objects and folder.id in allowed_to_view:
for connection in folder.connections.all():
connection_object = {'id': connection.id,
'text': connection.name,
'isFolder': False,
'protocol': connection.protocol,
}
result_children += [connection_object]
result['children'] = result_children
return result
def user_allowed_folders(user, require_view_permission=False):
"""
If require_view_permission is False, return list of folders user is allowed to list
If require_view_permission is True, return list of folders user is allowed to view
:type require_view_permission: bool
:type user: users.User
"""
resulting_folder = set()
# iterate over root folders
for folder in Folder.objects.all().filter(parent=None):
check_folder_permissions(folder, resulting_folder, user, require_view_permission)
return resulting_folder
def user_allowed_folders_ids(user, require_view_permission=False):
"""
If require_view_permission is False, return list of ids of folders user is allowed to list
If require_view_permission is True, return list of ids of folders user is allowed to view
:type require_view_permission: bool
:type user: users.User
"""
resulting_set = set()
for folder in user_allowed_folders(user, require_view_permission):
resulting_set.add(folder.id)
return resulting_set
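# A minimal sketch of assembling the tree the API returns for one user
# (the `user` object is a hypothetical users.User instance):
allowed = user_allowed_folders(user)
tree = [
    folder_to_object(folder, user)
    for folder in Folder.objects.all().filter(parent=None)
    if folder in allowed
]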
| 36.765625
| 115
| 0.698683
| 626
| 4,706
| 5.028754
| 0.167732
| 0.05432
| 0.120076
| 0.063532
| 0.401842
| 0.327192
| 0.260801
| 0.241423
| 0.226811
| 0.203939
| 0
| 0
| 0.236507
| 4,706
| 127
| 116
| 37.055118
| 0.876148
| 0.379728
| 0
| 0.057692
| 0
| 0
| 0.023671
| 0.007647
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096154
| false
| 0
| 0.038462
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b06a64034b02fc50eab6da81b27b39ddfc4affcc
| 348
|
py
|
Python
|
web/services/device-service/src/app.py
|
fhgrings/match-io
|
0acb0b006ae8d8073f1d148e80275a568c2517ae
|
[
"MIT"
] | null | null | null |
web/services/device-service/src/app.py
|
fhgrings/match-io
|
0acb0b006ae8d8073f1d148e80275a568c2517ae
|
[
"MIT"
] | null | null | null |
web/services/device-service/src/app.py
|
fhgrings/match-io
|
0acb0b006ae8d8073f1d148e80275a568c2517ae
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_cors import CORS
from src.ext import configuration
def minimal_app(**config):
app = Flask(__name__)
configuration.init_app(app, **config)
CORS(app)
return app
def create_app(**config):
app = minimal_app(**config)
configuration.load_extensions(app)
return app
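# A minimal usage sketch (the config keys accepted by configuration.init_app
# are assumptions):
app = create_app(DEBUG=True)

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)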
| 19.333333
| 42
| 0.672414
| 44
| 348
| 5.090909
| 0.386364
| 0.160714
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241379
| 348
| 18
| 43
| 19.333333
| 0.848485
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.25
| 0
| 0.583333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b06d15947556e9e4b04c29a89022d993e3d2bccf
| 4,357
|
py
|
Python
|
src/face_utils/save_figure.py
|
hankyul2/FaceDA
|
73006327df3668923d4206f81d4976ca1240329d
|
[
"Apache-2.0"
] | null | null | null |
src/face_utils/save_figure.py
|
hankyul2/FaceDA
|
73006327df3668923d4206f81d4976ca1240329d
|
[
"Apache-2.0"
] | null | null | null |
src/face_utils/save_figure.py
|
hankyul2/FaceDA
|
73006327df3668923d4206f81d4976ca1240329d
|
[
"Apache-2.0"
] | null | null | null |
import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import albumentations as A
from pathlib import Path
import torch
from torch import nn
from src_backup.cdan import get_model
from src.backbone.iresnet import get_arcface_backbone
class MyModel(nn.Module):
def __init__(self, backbone):
super().__init__()
self.backbone = backbone
self.layers = [backbone.layer1, backbone.layer2, backbone.layer3, backbone.layer4]
def forward(self, x):
activations = []
x = self.backbone.prelu(self.backbone.bn1(self.backbone.conv1(x)))
for layer in self.layers:
x = layer(x)
activations.append(x)
return activations
def get_best_model(mode='arcface', base_path='log/best_weight/{}.pth'):
model_path_dict = {'BSP': 'FACE_CDAN_BSP_BOTH', 'DAN': 'FACE_DAN_BOTH',
'BOTH': 'FACE_BOTH', 'FACE': 'FACE'}
backbone = get_arcface_backbone('cpu')
if mode != 'arcface':
backbone = get_model(backbone, fc_dim=512, embed_dim=512, nclass=460, hidden_dim=1024,
pretrained_path=base_path.format(model_path_dict[mode])).backbone
backbone.eval()
return MyModel(backbone)
def img_preprocessing(img):
transforms = A.Compose([
A.SmallestMaxSize(112),
A.CenterCrop(112, 112, p=1),
])
img = ((np.transpose(transforms(image=np.array(img))['image'], (2, 0, 1)) / 255) - 0.5) / 0.5
return img
def activation_based_map_f(activations):
attention_map = []
for activation in activations:
img = activation.pow(2).mean(1).detach().numpy()[0, :, :, np.newaxis]
resized_img = A.Resize(112, 112, 4)(image=img)['image']
attention_map.append((resized_img, img))
return attention_map
def show_example(img_path='iu_mask.jpg', mode='arcface', show=True):
img = Image.open(img_path)
img_resized = A.Resize(112, 112)(image=np.array(img))['image']
img_np = img_preprocessing(img)
input_img = torch.from_numpy(img_np).float().unsqueeze(0)
model = get_best_model(mode)
activations = model(input_img)
attention_maps = activation_based_map_f(activations)
if show:
plt.imshow(img)
plt.show()
for attention_map in attention_maps:
plt.figure(figsize=(16, 10))
plt.subplot(1, 2, 1)
plt.imshow(img_resized, interpolation='bicubic')
plt.imshow(attention_map[0], alpha=0.8, interpolation='bicubic')
plt.subplot(1, 2, 2)
plt.imshow(attention_map[1], interpolation='bicubic')
plt.show()
return [maps[0] for maps in attention_maps]
def compare_example(img_path, mode1='arcface', mode2='BSP', alpha=0.7, show=False):
transforms = A.Compose([
A.SmallestMaxSize(112),
A.CenterCrop(112, 112, p=1),
])
img = transforms(image=np.array(Image.open(img_path)))['image']
attn1 = show_example(img_path=img_path, mode=mode1, show=False)
attn2 = show_example(img_path=img_path, mode=mode2, show=False)
plt.figure(figsize=(16, 6))
plt.subplot(2, 5, 1)
plt.imshow(img)
plt.xticks([])
plt.yticks([])
for i, attention_map in enumerate(zip(attn1, attn2)):
plt.subplot(2, 5, 2 + i)
plt.imshow(img, alpha=0.8)
plt.imshow(attention_map[0], alpha=alpha, interpolation='bicubic')
plt.xticks([])
plt.yticks([])
plt.subplot(2, 5, 7 + i)
plt.imshow(img, alpha=0.8)
plt.imshow(attention_map[1], alpha=alpha, interpolation='bicubic')
plt.xticks([])
plt.yticks([])
if show:
plt.show()
else:
Path('result/attention_fig/').mkdir(exist_ok=True, parents=True)
plt.savefig('result/attention_fig/{}_{}_{}_{}.jpg'.format(
os.path.basename(img_path).split('.')[0], mode1, mode2, int(alpha*10)))
plt.close('all')
def run(args):
for mode in ['FACE', 'BSP', 'BOTH', 'DAN']:
for image_path in ['iu.jpg', 'iu_mask1.jpg', 'iu_mask2.jpg', 'iu_mask3.jpg', 'iu_mask4.jpg']:
for alpha in [0.8, 0.9]:
print('mode: {}'.format(mode))
print('alpha: {}'.format(alpha))
compare_example(img_path='examples/{}'.format(image_path), mode1='arcface', mode2=mode, alpha=alpha)
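# A single comparison can also be produced directly (the example image under
# ./examples/ and the pretrained weights under log/best_weight/ are assumed
# to exist):
compare_example('examples/iu.jpg', mode1='arcface', mode2='BSP',
                alpha=0.8, show=False)
# writes result/attention_fig/iu_arcface_BSP_8.jpg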
| 36.008264
| 116
| 0.627037
| 593
| 4,357
| 4.448567
| 0.261383
| 0.026535
| 0.026535
| 0.031842
| 0.191433
| 0.144807
| 0.134193
| 0.112206
| 0.075815
| 0.075815
| 0
| 0.036665
| 0.223778
| 4,357
| 121
| 116
| 36.008264
| 0.743347
| 0
| 0
| 0.221154
| 0
| 0
| 0.078935
| 0.018128
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.096154
| 0
| 0.230769
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b070934d7222c882ff718596c5213477b01b49fc
| 2,481
|
py
|
Python
|
tests/unit/tests_standard_lib/tests_sample_generation/test_time_parser.py
|
monishshah18/pytest-splunk-addon
|
1600f2c7d30ec304e9855642e63511780556b406
|
[
"Apache-2.0"
] | 39
|
2020-06-09T17:37:21.000Z
|
2022-02-08T01:57:35.000Z
|
tests/unit/tests_standard_lib/tests_sample_generation/test_time_parser.py
|
monishshah18/pytest-splunk-addon
|
1600f2c7d30ec304e9855642e63511780556b406
|
[
"Apache-2.0"
] | 372
|
2020-04-15T13:55:09.000Z
|
2022-03-31T17:14:56.000Z
|
tests/unit/tests_standard_lib/tests_sample_generation/test_time_parser.py
|
isabella232/pytest-splunk-addon
|
5e6ae2b47df7a1feb6f358bbbd1f02197b5024f6
|
[
"Apache-2.0"
] | 22
|
2020-05-06T10:43:45.000Z
|
2022-03-16T15:50:08.000Z
|
import pytest
from datetime import datetime
from freezegun import freeze_time
from pytest_splunk_addon.standard_lib.sample_generation.time_parser import (
time_parse,
)
@pytest.fixture(scope="session")
def tp():
return time_parse()
def generate_parameters():
result = []
for s in ("s", "sec", "secs", "second", "seconds"):
result.append(("-", "60", s, datetime(2020, 9, 1, 8, 15, 13)))
for m in ("m", "min", "minute", "minutes"):
result.append(("-", "60", m, datetime(2020, 9, 1, 7, 16, 13)))
for h in ("h", "hr", "hrs", "hour", "hours"):
result.append(("-", "60", h, datetime(2020, 8, 29, 20, 16, 13)))
for d in ("d", "day", "days"):
result.append(("-", "1", d, datetime(2020, 8, 31, 8, 16, 13)))
for w in ("w", "week", "weeks"):
result.append(("-", "2", w, datetime(2020, 8, 18, 8, 16, 13)))
for m in ("mon", "month", "months"):
result.append(("-", "2", m, datetime(2020, 7, 1, 8, 16, 13)))
for q in ("q", "qtr", "qtrs", "quarter", "quarters"):
result.append(("-", "2", q, datetime(2020, 3, 1, 8, 16, 13)))
for y in ("y", "yr", "yrs", "year", "years"):
result.append(("-", "2", y, datetime(2018, 9, 1, 8, 16, 13)))
result.extend(
[
("+", "5", "months", datetime(2021, 2, 1, 8, 16, 13)),
("+", "3", "months", datetime(2020, 12, 1, 8, 16, 13)),
("-", "11", "months", datetime(2019, 10, 1, 8, 16, 13)),
("smth", "15", "minutes", datetime(2020, 9, 1, 8, 31, 13)),
]
)
return result
class Testtime_parse:
@freeze_time("2020-09-01T04:16:13-04:00")
@pytest.mark.parametrize("sign, num, unit, expected", generate_parameters())
def test_convert_to_time(self, tp, sign, num, unit, expected):
assert tp.convert_to_time(sign, num, unit) == expected
@pytest.mark.parametrize(
"timezone_time, expected",
[
("+1122", datetime(2020, 9, 1, 19, 37, 13)),
("+0022", datetime(2020, 9, 1, 8, 15, 13)),
("+2322", datetime(2020, 9, 1, 8, 15, 13)),
("+1200", datetime(2020, 9, 1, 8, 15, 13)),
("+0559", datetime(2020, 9, 1, 8, 15, 13)),
("-1122", datetime(2020, 8, 31, 20, 53, 13)),
],
)
def test_get_timezone_time(self, tp, timezone_time, expected):
assert (
tp.get_timezone_time(datetime(2020, 9, 1, 8, 15, 13), timezone_time)
== expected
)
| 37.590909
| 80
| 0.523176
| 343
| 2,481
| 3.71137
| 0.323615
| 0.150825
| 0.091909
| 0.098979
| 0.115475
| 0.089552
| 0.089552
| 0
| 0
| 0
| 0
| 0.150707
| 0.25917
| 2,481
| 65
| 81
| 38.169231
| 0.541893
| 0
| 0
| 0
| 0
| 0
| 0.116888
| 0.010077
| 0
| 0
| 0
| 0
| 0.035088
| 1
| 0.070175
| false
| 0
| 0.070175
| 0.017544
| 0.192982
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c660dc00601aa00fc2df39ad1285ba2cbf2bab57
| 3,426
|
py
|
Python
|
recbole/utils/inferred_lm.py
|
ghazalehnt/RecBole
|
f1219847005e2c8d72b8c3cd5c49a138fe83276d
|
[
"MIT"
] | null | null | null |
recbole/utils/inferred_lm.py
|
ghazalehnt/RecBole
|
f1219847005e2c8d72b8c3cd5c49a138fe83276d
|
[
"MIT"
] | null | null | null |
recbole/utils/inferred_lm.py
|
ghazalehnt/RecBole
|
f1219847005e2c8d72b8c3cd5c49a138fe83276d
|
[
"MIT"
] | null | null | null |
import time
import torch
from recbole.config import Config
from recbole.utils import get_model, init_seed
import gensim
import gensim.downloader as api
from recbole.data import create_dataset, data_preparation
import numpy as np
URL_FIELD = "item_url"
class ItemLM:
def __init__(self, checkpoint_file, model_name, dataset_name, k=20, step=5000, load_docs=None, config_dict=None, config_file_list=None):
checkpoint = torch.load(checkpoint_file, map_location=torch.device('cpu'))
config = Config(model=model_name, dataset=dataset_name, config_file_list=config_file_list, config_dict=config_dict)
init_seed(config['seed'], config['reproducibility'])
dataset = create_dataset(config)
train_data, valid_data, test_data = data_preparation(config, dataset)
model = get_model(config['model'])(config, train_data).to(config['device'])
model.load_state_dict(checkpoint['state_dict'])
item_ids = dataset.get_item_feature()['item_id']
items = model.item_embedding(item_ids)
item_identifiers = dataset.get_item_feature()[URL_FIELD]
item_identifiers = dataset.id2token(URL_FIELD, item_identifiers)
url_id = {}
for i in range(1, len(item_identifiers)):
url_id[item_identifiers[i]] = i
url_id_temp = {}
if load_docs is not None:
item_ids = set()
for url in load_docs:
if url in url_id:
item_ids.add(url_id[url])
url_id_temp[url] = url_id[url]
else:
# print(f"{url} does not exist in model")
pass
item_ids = [0] + list(item_ids)
url_id = url_id_temp
id_url = {}
for url, id in url_id.items():
id_url[id] = url
print("loading glove")
s = time.time()
pretrained_embedding_name = "glove-wiki-gigaword-50"
model_path = api.load(pretrained_embedding_name, return_path=True)
w2v_model = gensim.models.KeyedVectors.load_word2vec_format(model_path)
w2v_id_terms = np.array(w2v_model.index_to_key)
print(f"done: {time.time() - s}")
self.item_lms = {}
print("making item lm...")
ts = time.time()
s = 1
e = step
if e > len(item_ids) > s:
e = len(item_ids)
while e <= len(item_ids):
print(f"{s}:{e}")
batch_ids = item_ids[s:e]
batch_items = items[batch_ids].detach().clone()
batch_lms = torch.matmul(batch_items, model.word_embedding.weight.T)
batch_lms = torch.softmax(batch_lms, 1)
batch_lms_top = batch_lms.topk(k, dim=1)
probs_normalized_topk = (batch_lms_top.values.T / batch_lms_top.values.sum(1)).T
min_ps = probs_normalized_topk.min(dim=1)
estimated_length = torch.ones(e - s) / min_ps.values
item_lm_probs = (probs_normalized_topk.T * estimated_length).T
item_lm_keys = w2v_id_terms[batch_lms_top.indices]
for i in range(len(batch_ids)):
self.item_lms[id_url[int(batch_ids[i])]] = (item_lm_keys[i], item_lm_probs[i].numpy())
s = e
e += step
if e > len(item_ids) > s:
e = len(item_ids)
print(f"done: {time.time()-ts}")
def get_lm(self):
return self.item_lms
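# A minimal usage sketch. The checkpoint path, model and dataset names are
# hypothetical; the model must expose the item_embedding and word_embedding
# attributes used above:
lm = ItemLM(checkpoint_file='saved/model-latest.pth',
            model_name='MyLMModel',
            dataset_name='ml-100k',
            k=20)
item_lms = lm.get_lm()
# maps each item URL to (top-k vocabulary terms, per-term pseudo-counts)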
| 40.305882
| 140
| 0.613543
| 473
| 3,426
| 4.150106
| 0.264271
| 0.042792
| 0.020377
| 0.028018
| 0.057565
| 0.042282
| 0.030565
| 0.030565
| 0.030565
| 0.030565
| 0
| 0.008495
| 0.278459
| 3,426
| 84
| 141
| 40.785714
| 0.785599
| 0.011384
| 0
| 0.054054
| 0
| 0
| 0.047858
| 0.006499
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0.013514
| 0.108108
| 0.013514
| 0.162162
| 0.067568
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6690d881a99354cf92a13a7b705df947e112eb1
| 5,009
|
py
|
Python
|
menu.py
|
kokohi28/stock-prediction
|
82d18cbb6366d522a01252e0cdc6eafa9fffea6d
|
[
"MIT"
] | 11
|
2020-06-15T12:38:57.000Z
|
2021-12-08T13:34:28.000Z
|
menu.py
|
kokohi28/stock-prediction
|
82d18cbb6366d522a01252e0cdc6eafa9fffea6d
|
[
"MIT"
] | null | null | null |
menu.py
|
kokohi28/stock-prediction
|
82d18cbb6366d522a01252e0cdc6eafa9fffea6d
|
[
"MIT"
] | 5
|
2020-12-17T16:58:36.000Z
|
2022-02-08T09:29:28.000Z
|
import os
import const as CONST
from datetime import datetime
# Const
MENU_ROOT = 0
MENU_SPECIFY_DATE = 1
MENU_SPECIFY_PERCENT_TRAINED = 2
currMenu = MENU_ROOT
stockList = ['AAPL', '^DJI', '^HSI', '^GSPC']
def welcomeMessage():
print('##############################################################################')
print('#### ####')
print('#### Stock Prediction using Long short-term memory (LSTM) ####')
print('#### ####')
print('#### BY : - Malik Dwi Yoni Fordana (17051204024) ####')
print('#### - Roy Belmiro Virgiant (17051204016) ####')
print('#### - Koko Himawan Permadi (19051204111) ####')
print('#### ####')
print('##############################################################################')
return
def validateDate(date_text):
try:
datetime.strptime(date_text, '%Y/%m/%d')
return True
except ValueError:
return False
def menuSpecifyPercentTrained():
print('\nEnter trained data percentage (%) :')
print('')
print('Press [B] for Back')
return
def menuSpecifyDate():
print('\nEnter period of stock, start date - end date :')
print('example : 2010/01/05-2015/01/05')
print('')
print('Press [B] for Back')
return
def menuRoot():
print('\nSelect Stock:')
print('1. Apple (AAPL)')
print('2. Dow Jones Industrial Average (^DJI)')
print('3. Hang Seng Index (^HSI)')
print('4. S&P 500 (^GSPC)')
print('')
print('Press [Q] for Exit')
return
def handleInputDate(inputVal):
if inputVal == 'B' or \
inputVal == 'b':
return (-1, [])
else:
dateSplit = inputVal.split('-')
if len(dateSplit) < 2:
print('\nRange INVALID... (Press any key to continue)')
input('')
return (0, [])
else:
if validateDate(dateSplit[0]) == False:
print('\nDate start INVALID... (Press any key to continue)')
input('')
return (0, [])
if validateDate(dateSplit[1]) == False:
print('\nDate end INVALID... (Press any key to continue)')
input('')
return (0, [])
return (1, dateSplit)
def handleInputPercentTrained(inputVal):
if inputVal == 'B' or \
inputVal == 'b':
return -1
else:
if inputVal.isnumeric():
num = int(inputVal)
if num == 0 or \
num > 100:
print('\nPercentage INVALID... (Press any key to continue)')
input('')
return 0
else:
return num
else:
print('\nPercentage INVALID... (Press any key to continue)')
input('')
return 0
def clearScreen():
os.system('cls' if os.name == 'nt' else 'clear')
return
def menuLoop():
loopMenu = True
global currMenu
stock = ''
dateRange = []
percentTrained = 0
while loopMenu:
try:
# Clear screen
clearScreen()
# Display Welcome
welcomeMessage()
# Display Input
inputMsg = ''
if currMenu == MENU_ROOT:
menuRoot()
inputMsg = 'Select : '
elif currMenu == MENU_SPECIFY_DATE:
menuSpecifyDate()
inputMsg = 'Specify : '
elif currMenu == MENU_SPECIFY_PERCENT_TRAINED:
menuSpecifyPercentTrained()
inputMsg = 'Percentage : '
# Get Input
inputVal = input(inputMsg)
# Listen Quit Input
if inputVal == 'Q' or \
inputVal == 'q':
stock = ''
dateRange = []
percentTrained = 0
loopMenu = False
else:
# Root
if currMenu == MENU_ROOT:
if inputVal.isnumeric():
num = int(inputVal)
if num == 0 or \
num > len(stockList):
print('\nSelection INVALID... (Press any key to continue)')
input('')
else:
stock = stockList[num - 1]
currMenu = currMenu + 1
else:
print('\nSelection INVALID... (Press any key to continue)')
input('')
# Date
elif currMenu == MENU_SPECIFY_DATE:
res, dateRange = handleInputDate(inputVal)
if res < 0:
currMenu = currMenu - 1
elif res == 0:
pass
elif res > 0:
currMenu = currMenu + 1
# Percent trained
elif currMenu == MENU_SPECIFY_PERCENT_TRAINED:
percentTrained = handleInputPercentTrained(inputVal)
if percentTrained < 0:
currMenu = currMenu - 1
elif percentTrained == 0:
pass
elif percentTrained > 0:
# EXIT MENU LOOP
loopMenu = False
except KeyboardInterrupt:
stock = ''
dateRange = []
percentTrained = 0
loopMenu = False
return (stock, dateRange, percentTrained)
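# A minimal sketch of driving the menu from a caller (hypothetical):
if __name__ == '__main__':
    stock, date_range, percent_trained = menuLoop()
    if stock:
        print(f'{stock}: {date_range[0]} - {date_range[1]}, {percent_trained}% trained')
    else:
        print('No selection made.')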
| 27.075676
| 89
| 0.502296
| 467
| 5,009
| 5.342612
| 0.301927
| 0.033667
| 0.042084
| 0.050501
| 0.340681
| 0.291784
| 0.228457
| 0.228457
| 0.202806
| 0.132265
| 0
| 0.027151
| 0.345578
| 5,009
| 185
| 90
| 27.075676
| 0.733984
| 0.023558
| 0
| 0.503356
| 0
| 0
| 0.287851
| 0.036263
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060403
| false
| 0.013423
| 0.020134
| 0
| 0.194631
| 0.201342
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6692746527064fc0f46c5e36e6e97f09870ae4f
| 3,410
|
py
|
Python
|
demo/infinity/triton_client.py
|
dumpmemory/transformer-deploy
|
36993d8dd53c7440e49dce36c332fa4cc08cf9fb
|
[
"Apache-2.0"
] | 698
|
2021-11-22T17:42:40.000Z
|
2022-03-31T11:16:08.000Z
|
demo/infinity/triton_client.py
|
dumpmemory/transformer-deploy
|
36993d8dd53c7440e49dce36c332fa4cc08cf9fb
|
[
"Apache-2.0"
] | 38
|
2021-11-23T13:45:04.000Z
|
2022-03-31T10:36:45.000Z
|
demo/infinity/triton_client.py
|
dumpmemory/transformer-deploy
|
36993d8dd53c7440e49dce36c332fa4cc08cf9fb
|
[
"Apache-2.0"
] | 58
|
2021-11-24T11:46:21.000Z
|
2022-03-29T08:45:16.000Z
|
# Copyright 2022, Lefebvre Dalloz Services
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import tritonclient.http
from transformer_deploy.benchmarks.utils import print_timings, setup_logging, track_infer_time
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="require inference", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--length", required=True, help="sequence length", choices=(16, 128), type=int)
parser.add_argument("--model", required=True, help="model type", choices=("onnx", "tensorrt"))
args, _ = parser.parse_known_args()
setup_logging()
model_name = f"transformer_{args.model}_inference"
url = "127.0.0.1:8000"
model_version = "1"
batch_size = 1
if args.length == 128:
# from https://venturebeat.com/2021/08/25/how-hugging-face-is-tackling-bias-in-nlp/, text used in the HF demo
text = """Today, Hugging Face has expanded to become a robust NLP startup,
known primarily for making open-source software such as Transformers and Datasets,
used for building NLP systems. “The software Hugging Face develops can be used for
classification, question answering, translation, and many other NLP tasks,” Rush said.
Hugging Face also hosts a range of pretrained NLP models, on GitHub, that practitioners can download
and apply for their problems, Rush added.""" # noqa: W291
else:
text = "This live event is great. I will sign-up for Infinity."
triton_client = tritonclient.http.InferenceServerClient(url=url, verbose=False)
assert triton_client.is_model_ready(
model_name=model_name, model_version=model_version
), f"model {model_name} not yet ready"
model_metadata = triton_client.get_model_metadata(model_name=model_name, model_version=model_version)
model_config = triton_client.get_model_config(model_name=model_name, model_version=model_version)
query = tritonclient.http.InferInput(name="TEXT", shape=(batch_size,), datatype="BYTES")
model_score = tritonclient.http.InferRequestedOutput(name="output", binary_data=False)
time_buffer = list()
for _ in range(10000):
query.set_data_from_numpy(np.asarray([text] * batch_size, dtype=object))
_ = triton_client.infer(
model_name=model_name, model_version=model_version, inputs=[query], outputs=[model_score]
)
for _ in range(1000):
with track_infer_time(time_buffer):
query.set_data_from_numpy(np.asarray([text] * batch_size, dtype=object))
response = triton_client.infer(
model_name=model_name, model_version=model_version, inputs=[query], outputs=[model_score]
)
print_timings(name="triton transformers", timings=time_buffer)
print(response.as_numpy("output"))
| 46.712329
| 117
| 0.72346
| 458
| 3,410
| 5.20524
| 0.469432
| 0.045302
| 0.058725
| 0.037752
| 0.171141
| 0.171141
| 0.171141
| 0.171141
| 0.118289
| 0.118289
| 0
| 0.017248
| 0.183871
| 3,410
| 72
| 118
| 47.361111
| 0.839382
| 0.202346
| 0
| 0.085106
| 0
| 0
| 0.280059
| 0.012579
| 0
| 0
| 0
| 0
| 0.021277
| 1
| 0
| false
| 0
| 0.085106
| 0
| 0.085106
| 0.06383
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c66969c34948d04bc70f6e069bd8dabc5e27f5b6
| 2,361
|
py
|
Python
|
mf/knnbased.py
|
waashk/extended-pipeline
|
1f8cdfcd1530a9dd502ea0d76d89b5010d19daf7
|
[
"MIT"
] | null | null | null |
mf/knnbased.py
|
waashk/extended-pipeline
|
1f8cdfcd1530a9dd502ea0d76d89b5010d19daf7
|
[
"MIT"
] | null | null | null |
mf/knnbased.py
|
waashk/extended-pipeline
|
1f8cdfcd1530a9dd502ea0d76d89b5010d19daf7
|
[
"MIT"
] | null | null | null |
import numpy as np
from tqdm import tqdm
from scipy.sparse import csr_matrix, hstack, vstack
from sklearn.neighbors import NearestNeighbors
class MFKnn(object):
"""
Implementation of
"""
def __init__(self, metric, k):
self.k = k
self.metric = metric
def fit(self, X, y):
#
self.X_train = X
self.y_train = y
#
self.classes = sorted(map(int, list(set(self.y_train))))
self.n_classes = len(self.classes)
#
self.docs_by_class = [len(np.where(self.y_train == i)[0]) for i in self.classes]
#
self.X_by_class = []
self.knn_by_class = []
#self.scores = {}
#
        njobs = -1
        if self.metric == 'l1':
            njobs = 1
for i in self.classes:
X_tmp = self.X_train[np.where(self.y_train == i)]
#print ("xtmp"+str(X_tmp.shape[0])+" class: "+str(i))
            data = [0]
            ind = [0]
            auxf = csr_matrix((data, (ind, ind)), shape=(1, self.X_train.shape[1]), dtype=np.float64)  # zero row used to pad small classes
if X_tmp.shape[0]<self.k+1:
newxtmp=[]
for iww in list(range(X_tmp.shape[0])):
newxtmp.append(X_tmp[iww])
for iww in list(range(self.k+1-X_tmp.shape[0])):
newxtmp.append(auxf)
X_tmp=vstack(newxtmp)
knn = NearestNeighbors(n_neighbors=self.k+1, algorithm="brute", metric=self.metric, n_jobs=njobs)
knn.fit(X_tmp)
self.knn_by_class.append(knn)
return self
def csr_matrix_equal2(self, a1, a2):
return all((np.array_equal(a1.indptr, a2.indptr),
np.array_equal(a1.indices, a2.indices),
np.array_equal(a1.data, a2.data)))
def transform(self, X):
#
istrain = True if self.csr_matrix_equal2(self.X_train, X) else False
#print(istrain)
n_neighbors = self.k+1 if istrain else self.k
metafeatures = []
scores = {}
for j in self.classes:
if self.metric == "l1" or self.metric == "l2":
scores[j] = 0.0 + self.knn_by_class[j].kneighbors(X, n_neighbors, return_distance=True)[0]
if self.metric == "cosine":
scores[j] = 1.0 - self.knn_by_class[j].kneighbors(X, n_neighbors, return_distance=True)[0]
#
for i, doc in enumerate(X):
for j in self.classes:
if istrain:
if self.y_train[i] == j:
metafeatures += list(scores[j][i][1:])
else:
metafeatures += list(scores[j][i][:-1])
else:
metafeatures += list(scores[j][i])
return np.array(metafeatures).reshape((X.shape[0],self.k*self.n_classes))
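# Hypothetical usage sketch: class labels must be contiguous integers starting
# at 0, since transform() indexes self.knn_by_class by class label.
if __name__ == '__main__':
    X_demo = csr_matrix(np.random.rand(8, 5))
    y_demo = np.array([0, 0, 0, 0, 1, 1, 1, 1])
    meta = MFKnn(metric='cosine', k=2).fit(X_demo, y_demo).transform(X_demo)
    print(meta.shape)  # (8, 4) == (n_docs, k * n_classes)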
| 23.147059
| 105
| 0.647183
| 386
| 2,361
| 3.826425
| 0.243523
| 0.021666
| 0.033852
| 0.037915
| 0.280298
| 0.212593
| 0.131347
| 0.131347
| 0.131347
| 0.131347
| 0
| 0.020419
| 0.191021
| 2,361
| 101
| 106
| 23.376238
| 0.75288
| 0.047861
| 0
| 0.065574
| 0
| 0
| 0.00764
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065574
| false
| 0
| 0.065574
| 0.016393
| 0.196721
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c66bd961fbf8bcb3556ef3c4fc46854f04ab9b95
| 581
|
py
|
Python
|
general-practice/Exercises solved/codingbat/Warmup2/string_match.py
|
lugabrielbueno/Projeto
|
f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0
|
[
"MIT"
] | null | null | null |
general-practice/Exercises solved/codingbat/Warmup2/string_match.py
|
lugabrielbueno/Projeto
|
f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0
|
[
"MIT"
] | null | null | null |
general-practice/Exercises solved/codingbat/Warmup2/string_match.py
|
lugabrielbueno/Projeto
|
f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0
|
[
"MIT"
] | null | null | null |
#Given 2 strings, a and b, return the number of the positions where they contain the same length 2 substring. So "xxcaazz" and "xxbaaz" yields 3, since the "xx", "aa", and "az" substrings appear in the same place in both strings.
#string_match('xxcaazz', 'xxbaaz') → 3
#string_match('abc', 'abc') → 2
#string_match('abc', 'axc') → 0
def string_match(a, b):
    # use the longer string so the loop covers every possible start index
    if len(a) > len(b):
        higher = a
    else:
        higher = b
count = 0
for i in range(len(higher)):
if a[i:i+2] == b[i:i+2] and len(a[i:i+2]) == len(b[i:i+2]) == 2:
count +=1
return count
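# The examples from the header comments, as executable checks (illustrative):
assert string_match('xxcaazz', 'xxbaaz') == 3
assert string_match('abc', 'abc') == 2
assert string_match('abc', 'axc') == 0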
| 34.176471
| 229
| 0.593804
| 105
| 581
| 3.27619
| 0.447619
| 0.127907
| 0.034884
| 0.023256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029954
| 0.253012
| 581
| 16
| 230
| 36.3125
| 0.75576
| 0.55938
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c66f914aa66ae752fa396361357e16cd39293db5
| 10,951
|
py
|
Python
|
courses/views.py
|
mdavoodi/konkourse-python
|
50f2904e7bbb31f00c4dd66fb55cd644ea3c4eee
|
[
"MIT"
] | 4
|
2015-06-23T22:17:50.000Z
|
2019-01-17T21:32:02.000Z
|
courses/views.py
|
mdavoodi/konkourse-python
|
50f2904e7bbb31f00c4dd66fb55cd644ea3c4eee
|
[
"MIT"
] | null | null | null |
courses/views.py
|
mdavoodi/konkourse-python
|
50f2904e7bbb31f00c4dd66fb55cd644ea3c4eee
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect, render_to_response
from django.template.context import RequestContext
from account.views import login
from models import Course
from website.views import index
from forms import CourseForm, CourseInitialForm
from account.util import createImage
from django.core.context_processors import csrf
from events.forms import EventForm
from events.models import Event
import datetime
from documents.forms import DocumentForm
from datetime import timedelta
from conversation.models import ConvoWall, ConversationPost
from documents.views import __upload_core
from documents.models import Document
from endless_pagination.decorators import page_template
from notification.views import notifyCreateEvent, notifyDocShareCourse
from page.models import Page
def _course_context(request, course_id):
course = Course.objects.get(id=course_id)
institution = Page.objects.get(title=course.institution)
variables_for_template = {
'name': request.user.first_name + ' ' + request.user.last_name,
'image': course.image,
'course_name': course.name,
'course_number': course.number,
'institution': institution,
'description': course.about,
'professor': course.professor,
'course_id': course_id,
'course_form': CourseInitialForm(),
'courses': Course.objects.filter(course_users__username=request.user.username),
'current_course': course,
'section_number': course.course_id,
'is_logged_in': True,
}
return variables_for_template
def _course_exists(request):
form = CourseInitialForm(request.POST, request.FILES)
if form.is_valid():
section_number = ""
if request.user.get_profile().school.title == "James Madison University":
section_number = request.POST['course_id'].zfill(4)
else:
section_number = request.POST['course_id']
try:
course = Course.objects.get(
number__iexact=request.POST['course_number'],
course_id__iexact=section_number,
institution__iexact=request.user.get_profile().school.title)
return (True, course)
except Course.DoesNotExist:
wall = ConvoWall(wall_type=3)
wall.save()
course = Course(
wall=wall,
number=request.POST['course_number'],
course_id=section_number,
institution=request.user.get_profile().school.title)
course.save()
return (False, course)
return (False, None)
@page_template('website/course/course_page.html')
def course(request, course_id,
error_message='', template='website/course/course.html', extra_context=None):
if not request.user.is_authenticated() or not request.user.is_active:
return redirect(index)
else:
variables = _course_context(request, course_id)
variables['event_form'] = EventForm()
variables['doc_form'] = DocumentForm()
c = Course.objects.get(id=course_id)
posts = ConversationPost.objects.filter(wall=c.wall, deleted=False).order_by('created')
variables['wall'] = c.wall
variables['posts'] = posts.reverse()
if error_message != '':
variables['error_message'] = error_message
variables.update(csrf(request))
if extra_context is not None:
variables.update(extra_context)
return render_to_response(
template, variables, context_instance=RequestContext(request))
def create(request):
if not request.user.is_authenticated():
return login(request)
if request.method == 'POST':
exists, c = get_or_add_course(request)
if(c is None):
return index(request)
if(not exists):
return course_info_edit(request, c.id)
else:
return course(request, c.id)
return redirect(index)
def get_or_add_course(request):
exists, c = _course_exists(request=request)
if c is None:
return (exists, c)
c = Course.objects.get(id=c.id)
c.add_student(request.user)
return (exists, c)
def course_leave(request, course_id):
c = Course.objects.get(id=course_id)
if(c.in_course(request.user)):
c.remove_student(request.user)
return redirect(index)
return redirect(404)
def course_info(request, course_id):
if not request.user.is_authenticated():
return login(request)
course = Course.objects.get(id=course_id)
variables = _course_context(request, course_id)
variables['timeValue'] = course.time
variables['semester'] = course.get_semester
variables['credits'] = course.credits
return render(request, 'website/course/course_info.html',
variables,
context_instance=RequestContext(request),
)
def course_info_edit(request, course_id):
if not request.user.is_authenticated():
return login(request)
variables = _course_context(request, course_id)
variables['form'] = CourseForm(instance=Course.objects.get(id=course_id))
variables.update(csrf(request))
return render(request, 'website/course/course_info_edit.html',
variables,
context_instance=RequestContext(request),
)
def course_update(request, course_id):
if request.method == 'POST':
c = Course.objects.get(id=course_id)
form = CourseForm(request.POST, request.FILES, instance=c)
if form.is_valid():
u = request.user
c = form.save()
dimentions = (150, 150)
if len(request.FILES) == 1:
image = request.FILES['image']
c.image.save(image.name, createImage(c.image, dimentions))
name = u.first_name + ' ' + u.last_name
username = request.user.username
variables_for_template = {
'name': name,
'username': username,
}
return redirect('/course/' + str(course_id) + '/', variables_for_template)
else:
variables = _course_context(request, course_id)
variables['form'] = form
variables.update(csrf(request))
return render(request, 'website/course/course_info_edit.html',
variables,
context_instance=RequestContext(request))
def create_event(request, course_id):
if request.method == 'POST':
variables = _course_context(request, course_id)
c = Course.objects.get(id=course_id)
e = Event()
e.creator = request.user
form = EventForm(request.POST, request.FILES, instance=e)
if form.is_valid():
e = form.save()
c.add_event(e)
e.join_event(request.user.id)
c.save()
wallPost = ConversationPost(creator=request.user, wall=c.wall, message="", post_type='E', event=e)
wallPost.save()
notifyCreateEvent(course=c, event=e)
return redirect(course_events, course_id)
else:
variables['form'] = form
variables.update(csrf(request))
return course(request, course_id, "Invalid event creation fields!!")
return redirect(index)
def course_documents(request, course_id):
if not request.user.is_authenticated():
return login(request)
variables = _course_context(request, course_id)
variables['documents'] = Document.objects.filter(course__id=course_id).order_by('modified')
return render(request, 'website/course/course_documents.html',
variables,
context_instance=RequestContext(request),
)
def course_upload(request, course_id):
doc = __upload_core(request)
if isinstance(doc, Exception):
return course(request, course_id, "Invalid document!")
else:
c = Course.objects.get(id=course_id)
message = request.POST['message_post']
wallPost = ConversationPost(creator=request.user, wall=c.wall, message=message, post_type='D', document=doc)
wallPost.save()
doc.course.add(c)
doc.save()
notifyDocShareCourse(document=doc, course=c)
return redirect(course_documents, course_id)
def course_events(request, course_id):
if not request.user.is_authenticated():
return login(request)
variables = _course_context(request, course_id)
today = datetime.date.today()
week1_end = today + timedelta(days=6 - today.weekday())
week2_end = week1_end + timedelta(days=7)
c = Course.objects.get(id=course_id)
variables['thisWeek'] = c.events.filter(start_date__range=[today, week1_end], deleted=False)
variables['nextWeek'] = c.events.filter(start_date__gt=week1_end, start_date__lte=week2_end, deleted=False)
variables['future'] = c.events.filter(start_date__gt=week2_end, deleted=False)
return render(request, 'website/course/course_events.html',
variables,
context_instance=RequestContext(request),
)
def course_members(request, course_id):
if not request.user.is_authenticated():
return login(request)
template_variables = _course_context(request, course_id)
c = Course.objects.get(id=course_id)
_list = c.course_users.filter(courseuser__role='S')
new_list = (chunks(_list, 3))
template_variables['course_members'] = new_list
template_variables['user'] = request.user
return render(request, 'website/course/course_members.html',
template_variables,
context_instance=RequestContext(request),
)
def course_gradebook(request, course_id):
if not request.user.is_authenticated():
return login(request)
return render(request, 'website/course/course_gradebook.html',
_course_context(request, course_id),
context_instance=RequestContext(request),
)
def chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i + n]
from django.http import HttpResponse
from django.utils import simplejson
def add_course(request):
results = {'success': False}
if request.user.is_authenticated() and request.user.is_active:
if request.method == 'POST':
courses = Course.objects.filter(course_users__username=request.user.username)
if courses.count() >= 10:
json = simplejson.dumps(results)
return HttpResponse(json, mimetype='application/json')
exists, c = get_or_add_course(request)
if c is not None:
results = {'success': True}
else:
results = {'success': False}
json = simplejson.dumps(results)
return HttpResponse(json, mimetype='application/json')
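# Example of the chunks() helper above, as used by course_members to lay
# members out three per row (illustrative):
#   list(chunks(['a', 'b', 'c', 'd', 'e'], 3)) -> [['a', 'b', 'c'], ['d', 'e']]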
| 37.892734
| 116
| 0.649621
| 1,250
| 10,951
| 5.504
| 0.1512
| 0.052326
| 0.050145
| 0.028779
| 0.440698
| 0.408866
| 0.334884
| 0.257703
| 0.20814
| 0.184157
| 0
| 0.00303
| 0.246462
| 10,951
| 288
| 117
| 38.024306
| 0.830708
| 0
| 0
| 0.320158
| 0
| 0
| 0.07287
| 0.027303
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067194
| false
| 0
| 0.083004
| 0
| 0.29249
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c67157381752f709d6b39cd4632427d8936411ad
| 2,701
|
py
|
Python
|
rx/operators/observable/delaywithselector.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-11-16T09:07:13.000Z
|
2018-11-16T09:07:13.000Z
|
rx/operators/observable/delaywithselector.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
rx/operators/observable/delaywithselector.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-05-08T08:23:08.000Z
|
2020-05-08T08:23:08.000Z
|
from rx.core import ObservableBase, AnonymousObservable, typing
from rx.disposables import CompositeDisposable, \
SingleAssignmentDisposable, SerialDisposable
def delay_with_selector(self, subscription_delay=None,
delay_duration_mapper=None) -> ObservableBase:
"""Time shifts the observable sequence based on a subscription delay
and a delay mapper function for each element.
# with mapper only
1 - res = source.delay_with_selector(lambda x: Scheduler.timer(5000))
# with delay and mapper
2 - res = source.delay_with_selector(Observable.timer(2000),
lambda x: Observable.timer(x))
subscription_delay -- [Optional] Sequence indicating the delay for the
subscription to the source.
delay_duration_mapper [Optional] Selector function to retrieve a
sequence indicating the delay for each given element.
Returns time-shifted sequence.
"""
source = self
sub_delay, mapper = None, None
if isinstance(subscription_delay, typing.Observable):
mapper = delay_duration_mapper
sub_delay = subscription_delay
else:
mapper = subscription_delay
def subscribe(observer, scheduler=None):
delays = CompositeDisposable()
at_end = [False]
def done():
if (at_end[0] and delays.length == 0):
observer.on_completed()
subscription = SerialDisposable()
def start():
def on_next(x):
try:
delay = mapper(x)
except Exception as error:
observer.on_error(error)
return
d = SingleAssignmentDisposable()
delays.add(d)
def on_next(_):
observer.on_next(x)
delays.remove(d)
done()
def on_completed():
observer.on_next(x)
delays.remove(d)
done()
d.disposable = delay.subscribe_(on_next, observer.on_error, on_completed, scheduler)
def on_completed():
at_end[0] = True
subscription.dispose()
done()
subscription.disposable = source.subscribe_(on_next, observer.on_error, on_completed, scheduler)
if not sub_delay:
start()
else:
            subscription.disposable = sub_delay.subscribe_(
                lambda _: start(),
                observer.on_error,
                start)
return CompositeDisposable(subscription, delays)
return AnonymousObservable(subscribe)
| 32.154762
| 108
| 0.585339
| 265
| 2,701
| 5.8
| 0.301887
| 0.045543
| 0.039037
| 0.03123
| 0.178269
| 0.106701
| 0.106701
| 0.106701
| 0.065062
| 0
| 0
| 0.00734
| 0.344317
| 2,701
| 83
| 109
| 32.542169
| 0.860531
| 0.226953
| 0
| 0.215686
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.156863
| false
| 0
| 0.039216
| 0
| 0.254902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6715e41c59947802aabe44b258270730dfcbb52
| 719
|
py
|
Python
|
w2/palindrome.py
|
connorw72/connorapcsptri3
|
2e885644ed2a8d478e5ce193f94b02ad03c6e6b3
|
[
"MIT"
] | null | null | null |
w2/palindrome.py
|
connorw72/connorapcsptri3
|
2e885644ed2a8d478e5ce193f94b02ad03c6e6b3
|
[
"MIT"
] | 3
|
2022-03-14T21:10:05.000Z
|
2022-03-28T21:11:17.000Z
|
w2/palindrome.py
|
connorw72/connorapcsptri3
|
2e885644ed2a8d478e5ce193f94b02ad03c6e6b3
|
[
"MIT"
] | 2
|
2022-03-10T06:11:11.000Z
|
2022-03-11T06:11:11.000Z
|
class Palindrome:
def __init__(self, test):
self.test = test
def __call__(self):
        test_strip = [n for n in self.test if n.isalpha() or n.isnumeric()]
self.test = "".join(test_strip)
self.test = self.test.lower()
#Test to see if the phrase/word is a palindrome
if self.test == self.test[::-1]:
return "is a palindrome"
else:
return "is not a palindrome"
# Testing these to see if they are palindromes
test_cases = ["A man, a plan, a canal -- Panama", "racecar", "broncos"]
def main():
try:
for v in test_cases:
palindrome = Palindrome(test=v)
print(v, palindrome())
    except Exception:
        print("ERROR!")

if __name__ == "__main__":
    main()
| 31.26087
| 81
| 0.585535
| 100
| 719
| 4.09
| 0.47
| 0.176039
| 0.08802
| 0.117359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001965
| 0.292072
| 719
| 23
| 82
| 31.26087
| 0.801572
| 0.126565
| 0
| 0
| 0
| 0
| 0.137161
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0
| 0
| 0.315789
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c672a5daf5acf1852874d76a788a6d4edc536ca3
| 3,890
|
py
|
Python
|
sat-competition-2018/xof-state/sha3-xof.py
|
cipherboy/sat
|
65cbcebf03ffdfd64d49359ebb1d654c73e2c720
|
[
"MIT"
] | 1
|
2019-01-19T23:04:50.000Z
|
2019-01-19T23:04:50.000Z
|
sat-competition-2018/xof-state/sha3-xof.py
|
cipherboy/sat
|
65cbcebf03ffdfd64d49359ebb1d654c73e2c720
|
[
"MIT"
] | null | null | null |
sat-competition-2018/xof-state/sha3-xof.py
|
cipherboy/sat
|
65cbcebf03ffdfd64d49359ebb1d654c73e2c720
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import hash_framework as hf
hf.config.model_dir = "/home/cipherboy/GitHub/sat/sat-competition-2018/models"
import time, sys, os, random
run = False
release = False
if '--run' in sys.argv:
run = True
if '--release' in sys.argv:
release = True
if '-h' in sys.argv or '--help' in sys.argv:
print(sys.argv[0] + " [--run] [--release] [--args file] [w r e s]")
print('---')
print("Generates models for benchmarking. Runs if specified, otherwise only creates models.")
print("--run - runs the resulting CNF file")
print("--release - deletes the intermediate stages after creation")
print("--args file - specify a file to load arguments from")
print("w - sha3 w")
print("r - sha3 rounds format (str only)")
print("e - effective margin (128/256/.../512: as if w=1600)")
print("s - steps to apply to base state (extract s*e*w/64 bits from the XOF)")
sys.exit(0)
def sha3_xof_recreate_args():
r_args = sys.argv[1:]
if run:
r_args = r_args[1:]
if release:
r_args = r_args[1:]
w = int(r_args[0])
r = int(r_args[1])
e = int(r_args[2])
s = int(r_args[3])
sha3_xof_recreate(w, r, e, s)
def sha3_xof_recreate_file():
fname = sys.argv[-1]
args = open(fname, 'r').read().split('\n')
for s_arg in args:
if len(s_arg) == 0:
continue
arg = s_arg.split(" ")
w = int(arg[0])
r = int(arg[1])
e = int(arg[2])
s = int(arg[3])
sha3_xof_recreate(w, r, e, s)
def sha3_perform(w, r, in_state):
eval_table = hf.algorithms._sha3.perform_sha3({}, in_state, None, rounds=r, w=w)
out_state = []
for j in range(0, 25*w):
out_state.append(eval_table['out' + str(j)])
return ''.join(out_state)
def sha3_xof_recreate(w, r, e, s):
margin = e*w//64
algo = hf.algorithms.sha3(w=w, rounds=r)
base_seed = []
for j in range(0, 25*w):
if random.randint(0, 1) == 0:
base_seed.append('F')
else:
base_seed.append('T')
base_seed = ''.join(base_seed)
states = []
cstate = sha3_perform(w, r, base_seed)
for i in range(0, s):
states.append(cstate)
cstate = sha3_perform(w, r, cstate)
tag = "sha3-xof_recreate-w" + str(w) + "-r" + str(r) + '-e' + str(e) + "-s" + str(s)
prefixes = []
for i in range(0, s):
prefixes.append('h' + str(i))
m = hf.models()
m.start(tag, recreate=True)
print(w, r, e, s, margin)
print("base_seed: " + base_seed)
for i in range(0, s):
print("state " + str(i) + ": " + states[i])
hf.models.vars.write_header()
hf.models.generate(algo, prefixes, rounds=r, bypass=True)
hf.models.vars.write_assign(['cchain', 'cknown'])
if s > 1:
cchain = ['and']
for i in range(0, s-1):
for j in range(0, 25*w):
cchain.append(('equal', 'h' + str(i) + 'out' + str(j), 'h' + str(i+1) + 'in' + str(j)))
cchain = tuple(cchain)
hf.models.vars.write_clause('cchain', cchain, '10-chain.txt')
cknown = ['and']
for i in range(0, s):
for j in range(0, margin):
cknown.append(('equal', 'h' + str(i) + 'out' + str(j), states[i][j]))
cknown = tuple(cknown)
hf.models.vars.write_clause('cknown', cknown, '20-known.txt')
m.collapse()
m.build()
if run:
t1 = time.time()
res = m.run(count=1)
t2 = (time.time() - t1)
print("Run time: " + str(t2))
for result in m.load_results():
o_s = ""
for j in range(0, 25*w):
o_s += result['h0in' + str(j)]
print("predicted_seed: " + str(o_s))
if release:
os.system("rm -rf *.txt *.bc *.concat *.out")
print("")
if '--args' in sys.argv:
sha3_xof_recreate_file()
else:
sha3_xof_recreate_args()
| 28.814815
| 103
| 0.554756
| 607
| 3,890
| 3.453048
| 0.253707
| 0.033397
| 0.038168
| 0.009542
| 0.191317
| 0.133111
| 0.120706
| 0.067748
| 0.025763
| 0.025763
| 0
| 0.030699
| 0.271465
| 3,890
| 134
| 104
| 29.029851
| 0.708892
| 0.005398
| 0
| 0.163636
| 0
| 0.009091
| 0.184074
| 0.013961
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036364
| false
| 0.009091
| 0.018182
| 0
| 0.063636
| 0.145455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6739210f1e8d51ce9d34502997456a48bfc0ddd
| 3,357
|
py
|
Python
|
methinks/db.py
|
andreasgrv/methinks
|
5c65fdb84e35b8082ee35963431a352e06f4af44
|
[
"BSD-3-Clause"
] | null | null | null |
methinks/db.py
|
andreasgrv/methinks
|
5c65fdb84e35b8082ee35963431a352e06f4af44
|
[
"BSD-3-Clause"
] | null | null | null |
methinks/db.py
|
andreasgrv/methinks
|
5c65fdb84e35b8082ee35963431a352e06f4af44
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import datetime
import xxhash
import json
from flask_sqlalchemy import SQLAlchemy
from methinks.utils import str_to_date
from methinks.config import get_default_conf
db = SQLAlchemy()
class Entry(db.Model):
__tablename__ = 'entry'
id = db.Column(db.Integer, primary_key=True)
hexid = db.Column(db.String(16), unique=True, nullable=False, index=True)
text = db.Column(db.Text(), nullable=False)
date = db.Column(db.Date(), index=True, nullable=False)
last_edited = db.Column(db.DateTime(timezone=True), default=datetime.datetime.utcnow)
misc = db.Column(db.JSON, nullable=True)
def __init__(self, **data):
if 'id' in data:
raise AttributeError('id cannot be set')
if 'hexid' in data:
raise AttributeError('hexid cannot be set')
self.text = data.pop('text')
self.date = data.pop('date')
if 'last_edited' in data:
self.last_edited = data.pop('last_edited')
assert type(self.date) is datetime.date
self.misc = data
self.hexid = self.hash
def __repr__(self):
return 'Entry: %r:\n%s' % (self.date, self.text)
@property
def hash(self):
content = '%s%s%s' % (self.text, self.date, json.dumps(self.misc))
hs = xxhash.xxh64(content).hexdigest()
return hs
@classmethod
def string_to_date(cl, text):
return datetime.datetime.strptime(text,
get_default_conf()['dateformat']).date()
@classmethod
def date_to_string(cl, date):
return date.strftime(get_default_conf()['dateformat'])
@property
def filename(self):
return '%s.md' % Entry.date_to_string(self.date)
def as_dict(self):
d = dict(id=self.id,
hexid=self.hexid,
text=self.text,
date=self.date,
last_edited=self.last_edited,
**self.misc)
return d
@classmethod
def from_dict(cl, data):
return Entry(text=data['text'],
date=str_to_date(data['date']).date(),
last_edited=str_to_date(data['last_edited']),
**data.get('misc', {}))
def to_file(self, folderpath):
path = os.path.join(folderpath, self.filename)
with open(path, 'w') as f:
f.write(self.text)
@classmethod
def from_file(cl, filepath):
with open(filepath, 'r') as f:
contents = f.read()
filename = os.path.basename(filepath).replace('.md', '')
if filename == 'template':
date = datetime.date.today()
last_edited = datetime.datetime.min
else:
date = cl.string_to_date(filename)
mtime = os.path.getmtime(filepath)
last_edited = datetime.datetime.fromtimestamp(mtime)
return Entry(text=contents, date=date, last_edited=last_edited)
@classmethod
def from_config(cl, config):
sections = []
        for title, trigger_cls in config.triggers.items():
            line = trigger_cls.default_text(title)
            sections.append(line)
contents = '%s\n' % '\n'.join(sections)
date = datetime.date.today()
last_edited = datetime.datetime.min
return Entry(text=contents, date=date, last_edited=last_edited)
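# Hypothetical sketch: hexid is a content hash, so it changes whenever
# text/date/misc change; filename comes from the configured dateformat
# (assumed '%Y-%m-%d' here).
if __name__ == '__main__':
    e = Entry(text='Wrote some notes.', date=datetime.date(2021, 5, 1))
    print(e.filename)  # e.g. '2021-05-01.md'
    print(e.hexid)     # 16-char xxhash64 hex digest of text + date + misc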
| 31.373832
| 89
| 0.593983
| 416
| 3,357
| 4.661058
| 0.254808
| 0.077359
| 0.030944
| 0.027849
| 0.104177
| 0.104177
| 0.104177
| 0.104177
| 0.104177
| 0.052604
| 0
| 0.001662
| 0.282991
| 3,357
| 106
| 90
| 31.669811
| 0.803905
| 0
| 0
| 0.151163
| 0
| 0
| 0.048853
| 0
| 0
| 0
| 0
| 0
| 0.011628
| 1
| 0.127907
| false
| 0
| 0.081395
| 0.05814
| 0.406977
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6742b09c8b11bbe5babccf11451efdfb75310ee
| 2,797
|
py
|
Python
|
dense_estimation/points_estimation.py
|
zouzhenhong98/kitti-tools
|
30b7d5c799ca2a44fe88522f6d46ad2a53c61d53
|
[
"MIT"
] | 7
|
2020-01-03T13:05:36.000Z
|
2021-08-03T07:51:43.000Z
|
dense_estimation/points_estimation.py
|
zouzhenhong98/kitti-tools
|
30b7d5c799ca2a44fe88522f6d46ad2a53c61d53
|
[
"MIT"
] | null | null | null |
dense_estimation/points_estimation.py
|
zouzhenhong98/kitti-tools
|
30b7d5c799ca2a44fe88522f6d46ad2a53c61d53
|
[
"MIT"
] | 3
|
2020-07-07T03:35:06.000Z
|
2021-07-21T11:40:38.000Z
|
'''
point clouds estimation: transfer sparse map to dense map,
work for both depth and reflectance.
'''
import sys
sys.path.append("..")
from utils import data_provider
from utils import velo_2_cam
import numpy as np
# fetch image and point clouds: coordinates and reflectance
def rawData(pc_path_, img_path_):
    # load filtered point cloud
lidar = data_provider.read_pc2array(pc_path_, height=None, font=True)
lidar = np.array(lidar)
print('\nfiltered pointcloud size: ', (np.size(lidar,1), np.size(lidar,0)))
# load image
img = data_provider.read_img(img_path_)
return img, lidar
# project points on the image plane
def lidarPreprocess(point_cloud_, calib_path_, type_):
# type_: r:reflectance, 2d:2d depth, 3d:3d_depth
assert type_ in {"r", "2d", "3d"}, \
"type_ should be r:reflectance or 2d:2d_depth or 3d:3d_depth"
param = data_provider.read_calib(calib_path_, [2,4,5])
# projection: pixels = cam2img * cam2cam * vel2cam * pointcloud
# matrix type: np.array
cam2img = param[0].reshape([3,4]) # from camera-view to pixels
cam2cam = param[1].reshape([3,3]) # rectify camera-view
vel2cam = param[2].reshape([3,4]) # from lidar-view to camera-view
# get camera-view coordinates & pixel coordinates(after cam2img)
__, pixel = velo_2_cam.lidar_to_camera_project(trans_mat=vel2cam,
rec_mat=cam2cam,
cam_mat=cam2img,
data=point_cloud_,
pixel_range=(1242,375)
)
    if type_ == "r":
        pixel = np.row_stack((pixel[:2, :], pixel[3, :]))
        print("return 2d coordinates with reflectance")
    elif type_ == "2d":
        pixel = np.row_stack((pixel[:2, :], pixel[4, :]))
        print("return 2d coordinates with 2d depth")
    elif type_ == "3d":
        pixel = np.row_stack((pixel[:2, :], pixel[5, :]))
        print("return 2d coordinates with 3d depth")
    return pixel
def completion(point_cloud_):
"""codes wait for completion"""
pass
if __name__ == "__main__":
filename = "um_000000"
pc_path = "../data/bin/"+filename+".bin"
calib_path = "../data/calib/"+filename+".txt"
image_path = "../data/img/"+filename+".png"
print('using data ',filename,' for test')
img, lidar = rawData(pc_path_=pc_path, img_path_=image_path)
pixel = lidarPreprocess(point_cloud_=lidar,
calib_path_=calib_path, type_="r")
# add pixels to image
# velo_2_cam.add_pc_to_img(img_path=image_path, coor=pixel, saveto='./result/'+filename+'_composition.png')
| 35.405063
| 111
| 0.598498
| 354
| 2,797
| 4.49435
| 0.333333
| 0.018856
| 0.015085
| 0.028284
| 0.099937
| 0.049026
| 0.049026
| 0
| 0
| 0
| 0
| 0.031778
| 0.279943
| 2,797
| 79
| 111
| 35.405063
| 0.758193
| 0.231677
| 0
| 0
| 0
| 0
| 0.137412
| 0
| 0
| 0
| 0
| 0
| 0.022222
| 1
| 0.066667
| false
| 0.022222
| 0.088889
| 0
| 0.2
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6770cd7813960cae894c7947e2f76b45e5169f4
| 1,014
|
py
|
Python
|
tests/run_compiler.py
|
yshrdbrn/ogle
|
529337203b1bd3ec66c08f4ed153dba5fc8349a1
|
[
"MIT"
] | null | null | null |
tests/run_compiler.py
|
yshrdbrn/ogle
|
529337203b1bd3ec66c08f4ed153dba5fc8349a1
|
[
"MIT"
] | null | null | null |
tests/run_compiler.py
|
yshrdbrn/ogle
|
529337203b1bd3ec66c08f4ed153dba5fc8349a1
|
[
"MIT"
] | null | null | null |
from ogle.code_generator.code_generator import CodeGenerator
from ogle.lexer.lexer import Lexer
from ogle.parser.parser import Parser
from ogle.semantic_analyzer.semantic_analyzer import SemanticAnalyzer
def _get_errors_warnings(all_errors):
errors = [e for e in all_errors if 'Error' in e[1]]
warnings = [e for e in all_errors if 'Warning' in e[1]]
return errors, warnings
def get_semantic_errors(input_file):
lexer = Lexer(input_file)
parser = Parser(lexer)
parser.parse()
semantic_analyzer = SemanticAnalyzer(parser.ast)
semantic_analyzer.analyze()
return _get_errors_warnings(semantic_analyzer.errors)
def run(input_file, output_filename):
lexer = Lexer(input_file)
parser = Parser(lexer)
parser.parse()
semantic_analyzer = SemanticAnalyzer(parser.ast)
semantic_analyzer.analyze()
with open(output_filename, 'w') as output:
code_generator = CodeGenerator(parser.ast, semantic_analyzer.symbol_table)
code_generator.generate(output)
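# Hypothetical driver for the helpers above; 'program.src' and the output
# name are made up, and whether Lexer expects source text or a file handle
# is an assumption to verify against the ogle Lexer.
if __name__ == '__main__':
    source = open('program.src').read()
    errors, warnings = get_semantic_errors(source)
    if not errors:
        run(source, 'program.out')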
| 36.214286
| 82
| 0.759369
| 134
| 1,014
| 5.522388
| 0.291045
| 0.172973
| 0.068919
| 0.101351
| 0.348649
| 0.348649
| 0.348649
| 0.3
| 0.3
| 0.3
| 0
| 0.002345
| 0.158777
| 1,014
| 27
| 83
| 37.555556
| 0.865182
| 0
| 0
| 0.416667
| 0
| 0
| 0.012821
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.166667
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c678c38909ca5f9f3348fe7d0e9471e1720d3bee
| 817
|
py
|
Python
|
graph/dfs_dict_attempt2.py
|
automoto/python-code-golf
|
1a4e0b5984e64620637de9d80e82c6e89997f4af
|
[
"MIT"
] | null | null | null |
graph/dfs_dict_attempt2.py
|
automoto/python-code-golf
|
1a4e0b5984e64620637de9d80e82c6e89997f4af
|
[
"MIT"
] | null | null | null |
graph/dfs_dict_attempt2.py
|
automoto/python-code-golf
|
1a4e0b5984e64620637de9d80e82c6e89997f4af
|
[
"MIT"
] | null | null | null |
# !depth first search !dfs !graph
# dict of nodes as the key and sets for the edges(children)
graph = {'A': set(['B', 'C', 'D']),
'B': set(['E', 'F']),
'C': set([]),
'D': set(['G', 'H']),
'E': set([]),
'F': set(['I', 'J']),
'G': set(['K']),
'H': set([]),
'I': set([]),
'J': set([]),
'K': set([])}
def dfs(graph, start):
visited = set()
stack = [start]
while stack:
current_node = stack.pop()
print('visiting node ', current_node)
visited.add(current_node)
stack.extend(graph[current_node] - visited)
dfs(graph, 'A')
# PSEUDOCODE
# create set of visited nodes
# create a searching stack with the starting node
# while the stack has nodes
# pop the current_node off of the stack
# add current node to visited
# add the connected nodes minus visited to the stack to search
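# A deterministic variant (illustrative): sorting the unvisited neighbours
# before pushing makes the visit order reproducible (set iteration order is
# arbitrary), and the visited-check on pop avoids processing a node twice
# when it was pushed via two different parents.
def dfs_ordered(graph, start):
    visited = set()
    stack = [start]
    order = []
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        stack.extend(sorted(graph[node] - visited, reverse=True))
    return order

# dfs_ordered(graph, 'A') -> ['A', 'B', 'E', 'F', 'I', 'J', 'C', 'D', 'G', 'K', 'H']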
| 24.029412
| 63
| 0.597307
| 125
| 817
| 3.864
| 0.408
| 0.136646
| 0.066253
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.203182
| 817
| 34
| 63
| 24.029412
| 0.741935
| 0.405141
| 0
| 0
| 0
| 0
| 0.075472
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0
| 0
| 0.05
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c67973e1c48ecff18bf6a4fc82b259940ef31d3c
| 4,561
|
py
|
Python
|
tools/fastq_pair_names/fastq_pair_names.py
|
Neato-Nick/pico_galaxy
|
79666612a9ca2d335622bc282a4768bb43d91419
|
[
"MIT"
] | 18
|
2015-06-09T13:57:09.000Z
|
2022-01-14T21:05:54.000Z
|
tools/fastq_pair_names/fastq_pair_names.py
|
Neato-Nick/pico_galaxy
|
79666612a9ca2d335622bc282a4768bb43d91419
|
[
"MIT"
] | 34
|
2015-04-02T19:26:08.000Z
|
2021-06-17T18:59:24.000Z
|
tools/fastq_pair_names/fastq_pair_names.py
|
Neato-Nick/pico_galaxy
|
79666612a9ca2d335622bc282a4768bb43d91419
|
[
"MIT"
] | 24
|
2015-02-25T13:40:19.000Z
|
2021-09-08T20:40:40.000Z
|
#!/usr/bin/env python
"""Extract paired read names from FASTQ file(s).
The input file should be a valid FASTQ file(s), the output is two tabular
files - the paired read names (without suffixes), and unpaired read names
(including any unrecognised pair names).
Note that the FASTQ variant is unimportant (Sanger, Solexa, Illumina, or even
Color Space should all work equally well).
This script is copyright 2014-2017 by Peter Cock, The James Hutton Institute
(formerly SCRI), Scotland, UK. All rights reserved.
See accompanying text file for licence details (MIT license).
"""
from __future__ import print_function
import os
import re
import sys
if "-v" in sys.argv or "--version" in sys.argv:
print("0.0.5")
sys.exit(0)
from galaxy_utils.sequence.fastq import fastqReader
msg = """Expects at least 3 arguments:
- Pair names tabular output filename
- Non-pair names tabular output filename
- Input FASTQ input filename(s)
"""
if len(sys.argv) < 3:
sys.exit(msg)
output_pairs = sys.argv[1]
output_nonpairs = sys.argv[2]
input_fastq_filenames = sys.argv[3:]
# Cope with three widely used suffix naming conventions,
# Illumina: /1 or /2
# Forward/reverse: .f or .r
# Sanger, e.g. .p1k and .q1k
# See http://staden.sourceforge.net/manual/pregap4_unix_50.html
re_f = re.compile(r"(/1|\.f|\.[sfp]\d\w*)$")
re_r = re.compile(r"(/2|\.r|\.[rq]\d\w*)$")
# assert re_f.match("demo/1")
assert re_f.search("demo.f")
assert re_f.search("demo.s1")
assert re_f.search("demo.f1k")
assert re_f.search("demo.p1")
assert re_f.search("demo.p1k")
assert re_f.search("demo.p1lk")
assert re_r.search("demo/2")
assert re_r.search("demo.r")
assert re_r.search("demo.q1")
assert re_r.search("demo.q1lk")
assert not re_r.search("demo/1")
assert not re_r.search("demo.f")
assert not re_r.search("demo.p")
assert not re_f.search("demo/2")
assert not re_f.search("demo.r")
assert not re_f.search("demo.q")
re_illumina_f = re.compile(r"^@[a-zA-Z0-9_:-]+ 1:.*$")
re_illumina_r = re.compile(r"^@[a-zA-Z0-9_:-]+ 2:.*$")
assert re_illumina_f.match("@HWI-ST916:79:D04M5ACXX:1:1101:10000:100326 1:N:0:TGNCCA")
assert re_illumina_r.match("@HWI-ST916:79:D04M5ACXX:1:1101:10000:100326 2:N:0:TGNCCA")
assert not re_illumina_f.match(
"@HWI-ST916:79:D04M5ACXX:1:1101:10000:100326 2:N:0:TGNCCA"
)
assert not re_illumina_r.match(
"@HWI-ST916:79:D04M5ACXX:1:1101:10000:100326 1:N:0:TGNCCA"
)
count = 0
pairs = set() # Will this scale OK?
forward = 0
reverse = 0
neither = 0
out_pairs = open(output_pairs, "w")
out_nonpairs = open(output_nonpairs, "w")
for input_fastq in input_fastq_filenames:
if not os.path.isfile(input_fastq):
sys.exit("Missing input FASTQ file %r" % input_fastq)
in_handle = open(input_fastq)
# Don't care about the FASTQ type really...
for record in fastqReader(in_handle, "sanger"):
count += 1
name = record.identifier.split(None, 1)[0]
assert name[0] == "@", record.identifier # Quirk of the Galaxy parser
name = name[1:]
is_forward = False
suffix = re_f.search(name)
if suffix:
# ============
# Forward read
# ============
template = name[: suffix.start()]
is_forward = True
elif re_illumina_f.match(record.identifier):
template = name # No suffix
is_forward = True
if is_forward:
forward += 1
if template not in pairs:
pairs.add(template)
out_pairs.write(template + "\n")
else:
is_reverse = False
suffix = re_r.search(name)
if suffix:
# ============
# Reverse read
# ============
template = name[: suffix.start()]
is_reverse = True
elif re_illumina_r.match(record.identifier):
template = name # No suffix
is_reverse = True
if is_reverse:
reverse += 1
if template not in pairs:
pairs.add(template)
out_pairs.write(template + "\n")
else:
# ===========================
# Neither forward nor reverse
# ===========================
out_nonpairs.write(name + "\n")
neither += 1
in_handle.close()
out_pairs.close()
out_nonpairs.close()
print(
"%i reads (%i forward, %i reverse, %i neither), %i pairs"
% (count, forward, reverse, neither, len(pairs))
)
| 31.027211
| 86
| 0.611489
| 657
| 4,561
| 4.127854
| 0.302892
| 0.058997
| 0.033186
| 0.043142
| 0.347345
| 0.255162
| 0.185103
| 0.173304
| 0.141593
| 0.141593
| 0
| 0.045967
| 0.241614
| 4,561
| 146
| 87
| 31.239726
| 0.738075
| 0.232186
| 0
| 0.181818
| 0
| 0.040404
| 0.195396
| 0.061871
| 0
| 0
| 0
| 0
| 0.212121
| 1
| 0
| false
| 0
| 0.050505
| 0
| 0.050505
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6798f3695e83af119f05e4fdd4f14111d00889d
| 2,903
|
py
|
Python
|
code/05_speech_to_text/main_05_b_wake_word.py
|
padmalcom/AISpeechAssistant
|
b7501a23a8f513acb5043f3c7bb06df129bdc2cc
|
[
"Apache-2.0"
] | 1
|
2021-09-08T09:21:16.000Z
|
2021-09-08T09:21:16.000Z
|
code/05_speech_to_text/main_05_b_wake_word.py
|
padmalcom/AISpeechAssistant
|
b7501a23a8f513acb5043f3c7bb06df129bdc2cc
|
[
"Apache-2.0"
] | null | null | null |
code/05_speech_to_text/main_05_b_wake_word.py
|
padmalcom/AISpeechAssistant
|
b7501a23a8f513acb5043f3c7bb06df129bdc2cc
|
[
"Apache-2.0"
] | 2
|
2022-02-06T09:54:40.000Z
|
2022-03-01T07:52:51.000Z
|
from loguru import logger
import yaml
import time
import pyaudio
import struct
import os
import sys
from vosk import Model, SpkModel, KaldiRecognizer
import json
import text2numde
from TTS import Voice
import multiprocessing
CONFIG_FILE = "config.yml"
SAMPLE_RATE = 16000
FRAME_LENGTH = 512
class VoiceAssistant():
def __init__(self):
logger.info("Initialisiere VoiceAssistant...")
logger.debug("Lese Konfiguration...")
global CONFIG_FILE
with open(CONFIG_FILE, "r", encoding='utf8') as ymlfile:
self.cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
if self.cfg:
logger.debug("Konfiguration gelesen.")
else:
logger.debug("Konfiguration konnte nicht gelesen werden.")
sys.exit(1)
language = self.cfg['assistant']['language']
if not language:
language = "de"
logger.info("Verwende Sprache {}", language)
logger.debug("Initialisiere Audioeingabe...")
self.pa = pyaudio.PyAudio()
self.audio_stream = self.pa.open(
rate=SAMPLE_RATE,
channels=1,
format=pyaudio.paInt16,
input=True,
frames_per_buffer=FRAME_LENGTH,
input_device_index=0)
logger.debug("Audiostream geöffnet.")
logger.info("Initialisiere Sprachausgabe...")
self.tts = Voice()
voices = self.tts.get_voice_keys_by_language(language)
if len(voices) > 0:
logger.info('Stimme {} gesetzt.', voices[0])
self.tts.set_voice(voices[0])
else:
logger.warning("Es wurden keine Stimmen gefunden.")
self.tts.say("Initialisierung abgeschlossen")
logger.debug("Sprachausgabe initialisiert")
        # Initialize speech recognition
logger.info("Initialisiere Spracherkennung...")
stt_model = Model('./vosk-model-de-0.6')
speaker_model = SpkModel('./vosk-model-spk-0.4')
self.rec = KaldiRecognizer(stt_model, speaker_model, 16000)
logger.info("Initialisierung der Spracherkennung abgeschlossen.")
def run(self):
logger.info("VoiceAssistant Instanz wurde gestartet.")
try:
while True:
pcm = self.audio_stream.read(FRAME_LENGTH)
if self.rec.AcceptWaveform(pcm):
recResult = json.loads(self.rec.Result())
                    # Get the result out of the JSON object
sentence = recResult['text']
logger.debug('Ich habe verstanden "{}"', sentence)
                    if sentence.lower().startswith("kevin"):
                        sentence = sentence[5:]  # cut the wake word off the start of the sentence
                        sentence = sentence.strip()  # strip whitespace from the start and end of the sentence
logger.info("Prozessiere Befehl {}.", sentence)
except KeyboardInterrupt:
logger.debug("Per Keyboard beendet")
finally:
logger.debug('Beginne Aufräumarbeiten...')
if self.audio_stream is not None:
self.audio_stream.close()
if self.pa is not None:
self.pa.terminate()
if __name__ == '__main__':
multiprocessing.set_start_method('spawn')
va = VoiceAssistant()
logger.info("Anwendung wurde gestartet")
va.run()
| 27.130841
| 86
| 0.709955
| 356
| 2,903
| 5.679775
| 0.466292
| 0.04451
| 0.029674
| 0.012859
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011638
| 0.171202
| 2,903
| 107
| 87
| 27.130841
| 0.828761
| 0.054426
| 0
| 0.02439
| 0
| 0
| 0.238964
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.146341
| 0
| 0.182927
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6832c12d1f11f0fd4b7b74f990fd950eb68d5c6
| 2,506
|
py
|
Python
|
functions/formatString.py
|
Steve-Xyh/AutoAoxiang
|
a8f1abed0f17b967456b1fa539c0aae79dac1d01
|
[
"WTFPL"
] | 7
|
2020-02-17T08:12:14.000Z
|
2021-12-29T09:41:35.000Z
|
functions/formatString.py
|
Steve-Xyh/AutoAoxiang
|
a8f1abed0f17b967456b1fa539c0aae79dac1d01
|
[
"WTFPL"
] | null | null | null |
functions/formatString.py
|
Steve-Xyh/AutoAoxiang
|
a8f1abed0f17b967456b1fa539c0aae79dac1d01
|
[
"WTFPL"
] | 1
|
2020-07-24T07:16:14.000Z
|
2020-07-24T07:16:14.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import colorama
colorama.init(autoreset=True)
logData = {
'所在位置': 'Location',
'是否经停湖北': '否',
'接触湖北籍人员': '否',
'接触确诊疑似': '否',
'今日体温': '37.2度以下',
'有无疑似或异常': '无',
'是否隔离': '否',
}
def log_line(dic: dict, color=True):
'''
    Chinese single-line log
#### Parameters::
dic: log dict(e.g. {name: value})
'''
time_info = setColor(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
color='greenFore') if color else datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
res = '[' + time_info + '] '
for key in dic:
flg = dic[key] is not None
res += str(key).ljust(12, chr(12288))
val_info = setColor(dic[key], color='yellowFore') if color else dic[key]
res += val_info if flg else ''.ljust(20, chr(12288)) + '\n'
return res
def log_cn(dic: dict):
"""
    Chinese multi-line log
:param dic: log dict(e.g. {name: value})
"""
formLen = 40
res = '-' * formLen + '\n'
res += '[' + setColor(datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S'), color='greenFore') + ']\n'
for key in dic:
flg = dic[key] is not None
res += str(key).ljust(12, chr(12288))
res += (setColor(dic[key], color='yellowFore')
if flg else '').ljust(20, chr(12288)) + '\n'
res += '-' * formLen
return res
def log_en(dic):
"""
    English log
:param dic: log dict(e.g. {name: value})
"""
formLen = 40
res = '-' * formLen + '\n'
res += '[' + setColor(datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S'), color='greenFore') + ']\n'
for key in dic:
flg = dic[key] is not None
res += str(key).ljust(20)
res += (setColor(dic[key], color='yellowFore')
if flg else '').ljust(20) + '\n'
res += '-' * formLen
return res
def setColor(string, color):
    '''Set the color'''
convertColor = {
'redFore': colorama.Fore.RED + colorama.Back.RESET,
'redBack': colorama.Fore.WHITE + colorama.Back.RED,
'greenFore': colorama.Fore.GREEN + colorama.Back.RESET,
'greenBack': colorama.Fore.BLACK + colorama.Back.GREEN,
'yellowFore': colorama.Fore.YELLOW + colorama.Back.RESET,
}
return colorama.Style.BRIGHT + convertColor[color] + string + colorama.Style.RESET_ALL
if __name__ == "__main__":
a = 'This is red.'
b = setColor(a, 'redFore')
print(b)
print(log_cn(logData))
| 25.571429
| 111
| 0.541899
| 322
| 2,506
| 4.164596
| 0.304348
| 0.03132
| 0.056674
| 0.080537
| 0.50261
| 0.50261
| 0.445936
| 0.430276
| 0.404922
| 0.404922
| 0
| 0.022283
| 0.265762
| 2,506
| 97
| 112
| 25.835052
| 0.706522
| 0.0834
| 0
| 0.383333
| 0
| 0
| 0.124271
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.033333
| 0
| 0.166667
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6843679e999329dca1a8986c704607c2cb84a96
| 433
|
py
|
Python
|
2 - Automation tools with IP hiding techniques/checkValidJson.py
|
Phong940253/facebook-data-extraction
|
fa64680dcff900db4d852af06ff792ccf4d5be33
|
[
"MIT"
] | null | null | null |
2 - Automation tools with IP hiding techniques/checkValidJson.py
|
Phong940253/facebook-data-extraction
|
fa64680dcff900db4d852af06ff792ccf4d5be33
|
[
"MIT"
] | null | null | null |
2 - Automation tools with IP hiding techniques/checkValidJson.py
|
Phong940253/facebook-data-extraction
|
fa64680dcff900db4d852af06ff792ccf4d5be33
|
[
"MIT"
] | null | null | null |
import json
import glob
groupPost = glob.glob("rawData/*/*/*.json")
pagePost = glob.glob("rawData/*/*.json")
groupPagePost = groupPost + pagePost
def is_json(myjson):
try:
json.load(myjson)
    except ValueError:
        return False
return True
for postFile in groupPagePost:
with open(postFile, "r", encoding="utf-8") as f:
valid = is_json(f)
if not valid:
print(postFile)
| 19.681818
| 52
| 0.628176
| 55
| 433
| 4.909091
| 0.6
| 0.059259
| 0.111111
| 0.140741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003086
| 0.251732
| 433
| 21
| 53
| 20.619048
| 0.830247
| 0
| 0
| 0
| 0
| 0
| 0.092379
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.125
| 0
| 0.3125
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c686c1dded95c4fb11f50e8f958330e48395c1cb
| 304
|
py
|
Python
|
34.PySimpleGUI.py
|
sarincr/GUI-With-Tkinter-using-Python
|
3b57fc4aeed9e4a3018fc940bafdb4160ec853fc
|
[
"MIT"
] | null | null | null |
34.PySimpleGUI.py
|
sarincr/GUI-With-Tkinter-using-Python
|
3b57fc4aeed9e4a3018fc940bafdb4160ec853fc
|
[
"MIT"
] | null | null | null |
34.PySimpleGUI.py
|
sarincr/GUI-With-Tkinter-using-Python
|
3b57fc4aeed9e4a3018fc940bafdb4160ec853fc
|
[
"MIT"
] | null | null | null |
import PySimpleGUI as PySG
lay = [ [PySG.Text("What's your name?")],
[PySG.Input()],
[PySG.Button('Ok')] ]
wd = PySG.Window('Python Simple GUI', lay)
event, values = wd.read()
print('Hello', values[0])
wd.close()
| 21.714286
| 48
| 0.457237
| 33
| 304
| 4.212121
| 0.757576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005348
| 0.384868
| 304
| 13
| 49
| 23.384615
| 0.737968
| 0
| 0
| 0
| 0
| 0
| 0.134868
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c688fe0af58ac798c7af0c9f68af25aff660071c
| 5,304
|
py
|
Python
|
models/ScrabbleGAN.py
|
iambhuvi/ScrabbleGAN
|
30dce26a1a103a0fd6ce7269d6ccdcaccb32fd3b
|
[
"MIT"
] | 9
|
2021-02-02T06:31:32.000Z
|
2021-11-03T11:19:58.000Z
|
models/ScrabbleGAN.py
|
iambhuvi/ScrabbleGAN
|
30dce26a1a103a0fd6ce7269d6ccdcaccb32fd3b
|
[
"MIT"
] | 1
|
2021-12-01T12:13:14.000Z
|
2021-12-01T12:13:14.000Z
|
models/ScrabbleGAN.py
|
iambhuvi/ScrabbleGAN
|
30dce26a1a103a0fd6ce7269d6ccdcaccb32fd3b
|
[
"MIT"
] | 6
|
2021-02-02T06:31:49.000Z
|
2022-01-21T14:33:43.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from models.model_utils import BigGAN as BGAN
from utils.data_utils import *
import pandas as pd
class Recognizer(nn.Module):
def __init__(self, cfg):
super(Recognizer, self).__init__()
input_size = 1
conv_is = [1] + cfg.r_fs
self.convs = nn.Sequential(
nn.Sequential(
nn.Conv2d(conv_is[0], cfg.r_fs[0], kernel_size=cfg.r_ks[0], padding=cfg.r_pads[0]),
nn.ReLU(True),
nn.MaxPool2d(2)
),
nn.Sequential(
nn.Conv2d(conv_is[1], cfg.r_fs[1], kernel_size=cfg.r_ks[1], padding=cfg.r_pads[1]),
nn.ReLU(True),
nn.MaxPool2d(2)
),
nn.Sequential(
nn.Conv2d(conv_is[2], cfg.r_fs[2], kernel_size=cfg.r_ks[2], padding=cfg.r_pads[2]),
nn.BatchNorm2d(cfg.r_fs[2]),
nn.ReLU(True)
),
nn.Sequential(
nn.Conv2d(conv_is[3], cfg.r_fs[3], kernel_size=cfg.r_ks[3], padding=cfg.r_pads[3]),
nn.ReLU(True),
nn.MaxPool2d((2, 2), (2, 1), (0, 1))
),
nn.Sequential(
nn.Conv2d(conv_is[4], cfg.r_fs[4], kernel_size=cfg.r_ks[4], padding=cfg.r_pads[4]),
nn.BatchNorm2d(cfg.r_fs[4]),
nn.ReLU(True)
),
nn.Sequential(
nn.Conv2d(conv_is[5], cfg.r_fs[5], kernel_size=cfg.r_ks[5], padding=cfg.r_pads[5]),
nn.ReLU(True),
nn.MaxPool2d((2, 2), (2, 1), (0, 1))
),
nn.Sequential(
nn.Conv2d(conv_is[6], cfg.r_fs[6], kernel_size=cfg.r_ks[6], padding=cfg.r_pads[6]),
nn.BatchNorm2d(cfg.r_fs[6]),
nn.ReLU(True)
)
)
self.output = nn.Linear(512, cfg.num_chars)
self.prob = nn.LogSoftmax(dim=2)
def forward(self, x):
out = self.convs(x)
out = out.squeeze(2) # [b, c, w]
out = out.permute(0, 2, 1) # [b, w, c]
# Predict for len(num_chars) classes at each timestep
out = self.output(out)
out = self.prob(out)
return out
class ScrabbleGAN(nn.Module):
def __init__(self, cfg, char_map):
super().__init__()
self.z_dist = torch.distributions.Normal(loc=0, scale=1.)
self.z_dim = cfg.z_dim
# Get word list from lexicon to be used to generate fake images
if cfg.dataset == 'IAM':
self.fake_words = pd.read_csv(cfg.lexicon_file, sep='\t', names=['words'])
# filter words with len >= 20
self.fake_words = self.fake_words.loc[self.fake_words.words.str.len() < 20]
self.fake_words = self.fake_words.words.to_list()
else:
exception_chars = ['ï', 'ü', '.', '_', 'ö', ',', 'ã', 'ñ']
self.fake_words = pd.read_csv(cfg.lexicon_file, '\t')['lemme']
self.fake_words = [word.split()[-1] for word in self.fake_words
if (pd.notnull(word) and all(char not in word for char in exception_chars))]
fake_words_clean = []
for word in self.fake_words:
word_set = set(word)
if len(word_set.intersection(char_map.keys())) == len(word_set):
fake_words_clean.append(word)
self.fake_words = fake_words_clean
self.fake_y_dist = torch.distributions.Categorical(
torch.tensor([1. / len(self.fake_words)] * len(self.fake_words)))
self.batch_size = cfg.batch_size
self.num_chars = cfg.num_chars
        self.word_map = WordMap(char_map)
self.config = cfg
self.R = Recognizer(cfg)
self.G = BGAN.Generator(resolution=cfg.resolution, G_shared=cfg.g_shared,
bn_linear=cfg.bn_linear, n_classes=cfg.num_chars, hier=True)
self.D = BGAN.Discriminator(resolution=cfg.resolution, bn_linear=cfg.bn_linear, n_classes=cfg.num_chars)
def forward_fake(self, z=None, fake_y=None, b_size=None):
b_size = self.batch_size if b_size is None else b_size
# If z is not provided, sample it
if z is None:
self.z = self.z_dist.sample([b_size, self.z_dim]).to(self.config.device)
else:
self.z = z.repeat(b_size, 1).to(self.config.device)
# If fake words are not provided, sample it
if fake_y is None:
# Sample lexicon indices, get words, and encode them using char_map
sample_lex_idx = self.fake_y_dist.sample([b_size])
fake_y = [self.fake_words[i] for i in sample_lex_idx]
fake_y, fake_y_lens = self.word_map.encode(fake_y)
self.fake_y_lens = fake_y_lens.to(self.config.device)
# Convert y into one-hot
self.fake_y = fake_y.to(self.config.device)
self.fake_y_one_hot = F.one_hot(fake_y, self.num_chars).to(self.config.device)
self.fake_img = self.G(self.z, self.fake_y_one_hot)
def create_model(config, char_map):
model = ScrabbleGAN(config, char_map)
model.to(config.device)
return model
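# --- Usage sketch (illustrative, not part of the original module) ---
# Assuming a populated `config` namespace and `char_map` dict as used above:
#
#   model = create_model(config, char_map)
#   model.forward_fake(b_size=4)   # samples z and lexicon words
#   fake_batch = model.fake_img    # generated word images from G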
| 37.617021
| 112
| 0.572587
| 784
| 5,304
| 3.664541
| 0.202806
| 0.034807
| 0.063348
| 0.04873
| 0.350505
| 0.269405
| 0.204664
| 0.204664
| 0.183084
| 0.132962
| 0
| 0.020985
| 0.299208
| 5,304
| 140
| 113
| 37.885714
| 0.751951
| 0.061086
| 0
| 0.277778
| 0
| 0
| 0.00503
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046296
| false
| 0
| 0.064815
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c689b60ebca7bfda5e5401b93bdc1651fc7b24be
| 2,745
|
py
|
Python
|
jobbing/controllers/providers_controller.py
|
davidall-amdocs/jobbing
|
b13311da07606366dfbe2eb737483a5820038557
|
[
"Apache-2.0"
] | null | null | null |
jobbing/controllers/providers_controller.py
|
davidall-amdocs/jobbing
|
b13311da07606366dfbe2eb737483a5820038557
|
[
"Apache-2.0"
] | 1
|
2021-06-10T03:34:07.000Z
|
2021-06-10T03:34:07.000Z
|
jobbing/controllers/providers_controller.py
|
davidall-amdocs/jobbing
|
b13311da07606366dfbe2eb737483a5820038557
|
[
"Apache-2.0"
] | 1
|
2022-02-14T15:51:01.000Z
|
2022-02-14T15:51:01.000Z
|
from flask import abort
from jobbing.models.user_profile import UserProfile # noqa: E501
from jobbing.models.service import Service # noqa: E501
from jobbing.DBModels import Profile as DBProfile
from jobbing.DBModels import Service as DBService
from jobbing.login import token_required
@token_required
def get_provider_by_id(provider_id): # noqa: E501
"""get_provider_by_id
Shows a provider identified by id # noqa: E501
:param provider_id: id of the service provider
:type provider_id: int
:rtype: UserProfile
"""
profile = DBProfile.query.filter(DBProfile.provider_id == provider_id).first()
if profile is None:
abort(404)
return UserProfile(userprofile_id=profile.userprofile_id,
first_name=profile.first_name,
second_name=profile.second_name,
first_surname=profile.first_surname,
second_surname=profile.second_surname,
birthdate=profile.birthdate,
curp=profile.curp,
mobile_number=profile.mobile_number,
home_number=profile.home_number,
office_number=profile.office_number,
facebook_profile=profile.facebook_profile,
linkedin_profile=profile.linkedin_profile,
twitter_profile=profile.twitter_profile,
id_image=profile.id_image,
status=profile.status,
created=profile.created,
updated=profile.updated,
credentials_id=profile.credentials_id,
org_id=profile.org_id,
address=profile.address)
@token_required
def get_services_by_provider_id(provider_id): # noqa: E501
"""get_services_by_provider_id
Show all services offered by a provider # noqa: E501
:param provider_id: id of the provider
:type provider_id: int
:rtype: List[Service]
"""
services = DBService.query.filter(DBService.user_id == provider_id)
results = [
Service(
id=serv.service_id,
category_id=serv.category_id,
description=serv.description,
years_of_experience=serv.years_of_experience,
price_of_service=serv.price_of_service,
work_zone=serv.work_zone,
services_provided=serv.services_provided,
five_stars=serv.five_stars,
four_starts=serv.four_starts,
three_starts=serv.three_starts,
two_starts=serv.two_starts,
one_start=serv.one_start,
read_only=serv.read_only,
status_id=serv.status_id,
user_id=serv.user_id
) for serv in services]
return results
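# --- Usage note (illustrative, not part of the original controller) ---
# These handlers follow the generated-controller pattern, so they are
# normally bound to routes by an OpenAPI spec whose operationIds point at
# this module. Assuming such a spec maps GET /providers/{provider_id} to
# get_provider_by_id, a client call might look like:
#
#   curl -H "Authorization: Bearer <token>" http://<host>/providers/42
#
# returning the serialized UserProfile, or a 404 if no profile matches.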
| 34.746835
| 82
| 0.647723
| 315
| 2,745
| 5.368254
| 0.285714
| 0.06505
| 0.028386
| 0.022472
| 0.107037
| 0.056771
| 0
| 0
| 0
| 0
| 0
| 0.010687
| 0.284153
| 2,745
| 78
| 83
| 35.192308
| 0.849873
| 0.135155
| 0
| 0.037736
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037736
| false
| 0
| 0.113208
| 0
| 0.188679
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c68c3919e177e8d1de7b30c2a650b62b74c47975
| 6,811
|
py
|
Python
|
bin/extract_bcs.py
|
dmaticzka/bctools
|
e4733b1f59a151f8158a8173a3cde48a5d119bc2
|
[
"Apache-2.0"
] | null | null | null |
bin/extract_bcs.py
|
dmaticzka/bctools
|
e4733b1f59a151f8158a8173a3cde48a5d119bc2
|
[
"Apache-2.0"
] | 3
|
2016-04-24T14:26:17.000Z
|
2017-04-28T15:17:20.000Z
|
bin/extract_bcs.py
|
dmaticzka/bctools
|
e4733b1f59a151f8158a8173a3cde48a5d119bc2
|
[
"Apache-2.0"
] | 2
|
2016-05-06T03:57:25.000Z
|
2018-11-06T10:57:32.000Z
|
#!/usr/bin/env python
import argparse
import logging
import re
from sys import stdout
from Bio.SeqIO.QualityIO import FastqGeneralIterator
# avoid ugly python IOError when stdout output is piped into another program
# and then truncated (such as piping to head)
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
tool_description = """
Extract barcodes from a FASTQ file according to a user-specified pattern. Starting from the 5'-end, positions marked by X will be moved into a separate FASTQ file. Positions marked by N will be kept.
By default output is written to stdout.
Example usage:
- remove barcode nucleotides at positions 1-3 and 6-7 from FASTQ; write modified
FASTQ entries to output.fastq and barcode nucleotides to barcodes.fastq:
fastq_extract_barcodes.py barcoded_input.fastq XXXNNXX --outfile output.fastq --bcs barcodes.fastq
"""
# parse command line arguments
parser = argparse.ArgumentParser(description=tool_description,
formatter_class=argparse.RawDescriptionHelpFormatter)
# positional arguments
parser.add_argument(
"infile",
help="Path to fastq file.")
parser.add_argument(
"pattern",
help="Pattern of barcode nucleotides starting at 5'-end. X positions will be moved to the header, N positions will be kept.")
# optional arguments
parser.add_argument(
"-o", "--outfile",
help="Write results to this file.")
parser.add_argument(
"-b", "--bcs",
dest="out_bc_fasta",
help="Write barcodes to this file in FASTQ format.")
parser.add_argument(
"--fasta-barcodes",
dest="save_bcs_as_fa",
action="store_true",
help="Save extracted barcodes in FASTA format.")
parser.add_argument(
"-a", "--add-bc-to-fastq",
dest="add_to_head",
help="Append extracted barcodes to the FASTQ headers.",
action="store_true")
parser.add_argument(
"-v", "--verbose",
help="Be verbose.",
action="store_true")
parser.add_argument(
"-d", "--debug",
help="Print lots of debugging information",
action="store_true")
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(filename)s - %(levelname)s - %(message)s")
elif args.verbose:
logging.basicConfig(level=logging.INFO, format="%(filename)s - %(levelname)s - %(message)s")
else:
logging.basicConfig(format="%(filename)s - %(levelname)s - %(message)s")
logging.info("Parsed arguments:")
logging.info(" infile: '{}'".format(args.infile))
logging.info(" pattern: '{}'".format(args.pattern))
if args.outfile:
logging.info(" outfile: enabled writing to file")
logging.info(" outfile: '{}'".format(args.outfile))
if args.out_bc_fasta:
logging.info(" bcs: enabled writing barcodes to fastq file")
logging.info(" bcs: {}".format(args.out_bc_fasta))
if args.save_bcs_as_fa:
logging.info(" fasta-barcodes: write barcodes in fasta format instead of fastq")
logging.info("")
# check if supplied pattern is valid
valid_pattern = re.compile("^[XN]+$")
pattern_match = valid_pattern.match(args.pattern)
if pattern_match is None:
raise ValueError("Error: supplied pattern '{}' is not valid.".format(args.pattern))
# check if at least one barcode position is included in the pattern
has_bcpos_pattern = re.compile("X")
pattern_match = has_bcpos_pattern.search(args.pattern)
if pattern_match is None:
raise ValueError("Error: supplied pattern '{}' does not contain a barcode position 'X'.".format(args.pattern))
logging.info("Barcode pattern analysis:")
# get X positions of pattern string
barcode_nt_pattern = re.compile("X+")
barcode_positions = []
for m in re.finditer(barcode_nt_pattern, args.pattern):
logging.info(' found barcode positions in pattern: %02d-%02d: %s' % (m.start(), m.end(), m.group(0)))
barcode_positions.append((m.start(), m.end()))
logging.info(" barcode positions: {}".format(barcode_positions))
# get last position of a barcode nt in the pattern
# reads must be long enough for all
min_readlen = barcode_positions[-1][-1]
logging.info(" last position of a barcode nt in pattern: {}".format(min_readlen))
logging.info("")
# get coordinates of nucleotides to keep
# the tail after the last barcode nt is handled separately
seq_positions = []
last_seq_start = 0
for bcstart, bcstop in barcode_positions:
seq_positions.append((last_seq_start, bcstart))
last_seq_start = bcstop
logging.info(" sequence positions: {}".format(seq_positions))
logging.info(" start of sequence tail: {}".format(last_seq_start))
samout = (open(args.outfile, "w") if args.outfile is not None else stdout)
if args.out_bc_fasta is not None:
faout = open(args.out_bc_fasta, "w")
for header, seq, qual in FastqGeneralIterator(open(args.infile)):
# skip reads that are too short to extract the full requested barcode
if len(seq) < min_readlen:
logging.warning("skipping read '{}', is too short to extract the full requested barcode".format(header))
logging.debug("seq: {}".format(seq))
logging.debug("len(seq): {}".format(len(seq)))
continue
# extract barcode nucleotides
barcode_list = []
barcode_qual_list = []
for bcstart, bcstop in barcode_positions:
barcode_list.append(seq[bcstart:bcstop])
barcode_qual_list.append(qual[bcstart:bcstop])
barcode = "".join(barcode_list)
barcode_quals = "".join(barcode_qual_list)
logging.debug("extracted barcode: {}".format(barcode))
# create new sequence and quality string without barcode nucleotides
new_seq_list = []
new_qual_list = []
for seqstart, seqstop in seq_positions:
new_seq_list.append(seq[seqstart:seqstop])
new_qual_list.append(qual[seqstart:seqstop])
new_seq_list.append(seq[last_seq_start:])
new_qual_list.append(qual[last_seq_start:])
new_seq = "".join(new_seq_list)
new_qual = "".join(new_qual_list)
# check if at least one nucleotide is left. having none would break fastq
if len(new_seq) == 0:
logging.warning("skipping read '{}', no sequence remains after barcode extraction".format(header))
logging.debug("seq: {}".format(seq))
logging.debug("len(seq): {}".format(len(seq)))
continue
# write barcode nucleotides into header
if args.add_to_head:
annotated_header = " ".join([header, barcode])
else:
annotated_header = header
samout.write("@%s\n%s\n+\n%s\n" % (annotated_header, new_seq, new_qual))
# write barcode to fasta if requested
if args.out_bc_fasta is not None:
if args.save_bcs_as_fa:
faout.write(">{}\n{}\n".format(header, barcode))
else:
faout.write("@{}\n{}\n+\n{}\n".format(header, barcode, barcode_quals))
# close files; avoid closing stdout when no outfile was given
if samout is not stdout:
    samout.close()
if args.out_bc_fasta is not None:
faout.close()
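# --- Example invocations (illustrative; flags as defined above) ---
#   extract_bcs.py reads.fastq XXXNNXX -o trimmed.fastq -b barcodes.fastq
#   extract_bcs.py reads.fastq XXXNNXX -b barcodes.fa --fasta-barcodes -a
# The second form saves barcodes as FASTA and also appends each barcode to
# its FASTQ header.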
| 39.143678
| 198
| 0.707238
| 959
| 6,811
| 4.895725
| 0.241919
| 0.03983
| 0.028967
| 0.017891
| 0.204899
| 0.161022
| 0.119915
| 0.094782
| 0.072417
| 0.059638
| 0
| 0.002646
| 0.167817
| 6,811
| 173
| 199
| 39.369942
| 0.825688
| 0.12377
| 0
| 0.227941
| 0
| 0.014706
| 0.322293
| 0.004203
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.044118
| 0
| 0.044118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c68e39b0e1053cfb768407c21209e2d2583bacc2
| 1,226
|
py
|
Python
|
main.py
|
pranavbaburaj/sh
|
dc0da9e10e7935310ae40d350c1897fcd65bce8f
|
[
"MIT"
] | 4
|
2021-01-30T12:25:21.000Z
|
2022-03-13T07:23:19.000Z
|
main.py
|
pranavbaburaj/sh
|
dc0da9e10e7935310ae40d350c1897fcd65bce8f
|
[
"MIT"
] | 3
|
2021-02-26T13:11:17.000Z
|
2021-06-04T17:26:05.000Z
|
main.py
|
pranavbaburaj/sh
|
dc0da9e10e7935310ae40d350c1897fcd65bce8f
|
[
"MIT"
] | 1
|
2021-02-08T10:18:29.000Z
|
2021-02-08T10:18:29.000Z
|
import pyfiglet as figlet
import click
from project import Project, ApplicationRunner
# The application package manager
from package import PackageManager
# print out the application name
def print_app_name(app_name):
figlet_object = figlet.Figlet(font='slant')
return figlet_object.renderText(str(app_name))
# call the project class
# and create a new project
def create_new_project(project_name):
print(print_app_name(project_name))
new_project = Project(project_name)
# call the ApplicationRunner class
# and run the specified project
def run_project(project_name):
run = ApplicationRunner(project_name)
# call the package manager
# and install packages
def get_package(package):
package_manager = PackageManager(package)
@click.command()
@click.argument('command', type=str)
@click.argument('name', type=str)
def index(command, name):
if command == "new":
create_new_project(name)
elif command == "run":
run_project(name)
elif command == "install" or command == "i" or command == "get":
get_package(name)
else:
print(f"{command}:command not found")
if __name__ == "__main__":
index()
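# --- Example usage (illustrative, based on the click commands above) ---
#   python main.py new myapp       # scaffold a new project named "myapp"
#   python main.py run myapp       # run the "myapp" project
#   python main.py install flask   # "i" and "get" are accepted aliases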
| 24.52
| 69
| 0.693312
| 157
| 1,226
| 5.216561
| 0.312102
| 0.094017
| 0.065934
| 0.053724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212887
| 1,226
| 50
| 70
| 24.52
| 0.848705
| 0.170473
| 0
| 0
| 0
| 0
| 0.070907
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.178571
| false
| 0
| 0.142857
| 0
| 0.357143
| 0.107143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6904f6da38987f613861eec004342d5edfec9c2
| 1,339
|
py
|
Python
|
src/21.py
|
peter-hunt/project-euler-solution
|
ce5be80043e892e3a95604bd5ebec9dc88c7c037
|
[
"MIT"
] | null | null | null |
src/21.py
|
peter-hunt/project-euler-solution
|
ce5be80043e892e3a95604bd5ebec9dc88c7c037
|
[
"MIT"
] | null | null | null |
src/21.py
|
peter-hunt/project-euler-solution
|
ce5be80043e892e3a95604bd5ebec9dc88c7c037
|
[
"MIT"
] | null | null | null |
"""
Amicable numbers
Let d(n) be defined as the sum of proper divisors of n
(numbers less than n which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and
each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55
and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and
142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
"""
from math import floor, sqrt
limit = 10_000
def initial_func(limit):
def sum_divisors(n):
result = 1
for i in range(2, floor(sqrt(n)) + 1):  # include floor(sqrt(n)) itself
if n % i == 0:
if i == n // i:
result += i
else:
result += i + n // i
return result
amicables = set()
result = 0
for i in range(2, limit):
if i in amicables:
continue
other = sum_divisors(i)
if other == i:
continue
if sum_divisors(other) == i:
amicables.add(i)
result += i
if other < limit:
amicables.add(other)
result += other
return result
def improved_func(limit):
pass
# 31626
print(initial_func(limit))
# print(improved_func(limit))
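# Sanity check against the docstring's example (illustrative): with the
# corrected loop bound, sum_divisors(220) == 284 and sum_divisors(284) == 220
# inside initial_func, so 220 and 284 are counted as an amicable pair.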
| 21.596774
| 79
| 0.551158
| 203
| 1,339
| 3.600985
| 0.389163
| 0.049248
| 0.065663
| 0.021888
| 0.032832
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075145
| 0.353996
| 1,339
| 61
| 80
| 21.95082
| 0.768786
| 0.38835
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0.033333
| 0.033333
| 0
| 0.2
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|