seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
43427847703 | from .common import deploy
def _parse_sub(subparsers):
parser = subparsers.add_parser("rtt_isolated",
help="Round-trip time for each node in isolation (only node on network)")
return parser
def _main(args, script_fmt):
    """Deploy the benchmark command list to all devices, synchronously.

    Formats *script_fmt* with pub/sub topics and runs it inside the
    mqtt-benchmark checkout on each device in ``args.devices``.
    """
    commands = [
        "cd mqtt-benchmark",
        script_fmt.format(pub="topic", sub="topic"),
    ]
    return deploy(commands, args.devices, sync=True)
| arjunr2/mqtt-benchmark | bench_scripts/rtt_isolated.py | rtt_isolated.py | py | 405 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "common.deploy",
"line_number": 14,
"usage_type": "call"
}
] |
74473655465 | from inspect import getsource
from IPython.core.display import HTML, display
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
_formatter = HtmlFormatter()
def get_source(obj, preprocess=None):
    """Return the syntax-highlighted source of *obj* as an IPython ``HTML`` object.

    Unwraps decorated callables (``original_function`` attribute) and honours a
    pre-extracted ``__source__`` attribute before falling back to
    ``inspect.getsource``. *preprocess*, if given, transforms the source text
    before highlighting.
    """
    if hasattr(obj, 'original_function'):
        obj = obj.original_function
    source = obj.__source__ if hasattr(obj, '__source__') else getsource(obj)
    if preprocess:
        source = preprocess(source)
    return HTML(highlight(source, PythonLexer(), _formatter))
def show_source(obj):
    """Render and display *obj*'s highlighted source in the notebook."""
    display(get_source(obj))
def embed_source_styling(custom_styles='.highlight{margin-left:10px!important; font-size:11px}'):
    """Inject the Pygments CSS (plus *custom_styles*) into the notebook output."""
    base_css = _formatter.get_style_defs('.highlight')
    display(HTML(f'<style>{base_css}{custom_styles}</style>'))
| krassowski/jupyter-helpers | jupyter_helpers/source.py | source.py | py | 983 | python | en | code | 45 | github-code | 36 | [
{
"api_name": "pygments.formatters.HtmlFormatter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "inspect.getsource",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "IPython.core.display.HTML",
"line_number": 21,
"usage_type": "call"
},
{
"api_... |
28969560543 | from django.contrib import admin
from ..models import Player
class PlayerAdmin(admin.ModelAdmin):
    """Django admin options for the Player model."""
    # Columns shown on the admin changelist page, in this display order.
    list_display = (
        'name',
        'lastname',
        'birth_date',
        'team',
        'photo',
        'position',
        'player_number',
        'is_first_team',
    )
admin.site.register(Player, PlayerAdmin)
| dexer13/rebus-project | world_cup/admin/player_admin.py | player_admin.py | py | 332 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 19,
"usage_type": "call"
},... |
19160015551 | """
Contains author & document records in dict() form for self-contained testing
"""
import contextlib
import copy
import time
from collections import defaultdict
from itertools import zip_longest
from unittest.mock import MagicMock
import path_finder
from cache.cache_buddy import CacheMiss, AUTHOR_VERSION_NUMBER, \
DOCUMENT_VERSION_NUMBER
from names.ads_name import ADSName
# Monkey-patch path_finder to recognize our bibcodes and ORCID IDs
path_finder.is_bibcode = lambda x: x.startswith("paper")
path_finder.is_orcid_id = lambda x: "ORCID" in str(x)
path_finder.normalize_orcid_id = lambda x: x
r"""
The authorship graph:
D -- J -- I
| |
K -- A == B == C == F -- H
| | \\ //
L E ---- G
"""
TIME = int(time.time())
empty_document = {
'doctype': 'article', 'keywords': [],
'publication': 'mock', 'pubdate': 'never',
'citation_count': 0, 'read_count': 0,
'timestamp': TIME, 'version': DOCUMENT_VERSION_NUMBER}
documents = {
'paperAB': {
'title': 'Paper Linking A & B',
'authors': ['Author, A.', 'Author, Bbb'],
'affils': ['Univ of A', 'B Center'],
'orcid_ids': [],
'orcid_id_src': '',
**empty_document
},
'paperAB2': {
'title': 'Second Paper Linking A & B',
'authors': ['Author, B.', 'Author, Aaa'],
'affils': ['Univ of B', 'A Institute'],
'orcid_ids': ['ORCID B'],
'orcid_id_src': '3',
**empty_document
},
'paperAE': {
'title': 'Paper Linking A & E',
'authors': ['Author, Aaa', 'Author, Eee E.'],
'affils': ['A Institute', 'E Center for E'],
'orcid_ids': ['ORCID A'],
'orcid_id_src': '1',
**empty_document
},
'paperAK': {
'title': 'Paper Linking A & K',
'authors': ['Author, Aaa', 'Author, K.'],
'affils': ['A Institute', 'K Center for K'],
'orcid_ids': [],
'orcid_id_src': '',
**empty_document
},
'paperBC': {
'title': 'Paper Linking B & C',
'authors': ['Author, C.', 'Author, B.'],
'affils': ['University of C', 'Univ of B'],
'orcid_ids': ['', 'ORCID B'],
'orcid_id_src': '0,1',
**empty_document
},
'paperBCG': {
'title': 'Paper Linking B, C & G',
'authors': ['Author, Bbb', 'Author, C. C.', 'Author, G.'],
'affils': ['B Institute', 'Univ. C', 'G Center for G'],
'orcid_ids': ['Not ORCID B'],
'orcid_id_src': '1',
**empty_document
},
'paperBD': {
'title': 'Paper Linking B & D',
'authors': ['Author, B.', 'Author, D.'],
'affils': ['B Institute', 'D Center for D'],
'orcid_ids': ['ORCID B', 'ORCID D'],
'orcid_id_src': '1,1',
**empty_document
},
'paperBG': {
'title': 'Paper Linking B & G',
'authors': ['Author, Bbb', 'Author, G.'],
'affils': ['B Institute', 'G Center for G'],
'orcid_ids': ['ORCID B'],
'orcid_id_src': '1',
**empty_document
},
'paperCF': {
'title': 'Paper Linking C & F',
'authors': ['Author, C.', 'Author, F.'],
'affils': ['C Institute', 'F Center for F'],
'orcid_ids': [],
'orcid_id_src': '',
**empty_document
},
'paperCF2': {
'title': 'Second Paper Linking C & F',
'authors': ['Author, C.', 'Author, F.'],
'affils': ['C University', 'F Center for F'],
'orcid_ids': [],
'orcid_id_src': '',
**empty_document
},
'paperCG': {
'title': 'Paper Linking C & G',
'authors': ['Author, C.', 'Author, G.'],
'affils': ['C Institute', 'G Center for G at Gtown'],
'orcid_ids': [],
'orcid_id_src': '',
**empty_document
},
'paperDJ': {
'title': 'Paper Linking D & J',
'authors': ['Author, D.', 'Author, J. J.'],
'affils': ['D Institute', 'J Institute, U. J. @ Jtown'],
'orcid_ids': ['', 'ORCID E'],
'orcid_id_src': '0,2',
**empty_document
},
'paperEG': {
'title': 'Paper Linking E & G',
'authors': ['Author, Eee E.', 'Author, G.'],
'affils': ['E Institute', 'G Center for G, Gtown'],
'orcid_ids': ['ORCID E'],
'orcid_id_src': '3',
**empty_document
},
'paperFH': {
'title': 'Paper Linking F & H',
'authors': ['Author, F.', 'Author, H.'],
'affils': ['F Institute | Fville', 'H Center for H'],
'orcid_ids': [],
'orcid_id_src': '',
**empty_document
},
'paperFI': {
'title': 'Paper Linking F & I',
'authors': ['Author, F.', 'Author, I.'],
'affils': ['F Institute, Fville, Fstate, 12345', 'I Center for I'],
'orcid_ids': ['', 'ORCID I'],
'orcid_id_src': '0,3',
**empty_document
},
'paperIJ': {
'title': 'Paper Linking J & I',
'authors': ['Author, J. J.', 'Author, I.'],
'affils': ['J Center, University of J, Other town', 'I Center for I'],
'orcid_ids': ['', 'ORCID I'],
'orcid_id_src': '0,2',
**empty_document
},
'paperKL': {
'title': 'Paper Linking K & L',
'authors': ['Author, L.', 'Author, K.'],
'affils': ['L Institute', 'K Center for K'],
'orcid_ids': [],
'orcid_id_src': '',
**empty_document
},
'paperKL2': {
'title': "Paper Linking K and two L's",
'authors': ['Author, L.', 'Author, L. L.', 'Author, K.'],
'affils': ['L Institute', 'L U', 'K Center for K'],
'orcid_ids': [],
'orcid_id_src': '',
**empty_document
},
'paperUncon': {
'title': 'Paper Linking Uncon1 & Uncon2',
'authors': ['author, unconnected b.', 'author, unconnected a.'],
'affils': ['B Institute', 'A Center for A'],
'orcid_ids': [],
'orcid_id_src': '',
**empty_document
},
}
authors = {author for doc in documents.values() for author in doc['authors']}
for bibcode, document in documents.items():
document['bibcode'] = bibcode
def refresh():
    """No-op: the in-memory mock backing store has nothing to refresh."""
    pass
store_document = MagicMock()
store_documents = store_document
def delete_document(*args, **kwargs):
    """The mock cache is read-only: any delete attempt is a test error."""
    raise RuntimeError("Should not delete from mock cache")
def load_document(key):
    """Return a deep copy of the mock document record for *key*.

    Raises CacheMiss when *key* is not one of the canned documents.
    Copying keeps callers from mutating the shared fixtures.
    """
    try:
        record = documents[key]
    except KeyError:
        raise CacheMiss(key)
    return copy.deepcopy(record)
def load_documents(keys):
    """Load every document in *keys*; raises CacheMiss on the first miss."""
    return list(map(load_document, keys))
store_author = MagicMock()
delete_author = delete_document
def author_is_in_cache(key):
    """Return True when load_author() can serve *key* without a CacheMiss."""
    try:
        load_author(key)
    except CacheMiss:
        return False
    return True
def authors_are_in_cache(keys):
    """Vectorised author_is_in_cache() over an iterable of keys."""
    return list(map(author_is_in_cache, keys))
def load_author(key):
    """Build a mock author record for *key* from the canned documents.

    *key* may be an author name (parsed with ADSName) or an ORCID id
    (any string containing "ORCID"). Keys starting with '<', '>' or '='
    always miss. Raises CacheMiss when the author matches no document,
    unless the key ends with "nodocs".
    """
    if key[0] in '<>=':
        raise CacheMiss(key)
    orcid = "ORCID" in key
    if orcid:
        name = None
    else:
        name = ADSName.parse(key)
    docs = []
    coauthors = defaultdict(list)
    appears_as = defaultdict(list)
    for bibcode, document in documents.items():
        matched = None
        # Go through the document's authors until/if we find our search author
        for orcid_id, author in zip_longest(
                document['orcid_ids'], document['authors']):
            if orcid and orcid_id == key:
                matched = author
                # Keep the most specific name seen across all ORCID matches.
                aname = ADSName.parse(author)
                if name is None or aname.is_more_specific_than(name):
                    name = aname
            elif not orcid and name == author:
                matched = author
        if matched is not None:
            # Record this document and index the alias/coauthor occurrences
            # by position within the docs list.
            docs.append(bibcode)
            idx = len(docs) - 1
            appears_as[matched].append(idx)
            for coauthor in document['authors']:
                if coauthor != matched:
                    coauthors[coauthor].append(idx)
    if len(docs) or key.endswith("nodocs"):
        # Serialize index lists as comma-separated strings, matching the
        # real cache's storage format.
        for coauthor, coauthor_dat in coauthors.items():
            coauthors[coauthor] = ','.join(str(i) for i in coauthor_dat)
        for alias, alias_dat in appears_as.items():
            appears_as[alias] = ','.join(str(i) for i in alias_dat)
        return {
            # defaultdict doesn't play nicely with AuthorRecord's asdict()
            'name': name.qualified_full_name,
            'documents': docs,
            'coauthors': dict(**coauthors),
            'appears_as': dict(**appears_as),
            'timestamp': TIME,
            'version': AUTHOR_VERSION_NUMBER,
        }
    else:
        raise CacheMiss(key)
def load_authors(keys):
    """Load every author in *keys*; raises CacheMiss on the first miss."""
    return list(map(load_author, keys))
def store_progress_data(*args, **kwargs):
    """No-op: progress data is silently discarded by the mock cache."""
    pass
delete_progress_data = delete_document
def load_progress_data(*args, **kwargs):
    """Tests must never read progress data back from the mock cache."""
    raise RuntimeError("Should not load progress from mock cache")
def clear_stale_data(*args, **kwargs):
    """No-op: the canned mock data never goes stale."""
    pass
# A dummy batch manager
@contextlib.contextmanager
def batch():
    """Dummy batch manager: yields True so `with batch():` bodies run once."""
    yield True
| svank/appa-backend | appa/tests/mock_backing_cache.py | mock_backing_cache.py | py | 9,020 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "path_finder.is_bibcode",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "path_finder.is_orcid_id",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "path_finder.normalize_orcid_id",
"line_number": 20,
"usage_type": "attribute"
... |
61098782 | from collections import deque
from typing import Union
import numpy as np
from stlpy.STL import STLTree, STLFormula, LinearPredicate
COLORED = False
if COLORED:
from termcolor import colored
else:
def colored(text, color):
return text
class STL:
    """Nested-list AST wrapper for Signal Temporal Logic formulas.

    The AST is a list whose head is an operator string ("~", "&", "|",
    "->", "G", "F", "U"); temporal operators carry trailing (start, end)
    bounds. Leaves are stlpy formula objects. The stlpy conversion and
    the string representation are both computed lazily and cached.
    """

    def __init__(self, ast: Union[list, str, STLTree, STLFormula, LinearPredicate]):
        self.ast = ast
        self.single_operators = ("~", "G", "F")
        self.binary_operators = ("&", "|", "->", "U")
        self.sequence_operators = ("G", "F", "U")
        self.stlpy_form = None  # cached stlpy formula, built on demand
        self.expr_repr = None   # cached __repr__ string
    """
    Syntax Functions
    """
    def __and__(self, other: 'STL') -> 'STL':
        """Conjunction: ``self & other``."""
        ast = ["&", self.ast, other.ast]
        return STL(ast)
    def __or__(self, other: 'STL') -> 'STL':
        """Disjunction: ``self | other``."""
        ast = ["|", self.ast, other.ast]
        return STL(ast)
    def __invert__(self) -> 'STL':
        """Negation: ``~self``."""
        ast = ["~", self.ast]
        return STL(ast)
    def implies(self, other: 'STL') -> 'STL':
        """Implication ``self -> other``."""
        ast = ["->", self.ast, other.ast]
        return STL(ast)
    def eventually(self, start: int = 0, end: int = None):
        """Temporal 'eventually' (F) over the interval [start, end]."""
        ast = ["F", self.ast, start, end]
        return STL(ast)
    def always(self, start: int = 0, end: int = None) -> 'STL':
        """Temporal 'always' (G) over the interval [start, end]."""
        ast = ["G", self.ast, start, end]
        return STL(ast)
    def until(self, other: 'STL', start: int = 0, end: int = None) -> 'STL':
        """Temporal 'until' (U): self holds until other, within [start, end]."""
        ast = ["U", self.ast, other.ast, start, end]
        return STL(ast)
    def get_stlpy_form(self):
        """Convert this AST to an stlpy formula, caching the result."""
        # catch already converted form
        if self.stlpy_form is None:
            self.stlpy_form = self._to_stlpy(self.ast)
        return self.stlpy_form
    def _to_stlpy(self, ast) -> STLTree:
        """Recursively translate *ast* into an stlpy formula.

        Raises ValueError for bare-string leaves, which are unsupported.
        """
        if self._is_leaf(ast):
            if isinstance(ast, str):
                raise ValueError(f"str variable {ast} not supported")
            self.stlpy_form = ast
            return ast
        # Dispatch on the operator stored at the head of the AST node.
        if ast[0] == "~":
            self.stlpy_form = self._handle_not(ast)
        elif ast[0] == "G":
            self.stlpy_form = self._handle_always(ast)
        elif ast[0] == "F":
            self.stlpy_form = self._handle_eventually(ast)
        elif ast[0] == "&":
            self.stlpy_form = self._handle_and(ast)
        elif ast[0] == "|":
            self.stlpy_form = self._handle_or(ast)
        elif ast[0] == "->":
            self.stlpy_form = self._handle_implies(ast)
        elif ast[0] == "U":
            self.stlpy_form = self._handle_until(ast)
        else:
            raise ValueError(f"Unknown operator {ast[0]}")
        return self.stlpy_form
    def _handle_not(self, ast):
        """Negate the single operand."""
        sub_form = self._to_stlpy(ast[1])
        return sub_form.negation()
    def _handle_and(self, ast):
        """Conjoin the two operands."""
        sub_form_1 = self._to_stlpy(ast[1])
        sub_form_2 = self._to_stlpy(ast[2])
        return sub_form_1 & sub_form_2
    def _handle_or(self, ast):
        """Disjoin the two operands."""
        sub_form_1 = self._to_stlpy(ast[1])
        sub_form_2 = self._to_stlpy(ast[2])
        return sub_form_1 | sub_form_2
    def _handle_implies(self, ast):
        """Encode implication as (~a) | b."""
        sub_form_1 = self._to_stlpy(ast[1])
        sub_form_2 = self._to_stlpy(ast[2])
        return sub_form_1.negation() | sub_form_2
    def _handle_eventually(self, ast):
        """Apply stlpy 'eventually' with the node's (start, end) bounds."""
        sub_form = self._to_stlpy(ast[1])
        return sub_form.eventually(ast[2], ast[3])
    def _handle_always(self, ast):
        """Apply stlpy 'always' with the node's (start, end) bounds."""
        sub_form = self._to_stlpy(ast[1])
        return sub_form.always(ast[2], ast[3])
    def _handle_until(self, ast):
        """Apply stlpy 'until' with the node's (start, end) bounds."""
        sub_form_1 = self._to_stlpy(ast[1])
        sub_form_2 = self._to_stlpy(ast[2])
        return sub_form_1.until(sub_form_2, ast[3], ast[4])
    @staticmethod
    def _is_leaf(ast):
        """A leaf is an stlpy formula object or a bare string."""
        return issubclass(type(ast), STLFormula) or isinstance(ast, str)
    def simplify(self):
        """Simplify the cached stlpy form in place (converting first if needed)."""
        if self.stlpy_form is None:
            self.get_stlpy_form()
        self.stlpy_form.simplify()
    def __repr__(self):
        """Pretty-print the AST, optionally colorized, caching the string."""
        if self.expr_repr is not None:
            return self.expr_repr
        single_operators = ("~", "G", "F")
        binary_operators = ("&", "|", "->", "U")
        time_bounded_operators = ("G", "F", "U")
        # Iterative (explicit-stack) traversal of the AST; items pushed are
        # either AST nodes, operator strings, parentheses, or "[a, b]" windows.
        operator_stack = [self.ast]
        expr = ""
        cur = self.ast
        def push_stack(ast):
            # When pushing a time-bounded operator, also push its window,
            # read from the node currently being expanded (closure over cur).
            if isinstance(ast, str) and ast in time_bounded_operators:
                time_window = f"[{cur[-2]}, {cur[-1]}]"
                operator_stack.append(time_window)
            operator_stack.append(ast)
        while operator_stack:
            cur = operator_stack.pop()
            if self._is_leaf(cur):
                expr += cur.__str__()
            elif isinstance(cur, str):
                if cur == "(" or cur == ")":
                    expr += cur
                elif cur.startswith("["):
                    # A "[start, end]" time window.
                    expr += colored(cur, "yellow") + " "
                else:
                    # Operator token: choose spacing/color by operator class.
                    if cur in ("G", "F"):
                        if cur == "F":
                            expr += colored("F", "magenta")
                        else:
                            expr += colored(cur, "magenta")
                    elif cur in ("&", "|", "->", "U"):
                        expr += " " + colored(cur, "magenta")
                        if cur != "U":
                            expr += " "
                    elif cur in ("~",):
                        expr += colored(cur, "magenta")
            elif cur[0] in single_operators:
                # single operator: parenthesize non-leaf operand
                if not self._is_leaf(cur[1]):
                    push_stack(")")
                push_stack(cur[1])
                if not self._is_leaf(cur[1]):
                    push_stack("(")
                push_stack(cur[0])
            elif cur[0] in binary_operators:
                # binary operator: parenthesize nested binary sub-expressions
                if not self._is_leaf(cur[2]) and cur[2][0] in binary_operators:
                    push_stack(")")
                    push_stack(cur[2])
                    push_stack("(")
                else:
                    push_stack(cur[2])
                push_stack(cur[0])
                if not self._is_leaf(cur[1]) and cur[1][0] in binary_operators:
                    push_stack(")")
                    push_stack(cur[1])
                    push_stack("(")
                else:
                    push_stack(cur[1])
        self.expr_repr = expr
        return expr
    def get_all_predicates(self):
        """Return all leaf predicates of the AST via breadth-first traversal."""
        all_preds = []
        queue = deque([self.ast])
        while queue:
            cur = queue.popleft()
            if self._is_leaf(cur):
                all_preds.append(cur)
            elif cur[0] in self.single_operators:
                queue.append(cur[1])
            elif cur[0] in self.binary_operators:
                queue.append(cur[1])
                queue.append(cur[2])
            else:
                raise RuntimeError("Should never visit here")
        return all_preds
def inside_rectangle_formula(bounds, y1_index, y2_index, d, name=None):
    """Build an STL formula that holds when the signal lies inside a rectangle.

    :param bounds: tuple ``(y1_min, y1_max, y2_min, y2_max)`` of rectangle bounds
    :param y1_index: signal dimension used as the first rectangle axis
    :param y2_index: signal dimension used as the second rectangle axis
    :param d: total dimension of the signal
    :param name: optional display name attached to the resulting formula
    :return: an ``STLFormula`` specifying being inside the rectangle at time zero
    """
    assert y1_index < d, "index must be less than signal dimension"
    assert y2_index < d, "index must be less than signal dimension"
    y1_min, y1_max, y2_min, y2_max = bounds

    def axis_selector(index):
        # Row vector selecting a single coordinate of the d-dimensional signal.
        sel = np.zeros((1, d))
        sel[:, index] = 1
        return sel

    a1 = axis_selector(y1_index)
    a2 = axis_selector(y2_index)
    # One half-plane predicate a*y >= b per rectangle side.
    right = LinearPredicate(a1, y1_min)
    left = LinearPredicate(-a1, -y1_max)
    top = LinearPredicate(a2, y2_min)
    bottom = LinearPredicate(-a2, -y2_max)
    # Inside the rectangle = conjunction of all four half-planes.
    inside_rectangle = right & left & top & bottom
    if name is not None:
        # NOTE(review): assigning __str__/__repr__ on the *instance* does not
        # affect str()/repr(), which look these up on the type; kept verbatim
        # because explicit obj.__str__() calls still return the name.
        inside_rectangle.__str__ = lambda: str(name)
        inside_rectangle.__repr__ = lambda: str(name)
    return inside_rectangle
def outside_rectangle_formula(bounds, y1_index, y2_index, d, name=None):
    """Build an STL formula that holds when the signal lies outside a rectangle.

    :param bounds: tuple ``(y1_min, y1_max, y2_min, y2_max)`` of rectangle bounds
    :param y1_index: signal dimension used as the first rectangle axis
    :param y2_index: signal dimension used as the second rectangle axis
    :param d: total dimension of the signal
    :param name: optional display name attached to the resulting formula
    :return: an ``STLFormula`` specifying being outside the rectangle at time zero
    """
    assert y1_index < d, "index must be less than signal dimension"
    assert y2_index < d, "index must be less than signal dimension"
    y1_min, y1_max, y2_min, y2_max = bounds

    def axis_selector(index):
        # Row vector selecting a single coordinate of the d-dimensional signal.
        sel = np.zeros((1, d))
        sel[:, index] = 1
        return sel

    a1 = axis_selector(y1_index)
    a2 = axis_selector(y2_index)
    # One half-plane predicate a*y >= b per rectangle side (pointing outward).
    right = LinearPredicate(a1, y1_max)
    left = LinearPredicate(-a1, -y1_min)
    top = LinearPredicate(a2, y2_max)
    bottom = LinearPredicate(-a2, -y2_min)
    # Outside the rectangle = disjunction of the four outward half-planes.
    outside_rectangle = right | left | top | bottom
    if name is not None:
        # NOTE(review): instance-level __str__/__repr__ assignments do not
        # change str()/repr() (type lookup); kept verbatim for parity with
        # inside_rectangle_formula.
        outside_rectangle.__str__ = lambda: str(name)
        outside_rectangle.__repr__ = lambda: str(name)
    return outside_rectangle
| ZikangXiong/STL-Mobile-Robot | src/stl_mob/stl/stl.py | stl.py | py | 10,389 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "typing.Union",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "stlpy.STL.STLTree",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "stlpy.STL.STLFormula",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "stlpy.STL.LinearP... |
41566426879 | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_readonly(host):
    """A file cannot be created on the read-only NFS mount."""
    f = '/mnt/ro/hello-ro'
    with host.sudo('test'):
        # testinfra interpolates extra args into the command printf-style.
        c = host.run('touch %s', f)
    # touch must fail and leave no file behind.
    assert c.rc == 1
    assert not host.file(f).exists
def test_readwrite(host):
    """A file can be created and then removed on the read-write NFS mount."""
    f = '/mnt/rw/hello-rw'
    with host.sudo('test'):
        c1 = host.run('touch %s', f)
    # Creation succeeds and the file is visible.
    assert c1.rc == 0
    assert host.file(f).exists
    with host.sudo('test'):
        c2 = host.run('rm %s', f)
    # Removal succeeds and the file is gone.
    assert c2.rc == 0
    assert not host.file(f).exists
| ome/ansible-role-nfs-mount | molecule/default/tests/test_default.py | test_default.py | py | 640 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "testinfra.utils.ansible_runner.utils.ansible_runner.AnsibleRunner",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "testinfra.utils.ansible_runner.utils",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "testinfra.utils.ansible_runner",
"l... |
889427858 | from connection import create_connection
import numpy as np,numpy.random
from numpy.core.fromnumeric import size
import requests
from bson.objectid import ObjectId
from tag_classes import classifications
import random
def random_classification():
    """Return a random classification value for every known tag.

    Draws one random value per tag from the ``classifications`` mapping
    (tag name -> list of possible values).
    """
    # dict comprehension replaces the manual accumulate loop and fixes the
    # misspelled local name ('random_classifcations') in the original.
    return {tag: random.choice(values) for tag, values in classifications.items()}
def classify_tags(par,document_id):
    """Classify each paragraph in *par* via the local tag service and store
    the resulting tag lists on the Mongo document *document_id*.

    :param par: iterable of paragraph strings to classify
    :param document_id: hex string id of the target document
    :return: "Updated" on success, or the caught exception object on failure
    """
    try:
        # One list per tag category; entry i corresponds to paragraph par[i].
        tag_1_applicability = []
        tag_2_area = []
        tag_3_covenant_type = []
        tag_4_covenant_title_tag = []
        tag_5_covenant_description_sub_tags = []
        Tag_6_User_Defined = []
        for i in par:
            # NOTE(review): service endpoint is hard-coded to localhost:5000 —
            # confirm this is intended outside local development.
            res = requests.post("http://127.0.0.1:5000/classify/tags",json = {"data":i}).json()
            tag_1_applicability.append(res["tag_1_applicability"])
            tag_2_area.append(res["tag_2_area"])
            tag_3_covenant_type.append(res["tag_3_covenant_type"])
            tag_4_covenant_title_tag.append(res["tag_4_covenant_title_tag"])
            tag_5_covenant_description_sub_tags.append(res["tag_5_covenant_description_sub_tags"])
            Tag_6_User_Defined.append(res["Tag_6_User_Defined"])
        tags = {"tag_1_applicability":tag_1_applicability,"tag_2_area":tag_2_area,
                "tag_3_covenant_type":tag_3_covenant_type,"tag_4_covenant_title_tag":tag_4_covenant_title_tag,
                "tag_5_covenant_description_sub_tags":tag_5_covenant_description_sub_tags,
                "Tag_6_User_Defined":Tag_6_User_Defined}
        db= create_connection()
        # One $set per tag field on the matched document.
        # NOTE(review): collection.update() is deprecated in pymongo —
        # presumably update_one() is the modern equivalent; verify driver version.
        for i in tags:
            db.update({'_id': ObjectId('{}'.format(document_id)) },{ "$set" : {i:tags[i]}})
        print("Tags inserted in Document ID {}".format(document_id))
        return "Updated"
    except Exception as e:
        # Best-effort: log and return the exception instead of raising.
        print(e)
        return e
| saarthakbabuta1/loan-agreement | classify.py | classify.py | py | 1,858 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tag_classes.classifications.keys",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tag_classes.classifications",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 14,
"usage_type": "call"
},
{
"api_na... |
20580965010 | from django import forms
from .models import Recipe
from channel.models import Channel
class RecipeForm(forms.ModelForm):
    """Form for a Recipe, adding channel selectors for its trigger and action."""
    def __init__(self, *args, **kwargs):
        super(RecipeForm, self).__init__(*args, **kwargs)
        # When editing an existing recipe, pre-select the channels the
        # recipe's trigger and action currently belong to.
        if self.instance.id:
            self.fields['trigger_channel'].initial = self.instance.trigger.channel
            self.fields['action_channel'].initial = self.instance.action.channel
    # Extra (non-model) fields listing all available channels.
    trigger_channel = forms.ModelChoiceField(queryset=Channel.objects.all())
    action_channel = forms.ModelChoiceField(queryset=Channel.objects.all())
    class Meta:
        model = Recipe
        fields = ('trigger', 'action')
| theju/dtwt | recipe/forms.py | forms.py | py | 650 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelChoiceField",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "... |
20681014178 | __author__ = 'elmira'
import numpy as np
import itertools
from matplotlib import mlab
import re
with open('corpus1.txt', encoding='utf-8') as f:
news = f.read()
with open('corpus2.txt', encoding='utf-8') as f:
anna = f.read()
anna_sentences = re.split(r'(?:[.]\s*){3}|[.?!]', anna)
news_sentences = re.split(r'(?:[.]\s*){3}|[.?!]', news)
def words(sentence):
    """Tokenise *sentence*: lower-case it, then split on runs of whitespace."""
    lowered = sentence.lower()
    return lowered.split()
def word_lens(sentence):
    """Character length of every word in *sentence* (a list of words)."""
    return list(map(len, sentence))
def different_letters(sentence):
    """Count the distinct Russian letters used across all words of *sentence*."""
    russian_letters = 'ёйцукенгшщзхъфывапролджэячсмитьбю'
    # Flatten words to characters and keep only Cyrillic letters, deduplicated.
    seen = {ch for word in sentence for ch in word if ch in russian_letters}
    return len(seen)
def vowels(word):
    """Count the Russian vowels in *word*."""
    vowel_arr = 'ёуеэоаыяию'
    return sum(1 for letter in word if letter in vowel_arr)
def vowels_in_sent(sentence):
    """Per-word Russian vowel counts for *sentence* (a list of words)."""
    # число гласных в предложении (number of vowels per word of the sentence)
    return list(map(vowels, sentence))
anna_sent = [words(sentence) for sentence in anna_sentences if len(words(sentence)) > 0]
news_sent = [words(sentence) for sentence in news_sentences if len(words(sentence)) > 0]
anna_data = [(sum(word_lens(sentence)), # длина предложения в буквах,
different_letters(sentence), # число различных букв в предложении,
sum(vowels_in_sent(sentence)), # число гласных в предложении,
np.median(word_lens(sentence)), # медиана числа букв в слове,
np.median(vowels_in_sent(sentence))) # медиана числа гласных в слове.
for sentence in anna_sent]
news_data = [(sum(word_lens(sentence)),
different_letters(sentence),
sum(vowels_in_sent(sentence)),
np.median(word_lens(sentence)),
np.median(vowels_in_sent(sentence)))
for sentence in news_sent]
from matplotlib import pyplot as plt
anna_data = np.array(anna_data)
news_data = np.array(news_data)
# HOMEWORK STARTS HERE (original comment: "ВОТ ДЗ:")
# Stack both corpora's feature rows and project them with PCA.
data = np.vstack((anna_data, news_data))
# NOTE(review): matplotlib.mlab.PCA was removed in matplotlib 3.1 — this
# only runs on old matplotlib versions; confirm the pinned version or port
# to a numpy/scipy-based PCA.
p = mlab.PCA(data, True)
N = len(anna_data)
# First N projected points (first corpus) as green circles, the rest
# (second corpus) as blue squares, in the first two principal components.
plt.plot(p.Y[:N,0], p.Y[:N,1], 'og', p.Y[N:,0], p.Y[N:,1], 'sb')
plt.show()
print(p.Wt)
{
"api_name": "re.split",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 52,
... |
35387136484 | #!/usr/bin/env python3
from sys import stderr
from multilanguage import Env, Lang, TALcolors
from TALinputs import TALinput
from TALfiles import TALfilesHelper
import os
import random
import networkx as nx
import vertex_cover_lib as vcl
import matplotlib
import multiprocessing
# METADATA OF THIS TAL_SERVICE:
# Declared service parameters: (name, type) pairs consumed by Env below.
args_list = [
    ('source',str),
    ('collection',str),
    ('instance_id',int),
    ('instance_format',str),
    ('num_vertices',int),
    ('num_edges',int),
    ('plot',bool),
    ('plot_sol',bool),
    ('seed',str),
    ('vc_sol_val',str),
    ('display',bool),
    ('silent',bool),
    ('lang',str),
]
ENV = Env(args_list)    # parsed service arguments / environment
TAc = TALcolors(ENV)    # colored terminal-output helper
# Localized feedback renderer; the lambda lets Lang evaluate f-string
# templates lazily at render time.
LANG = Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"), print_opening_msg = 'now')
TALf = TALfilesHelper(TAc, ENV)
chk_backend = False
if matplotlib.get_backend().lower() in map(str.lower,vcl.backends):
chk_backend = True
## Input Sources
if TALf.exists_input_file('instance'):
instance = vcl.get_instance_from_str(TALf.input_file_as_str('instance'), instance_format_name=ENV["instance_format"])
TAc.print(LANG.render_feedback("successful-load", 'The file you have associated to `instance` filehandler has been successfully loaded.'), "yellow", ["bold"])
elif ENV["source"] == 'terminal':
instance = {}
instance['num_vertices'] = ENV['num_vertices']
instance['num_edges'] = ENV['num_edges']
#TAc.print(LANG.render_feedback("waiting-line", f'#? Waiting for the graph.\nGraph format: (x,y) (w,z) ... (n,m)\n'), "yellow")
TAc.print(LANG.render_feedback("waiting-line", f'#? Waiting for the graph.\n'), "yellow")
TAc.print(LANG.render_feedback("insert-edges", f'Given {ENV["num_vertices"]} vertices labelled with the naturals in the interval [0,{ENV["num_vertices"]-1}], you are now expected to enter {ENV["num_edges"]} edges. To specify an edge, simply enter its two endonodes separated by spaces.'), "yellow", ["bold"])
edges = []
for i in range(1,1+ENV["num_edges"]):
TAc.print(LANG.render_feedback("insert-edge", f'Insert the two endpoints of edge {i}, that is, enter a line with two naturals in the interval [0,{ENV["num_vertices"]-1}], separated by spaces.'), "yellow", ["bold"])
u,v = TALinput(int, 2, TAc=TAc)
edges.append([u,v])
for u,v in edges:
if u not in range(ENV['num_vertices']) or v not in range(ENV['num_vertices']):
TAc.print(f'Edge ({u}, {v}) is not a valid edge for the graph. Aborting.\n', "red", ["bold"], flush=True)
exit(0)
if len(edges) != ENV['num_edges']:
TAc.print(LANG.render_feedback("wrong-edges-number", f'\nWrong number of edges ({len(edges)} instead of {ENV["num_edges"]})\n'), "red", ["bold"])
exit(0)
G = nx.Graph()
G.add_nodes_from([int(v) for v in range(ENV['num_vertices'])])
G.add_edges_from(edges)
instance['graph'] = G
instance_str = vcl.instance_to_str(instance, format_name=ENV['instance_format'])
output_filename = f"terminal_instance.{ENV['instance_format']}.txt"
elif ENV["source"] == 'randgen_1':
# Get random instance
instance = vcl.instances_generator(1, 1, ENV['num_vertices'], ENV['num_edges'], ENV['seed'])[0]
else: # take instance from catalogue
#instance_str = TALf.get_catalogue_instancefile_as_str_from_id_and_ext(ENV["instance_id"], format_extension=vcl.format_name_to_file_extension(ENV["instance_format"],'instance'))
instance_str = TALf.get_catalogue_instancefile_as_str_from_id_collection_and_ext(ENV["collection"], ENV["instance_id"], format_extension=vcl.format_name_to_file_extension(ENV["instance_format"],'instance'))
instance = vcl.get_instance_from_str(instance_str, instance_format_name=ENV["instance_format"])
TAc.print(LANG.render_feedback("instance-from-catalogue-successful", f'The instance with instance_id={ENV["instance_id"]} has been successfully retrieved from the catalogue.'), "yellow", ["bold"], flush=True)
if ENV['display']:
TAc.print(LANG.render_feedback("this-is-the-instance", '\nThis is the instance:\n'), "white", ["bold"], flush=True)
TAc.print(vcl.instance_to_str(instance,ENV["instance_format"]), "white", ["bold"], flush=True)
if ENV['vc_sol_val'] == '0': # manual insertion
TAc.print(LANG.render_feedback("insert-opt-value", f'\nWrite here your conjectured maximal matching size for this graph if you have one. Otherwise, if you only intend to be told about the approximation, enter "C".'), "yellow", ["bold"], flush=True)
if ENV['plot'] and chk_backend:
proc = multiprocessing.Process(target=vcl.plot_graph, args=(instance['graph'],))
proc.start()
#vcl.plot_graph(instance['graph'])
choice = TALinput(str, 1, TAc=TAc)
if choice[0] != 'C' and choice[0] != 'c':
if not choice[0].isdigit():
TAc.print(LANG.render_feedback("invalid-input", f'Input must be an integer number or "C". Aborting.\n'), "red", ["bold"], flush=True)
if ENV['plot'] and chk_backend:
proc.terminate()
exit(0)
TAc.print(LANG.render_feedback("waiting-matching", f'Please, provide the maximal matching:'), "yellow", ["bold"], flush=True)
answer = []
for i in range(int(choice[0])):
TAc.print(LANG.render_feedback("insert-edge", f'Insert the two endpoints of edge {i}, that is, enter a line with two naturals in the interval [0,{ENV["num_vertices"]-1}], separated by spaces.'), "yellow", ["bold"], flush=True)
u,v = TALinput(int, 2, TAc=TAc)
answer.append((u,v))
else:
answer = [eval(t) for t in ENV['vc_sol_val'].split()]
choice = ' '
if choice[0] != 'C' and choice[0] != 'c':
for t in answer:
if t not in instance['graph'].edges():
TAc.print(LANG.render_feedback("edge-not-in-graph", f'Edge {t} is not an edge of the graph. Aborting.\n'), "red", ["bold"], flush=True)
if ENV['plot'] and chk_backend:
proc.terminate()
exit(0)
if (ENV['source'] == "catalogue" and instance['exact_sol'] == 1) or (ENV['source'] != "catalogue"):
size_sol,appr_sol,max_matching = vcl.calculate_approx_vc(instance['graph'], 'greedy')
else:
#appr_sol = instance['sol'].replace(')(',' ').replace('(','').replace(')','').replace(',','')
#max_matching = instance['sol']
if not instance['weighted']:
sol = instance['sol'].split('\n')
appr_sol = sol[0]
max_matching = sol[1]
size_sol = len([int(i) for i in appr_sol.split() ])
else:
size_sol,appr_sol,max_matching = vcl.calculate_approx_vc(instance['graph'], 'greedy')
if choice[0] == 'C' or choice[0] == 'c':
TAc.print(LANG.render_feedback("best-sol", f'A possible 2-approximated vertex cover is: '), "green", ["bold"], flush=True, end='')
TAc.print(f'{appr_sol}.', "white", ["bold"], flush=True)
TAc.print(LANG.render_feedback("min-maximal-matching", f'A possible maximal matching is: '), "green", ["bold"], flush=True, end='')
TAc.print(f'{max_matching}.', "white", ["bold"], flush=True)
TAc.print(LANG.render_feedback("size-sol", f'The size of the 2-approximated vertex cover is: '), "green", ["bold"], flush=True, end='')
TAc.print(f'{size_sol}.', "white", ["bold"], flush=True)
else:
for e in answer:
if e not in instance['graph'].edges():
TAc.print(LANG.render_feedback("edge-not-in-graph", f'Edge {e} not in the graph. Aborting.'), "red", ["bold"], flush=True)
if ENV['plot'] and chk_backend:
proc.terminate()
exit(0)
size_ans = 2 * (len(answer))
is_vertex_cover, reason, data = vcl.verify_approx_vc(answer, instance['graph'], 1)
if is_vertex_cover:
if size_ans == size_sol:
TAc.OK()
TAc.print(LANG.render_feedback("right-best-sol", f'We agree, the solution you provided is a valid 2-approximation vertex cover for the graph.'), "white", ["bold"], flush=True)
elif size_ans > size_sol:
TAc.print(LANG.render_feedback("right-sol", f'The solution you provided is a valid 2-approximation vertex cover for the graph. You can improve your approximation.'), "yellow", ["bold"], flush=True)
else:
TAc.OK()
TAc.print(LANG.render_feedback("new-best-sol", f'Great! The solution you provided is a valid 2-approximation vertex cover for the graph and it\'s better than mine!'), "green", ["bold"], flush=True)
if ENV['source'] == 'catalogue' and not instance['exact_sol'] and not instance['weighted']:
#path=os.path.join(ENV.META_DIR, 'instances_catalogue', 'all_instances')
path=os.path.join(ENV.META_DIR, 'instances_catalogue', ENV['collection'])
instance_filename = f'instance_{str(ENV["instance_id"]).zfill(3)}'
answer = ' '.join(map(str, answer))
risp = f'{answer.replace(",", " ").replace("(", "").replace(")","")}'
#matching = f'{answer.replace(",",", ").replace(") (", ")(")}'
matching = f'{answer.replace(",",", ")}'
new_data = f'{risp}\n{matching}'
#vcl.update_instance_txt(path, instance_filename, answer)
vcl.update_instance_txt(path, instance_filename, new_data)
else:
TAc.NO()
TAc.print(LANG.render_feedback("wrong-sol", f'We don\'t agree, the solution you provided is not a valid 2-approximation vertex cover for the graph.'), "red", ["bold"], flush=True)
if reason == 1:
TAc.print(LANG.render_feedback("edge-incident", f'Reason: edge {data} incident to another one.'), "red", ["bold"], flush=True)
elif reason == 2:
TAc.print(LANG.render_feedback("not-vertex-cover", f'Reason: not a vertex cover. Edges not covered: '), "red", ["bold"], flush=True, end='')
for t in data:
TAc.print(f'{t} ', "red", ["bold"], flush=True, end='')
elif reason == 3:
TAc.print(LANG.render_feedback("node-already-visited", f'Reason: vertex {data} already visited.'), "red", ["bold"], flush=True)
print()
if ENV['plot_sol'] and chk_backend:
if ENV['plot']:
proc.terminate()
if choice[0] != 'C' and choice[0] != 'c':
vertices = ' '.join(map(str, answer)).replace('(', '').replace(') (',' ').replace(')','').replace(',',' ')
proc1 = multiprocessing.Process(target=vcl.plot_2app_vc, args=(instance['graph'],vertices,answer))
proc1.start()
#vcl.plot_2app_vc(instance['graph'], vertices, answer)
else:
proc1 = multiprocessing.Process(target=vcl.plot_2app_vc, args=(instance['graph'],appr_sol,[eval(t) for t in max_matching.replace(', ',',').split()]))
proc1.start()
#vcl.plot_2app_vc(instance['graph'], appr_sol, [eval(t) for t in max_matching.replace(', ',',').split()])
exit(0)
| romeorizzi/TALight | example_problems/tutorial/vertex_cover/services/check_approx_vc_driver.py | check_approx_vc_driver.py | py | 10,368 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "multilanguage.Env",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "multilanguage.TALcolors",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "multilanguage.Lang",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "TALfiles... |
27478835009 | # 1 задание
my_list = [1, 1.2, None, True, 'Text', ['list'], {'key_1':'Val_1'}]
for itam in my_list:
print(type(itam))
# 2 задание
my_list2 = input('Введите элементы списка через запятую: ')
my_list2 = my_list2.split(',')
print(my_list2)
my_list2_len = len(my_list2) if len(my_list2) % 2 ==0 else len(my_list2)-1
i=0
while i <= my_list2_len-1:
if i%2 ==0:
my_list2[i], my_list2[i+1] = my_list2[i+1], my_list2[i]
i+=1
else:
i+=1
print(my_list2)
# 3 задание
month = {'1':'winter', '2':'winter', '3':'spring', '4':'spring', '5':'spring', '6':'summer', '7':'summer', '8':'summer', '9':'autumn', '10':'autumn', '11':'autumn', '12':'winter'}
try:
print(month[input('Введите номер месяца: ')])
except KeyError:
print(f'Такого номера месяца не существует')
try:
month_input = int(input('Введите номер месяца: '))
except:
print(f'Такого номера месяца не существует')
month_input = int(input('Введите номер месяца заново: '))
winter = [1,2,12]
spring = [3,4,5]
summer = [6,7,8]
autumn = [9,10,11]
if month_input in winter:
print("Winter")
elif month_input in spring:
print('Spring')
elif month_input in summer:
print('Summer')
elif month_input in autumn:
print('Autumn')
else: print(f'Такого номера месяца не существует')
# 4 задание
str = input('Введите строку: ')
str_list = str.split(' ')
print(str_list)
print(str_list[1])
i=0
while i<len(str_list):
print(f'{i+1}. {str_list[i][:10]}')
i+=1
# 5 задание
my_list5 = [7,5,3,3,2]
tuple(my_list5)
am_inputs = int(input('Введите количество вводов в рейтинг: '))
q = 1
print(type(q))
print(type(am_inputs))
while q <= am_inputs:
user_input = int(input('Введите значение в рейтинг: '))
result = sorted([user_input] + (my_list5), reverse=True)
q+=1
print(result)
# 6 задание
import sys
import os
import json
with open('goods_base.jon', 'r') as f:
lines = (f.readlines())
def add_good():
    """Prompt the user (stdin, Russian prompts) for a new product and append it
    as one JSON line to goods_base.jon.

    The record is a ``(sequence_number, fields_dict)`` tuple; the sequence
    number is derived from the module-level ``lines`` list read earlier.
    NOTE(review): the filename 'goods_base.jon' looks like a typo for
    '.json' but is used consistently module-wide, so it is kept.
    """
    goods_dict = {}
    goods_dict['Название'] = input('Введите название товара: ')
    goods_dict['Цена'] = int(input('Введите цену товара: '))
    goods_dict['Количество'] = int(input('Введите количество товара: '))
    goods_dict['Единицы измерения'] = input('Введите единицы измерения товара: ')
    new_good = (len(lines) + 1, goods_dict)
    # Removed: an unused json.dumps() result and a stray debug print(type(...)).
    with open('goods_base.jon', 'a', encoding='utf-8') as f:
        json.dump(new_good, f)
        f.write('\n')
print(len(lines))
add_good()
with open('goods_base.jon', 'r') as f:
for line in lines:
goods = tuple(json.loads(line))
print(goods)
print(len(lines))
names = []
with open('goods_base.jon', 'r') as f:
for line in lines:
goods = tuple(json.loads(line))
names.append(goods[1]['Название'])
price = []
with open('goods_base.jon', 'r') as f:
for line in lines:
goods = tuple(json.loads(line))
price.append(goods[1]['Цена'])
ammount = []
with open('goods_base.jon', 'r') as f:
for line in lines:
goods = tuple(json.loads(line))
ammount.append(goods[1]['Количество'])
units = []
with open('goods_base.jon', 'r') as f:
for line in lines:
goods = tuple(json.loads(line))
units.append(goods[1]['Единицы измерения'])
analis = {
'Название':[names],
'Цена':[price],
'Количество':[ammount],
'Единицы измерения':[units]
}
for key,val in analis.items():
print(key, val[0]) # не понял почему у меня массив вложен в массив, откуда второй массив взялся???
| Glen1679/GeekBrains | Homework2.py | Homework2.py | py | 4,241 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 119... |
16528703119 | from scipy import signal
from pywebio.input import *
from pywebio.output import *
from pywebio import start_server
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import io
def fig2img(fig):
    """Render a Matplotlib figure into an in-memory PNG and return it as a PIL Image."""
    png_buffer = io.BytesIO()
    fig.savefig(png_buffer)
    png_buffer.seek(0)  # rewind so PIL reads from the start
    return Image.open(png_buffer)
def plot_mag(w,mag):
    """Draw the Bode magnitude plot (semilog frequency axis) and return the figure.

    w   -- frequency points (rad/s), log-spaced
    mag -- magnitude in dB at each frequency
    """
    plt.close()  # drop the previous figure so repeated calls don't accumulate
    plt.figure(figsize=(12,5))
    plt.title(f"Magnitude plot",fontsize=16)
    plt.semilogx(w, mag)
    plt.grid(True)
    return plt.gcf()
def plot_freqrsp(w,H):
    """Draw the complex frequency response (real vs. imaginary) and return the figure.

    w -- frequency points (unused here; kept for signature symmetry with the
         other plot_* helpers)
    H -- complex response values; plotted once as-is (blue) and once with the
         imaginary part mirrored (red)
    """
    # Bug fix: every other plot_* helper calls plt.close() first; without it
    # this function leaked one open Matplotlib figure per invocation.
    plt.close()
    plt.figure(figsize=(12,5))
    plt.title(f"Frequency response",fontsize=16)
    plt.plot(H.real, H.imag, "b")
    plt.plot(H.real, -H.imag, "r")
    plt.grid(True)
    return plt.gcf()
def plot_phase(w,phase):
    """Draw the Bode phase plot (semilog frequency axis) and return the figure.

    w     -- frequency points (rad/s), log-spaced
    phase -- phase in degrees at each frequency
    """
    plt.close()  # drop the previous figure so repeated calls don't accumulate
    plt.figure(figsize=(12,5))
    plt.title(f"Phase plot",fontsize=16)
    plt.semilogx(w, phase)
    plt.grid(True)
    return plt.gcf()
def plot_impulse(t,y):
    """Draw the impulse response amplitude over time and return the figure.

    t -- time points (s)
    y -- response amplitude at each time point
    """
    plt.close()  # drop the previous figure so repeated calls don't accumulate
    plt.figure(figsize=(12,5))
    plt.title("Impulse response",fontsize=16)
    plt.plot(t,y)
    plt.xlabel('Time [s]')
    plt.ylabel('Amplitude')
    plt.grid(True)
    return plt.gcf()
def plot_step(t,y):
    """Draw the step response amplitude over time and return the figure.

    t -- time points (s)
    y -- response amplitude at each time point
    """
    plt.close()  # drop the previous figure so repeated calls don't accumulate
    plt.figure(figsize=(12,5))
    plt.title("Step response",fontsize=16)
    plt.plot(t,y)
    plt.xlabel('Time [s]')
    plt.ylabel('Amplitude')
    plt.grid(True)
    return plt.gcf()
def system(num,den):
    """Build an LTI transfer function from numerator/denominator coefficients
    and render all five plots (Bode magnitude & phase, frequency response,
    impulse response, step response) into the PyWebIO 'raw' scope.

    num, den -- lists of polynomial coefficients, highest order first.
    """
    remove(scope='raw')  # clear plots from any previous submission
    with use_scope(name='raw',clear=True,) as img:
        #sys = signal.TransferFunction([20,5], [10, 100,1])
        # NOTE: local name `sys` shadows the stdlib module name (not imported here).
        sys = signal.TransferFunction(num, den)
        # Log-spaced frequency grid from 10^-3 to 10^4 rad/s, 10 points/decade.
        w=[10**(i/10) for i in range(-30,41)]
        # Bode
        w, mag, phase = signal.bode(sys,w=w)
        f1 = plot_mag(w,mag)
        im1 = fig2img(f1)
        put_image(im1)
        f2 = plot_phase(w,phase)
        im2 = fig2img(f2)
        put_image(im2)
        # Freq response
        w, H = signal.freqresp(sys,w=w)
        f3 = plot_freqrsp(w,H)
        im3 = fig2img(f3)
        put_image(im3)
        # Impulse response (time grid chosen automatically by scipy)
        t, y = signal.impulse(sys)
        f4 = plot_impulse(t,y)
        im4 = fig2img(f4)
        put_image(im4)
        # Step response
        t, y = signal.step(sys)
        f5 = plot_step(t,y)
        im5 = fig2img(f5)
        put_image(im5)
def app():
    """PyWebIO page entry point: show the intro text, collect the transfer
    function's numerator/denominator coefficients, and render the plots.
    """
    put_markdown("""
    # LTI system demo (using `Scipy.signal`)
    ## [Dr. Tirthajyoti Sarkar](https://www.linkedin.com/in/tirthajyoti-sarkar-2127aa7/)
    ## What is a LTI system anyway?
    In system analysis, among other fields of study, a linear time-invariant system (or *"LTI system"*) is a system that produces an output signal from any input signal subject to the constraints of **linearity** and **time-invariance**. LTI system theory is an area of applied mathematics which has direct applications in electrical circuit analysis and design, signal processing and filter design, control theory, mechanical engineering, image processing, the design of measuring instruments of many sorts, NMR spectroscopy, and many other technical areas where systems of ordinary differential equations present themselves.
    ## What are we doing here?
    From a given transfer function, we calculate and display the following,
    - Bode magnitude plot
    - Bode phase plot
    - Frequency response plot (real vs. imaginary)
    - Impulse response plot
    - Step response plot
    """, strip_indent=4)
    # Two comma-separated coefficient lists, highest order first.
    tf = input_group("Transfer function",[input("Input the coefficients of numerator:", type=TEXT,name='num',
                                            help_text='Example: 2,1. No gap between a number and the commas, please.'),
                                        input("Input the coefficients of denominator:", type=TEXT,name='den',
                                            help_text='Example: 5,-2,11. No gap between a number and the commas, please.')],
                    )
    # Parse "a,b,c" into [a, b, c] floats; no validation — bad input raises ValueError.
    num = [float(n) for n in tf['num'].split(',')]
    den = [float (n) for n in tf['den'].split(',')]
    system(num,den)
if __name__ == '__main__':
start_server(app,port=9999,debug=True) | tirthajyoti/PyWebIO | apps/bode.py | bode.py | py | 4,531 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "io.BytesIO",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"li... |
2251885893 | import math
import numpy as np
import pygame as pg
def box_l2_loss(obj1, obj2):
  """Euclidean (L2) distance between the (x, y, width, height) rects of two sprites."""
  box_a = np.array([obj1.rect.x, obj1.rect.y, obj1.rect.width, obj1.rect.height])
  box_b = np.array([obj2.rect.x, obj2.rect.y, obj2.rect.width, obj2.rect.height])
  diff = box_a - box_b
  return np.linalg.norm(diff)
def move_from_vector(vector):
  """Decompose a polar (angle in degrees, speed) pair into a cartesian (dx, dy) step."""
  angle, speed = vector
  rad_angle = angle * math.pi / 180  # degrees -> radians
  return speed * math.cos(rad_angle), speed * math.sin(rad_angle)
def draw_obj(list_obj):
  """Ask every object in *list_obj* to render itself via its draw() method."""
  for sprite in list_obj:
    sprite.draw()
def remove_corps(list_obj):
  """Return a new list keeping only the objects whose ``alive`` flag is truthy."""
  return list(filter(lambda sprite: sprite.alive, list_obj))
def predation(list_obj):
  """Resolve one round of predation: a predator whose ``prey`` names another
  object grows, resets its prey to -1, and the prey object is notified via
  ``eated()``.

  NOTE(review): when ``obj.prey`` is set but its name is not found,
  ``idx_prey`` is -1 and ``list_obj[-1]`` (the last object) is read and
  eaten — confirm this is intended rather than an indexing bug.
  """
  names = [obj.name for obj in list_obj]
  for obj in list_obj:
    idx_prey = names.index(obj.prey) if obj.prey in names else -1
    # -1 doubles as "no prey"; the second condition skips the case where the
    # target's own prey equals obj.prey — TODO confirm the intent of that rule.
    if obj.prey != -1 and obj.prey != list_obj[idx_prey].prey:
      obj.grow()
      obj.prey = -1
      list_obj[idx_prey].eated()
def check_borders(obj_list):
  """Clamp every object's (x, y) so it stays on the pygame display surface,
  leaving a 20-pixel margin at the far edges.

  NOTE(review): x is clamped against ``height`` and y against ``width`` —
  possibly intentional for a rotated/portrait layout, but it looks like the
  two may be swapped; confirm against the surface orientation.
  """
  width, height = pg.display.get_surface().get_size()
  for el in obj_list:
    if el.x < 0:
      el.x = 0
    if el.y < 0:
      el.y = 0
    if el.x > height - 20:
      el.x = height - 20
    if el.y > width - 20:
      el.y = width - 20
def matprint(mat, fmt="g"):
  """Pretty-print a 2-D numpy matrix with each column right-aligned to its widest entry."""
  widths = [max(len(("{:" + fmt + "}").format(value)) for value in column) for column in mat.T]
  for row in mat:
    for width, value in zip(widths, row):
      print(("{:" + str(width) + fmt + "}").format(value), end=" ")
    print("")
| thbeucher/Games | life_games/utils.py | utils.py | py | 1,395 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_numbe... |
41763630844 | import random
from terminaltables import AsciiTable
import curses
GAME_TITLE = "`•.,¸¸ [ JEU DU TAQUIN ] ¸¸,.•´"
# Nombre de cases par côté
TAQUIN_SIZE = 4
# Valeur de la case vide
EMPTY_CASE_VALUE = ""
# Taquin correct, dans l'ordre
CORRECT_SOLUTION = [list(a) for a in zip(*[iter(list(range(1, TAQUIN_SIZE ** 2)) + [EMPTY_CASE_VALUE])] * TAQUIN_SIZE)]
# Jeu en cours
CURRENT_STATE = []
def get_available_movements():
# TODO : retourner une liste de mouvements possibles ["LEFT", "UP"]
return []
def move(direction=None):
    """Apply a move of the empty case in *direction* ("UP"/"DOWN"/"LEFT"/"RIGHT").

    Bug fix: this was defined with no parameters, yet every call site in
    handle_keypress() invokes it as ``move("DOWN")`` etc., which would raise
    TypeError once a move became available. The parameter defaults to None
    so any zero-argument callers keep working. The board mutation itself is
    still TODO.
    """
    # TODO: apply the movement of the empty case to CURRENT_STATE
    pass
def has_won():
    """Return whether the game is won (not implemented; currently returns None).

    Presumably meant to compare CURRENT_STATE against CORRECT_SOLUTION —
    confirm when implementing.
    """
    # TODO: check whether the game is won
    pass
def handle_keypress(screen):
    """Poll the curses screen for one keypress and react to it.

    Arrow keys attempt to move the empty case (only when the direction is in
    get_available_movements()); 'Q' raises KeyboardInterrupt so main() can
    exit cleanly. When no key is pending (nodelay mode) the function simply
    returns.
    """
    try:
        key = screen.getkey().upper()
    except:
        # getkey() raises when no input is pending in nodelay mode — ignore.
        return
    height, width = screen.getmaxyx()
    screen.erase()
    available_movements = get_available_movements()
    if key == "KEY_DOWN":
        # Status line on the bottom row, reversed video.
        screen.addstr(height - 1, 0, "↓ DOWN - A FAIRE", curses.A_REVERSE)
        if "DOWN" in available_movements:
            move("DOWN")
    elif key == "KEY_UP":
        screen.addstr(height - 1, 0, "↑ UP - A FAIRE", curses.A_REVERSE)
        if "UP" in available_movements:
            move("UP")
    elif key == "KEY_LEFT":
        screen.addstr(height - 1, 0, "← LEFT - A FAIRE", curses.A_REVERSE)
        if "LEFT" in available_movements:
            move("LEFT")
    elif key == "KEY_RIGHT":
        screen.addstr(height - 1, 0, "→ RIGHT - A FAIRE", curses.A_REVERSE)
        if "RIGHT" in available_movements:
            move("RIGHT")
    elif key in ("Q",):
        raise KeyboardInterrupt
def get_state_as_str(state):
    """Render the board (list of rows) as an ASCII-art table string via terminaltables."""
    table = AsciiTable(state)
    table.inner_heading_row_border = False
    table.inner_row_border = True
    table.justify_columns[0] = "center"
    table.justify_columns[1] = "center"
    # NOTE(review): only the first two columns are centred — with
    # TAQUIN_SIZE = 4 the last two columns keep the default justification;
    # confirm whether that is intended.
    return table.table
def display_output(screen, state):
    """Draw the title, the board table and the control hints onto the curses screen."""
    # Title
    screen.addstr(0, 0, GAME_TITLE, curses.color_pair(1))
    # Table game
    screen.addstr(2, 0, get_state_as_str(state), curses.color_pair(1))
    # Controls (positioned below the table: each board row renders as ~2 text rows)
    screen.addstr(4 + TAQUIN_SIZE * 2, 0, "Utiliser les flêches pour déplacer la case vide.")
    screen.addstr(5 + TAQUIN_SIZE * 2, 0, "(r)eset | (s)olution | (q)uitter")
def init_state():
    """Build a TAQUIN_SIZE x TAQUIN_SIZE board: tiles 1..N-1 plus the empty case, shuffled."""
    tiles = list(range(1, TAQUIN_SIZE ** 2))
    tiles.append(EMPTY_CASE_VALUE)
    random.shuffle(tiles)
    # Chunk the flat tile list into TAQUIN_SIZE rows.
    return [tiles[row:row + TAQUIN_SIZE] for row in range(0, len(tiles), TAQUIN_SIZE)]
def main():
    global CURRENT_STATE
    """Fonction principale de l'application"""
    # NOTE(review): the string above follows the `global` statement, so it is
    # not actually this function's docstring.
    try:
        # Initialise the curses UI
        stdscr = curses.initscr()
        curses.start_color()
        curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_GREEN)
        curses.noecho()
        stdscr.keypad(True)
        stdscr.nodelay(True)  # make getkey() non-blocking
        # Draw a randomly shuffled board
        CURRENT_STATE = init_state()
        while True:
            # Handle one (possible) keypress, then redraw
            handle_keypress(stdscr)
            display_output(stdscr, CURRENT_STATE)
            # Refresh rate
            curses.napms(50) # ms
    except KeyboardInterrupt:
        pass
    finally:
        # Restore the terminal environment on exit.
        # NOTE(review): if curses.initscr() itself fails, stdscr is unbound
        # here and this block raises NameError — confirm acceptable.
        curses.nocbreak()
        stdscr.keypad(False)
        curses.echo()
        curses.endwin()
if __name__ == "__main__":
main()
| martync/taquin-py | taquin.py | taquin.py | py | 3,373 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "curses.A_REVERSE",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "curses.A_REVERSE",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "curses.A_REVERSE",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "cur... |
4392041881 | import json
import base64
import pymongo
import time
from json.encoder import JSONEncoder
from azure.storage.queue import (
QueueClient,
BinaryBase64EncodePolicy,
BinaryBase64DecodePolicy
)
azure_storage_account = None
mongo_connect = None
queue = "test"
queue = "general-image-2-crawl"
cookies = []
with open("local.settings.json") as fin:
settings = json.load(fin)
azure_storage_account = settings.get("AzureStorageAccount")
mongo_connect = settings.get("MongoDBConnectionString")
if not azure_storage_account or not mongo_connect:
raise Exception("Null Settings on AzureStorageAccount or mongo connect")
# Setup Base64 encoding and decoding functions
base64_queue_client = QueueClient.from_connection_string(
conn_str=azure_storage_account, queue_name=queue,
message_encode_policy = BinaryBase64EncodePolicy(),
message_decode_policy = BinaryBase64DecodePolicy()
)
mongo_client = pymongo.MongoClient(mongo_connect)
mongo_db = 'dev'
mongo_collection = "mingju5"
mongo_docs = mongo_client[mongo_db][mongo_collection]
with open("data/mingju.csv", 'r', encoding='utf-8') as fin:
fin.readline()
for idx, line in enumerate(fin):
if idx < 4747:
continue
gs = line.split(",")
assert len(gs) == 4
doc = mongo_docs.find_one({"url":gs[1]})
if doc and 'sent_baidu_img_res' in doc and doc['sent_baidu_img_res'] and 'data' in doc['sent_baidu_img_res'] and doc['sent_baidu_img_res']['data']:
for i, image_info in enumerate(doc['sent_baidu_img_res']['data']):
d_int, d_str = {}, {}
if 'thumbURL' not in image_info:
continue
for key, value in image_info.items():
if value:
if type(value) is int:
d_int[key] = value
if type(value) is str:
d_str[key] = value
d_str["source_mingju"] = gs[0]
d_str["source_mingju_url"] = gs[1]
d_str["source_mingju_author_title"] = gs[2]
d_str["source_mingju_poem_url"] = gs[3]
d_int['bdDisplayNum'] = doc['sent_baidu_img_res'].get('displayNum', 0)
d = {
"image_url" : image_info['thumbURL'],
"add_string_info" : d_str,
"add_int_info" : d_int
}
base64_queue_client.send_message(JSONEncoder().encode(d).encode('utf-8'))
if doc:
doc['crawled'] = int(time.time())
mongo_docs.update_one({'url':gs[1]}, {"$set":doc})
print(idx, gs[0], "Done") | harveyaot/AlphaTaiBai | scripts/send_imageurl2crawl.py | send_imageurl2crawl.py | py | 2,867 | python | en | code | 24 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "azure.storage.queue.QueueClient.from_connection_string",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "azure.storage.queue.QueueClient",
"line_number": 25,
"usage_type": "name... |
22625649989 | import pygame
import random
import time
#飞机大战
#手机上单手操作游戏
#屏幕长方形
# **************************我方飞机
class Hero(object):
    """Player plane sprite: loads its image once and blits itself at (x, y)."""
    def __init__(self, _screen, _x, _y):
        # Load the sprite; rect gives the image's pixel dimensions.
        # NOTE(review): backslash path is Windows-only (\h is not a string
        # escape, so the literal survives) — confirm target OS.
        self.image = pygame.image.load("images\hero.gif")
        self.rect = self.image.get_rect()
        self.width = self.rect.width
        self.height = self.rect.height
        self.screen = _screen
        self.x = _x
        self.y = _y
    def show(self, _x, _y):
        # Move the plane to (_x, _y) and blit it onto the stored screen.
        self.x = _x
        self.y = _y
        # width/height are re-read from the (unchanged) rect each call —
        # redundant but harmless.
        self.width = self.rect.width
        self.height = self.rect.height
        self.screen.blit(self.image, (self.x, self.y))
pygame.init()
pygame.mixer.init()
font = pygame.font.Font("C:\Windows\Fonts\SimHei.ttf",25)
back_music = pygame.mixer.Sound("sound\game_music.ogg")
back_music.play()
# ****************** 音乐 ****************************
screen = pygame.display.set_mode((495,800))
bg = pygame.image.load(r"images\background.png")
bg = pygame.transform.scale(bg, (498, 800))
# **********************************子弹
bullet = pygame.image.load(r"images\bullet.png")
b_rect = bullet.get_rect()
b_w = b_rect.width
b_h = b_rect.height
b_x = []
b_y = []
b_v = 30
times = b_v
# ***********************敌方飞机
# 小型战机
enemy1 = pygame.image.load(r"images\enemy0_down1.png")
enemy2 = pygame.image.load(r"images\enemy0_down2.png")
enemy3 = pygame.image.load(r"images\enemy0_down3.png")
enemy4 = pygame.image.load(r"images\enemy0_down4.png")
enemy = pygame.image.load(r"images\enemy0.png")
list_enemy_down = []
list_enemy_down.append(enemy1)
list_enemy_down.append(enemy2)
list_enemy_down.append(enemy3)
list_enemy_down.append(enemy4)
e_rect = enemy.get_rect()
e_h = e_rect.height
e_w = e_rect.width
#中型战机
mid_enemy = pygame.image.load(r"images\enemy1.png")
mid_enemy1 = pygame.image.load(r"images\enemy1_down1.png")
mid_enemy2 = pygame.image.load(r"images\enemy1_down2.png")
mid_enemy3 = pygame.image.load(r"images\enemy1_down3.png")
mid_enemy4 = pygame.image.load(r"images\enemy1_down4.png")
mid_rect = mid_enemy.get_rect()
mid_h = mid_rect.height
mid_w = mid_rect.width
mid_ex = []
mid_ey = []
heroA = Hero(screen,100,100)
# 敌方飞机产地坐标
list_ex = []
list_ey = []
for i in range(5):
enemyx = random.randint(50,400)
enemyy = random.randint(-100,-50)
list_ex.append(enemyx)
list_ey.append(enemyy)
midx = random.randint(50, 400)
midy = random.randint(-300, -100)
def collsion(bullet_x, bullet_y, bullet_rect, p_x, p_y, p_rect):
    """Axis-aligned bounding-box overlap test between a bullet and a plane.

    Returns True (and prints a debug trace) when the two rectangles overlap,
    False otherwise. Rect arguments only need .width/.height attributes.
    """
    overlap_x = bullet_x + bullet_rect.width > p_x and bullet_x < p_x + p_rect.width
    overlap_y = bullet_y < p_y + p_rect.height and bullet_y + bullet_rect.height > p_y
    if overlap_x and overlap_y:
        print("发生碰撞")  # debug trace ("collision occurred")
        return True
    return False
# 爆炸函数
# def boom(_screen,list_time,list_x,list_y,_flag, list_image):
# if _flag == 1:
# start = time.time()
# for i in range(len(list_time)):
# if start-list_time[i] < 0.2:
# _screen.blit(list_image[0], (list_x[i], list_y[i]))
# elif 0.2 < start-list_time[i] < 0.4:
# _screen.blit(list_image[1], (list_x[i], list_y[i]))
# elif 0.4 < start-list_time[i] < 0.6:
# _screen.blit(list_image[2], (list_x[i], list_y[i]))
# elif 0.6 < start-list_time[i] < 0.8:
# _screen.blit(list_image[3], (list_x[i], list_y[i]))
shoot_speed = 5
#小型机
end = []
boom_x = []
boom_y = []
flag = 0
#中型机
mid_end = []
mid_boom_x = []
mid_boom_y = []
mid_flag = 0
# 得分
score = 0
blood = 5
#发射中型机
send = 0
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit()
screen.blit(bg, (0, 0))
hx, hy = pygame.mouse.get_pos()
pygame.mouse.set_visible(False)
heroA.show(hx-heroA.width/2, hy-heroA.height/2)
# 画出敌方飞机
for i in range(5):
screen.blit(enemy,(list_ex[i],list_ey[i]))
if list_ey[i] < 800:
list_ey[i] += 1
else:
list_ey[i] = random.randint(-100,-50)
screen.blit(mid_enemy, (midx, midy))
if score != 0 and score%12 == 0:
send = score
if send != 0 and send % 12 == 0:
midy += 0.5
if midy > 800:
send = 0
midy = random.randint(-300, -100)
# 我方发射子弹
if times:
times -= 1
else:
b_x.append(hx - b_w/2+2)
b_y.append(hy - heroA.height / 2- b_h)
times = b_v
for i in range(len(b_x)):
screen.blit(bullet, (b_x[i], b_y[i]))
b_y[i] -= shoot_speed
# if b_y[i] < 0: #假设迭代到3,出界后移除,前后面的有关i的代码就会出错
# b_y.pop(i)
for j in range(len(list_ex)):
if collsion(b_x[i], b_y[i], b_rect, list_ex[j], list_ey[j], e_rect):
b_y[i] = -100 #子弹消失
score += 1
flag = 1
end.append(time.time())
boom_x.append(list_ex[j])
boom_y.append(list_ey[j])
list_ey[j] = random.randint(-100, -50) # 飞机消失
if collsion(b_x[i], b_y[i], b_rect, midx, midy, mid_rect):
blood -= 1
b_y[i] = -100 # 子弹消失
if blood <= 0:
mid_flag = 1
mid_end.append(time.time())
mid_boom_x.append(midx)
mid_boom_y.append(midy)
midy = random.randint(-300, -100) # 飞机消失
midx = random.randint(50, 400)
score += 1
blood = 5
#小型飞机爆炸
if flag == 1:
start = time.time()
for i in range(len(end)):
if start-end[i] < 0.2:
screen.blit(enemy1, (boom_x[i], boom_y[i]))
elif 0.2 < start-end[i] < 0.4:
screen.blit(enemy2, (boom_x[i], boom_y[i]))
elif 0.4 < start-end[i] < 0.6:
screen.blit(enemy3, (boom_x[i], boom_y[i]))
elif 0.6 < start-end[i] < 0.8:
screen.blit(enemy4, (boom_x[i], boom_y[i]))
#中型飞机爆炸
if mid_flag == 1:
mid_start = time.time()
for i in range(len(mid_end)):
if start-end[i] < 0.2:
screen.blit(mid_enemy1, (mid_boom_x[i], mid_boom_y[i]))
elif 0.2 < mid_start-mid_end[i] < 0.4:
screen.blit(mid_enemy2, (mid_boom_x[i], mid_boom_y[i]))
elif 0.4 < mid_start-mid_end[i] < 0.6:
screen.blit(mid_enemy3, (mid_boom_x[i], mid_boom_y[i]))
elif 0.6 < mid_start-mid_end[i] < 0.8:
screen.blit(mid_enemy4, (mid_boom_x[i], mid_boom_y[i]))
# 子弹优化,节省空间
for i in b_y:
index = b_y.index(i)
if i < 0:
b_y.pop(index)
b_x.pop(index)
scorep = font.render("得分:"+str(score),True,(255,255,255))
screen.blit(scorep,(10,20))
pygame.display.update()
# if a ==0:
# bx = hx - h_w / 10
# by = hy - h_h /2
# a = 1
# by -= shoot_speed
# screen.blit(bullet, (bx, by))
# if by < 0:
# a = 0
| gaicigame99/GuangdongUniversityofFinance-Economics | airplaneWar/黄海辉/飞机大战.py | 飞机大战.py | py | 7,173 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "pygame.image.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pygame.init",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.init",
... |
4801788061 | from django.contrib.auth import get_user_model
from django.test import TestCase, Client
from django.urls import reverse
from django.utils import timezone
from manager.models import Task, TaskType
class TaskPublicTest(TestCase):
    """Anonymous users must not see the task list; they are redirected to login."""

    def test_task_list_public(self):
        res = self.client.get(reverse("manager:task-list"))
        # Fix: assertNotEquals is a deprecated unittest alias of assertNotEqual.
        self.assertNotEqual(res.status_code, 200)
        self.assertRedirects(res, "/accounts/login/?next=%2Ftasks%2F")
class TaskPrivateTests(TestCase):
    """CRUD + complete flows for Task views, exercised as a logged-in user."""
    def setUp(self):
        # One user, one task type and one task shared by every test below.
        self.user = get_user_model().objects.create_user(
            username="testuser",
            password="testpassword"
        )
        self.client = Client()
        self.client.force_login(self.user)
        self.task_type = TaskType.objects.create(
            name="personal"
        )
        self.task = Task.objects.create(
            title="Test Task",
            owner=self.user,
            description="This is a test task description.",
            priority="URGENT",
            task_type=self.task_type
        )
    def test_task_list(self):
        """List view renders its template and contains the seeded task."""
        url = reverse("manager:task-list")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "manager/tasks_list.html")
        self.assertIn(self.task, response.context["object_list"])
    def test_task_create(self):
        """POSTing a valid form creates a task owned by the logged-in user."""
        url = reverse("manager:task-create")
        data = {
            "title": "New Task",
            "description": "This is a new task description.",
            "date": timezone.datetime(2023, 7, 24).date(),
            "priority": "TO-DO",
            "task_type": self.task_type.id,
        }
        res = self.client.post(url, data)
        self.assertEqual(res.status_code, 302)
        self.assertEqual(Task.objects.count(), 2)
        new_task = Task.objects.get(title="New Task")
        self.assertEqual(new_task.owner, self.user)
    def test_task_update(self):
        """POSTing the update form changes the task's fields."""
        url = reverse("manager:task-update", args=[self.task.id])
        data = {
            "title": "Updated Task",
            "description": "This is an updated task description.",
            "date": timezone.datetime(2023, 7, 24).date(),
            "priority": "TO-DO",
            "task_type": self.task_type.id,
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 302)
        updated_task = Task.objects.get(id=self.task.id)
        self.assertEqual(updated_task.title, "Updated Task")
    def test_task_delete(self):
        """POSTing the delete view removes the task."""
        url = reverse("manager:task-delete", args=[self.task.id])
        response = self.client.post(url)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Task.objects.count(), 0)
    def test_task_complete(self):
        """POSTing complete=true marks the task completed."""
        url = reverse("manager:task-complete", args=[self.task.id])
        data = {"complete": "true"}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 302)
        self.task.refresh_from_db()
        self.assertTrue(self.task.completed)
| kovaliskoveronika/task_manager | manager/tests/test_views_task.py | test_views_task.py | py | 3,088 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.test.TestCase",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.test.TestCase",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.c... |
74974903784 | """empty message
Revision ID: 3c8f0856b635
Revises: a7b5e34eac58
Create Date: 2018-02-24 13:05:25.721719
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3c8f0856b635'
down_revision = 'a7b5e34eac58'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: create the API table (id PK, api/method/desc strings, param text)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('API',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('api', sa.String(length=255), nullable=True),
    sa.Column('method', sa.String(length=24), nullable=True),
    sa.Column('desc', sa.String(length=512), nullable=True),
    sa.Column('param', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the API table (data in it is lost)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('API')
    # ### end Alembic commands ###
| LDouble/cernet_ipv6_server | migrations/versions/3c8f0856b635_.py | 3c8f0856b635_.py | py | 911 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
41852000033 | from flask import send_file, Flask, redirect, render_template, url_for
# from crypt import methods
import logging
from nltk.stem import WordNetLemmatizer
from fuzzywuzzy import fuzz
from nltk.corpus import wordnet
import nltk
from flask import send_from_directory, Flask, request, render_template, url_for, redirect, jsonify
from firebase_admin import credentials, firestore, initialize_app
import requests
import os.path
from werkzeug.utils import secure_filename
app = Flask(__name__)
app.secret_key = "somesecretkey"
app.config['ALLOWED_EXTENSIONS'] = ['.jpg', '.png']
app.config['MAX_CONTENT_LENGTH'] = 1 * 1024 * 1024
UPLOAD_FOLDER = os.path.join(os.getcwd(), 'uploads')
# [logging config
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(filename)s:%(funcName)s:%(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO)
# logging config]
cred = credentials.Certificate('key.json')
default_app = initialize_app(cred)
db = firestore.client()
# <<<<<<< HEAD
todo_ref = db.collection('todos')
UPLOAD_FOLDER = os.path.join(os.getcwd(), 'uploads')
# =======
# todo_ref = db.collection('keywords')
# >>>>>>> 84dd66fafd764c527993fc9ae8ebd16abc773985
BASE = "http://127.0.0.1:5000/"
# nltk.download('punkt')
# nltk.download('averaged_perceptron_tagger')
# nltk.download('wordnet')
# nltk.download('omw-1.4')
# Lemmatize with POS Tag
# Init the Wordnet Lemmatizer
lemmatizer = WordNetLemmatizer()
it = {}
# it = {'1.Welcome to Python.org':'Python is a popular general-purpose programming language. It is used in machine learning, web development, desktop applications, and many other fields.','Introduction to Python - W3Schools' : '2.Python is a popular programming language. It was created by Guido van Rossum, and released in 1991. It is used for: web development (server-side),',
# '3.Python Programming Language - GeeksforGeeks':' Python is a high-level, general-purpose and a very popular programming language. Python programming language (latest Python 3) is being used ...',
# '4.Lists in python' : 'In Python, a list is created by placing elements inside square brackets [] , separated by commas. ... A list can have any number of items and they may be of ...' ,
# '5. Data Structures — Python 3.10.6 documentation':'List comprehensions provide a concise way to create lists. Common applications are to make new lists where each element is the result of some operations applied ...',
# '6.Python Lists and List Manipulation | by Michael Galarnykhttps://towardsdatascience.com › python-basics-6-lists-a...':'Each item in a list has an assigned index value. It is important to note that python is a zero indexed based language. All this means is that the first item in ...',
# '7.Python Programming - Wikibooks, open books for an open world' : 'This book describes Python, an open-source general-purpose interpreted programming language available for the most popular operating systems.',
# '8.Complete Python Programming Python Basics to Advanced ...https://www.udemy.com › ... › Python':'10-Aug-2022 — Learn Python programming Python functions Python loops Python files Python DB Python OOP Python regex Python GUI game.',
# '9.Python 3 Programming Specialization - Courserahttps://www.coursera.org › ... › Software Development':'Offered by University of Michigan. Become a Fluent Python Programmer. Learn the fundamentals and become an independent programmer. Enroll for free.'
# }
def get_wordnet_pos(word):
    """Map the first letter of *word*'s NLTK POS tag to the wordnet POS
    constant that WordNetLemmatizer.lemmatize() accepts (noun by default)."""
    first_letter = nltk.pos_tag([word])[0][1][0].upper()
    pos_map = {
        "J": wordnet.ADJ,
        "N": wordnet.NOUN,
        "V": wordnet.VERB,
        "R": wordnet.ADV,
    }
    return pos_map.get(first_letter, wordnet.NOUN)
@app.route("/")
def home():
    # Landing page: renders the text-submission form.
    return render_template("form.html")
@app.route("/learn", methods=['GET', 'POST'])
def lear():
    # Results page: renders the keyword descriptions accumulated in the
    # module-level ``it`` dict by my_form_post().
    # NOTE(review): "lear" looks like a typo for "learn"; renaming would
    # break any url_for("lear") callers, so the name is left unchanged.
    return render_template("index.html",it = it)
@app.route('/res', methods=['POST'])
def my_form_post():
    """Detect known topic keywords in the submitted text (lemmatized, fuzzy
    matched) and stash their descriptions in the module-level ``it`` dict for
    the /learn page, then redirect there.

    Bug fix: ``list_labels`` only defines descriptions for a few keywords
    while ``dict_keywords`` tracks many more, so any detected keyword
    without a description (e.g. "class") previously raised KeyError at
    ``list_labels[key]``. Such keywords are now skipped.
    """
    text = request.form['text']
    # Lemmatize each token with its POS tag so e.g. "lists" matches "list".
    lemmatizer = WordNetLemmatizer()
    sentence = text
    dict_keywords = {"class": 0, "variable": 0, "setup": 0,
                     "object": 0, "function": 0, "comment": 0, "python": 0, "list": 0, "dictionary": 0, "tuple": 0}
    sentence_list = [lemmatizer.lemmatize(
        w, get_wordnet_pos(w)) for w in nltk.word_tokenize(sentence)]
    print(sentence_list)
    # Fuzzy-count keyword occurrences.
    # NOTE(review): a ratio threshold of 50 matches very loosely — confirm it
    # is not over-triggering on unrelated words.
    for word in sentence_list:
        for key in dict_keywords:
            if fuzz.ratio(word, key) > 50:
                dict_keywords[key] = dict_keywords[key] + 1
    print(dict_keywords)
    words = []
    # NOTE(review): the "tup" entry is unreachable — dict_keywords uses the
    # key "tuple"; presumably it should be renamed. Left as-is.
    list_labels = {
        "list" : "Lists are one of 4 built-in data types in Python used to store collections of data, the other 3 are Tuple, Set, and Dictionary, all with different qualities and usage.Python Lists are just like dynamically sized arrays, declared in other languages (vector in C++ and ArrayList in Java). In simple language, a list is a collection of things, enclosed in [ ] and separated by commas.... read more ",
        "python": "Python is a high-level, general-purpose programming language. Its design philosophy emphasizes code readability with the use of significant indentation. Python is dynamically-typed and garbage-collected. It supports multiple programming paradigms, including structured, object-oriented and functional programming.Dictionaries are used to store data values in key:value pairs. A dictionary is a collection which is ordered*, changeable and do not allow duplicates...... read more",
        "tup" : "xyz"
    }
    for key in dict_keywords:
        # Skip detected keywords that have no description yet — indexing
        # list_labels directly raised KeyError for them.
        if dict_keywords[key] > 0 and key in list_labels:
            words.append(key)
            it[key] = list_labels[key]
    print(words)
    return redirect("http://127.0.0.1:5000/learn", code=302)
@app.route('/download/<path:filename>', methods=['GET'])
def download(filename):
    """Send ``<filename>.txt`` from the upload folder as an attachment."""
    txt_name = "{}.txt".format(filename)
    logging.info('Downloading file= [%s]', txt_name)
    logging.info(app.root_path)
    full_path = os.path.join(app.root_path, UPLOAD_FOLDER)
    logging.info(full_path)
    return send_from_directory(full_path, txt_name, as_attachment=True)
# @app.route('/download')
# def download_file():
# p = "lists.txt"
# return send_file(p,as_attachment=True)
# @app.route("/<name>")
# def user(name):
# return f"Hello {name}!"
if __name__ == "__main__":
app.run()
# @app.route('/add', methods=['POST'])
# def create():
# """
# create() : Add document to Firestore collection with request body
# Ensure you pass a custom ID as part of json body in post request
# e.g. json={'id': '1', 'title': 'Write a blog post'}
# """
# try:
# id = request.json['id']
# todo_ref.document(id).set(request.json)
# return jsonify({"success": True}), 200
# except Exception as e:
# return f"An Error Occured: {e}"
# @app.route('/list', methods=['GET'])
# def read():
# """
# read() : Fetches documents from Firestore collection as JSON
# todo : Return document that matches query ID
# all_todos : Return all documents
# """
# try:
# # Check if ID was passed to URL query
# todo_id = request.args.get('id')
# if todo_id:
# todo = todo_ref.document(todo_id).get()
# return jsonify(todo.to_dict()), 200
# else:
# all_todos = [doc.to_dict() for doc in todo_ref.stream()]
# return jsonify(all_todos), 200
# except Exception as e:
# return f"An Error Occured: {e}"
# @app.route('/callDelete', methods=['GET'])
# def callDelete():
# return render_template("delete.html")
# @app.route('/deleteByPost', methods=['POST'])
# def deleteByPost():
# id = request.form.get('id')
# response = requests.delete(
# BASE + f"delete?id={id}")
# response.raise_for_status() # raises exception when not a 2xx response
# if response.status_code != 204:
# return response.json()
# return False
# @app.route('/delete', methods=['GET', 'DELETE'])
# def delete():
# """
# delete() : Delete a document from Firestore collection
# """
# try:
# # Check for ID in URL query
# todo_id = request.args.get('id')
# todo_ref.document(todo_id).delete()
# return jsonify({"success": True}), 200
# except Exception as e:
# return f"An Error Occured: {e}"
# @app.route('/addByPost', methods=['POST'])
# def addByPost():
# id = request.form.get('id')
# title = request.form.get('title')
# response = requests.post(
# BASE + "add", json={'id': id, 'title': title})
# response.raise_for_status() # raises exception when not a 2xx response
# if response.status_code != 204:
# return response.json()
# return False
# @app.route('/callAdd', methods=['GET'])
# def callAdd():
# return render_template("add.html")
| Rohit-S-Singh/Research-Project | app.py | app.py | py | 9,288 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_n... |
16505504555 | from nltk.tag.hmm import *
import codecs
import statistics
import numpy as np
from sklearn.metrics import confusion_matrix
import metrics
from metrics import EditDistance
from hmm import HMM
from memm import MEMM
from crf_word import CRF as CRF_WORD
from crf_sentence import CRF as CRF_SENT
from rnn import Encoder as RNN
from post_proc.syllabification import syllabification
from post_proc.post_processing import romanize
# Display names for pipeline stages 1-3; index 0 is an unused placeholder so
# dist[stage] indexing lines up.
stage_names = ['', 'Vowels', 'Syllabification', 'Romanization']
def PrintConfMat(conf_mat):
    """Print micro/macro-averaged precision, recall and F1, plus average
    accuracy, all derived from *conf_mat*."""
    precision, recall = metrics.MicroAvg(conf_mat)
    f1 = metrics.Fscore(precision, recall, 1)
    print('MicroAvg:')
    print(' Precision = {}\n Recall = {}\n F1 = {}'.format(precision, recall, f1))
    precision, recall = metrics.MacroAvg(conf_mat)
    # CONSISTENCY FIX: the original passed (recall, precision) here — the
    # opposite order of the micro-average call above.  With beta=1 the
    # F-score is symmetric so the value is unchanged, but the argument order
    # now matches the call above.
    f1 = metrics.Fscore(precision, recall, 1)
    print('MacroAvg:')
    print(' Precision = {}\n Recall = {}\n F1 = {}'.format(precision, recall, f1))
    print('Avg Accuracy:', metrics.AvgAcc(conf_mat))
def LoadTestData(file='data/HaaretzOrnan_annotated_test.txt'):
    """Parse the annotated test file.

    Returns (sents, vow_words, syll_words, rom_words): sentences as lists of
    bare words, plus parallel lists of vowelized, syllabified and romanized
    forms.  Lines starting with '#' are skipped; blank lines end a sentence.
    """
    sents = [[]]
    vow_words, syll_words, rom_words = [], [], []
    with codecs.open(file, encoding='utf-8') as f:
        for raw_line in f.readlines():
            line = raw_line.rstrip()
            if line.startswith(u'#'):
                continue
            if not line:
                # Blank line = sentence boundary (ignore repeated blanks).
                if sents[-1]:
                    sents.append([])
                continue
            fields = line.split(u' ')
            sents[-1].append(fields[2])
            vow_words.append(fields[3].replace(u'-', u''))
            syll_words.append(fields[3])
            rom_words.append(fields[4])
    # Drop a trailing empty sentence left by a final blank line.
    if not sents[-1]:
        sents.remove(sents[-1])
    return sents, vow_words, syll_words, rom_words
def CalcConfMatrix(pred, gold):
    """Build a 6x6 confusion matrix over the vowel alphabet 'euioa*'.

    Characters at odd positions (the vowel slots) of *pred* are counted
    against the same positions of *gold*; rows index predictions, columns
    index gold labels.
    """
    alphabet = list(u'euioa*')
    index_of = {ch: i for i, ch in enumerate(alphabet)}
    conf_mat = np.zeros((len(alphabet), len(alphabet)))
    for pos in range(1, len(pred), 2):
        conf_mat[index_of[pred[pos]], index_of[gold[pos]]] += 1
    return conf_mat
def TestModel(model, data):
    """Run *model* over the test sentences and score all three pipeline stages.

    *data* is the (sents, vow_words, syll_words, rom_words) tuple from
    LoadTestData.  Returns (conf_mat, dist): a 6x6 vowel confusion matrix for
    stage 1, and dist[stage] = (avg, median, min, max) edit distances for
    stages 1..3 (index 0 unused, matching stage_names).
    """
    conf_mat = None
    dist = [None]
    pred_stage = [None]
    pred_stage.append(model.predict(data[0]))  # stage 1: vowel prediction per sentence
    pred_stage[1] = [w for sent in pred_stage[1] for w in sent]  # flatten sentences for metric calculation
    pred_stage.append([syllabification(w) for w in pred_stage[1]])  # stage 2: syllabification
    pred_stage.append([romanize(w) for w in pred_stage[2]])  # stage 3: romanization
    # Confusion matrix over the vowel predictions (stage 1 only).
    conf_mat = np.zeros((6,6))
    for i, w in enumerate(pred_stage[1]):
        conf_mat += CalcConfMatrix(w, data[1][i])
    for stage in range(1,4):
        tmp_dist = [EditDistance(w, data[stage][i]) for i, w in enumerate(pred_stage[stage])]
        dist.append((sum(tmp_dist)/len(tmp_dist), statistics.median(tmp_dist), min(tmp_dist), max(tmp_dist)))  # (avg, median, min, max)
    return conf_mat, dist
def test():
    """Train HMM/MEMM/CRF models, load a pre-trained RNN, and print vowel
    and edit-distance metrics for each against the annotated test data."""
    data = LoadTestData()
    untrained_models = []
    config = {'ngram': 3, 'est': 'add-delta', 'delta': 0.3}
    untrained_models.append((HMM(config), 'HMM. config: {}'.format(config)))
    config = {'ftrs': ('IS_FIRST', 'IS_LAST', 'VAL', 'PRV_VAL', 'NXT_VAL', 'FRST_VAL', 'LST_VAL', 'SCND_VAL', 'SCND_LST_VAL')}
    untrained_models.append((MEMM(config), 'MEMM. config: {}'.format(config)))
    config = {'ftrs': ('IS_FIRST', 'IS_LAST', 'IDX', 'VAL', 'PRV_VAL', 'NXT_VAL', 'FRST_VAL', 'LST_VAL', 'SCND_VAL', 'SCND_LST_VAL')}
    untrained_models.append((CRF_WORD(config), 'CRF. config: {}'.format(config)))
    # Train every model on the full data set (split(0) = no held-out part).
    trained_models = [(model.prep_data().shuffle(0xfab1e).split(0).train(),name) for model,name in untrained_models]
    # The RNN is not trained here; its weights are loaded from disk.
    config = {'n_layers': 3, 'hidden_dim': 32, 'embedding': 'mds', 'win_len': 4,"device":"cpu"}
    rnn = RNN(config)
    trained_models.append((rnn.prep_model().load('rnn_model.bin'), 'RNN. config: {}'.format(config)))
    for model,name in trained_models:
        trained_model = model
        conf_mat, dist = TestModel(trained_model, data)
        print('\n')
        print(name)
        print('='*80)
        print('Vowel metrics:')
        print('-'*50)
        PrintConfMat(conf_mat)
        print('-'*50)
        print('Edit distance:')
        print('-'*50)
        for stage in range(1,4):
            print('Stage = {}:'.format(stage_names[stage]))
            print(' Average = {}\n Median = {}\n Min = {}\n Max = {}'.format(dist[stage][0],dist[stage][1],dist[stage][2],dist[stage][3]))
if __name__ == "__main__":
test()
| albert-shalumov/nlp_proj | test.py | test.py | py | 4,674 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "metrics.MicroAvg",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "metrics.Fscore",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "metrics.MacroAvg",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "metrics.Fscore",
... |
74027964263 | from mysql_connect import MysqlConnect
from s_config import config
import requests
import re
import json
import csv
import time
import random
def get_video_type(video_name):
    """Extract the text between '-...-' or '_..._' delimiters in *video_name*.

    Returns the first non-empty captured group of the first match, or None
    when nothing matches (or all captured groups are empty).
    """
    matches = re.findall(r'-(.*)-|_(.*)_', video_name)
    if not matches:
        return None
    for captured in matches[0]:
        if captured:
            return captured
def get_greater_30(v_id, error_file):
    """Fetch the full episode playlist for video *v_id* from the QQ playsource API.

    On any failure (payload not in the expected wrapper, missing/empty
    'PlaylistItem', or a 'PlaylistItem' without 'videoPlayList') the id is
    appended to *error_file* and None is returned; otherwise the list of
    episode dicts is returned.
    """
    url = "http://s.video.qq.com/get_playsource?id=" + v_id + "&type=4&range=1-10000&otype=json"
    session = requests.session()
    res = session.get(url).text
    # The API wraps its JSON payload as "QZOutputJson=<json>;".
    match = re.match("QZOutputJson=(.*)", res)
    if match is None:
        # BUG FIX: the original called .groups() directly on the match,
        # which raises AttributeError when the payload does not match.
        error_file.write(v_id + "\n")
        return None
    json_res = json.loads(match.group(1)[:-1])  # strip the trailing ';'
    print(url, json_res)
    # BUG FIX: replaced a bare ``except:`` probe for json_res['PlaylistItem']
    # (which also swallowed KeyboardInterrupt/SystemExit) with an explicit
    # .get() lookup.
    playlist_item = json_res.get('PlaylistItem') if isinstance(json_res, dict) else None
    if not playlist_item or 'videoPlayList' not in playlist_item:
        error_file.write(v_id + "\n")
        return None
    return playlist_item['videoPlayList']
def main():
    """Crawl playlists for shows with more than 30 episodes on a given date
    and export two CSVs: extra/non-numeric episodes, and show name -> type."""
    video_time = "20180518"
    mc = MysqlConnect(config)
    csv_file_1 = open('data/' + video_time + '_video_greater_30.csv', 'w', newline='', encoding="utf-8")
    csv_writer_1 = csv.writer(csv_file_1)
    csv_file_2 = open('data/' + video_time + '_video_type.csv', 'w', newline='', encoding="utf-8")
    csv_writer_2 = csv.writer(csv_file_2)
    error_file = open('data/' + video_time + 'error_item', 'a', encoding='utf-8')
    sql = """select detail_title,detail_pid from tx_jieshaoye where update_date = """ + video_time + """ and episodes > 30"""
    res = mc.exec_query(sql)
    for item in res:
        re_json = get_greater_30(item[1], error_file)
        if not re_json:
            time.sleep(3)
            re_json = get_greater_30(item[1], error_file)  # retry once
        if not re_json:
            print("error: ", item[0])
        else:
            csv_writer_2.writerow([item[0], get_video_type(item[0])])
            for ep_item in re_json:
                # Episode numbers containing non-digits (e.g. specials) or
                # numbers beyond 30 go into the "greater than 30" export.
                has_no_num = re.findall(r"[^\d]+", ep_item['episode_number'])
                if len(has_no_num) or int(ep_item['episode_number']) > 30:
                    if len(has_no_num):
                        csv_writer_1.writerow([item[0] + ep_item['title'], ep_item['playUrl']])
                    else:
                        csv_writer_1.writerow([ep_item['title'], ep_item['playUrl']])
                    time.sleep(random.randint(1, 3))  # polite crawl delay
    csv_file_1.close()
    csv_file_2.close()
    error_file.close()
    mc.close()
if __name__ == '__main__':
    # Crawl and export playlist data when run as a script.
    main()
| jercheng/js_video_scrapy | crawl/v_qq_com/data_base/t_main2.py | t_main2.py | py | 3,091 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.findall",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.session",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": ... |
18446560206 | # -*- coding: utf-8 -*-
"""
@author: DongXiaoning
"""
import numpy as np
import operator
import collections
import sklearn.datasets
# compute gini index
def compute_gini(group):
    """Return the Gini impurity of *group*.

    *group* is a 2-D array whose last column holds the class labels; the
    remaining columns are ignored here.  An empty group has impurity 0.
    """
    # CLEANUP: removed the unused locals ``data`` (feature slice) and ``n``
    # from the original implementation.
    m = group.shape[0]
    label = group[:, -1]
    dict_label = collections.Counter(label)
    group_size = float(m)
    if group_size == 0:
        gini_index = 0
    else:
        proportion = np.array(list(dict_label.values())) / group_size
        # Gini = 1 - sum(p_i^2), computed as a dot product.
        gini_index = 1 - np.dot(proportion, proportion)
    return gini_index
def compute_information_gain(gini_group,gini_subgroup1,weight1,gini_subgroup2,weight2):
    """Information gain of a split: parent Gini minus the weighted child Ginis."""
    weighted_child_gini = gini_subgroup1 * weight1 + gini_subgroup2 * weight2
    return gini_group - weighted_child_gini
def predict(data,stump):
    """Classify one sample *data* with a decision stump.

    ``stump[1]`` is the attribute index and ``stump[4]`` the split value;
    samples at or above the threshold get class 0, the rest class 1.
    """
    return 0 if data[stump[1]] >= stump[4] else 1
if __name__ == '__main__':
    # Load the breast-cancer data set and append labels as the last column.
    breast_dataset = sklearn.datasets.load_breast_cancer()
    breast_data = breast_dataset.data
    m,n = breast_data.shape
    breast_label =breast_dataset.target
    breast_label = breast_dataset.target.reshape(m,1)
    group = np.concatenate((breast_data,breast_label),axis = 1)
    m,n = group.shape
    gini = compute_gini(group)
    # Exhaustive stump search: for every attribute, try every row's value as
    # the split threshold and keep the split with the largest info gain.
    # (O(m*n) candidate splits, each scanning the whole data set.)
    largest_info_gain_list = [] # best split per attribute
    info_gain_dict = {}
    for i in range(n-1): # traverse each attribute/col
        for j in range(m-1): # traverse each row
            # Split into two groups around group[j][i].
            mask = group[:,i] >= group[j][i] # mask is like a filter, which compares each element in space object
            index = np.where(mask) # (here is group[:,j]) with group[i][j].
            group1 = group[index] # index is a tuple and only has an element(size = 1), the element is a list.
            row,col = group1.shape # thus, group[index,:] will output undesirable result.
            group1_size = float(row)
            mask = group[:,i] < group[j][i]
            index = np.where(mask)
            group2 = group[index]
            row,col = group2.shape
            group2_size = float(row)
            # group1 : gini and weight
            gini_group1 = compute_gini(group1)
            weight_group1 = group1_size / m
            # group2 : gini and weight
            gini_group2 = compute_gini(group2)
            weight_group2 = group2_size / m
            # info gain for this candidate split
            info_gain = compute_information_gain(gini,gini_group1,weight_group1,gini_group2,weight_group2)
            info_gain_dict[j] = info_gain
        largest_info_gain = max(info_gain_dict.items(),key=operator.itemgetter(1))
        print(f'Attribute {i}\'s name is \'{breast_dataset.feature_names[i]}\', split node is in row {largest_info_gain[0]} ---> value is {group[largest_info_gain[0]][i]}, info gain is: {largest_info_gain[1]}')
        largest_info_gain_list.append((f'attribute {i}',i,breast_dataset.feature_names[i],largest_info_gain[0],group[largest_info_gain[0]][i],largest_info_gain[1]))
    s = max(largest_info_gain_list,key = operator.itemgetter(-1))
    print(f'Best split attribute is \'{s[0]}\' : {s[2]}, and split node is in row {s[3]}, value is {s[4]}')
    # Sanity check: materialize the two groups of the reported best split.
    mask = group[:,20] >= 16.82
    index = np.where(mask)
    group3 = group[index]
    mask = group[:,20] < 16.82
    index = np.where(mask)
    group4 = group[index]
| xndong/ML-foundation-and-techniques | Decision stump/decision_stump.py | decision_stump.py | py | 3,428 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.Counter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.datasets.l... |
1528415930 |
import cv2
import tensorflow as tf
import numpy as np
import glob
import os
import time
import argparse
import configparser
from auto_pose.ae import factory, utils
# Command line: experiment id plus an image file or a folder of images.
parser = argparse.ArgumentParser()
parser.add_argument("experiment_name")
parser.add_argument("-f", "--file_str", required=True, help='folder or filename to image(s)')
arguments = parser.parse_args()

# "group/name" is split into experiment group and experiment name.
full_name = arguments.experiment_name.split('/')
experiment_name = full_name.pop()
experiment_group = full_name.pop() if len(full_name) > 0 else ''
print('experiment name: ', experiment_name)
print('experiment group: ', experiment_group)

# Collect all png/jpg/JPG images from a folder, or use the single file.
file_str = arguments.file_str
if os.path.isdir(file_str):
    files = sorted(glob.glob(os.path.join(str(file_str),'*.png'))+glob.glob(os.path.join(str(file_str),'*.jpg'))+glob.glob(os.path.join(str(file_str),'*.JPG')))
else:
    files = [file_str]

# The AE workspace (logs + checkpoints) must be configured via env var.
workspace_path = os.environ.get('AE_WORKSPACE_PATH')
if workspace_path == None:
    print('Please define a workspace path:\n')
    print('export AE_WORKSPACE_PATH=/path/to/workspace\n')
    exit(-1)

log_dir = utils.get_log_dir(workspace_path,experiment_name,experiment_group)
ckpt_dir = utils.get_checkpoint_dir(log_dir)

start_time = time.time()
encoder = factory.build_codebook_from_name(experiment_name, experiment_group, return_encoder=True)
end_time = time.time()
print("encoder loading: ", str(end_time - start_time))

with tf.Session() as sess:

    start_time = time.time()
    factory.restore_checkpoint(sess, tf.train.Saver(), ckpt_dir)
    end_time = time.time()
    print("restoring checkpoint: ", str(end_time - start_time))

    for file in files:
        # Preprocess: 256x256 grayscale with an explicit channel axis.
        im = cv2.imread(file)
        im = cv2.resize(im, (256, 256))
        im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        im = np.expand_dims(im, axis=2)

        start_time = time.time()
        latent_vector = encoder.latent_vector(sess, im)
        end_time = time.time()
        print('latent vector: ', latent_vector)
        print("inference time: ", int(1000 * (end_time - start_time)) / 1000., " fps: ",
              int(1 / (end_time - start_time)))
| logivations/AugmentedAutoencoder | auto_pose/test/encoder_inference.py | encoder_inference.py | py | 2,190 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"l... |
770438494 | #Import libraries
import scipy.io as spio
from scipy import fftpack
import matplotlib.pyplot as plt
import numpy as np
#Process the dataset into samples
def process_positions(dataset, positions):
    """Cut a 20-sample window (10 either side) around each peak position."""
    output_range = 10
    return [list(dataset[pos - output_range:pos + output_range])
            for pos in positions]
#Put peak through fft
def process_FFT(time_sample):
    """Return the discrete Fourier transform of one time-domain window."""
    spectrum = fftpack.fft(time_sample)
    return spectrum
#Put all peaks through fft and put them in a list.
def process_all_FFT(time_samples):
    """FFT every window and flatten each complex spectrum.

    Each spectrum becomes a list of interleaved real values
    [re0, im0, re1, im1, ...]; the result is a list of such lists.
    """
    spectra = [process_FFT(window) for window in time_samples]
    unsorted_x = []
    for spectrum in spectra:
        interleaved = []
        for value in spectrum:
            interleaved.append(value.real)
            interleaved.append(value.imag)
        unsorted_x.append(list(interleaved))
    return unsorted_x
#Convert the dataset into frequency series samples.
def time_freq(dataset, positions):
    """Convert peak positions in *dataset* into frequency-domain samples."""
    windows = process_positions(dataset, positions)
    return process_all_FFT(windows)
| khb00/peak_classifier_and_detector | TimeFreq.py | TimeFreq.py | py | 1,337 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.fftpack.fft",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "scipy.fftpack",
"line_number": 21,
"usage_type": "name"
}
] |
21121065737 | """File system hook for the S3 file system."""
from builtins import super
import posixpath
try:
import s3fs
except ImportError:
s3fs = None
from . import FsHook
class S3Hook(FsHook):
    """Hook for interacting with files in S3 via the ``s3fs`` file system."""

    def __init__(self, conn_id=None):
        super().__init__()
        self._conn_id = conn_id  # Airflow connection id; None = default credentials
        self._conn = None        # lazily created s3fs.S3FileSystem

    def get_conn(self):
        """Return a cached ``s3fs.S3FileSystem``, creating it on first use."""
        if s3fs is None:
            raise ImportError("s3fs must be installed to use the S3Hook")

        if self._conn is None:
            if self._conn_id is None:
                self._conn = s3fs.S3FileSystem()
            else:
                config = self.get_connection(self._conn_id)

                extra_kwargs = {}
                if "encryption" in config.extra_dejson:
                    # Forward the server-side encryption setting to S3 calls.
                    extra_kwargs["ServerSideEncryption"] = config.extra_dejson[
                        "encryption"
                    ]

                self._conn = s3fs.S3FileSystem(
                    key=config.login,
                    secret=config.password,
                    s3_additional_kwargs=extra_kwargs,
                )

        return self._conn

    def disconnect(self):
        """Drop the cached connection; the next call reconnects."""
        self._conn = None

    def open(self, file_path, mode="rb"):
        return self.get_conn().open(file_path, mode=mode)

    def exists(self, file_path):
        return self.get_conn().exists(file_path)

    def isdir(self, path):
        if "/" not in path:
            # Path looks like a bucket name.
            return True

        parent_dir = posixpath.dirname(path)
        for child in self.get_conn().ls(parent_dir, detail=True):
            if child["Key"] == path and child["StorageClass"] == "DIRECTORY":
                return True
        return False

    def mkdir(self, dir_path, mode=0o755, exist_ok=True):
        # S3 has no real directories; delegate to makedirs (mode is ignored).
        self.makedirs(dir_path, mode=mode, exist_ok=exist_ok)

    def listdir(self, dir_path):
        # BUG FIX: ``ls`` takes ``detail``, not ``details``.  The misspelled
        # keyword was absorbed by **kwargs, so ``ls`` kept its detail=True
        # default and returned dicts, which posixpath.relpath cannot handle.
        return [posixpath.relpath(fp, start=dir_path)
                for fp in self.get_conn().ls(dir_path, detail=False)]

    def rm(self, file_path):
        self.get_conn().rm(file_path, recursive=False)

    def rmtree(self, dir_path):
        self.get_conn().rm(dir_path, recursive=True)

    # Overridden default implementations.

    def makedirs(self, dir_path, mode=0o755, exist_ok=True):
        if self.exists(dir_path):
            if not exist_ok:
                self._raise_dir_exists(dir_path)
        else:
            self.get_conn().mkdir(dir_path)

    def walk(self, root):
        # A trailing slash confuses prefix listings; normalize first.
        root = _remove_trailing_slash(root)
        for entry in super().walk(root):
            yield entry
def _remove_trailing_slash(path):
if path.endswith("/"):
return path[:-1]
return path
| jrderuiter/airflow-fs | src/airflow_fs/hooks/s3_hook.py | s3_hook.py | py | 2,720 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "builtins.super",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "s3fs.S3FileSystem",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "s3fs.S3FileSystem",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "posixpath.dirname"... |
27502593085 | #!/usr/bin/env python
# coding: utf-8
# In[33]:
import pandas as pd
import streamlit as st
import requests
# In[34]:
# --- GitHub session and Streamlit UI setup -------------------------------
username = 'ContainiumTE'
# SECURITY NOTE(review): a personal access token is hard-coded in source.
# It should be revoked and supplied via an environment variable / secrets
# store instead of being committed.
token = 'RRopW0EJvVEcfS5EGt1rxxswfGF5IfzU3Bh4VkPHS10'
github_session = requests.Session()
github_session.auth = (username,token)

st.title("Discontinuity Weighting Tool")
# NOTE(review): ``url`` is defined but unused below — the header table is
# read from the local file, not from this raw GitHub URL.
url = "https://raw.githubusercontent.com/ContainiumTE/discontinuity_refinement/main/table_header.csv"
df_header = pd.read_csv("table_header.csv")

menu = ["Home","Other"]
choice = st.sidebar.selectbox("Menu",menu)

if choice == "Home":
    st.subheader("Home")
    st.subheader("Import Table format with Headers as follows:")
    st.table(df_header)

    data_file = st.file_uploader("Upload CSV", type=["csv"])
    if data_file is not None:
        df_rmr = pd.read_csv(data_file)

# NOTE(review): the statements below run unconditionally and will raise
# NameError while no CSV has been uploaded (df_rmr undefined) — confirm the
# intended Streamlit rerun flow.
pd.set_option('display.max_columns',500)
pd.set_option('display.max_rows',500)

# Normalize column names: lowercase, underscores, parentheses stripped.
df_rmr.columns = df_rmr.columns.str.strip().str.lower().str.replace(' ','_').str.replace('(', '').str.replace(')', '')
df_rmr.head()

hole_id = df_rmr['hole_id'].unique()
def joint_roughness1(jr1,jr1_count):
    """Distribute *jr1_count* over the nine joint-roughness categories.

    Returns an 18-tuple: nine per-category counts followed by nine weighted
    ratings (count * category weight, weights 0.45 down to 0.05).  Only the
    category matching *jr1* is non-zero; an empty *jr1* logs "No Jr1".
    """
    categories = [
        ('1 - Polished', 0.45),
        ('2 - Smooth Planar', 0.4),
        ('3 - Rough Planar', 0.35),
        ('4 - Slickensided Undulating', 0.3),
        ('5 - Smooth Undulating', 0.25),
        ('6 - Rough Undulating', 0.2),
        ('7 - Slickensided Stepped', 0.15),
        ('8 - Smooth Stepped', 0.1),
        ('9 - Rough Stepped / Irregular', 0.05),
    ]
    counts = [0] * 9
    ratings = [0] * 9
    matched = False
    for idx, (label, weight) in enumerate(categories):
        if jr1 == label:
            counts[idx] = jr1_count
            ratings[idx] = jr1_count * weight
            print("Jr1 Allocated to: " + label)
            matched = True
            break
    if not matched:
        if jr1 == '':
            print("No Jr1")
        else:
            print("None")
    return tuple(counts) + tuple(ratings)
# In[54]:
def joint_roughness2(jr2,jr2_count):
    """Distribute *jr2_count* over the nine joint-roughness categories.

    Returns an 18-tuple: nine per-category counts followed by nine weighted
    ratings (count * category weight, weights 0.45 down to 0.05).  Only the
    category matching *jr2* is non-zero; a 'NaN' value logs "No Jr2".
    """
    categories = [
        ('1 - Polished', 0.45),
        ('2 - Smooth Planar', 0.4),
        ('3 - Rough Planar', 0.35),
        ('4 - Slickensided Undulating', 0.3),
        ('5 - Smooth Undulating', 0.25),
        ('6 - Rough Undulating', 0.2),
        ('7 - Slickensided Stepped', 0.15),
        ('8 - Smooth Stepped', 0.1),
        ('9 - Rough Stepped / Irregular', 0.05),
    ]
    counts = [0] * 9
    ratings = [0] * 9
    matched = False
    for idx, (label, weight) in enumerate(categories):
        if jr2 == label:
            counts[idx] = jr2_count
            ratings[idx] = jr2_count * weight
            print("Jr2 Allocated to: " + label)
            matched = True
            break
    if not matched:
        if jr2 == 'NaN':
            print("No Jr2")
        else:
            print("None")
    return tuple(counts) + tuple(ratings)
# In[55]:
def joint_roughness3(jr3,jr3_count):
    """Distribute *jr3_count* over the nine joint-roughness categories.

    Returns an 18-tuple: nine per-category counts followed by nine weighted
    ratings (count * category weight, weights 0.45 down to 0.05).  Only the
    category matching *jr3* is non-zero; a 'NaN' value logs "No Jr3".
    """
    categories = [
        ('1 - Polished', 0.45),
        ('2 - Smooth Planar', 0.4),
        ('3 - Rough Planar', 0.35),
        ('4 - Slickensided Undulating', 0.3),
        ('5 - Smooth Undulating', 0.25),
        ('6 - Rough Undulating', 0.2),
        ('7 - Slickensided Stepped', 0.15),
        ('8 - Smooth Stepped', 0.1),
        ('9 - Rough Stepped / Irregular', 0.05),
    ]
    counts = [0] * 9
    ratings = [0] * 9
    matched = False
    for idx, (label, weight) in enumerate(categories):
        if jr3 == label:
            counts[idx] = jr3_count
            ratings[idx] = jr3_count * weight
            print("Jr3 Allocated to: " + label)
            matched = True
            break
    if not matched:
        if jr3 == 'NaN':
            print("No Jr3")
        else:
            print("None")
    return tuple(counts) + tuple(ratings)
# In[56]:
def sum_of_weighting(count_oj,polished_1,smooth_planar_2,rough_planar_3,slickensided_undulating_4,smooth_undulating_5,rough_undulating_6,slickensided_stepped_7,smooth_stepped_8,rough_stepped_9,pol_rat_1,smoot_rat_2,rou_rat_3,slick_rat_4,smoot_und_rat_5,rou_und_rat_6,slick_ste_rat_7,smoot_step_rat_8, rou_step_rat_9,polished_1_2,smooth_planar_2_2,rough_planar_3_2,slickensided_undulating_4_2,smooth_undulating_5_2,rough_undulating_6_2,slickensided_stepped_7_2,smooth_stepped_8_2,rough_stepped_9_2,pol_rat_1_2,smoot_rat_2_2,rou_rat_3_2,slick_rat_4_2,smoot_und_rat_5_2,rou_und_rat_6_2,slick_ste_rat_7_2,smoot_step_rat_8_2, rou_step_rat_9_2,polished_1_3, smooth_planar_2_3, rough_planar_3_3, slickensided_undulating_4_3, smooth_undulating_5_3, rough_undulating_6_3, slickensided_stepped_7_3, smooth_stepped_8_3, rough_stepped_9_3,pol_rat_1_3,smoot_rat_2_3,rou_rat_3_3,slick_rat_4_3,smoot_und_rat_5_3,rou_und_rat_6_3,slick_ste_rat_7_3,smoot_step_rat_8_3, rou_step_rat_9_3):
sum_total_weighting = pol_rat_1 + smoot_rat_2 + rou_rat_3 + slick_rat_4 + smoot_und_rat_5 + rou_und_rat_6 + slick_ste_rat_7 + smoot_step_rat_8 + rou_step_rat_9 + pol_rat_1_2 + smoot_rat_2_2 + rou_rat_3_2+slick_rat_4_2+smoot_und_rat_5_2+rou_und_rat_6_2+slick_ste_rat_7_2+smoot_step_rat_8_2+ rou_step_rat_9_2+pol_rat_1_3+smoot_rat_2_3+rou_rat_3_3+slick_rat_4_3+smoot_und_rat_5_3+rou_und_rat_6_3+slick_ste_rat_7_3+smoot_step_rat_8_3+ rou_step_rat_9_3
if (count_oj>0) and (sum_total_weighting>0):
count = count_oj
weighting_1 = (polished_1+polished_1_2+polished_1_3)/count
weighting_2 = (smooth_planar_2+smooth_planar_2_2+smooth_planar_2_3)/count
weighting_3 = (rough_planar_3+rough_planar_3_2+rough_planar_3_3)/count
weighting_4 = (slickensided_undulating_4+slickensided_undulating_4_2+slickensided_undulating_4_3)/count
weighting_5 = (smooth_undulating_5+smooth_undulating_5_2+smooth_undulating_5_3)/count
weighting_6 = (rough_undulating_6+rough_undulating_6_2+rough_undulating_6_3)/count
weighting_7 = (slickensided_stepped_7+slickensided_stepped_7_2+slickensided_stepped_7_3)/count
weighting_8 = (smooth_stepped_8+smooth_stepped_8_2+smooth_stepped_8_3)/count
weighting_9 = (rough_stepped_9+rough_stepped_9_2+rough_stepped_9_3)/count
weighting_rating_1 = (pol_rat_1+pol_rat_1_2+pol_rat_1_3)/sum_total_weighting
weighting_rating_2 = (smoot_rat_2+smoot_rat_2_2+smoot_rat_2_3)/sum_total_weighting
weighting_rating_3 = (rou_rat_3+rou_rat_3_2+rou_rat_3_3)/sum_total_weighting
weighting_rating_4 = (slick_rat_4+slick_rat_4_2+slick_rat_4_3)/sum_total_weighting
weighting_rating_5 = (smoot_und_rat_5+smoot_und_rat_5_2+smoot_und_rat_5_3)/sum_total_weighting
weighting_rating_6 = (rou_und_rat_6+rou_und_rat_6_2+rou_und_rat_6_3)/sum_total_weighting
weighting_rating_7 = (slick_ste_rat_7+slick_ste_rat_7_2+slick_ste_rat_7_3)/sum_total_weighting
weighting_rating_8 = (smoot_step_rat_8+smoot_step_rat_8_2+smoot_step_rat_8_3)/sum_total_weighting
weighting_rating_9 = (rou_step_rat_9+rou_step_rat_9_2+rou_step_rat_9_3)/sum_total_weighting
total_rating_1 = weighting_1*weighting_rating_1
total_rating_2 = weighting_2*weighting_rating_2
total_rating_3 = weighting_3*weighting_rating_3
total_rating_4 = weighting_4*weighting_rating_4
total_rating_5 = weighting_5*weighting_rating_5
total_rating_6 = weighting_6*weighting_rating_6
total_rating_7 = weighting_7*weighting_rating_7
total_rating_8 = weighting_8*weighting_rating_8
total_rating_9 = weighting_9*weighting_rating_9
max_rating = max(total_rating_1,total_rating_2,total_rating_3,total_rating_4,total_rating_5,total_rating_6,total_rating_7,total_rating_8,total_rating_9)
ratings = [total_rating_1,total_rating_2,total_rating_3,total_rating_4,total_rating_5,total_rating_6,total_rating_7,total_rating_8,total_rating_9]
index = ratings.index(max_rating)
print("1 ","Polished",polished_1," - ",total_rating_1)
print("2 ","Smoothe Planar",smooth_planar_2," - ",total_rating_2)
print("3 ","Rough Planar",rough_planar_3," - ",total_rating_3)
print("4 ","Slickensided Undulating",slickensided_undulating_4," - ",total_rating_4)
print("5 ","Smooth Undulating",smooth_undulating_5," - ",total_rating_5)
print("6 ","Rough Undulating",rough_undulating_6," - ",total_rating_6)
print("7 ","Slickensided Stepped",slickensided_stepped_7," - ",total_rating_7)
print("8 ","Smoothe Stepped",smooth_stepped_8," - ",total_rating_8)
print("9 ","Rough Stepped",rough_stepped_9," - ",total_rating_9)
#print("The selected Micro Joughness is ",max_rating)
#print(index)
selected_roughness = 0
if index==0:
selected_roughness = '1 - Polished'
elif index==1:
selected_roughness = '2 - Smooth Planar'
elif index==2:
selected_roughness = '3 - Rough Planar'
elif index==3:
selected_roughness = '4 - Slickensided Undulating'
elif index==4:
selected_roughness = '5 - Smooth Undulating'
elif index==5:
selected_roughness = '6 - Rough Undulating'
elif index==6:
selected_roughness = '7 - Slickensided Stepped'
elif index==7:
selected_roughness = '8 - Smooth Stepped'
elif index==8:
selected_roughness = '9 - Rough Stepped/Irregular'
else:
selected_roughness = 'None'
#
else:
print("No Micro Roughness Allocated")
return selected_roughness
# In[57]:
discon_data1 = {'hole_id': [],'from': [],'to': [],'Oj1': [],'Jr1': [],'Oj2': [],'Jr2': [],'Oj3': [],'Jr3': [],'Selected Jr': []}
QJr = pd.DataFrame(discon_data1)
for i in hole_id:
df_b = df_rmr[(df_rmr['hole_id']==i)]
print("Hole ID: ",i)
for k in df_b.index:
from_1 = df_b['from_m'][k]
to_1 = df_b['to_m'][k]
print("Interval Depth (m): ",from_1," - ",to_1)
jr1 = df_b['j1_-_micro_roughness'][k]
jr1_count = df_b['j1_-_oj_count'][k]
jr2 = df_b['j2_-_micro_roughness'][k]
jr2_count = df_b['j2_-_oj_count'][k]
jr3 = df_b['j3_-_micro_roughness'][k]
jr3_count = df_b['j3_-_oj_count'][k]
count_oj = jr1_count + jr2_count + jr3_count
if count_oj > 0:
jr1_result = joint_roughness1(jr1,jr1_count)
jr2_result = joint_roughness2(jr2,jr2_count)
jr3_result = joint_roughness3(jr3,jr3_count)
polished_1,smooth_planar_2,rough_planar_3,slickensided_undulating_4,smooth_undulating_5,rough_undulating_6,slickensided_stepped_7,smooth_stepped_8,rough_stepped_9,pol_rat_1,smoot_rat_2,rou_rat_3,slick_rat_4,smoot_und_rat_5,rou_und_rat_6,slick_ste_rat_7,smoot_step_rat_8, rou_step_rat_9 = jr1_result[0],jr1_result[1],jr1_result[2],jr1_result[3],jr1_result[4],jr1_result[5],jr1_result[6],jr1_result[7],jr1_result[8],jr1_result[9],jr1_result[10],jr1_result[11],jr1_result[12],jr1_result[13],jr1_result[14],jr1_result[15],jr1_result[16],jr1_result[17]
polished_1_2,smooth_planar_2_2,rough_planar_3_2,slickensided_undulating_4_2,smooth_undulating_5_2,rough_undulating_6_2,slickensided_stepped_7_2,smooth_stepped_8_2,rough_stepped_9_2,pol_rat_1_2,smoot_rat_2_2,rou_rat_3_2,slick_rat_4_2,smoot_und_rat_5_2,rou_und_rat_6_2,slick_ste_rat_7_2,smoot_step_rat_8_2, rou_step_rat_9_2 = jr2_result[0],jr2_result[1],jr2_result[2],jr2_result[3],jr2_result[4],jr2_result[5],jr2_result[6],jr2_result[7],jr2_result[8],jr2_result[9],jr2_result[10],jr2_result[11],jr2_result[12],jr2_result[13],jr2_result[14],jr2_result[15],jr2_result[16],jr2_result[17]
polished_1_3,smooth_planar_2_3,rough_planar_3_3,slickensided_undulating_4_3,smooth_undulating_5_3,rough_undulating_6_3,slickensided_stepped_7_3,smooth_stepped_8_3,rough_stepped_9_3,pol_rat_1_3,smoot_rat_2_3,rou_rat_3_3,slick_rat_4_3,smoot_und_rat_5_3,rou_und_rat_6_3,slick_ste_rat_7_3,smoot_step_rat_8_3, rou_step_rat_9_3 = jr3_result[0],jr3_result[1],jr3_result[2],jr3_result[3],jr3_result[4],jr3_result[5],jr3_result[6],jr3_result[7],jr3_result[8],jr3_result[9],jr3_result[10],jr3_result[11],jr3_result[12],jr3_result[13],jr3_result[14],jr3_result[15],jr3_result[16],jr3_result[17]
Qjr = sum_of_weighting(count_oj,polished_1,smooth_planar_2,rough_planar_3,slickensided_undulating_4,smooth_undulating_5,rough_undulating_6,slickensided_stepped_7,smooth_stepped_8,rough_stepped_9,pol_rat_1,smoot_rat_2,rou_rat_3,slick_rat_4,smoot_und_rat_5,rou_und_rat_6,slick_ste_rat_7,smoot_step_rat_8, rou_step_rat_9,polished_1_2,smooth_planar_2_2,rough_planar_3_2,slickensided_undulating_4_2,smooth_undulating_5_2,rough_undulating_6_2,slickensided_stepped_7_2,smooth_stepped_8_2,rough_stepped_9_2,pol_rat_1_2,smoot_rat_2_2,rou_rat_3_2,slick_rat_4_2,smoot_und_rat_5_2,rou_und_rat_6_2,slick_ste_rat_7_2,smoot_step_rat_8_2, rou_step_rat_9_2,polished_1_3, smooth_planar_2_3, rough_planar_3_3, slickensided_undulating_4_3, smooth_undulating_5_3, rough_undulating_6_3, slickensided_stepped_7_3, smooth_stepped_8_3, rough_stepped_9_3,pol_rat_1_3,smoot_rat_2_3,rou_rat_3_3,slick_rat_4_3,smoot_und_rat_5_3,rou_und_rat_6_3,slick_ste_rat_7_3,smoot_step_rat_8_3, rou_step_rat_9_3)
print("Selected Roughness: ",Qjr)
new_row = {'hole_id': i,'from': from_1,'to': to_1, 'Oj1': jr1_count, 'Jr1': jr1, 'Oj2': jr2_count, 'Jr2': jr2, 'Oj3': jr3_count, 'Jr3': jr3, 'Selected Jr': Qjr}
QJr = QJr.append(new_row,ignore_index=True)
else:
new_row = {'hole_id': i,'from': from_1,'to': to_1, 'Oj1': 0, 'Jr1': '', 'Oj2': 0, 'Jr2': '', 'Oj3': 0, 'Jr3': '', 'Selected Jr': ''}
QJr = QJr.append(new_row,ignore_index=True)
#QJr.to_csv('QJr_export.csv')
def convert_df(QJr):
return QJr.to_csv(index=False).encode('utf-8')
csv = convert_df(QJr)
st.download_button("Press to Download",csv,"discontinuity_weighting.csv","text/csv",key='download-csv')
print('Data Export Complete')
# In[ ]:
| ContainiumTE/discontinuity_refinement | Discontinuity_Selector.py | Discontinuity_Selector.py | py | 18,910 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.Session",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar.se... |
3825597824 | """A setuptools based setup module.
See:
https://packaging.python.org/guides/distributing-packages-using-setuptools/
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
name="emu-docker-tools",
version="0.1.0",
description="Tools to create and deploy android emulator docker containers.",
url="https://github.com/kneczaj/android-emulator-docker",
author="Kamil Neczaj",
author_email="kneczaj@protonmail.com",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: System :: Emulators",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="android emulator virtualization",
packages=find_packages(),
python_requires=">=3.0, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
install_requires=[
"emu-docker",
],
package_data={},
data_files={},
project_urls={
"Bug Reports": "https://github.com/kneczaj/android-emulator-docker/issues",
"Source": "https://github.com/kneczaj/android-emulator-docker",
},
)
| kneczaj/android-emulator-docker | setup.py | setup.py | py | 1,553 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_... |
26299614326 |
### ===== Load libraries =====
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.embeddings import CacheBackedEmbeddings, HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.storage import LocalFileStore
from langchain.text_splitter import TokenTextSplitter
from langchain.llms import HuggingFacePipeline
from langchain.chains import RetrievalQA, LLMChain
from langchain.prompts import PromptTemplate
from huggingface_hub import login as hf_login
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftConfig, PeftModel
import torch
from torch import cuda
import locale
locale.getpreferredencoding = lambda: "UTF-8"
def prepare_data():
# ----- Data Parsing
library = CSVLoader("library_data.csv")
library_data = library.load()
# library_data[0]
# ----- Text Splitter
text_splitter = TokenTextSplitter(
chunk_size=1000,
chunk_overlap = 200,
)
library_doc = text_splitter.split_documents(library_data)
# library_doc[0]
return library_doc
def prepare_data_retriever(library_doc):
# ----- Index / Vector Store (FAISS)
embed_model_id = 'sentence-transformers/all-MiniLM-L6-v2'
device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
core_embeddings_model = HuggingFaceEmbeddings(
model_name=embed_model_id,
model_kwargs={'device': device},
encode_kwargs={'device': device, 'batch_size': 32}
)
# CacheBackedEmbeddings saves time and money when user asks same question.
store = LocalFileStore("./cache/")
embedder = CacheBackedEmbeddings.from_bytes_store(
core_embeddings_model, store, namespace=embed_model_id
)
vector_store = FAISS.from_documents(library_doc, embedder)
# ----- Check if the vectorstore is working correctly.
#
# query = "In python, write a code that reads the csv file and plot a scatter plot of x-axis labeled 'Year' and the y-axis labeled 'value'"
#
# embedding_vector = core_embeddings_model.embed_query(query)
# docs = vector_store.similarity_search_by_vector(embedding_vector, k=3)
#
# for page in docs:
# print(page.page_content)
# ----- Build retriever
#
retriever = vector_store.as_retriever(search_type="mmr", search_kwargs={"k": 5})
# docs = retriever.get_relevant_documents("In python, write a code that reads the csv file and plot a scatter plot of x-axis labeled 'Year' and the y-axis labeled 'value'")
return retriever
def load_llm(model_id):
hf_login(token="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") # ENV
# ----- Load model directly
if model_id == "SaloniJhalani/ft-falcon-7b-instruct":
dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] == 8 else torch.float16
model = AutoModelForCausalLM.from_pretrained(
model_id,
trust_remote_code=True,
load_in_8bit=True,
device_map="auto",
torch_dtype = dtype, #torch.bfloat16
)
else:
model = AutoModelForCausalLM.from_pretrained(model_id,device_map='cuda')
tokenizer = AutoTokenizer.from_pretrained(model_id)
generate_text = transformers.pipeline(
model=model,
tokenizer=tokenizer,
task='text-generation',
return_full_text=True,
temperature=0.0,
max_new_tokens=1024, # a higher number of tokens delays the prompt
repetition_penalty=1.1 # avoid repeating
)
# result = generate_text("Write a code that plot a bar graph to display the value of 'Philosophy and psychology' title_en over the years?")
# result[0]["generated_text"]
llm = HuggingFacePipeline(pipeline=generate_text)
return llm
def prepare_llm(llm, retriever):
# ----- Template for an instruction with no input
prompt = PromptTemplate(
input_variables=["instruction"],
template="{instruction}"
)
# ----- LLMChain
#
# llm_chain = LLMChain(llm=llm, prompt=prompt)
#
# print(llm_chain.predict(
# instruction="Write a code that plot a bar graph to display the value of 'Philosophy and psychology' title_en over the years?"
# ).lstrip())
# ----- RetrievalQA
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever
)
return qa
def execute_code(code):
""" Parse and execute the returned python code """
# Remove "```python" at the beginning
code = code.replace("```python", "")
# Remove "```" at the end
code = code.replace("```", "")
code = code.replace('"""', "")
code = code.split("###")[0]
try:
exec(code)
except Exception as e:
print(f"Error executing code:{str(e)}")
return code
def init_llm_retriever(model_id):
print("\n", " Initialize the chat components ".center(100, "*"), "\n")
library_doc = prepare_data()
retriever = prepare_data_retriever(library_doc)
llm = load_llm(model_id)
qa = prepare_llm(llm, retriever)
print("\n", " LLM is ready ".center(100, "*"), "\n")
return qa
if __name__ == "__main__":
qa = init_llm_retriever("TheBloke/CodeLlama-7B-Python-GPTQ")
| Valkea/Omdena_Falcon | deployment02/backend/llm_setup.py | llm_setup.py | py | 5,289 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "locale.getpreferredencoding",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "langchain.document_loaders.csv_loader.CSVLoader",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "langchain.text_splitter.TokenTextSplitter",
"line_number": 3... |
74197853543 | from collections.abc import Iterable
from circkit import Circuit, Operation, Node
import logging
log = logging.getLogger("Transformer")
class Transformer:
"""Base transformer class."""
START_FROM_VARS = False
source_circuit: Circuit = None
current_node: Node = None
current_operation: Operation = None
def transform(self, circuit, **kwargs):
self.before_transform(circuit, **kwargs)
self.visit_all(circuit)
self.output = [
self.make_output(node, self.result[node])
for node in circuit.outputs
]
self.transform_output = self.output
self.after_transform(circuit, **kwargs) # can change self.transform_output
return self.transform_output
def before_transform(self, circuit, **kwargs):
self.source_circuit = circuit
self.result = {}
self._current_stack = []
def after_transform(self, circuit, **kwargs):
self.source_circuit = None
assert not self._current_stack
def visit_all(self, circuit):
if self.START_FROM_VARS:
nodes_to_visit = (
list(circuit.inputs)
+ [node for node in circuit if not node.is_INPUT()]
)
else:
nodes_to_visit = list(circuit)
for node in nodes_to_visit:
self.before_visit(node)
self.visit(node, *[self.result[sub] for sub in node.incoming])
self.after_visit(node)
def before_visit(self, node):
"""Event handler before visiting node"""
self._current_stack.append((
self.current_node,
self.current_operation
))
self.current_node = node
self.current_operation = node.operation
def after_visit(self, node):
"""Event handler after visiting node"""
self.current_node, self.current_operation = self._current_stack.pop()
def on_visit_error(self, node, err):
log.error(f"node: {node} err: {err}")
if hasattr(node, "show_debug"):
node.show_debug()
def visit(self, node, *args):
method_name = f"visit_{node.operation._name}"
method = getattr(self, method_name, self.visit_generic)
try:
result = self.result[node] = method(node, *args)
except Exception as err:
if not self.on_visit_error(node, err):
raise
return result
def visit_generic(self, node, *args):
raise NotImplementedError(
f"Visit method for {node.operation._name} "
f"is not implemented in {type(self)}"
)
def visit_GET(self, node, multi_result):
return multi_result[node.operation.index]
def make_output(self, node, result):
return result
class CircuitTransformer(Transformer):
"""Base class for circuit->circuit transformers."""
DEFAULT_CIRCUIT_CLASS = None
DEFAULT_BASE_RING = None
AUTO_OUTPUT = True
NAME_SUFFIX = None
FORCE_MANY_TO_ONE = False
def create_target_circuit(
self,
source_circuit,
# keyword-only
*, name=None, circuit_class=None, base_ring=None, **kwargs):
if name is None and source_circuit.name and self.NAME_SUFFIX:
name = source_circuit.name + self.NAME_SUFFIX
if circuit_class:
target_circuit_class = circuit_class
elif self.DEFAULT_CIRCUIT_CLASS:
target_circuit_class = self.DEFAULT_CIRCUIT_CLASS
else:
target_circuit_class = type(source_circuit)
if base_ring:
target_base_ring = base_ring
elif self.DEFAULT_BASE_RING:
target_base_ring = self.DEFAULT_BASE_RING
else:
target_base_ring = source_circuit.base_ring
log.debug(
f"{type(self)}: create target circuit {target_circuit_class} "
f"with ring {base_ring}"
)
target_circuit = target_circuit_class(
base_ring=target_base_ring,
name=name,
)
return target_circuit
@property
def base_ring(self):
return self.target_circuit.base_ring
# VSN: It is better to write this prototype in a clearer way
# so that we can understand what we need to pass for kwargs
# (circuit_class, base_ring, etc for create_target_circuit)
def transform(self, circuit, **kwargs):
if not isinstance(circuit, Circuit):
raise TypeError(
"Transformers are defined only for Circuits,"
f" passed: {type(circuit)}"
)
self.source_circuit = circuit
if "target_circuit" in kwargs:
self.target_circuit = kwargs["target_circuit"]
else:
self.target_circuit = self.create_target_circuit(circuit, **kwargs)
super().transform(circuit, **kwargs)
return self.target_circuit
def visit_generic(self, node, *args):
return node.reapply(*args, circuit=self.target_circuit)
def make_output(self, node, result):
""" Default implementation: mark images of output notes as outputs in
new circuit. """
if not self.AUTO_OUTPUT:
return
if isinstance(result, self.target_circuit.Node):
return self.target_circuit.add_output(result)
elif isinstance(result, Iterable):
ret = []
for result_node in result:
ret.append(self.target_circuit.add_output(result_node))
return ret
else:
log.error(f"{type(result)} cannot be outputted")
raise NotImplementedError(f"{type(result)} cannot be outputted")
| hellman/ches2022wbc | circkit/transformers/core.py | core.py | py | 5,704 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "circkit.Circuit",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "circkit.Node",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "circkit.Operation",
... |
8828088539 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import cv2
import math
import numpy as np
class trackerPoint(object):
def __init__(self, x, y, size, frame):
# KCF tracker init
self.tracker = cv2.TrackerKCF_create()
self.bbox = (x-size/2, y-size/2, size,size)
self.tracker.init(frame, self.bbox)
self.x = x
self.y = y
self.size = size
self.ptsize = 4
def update(self, frame, frameToDrawOn):
ok, self.bbox = self.tracker.update(frame)
if ok:
# Draw the new point
self.x = int(self.bbox[0] + self.size/2)
self.y = int(self.bbox[1] + self.size/2)
p1 = (self.x-self.ptsize, self.y-self.ptsize)
p2 = (self.x+self.ptsize, self.y+self.ptsize)
cv2.rectangle(frameToDrawOn, p1, p2, (0,0,255), -1)
def Dist(p1, p2):
x1 = p1.x
y1 = p1.y
x2 = p2.x
y2 = p2.y
return math.sqrt(math.pow((x2-x1), 2)+math.pow((y2-y1), 2))
def calcAngle(pTrack):
a = Dist(pTrack[0], pTrack[1])
b = Dist(pTrack[1], pTrack[2])
c = Dist(pTrack[2], pTrack[0])
angRad = math.acos(((a*a)+(b*b)-(c*c))/(2*a*b))
return math.degrees(angRad)
def drawLine(frame, pTrack):
cv2.line(frame, (pTrack[0].x,pTrack[0].y), (pTrack[1].x,pTrack[1].y), (255,0,255), 2)
cv2.line(frame, (pTrack[1].x,pTrack[1].y), (pTrack[2].x,pTrack[2].y), (255,0,255), 2)
def main():
# Init kernel for erode / dilate
kernel = np.ones((3,3), np.uint8)
# Init media in/out
cap = cv2.VideoCapture('Video.mp4')
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 30.0, (1280,720))
# Read first frame for trackers
ret, frame = cap.read()
# Instantiate trackers at known positions
pList = [(561,421),(656,385),(584,263)]
pTrack = []
for pt in pList:
pTrack.append(trackerPoint(pt[0], pt[1], 80, frame))
while(cap.isOpened()):
# Read new frame
ret, frame = cap.read()
if(frame is None):
break
# Thresholde / Erosion / Dilatation for arm detection
thresh1 = cv2.inRange(frame, (170,170,170), (255,255,255))
thresh1 = cv2.erode(thresh1, kernel, iterations = 3)
thresh1 = cv2.dilate(thresh1, kernel, iterations = 3)
# Mask
res = cv2.bitwise_and(frame, frame, mask=thresh1)
# Update trackers
for p in pTrack:
p.update(res, frame)
drawLine(frame, pTrack)
# Calculate angle between points
ang = calcAngle(pTrack)
strAng = "%2.2f deg" % ang
# Display it
cv2.putText(frame, strAng, (pTrack[1].x+40,pTrack[1].y), cv2.FONT_HERSHEY_DUPLEX, 1, (255,255,255))
# Show image
cv2.imshow('frame', frame)
# Write to output video
out.write(frame)
# "q" key to escape
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release everything
cap.release()
out.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
| ThibaudMZN/GeneralWork | ArmAngleCalculation/ArmAngle.py | ArmAngle.py | py | 3,041 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.TrackerKCF_create",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_nu... |
69889167786 | import torch
import torch.nn as nn
from attention import MultiheadedAttention
from feed_forward import PositionWiseDenseNetwork, LayerNorm
class DecoderBlock(nn.Module):
def __init__(self,
key_dim: int = 64,
embedding_dim: int = 512,
heads_number: int = 8,
hidden_dim: int = 2048,
dropout_prob: float = 0.1) -> None:
super().__init__()
self.key_dim = key_dim
self.heads_number = heads_number
self.embedding_dim = embedding_dim
self.decoder_self_attention = MultiheadedAttention(key_dim=key_dim,
embedding_dim=embedding_dim,
heads_number=heads_number)
self.layer_norm_0 = LayerNorm(embedding_dim=embedding_dim)
self.dropout_0 = nn.Dropout(p=dropout_prob)
self.decoder_encoder_attention = MultiheadedAttention(key_dim=key_dim,
embedding_dim=embedding_dim,
heads_number=heads_number)
self.layer_norm_1 = LayerNorm(embedding_dim=embedding_dim)
self.dropout_1 = nn.Dropout(p=dropout_prob)
self.position_wise_dense = PositionWiseDenseNetwork(hidden_dim=hidden_dim,
embedding_dim=embedding_dim,
dropout_prob=dropout_prob)
self.layer_norm_2 = LayerNorm(embedding_dim=embedding_dim)
self.dropout_2 = nn.Dropout(p=dropout_prob)
def forward(self,
x: torch.Tensor,
encoder_outputs: torch.Tensor,
encoder_padding_mask: torch.Tensor,
decoder_padding_mask: torch.Tensor) -> torch.Tensor:
batch_size = x.shape[0]
tokens_in_document = x.shape[1]
decoder_mask = decoder_padding_mask.unsqueeze(dim=1).unsqueeze(dim=2)
subsequent_mask = torch.ones((tokens_in_document, tokens_in_document), dtype=torch.bool)
subsequent_mask = torch.triu(subsequent_mask, diagonal=1)
subsequent_mask = subsequent_mask.unsqueeze(dim=0).unsqueeze(dim=1)
decoder_mask = decoder_mask | subsequent_mask
self_attention_representations = self.decoder_self_attention(x, x, x, decoder_mask)
x = self.layer_norm_0(x + self_attention_representations)
x = self.dropout_0(x)
encoder_padding_mask = encoder_padding_mask.unsqueeze(dim=1).unsqueeze(dim=2)
attention_representations = self.decoder_encoder_attention(x, encoder_outputs, encoder_outputs, encoder_padding_mask)
x = self.layer_norm_1(x + attention_representations)
x = self.dropout_1(x)
position_wise_values = self.position_wise_dense(x)
x = self.layer_norm_2(x + position_wise_values)
x = self.dropout_2(x)
return x
class Decoder(nn.Module):
def __init__(self,
vocabulary_size: int,
blocks_number: int = 8,
key_dim: int = 64,
embedding_dim: int = 512,
heads_number: int = 8,
hidden_dim: int = 2048,
dropout_prob: float = 0.1) -> None:
super().__init__()
self.blocks_number = blocks_number
self.decoder_blocks = nn.ModuleList([DecoderBlock(key_dim=key_dim,
embedding_dim=embedding_dim,
heads_number=heads_number,
hidden_dim=hidden_dim,
dropout_prob=dropout_prob)
for _ in range(self.blocks_number)])
self.output_weights = nn.Parameter(torch.rand(size=(embedding_dim, vocabulary_size)))
nn.init.xavier_uniform_(self.output_weights)
def forward(self,
x: torch.Tensor,
encoder_outputs: torch.Tensor,
decoder_padding_mask: torch.Tensor,
encoder_padding_mask: torch.Tensor) -> torch.Tensor:
for decoder_block in self.decoder_blocks:
x = decoder_block(x, encoder_outputs, encoder_padding_mask, decoder_padding_mask)
output_logits = torch.matmul(x, self.output_weights)
# we don't apply softmax since loss function does it inplace
# tokens_probs = torch.softmax(output_logits, dim=-1)
return output_logits
| KolodziejczykWaldemar/Transformers | decoder.py | decoder.py | py | 4,651 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "attention.MultiheadedAttention",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "feed_forwa... |
21871433231 | import jieba,re
#去除标点
def get_text(file_name):
with open(file_name, 'r', encoding='utf-8') as fr:
text = fr.read()
#删除的标点
del_ch = ['《',',','》','\n','。','、',';','"',\
':',',','!','?',' ']
for ch in del_ch:
text = text.replace(ch,'')
return text
file_name = 'comment.txt'
text = get_text(file_name)
vlist = jieba.lcut(text)#调用jieba实现分词,返回列表
res_dict = {}
#进行词频统计
for i in vlist:
res_dict[i] = res_dict.get(i,0) + 1
res_list = list(res_dict.items())
#print(res_list)
#降序排序
res_list.sort(key = lambda x:x[1], reverse = True)
fin_res_list = []
#去除单个字的词
for item in res_list:
if(len(item[0])>=2):
fin_res_list.append(item)
word_list=[]
words=[]
for i in range(1000):
word,count = fin_res_list[i]
pstr = str(i+1) + ':'
word_list.append(word)
with open('ignore_dict.txt', 'r', encoding='utf-8') as f:
ignore_words = f.read().splitlines()
# 遍历分词
for word in word_list:
if word not in ignore_words:#排除词
word = re.sub(r'[\n ]', '', word)
if len(word) < 1:
continue
words.append(word)
# print(pstr, end=' ')
# print(words[i], count)
with open("res.csv","a+")as fa:
fa.write(str(words[i])+","+str(count)+"\n")
| 2412322029/bilibili-spyder | 词频.py | 词频.py | py | 1,370 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "jieba.lcut",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 43,
"usage_type": "call"
}
] |
27541539070 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Travis Anderson"
"""
This is for contacting twitter, and watching a specific user or word
"""
import logging
import tweepy
import time
import os
import datetime
from threading import Thread
import threading
logger = logging.getLogger(__name__)
exit_flag = False
def _start(self, is_async):
"""Monkey patch to allow multi threading so twitter can run and
main program can run"""
self.running = True
if is_async:
logger.warning("Initiating multithread")
self._thread = Thread(
target=self._run, name="Tweepy Thread", daemon=True)
self._thread.start()
else:
self._run()
class WatchTwitter(tweepy.StreamListener):
    """Class that subscribes to keywords on twitter and logs matching statuses."""
    def __init__(self):
        # Build an authenticated tweepy client from environment variables.
        logger.info("Creating api")
        consumer_key = os.getenv("API_KEY")
        assert consumer_key is not None
        consumer_secret = os.getenv("API_SECRET")
        access_token = os.getenv("ACCESS_TOKEN")
        access_token_secret = os.getenv("ACCESS_SECRET")
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        self.api = tweepy.API(auth)
        # Monkey-patch tweepy's stream startup with the module-level _start.
        tweepy.Stream._start = _start
        self.subscriptions = []  # keywords currently tracked by the stream
        self._stop_event = threading.Event()
        self.stream_timestamp = 0  # time of the most recent connect
        self.master_timestamp = 0  # time of the first connect
        self.register = None
    def __enter__(self):
        # Context-manager support: `with WatchTwitter() as tb: ...`
        return self
    def __exit__(self, type, value, traceback):
        # Stop the stream on context exit.
        # NOTE(review): self.stream only exists after start_stream() has run;
        # exiting before any stream was started would raise AttributeError.
        if self.stream.running:
            self.stream.running = False
    def add_subscription(self, subscribe_to):
        """If stream is running adds new subscription, restarts stream"""
        if subscribe_to not in self.subscriptions:
            logger.info('Adding subscription: {}'.format(subscribe_to))
            self.subscriptions.append(subscribe_to)
            logger.info(self.subscriptions)
            # Changing the tracked keywords requires a stream restart.
            self.stream.running = False
            self.start_stream()
        else:
            logger.info("Already subscribed: {}" .format(self.subscriptions))
    def remove_subscription(self, unsubscribe_from):
        """Drop a keyword from the subscriptions and restart the stream."""
        logger.info("Attempting to remove {}".format(unsubscribe_from))
        if unsubscribe_from in self.subscriptions:
            logger.info(
                'Removing from subscriptions: {}'.format(unsubscribe_from))
            self.subscriptions.remove(unsubscribe_from)
            self.stream.running = False
            self.start_stream()
    def pause_stream(self):
        # Stop the running stream without clearing the subscription list.
        if self.stream.running:
            logger.info("Pausing all subscriptions: {}".format(
                self.subscriptions))
            self.stream.running = False
    def restart_stream(self):
        # Resume the stream after pause_stream().
        if not self.stream.running:
            logger.info("Restarting stream")
            self.start_stream()
    def init_stream(self, string):
        # Start the very first stream with a single keyword.
        self.subscriptions.append(string)
        self.start_stream()
    def start_stream(self):
        # (Re)create the tweepy stream and begin filtering asynchronously.
        global exit_flag
        exit_flag = False
        logger.info('Subscriptions: {}'.format(self.subscriptions))
        self.stream = tweepy.Stream(auth=self.api.auth, listener=self)
        self.stream.filter(track=self.subscriptions, is_async=True)
    def on_status(self, status):
        # need a stream handler, if not none run the stream handler and
        # send the status to slack, else return not exit flag
        logger.info(status.text)
    def on_connect(self):
        # Record connection times; the first connect seeds master_timestamp.
        self.stream_timestamp = datetime.datetime.now()
        logger.info('Connected to twitter at: {}'.format(
            datetime.datetime.now()))
        if not self.master_timestamp:
            self.master_timestamp = self.stream_timestamp
def log_config():
    """Configure root logging: timestamped format with logger name, level and thread."""
    fmt = (
        '%(asctime)s.%(msecs)03d %(name)-12s %(levelname)-8s '
        '[%(threadName) -12s] %(message)s'
    )
    return logging.basicConfig(format=fmt, datefmt='%Y-%m-%d %H:%M:%S')
def log_set_level():
    """Set the module-level logger's default level to DEBUG."""
    logger.setLevel(logging.DEBUG)
def init_logger():
    """Initialise logging: apply the standard format and DEBUG level.

    Delegates to log_config() and log_set_level() so the format string and
    level live in a single place instead of being duplicated here.
    """
    log_config()
    log_set_level()
def main():
    """Demo driver: start a keyword stream, then exercise pause/add/remove."""
    global exit_flag
    log_config()
    log_set_level()
    tb = WatchTwitter()
    tb.init_stream('python')
    # Loop until the module-level exit_flag is set (flipped by stream code).
    while not exit_flag:
        time.sleep(5)
        tb.pause_stream()
        time.sleep(5)
        tb.add_subscription('Trump')
        time.sleep(5)
        tb.remove_subscription('Trump')
if __name__ == "__main__":
    # Script entry point.
    main()
    pass
| tander29/backend-slackbot | twitbot.py | twitbot.py | py | 4,752 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tweepy.StreamListener",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "os.gete... |
32886590499 | import discord
from discord.ext import commands
from discord.ui import Select, View
from discord.ext.commands import bot
from discord import app_commands
class Select(discord.ui.Select):
    """Help dropdown: each option shows one ephemeral help page.

    NOTE(review): this class shadows the Select imported from discord.ui above.
    """
    def __init__(self):
        # One SelectOption per help page; `value` picks the branch in callback().
        options=[
            discord.SelectOption(label="НАВИГАЦИЯ: команды до игры", value="1", emoji="📜", description="Команды которые вы можете использовать до игры!"),
            discord.SelectOption(label="НАВИГАЦИЯ: Команды во время игры", value="2", emoji="🔦", description="Команды которые вы можете использовать во время игры!"),
            discord.SelectOption(label="НАВИГАЦИЯ: Предметы", value="3", emoji="🛠", description="Придметы которые есть в игры, и вы их можете использовать!"),
            discord.SelectOption(label="НАВИГАЦИЯ: Призраки", value="4", emoji="🎃", description="Все призраки нашей игры!")
        ]
        super().__init__(placeholder="Помощь", max_values=1, min_values=1, options=options)
    async def callback(self, interaction: discord.Interaction):
        # Send the embed matching the chosen option, visible only to the user.
        if self.values[0] == "1":
            emb = discord.Embed(title="НАВИГАЦИЯ: команды до игры", description='`Join` - присоединиться к игре \n`leave` - Отключиться от игры \n`Start` - начать игру', colour = discord.Color.og_blurple() )
            await interaction.response.send_message( embed = emb, ephemeral=True )
        elif self.values[0] == "2":
            emb = discord.Embed(title="НАВИГАЦИЯ: Команды во время игры", description='`end` - закончить ход \n`use_item` - использовать предмет (1/2/3...) \n`inventory` - показывает предметы в инвентаре \n`ghost` - весь список призраков на сервере\n`theend` - закончить игру (1/2/3)', colour = discord.Color.og_blurple() )
            await interaction.response.send_message( embed = emb, ephemeral=True )
        elif self.values[0] == "3":
            emb = discord.Embed(title="НАВИГАЦИЯ: Предметы", description='1 - соль (спасает один раз) \n2 - крест (спасает один раз) \n3 - Датчик Движения \n4 - Датчик Активности Призрака \n5 - Камера \n6 - пустая книга \n7 - книга (Да/не) \n8 - УФ-Фонарик \n 9 - успокоение для призрака(понимажает минимум до нуля) \n 10 - шкатулка призрака(понимажает максимум на 20 единиц)', colour = discord.Color.og_blurple() )
            await interaction.response.send_message( embed = emb, ephemeral=True )
        elif self.values[0] == "4":
            emb = discord.Embed(title="НАВИГАЦИЯ: Призраки", description='1 - ***Полтергейст*** \n2 - ***Демон*** \n3 - ***Тень*** \n4 - ***Мимик***\n5 - ***Дух***\nузнать лучше можно командой `ghost`')
            await interaction.response.send_message( embed = emb, ephemeral=True )
class SelectView(discord.ui.View):
    """View holding the help dropdown; interaction times out after *timeout* seconds."""
    def __init__(self, *, timeout=30):
        super().__init__(timeout=timeout)
        self.add_item(Select())
class help(commands.Cog):
    """Cog exposing the /help slash command.

    NOTE(review): the class name shadows the builtin help(); it is only used
    as a cog, so callers interact via the slash command.
    """
    def __init__(self, bot):
        self.bot = bot
    @app_commands.command(name = "help", description="Помощь по командам бота!")
    async def _help(self, interaction: discord.Interaction):
        # Ephemeral reply so only the invoking user sees the help menu.
        await interaction.response.send_message("Помощь по командам", view=SelectView(), ephemeral=True)
async def setup(bot):
    # discord.py extension entry point: register the help cog on load.
    await bot.add_cog(help(bot))
{
"api_name": "discord.ui",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "discord.SelectOption",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "discord.SelectOption",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "discord.Sele... |
37591561830 | #!/usr/bin/env python
'''
Rutgers Data Science Homework Week 3, Assignment #1
To run this script:
pybank.py [--summary_file=SUMMARY_FILE] input_file_1 input_file_2 ...
<Chan Feng> 2018-02
'''
import os
import csv
from argparse import ArgumentParser
_SUMMARY_FILE = 'pybank_summary.txt'
_SUMMARY_FORMAT = '''
Financial Analysis
--------------------------------
Total Month: {total_month}
Total Revenue: ${total_revenue:,}
Average Revenue Change: ${avg_revenue_change:,}
Greatest Increase in Revenue: {greatest_increase_month} (${greatest_increase:,})
Greatest Decrease in Revenue: {greatest_decrease_month} (${greatest_decrease:,})'''
_MONTH_LOOKUP = {
'Jan': 1,
'Feb': 2,
'Mar': 3,
'Apr': 4,
'May': 5,
'Jun': 6,
'Jul': 7,
'Aug': 8,
'Sep': 9,
'Oct': 10,
'Nov': 11,
'Dec': 12
}
_DATA_DIR = 'raw_data'
def main():
    '''
    Parse command-line arguments, accumulate monthly revenue from each
    input CSV (looked up under the raw_data directory), and print/write
    the summary.

    :return: 0 for success
    '''
    arg_parser = ArgumentParser()
    arg_parser.add_argument('input_files', type=str, nargs='+',
                            help='One or more input files')
    arg_parser.add_argument('--summary_file', type=str,
                            help='Default summary file name is ' + _SUMMARY_FILE )
    args = arg_parser.parse_args()
    data = {}
    # Totals accumulate across every input file into the same dict.
    for input_file in [os.path.join(_DATA_DIR, f) for f in args.input_files]:
        gather_data(data, input_file)
    summarize(data, args.summary_file or _SUMMARY_FILE)
    return 0
def gather_data(data, input_file):
    '''
    Accumulate monthly revenue from one CSV file into *data*.

    :param data: dict mapping normalized month ('Jan-12') to summed revenue;
                 mutated in place
    :param input_file: input CSV file name (header row, then month,revenue)
    :return: None (results are accumulated into *data*)
    '''
    with open(input_file, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        next(reader, None)  # Skip header
        for row in reader:
            month = normalize_month(row[0])
            # Sum revenue per month across all input files.
            data[month] = data.get(month, 0) + int(row[1])
def summarize(data, summary_file=None):
    '''
    Print a financial summary of *data* and optionally write it to a file.

    :param data: dict mapping normalized month ('Jan-12') to total revenue
    :param summary_file: optional summary file name
    :return: 0 for success
    '''
    total_revenue = 0
    change = 0
    total_change = 0
    total_change_cnt = 0
    prev_revenue = None
    increase_month = None
    increase_revenue = 0
    decrease_month = None
    decrease_revenue = 0
    for month in sorted(data, key=month_sort_key):
        revenue = data[month]
        total_revenue += revenue
        # Compare against None explicitly: the original truthiness test also
        # skipped deltas after any month whose revenue was exactly 0.
        if prev_revenue is not None:
            change = revenue - prev_revenue
            if change > increase_revenue:
                increase_month = month
                increase_revenue = change
            if change < decrease_revenue:
                decrease_month = month
                decrease_revenue = change
            total_change += change
            total_change_cnt += 1
        prev_revenue = revenue
    # Fewer than two months means no month-over-month deltas; the original
    # divided by zero here.
    avg_change = (int(round(total_change / total_change_cnt))
                  if total_change_cnt else 0)
    summary = _SUMMARY_FORMAT.format(
        total_month=len(data),
        total_revenue=total_revenue,
        avg_revenue_change=avg_change,
        greatest_increase_month=increase_month,
        greatest_increase=increase_revenue,
        greatest_decrease_month=decrease_month,
        greatest_decrease=decrease_revenue,
    )
    print(summary)
    if summary_file:
        with open(summary_file, 'w', newline='') as outfile:
            outfile.write(summary)
    return 0
def normalize_month(month):
    '''
    Return *month* normalized to the short 'Jan-12' form.

    Accepts either 'Jan-12' or 'Jan-2012'; four-digit years after 2000 are
    collapsed to two digits. A production system would need to be a lot
    more sophisticated.
    '''
    name, year = month.split('-')
    if int(year) <= 2000:
        return month
    return '{}-{:02d}'.format(name, int(year) - 2000)
def month_sort_key(month):
    '''
    Sort key for 'Jan-12'-style months: two-digit year first, then the
    month's number from _MONTH_LOOKUP.

    :param month: 'Jan-12' format
    :return: key string such as '12-01'
    '''
    name, year = month.split('-')
    number = _MONTH_LOOKUP[name]
    return '{}-{:02d}'.format(year, number)
if __name__ == '__main__':
    # Script entry point.
    main()
{
"api_name": "argparse.ArgumentParser",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"l... |
74229865385 | import configparser
from pathlib import Path
from flask import Flask
from flask_restful import Resource, Api
import sqlite3
from todo import DB_WRITE_ERROR, SUCCESS
DEFAULT_DB_FILE_PATH = Path.cwd().joinpath(
"." + Path.cwd().stem + "_todo.db"
)
def get_database_path(config_file: Path) -> Path:
    """Read *config_file* and return the to-do database path it names.

    The location is taken from the ``database`` key of the ``[General]``
    section of the INI-style config file.
    """
    parser = configparser.ConfigParser()
    parser.read(config_file)
    db_location = parser["General"]["database"]
    return Path(db_location)
def init_database(db_path: Path) -> int:
    """Create the to-do database with its TASKS table.

    :param db_path: filesystem location for the new SQLite database
    :return: SUCCESS, or DB_WRITE_ERROR when the database cannot be created
    """
    conn = None
    try:
        conn = sqlite3.connect(db_path) # Empty to-do database
        conn.execute("""CREATE TABLE TASKS
              (ID INTEGER PRIMARY KEY AUTOINCREMENT,
              NAME TEXT NOT NULL,
              DESCRIPTION TEXT NOT NULL,
              START_DATE DATE,
              DUE_DATE DATE,
              PRIORITY INT,
              COMPLETE INT,
              DELETED INT);""")
        # The original printed the literal string 'sqlite3.version'.
        print(sqlite3.version)
        return SUCCESS
    except (OSError, sqlite3.Error):
        # sqlite3 raises sqlite3.Error (not OSError) for database failures,
        # e.g. when TASKS already exists; the original only caught OSError.
        return DB_WRITE_ERROR
    finally:
        if conn:
            conn.close()
class DatabaseHandler(Resource):
    """flask-restful resource bound to the to-do database at *db_path*."""
    def __init__(self, db_path: Path) -> None:
        # Path of the SQLite database this resource will operate on.
        self._db_path = db_path
{
"api_name": "pathlib.Path.cwd",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pathlib.Path.cwd",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line... |
19665159792 | from flask import Flask, render_template
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.security import Security, SQLAlchemyUserDatastore, UserMixin, RoleMixin, login_required, current_user, AnonymousUser, roles_required
from flask.ext.security.utils import *
from flask.ext.security.confirmable import *
from flask.ext.principal import Principal, Permission, RoleNeed
from flask.ext.login import LoginManager
from flask_mail import Mail, Message
import hashlib
# Flask application setup for RoverPass.
# SECURITY NOTE(review): database credentials, Google/Twitter/Facebook API
# keys and the Flask secret key are hardcoded below. They should be rotated
# and moved to environment variables or a config file outside version control.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://roverpass:roverpass@localhost/roverpass'
db = SQLAlchemy(app)
BASE_URL = 'http://107.170.60.95'
# Enable {% break %}/{% continue %} in Jinja templates.
app.jinja_options['extensions'].append('jinja2.ext.loopcontrols')
SQLALCHEMY_BINDS = {
    'user_db': app.config['SQLALCHEMY_DATABASE_URI'],
    'campground_db': 'postgres://postgres:postgres@localhost/campground'
}
app.secret_key = 'goforfun'
#google api info
GOOGLE_API_KEY='AIzaSyDqQU7ovrKcbjS13lifn83dG6FLmM71hFA'
GOOGLE_URL = 'https://www.googleapis.com/customsearch/v1'
GOOGLE_CX = '011939436523733206751:6hccyfxo7qc'
#flask-security
app.config['SECURITY_POST_LOGIN'] = '/'
#flask-social for facebook and twitter
app.config['SOCIAL_TWITTER'] = {
    'consumer_key': 'HXy7JHIBI5kIpfRRPnq0EWlYp',
    'consumer_secret': 'LAto3gGXRXwJzD4aKSbMVTs3LuI41GgKKcSIutSnZi5F7Uk4sn'
}
app.config['SOCIAL_FACEBOOK'] = {
    'consumer_key' : '1498934676996386',
    'consumer_secret' : '3b89f94bb85ae16093bcc550fc9e5b99'
}
#handle permissions via principal
#to restrict view to user type, add decorator:
# @permission_name.require()
#principals = Principal(app)
#flask-login prep
login_manager = LoginManager()
login_manager.login_view = 'login' #after login, there is a "next" variable in the query string that indicates where the user was trying to access
login_manager.login_message = "You must logged in to do that."
login_manager.init_app(app)
#flask-mail
#mail = Mail(app)
#define messages here
#welcome_to_roverpass = Message()
#thank_you_for_opting_in = Message()
#forgot_password = Message()
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.ext.sqlalchemy.SQLAlchemy",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.ext.login.LoginManager",
"line_number": 49,
"usage_type": "call"
}
] |
71648501864 |
from PIL import Image, ImageDraw
import random as rd
import imageio
def create_simple_tile(size: int, bg_color:str, fg_color: str) -> Image:
    """Return a square tile: *bg_color* background with a *fg_color*
    right triangle covering the upper-left half."""
    tile_img = Image.new("RGB", (size, size))
    tile_img_draw = ImageDraw.Draw(tile_img)
    tile_img_draw.rectangle([(0, 0), (size, size)], fill = bg_color)
    tile_img_draw.polygon([(0, 0), (size, 0), (0, size)], fill = fg_color )
    return tile_img
def create_smith_tile(size: int, bg_color:str, fg_color: str) -> Image:
    """Return a square 'Smith'-style Truchet tile: two *fg_color* arcs on a
    *bg_color* background."""
    tile_img = Image.new("RGB", (size, size))
    tile_img_draw = ImageDraw.Draw(tile_img)
    tile_img_draw.rectangle([(0, 0), (size, size)], fill = bg_color)
    # Arc whose bounding box is centred on the tile's top-left corner...
    tile_img_draw.arc([(-size//2,-size//2), (size//2, size//2)],0,-270,fill = fg_color)
    # ...and one centred on the bottom-right corner (clipped by the tile).
    tile_img_draw.arc([(size//2,size//2), (size +(size//2), size+(size//2))],0,360,fill = fg_color)
    return tile_img
def create_base_tile(size: int, bg_color:str, fg_color: str, kind:str) -> Image:
    """Build the base tile for a tiling and save it as 'base_tile.gif'.

    :param size: tile edge length in pixels
    :param kind: 'simple' (triangle tile) or 'smith' (arc tile)
    :raises ValueError: for any other *kind*
    """
    if kind == 'simple':
        tile_img = create_simple_tile(size, bg_color, fg_color)
    elif kind == 'smith':
        tile_img = create_smith_tile(size, bg_color, fg_color)
    else:
        # ValueError is the idiomatic exception for a bad argument value;
        # existing `except Exception` callers still catch it.
        raise ValueError("Sorry, this tiling kind does not exist")
    imageio.imsave("base_tile.gif", tile_img)
    return tile_img
def paint_a_truchet(how_many_tiles: int, tile_size: int, kind: str) -> Image:
    """Tile a square canvas with randomly rotated copies of the base tile.

    :param how_many_tiles: tiles per side (canvas is this many tiles square)
    :param tile_size: edge length of each tile in pixels
    :param kind: base tile style, 'simple' or 'smith'
    """
    base_tile = create_base_tile(tile_size, 'white', 'black', kind)
    w, h = how_many_tiles * tile_size, how_many_tiles * tile_size
    img = Image.new("RGB", (w, h))
    for i in range(how_many_tiles):
        for j in range(how_many_tiles):
            offset = (i * tile_size, j * tile_size)
            # Randomly rotate the tile by a multiple of 90° before pasting.
            base_tile = base_tile.rotate(90 * rd.randint(0,3))
            img.paste(base_tile, offset)
    return img
return img | antigones/py-truchet | truchet.py | truchet.py | py | 1,782 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.new",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_num... |
27031060369 | import subprocess
import sys
import json
from workflow import Workflow3
log = None
GITHUB_SLUG = 'tilmanginzel/alfred-bluetooth-workflow'
def _read_devices():
    """Query blueutil for paired Bluetooth devices, as Alfred result items.

    Returns connected devices first, then alphabetically by name.
    """
    proc = subprocess.Popen(['./blueutil', '--paired', '--format=JSON'], stdout=subprocess.PIPE)
    devices_raw = json.loads(proc.stdout.read())
    bluetooth_devices = []
    for device in devices_raw:
        # Skip malformed entries missing a name, address or connection state.
        if device['name'] and device['address'] and device['connected'] is not None:
            is_connected = device['connected']
            bluetooth_devices.append({
                'type': 'file:skipcheck',
                'arg': device['address'],
                'subtitle': 'Connected' if is_connected else 'Disconnected',
                'connected': is_connected,
                'title': device['name'],
                'icon': './icons/bluetooth-' + ('connected' if is_connected else 'disconnected') + '.png'
            })
    # -connected sorts True (connected) before False, titles break ties.
    return sorted(bluetooth_devices, key = lambda x: (-x['connected'], x['title']))
def main(wf):
    """Workflow entry point: list paired devices matching the user query."""
    # Offer the self-update item first when a new release is available.
    if wf.update_available:
        wf.add_item('Update available for Bluetooth Connector!',
                    autocomplete='workflow:update',
                    valid=False)
    query = wf.args[0] if len(wf.args) else None
    devices = _read_devices()
    # Fuzzy-filter devices by title using Alfred-Workflow's built-in scorer.
    filtered_devices = wf.filter(query, devices, key=lambda k: k['title'])
    for device in filtered_devices:
        item = wf.add_item(
            type=device['type'],
            title=device['title'],
            subtitle=device['subtitle'],
            arg=device['arg'],
            icon=device['icon'],
            valid=True
        )
        item.setvar('title', device['title'])
    wf.send_feedback()
if __name__ == '__main__':
    # Create the workflow (with GitHub-based self-update) and run main().
    wf = Workflow3(update_settings={'github_slug': GITHUB_SLUG})
    log = wf.logger
    sys.exit(wf.run(main))
| tilmanginzel/alfred-bluetooth-workflow | alfred_bluetooth_workflow.py | alfred_bluetooth_workflow.py | py | 1,825 | python | en | code | 188 | github-code | 36 | [
{
"api_name": "subprocess.Popen",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "workflow.Workflow3",... |
10495520006 | from django.test import TestCase
from djlotrek.templatetags.djlotrek_filters import (
key,
is_in,
is_not_in,
get_class,
get_sorted,
media_url,
regex_match,
)
class TemplateFiltersTestCase(TestCase):
    """Unit tests for the djlotrek template filters."""
    def test_key(self):
        """
        The `key` filter returns the value stored under a key in a dict,
        or None when the key does not exist.
        """
        my_dict = {"mykey": "value"}
        self.assertEqual(key(my_dict, "mykey"), "value")
        self.assertEqual(key(my_dict, "nokey"), None)
    def test_is_in(self):
        """
        The `is_in` filter reports whether a value appears in a
        comma-separated argument string.
        """
        self.assertEqual(is_in("ciao", "hello,ciao"), True)
        self.assertEqual(is_in("hola", "hello,ciao"), False)
    def test_is_not_in(self):
        """
        The `is_not_in` filter reports whether a value is absent from a
        comma-separated argument string.
        """
        self.assertEqual(is_not_in("ciao", "hello,ciao"), False)
        self.assertEqual(is_not_in("hola", "hello,ciao"), True)
    def test_get_class(self):
        """
        The `get_class` filter returns the class name of its argument.
        """
        a = 1
        my_dict = {"mykey": "value"}
        self.assertEqual(get_class(a), "int")
        self.assertEqual(get_class(my_dict), "dict")
    def test_get_sorted(self):
        """
        The `get_sorted` filter returns a sorted version of a list.
        """
        a = [10, 2, 3, 5, 1]
        self.assertEqual(get_sorted(a), [1, 2, 3, 5, 10])
    def test_media_url(self):
        """
        The `media_url` filter returns the URL of a media object, or an
        empty string when there is none.
        """
        self.assertEqual(media_url(None), "")
        self.assertEqual(media_url({"a": 2}), "")
    def test_regex_match(self):
        """
        The `regex_match` filter returns True when the regex matches.
        """
        self.assertEqual(
            regex_match("Cats are smarter than dogs", "(.*) are (.*?) .*"), True
        )
        self.assertEqual(
            regex_match("Cats are smarter than dogs", "(.*) àre (.*?) .*"), False
        )
| lotrekagency/djlotrek | tests/test_templatefilters.py | test_templatefilters.py | py | 2,463 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "django.test.TestCase",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "djlotrek.templatetags.djlotrek_filters.key",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "djlotrek.templatetags.djlotrek_filters.key",
"line_number": 23,
"usage_ty... |
37502296937 | # https://school.programmers.co.kr/learn/courses/19344/lessons/242261
from collections import deque
dire = [[-1, 0], [1, 0], [0, -1], [0, 1]]
def CHECK(a, b, g):
    """Return True when cell (a, b) lies outside the bounds of grid *g*."""
    inside_rows = 0 <= a < len(g)
    inside_cols = 0 <= b < len(g[0])
    return not (inside_rows and inside_cols)
def BFS(graph, visit, RB):
    """Joint BFS over (red, blue) ball positions.

    State layout: [rx, ry, bx, by, depth, red_done, blue_done], where done
    flags are set when a ball stands on its goal (graph value 3 for red,
    4 for blue). Returns the depth when both are done, else 0.
    """
    global answer
    que = deque()
    # Seed the start state with depth 0 and both done-flags cleared.
    RB.extend([0, False, False])
    que.append(RB)
    # Mark both orderings of the start coordinates as visited.
    visit[RB[0]][RB[1]][RB[2]][RB[3]] = 1
    visit[RB[2]][RB[3]][RB[0]][RB[1]] = 1
    while que:
        rx, ry, bx, by, depth, R, B = que.popleft()
        if R and B:
            return depth
        # Code that moves the red ball first
        for i in range(4):
            Rx, Ry = rx + dire[i][0], ry + dire[i][1]
            if R:
                Rx, Ry = rx, ry
            if CHECK(Rx, Ry, graph) : continue
            if Rx == bx and Ry == by: continue
            # Red may not step back onto its own start cell (RB[0], RB[1]).
            if Rx == RB[0] and Ry == RB[1] : continue
            if graph[Rx][Ry] == 5 : continue
            for j in range(4):
                Bx, By = bx + dire[j][0], by + dire[j][1]
                if B:
                    Bx, By = bx, by
                if CHECK(Bx, By, graph) : continue
                if Bx == RB[2] and By == RB[3]: continue
                if visit[Rx][Ry][Bx][By]: continue
                if Bx == Rx and By == Ry: continue
                if graph[Bx][By] == 5 : continue
                visit[Rx][Ry][Bx][By] = 1
                que.append([Rx, Ry, Bx, By, depth + 1, graph[Rx][Ry] == 3, graph[Bx][By] == 4])
        # Code that moves the blue ball first
        for i in range(4):
            Bx, By = bx + dire[i][0], by + dire[i][1]
            if B:
                Bx, By = bx, by
            if CHECK(Bx, By, graph) : continue
            # Blue may not step back onto its own start cell (RB[2], RB[3]).
            if Bx == RB[2] and By == RB[3]: continue
            if Bx == rx and By == ry: continue
            if graph[Bx][By] == 5 : continue
            for j in range(4):
                Rx, Ry = rx + dire[j][0], ry + dire[j][1]
                if R:
                    Rx, Ry = rx, ry
                if CHECK(Rx, Ry, graph) : continue
                if Rx == RB[0] and Ry == RB[1] : continue
                if visit[Rx][Ry][Bx][By]: continue
                if Rx == Bx and Ry == By: continue
                if graph[Rx][Ry] == 5 : continue
                visit[Rx][Ry][Bx][By] = 1
                que.append([Rx, Ry, Bx, By, depth + 1, graph[Rx][Ry] == 3, graph[Bx][By] == 4])
    # No state with both balls on their goals is reachable.
    return 0
def solution(maze):
    """Solve the two-ball maze: locate the red (1) and blue (2) starts and
    run the joint-state BFS.

    :param maze: 2-D grid; 1/2 mark ball starts, 3/4 goals, 5 walls
    :return: minimum number of moves, or 0 when unreachable
    """
    RB = [None] * 4
    for i in range(len(maze)):
        for j in range(len(maze[0])):
            if maze[i][j] == 1:
                RB[0], RB[1] = i, j
            if maze[i][j] == 2:
                RB[2], RB[3] = i, j
    # Size the 4-D visited array to the actual maze dimensions, indexed as
    # visit[Rx][Ry][Bx][By]; the original hardcoded 4x4x4x4, which raises
    # IndexError on any maze larger than 4x4.
    n, m = len(maze), len(maze[0])
    visit = [[[[0] * m for _ in range(n)] for _ in range(m)] for _ in range(n)]
    return BFS(maze, visit, RB)
| junsgi/Algorithm | BFS_DFS/기출문제 4번_BFS.py | 기출문제 4번_BFS.py | py | 2,756 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 9,
"usage_type": "call"
}
] |
3521482660 | import pytest
import yaml
from meltano.core.behavior.canonical import Canonical
definition = {
# a, b, …, z
chr(ord("a") + i): i if i % 2 else None
for i in range(10)
}
class TestCanonical:
    """Tests for Canonical: serialization, updates, defaults and fallbacks."""
    @pytest.fixture
    def subject(self):
        # Canonical built from the module-level `definition` dict
        # (keys a..j, odd indices keep their value, even indices are None).
        return Canonical(**definition)
    def test_canonical(self, subject):
        # make sure the Nones are removed
        assert len(list(subject)) == 5
        subject.test = "hello"
        # YAML serialization must mirror the canonical key/value iteration.
        yaml_definition = "\n".join(f"{k}: {v}" for k, v in iter(subject))
        assert yaml.dump(subject).strip() == yaml_definition
    def test_false(self, subject):
        # False is a real value and must survive canonicalization (unlike None).
        subject.false_value = False
        assert subject.canonical()["false_value"] is False
    def test_nested(self, subject):
        # Nested Canonicals canonicalize recursively.
        nested = Canonical(test="value")
        subject.nested = nested
        assert Canonical.as_canonical(subject)["nested"] == Canonical.as_canonical(
            nested
        )
    def test_nested_empty(self, subject):
        # A nested Canonical with only empty values is dropped entirely.
        nested = Canonical(test="")
        subject.nested = nested
        assert "nested" not in Canonical.as_canonical(subject)
    def test_update_canonical(self, subject):
        # update() accepts another Canonical...
        subject.update(Canonical(test="value"))
        assert subject.test == "value"
    def test_update_dict(self, subject):
        # ...or a plain dict...
        subject.update({"test": "value"})
        assert subject.test == "value"
    def test_update_kwargs(self, subject):
        # ...or keyword arguments.
        subject.update(test="value")
        assert subject.test == "value"
    def test_with_attrs(self, subject):
        # with_attrs() returns a copy, optionally overriding/adding attrs.
        subject.test = "value"
        assert subject.with_attrs().canonical() == subject.canonical()
        new = subject.with_attrs(test="other_value")
        assert new.test == "other_value"
        assert new.canonical() == {**subject.canonical(), "test": "other_value"}
        new = subject.with_attrs(new_test="new_value")
        assert new.new_test == "new_value"
        assert new.canonical() == {**subject.canonical(), "new_test": "new_value"}
    def test_defaults(self, subject):
        """Defaults apply on attribute access but stay out of canonical()."""
        with pytest.raises(AttributeError):
            subject.test
        subject.test = None
        assert subject.test is None
        # This would typically be set from a Canonical subclass
        subject._defaults["test"] = lambda _: "default"
        # Default values show up when getting an attr
        assert subject.test == "default"
        # But they're not included in the canonical representation
        assert "test" not in subject.canonical()
        subject.test = "changed"
        assert subject.test == "changed"
        assert subject.canonical()["test"] == "changed"
    def test_fallbacks(self, subject):
        """Unknown attrs fall back to _fallback_to; known ones only if opted in."""
        # Calling an unknown attribute is not supported
        with pytest.raises(AttributeError):
            subject.unknown
        fallback = Canonical(unknown="value", known="value")
        # This would typically be set from a Canonical subclass
        subject._fallback_to = fallback
        # Unknown attributes fall back
        assert subject.unknown == "value"
        assert "unknown" not in subject.canonical()
        # Known attributes don't fall back
        subject.known = None
        assert subject.known is None
        # Unless we make them
        subject._fallbacks.add("known")
        assert subject.known == "value"
        assert "known" not in subject.canonical()
        # Unless there is nothing to fallback to
        subject._fallback_to = None
        assert subject.known is None
        # Defaults are still applied
        subject._defaults["known"] = lambda _: "default"
        assert subject.known == "default"
        assert "known" not in subject.canonical()
        # Until a value is set
        subject.known = "value"
        assert subject.known == "value"
        assert subject.canonical()["known"] == "value"
| learningequality/meltano | tests/meltano/core/behavior/test_canonical.py | test_canonical.py | py | 3,836 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "meltano.core.behavior.canonical.Canonical",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "yaml.dump",
"line_number": 24,
"usage_type": "call"
},
{
"api_name"... |
26510682653 | #!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Clean (i.e. remove commented messages) po’s in branches or trunk.
import os
import sys
import collections
try:
import settings
import utils
except:
from . import (settings, utils)
TRUNK_PO_DIR = settings.TRUNK_PO_DIR
BRANCHES_DIR = settings.BRANCHES_DIR
def do_clean(po, strict):
    """Remove all commented messages from one .po file, rewriting it in place.

    :param po: path of the .po file to clean
    :param strict: when True, refuse to touch a broken .po file
    :return: 0 on success, 1 when strict and the file is broken
    """
    print("Cleaning {}...".format(po))
    # Third element of parse_messages() is unused here.
    messages, states, _ = utils.parse_messages(po)
    if strict and states["is_broken"]:
        print("ERROR! This .po file is broken!")
        return 1
    # Drop every commented message before writing the file back.
    for msgkey in states["comm_msg"]:
        del messages[msgkey]
    utils.write_messages(po, messages, states["comm_msg"], states["fuzzy_msg"])
    print("Removed {} commented messages.".format(len(states["comm_msg"])))
    return 0
def main():
    """Clean the selected .po files and return the worst do_clean() status.

    The original computed `ret` but never returned it, so
    ``sys.exit(main())`` always exited 0 even when a file was broken;
    this version returns `ret` so the exit code reflects failures.
    """
    import argparse
    parser = argparse.ArgumentParser(description="Clean po’s in branches " \
                                                 "or trunk (i.e. remove " \
                                                 "all commented messages).")
    parser.add_argument('-t', '--trunk', action="store_true",
                        help="Clean po’s in trunk rather than branches.")
    parser.add_argument('-s', '--strict', action="store_true",
                        help="Raise an error if a po is broken.")
    parser.add_argument('langs', metavar='ISO_code', nargs='*',
                        help="Restrict processed languages to those.")
    args = parser.parse_args()
    ret = 0
    if args.langs:
        # Explicit language list: clean each language's po in trunk/branches.
        for lang in args.langs:
            if args.trunk:
                po = os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))
            else:
                po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
            if os.path.exists(po):
                t = do_clean(po, args.strict)
                if t:
                    ret = t
    elif args.trunk:
        # No languages given: clean every .po found in trunk.
        for po in os.listdir(TRUNK_PO_DIR):
            if po.endswith(".po"):
                po = os.path.join(TRUNK_PO_DIR, po)
                t = do_clean(po, args.strict)
                if t:
                    ret = t
    else:
        # No languages given: clean every .po of every branch language.
        for lang in os.listdir(BRANCHES_DIR):
            for po in os.listdir(os.path.join(BRANCHES_DIR, lang)):
                if po.endswith(".po"):
                    po = os.path.join(BRANCHES_DIR, lang, po)
                    t = do_clean(po, args.strict)
                    if t:
                        ret = t
    return ret
if __name__ == "__main__":
    # Script entry point; exit status comes from main().
    print("\n\n *** Running {} *** \n".format(__file__))
    sys.exit(main())
| patins1/raas4emf | build/mac/blender/blender.app/Contents/MacOS/2.64/scripts/modules/bl_i18n_utils/clean_po.py | clean_po.py | py | 3,338 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "settings.TRUNK_PO_DIR",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "settings.BRANCHES_DIR",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "utils.parse_messages",
"line_number": 42,
"usage_type": "call"
},
{
"api_na... |
32138192143 | import discord
from discord.ext import commands
import response
import re
import logging
from get_token import get_token
imageKWS = ['img','imgs','image','images','pic','pics','pictures','picture']
class botName(commands.Bot):
    """Bot subclass using the '-' command prefix and default intents."""
    # Shared class-level intents object, passed to the Bot constructor.
    intents = discord.Intents.default()
    def __init__(self):
        super().__init__(command_prefix='-', intents=self.intents)
        # NOTE(review): message_content is enabled after super().__init__ has
        # already received the intents object — confirm discord.py honours
        # mutations made at this point.
        self.intents.message_content = True
    async def close(self):
        await super().close()
async def send_message(message, userMsg, aiMsgContent, isPrivate=False):
    """Fetch an AI response for userMsg (with optional reply context) and
    send it to the channel, or DM the author when isPrivate is True."""
    try:
        res = await response.get_response(userMsg, aiMsgContent)
    except Exception as e:
        # Hide backend failures behind a generic user-facing message.
        await message.channel.send('Something went wrong, please try again later')
    else:
        if isPrivate:
            await message.author.send(res)
        else:
            await message.channel.send(res)
async def generate_img(message, userMsg):
    """Ask the image backend for a picture and post the resulting URL;
    on failure, post a fallback meme image instead."""
    try:
        res = await response.get_img(userMsg)
    except Exception as e:
        await message.channel.send('https://media.makeameme.org/created/bad-word-dont.jpg')
    else:
        await message.channel.send(res)
async def show_help(message):
    """Post the bot's command usage summary to the channel."""
    helpMsg = """
`@MentionBot yourmessage` : chat with AI\n`@MentionBot /h` : show help\n`@MentionBot /p yourmessage` : send private response\n`@MentionBot /i` : generate random image
    """
    await message.channel.send(helpMsg)
def run_discord_bot():
    """Create the bot, register its event handlers, and start it."""
    bot = botName()
    @bot.event
    async def on_ready():
        print('Bot is running')
    @bot.listen('on_message')
    async def message_monitor(message):
        # Only react to messages that @mention the bot itself.
        for x in message.mentions:
            if x==bot.user:
                # Strip the mention token to get the user's actual text.
                userMsg = re.sub(f" *<@{x.id}> *", '', message.content)
                # When replying to an earlier message, pass its content along
                # as conversation context for the AI.
                if message.reference:
                    aiMsg = await message.channel.fetch_message(message.reference.message_id)
                    aiMsgContent = aiMsg.content
                else:
                    aiMsgContent = ''
                if userMsg.startswith('/h'):
                    await show_help(message)
                elif userMsg.startswith('/p'):
                    # Delete the request and answer privately via DM.
                    await message.delete()
                    private=True
                    await send_message(message,userMsg,aiMsgContent,private)
                elif userMsg.startswith('/i') or any(word in userMsg for word in imageKWS):
                    await generate_img(message, userMsg)
                else:
                    await send_message(message,userMsg,aiMsgContent)
    bot.run(get_token("discord_token"))
# Start the bot on import/execution.
run_discord_bot()
{
"api_name": "discord.ext.commands.Bot",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "discord.Intents.default",
"line_number": 11,
"usage_type": "call"
},
{
"api_na... |
7504092122 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.common.exceptions import WebDriverException
import time
from django.test import LiveServerTestCase
MAX_WAIT = 10
class NewVisitorTest(LiveServerTestCase):
    '''Functional test: a new visitor can start a to-do list in the browser.'''
    def setUp(self):
        # Fresh Firefox instance per test.
        self.browser = webdriver.Firefox()
    def tearDown(self) -> None:
        self.browser.quit()
    def wait_for_row_in_list_table(self, row_text):
        '''Poll the to-do table until *row_text* appears.

        Retries every 0.5 s on assertion/driver errors, re-raising once
        MAX_WAIT seconds have elapsed.
        '''
        start_time = time.time()
        while True:
            try:
                table = self.browser.find_element(By.ID, 'id_list_table')
                rows = table.find_elements(By.TAG_NAME, 'td')
                self.assertIn(row_text, [ row.text for row in rows ])
                return
            except (AssertionError, WebDriverException) as e:
                if time.time() - start_time > MAX_WAIT:
                    raise e
                time.sleep(0.5)
    def test_can_start_a_list_and_retrive_it_later(self):
        # Open the home page and check the title/header mention To-Do.
        self.browser.get(self.live_server_url)
        self.assertIn('To-Do', self.browser.title)
        header_text = self.browser.find_element(By.TAG_NAME, 'h1').text
        self.assertIn('To-Do', header_text)
        inputbox = self.browser.find_element(By.ID, 'id_new_item')
        self.assertEqual(
            inputbox.get_attribute('placeholder'),
            'Enter a to-do item'
        )
        # Add two items (Russian: "Buy peacock feathers" / "Make a fly
        # from peacock feathers") and wait for each table row to appear.
        inputbox.send_keys('Купить павлиньи перья')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Купить павлиньи перья')
        inputbox = self.browser.find_element(By.ID, 'id_new_item')
        inputbox.send_keys('Сделать мушку из павлиньих перьев')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('2: Сделать мушку из павлиньих перьев')
        # Deliberate failure marker from the TDD-book workflow.
        self.fail("End test!")
| ollko/tdd_book | functional_tests/tests.py | tests.py | py | 2,043 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.test.LiveServerTestCase",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 16,
"usage_type": "name"
},
{
"api_... |
21200837689 | # coding: utf-8
import websocket
from threading import Thread
import time
from secrets import token_hex
from hashlib import sha256
import hmac
import json
class RealtimeAPIWebsocket:
def __init__(self, logger, parameters, public_handler, private_handler):
self.logger = logger
self._parameters = parameters
self._ws = None
self.auth_retry = 0
self.auth_try_time = 0
self.auth_completed = False
self.RealtimeAPIWebsocket(public_handler, private_handler)
def _auth(self):
self.auth_try_time = time.time()
if self._parameters._config['apikey'] == '' or self._parameters._config['secret'] == '':
return
now = int(time.time())
nonce = token_hex(16)
sign = hmac.new(self._parameters._config['secret'].encode(
'utf-8'), ''.join([str(now), nonce]).encode('utf-8'), sha256).hexdigest()
params = {'method': 'auth', 'params': {
'api_key': self._parameters._config['apikey'], 'timestamp': now, 'nonce': nonce, 'signature': sign}, 'id': 1}
self.logger.info("Auth process started")
self._ws.send(json.dumps(params))
def auth_check(self):
# Private channelの認証が完了していない & 前回のチャレンジから1分以上経過で再トライ
if self.auth_try_time+60 < time.time() and not self.auth_completed:
self.auth_retry = 0
self._auth()
return self.auth_completed
def RealtimeAPIWebsocket(self, public_handler, private_handler):
# ハンドラ呼び出し
def handler(func, *args):
return func(*args)
def on_message(ws, message):
messages = json.loads(message)
# auth レスポンスの処理
if 'id' in messages and messages['id'] == 1:
if 'error' in messages and self.auth_retry < 10:
self.logger.error(
'auth error: {} retry({})'.format(messages["error"], self.auth_retry))
self.auth_retry += 1
self._auth()
elif 'result' in messages and messages['result'] == True:
self.auth_retry = 0
params = [{'method': 'subscribe', 'params': {
'channel': c}} for c in private_handler]
self.logger.info("Websocket auth successed")
mention = '' if not 'websocket_auth' in self._parameters._strategy else self._parameters._strategy[
'websocket_auth']+'\n'
self.auth_completed = True
if self._parameters.no_trade_period:
mention = '' # ノートレード期間はメンション送らない(メンテ時間に毎日メンション来てウザいので)
self._parameters._message = mention+"Websocket auth successed"
self._parameters._parameter_message_send()
self.logger.debug(
"send private api subscribe {}".format(params))
ws.send(json.dumps(params))
return
if messages['method'] != 'channelMessage':
return
params = messages["params"]
channel = params["channel"]
recept_data = params["message"]
realtime_handler = public_handler.get(channel)
if realtime_handler != None:
realtime_handler(recept_data)
return
realtime_handler = private_handler.get(channel)
if realtime_handler != None:
realtime_handler(recept_data)
return
def on_error(ws, error):
self.logger.error(error)
def on_close(ws):
self.auth_completed = False
self._ws = None
self.logger.info("Websocket closed")
mention = '' if not 'websocket_close' in self._parameters._strategy else self._parameters._strategy[
'websocket_close']+'\n'
if self._parameters.no_trade_period:
mention = '' # ノートレード期間はメンション送らない(メンテ時間に毎日メンション来てウザいので)
self._parameters._message = mention+"Websocket closed"
self._parameters._parameter_message_send()
def on_open(ws):
self.auth_completed = False
self._ws = ws
self.logger.info("Websocket connected")
mention = '' if not 'websocket_connect' in self._parameters._strategy else self._parameters._strategy[
'websocket_connect']+'\n'
self._parameters._message = mention+"Websocket connected"
self._parameters._parameter_message_send()
params = [{'method': 'subscribe', 'params': {'channel': c}}
for c in public_handler]
ws.send(json.dumps(params))
self._auth()
def run(ws):
while True:
ws.run_forever()
time.sleep(3)
ws = websocket.WebSocketApp("wss://ws.lightstream.bitflyer.com/json-rpc",
on_message=on_message, on_error=on_error, on_close=on_close)
ws.on_open = on_open
websocketThread = Thread(target=run, args=(ws, ))
websocketThread.start()
| PP-lib/BFS | BFS-X/libs/realtimeapi.py | realtimeapi.py | py | 5,448 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "secrets.token_hex",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "hmac.new",
"line_number": 2... |
70714190504 | #!/usr/bin/env python
# coding: utf-8
# ## Import des librairies
# In[2]:
import numpy as np
import pandas as pd
from pandas_profiling import ProfileReport
import matplotlib.pyplot as plt
import plotly.offline as py
import seaborn as sns
import plotly.graph_objs as go
import plotly
import plotly.figure_factory as ff
from sklearn.model_selection import train_test_split
from sklearn.experimental import enable_halving_search_cv
from sklearn.model_selection import HalvingGridSearchCV
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFE
import xgboost as xgb
from xgboost import XGBClassifier
from sklearn import preprocessing
from sklearn import metrics
from sklearn.metrics import *
from sklearn.linear_model import LogisticRegressionCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_selection import RFECV
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
import warnings
warnings.filterwarnings('ignore')
# # 1.<span style="color:red"> Lecture des Datasets </span>
# In[2]:
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
# ### 1.1 <span style="color:black"> Concaténation en un DataFrame pour appliquer les mêmes changements</span>
#
# In[3]:
df = pd.concat([train,test], axis= 0)
# In[4]:
df.head()
# In[5]:
df.info()
# In[6]:
df.describe(include = 'all' )
# # 2.<span style="color:blue"> EDA </span>
# ### 2.1<span style="color:black"> Distribution de la Target </span>
# In[7]:
df['embauche'].value_counts()
# - **On remarque un fort déséquilibre dans la distribution de la classe "embauche" ce qui affectera l'apprentissage si
# on ne procède pas à une redistribution de cette variable**
# In[8]:
df['embauche'].value_counts().plot(kind='pie',title= 'distribution de la Target', autopct='%.f%%', legend = False, figsize=(12,6), fontsize=12,explode = [0, 0.2]);
# ### 2.2<span style="color:black"> Pandas profiling du Dataset </span>
# In[ ]:
profile = ProfileReport(df, title="Embauche ou pas")
profile
# **Les NaN & valeurs abérrantes présentes dans ce dataset:**
#
# - 5 observations dont l'age est supérieur/égal à 70 ans
# - 479 observations dont l'age est inférieur à 16 ans
# - 2 observations dont l'expérience est inférieur à 0
# - 104 observations dont l'expérience est supérieur à l'age
# - 1465 observations dont la note est supérieur à 100.
# - 908 NaN
#
# <span style="color:blue">**2055 Outliers & 908 NaN soit près de 15% du dataset**</span>
# <span style="color:darkorange"> **Deux méthodologies se présentent:**</span>
#
# **1- Supprimer les Outliers & les NaNs**
#
# **2- Dans la compétition Kaggle, on était face à une contrainte majeure qui était de garder le set de Test complet à
# 5000 lignes, donc on a procédé à une "harmonisation" des NaN et des valeurs aberrantes**
#
#
#
#
# <span style="color:blue">**Outliers de la variable "age"**</span>
# - **On procèdera donc à la correction de l'âge en supposant un age minimal légal de travail de 16 ans et maximal de 70 ans**
#
#
# <span style="color:blue">**Outliers de la variable "diplome"**</span>
# - **On procèdera donc à l'harmonisation de cette variable en tenant compte de la variable "age" comme suit :**
#
# **diplome bac--> age 18 ans / license --> 21 ans / master --> 23 ans / doctorat --> 27 ans**
#
#
# <span style="color:blue">**Outliers de la variable "note"**</span>
# - **Etant donné le concours d'embauche est noté de 0 à 100, on considérera toutes les notes supérieures à la limite comme arrondie à 100**
#
# <span style="color:blue">**Outliers de la variable "exp"**</span>
# - **Sur des observations ou l'expérience dépasse l'âge, cette dernière sera remplacée par la moyenne de l'expérience**
#
# <span style="color:red">**Les valeurs manquantes**</span>
# - **Pour les Nan des variables numériques on imputera la moyenne (mean)**
# - **Pour les Nan des variables catégorielles on imputera le mode (mode)**
#
# <span style="color:green">**Les variables corrélées**</span>
# - **Aucune corrélation notoire ou presque n'a été détectée à part Note/Salaire à près de 40%**
# ### 2.3<span style="color:black"> Traitement des outliers </span>
# **Boxplot Diplome/Age**
# In[9]:
plt.figure(figsize=(12,8))
sns.boxplot(x='diplome',
y='age',
data=df,
palette='winter');
# **Boxplot Diplome/Exp**
# In[10]:
plt.figure(figsize=(12,8))
sns.boxplot(x='diplome',
y='exp',
data=df,
palette='winter');
# **Boxplot Exp/Age**
# In[11]:
plt.figure(figsize=(12,8))
sns.boxplot(x='exp',
y='age',
data=df,
palette='winter');
# In[12]:
#------------#
df.loc[(df['age'] >= 70), 'age'] = round(df['age'].mean(), 0) #5 Observations
df.loc[(df['age'] < 16), 'age'] = round(df['age'].mean(), 0) #479 Observations
#------------#
df.loc[(df['diplome'] == "bac"), 'age'] = 18 #2453 observations
df.loc[(df['diplome'] == "licence"), 'age'] = 21 #7377 observations
df.loc[(df['diplome'] == "master"), 'age'] = 23 #7513 observations
df.loc[(df['diplome'] == "doctorat"), 'age'] = 27 #2547 observations
#------------#
df.loc[(df['exp'] < 0), 'exp'] = round(df['exp'].mean(), 0) #2 observations
df.loc[(df['exp'] > df['age']),'exp'] = round(df['exp'].mean(),0) #104 observations
#------------#
df.loc[(df['note'] > 100), 'note'] = 100 #1465 observations
#------------#
# ### 2.4<span style="color:black"> Traitement des NAN </span>
# In[13]:
plt.figure(figsize=(12,8))
sns.heatmap(df.isnull(),
yticklabels=False,
cbar=False,
cmap='viridis');
# In[14]:
#------Variables Numériques-------#
NUMERICAL = ["age","exp","salaire","note"]
df[NUMERICAL]= df[NUMERICAL].astype(np.float32)
df[NUMERICAL] = df[NUMERICAL].fillna(round(df[NUMERICAL].mean(), 0))
#------Variables Catégorielles-------#
CATEGORICAL = ["cheveux","sexe","diplome","specialite","dispo","date"]
df[CATEGORICAL]= df[CATEGORICAL].astype('category')
df[CATEGORICAL] = df[CATEGORICAL].fillna(df[CATEGORICAL].mode().iloc[0])
# ### 2.5<span style="color:black"> Création de nouvelles features numériques à partir de la date </span>
# In[15]:
df['date'] = pd.to_datetime(df['date'],format="%Y-%m-%d")
df['year']= df['date'].dt.year
df['month']= df['date'].dt.month
df['day']= df['date'].dt.day
# ### 2.6 <span style="color:black"> Création de nouvelles features catégoriques </span>
# In[16]:
df['q_exp'] = pd.qcut(df['exp'],q=3,precision=0)
df['q_age'] = pd.qcut(df['age'], q=3,precision=0)
df['q_note'] = pd.qcut(df['note'],q=4,precision=0)
df['q_salaire'] = pd.qcut(df['salaire'],q=5,precision=0)
# ### 2.4 <span style="color:black"> Redéfinition des Variables numériques/catégorielles/features/Target </span>
# In[17]:
NUMERICAL = ["age","exp","salaire","note","year","month","day"]
df[NUMERICAL]= df[NUMERICAL].astype(np.float32)
# In[18]:
CATEGORICAL = ["cheveux","sexe","diplome","specialite","dispo"]
df[CATEGORICAL]= df[CATEGORICAL].astype('category')
# In[19]:
FEATURES = NUMERICAL + CATEGORICAL + ["q_exp","q_age","q_note",'q_salaire']
TARGET = "embauche"
# ### 2.5 <span style="color:black"> Data Viz </span>
# **Distribution des classes de la variable AGE par rapport à la TARGET**
# In[20]:
plt.figure(figsize=(14,6))
plt.hist(df[df["embauche"]==1]["age"], edgecolor="k",density=True, alpha=0.7, label = "Embauché(e)")
plt.hist(df[df["embauche"]==0]["age"], edgecolor="k",density=True, alpha=0.7, label = "Pas embauché(e)")
plt.xlabel("Age")
plt.ylabel("Frequency")
plt.legend()
plt.show()
# **Distribution des classes de la variable EXP par rapport à la TARGET**
# In[21]:
plt.figure(figsize=(14,6))
plt.hist(df[df["embauche"]==1]["exp"], edgecolor="k",density=True, alpha=0.7, label = "Embauché(e)")
plt.hist(df[df["embauche"]==0]["exp"], edgecolor="k",density=True, alpha=0.7, label = "Pas embauché(e)")
plt.xlabel("Experience")
plt.ylabel("Frequency")
plt.legend()
plt.show()
# **Distribution des classes de la variable NOTE par rapport à la TARGET**
# In[22]:
plt.figure(figsize=(14,6))
plt.hist(df[df["embauche"]==1]["note"], edgecolor="k",density=True, alpha=0.7, label = "Embauché(e)")
plt.hist(df[df["embauche"]==0]["note"], edgecolor="k",density=True, alpha=0.7, label = "Pas embauché(e)")
plt.xlabel("Note")
plt.ylabel("Frequency")
plt.legend()
plt.show()
# **Distribution des classes de la variable SALAIRE par rapport à la TARGET**
# In[23]:
plt.figure(figsize=(14,6))
plt.hist(df[df["embauche"]==1]["salaire"], edgecolor="k",density=True, alpha=0.7, label = "Embauché(e)")
plt.hist(df[df["embauche"]==0]["salaire"], edgecolor="k",density=True, alpha=0.7, label = "Pas embauché(e)")
plt.xlabel("Salaire")
plt.ylabel("Frequency")
plt.legend()
plt.show()
# **Distribution des classes de la variable YEAR par rapport à la TARGET**
# In[24]:
plt.figure(figsize=(14,6))
sns.countplot(data=df, x="year",hue="embauche", edgecolor="k")
plt.xlabel("Year")
plt.ylabel("Count")
plt.show()
# **Distribution des classes de la variable MONTH par rapport à la TARGET**
# In[25]:
plt.figure(figsize=(14,6))
sns.countplot(data=df, x="month",hue="embauche", edgecolor="k")
plt.xlabel("Month")
plt.ylabel("Count")
plt.show()
# **Distribution des classes de la variable DAY par rapport à la TARGET**
# In[26]:
plt.figure(figsize=(14,6))
sns.countplot(data=df, x="day",hue="embauche", edgecolor="k")
plt.xlabel("day")
plt.ylabel("Count")
plt.show()
# **Distribution de la variable CHEVEUX par rapport à la TARGET**
# In[27]:
plt.figure(figsize=(14,6))
sns.countplot(data=df, x="cheveux",hue="embauche", edgecolor="k")
plt.xlabel("Cheveux")
plt.ylabel("Count")
plt.show()
# **Distribution de la variable DIPLOME par rapport à la TARGET**
# In[28]:
plt.figure(figsize=(14,6))
sns.countplot(data=df, x="diplome",hue="embauche", edgecolor="k")
plt.xlabel("Diplome")
plt.ylabel("Count")
plt.show()
# **Distribution de la variable SPECIALITE par rapport à la TARGET**
# In[29]:
plt.figure(figsize=(14,6))
sns.countplot(data=df, x="specialite",hue="embauche", edgecolor="k")
plt.xlabel("specialite")
plt.ylabel("Count")
plt.show()
# **Distribution de la variable DISPO par rapport à la variable SEXE**
# In[30]:
plt.figure(figsize=(14,6))
sns.countplot(data=df, x="dispo",hue="embauche", edgecolor="k")
plt.xlabel("Dispo")
plt.ylabel("Count")
plt.show()
# ### 2.6 <span style="color:black"> Tests Statistiques </span>
# In[31]:
import scipy
# **CHEVEUX / SALAIRE**
# - Hypothèse H0 : Pas de relation statistiquement significative
# In[32]:
data_blond =df[df["cheveux"]=="blond"]
data_brun = df[df["cheveux"]=="brun"]
data_roux =df[df["cheveux"]=="roux"]
data_chatain =df[df["cheveux"]=="chatain"]
stat, p_value = scipy.stats.kruskal(data_blond["salaire"], data_brun["salaire"],data_roux["salaire"] ,data_chatain["salaire"])
print('Statistics=%.3f, p_value=%.3f' % (stat, p_value))
# interpret
alpha = 0.05
if p_value > alpha:
print('Même distributions (Hypothèse H0 non rejetée)')
else:
print('Distributions différentes (Hypothèse H0 rejetée)')
# **SPECIALITE / SEXE**
# - Hypothèse H0 : Pas de relation statistiquement significative
# In[33]:
data_forage =df[df["specialite"]=="forage"]
data_geologie = df[df["specialite"]=="geologie"]
data_detective =df[df["specialite"]=="detective"]
data_archeologie =df[df["specialite"]=="archeologie"]
stat, p_value = scipy.stats.kruskal(data_forage["sexe"], data_geologie["sexe"],data_detective["sexe"] ,
data_archeologie["sexe"])
print('Statistics=%.3f, p_value=%.3f' % (stat, p_value))
# interpret
alpha = 0.05
if p_value > alpha:
print('Même distributions (Hypothèse H0 non rejetée)')
else:
print('Distributions différentes (Hypothèse H0 rejetée)')
# **EXP / NOTE**
# - Hypothèse H0 : Pas de relation statistiquement significative
# In[34]:
data_exp =df["exp"]
data_note = df["note"]
stat, p_value = scipy.stats.kruskal(data_exp, data_note)
print('Statistics=%.3f, p_value=%.3f' % (stat, p_value))
# interpret
alpha = 0.05
if p_value > alpha:
print('Même distributions (Hypothèse H0 non rejetée)')
else:
print('Distributions différentes (Hypothèse H0 rejetée)')
# In[35]:
plt.figure(dpi=150)
sns.heatmap(df.corr('spearman'),annot=False,cmap='rocket',lw=1);
# In[36]:
from scipy.stats import chi2_contingency
# In[37]:
def test_chi_2(QualVar,target,alpha):
QualVar = pd.DataFrame(QualVar)
liste_chi2 = []
liste_chi2_name = []
# ici on créé le tableau de contingence pour réaliser notre test :
for i in range(len(list(QualVar.columns))):
table = pd.crosstab(QualVar[list(QualVar.columns)[i]],QualVar[target])
stat, p, dof, expected = chi2_contingency(table)
if p <= alpha:
liste_chi2.append(i)
else:
pass
for j in liste_chi2:
liste_chi2_name.append([i.encode('ascii', 'ignore') for i in QualVar.columns][j])
return liste_chi2_name
# In[38]:
liste_chi2_name = test_chi_2(df,"embauche",0.05)
liste_chi2_name
# Les variables listées ci-dessus ont une p_value< 5% et donc présente une significativité statistique pour expliquer la TARGET
# # 3.<span style="color:green"> PREPROCESSING </span>
# ### 3.1<span style="color:black"> Label Encoding </span>
# **Le choix s'est porté sur le label encoding pour éviter une augumentation de la dimension créée par le One hot encoding par exemple, et ce pour plus de performance lors des Tunnings des hyperparamètres**
# In[39]:
df_c=df.copy()
# In[40]:
label_encoder = preprocessing.LabelEncoder()
df_c[CATEGORICAL]=df[CATEGORICAL].apply(label_encoder.fit_transform)
df_c[["q_exp","q_age","q_note",'q_salaire']] = df[["q_exp","q_age","q_note",'q_salaire']].apply(label_encoder.fit_transform)
df_c[TARGET]=df[TARGET]
# ### 3.2<span style="color:black"> Transformation du type </span>
# In[41]:
df_c['age'] = df_c['age'].astype(np.uint8)
df_c['exp'] = df_c['exp'].astype(np.uint8)
df_c['salaire'] = df_c['salaire'].astype(np.uint8)
df_c['cheveux'] = df_c['cheveux'].astype(np.uint8)
df_c['note'] = df_c['note'].astype(np.float16)
df_c['sexe'] = df_c['sexe'].astype(np.uint8)
df_c['diplome'] = df_c['diplome'].astype(np.uint8)
df_c['specialite'] = df_c['specialite'].astype(np.uint8)
df_c['dispo'] = df_c['dispo'].astype(np.uint8)
df_c['year'] = df_c['year'].astype(np.int16)
df_c['month'] = df_c['month'].astype(np.int16)
df_c['day'] = df_c['day'].astype(np.int16)
df_c['q_exp'] = df_c['q_exp'].astype(np.int16)
df_c['q_age'] = df_c['q_age'].astype(np.int16)
df_c['q_salaire'] = df_c['q_salaire'].astype(np.int16)
df_c['q_note'] = df_c['q_note'].astype(np.int16)
# ### 3.3<span style="color:black"> Train/Test Split </span>
# In[42]:
train = df_c.loc[~df_c[TARGET].isna()]
# In[43]:
test = df_c.loc[df_c[TARGET].isna()]
# ### 3.4<span style="color:black"> Oversampling de la classe minoritaire "embauche = 1" </span>
# **Le SMOTETomek procédera à la création de valeurs synthétiques similaires aux vraies valeurs présentes dans le dataset avec une Embauche = 1**
# In[44]:
from imblearn.combine import SMOTETomek
# In[45]:
smotetomek_X = train[FEATURES]
smotetomek_Y = train[TARGET]
smote_tomek = SMOTETomek(random_state=68, sampling_strategy=0.99) #La classe 1 sera 99% de la classe 0
X_resampled, y_resampled = smote_tomek.fit_resample(train[FEATURES], train[TARGET])
smotetomek_X = pd.DataFrame(data = X_resampled,columns=FEATURES)
smotetomek_Y = pd.DataFrame(data = y_resampled,columns=['embauche'])
print ((smotetomek_Y['embauche'] == 1).sum())
print ((smotetomek_Y['embauche'] == 0).sum())
# In[46]:
train_X = smotetomek_X.copy()
# In[47]:
train_Y = smotetomek_Y.copy()
# In[48]:
train_X = train_X[FEATURES]
train_Y = train_Y[TARGET]
test_X = test[FEATURES]
# In[49]:
df_oversampler = pd.concat([train_X,train_Y], axis= 1)
# **Distribution de la target après Oversampling**
# In[50]:
df_oversampler['embauche'].value_counts().plot(kind='pie',title= 'distribution de la Target', autopct='%.f%%', legend = False, figsize=(12,6), fontsize=12,explode = [0, 0.2]);
# ### 3.4<span style="color:black"> Standardisation des données</span>
# **Remarque** :
#
# **La standardisation des données n'est pas nécessaire quand on utilise des algorithmes d'apprentissage non sensibles à l'amplitude des variables tels que**
# - La régression logistique
# - Le Random Forest
# - Les modèles de Gradient boosting
#
# **Hors dans ce projet, on utilisera aussi le SVC, DTC & KNN qui eux sont sensibles à l'amplitude des variables**
# In[51]:
train_X.std()
# In[52]:
test_X.std()
# In[53]:
scaler = StandardScaler()
train_X = scaler.fit_transform(train_X)
test_X = scaler.fit_transform(test_X)
# In[54]:
train_X = train_X.astype('float32')
test_X = test_X.astype('float32')
# # 4.<span style="color:Orange"> MODELISATION </span>
# - Le projet présenté à pour but une classification de la TARGET entre 0 & 1
#
# - On choisira donc des Algorithmes d'apprentissage supervisé pour CLASSIFICATION
#
# - Régression Logistique /Decision Tree/ SVC / KNN / Random Forest / Gradient boosting / XGBoost
#
# - La comparaison des modèles se fera principalement sur le score AUC
#
# - Le tunning des hyperparamètres se fera avec HalvingGridSearchCV qui est une nouvelle classe de tunning des hyperparamètres beaucoup plus rapide que le GridsearchCV avec pratiquement les mêmes résultats
# ### 4.1<span style="color:black"> Tunning des Hyperparamètres avec HalvingGridSearchCV </span>
# In[55]:
def tunning(param_grid,model,X,Y):
halving = HalvingGridSearchCV(model, param_grid = param_grid,scoring="roc_auc", min_resources = "exhaust",
n_jobs = -1,cv = 5, factor = 3, verbose = 1)
halving.fit(X, Y)
print ("Best Score: {}".format(halving.best_score_))
print ("Best params: {}".format(halving.best_params_))
# ### 4.2<span style="color:black"> Evaluation du modèle </span>
# In[56]:
def evaluation(model,z,X,Y):
model.fit(X,Y)
predict = model.predict(X)
proba = model.predict_proba(X)
fig = plt.figure()
#roc_auc_score
model_roc_auc = metrics.roc_auc_score(Y,predict)
#Confusion matrix
conf_matrix = metrics.confusion_matrix(Y,predict)
#plot confusion matrix
plot1 = go.Heatmap(z = conf_matrix ,
x = ["Pred_0","Pred_1"],
y = ["Real_0","Real_1"],
showscale = True,autocolorscale = True,
name = "matrix", transpose = True, visible = True)
#plot roc auc
a,b,c = metrics.roc_curve(Y,proba[:,1])
plot2 = go.Scatter(x = a,y = b,
name = "Roc : " + str(model_roc_auc),
line = dict(color = ('rgb(22, 96, 167)'),width = 2))
plot3 = go.Scatter(x = [0,1],y=[0,1],
line = dict(color = ('rgb(205, 12, 24)'),width = 2,
dash = 'dot'))
#plot coefficients/Features
if z == "coefficients" :
coefficients = pd.DataFrame(model.coef_.ravel())
elif z== "features" :
coefficients = pd.DataFrame(model.feature_importances_)
column_df = pd.DataFrame(FEATURES)
coef_sumry = (pd.merge(coefficients,column_df,left_index= True,
right_index= True, how = "left"))
coef_sumry.columns = ["coefficients","features"]
coef_sumry = coef_sumry.sort_values(by = "coefficients",ascending = False)
plot4 = trace4 = go.Bar(x = coef_sumry["features"],y = coef_sumry["coefficients"],
name = "coefficients",
marker = dict(color = coef_sumry["coefficients"],
colorscale = "Picnic",
line = dict(width = .6,color = "black")))
#Subplots
fig = plotly.subplots.make_subplots(rows=2, cols=2, specs=[[{}, {}], [{'colspan': 2}, None]],
subplot_titles=('Confusion Matrix',
'Receiver operating characteristic',
'Feature Importances'),print_grid=False)
fig.append_trace(plot1,1,1)
fig.append_trace(plot2,1,2)
fig.append_trace(plot3,1,2)
fig.append_trace(plot4,2,1)
fig['layout'].update(showlegend=False, title="Model performance" ,
autosize = False,height = 900,width = 800,
plot_bgcolor = 'rgba(240,240,240, 0.95)',
paper_bgcolor = 'rgba(240,240,240, 0.95)',
margin = dict(b = 195))
fig["layout"]["xaxis2"].update(dict(title = "false positive rate"))
fig["layout"]["yaxis2"].update(dict(title = "true positive rate"))
fig["layout"]["xaxis3"].update(dict(showgrid = True,tickfont = dict(size = 10),
tickangle = 90))
py.iplot(fig);
print ("ROC-AUC : ",model_roc_auc,"\n")
print("score F1 : ", metrics.f1_score(Y, predict),"\n")
print ("Accuracy Score : ",metrics.accuracy_score(Y,predict))
# In[57]:
def evaluation_knn(model,X,Y):
model.fit(X,Y)
predict = model.predict(X)
proba = model.predict_proba(X)
#roc_auc_score
model_roc_auc = metrics.roc_auc_score(Y,predict)
#plot confusion matrix
plot_confusion_matrix(model, X, Y)
plt.show();
print ("ROC-AUC : ",model_roc_auc,"\n")
print("score F1 : ", metrics.f1_score(Y, predict),"\n")
print ("Accuracy Score : ",metrics.accuracy_score(Y,predict))
# In[58]:
def MetricsMaker(model):
# Save Models
# Splits
kf = StratifiedKFold(n_splits=5,shuffle=True,random_state=2021)
split = list(kf.split(train_X,train_Y))
Metrics = {}
Precision, Accuracy, F1_score, Recall_score, ROC_AUC = 0, 0, 0, 0, 0
for i,(train_index, test_index) in enumerate(split):
data_train = train_X[train_index]
y_train = train_Y[train_index]
data_test = train_X[test_index]
y_test = train_Y[test_index]
# create a fitted model
fittedModel = model.fit(data_train,y_train)
y_hat_proba = fittedModel.predict_proba(data_test)[:,1]
y_hat = fittedModel.predict(data_test)
# log_l =
Precision += metrics.precision_score(y_test,y_hat)
Accuracy += metrics.accuracy_score(y_test,y_hat)
F1_score += metrics.f1_score(y_test,y_hat)
Recall_score += metrics.recall_score(y_test,y_hat)
ROC_AUC += metrics.roc_auc_score(y_test,y_hat)
Metrics['Precision'] = Precision / 5
Metrics['Accuracy'] = Accuracy / 5
Metrics['F1_score'] = F1_score / 5
Metrics['Recall_score'] = Recall_score / 5
Metrics['ROC-AUC'] = ROC_AUC / 5
return Metrics
# In[59]:
# Les metrics scores de chaque modeles seront stockés ici!
Metrics = {}
# ### 4.2<span style="color:black"> Régression Logistique </span>
# In[60]:
parameters = {'Cs': [1, 2, 3, 4, 5, 6 ,7 ,8 ,9 ,10]
}
logit = LogisticRegressionCV(random_state= 33,cv=10,max_iter=10000,verbose=1, n_jobs = -1)
#tunning(parameters,logit,train_X,train_Y)
# In[61]:
logReg = LogisticRegressionCV(Cs= 6, random_state= 33,cv=10,max_iter=10000,verbose=1)
Metrics['LogisticRegressionCV'] = MetricsMaker(logReg)
# In[62]:
#Evaluation avec le modèle tunné
logit = LogisticRegressionCV(Cs= 6, random_state= 33,cv=10,max_iter=10000,verbose=1)
evaluation(logit,"coefficients",train_X,train_Y)
# ### 4.3<span style="color:black"> Decision Tree Classifier </span>
# In[63]:
d_t_c = DecisionTreeClassifier(random_state=33)
parameters = {'max_depth': [1, 2, 3, 4, 5, 6, 7],
'max_features': [1, 2, 3, 4, 5],
'criterion': ['gini','entropy'],
'splitter': ['best'],
}
#tunning(parameters,d_t_c,train_X,train_Y.values.ravel())
# In[64]:
D_T_C = DecisionTreeClassifier(random_state=33, criterion = "gini", max_depth=7, max_features = 5, splitter = "best")
Metrics['DecisionTreeClassifier'] = MetricsMaker(D_T_C)
# In[65]:
#Evaluation avec le modèle tunné
d_t_c = DecisionTreeClassifier(random_state=33, criterion = "gini", max_depth=7, max_features = 5, splitter = "best")
evaluation(d_t_c,"features",train_X,train_Y)
# ### 4.4<span style="color:black"> SVC </span>
# **Le Tunning s'est fait un hyperparamètre à la fois malgrè que cela peut fausser les meilleurs combinaisons mais pour éviter une attente trop longue lors de l'execution**
# In[66]:
s_v_c = SVC(random_state=33,verbose=2)
parameters = {'kernel': ["linear","rbf","poly"],
'gamma': [0.1, 1, 10, 100],
'C': [0.1, 1, 10, 100,1000],
'degree': [0, 1, 2, 3, 4, 5, 6]
}
#tunning(parameters,s_v_c,train_X,train_Y.values.ravel())
# In[67]:
S_V_C = SVC(random_state=33, kernel = "rbf", gamma=0.1, C = 10, degree = 4,probability=True,verbose=2 )
Metrics['SVC'] = MetricsMaker(S_V_C)
# In[68]:
#Evaluation avec le modèle tunné
s_v_c = SVC(random_state=33, kernel = "rbf", gamma=0.1, C = 10, degree = 4,probability=True,verbose=2 )
evaluation_knn(s_v_c,train_X,train_Y) #Since rbf Kernel is used
# ### 4.5<span style="color:black"> KNN Classifier </span>
# In[69]:
k_n_n = KNeighborsClassifier(algorithm='auto', n_jobs = -1)
parameters = {
'leaf_size':[5,10,20,30],
'n_neighbors':[3,4,5,8,10,11,12],
'weights' : ['uniform', 'distance'],
'p' : [1,2]
}
#tunning(parameters,k_n_n,train_X,train_Y)
# In[70]:
K_N_N = KNeighborsClassifier(algorithm='auto',leaf_size= 20,n_neighbors= 11, p=1, weights = "distance", n_jobs = -1)
Metrics['KNeighborsClassifier'] = MetricsMaker(K_N_N)
# In[71]:
#Evaluation avec le modèle tunné
k_n_n = KNeighborsClassifier(algorithm='auto',leaf_size= 20,n_neighbors= 11, p=1, weights = "distance", n_jobs = -1)
evaluation_knn(k_n_n,train_X,train_Y)
# ### 4.6<span style="color:black"> Random Forest Classifier </span>
# In[72]:
r_f_c = RandomForestClassifier(random_state=33, verbose=2,n_jobs = -1)
parameters = {
'n_estimators': [5,10,15,20,30,40,50,60,70,80],
'min_samples_split': [3, 5, 10],
'max_depth': [2, 5, 15, 30,50,70,80],
'max_features': ['auto', 'sqrt'],
'bootstrap': [True, False],
'criterion': ['gini','entropy']
}
#tunning(parameters,r_f_c,train_X,train_Y.values.ravel())
# In[73]:
R_F_C = RandomForestClassifier(random_state=33, verbose=2, n_estimators = 70,
min_samples_split= 3, max_depth = 70, max_features = "auto",
bootstrap = "False", criterion = "gini")
Metrics['RandomForestClassifier'] = MetricsMaker(R_F_C)
# In[74]:
#Evaluation avec le modèle tunné
r_f_c = RandomForestClassifier(random_state=33, verbose=2, n_estimators = 70,
min_samples_split= 3, max_depth = 70, max_features = "auto",
bootstrap = "False", criterion = "gini")
evaluation(r_f_c,"features",train_X,train_Y)
# ### 4.7<span style="color:black"> Gradient boosting Classifier </span>
# In[75]:
g_b_c = GradientBoostingClassifier (random_state = 33, verbose=2)
parameters = {'learning_rate' : [0.01,0.02,0.03,0.04,0.06,0.08,0.09],
'loss' : ["deviance", "exponential"],
'subsample' : [0.9, 0.5, 0.2, 0.1],
'n_estimators' : [100,500,1000, 1500],
'max_depth' : [4,6,8,10],
'criterion' : ["friedman_mse", "mse"],
'min_samples_split' : [2,4,6,8,10,12,14],
'min_samples_leaf' : [1,2,3,4],
'max_features' : ["auto", "sqrt", "log2"]
}
#tunning(parameters,g_b_c,train_X,train_Y.values.ravel())
# In[76]:
G_B_C = GradientBoostingClassifier(learning_rate=0.09, n_estimators=500, max_depth = 8, min_samples_split = 12,
max_features='auto', subsample=0.1,criterion= "friedman_mse", min_samples_leaf = 2,
loss = "exponential", random_state=33, verbose = 1)
Metrics['GradientBoostingClassifier'] = MetricsMaker(G_B_C)
# In[77]:
#Evaluation avec le modèle tunné
g_b_c = GradientBoostingClassifier(learning_rate=0.09, n_estimators=500, max_depth = 8, min_samples_split = 12,
max_features='auto', subsample=0.1,criterion= "friedman_mse", min_samples_leaf = 2,
loss = "exponential", random_state=33, verbose = 1)
evaluation(g_b_c,"features",train_X,train_Y)
# ### 4.8<span style="color:black"> XGBoost Classifier </span>
# In[78]:
x_g_c = XGBClassifier(use_label_encoder=False)
parameters = {'nthread':[4,5,6,8,10,12],
'learning_rate': [0.01,0.03,0.05,0.1,0.2,0.3,0.4,0.5],
'max_depth': range (2, 21, 1),
'min_child_weight': [10,12,14,16,18,20],
'subsample': [0.6,0.8,1],
'colsample_bytree': [0.2,0.4,0.5,0.7],
'n_estimators': [100,200,300,400,500]
}
#tunning(parameters,x_g_c,train_X,train_Y.values.ravel())
# In[79]:
X_G_B = XGBClassifier(learning_rate = 0.4,nthread = 10,max_depth = 16, subsample=0.8,colsample_bytree=0.5
,n_estimators = 200, min_child_weight = 16,
use_label_encoder=False, random_state = 33, verbosity=1)
Metrics['XGBClassifier'] = MetricsMaker(X_G_B)
# In[80]:
#Evaluation avec le modèle tunné
x_g_c = XGBClassifier(learning_rate = 0.4,nthread = 10,max_depth = 16, subsample=0.8,colsample_bytree=0.5
,n_estimators = 200, min_child_weight = 16,
use_label_encoder=False, random_state = 33, verbosity=1)
evaluation(x_g_c,"features",train_X,train_Y.values.ravel())
# # 5.<span style="color:Turquoise"> FEATURES SELECTION </span>
# ### 5.1<span style="color:black"> Select KBest </span>
# In[81]:
kbest = SelectKBest(score_func=f_classif, k='all') #Score_func peut etre f_classif ou chi2
fit = kbest.fit(train_X, train_Y.values.ravel())
# In[82]:
np.set_printoptions(precision=3) #Chaque score correspond à une colonne, les variables a retenir sont celles qui ont le meilleur score
d = { label: value for label, value in zip(FEATURES, fit.scores_) }
d
# ### 5.1<span style="color:black"> RFECV avec XGboost Classifier tunné </span>
# In[83]:
train_X = pd.DataFrame(train_X, columns = FEATURES)
# In[84]:
rfecv = RFECV(estimator=x_g_c,cv=5,scoring="f1") ## on peut choisir le min_features_to_select( 1 par défaut)
rfecv = rfecv.fit(train_X, train_Y.values.ravel())
print('Nombre optimal de variables :', rfecv.n_features_)
print('Les meilleures variables :', train_X.columns[rfecv.support_])
best_features = list(train_X.columns[rfecv.support_])
# # 5.<span style="color:Purple"> PREDICTION </span>
# **Les prédictions de la base test se feront avec chaque modèle tunné pour pouvoir comparer le meilleur modèle de classification**
# **Les métriques de comparaison**
#
# `recall` : Nombre de classes trouvées par rapport aux nombres entiers de cette même classe.
#
# `precision` : Combien de classes ont été correctements classifiées
#
# `f1-score` : La moyenne harmonique entre precision & recall
# ## Comparaison
# In[85]:
pd.DataFrame(Metrics)
| bg-mohamed/RFS677-Y | Machine Learning/Machine_Learning_Classification.py | Machine_Learning_Classification.py | py | 31,870 | python | fr | code | 1 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pandas.concat... |
29226135271 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from pymongo import MongoClient
import numpy as np
from tqdm import tqdm
def insertInfo(df):
    """Insert one user document per row of *df* into ``Infodb.userInfo``.

    Each row must provide ``id``, ``name``, ``birth`` and ``embedding``
    (a numpy array, stored as raw bytes under the ``embeddings`` key).
    Rows whose ``id`` already exists are skipped with a message instead of
    aborting the whole import.
    """
    from pymongo.errors import DuplicateKeyError

    client = MongoClient('mongodb://localhost:27017/')
    infodb = client.Infodb
    userInfo = infodb.userInfo
    for index, instance in tqdm(df.iterrows(), total=df.shape[0]):
        ID = instance["id"]
        name = instance["name"]
        birth = instance["birth"]
        # Serialize the embedding array to raw bytes for storage.
        embeddings = instance["embedding"].tobytes()
        user = {'_id': ID, 'name': name, 'birth': birth, 'embeddings': embeddings}
        try:
            userInfo.insert_one(user)
        except DuplicateKeyError:
            # Only swallow duplicate-key errors; the previous bare ``except``
            # also hid connection and validation failures.
            print('ID already exists.')
def load_info(ID):
    """Return ``(name, embedding)`` for the user with primary key *ID*.

    The embedding is decoded from the raw bytes stored by ``insertInfo``
    back into a float32 numpy array.

    Raises:
        KeyError: if no document with this ``_id`` exists. (The previous
        version crashed with an unrelated ``NameError`` in that case.)
    """
    client = MongoClient('mongodb://localhost:27017/')
    infodb = client.Infodb
    userInfo = infodb.userInfo
    result = userInfo.find_one({"_id": ID}, {'name': True, 'embeddings': True})
    if result is None:
        raise KeyError(f"No user with id {ID!r}")
    name = result['name']
    embedding = np.frombuffer(result["embeddings"], dtype='float32')
    return name, embedding
| inhye6-6/project_face_authentication | connect_db.py | connect_db.py | py | 1,131 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.frombuffer",
... |
71578894183 | #!/usr/bin/env python
from __future__ import print_function
import vtk
def main():
    """Intersect a line segment with the unit square lying in the x-y plane."""
    # Corner points of the square.
    corners = vtk.vtkPoints()
    for xy in ((0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)):
        corners.InsertNextPoint(xy[0], xy[1], 0.0)

    # Build the polygon cell from the four corners.
    square = vtk.vtkPolygon()
    square.GetPoints().DeepCopy(corners)
    square.GetPointIds().SetNumberOfIds(4)
    for idx in range(4):
        square.GetPointIds().SetId(idx, idx)

    # Segment endpoints (crosses the square's plane at z == 0) and tolerance.
    seg_start = [0.1, 0, -1.0]
    seg_end = [0.1, 0, 1.0]
    tolerance = 0.001

    # Output slots filled in by IntersectWithLine.
    t = vtk.mutable(0)  # parametric coordinate (0 -> seg_start, 1 -> seg_end)
    x = [0.0, 0.0, 0.0]  # intersection point
    pcoords = [0.0, 0.0, 0.0]
    subId = vtk.mutable(0)

    iD = square.IntersectWithLine(seg_start, seg_end, tolerance, t, x, pcoords, subId)
    print("intersected? ", 'Yes' if iD == 1 else 'No')
    print("intersection: ", x)
if __name__ == '__main__':
main()
| lorensen/VTKExamples | src/Python/GeometricObjects/PolygonIntersection.py | PolygonIntersection.py | py | 1,059 | python | en | code | 319 | github-code | 36 | [
{
"api_name": "vtk.vtkPoints",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "vtk.vtkPolygon",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "vtk.mutable",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "vtk.mutable",
"line_numbe... |
#!/Users/shounak/anaconda3/bin/python3
#This program plots histograms to depict genome-wide methylation patterns
#for 5-aza treated vs. control samples, restricted to one methylation
#context (CpG/CHG/CHH) and validated against a look-up table of sites.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import argparse
import matplotlib
import matplotlib.axes
matplotlib.rcParams['font.family']="monospace"
matplotlib.rcParams['font.monospace']="Courier New"
matplotlib.rcParams['font.size']=24
#argument handling
optparse = argparse.ArgumentParser()
optparse.add_argument("-c","--csvfile",help="list of methylation ratios")
optparse.add_argument("-t","--type",help="methlyation type:CpG, CHG or CHH")
optparse.add_argument("-l","--lookup",help="look-up table to validate methylation sites")
optparse.add_argument("-d","--date",help="day in M/DD format, enclose within quotes")
optparse.add_argument("-o","--outfile",help="output histogram file basename")
argstr = optparse.parse_args()
#Read in the data (tab-separated methylation-ratio table)
reads=pd.read_csv(argstr.csvfile,sep='\t',low_memory=False)
#Read in the validation table; its "Scaffold" column is renamed to "chrom"
#so it can be joined against the reads table on (chrom, start)
val_tab=pd.read_csv(argstr.lookup,sep=',').rename(columns={"Scaffold":"chrom"}).drop_duplicates()
#Take the intersection of the reads and validation table to filter out the valid calls
ratios=pd.merge(reads,val_tab,on=["chrom","start"],how='inner')
#ratios.to_csv(argstr.outfile+"_all_validated.csv",sep=',',index=False)
#extract the relevant columns; need to replace this with generalized column list
#For 5-aza treated samples: per-site mean ratio over the three replicates,
#restricted to the requested methylation context
aza_means=ratios.loc[:,(["Aza_1 "+argstr.date+" meth_ratio","Aza_2 "+argstr.date+" meth_ratio","Aza_3 "+argstr.date+" meth_ratio"])].loc[ratios['Type']==argstr.type].mean(axis=1).to_numpy()
#For control (untreated) samples
co_means=ratios.loc[:,(["Co_1 "+argstr.date+" meth_ratio","Co_2 "+argstr.date+" meth_ratio","Co_3 "+argstr.date+" meth_ratio"])].loc[ratios['Type']==argstr.type].mean(axis=1).to_numpy()
#means=pd.concat([aza_means,co_means],axis=1).rename(columns={0:"AZA",1:"CON"})
#hist_data=means.to_numpy()
#create a histogram for the 5-aza methylation calls...
plt.hist(aza_means,bins=np.arange(0.0,1,0.05),alpha=0.5,color="blue",label="AZA")
#... and the control methylation calls for a given methylation type (CpG,CHG or CHG)
plt.hist(co_means,bins=np.arange(0.0,1,0.05),alpha=0.5,color="red",label="CON")
#set the axis labels
plt.xlabel("Methylation ratio",fontsize=28)
plt.ylabel("Counts",fontsize=28)
#set the axis scales so we can compare plots
plt.xlim((0,1.0))
#plt.ylim((0,1.5E4))
#optional; tick label in scientific notation
plt.ticklabel_format(axis="y",scilimits=(2,4),useMathText=True)
# add the legend
plt.legend(fontsize=28,framealpha=1.0)
# and save the figure as a 300 DPI png file
plt.savefig(argstr.type+"_"+argstr.outfile+".png",dpi=300,format="png", bbox_inches='tight')
# close the plt object so that the above plots are not copied unintentionally,
# if this subroutine is called multiple times by the same parent python process
plt.close()
{
"api_name": "matplotlib.rcParams",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_nam... |
18764507379 | """
Given a list of UQ course codes, crawl the UQ course website and scrape
information pertaining to said course.
"""
import sys
import requests
from bs4 import BeautifulSoup
# Headers for making web requests look like a real user (or they may be
# rejected by the UQ website)
headers = requests.utils.default_headers()
headers.update(
{
'User-Agent': 'PreReqBot 1.0',
}
)
# The base URL we want to make requests to
BASE = 'https://my.uq.edu.au/programs-courses/course.html?course_code='
# The internal HTML id's of the blocks of interest
INCOMPAT = "course-incompatible"
PREREQ = "course-prerequisite"
# Converts the resulting HTML to string, and converts commas to "and"
def format_courses(results):
    """Join the stripped text of each element, turn commas into "and",
    and normalise runs of whitespace to single spaces."""
    outstring = " ".join(x.text.strip() for x in results)
    outstring = outstring.replace(",", " and ")
    # Collapse any run of whitespace to one space. The old single-pass
    # "  " -> " " replace left residue when three or more spaces lined up.
    return " ".join(outstring.split())
# Run it
# Entry point: read course codes from the file named on the command line,
# fetch each course page, and print incompatible/prerequisite info as CSV.
def main():
    if len(sys.argv) != 2:
        print ("Usage: python3 crawl.py [file-of-courses]")
        print ()
        print ("[file-of-courses] is a one-course-per-line text file.")
        sys.exit(1)

    # Process the input file one course code per line.
    with open(sys.argv[1]) as course_file:
        for raw_line in course_file:
            course_code = raw_line.strip()
            # Fetch the course page with browser-like headers.
            page = requests.get(BASE + course_code, headers=headers).content
            soup = BeautifulSoup(page, "html.parser")
            # The page marks the two sections of interest with known ids.
            incompatible = soup.findAll('p', {'id': INCOMPAT})
            prerequisites = soup.findAll('p', {'id': PREREQ})
            print(course_code + ",incompatible," + format_courses(incompatible))
            print(course_code + ",prerequisite," + format_courses(prerequisites))
print(code + ",prerequisite," + format_courses(prereq))
if __name__ == "__main__":
main()
| tompoek/uq-course-prereqs-viz | data-crawler/crawl.py | crawl.py | py | 1,826 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.utils.default_headers",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.utils",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "sys.... |
39398980966 | # Python Project B
# Multinomial Naive Bayes
# By
# Valdar Rudman
# R00081134
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import matplotlib.pyplot as plt
import numpy as np
# Read a file in and split on the white space
def readFile(source):
    """Read *source* and return its whitespace-separated tokens.

    Uses a context manager so the file handle is closed promptly
    (the original leaked the handle until garbage collection).
    """
    with open(source) as fh:
        return fh.read().split()
# Read a file in and split on new line
def readTweetsFile(source):
    """Read *source*, lower-case its contents, and return a list of lines.

    Uses a context manager so the file handle is closed promptly
    (the original leaked the handle until garbage collection).
    """
    with open(source) as fh:
        return fh.read().lower().split("\n")
# Gets the probability of the word. This can be the probability
# of a word being positive or negative
# Per-word class probability: count of the word in one class divided by
# its count over both classes.
def prob_of_word(percentDict, fullDict):
    """Return ``{word: percentDict[word] / fullDict[word]}`` for every word
    in *percentDict*."""
    return {word: percentDict[word] / fullDict[word] for word in percentDict}
# Takes a list of words in and returns list without stopwords
# Tokenize a sentence and drop English stopwords.
def removeStopWords(sentence):
    """Return the tokens of *sentence* with NLTK English stopwords removed."""
    stop_set = set(stopwords.words('english'))
    return [token for token in word_tokenize(sentence) if token not in stop_set]
# Working out if tweets are positive or negative
# Classify each tweet as positive/negative/undecided by comparing the mean
# positive and negative word scores.
def posNegTweets(tweets, wordsPos, wordsNeg):
    """Return ``[pos%, neg%, unknown%]`` for the given batch of tweets."""
    pos_count, neg_count, unknown_count = 0, 0, 0
    for tweet in tweets:
        tokens = tweet.split()
        # NOTE: the denominator is len(tokens) + 1 (the original's counter
        # started at 1); both scores share it, so comparisons are unaffected.
        denom = len(tokens) + 1
        pos_score = sum(wordsPos[t] for t in tokens if t in wordsPos) / denom
        neg_score = sum(wordsNeg[t] for t in tokens if t in wordsNeg) / denom
        if pos_score > neg_score:
            pos_count += 1
        elif neg_score > pos_score:
            neg_count += 1
        else:
            unknown_count += 1
    # Percentages of positive / negative / undecided tweets in this batch.
    return [((pos_count / len(tweets)) * 100), ((neg_count / len(tweets)) * 100), ((unknown_count / len(tweets)) * 100)]
# Graph the before and after results of pre-processing for both negative and positive
# Bar chart comparing classification accuracy before/after stop-word removal.
def graph(PositiveBeforePP, positiveAfterPP, negativeBeforePP, negativeAfterPP):
    """Show a four-bar accuracy comparison plot."""
    labels = ('Pos Before Pre-Processing',
              'Pos After Pre-Processing',
              'Neg before Pre-Processing',
              'Neg After Pre-Processing')
    heights = [PositiveBeforePP, positiveAfterPP,
               negativeBeforePP, negativeAfterPP]
    positions = np.arange(len(labels))
    plt.bar(positions, heights, align='center', alpha=0.1)
    plt.xticks(positions, labels)
    plt.ylabel("Percentage")
    plt.xlabel("Data")
    plt.title("Tweets Accuracy")
    plt.show()
def main():
    """Train per-word pos/neg ratios from the training corpora, score the
    test tweets before and after stop-word removal, and plot the comparison."""
    print("Reading in Training Files...")
    posList = readFile("train\\trainPos.txt")
    negList = readFile("train\\trainNeg.txt")
    # Lower-case all training tokens.
    posList = [item.lower() for item in posList]
    negList = [item.lower() for item in negList]
    print("Removing stopwords from training files...")
    # print(negList)
    posList = removeStopWords(' '.join(posList))
    negList = removeStopWords(' '.join(negList))
    # Getting unique words for positive and negative as well as getting a full set of them
    posSet = set(posList)
    negSet = set(negList)
    fullSet = posSet|negSet
    print("Creating dictionaries...")
    # Creating dictionaries to use to keep count of how many times a word show up
    posDict = dict.fromkeys(posSet, 0)
    negDict = dict.fromkeys(negSet, 0)
    fullDict = dict.fromkeys(fullSet, 0)
    for word in posList:
        posDict[word] = posDict[word] + 1
        fullDict[word] = fullDict[word] + 1
    for word in negList:
        negDict[word] = negDict[word] + 1
        fullDict[word] = fullDict[word] + 1
    # print("Negative: ", negDict)
    # print("Full: ", fullDict)
    print("Calculate words pos/neg value...")
    # Per-word probability of being positive / negative.
    wordsPos = prob_of_word(posDict, fullDict)
    wordsNeg = prob_of_word(negDict, fullDict)
    print("Reading in Pos Tweets and removing stopwords...")
    posTweets = readTweetsFile("test\\testPos.txt")
    posTweetsCleanedUp = []
    for tweet in posTweets:
        # NOTE(review): the result of tweet.lower() is discarded here;
        # readTweetsFile already lower-cases, so this is a harmless no-op.
        tweet.lower()
        posTweetsCleanedUp.append(' '.join(removeStopWords(tweet)))
    print("Reading in Neg Tweets and removing stopwords...")
    negTweets = readTweetsFile("test\\testNeg.txt")
    negTweetsCleanedUp = []
    for tweet in negTweets:
        # NOTE(review): discarded result, see above.
        tweet.lower()
        negTweetsCleanedUp.append(' '.join(removeStopWords(tweet)))
    print("Calculating Pre results...")
    posPreResults = posNegTweets(posTweets, wordsPos, wordsNeg)
    negPreResults = posNegTweets(negTweets, wordsPos, wordsNeg)
    print("Pre Results\nPositive: ", posPreResults, "\nNegative: ", negPreResults)
    print("Calculating Post results...")
    posPostResults = posNegTweets(posTweetsCleanedUp, wordsPos, wordsNeg)
    negPostResults = posNegTweets(negTweetsCleanedUp, wordsPos, wordsNeg)
    print("Post Results\nPositive: ", posPostResults, "\nNegative: ", negPostResults)
    # Index 0 = percentage classified positive, index 1 = percentage negative.
    graph(posPreResults[0], posPostResults[0], negPreResults[1], negPostResults[1])
if __name__ == '__main__':
main()
| ValdarRudman/Multinomial-Naive-Bayes | Multinomial Naive Bayes.py | Multinomial Naive Bayes.py | py | 5,233 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "nltk.tokenize.word_tokenize",
"line_number": 33,
"usage_type": "call"
},
{
"api... |
933471323 | import abc
from neutron import quota
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as qexception
from neutron.plugins.common import constants
UOS_SERVICE_PROVIDER = 'uos:service_provider'
UOS_NAME = 'uos:name'
UOS_REGISTERNO = 'uos:registerno'
UOS_PORT_DEVICE_NAME = 'uos:port_device_name'
UOS_PORT_DEVICE_OWNER = 'uos:port_device_owner'
UOS_PORT_DEVICE_ID = 'uos:port_device_id'
UOS_RATE_LIMIT = 'rate_limit'
RESOURCE_ATTRIBUTE_MAP = {
'floatingipsets': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'floatingipset_address': {'allow_post': False, 'allow_put': False,
'convert_to': attr._validate_dict_or_none,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True, 'default': list()},
'floatingipset_subnet_id': {'allow_post': True, 'allow_put': False,
'convert_to': attr.convert_to_list,
'validate': {'type:uuid_list': None},
'is_visible': True,
'default': None},
'floatingipset_network_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'router_id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
'port_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None,
'required_by_policy': True},
'fixed_ip_address': {'allow_post': True, 'allow_put': True,
'validate': {'type:ip_address_or_none': None},
'is_visible': True, 'default': None},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': None},
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
UOS_NAME: {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
UOS_REGISTERNO: {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
UOS_SERVICE_PROVIDER: {'allow_post': True, 'allow_put': False,
'convert_to': attr.convert_to_list,
'is_visible': True, 'default': ''},
UOS_PORT_DEVICE_NAME: {'allow_post': False, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
UOS_PORT_DEVICE_OWNER: {'allow_post': False, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
UOS_PORT_DEVICE_ID: {'allow_post': False, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
UOS_RATE_LIMIT: {'allow_post': True, 'allow_put': False,
'convert_to': attr.convert_to_int,
'validate': {'type:fip_rate_limit': None},
'is_visible': True, 'default': 1024}
}
}
class ServiceProviderNotExist(qexception.BadRequest):
    # Raised when the requested floating-IP service provider is unknown.
    # Message grammar fixed ("is not exists" -> "does not exist").
    message = _("the service provider %(service_provider)s does not exist")
class InputServieProviderNull(qexception.BadRequest):
    # Raised when no service provider was supplied in the request.
    # NOTE(review): class name misspells "Service"; kept for API compatibility.
    message = _("the service provider could not be found")
class FloatingipsLenTooLong(qexception.BadRequest):
    # Raised when a floatingipset request contains more than one floating IP.
    message = _("In the floatingipset, the num of floatingip must be only one")
class FloatingIPSetNotFound(qexception.NotFound):
    # Raised when the requested floating IP set id does not exist.
    message = _("Floating IP Set %(floatingipset_id)s could not be found")
class Uosfloatingipset(extensions.ExtensionDescriptor):
    """API extension descriptor for UnitedStack floating IP sets."""
    @classmethod
    def get_name(cls):
        return "UnitedStack Floatingipset"
    @classmethod
    def get_alias(cls):
        return "uos_floatingipsets"
    @classmethod
    def get_description(cls):
        return ("Return related resources")
    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/neutron/uos/api/v1.0"
    @classmethod
    def get_updated(cls):
        return "2013-12-25T10:00:00-00:00"
    @classmethod
    def get_resources(cls):
        """Returns floatingipset Resources.

        NOTE: a duplicate, dead definition of get_resources (returning [])
        used to precede this one; Python kept only this later definition,
        so removing the first one does not change behavior.
        """
        plural_mappings = resource_helper.build_plural_mappings(
            {}, RESOURCE_ATTRIBUTE_MAP)
        attr.PLURALS.update(plural_mappings)
        #quota.QUOTAS.register_resource_by_name('floatingset')
        return resource_helper.build_resource_info(plural_mappings,
                                                   RESOURCE_ATTRIBUTE_MAP,
                                                   constants.L3_ROUTER_NAT,
                                                   register_quota=True)
    def update_attributes_map(self, attributes):
        # Merge this extension's attributes into the core resource map.
        super(Uosfloatingipset, self).update_attributes_map(
            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
    def get_extended_resources(self, version):
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        else:
            return {}
class FloatingipsetBase(object):
    """Abstract interface a plugin must implement to support floating IP sets.

    NOTE(review): this class inherits from ``object`` rather than using
    ``abc.ABCMeta`` as its metaclass, so the ``@abc.abstractmethod``
    decorators are not enforced at instantiation time — confirm whether
    enforcement is intended.
    """
    @abc.abstractmethod
    def create_floatingipset(self, context, floatingipset):
        pass
    @abc.abstractmethod
    def update_floatingipset(self, context, id, floatingipset):
        pass
    @abc.abstractmethod
    def get_floatingipset(self, context, id, fields=None):
        pass
    @abc.abstractmethod
    def delete_floatingipset(self, context, id):
        pass
    @abc.abstractmethod
    def get_floatingipsets(self, context, filters=None, fields=None,
                           sorts=None, limit=None, marker=None,
                           page_reverse=False):
        pass
    # NOTE(review): not marked abstract, presumably optional — confirm.
    def get_floatingipsets_count(self, context, filters=None):
        pass
| CingHu/neutron-ustack | neutron/extensions/uosfloatingipset.py | uosfloatingipset.py | py | 6,685 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "neutron.api.v2.attributes._validate_dict_or_none",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "neutron.api.v2.attributes",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "neutron.api.v2.attributes.convert_to_list",
"line_number": 29... |
29423648678 | import logging
from collections import namedtuple, defaultdict
from copy import deepcopy
from dataclasses import dataclass, field
from typing import List, Tuple
import numpy as np
import networkx as nx
import parmed as pm
from IPython.display import display, SVG
from rdkit import Chem
from rdkit.Chem import AllChem, Draw, rdFMCS, rdCoordGen
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit.Chem.Draw import IPythonConsole
IPythonConsole.molSize = (900, 900) # Change image size
IPythonConsole.ipython_useSVG = True # Change output to SVG
from transformato.system import SystemStructure
from transformato.annihilation import calculate_order_of_LJ_mutations_asfe
logger = logging.getLogger(__name__)
def _flattened(list_of_lists: list) -> list:
return [item for sublist in list_of_lists for item in sublist]
def _performe_linear_charge_scaling(
nr_of_steps: int,
intermediate_factory,
mutation,
):
for lambda_value in np.linspace(1, 0, nr_of_steps + 1)[1:]:
print("####################")
print(
f"Coulomb scaling in step: {intermediate_factory.current_step} with lamb: {lambda_value}"
)
print("####################")
intermediate_factory.write_state(
mutation_conf=mutation,
lambda_value_electrostatic=lambda_value,
)
def _performe_linear_cc_scaling(
nr_of_steps: int,
intermediate_factory,
mutation,
) -> int:
for lambda_value in np.linspace(1, 0, nr_of_steps + 1)[1:]:
print("####################")
print(
f"Perform paramteter scaling on cc in step: {intermediate_factory.current_step} with lamb: {lambda_value}"
)
print("####################")
intermediate_factory.write_state(
mutation_conf=mutation,
common_core_transformation=lambda_value,
)
def perform_mutations(
    configuration: dict,
    i,
    mutation_list: list,
    list_of_heavy_atoms_to_be_mutated: list = [],
    nr_of_mutation_steps_charge: int = 5,
    nr_of_mutation_steps_lj_of_hydrogens: int = 1,
    nr_of_mutation_steps_lj_of_heavy_atoms: int = 1,
    nr_of_mutation_steps_cc: int = 5,
    endstate_correction: bool = False,
):
    """Performs the mutations necessary to mutate the physical endstate to the defined common core.

    Args:
        configuration (dict): A configuration dictionary.
        i ([type]): IntermediateState instance
        mutation_list (list): list of mutation objects
        list_of_heavy_atoms_to_be_mutated (list, optional): A list of atom indices that define the order in which the vdw parameters of the heavy atoms are turned off. Defaults to [].
        nr_of_mutation_steps_charge (int, optional): Nr of steps to turne of the charges. Defaults to 5.
        nr_of_mutation_steps_lj_of_hydrogens (int, optional): Nr of steps to turne of lj of hydrogens. Only needed for systems with many hydrogens in dummy region
        nr_of_mutation_steps_lj_of_heavy_atoms (int, optional): Nr of steps to turne of the lj of heavy atoms
        nr_of_mutation_steps_cc (int, optional): Nr of steps to interpolate between the common core parameters. Defaults to 5.
        endstate_correction (bool, optional): If True, additionally call the
            intermediate factory's endstate_correction(). Defaults to False.

    Returns:
        None. Intermediate states are written to disk by ``i.write_state``
        (the earlier docstring's claim of a returned list of directories
        did not match the code, which has no return statement).
    """
    from transformato.utils import map_lj_mutations_to_atom_idx
    ######################################
    # write endpoint mutation
    ######################################
    print("####################")
    print(f"Physical endstate in step: 1")
    print("####################")
    i.write_state(mutation_conf=[])
    ######################################
    # turn off electrostatics
    ######################################
    m = mutation_list["charge"]
    # turn off charges
    # if number of charge mutation steps are defined in config file overwrite default or passed value
    try:
        nr_of_mutation_steps_charge = configuration["system"][i.system.structure][
            "mutation"
        ]["steps_charge"]
        print("Using number of steps for charge mutattions as defined in config file")
    except KeyError:
        pass
    _performe_linear_charge_scaling(
        nr_of_steps=nr_of_mutation_steps_charge,
        intermediate_factory=i,
        mutation=m,
    )
    ######################################
    # turn off LJ
    ######################################
    ######################################
    # Turn off hydrogens
    if nr_of_mutation_steps_lj_of_hydrogens == 1:
        # Single-step: switch all dummy-region hydrogens off at once.
        if mutation_list["hydrogen-lj"]:
            print("####################")
            print(f"Hydrogen vdW scaling in step: {i.current_step} with lamb: {0.0}")
            print("####################")
            i.write_state(
                mutation_conf=mutation_list["hydrogen-lj"],
                lambda_value_vdw=0.0,
            )
    else:
        # Scaling lj-parameters in multiple steps (from lambda 0.75 down to 0)
        if mutation_list["hydrogen-lj"]:
            for lambda_value in np.linspace(
                0.75, 0, nr_of_mutation_steps_lj_of_hydrogens + 1
            ):
                print("####################")
                print(
                    f"Hydrogen vdW scaling in step: {i.current_step} with lamb: {lambda_value}"
                )
                print("####################")
                i.write_state(
                    mutation_conf=mutation_list["hydrogen-lj"],
                    lambda_value_vdw=lambda_value,
                )
    ######################################
    # turn off lj of heavy atoms
    # take the order from either config file, passed to this function or the default ordering
    try:
        list_of_heavy_atoms_to_be_mutated = configuration["system"][i.system.structure][
            "mutation"
        ]["heavy_atoms"]
        print("Using ordering of LJ mutations as defined in config file.")
    except KeyError:
        if not list_of_heavy_atoms_to_be_mutated:
            # Use the ordering provided by _calculate_order_of_LJ_mutations
            list_of_heavy_atoms_to_be_mutated = [
                lj.vdw_atom_idx[0] for lj in (mutation_list["lj"])
            ]
            print("Using calculated ordering of LJ mutations.")
        else:
            print("Using passed ordering of LJ mutations.")
    mapping_of_atom_idx_to_mutation = map_lj_mutations_to_atom_idx(mutation_list["lj"])
    for heavy_atoms_to_turn_off_in_a_single_step in list_of_heavy_atoms_to_be_mutated:
        logger.info(
            f"turning off lj of heavy atom: {heavy_atoms_to_turn_off_in_a_single_step}"
        )
        try:  # heavy_atoms_to_turn_off_in_a_single_step can be a tuple or an integer
            mutations = [
                mapping_of_atom_idx_to_mutation[heavy_atom_idx]
                for heavy_atom_idx in heavy_atoms_to_turn_off_in_a_single_step
            ]
        except TypeError:
            # Not iterable -> a single atom index.
            mutations = [
                mapping_of_atom_idx_to_mutation[
                    heavy_atoms_to_turn_off_in_a_single_step
                ]
            ]
        # only used in asfe to ensure that last atom is
        # turned off in two steps
        if (
            heavy_atoms_to_turn_off_in_a_single_step
            == list_of_heavy_atoms_to_be_mutated[-1]
            and configuration["simulation"]["free-energy-type"] == "asfe"
        ):
            for lambda_value in np.linspace(
                0.75, 0, nr_of_mutation_steps_lj_of_heavy_atoms + 1
            ):
                print("####################")
                print(
                    f"Turn off last heavy atom vdW parameter in: {i.current_step} on atoms: {heavy_atoms_to_turn_off_in_a_single_step} with lambda {lambda_value}"
                )
                print("####################")
                i.write_state(
                    mutation_conf=mutations,
                    lambda_value_vdw=lambda_value,
                )
        elif nr_of_mutation_steps_lj_of_heavy_atoms == 1:
            print("####################")
            print(
                f"Turn off heavy atom vdW parameter in: {i.current_step} on atoms: {heavy_atoms_to_turn_off_in_a_single_step}"
            )
            print("####################")
            i.write_state(
                mutation_conf=mutations,
                lambda_value_vdw=0.0,
            )
        else:
            for lambda_value in np.linspace(
                0.75, 0, nr_of_mutation_steps_lj_of_heavy_atoms + 1
            ):
                print("####################")
                print(
                    f"Turn off heavy atom vdW parameter in: {i.current_step} on atoms: {heavy_atoms_to_turn_off_in_a_single_step} with lambda {lambda_value}"
                )
                print("####################")
                i.write_state(
                    mutation_conf=mutations,
                    lambda_value_vdw=lambda_value,
                )
    ######################################
    # generate terminal LJ
    ######################################
    if not configuration["simulation"]["free-energy-type"] == "asfe":
        print("####################")
        print(
            f"Generate terminal LJ particle in step: {i.current_step} on atoms: {[v.vdw_atom_idx for v in mutation_list['default-lj']]}"
        )
        print("####################")
        i.write_state(
            mutation_conf=mutation_list["default-lj"],
            lambda_value_vdw=0.0,
        )
    ######################################
    # mutate common core
    ######################################
    if mutation_list["transform"]:
        try:
            nr_of_mutation_steps_cc = configuration["system"][i.system.structure][
                "mutation"
            ]["steps_common_core"]
        except KeyError:
            nr_of_mutation_steps_cc = nr_of_mutation_steps_cc
        # change bonded parameters on common core
        _performe_linear_cc_scaling(
            nr_of_steps=nr_of_mutation_steps_cc,
            intermediate_factory=i,
            mutation=mutation_list["transform"],
        )
    if endstate_correction:
        i.endstate_correction()
@dataclass
class DummyRegion:
    # Name of the molecule this dummy region belongs to (e.g. "m1"/"m2").
    mol_name: str
    # Maps each terminal real-atom idx to the set of dummy-atom idxs bonded to it.
    match_termin_real_and_dummy_atoms: dict
    # Lists of atom idxs, one list per connected dummy sub-region.
    connected_dummy_regions: list
    # Three-letter residue code of the ligand.
    tlc: str
    # Atom idxs that keep a default LJ parameter set.
    lj_default: list
    def return_connecting_real_atom(self, dummy_atoms: list):
        """Return the real-atom idx bonded to any atom in *dummy_atoms*,
        or None (with a critical log message) if no match exists."""
        for real_atom in self.match_termin_real_and_dummy_atoms:
            for dummy_atom in self.match_termin_real_and_dummy_atoms[real_atom]:
                if dummy_atom in dummy_atoms:
                    logger.debug(f"Connecting real atom: {real_atom}")
                    return real_atom
        logger.critical("No connecting real atom was found!")
        return None
@dataclass
class MutationDefinition:
    # Atom idxs whose parameters are modified in this mutation step.
    atoms_to_be_mutated: List[int]
    # Atom idxs belonging to the common core.
    common_core: List[int]
    # Dummy-region bookkeeping for the molecule being mutated.
    dummy_region: DummyRegion
    # Atom idxs whose vdW terms are decoupled (empty for charge-only steps).
    vdw_atom_idx: List[int] = field(default_factory=list)
    # Whether the steric mutation targets the default LJ parameter set.
    steric_mutation_to_default: bool = False
    def print_details(self):
        """Print a short human-readable summary of this mutation step."""
        print("####################")
        print(f"Atoms to be mutated: {self.atoms_to_be_mutated}")
        print(f"Mutated on common core: {self.common_core}")
        if self.vdw_atom_idx:
            print(f"VDW atoms to be decoupled: {self.vdw_atom_idx}")
class ProposeMutationRoute(object):
def __init__(
self,
s1: SystemStructure,
s2: SystemStructure = None,
):
"""
A class that proposes the mutation route between two molecules with a
common core (same atom types) based on two mols and generates the mutation
objects to perform the mutation on the psf objects.
Parameters
----------
mol1: Chem.Mol
mol2: Chem.Mol
"""
try:
mol1_name: str = "m1"
mol2_name: str = "m2"
self.system: dict = {"system1": s1, "system2": s2}
self.mols: dict = {mol1_name: s1.mol, mol2_name: s2.mol}
self.graphs: dict = {mol1_name: s1.graph, mol2_name: s2.graph}
# psfs for reference of only ligand
self.psfs: dict = {
mol1_name: s1.psfs["waterbox"][f":{s1.tlc}"],
mol2_name: s2.psfs["waterbox"][f":{s2.tlc}"],
}
self.psf1: pm.charmm.CharmmPsfFile = s1.psfs
self.psf2: pm.charmm.CharmmPsfFile = s2.psfs
self._substructure_match: dict = {mol1_name: [], mol2_name: []}
self.removed_indeces: dict = {mol1_name: [], mol2_name: []}
self.added_indeces: dict = {mol1_name: [], mol2_name: []}
self.s1_tlc = s1.tlc
self.s2_tlc = s2.tlc
self.terminal_real_atom_cc1: list = []
self.terminal_real_atom_cc2: list = []
self.terminal_dummy_atom_cc1: list = []
self.terminal_dummy_atom_cc2: list = []
self.bondCompare = rdFMCS.BondCompare.CompareAny
self.atomCompare = rdFMCS.AtomCompare.CompareElements
self.maximizeBonds: bool = True
self.matchValences: bool = False
self.completeRingsOnly: bool = False
self.ringMatchesRingOnly: bool = True
self.dummy_region_cc1: DummyRegion
self.dummy_region_cc2: DummyRegion
self.asfe: bool = False
self._check_cgenff_versions()
except:
logger.info(
"Only information about one structure, assume an ASFE simulation is requested"
)
mol1_name: str = "m1"
self.system: dict = {"system1": s1}
self.mols: dict = {mol1_name: s1.mol}
self.graphs: dict = {mol1_name: s1.graph}
# psfs for reference of only ligand
self.psfs: dict = {s1.psfs["waterbox"][f":{s1.tlc}"]}
self.psf1: pm.charmm.CharmmPsfFile = s1.psfs
self._substructure_match: dict = {mol1_name: []}
self.removed_indeces: dict = {mol1_name: []}
self.added_indeces: dict = {mol1_name: []}
self.s1_tlc = s1.tlc
self.asfe: bool = True
self.dummy_region_cc1: DummyRegion
def _check_cgenff_versions(self):
cgenff_sys1 = self.system["system1"].cgenff_version
cgenff_sys2 = self.system["system2"].cgenff_version
if cgenff_sys1 == cgenff_sys2:
pass
else:
raise RuntimeError(
f"CGenFF compatibility error. CGenFF: {cgenff_sys1} and CGenFF: {cgenff_sys2} are combined."
)
def _match_terminal_real_and_dummy_atoms_for_mol1(self):
"""
Matches the terminal real and dummy atoms and returns a dict with real atom idx as key and a set of dummy atoms that connect
to this real atom as a set
"""
return self._match_terminal_real_and_dummy_atoms(
self.mols["m1"], self.terminal_real_atom_cc1, self.terminal_dummy_atom_cc1
)
def _match_terminal_real_and_dummy_atoms_for_mol2(self) -> dict:
"""
Matches the terminal real and dummy atoms and returns a dict with real atom idx as key and a set of dummy atoms that connect
to this real atom as a set
"""
return self._match_terminal_real_and_dummy_atoms(
self.mols["m2"], self.terminal_real_atom_cc2, self.terminal_dummy_atom_cc2
)
@staticmethod
def _match_terminal_real_and_dummy_atoms(
mol, real_atoms_cc: list, dummy_atoms_cc: list
) -> dict:
"""
Matches the terminal real and dummy atoms and returns a dict with real atom idx as key and a set of dummy atoms that connect
to this real atom as a set
Parameters
----------
mol : [Chem.Mol]
The mol object with the real and dummy atoms
real_atoms_cc : list
list of real atom idx
dummy_atoms_cc : list
list of dummy atom idx
Returns
-------
[type]
[description]
"""
from collections import defaultdict
real_atom_match_dummy_atom = defaultdict(set)
for real_atom_idx in real_atoms_cc:
real_atom = mol.GetAtomWithIdx(real_atom_idx)
real_neighbors = [x.GetIdx() for x in real_atom.GetNeighbors()]
for dummy_atoms_idx in dummy_atoms_cc:
if dummy_atoms_idx in real_neighbors:
real_atom_match_dummy_atom[real_atom_idx].add(dummy_atoms_idx)
return real_atom_match_dummy_atom
    def _set_common_core_parameters(self):
        """Determine the terminal atoms of both common cores and pair up the
        terminal real atoms of cc1 and cc2 that connect to dummy regions.

        Populates ``self.terminal_dummy_atom_cc1/2``,
        ``self.terminal_real_atom_cc1/2`` and
        ``self.matching_terminal_atoms_between_cc``.

        Raises
        ------
        RuntimeError
            If no terminal real atom pair could be matched between the cores.
        """
        # find terminal atoms
        (
            self.terminal_dummy_atom_cc1,
            self.terminal_real_atom_cc1,
        ) = self._find_terminal_atom(self.get_common_core_idx_mol1(), self.mols["m1"])
        (
            self.terminal_dummy_atom_cc2,
            self.terminal_real_atom_cc2,
        ) = self._find_terminal_atom(self.get_common_core_idx_mol2(), self.mols["m2"])
        # match terminal real atoms between cc1 and cc2 that connect dummy atoms
        # NOTE: the two index lists are parallel - position i of cc1 maps to
        # position i of cc2 (the ordering stems from the substructure match)
        cc_idx_mol1 = self.get_common_core_idx_mol1()
        cc_idx_mol2 = self.get_common_core_idx_mol2()
        matching_terminal_atoms_between_cc = list()
        for cc1_idx, cc2_idx in zip(cc_idx_mol1, cc_idx_mol2):
            # both matched atoms are terminal real atoms
            if (
                cc1_idx in self.terminal_real_atom_cc1
                and cc2_idx in self.terminal_real_atom_cc2
            ):
                logger.info(
                    f"Dummy regions connect on the same terminal atoms. cc1: {cc1_idx} : cc2: {cc2_idx}"
                )
                matching_terminal_atoms_between_cc.append((cc1_idx, cc2_idx))
            # exactly one of the two matched atoms is a terminal real atom
            elif (
                cc1_idx in self.terminal_real_atom_cc1
                and cc2_idx not in self.terminal_real_atom_cc2
            ) or (
                cc1_idx not in self.terminal_real_atom_cc1
                and cc2_idx in self.terminal_real_atom_cc2
            ):
                logger.info(
                    f"Single dummy region connects on terminal atom. cc1: {cc1_idx} : cc2: {cc2_idx}"
                )
                matching_terminal_atoms_between_cc.append((cc1_idx, cc2_idx))
            else:
                # neither atom is terminal: nothing to record for this pair
                pass
        if not matching_terminal_atoms_between_cc:
            raise RuntimeError(
                "No terminal real atoms were matched between the common cores. Aborting."
            )
        self.matching_terminal_atoms_between_cc = matching_terminal_atoms_between_cc
    def _match_terminal_dummy_atoms_between_common_cores(
        self,
        match_terminal_atoms_cc1: dict,
        match_terminal_atoms_cc2: dict,
    ) -> Tuple[list, list]:
        """For every matched pair of terminal real atoms, select the single
        dummy atom (per molecule) that keeps default LJ parameters.

        Parameters
        ----------
        match_terminal_atoms_cc1 : dict
            Terminal real atom idx -> set of bonded dummy atom idx (mol1).
        match_terminal_atoms_cc2 : dict
            Terminal real atom idx -> set of bonded dummy atom idx (mol2).

        Returns
        -------
        Tuple[list, list]
            The selected dummy atom indices for cc1 and cc2, respectively.

        Raises
        ------
        NotImplementedError
            If a matched terminal atom has no dummy neighbour on one side
            (hydrogen-to-dummy mutation is not supported).
        """
        cc1_idx = self._substructure_match["m1"]
        cc2_idx = self._substructure_match["m2"]
        lj_default_cc1 = []
        lj_default_cc2 = []
        # iterate through the common core substracter (the order represents the matched atoms)
        for idx1, idx2 in zip(cc1_idx, cc2_idx):
            # if both atoms are terminal atoms connected dummy regions can be identified
            if (
                idx1 in match_terminal_atoms_cc1.keys()
                and idx2 in match_terminal_atoms_cc2.keys()
            ):
                connected_dummy_cc1 = list(match_terminal_atoms_cc1[idx1])
                connected_dummy_cc2 = list(match_terminal_atoms_cc2[idx2])
                # exactly one dummy neighbour on each side: nothing to decide
                if len(connected_dummy_cc1) == 1 and len(connected_dummy_cc2) == 1:
                    pass
                # multiple, possible dummy regions
                elif len(connected_dummy_cc1) > 1 or len(connected_dummy_cc2) > 1:
                    logger.critical("There is a dual junction. Be careful.")
                    # NOTE: For now we are just taking the non hydrogen atom
                    for atom_idx in connected_dummy_cc1:
                        if self.mols["m1"].GetAtomWithIdx(atom_idx).GetSymbol() != "H":
                            connected_dummy_cc1 = [atom_idx]
                            break
                    for atom_idx in connected_dummy_cc2:
                        if self.mols["m2"].GetAtomWithIdx(atom_idx).GetSymbol() != "H":
                            connected_dummy_cc2 = [atom_idx]
                            break
                # hydrogen mutates to dummy atom (but not a LJ particle)
                elif len(connected_dummy_cc1) == 0 or len(connected_dummy_cc2) == 0:
                    logger.debug("Hydrogen to dummy mutation")
                    raise NotImplementedError()
                lj_default_cc1.append(connected_dummy_cc1[0])
                lj_default_cc2.append(connected_dummy_cc2[0])
        return (lj_default_cc1, lj_default_cc2)
    @staticmethod
    def _calculate_order_of_LJ_mutations(
        connected_dummy_regions: list,
        match_terminal_atoms: dict,
        G: nx.Graph,
    ) -> list:
        """Determine, per dummy region, the order in which atoms are turned
        into LJ dummies.

        Prefers the BFS-based implementation from the optional ``tf_routes``
        package; if it is not installed, falls back to a DFS rooted at the
        dummy atom that anchors each region, applied in reverse (leaves
        mutate first, the anchoring dummy atom last).

        Parameters
        ----------
        connected_dummy_regions : list
            Sets/lists of atom indices, one per connected dummy region.
        match_terminal_atoms : dict
            Terminal real atom idx -> set of bonded dummy atom idx.
        G : nx.Graph
            Molecular graph of the full molecule.

        Returns
        -------
        list
            One ordered list of atom indices per dummy region.
        """
        try:
            from tf_routes.routes import (
                _calculate_order_of_LJ_mutations_new as _calculate_order_of_LJ_mutations_with_bfs,
            )
            return _calculate_order_of_LJ_mutations_with_bfs(
                connected_dummy_regions, match_terminal_atoms, G
            )
        except ModuleNotFoundError:
            ordered_LJ_mutations = []
            for real_atom in match_terminal_atoms:
                for dummy_atom in match_terminal_atoms[real_atom]:
                    for connected_dummy_region in connected_dummy_regions:
                        # stop at connected dummy region with specific dummy_atom in it
                        if dummy_atom not in connected_dummy_region:
                            continue
                        G_dummy = G.copy()
                        # delete all nodes not in dummy region
                        remove_nodes = [
                            node
                            for node in G.nodes()
                            if node not in connected_dummy_region
                        ]
                        for remove_node in remove_nodes:
                            G_dummy.remove_node(remove_node)
                        # root is the dummy atom that connects the real region with the dummy region
                        root = dummy_atom
                        edges = list(nx.dfs_edges(G_dummy, source=root))
                        nodes = [root] + [v for u, v in edges]
                        nodes.reverse()  # NOTE: reverse the mutation
                        ordered_LJ_mutations.append(nodes)
            return ordered_LJ_mutations
def _check_for_lp(
self,
odered_connected_dummy_regions_cc_with_lp: list,
psf: pm.charmm.CharmmPsfFile,
tlc: str,
name: str,
) -> list:
"""
With the help of parmed this function will look in the ordered_connected_dummy_regions list if
there is a atom which has lonepairs. It will check wheather the lp belongs to the common core or
to the dummy region and assign it into the sorted list accordingly.
"""
flat_ordered_connected_dummy_regions = [
item
for sublist in odered_connected_dummy_regions_cc_with_lp
for item in sublist
]
lp_dict_dummy_region = defaultdict(list)
lp_dict_common_core = defaultdict(list)
for atom in psf.view[f":{tlc}"].atoms:
if atom.name.find("LP") == False:
print(f"die Atome {atom}")
if atom.frame_type.atom1.idx in flat_ordered_connected_dummy_regions:
lp_dict_dummy_region[atom.frame_type.atom1.idx].append(atom.idx)
elif (
atom.frame_type.atom1.idx not in lp_dict_common_core
and name == "m1"
):
logger.info(f"Adding atom {atom.idx} to the common core of mol1")
self.add_idx_to_common_core_of_mol1([atom.idx])
elif (
atom.frame_type.atom1.idx not in lp_dict_common_core
and name == "m2"
):
logger.info(f"Adding atom {atom.idx} to the common core of mol1")
self.add_idx_to_common_core_of_mol2([atom.idx])
if lp_dict_dummy_region:
for i in odered_connected_dummy_regions_cc_with_lp:
lp_to_insert = []
for atom in i:
if atom in lp_dict_dummy_region.keys():
lp_to_insert.extend(lp_dict_dummy_region[atom])
for lp_num in reversed(lp_to_insert):
i.insert(0, lp_num)
logger.debug(
f"Orderd connected dummy atoms containing the lp {odered_connected_dummy_regions_cc_with_lp}"
)
return odered_connected_dummy_regions_cc_with_lp
def get_idx_of_all_atoms(
self,
mol1_name: str,
):
"""
Iterates over all atoms of the molecule and saves them as a list
----------
mol1_name: str
"""
s1 = []
for atom in self.psf1["waterbox"][f":{self.s1_tlc}"].atoms:
s1.append(atom.idx)
self._substructure_match[mol1_name] = list(s1)
def propose_common_core(self):
"""
Searches for the common core using the rdkit module, in case of asfe only a list of
atoms of the ligand is created
"""
if self.asfe:
self.get_idx_of_all_atoms("m1")
else:
# System for RBFE/RSFE contains two mols
mcs = self._find_mcs("m1", "m2")
return mcs
    def finish_common_core(
        self,
        connected_dummy_regions_cc1: list = [],
        connected_dummy_regions_cc2: list = [],
        odered_connected_dummy_regions_cc1: list = [],
        odered_connected_dummy_regions_cc2: list = [],
    ):
        """
        The dummy region is created and the final atoms connected to the CC are collected. It is possible
        to define a dummy region on its own or to change the ordering how the lj parameters of the
        heavy atoms in the dummy region are turned off.

        Parameters
        ----------
        connected_dummy_regions_cc1: list
            Optional pre-defined dummy regions for mol1 (sets of atom idx).
        connected_dummy_regions_cc2: list
            Optional pre-defined dummy regions for mol2.
        odered_connected_dummy_regions_cc1: list
            Optional pre-defined LJ switch-off ordering for mol1.
        odered_connected_dummy_regions_cc2: list
            Optional pre-defined LJ switch-off ordering for mol2.

        NOTE(review): the mutable ``[]`` defaults are safe here because they
        are only tested for truthiness and rebound, never mutated directly.
        """
        if not self.asfe:
            # set the teriminal real/dummy atom indices
            self._set_common_core_parameters()
            # match the real/dummy atoms
            match_terminal_atoms_cc1 = (
                self._match_terminal_real_and_dummy_atoms_for_mol1()
            )
            match_terminal_atoms_cc2 = (
                self._match_terminal_real_and_dummy_atoms_for_mol2()
            )
            logger.info("Find connected dummy regions")
            # define connected dummy regions
            if not connected_dummy_regions_cc1:
                connected_dummy_regions_cc1 = self._find_connected_dummy_regions(
                    mol_name="m1",
                )
            if not connected_dummy_regions_cc2:
                connected_dummy_regions_cc2 = self._find_connected_dummy_regions(
                    mol_name="m2",
                )
            logger.debug(
                f"connected dummy regions for mol1: {connected_dummy_regions_cc1}"
            )
            logger.debug(
                f"connected dummy regions for mol2: {connected_dummy_regions_cc2}"
            )
            # calculate the ordering or LJ mutations
            if not odered_connected_dummy_regions_cc1:
                odered_connected_dummy_regions_cc1 = (
                    self._calculate_order_of_LJ_mutations(
                        connected_dummy_regions_cc1,
                        match_terminal_atoms_cc1,
                        self.graphs["m1"].copy(),
                    )
                )
            if not odered_connected_dummy_regions_cc2:
                odered_connected_dummy_regions_cc2 = (
                    self._calculate_order_of_LJ_mutations(
                        connected_dummy_regions_cc2,
                        match_terminal_atoms_cc2,
                        self.graphs["m2"].copy(),
                    )
                )
            logger.info(
                f"sorted connected dummy regions for mol1: {odered_connected_dummy_regions_cc1}"
            )
            logger.info(
                f"sorted connected dummy regions for mol2: {odered_connected_dummy_regions_cc2}"
            )
            # sort lone-pair sites into the dummy regions / common cores
            if odered_connected_dummy_regions_cc1:
                odered_connected_dummy_regions_cc1 = self._check_for_lp(
                    odered_connected_dummy_regions_cc1,
                    self.psf1["waterbox"],
                    self.s1_tlc,
                    "m1",
                )
            if odered_connected_dummy_regions_cc2:
                odered_connected_dummy_regions_cc2 = self._check_for_lp(
                    odered_connected_dummy_regions_cc2,
                    self.psf2["waterbox"],
                    self.s2_tlc,
                    "m2",
                )
            # find the atoms from dummy_region in s1 that needs to become lj default
            (
                lj_default_cc1,
                lj_default_cc2,
            ) = self._match_terminal_dummy_atoms_between_common_cores(
                match_terminal_atoms_cc1, match_terminal_atoms_cc2
            )
            self.dummy_region_cc1 = DummyRegion(
                mol_name="m1",
                tlc=self.s1_tlc,
                match_termin_real_and_dummy_atoms=match_terminal_atoms_cc1,
                connected_dummy_regions=odered_connected_dummy_regions_cc1,
                lj_default=lj_default_cc1,
            )
            self.dummy_region_cc2 = DummyRegion(
                mol_name="m2",
                tlc=self.s2_tlc,
                match_termin_real_and_dummy_atoms=match_terminal_atoms_cc2,
                connected_dummy_regions=odered_connected_dummy_regions_cc2,
                lj_default=lj_default_cc2,
            )
            # generate charge compensated psfs
            psf1, psf2 = self._prepare_cc_for_charge_transfer()
            self.charge_compensated_ligand1_psf = psf1
            self.charge_compensated_ligand2_psf = psf2
        else:
            # all atoms should become dummy atoms in the end
            central_atoms = nx.center(self.graphs["m1"])
            # Assure, that the central atom is no hydrogen
            for atom in self.psf1["waterbox"][f":{self.s1_tlc}"].atoms:
                if atom.idx in central_atoms:
                    if atom.name.startswith("H") == True:
                        raise RuntimeError(
                            f"One of the central atoms seems to be a hydrogen atom"
                        )
            # calculate the ordering or LJ mutations
            if not odered_connected_dummy_regions_cc1:
                odered_connected_dummy_regions_cc1 = (
                    calculate_order_of_LJ_mutations_asfe(
                        central_atoms,
                        self.graphs["m1"].copy(),
                    )
                )
            # sort lone-pair sites into the dummy regions / common core
            if odered_connected_dummy_regions_cc1:
                odered_connected_dummy_regions_cc1 = self._check_for_lp(
                    odered_connected_dummy_regions_cc1,
                    self.psf1["waterbox"],
                    self.s1_tlc,
                    "m1",
                )
            self.dummy_region_cc1 = DummyRegion(
                mol_name="m1",
                tlc=self.s1_tlc,
                match_termin_real_and_dummy_atoms=[],
                connected_dummy_regions=odered_connected_dummy_regions_cc1,
                lj_default=[],
            )
def calculate_common_core(self):
self.propose_common_core()
self.finish_common_core()
    def _prepare_cc_for_charge_transfer(self):
        """Zero out the charges of all non common-core atoms on copies of both
        ligand psfs and return the resulting psfs.

        The charge-compensated psfs reflect the charge distribution AFTER the
        full mutation and are later used for transferring charges between the
        common cores.

        Returns
        -------
        tuple
            (charge-compensated psf of ligand 1, charge-compensated psf of ligand 2)
        """
        # we have to run the same charge mutation that will be run on cc2 to get the
        # charge distribution AFTER the full mutation
        # make a copy of the full psf
        m2_psf = self.psfs["m2"][:, :, :]
        m1_psf = self.psfs["m1"][:, :, :]
        charge_transformed_psfs = []
        for psf, tlc, cc_idx, dummy_region in zip(
            [m1_psf, m2_psf],
            [self.s1_tlc, self.s2_tlc],
            [self.get_common_core_idx_mol1(), self.get_common_core_idx_mol2()],
            [self.dummy_region_cc1, self.dummy_region_cc2],
        ):
            # set `initial_charge` parameter for Mutation
            for atom in psf.view[f":{tlc}"].atoms:
                # charge, epsilon and rmin are directly modified
                atom.initial_charge = atom.charge
            # atom indices in the psf are global; shift to ligand-local indices
            offset = min([atom.idx for atom in psf.view[f":{tlc}"].atoms])
            # getting copy of the atoms
            atoms_to_be_mutated = []
            for atom in psf.view[f":{tlc}"].atoms:
                idx = atom.idx - offset
                if idx not in cc_idx:
                    atoms_to_be_mutated.append(idx)
            logger.debug("############################")
            logger.debug("Preparing cc2 for charge transfer")
            logger.debug(
                f"Atoms for which charge is set to zero: {atoms_to_be_mutated}"
            )
            logger.debug("############################")
            m = Mutation(
                atoms_to_be_mutated=atoms_to_be_mutated, dummy_region=dummy_region
            )
            # lambda_value_electrostatic=0.0 -> charges fully switched off
            m.mutate(psf, lambda_value_electrostatic=0.0)
            charge_transformed_psfs.append(psf)
        return charge_transformed_psfs[0], charge_transformed_psfs[1]
def remove_idx_from_common_core_of_mol1(self, idx_list: list):
for idx in idx_list:
self._remove_idx_from_common_core("m1", idx)
def remove_idx_from_common_core_of_mol2(self, idx_list: list):
for idx in idx_list:
self._remove_idx_from_common_core("m2", idx)
def _remove_idx_from_common_core(self, name: str, idx: int):
if idx in self.added_indeces[name] or idx in self._get_common_core(name):
if idx in self.removed_indeces[name]:
print(f"Idx: {idx} already removed from common core.")
return
self.removed_indeces[name].append(idx)
else:
print(f"Idx: {idx} not in common core.")
def add_idx_to_common_core_of_mol1(self, idx_list: list):
"""Adds a list of atoms to the common core of molecule 1
.. caution::
Be aware of the ordering! Atom idx need to be added to match the ordering of the atom idx of common core 2
Args:
idx_list: Array of atom idxs to add
"""
for idx in idx_list:
self._add_common_core_atom("m1", idx)
logger.warning(
f"ATTENTION: Be aware of the ordering! Atom idx need to be added to match the ordering of the atom idx of common core 2"
)
logger.info(
f"Atom idx of the new common core: {self.get_common_core_idx_mol1()}"
)
def add_idx_to_common_core_of_mol2(self, idx_list: list):
"""Adds a list of atoms to the common core of molecule 1
.. caution::
Be aware of the ordering! Atom idx need to be added to match the ordering of the atom idx of common core 2
Args:
idx_list: Array of atom idxs to add
"""
for idx in idx_list:
self._add_common_core_atom("m2", idx)
logger.warning(
f"ATTENTION: Be aware of the ordering! Atom idx need to be added to match the ordering of the atom idx of common core 1"
)
logger.info(
f" Atom idx of the new common core: {self.get_common_core_idx_mol2()}"
)
def _add_common_core_atom(self, name: str, idx: int):
if idx in self.added_indeces[name] or idx in self._get_common_core(name):
print(f"Idx: {idx} already in common core.")
return
self.added_indeces[name].append(idx)
def get_idx_not_in_common_core_for_mol1(self) -> list:
return self._get_idx_not_in_common_core_for_mol("m1")
def get_idx_not_in_common_core_for_mol2(self) -> list:
return self._get_idx_not_in_common_core_for_mol("m2")
def _get_idx_not_in_common_core_for_mol(self, mol_name: str) -> list:
dummy_list_mol = [
atom.GetIdx()
for atom in self.mols[mol_name].GetAtoms()
if atom.GetIdx() not in self._get_common_core(mol_name)
]
return dummy_list_mol
def get_common_core_idx_mol1(self) -> list:
"""
Returns the common core of mol1.
"""
return self._get_common_core("m1")
def get_common_core_idx_mol2(self) -> list:
"""
Returns the common core of mol2.
"""
return self._get_common_core("m2")
def _get_common_core(self, name: str) -> list:
"""
Helper Function - should not be called directly.
Returns the common core.
"""
keep_idx = []
# BEWARE: the ordering is important - don't cast set!
for idx in self._substructure_match[name] + self.added_indeces[name]:
if idx not in self.removed_indeces[name]:
keep_idx.append(idx)
return keep_idx
    def _find_mcs(
        self,
        mol1_name: str,
        mol2_name: str,
        iterate_over_matches: bool = False,
        max_matches: int = 10,
    ):
        """Run the rdkit MCS search between the two mols and store the matched
        atom indices (heavy atoms plus pairwise-matched hydrogens) in
        ``self._substructure_match``.

        Parameters
        ----------
        mol1_name: str
            Key of the first molecule ("m1").
        mol2_name: str
            Key of the second molecule ("m2").
        iterate_over_matches: bool
            If True, iterate over up to ``max_matches`` substructure match
            pairs and keep the pair yielding the largest common core (i.e.
            the one including the most hydrogens).
        max_matches: int
            Maximum number of substructure matches per molecule to consider
            when ``iterate_over_matches`` is True.

        Returns
        -------
        The rdFMCS.MCSResult of the search.
        """
        logger.info("MCS starting ...")
        logger.debug(f"bondCompare: {self.bondCompare}")
        logger.debug(f"atomCompare: {self.atomCompare}")
        logger.debug(f"maximizeBonds: {self.maximizeBonds}")
        logger.debug(f"matchValences: {self.matchValences} ")
        logger.debug(f"ringMatchesRingOnly: {self.ringMatchesRingOnly} ")
        logger.debug(f"completeRingsOnly: {self.completeRingsOnly} ")
        m1, m2 = [deepcopy(self.mols[mol1_name]), deepcopy(self.mols[mol2_name])]
        # second copy of mols - to use as representation with removed hydrogens
        remmol1 = deepcopy(m1)
        remmol2 = deepcopy(m2)
        # removal of hydrogens - if not removed, common core for molecule + hydrogens is computed!
        remmol1 = Chem.rdmolops.RemoveAllHs(remmol1)
        remmol2 = Chem.rdmolops.RemoveAllHs(remmol2)
        # remmols contains both molecules with removed hydrogens
        remmols = [remmol1, remmol2]
        for m in [m1, m2]:
            logger.debug("Mol in SMILES format: {}.".format(Chem.MolToSmiles(m, True)))
        # make copy of mols
        changed_mols = [Chem.Mol(x) for x in [m1, m2]]
        # find substructure match (ignore bond order but enforce element matching)
        # findmcs-function is called for mol-objects with removed hydrogens
        # original Transformato-parameters (yield bad / for Transformato not usable results for molecules with cyclic structures, e.g., ccores between 2-CPI and 7-CPI)
        # especially because completeRingsOnly is set to False
        """
        mcs = rdFMCS.FindMCS(
            #changed_mols,
            remmols,
            bondCompare=self.bondCompare,
            timeout=120,
            atomCompare=self.atomCompare,
            maximizeBonds=self.maximizeBonds,
            matchValences=self.matchValences,
            completeRingsOnly=self.completeRingsOnly,
            ringMatchesRingOnly=self.ringMatchesRingOnly,
        )
        """
        # find_mcs-function from tf_routes:
        # yields more reasonable common cores (e.g. for 2-CPI/7-CPI )
        # in particular, completeRingsOnly=True is important
        mcs = rdFMCS.FindMCS(
            remmols,
            timeout=120,
            ringMatchesRingOnly=True,
            completeRingsOnly=True,
            ringCompare=Chem.rdFMCS.RingCompare.StrictRingFusion,
            bondCompare=rdFMCS.BondCompare.CompareAny,
            matchValences=False,
        )
        logger.debug("Substructure match: {}".format(mcs.smartsString))
        # convert from SMARTS
        mcsp = Chem.MolFromSmarts(mcs.smartsString, False)
        # iterate_over_matches == False: the common core atoms for a single stubstructure match are determined
        # possibly a different match yields a bigger ccore - i.e. a ccore with more hydrogens (neopentane - methane)
        if iterate_over_matches == False:
            s1 = m1.GetSubstructMatch(mcsp)
            logger.debug("Substructere match idx: {}".format(s1))
            self._show_common_core(
                m1, self.get_common_core_idx_mol1(), show_atom_type=False, internal=True
            )
            s2 = m2.GetSubstructMatch(mcsp)
            logger.debug("Substructere match idx: {}".format(s2))
            self._show_common_core(
                m2, self.get_common_core_idx_mol2(), show_atom_type=False, internal=True
            )
            # new code: add hydrogens to both common-core-on-molecule-projections
            # set with all common core atom indices for both molecules
            hit_ats1_compl = list(s1)
            hit_ats2_compl = list(s2)
            # check for each common core atom whether hydrogen atoms are in its neighbourhood
            # s1/s2 contain the mapping of the common core (without hydrogens) to both molecules
            # iterating over all mapped atoms, the number of hydrogens attached to the common core atom is determined
            # the minimum number (i.e. if the atom of molecule 1 has one hydrogen bond, the atom of molecule 2 zero hydrogen bonds, it is zero) gives the number of hydrogen atoms to add to the common core
            for indexpos, indexnr in enumerate(s1):
                # get mapped atoms
                atom1 = m1.GetAtomWithIdx(s1[indexpos])
                atom2 = m2.GetAtomWithIdx(s2[indexpos])
                # determine number of hydrogens in the neighbourhood of the atom from molecule1
                h_atoms1 = 0
                for x in atom1.GetNeighbors():
                    if x.GetSymbol() == "H":
                        h_atoms1 = h_atoms1 + 1
                # determine number of hydrogens in the neighbourhood of the atom from molecule2
                h_atoms2 = 0
                for x in atom2.GetNeighbors():
                    if x.GetSymbol() == "H":
                        h_atoms2 = h_atoms2 + 1
                # find minimum number of hydrogens
                min_h_atoms = min(h_atoms1, h_atoms2)
                # add minimum number of hydrogens to the ccore for molecule1
                h_atoms1 = 0
                for x in atom1.GetNeighbors():
                    if x.GetSymbol() == "H" and h_atoms1 < min_h_atoms:
                        hit_ats1_compl.append(x.GetIdx())
                        h_atoms1 = h_atoms1 + 1
                # add minimum number of hydrogens to the ccore for molecule2
                h_atoms2 = 0
                for x in atom2.GetNeighbors():
                    if x.GetSymbol() == "H" and h_atoms2 < min_h_atoms:
                        hit_ats2_compl.append(x.GetIdx())
                        h_atoms2 = h_atoms2 + 1
            # create new tuple of common core atom indices with additional hydrogens (molecule 1)
            hit_ats1 = tuple(hit_ats1_compl)
            # create new tuple of common core atom indices with additional hydrogens (molecule 2)
            hit_ats2 = tuple(hit_ats2_compl)
            self._substructure_match[mol1_name] = list(hit_ats1)
            self._substructure_match[mol2_name] = list(hit_ats2)
            # self._substructure_match[mol1_name] = list(s1)
            # self._substructure_match[mol2_name] = list(s2)
            return mcs
        # iterate_over_matches == True: it is iterated over all pairs of substructure matches
        # the substructure matches with the biggest emering common cores are finally chosen
        # the common cores for different substructure match pairs contain the same heavy atoms, but differ in the number of hydrogens, i.e. the finally chosen matches have the common cores with most hydrogens
        else:
            s1s = m1.GetSubstructMatches(mcsp, maxMatches=max_matches)
            logger.debug("Substructere match idx: {}".format(s1s))
            self._show_common_core(
                m1, self.get_common_core_idx_mol1(), show_atom_type=False, internal=True
            )
            s2s = m2.GetSubstructMatches(mcsp, maxMatches=max_matches)
            logger.debug("Substructere match idx: {}".format(s2s))
            self._show_common_core(
                m2, self.get_common_core_idx_mol2(), show_atom_type=False, internal=True
            )
            curr_size_of_ccores = 0
            for s1 in s1s:
                for s2 in s2s:
                    # new code: add hydrogens to both common-core-on-molecule-projections
                    # set with all common core atom indices for both molecules
                    hit_ats1_compl = list(s1)
                    hit_ats2_compl = list(s2)
                    # check for each common core atom whether hydrogen atoms are in its neighbourhood
                    # s1/s2 contain the mapping of the common core (without hydrogens) to both molecules
                    # iterating over all mapped atoms, the number of hydrogens attached to the common core atom is determined
                    # the minimum number (i.e. if the atom of molecule 1 has one hydrogen bond, the atom of molecule 2 zero hydrogen bonds, it is zero) gives the number of hydrogen atoms to add to the common core
                    for indexpos, indexnr in enumerate(s1):
                        # get mapped atoms
                        atom1 = m1.GetAtomWithIdx(s1[indexpos])
                        atom2 = m2.GetAtomWithIdx(s2[indexpos])
                        # determine number of hydrogens in the neighbourhood of the atom from molecule1
                        h_atoms1 = 0
                        for x in atom1.GetNeighbors():
                            if x.GetSymbol() == "H":
                                h_atoms1 = h_atoms1 + 1
                        # determine number of hydrogens in the neighbourhood of the atom from molecule2
                        h_atoms2 = 0
                        for x in atom2.GetNeighbors():
                            if x.GetSymbol() == "H":
                                h_atoms2 = h_atoms2 + 1
                        # find minimum number of hydrogens
                        min_h_atoms = min(h_atoms1, h_atoms2)
                        # add minimum number of hydrogens to the ccore for molecule1
                        h_atoms1 = 0
                        for x in atom1.GetNeighbors():
                            if x.GetSymbol() == "H" and h_atoms1 < min_h_atoms:
                                hit_ats1_compl.append(x.GetIdx())
                                h_atoms1 = h_atoms1 + 1
                        # add minimum number of hydrogens to the ccore for molecule2
                        h_atoms2 = 0
                        for x in atom2.GetNeighbors():
                            if x.GetSymbol() == "H" and h_atoms2 < min_h_atoms:
                                hit_ats2_compl.append(x.GetIdx())
                                h_atoms2 = h_atoms2 + 1
                    # count whether the new common cores are bigger (i.e. contain more hydrogens) than the previous common cores
                    # if this is the case, the current substructure matches are chosen
                    if len(hit_ats1_compl) > curr_size_of_ccores:
                        curr_size_of_ccores = len(hit_ats1_compl)
                        hit_ats1_compl_final = hit_ats1_compl
                        hit_ats2_compl_final = hit_ats2_compl
            # create new tuple of common core atom indices with additional hydrogens (molecule 1)
            hit_ats1 = tuple(hit_ats1_compl_final)
            # create new tuple of common core atom indices with additional hydrogens (molecule 2)
            hit_ats2 = tuple(hit_ats2_compl_final)
            self._substructure_match[mol1_name] = list(hit_ats1)
            self._substructure_match[mol2_name] = list(hit_ats2)
            # self._substructure_match[mol1_name] = list(s1)
            # self._substructure_match[mol2_name] = list(s2)
            return mcs
def _return_atom_idx_from_bond_idx(self, mol: Chem.Mol, bond_idx: int):
return (
mol.GetBondWithIdx(bond_idx).GetBeginAtomIdx(),
mol.GetBondWithIdx(bond_idx).GetEndAtomIdx(),
)
def _find_connected_dummy_regions(self, mol_name: str) -> List[set]:
sub = self._get_common_core(mol_name)
#############################
# start
#############################
mol = self.mols[mol_name]
G = self.graphs[mol_name].copy()
# find all dummy atoms
list_of_dummy_atoms_idx = [
atom.GetIdx() for atom in mol.GetAtoms() if atom.GetIdx() not in sub
]
nr_of_dummy_atoms = len(list_of_dummy_atoms_idx) + 1
list_of_real_atoms_idx = [
atom.GetIdx() for atom in mol.GetAtoms() if atom.GetIdx() in sub
]
# remove real atoms from graph to obtain multiple connected compounds
for real_atom_idx in list_of_real_atoms_idx:
G.remove_node(real_atom_idx)
# find these connected compounds
from networkx.algorithms.components import connected_components
unique_subgraphs = [
c for c in sorted(nx.connected_components(G), key=len, reverse=True)
]
return unique_subgraphs
def show_common_core_on_mol1(self, show_atom_types: bool = False):
"""
Shows common core on mol1
"""
return self._show_common_core(
self.mols["m1"],
self.get_common_core_idx_mol1(),
show_atom_types,
internal=False,
)
def show_common_core_on_mol2(self, show_atom_types: bool = False):
"""
Shows common core on mol2
"""
return self._show_common_core(
self.mols["m2"],
self.get_common_core_idx_mol2(),
show_atom_types,
internal=False,
)
    def _show_common_core(
        self, mol, highlight: list, show_atom_type: bool, internal: bool
    ):
        """
        Helper function - do not call directly.

        Draw ``mol`` as SVG with the ``highlight`` atoms marked.

        Parameters
        ----------
        mol : Chem.Mol
            Molecule to draw (a deepcopy is made; the input is not modified).
        highlight : list
            Atom indices to highlight (the common core).
        show_atom_type : bool
            If True, label atoms as "<atom_index>:<atom_type>"; otherwise, for
            small molecules (< 30 atoms), as "<atom_index>:<atom_name>".
        internal : bool
            If True, additionally display the SVG inline (notebook use).

        Returns
        -------
        str
            The generated SVG markup.
        """
        # https://rdkit.blogspot.com/2015/02/new-drawing-code.html
        mol = deepcopy(mol)
        drawer = rdMolDraw2D.MolDraw2DSVG(500, 500)
        drawer.SetFontSize(6)
        opts = drawer.drawOptions()
        if show_atom_type:
            for i in mol.GetAtoms():
                opts.atomLabels[i.GetIdx()] = (
                    str(i.GetProp("atom_index")) + ":" + i.GetProp("atom_type")
                )
        elif mol.GetNumAtoms() < 30:
            # only label small molecules - labels become unreadable otherwise
            for i in mol.GetAtoms():
                opts.atomLabels[i.GetIdx()] = (
                    str(i.GetProp("atom_index")) + ":" + i.GetProp("atom_name")
                )
        rdCoordGen.AddCoords(mol)  # Create Coordinates
        drawer.DrawMolecule(mol, highlightAtoms=highlight)
        drawer.FinishDrawing()
        # strip the "svg:" namespace prefix so the markup renders standalone
        svg = drawer.GetDrawingText().replace("svg:", "")
        if internal:
            display(SVG(svg))
        return svg
def generate_mutations_to_common_core_for_mol1(self) -> dict:
"""
Generates the mutation route to the common fore for mol1.
----------
mutations: list
list of mutations
"""
m = self._mutate_to_common_core(
self.dummy_region_cc1, self.get_common_core_idx_mol1(), mol_name="m1"
)
if not self.asfe:
m["transform"] = self._transform_common_core()
return m
def generate_mutations_to_common_core_for_mol2(self) -> dict:
"""
Generates the mutation route to the common fore for mol2.
Returns
----------
mutations: list
list of mutations
"""
if not self.terminal_real_atom_cc1:
raise RuntimeError("First generate the MCS")
m = self._mutate_to_common_core(
self.dummy_region_cc2, self.get_common_core_idx_mol2(), mol_name="m2"
)
return m
    def _transform_common_core(self) -> list:
        """
        Common Core 1 is transformed to Common core 2. Bonded parameters and charges are adjusted.

        Returns
        -------
        list
            A single-element list holding the CommonCoreTransformation, or an
            empty list if neither atom types nor charges differ between the
            two common cores.
        """
        transformations = []
        logger.warning("##############################")
        logger.warning("##############################")
        logger.warning("Transform common core")
        logger.warning("##############################")
        logger.warning("##############################")
        # test if bonded mutations are necessary
        bonded_terms_mutation = False
        charge_mutation = False
        # the index lists are parallel: position i of cc1 maps to position i of cc2
        for cc1, cc2 in zip(
            self.get_common_core_idx_mol1() + self.dummy_region_cc1.lj_default,
            self.get_common_core_idx_mol2() + self.dummy_region_cc2.lj_default,
        ):
            # did atom type change? if not don't add BondedMutations
            atom1 = self.psfs["m1"][cc1]
            atom2 = self.psfs["m2"][cc2]
            if atom1.type != atom2.type:
                logger.warning("##############################")
                logger.warning("Atom type transformation")
                logger.warning(f"Atom that needs to be transformed: {atom1}.")
                logger.warning(f"Atom type of atom in cc1: {atom1.type}.")
                logger.warning(f"Template atom: {atom2}.")
                logger.warning(f"Atom type of atom in cc2: {atom2.type}.")
                bonded_terms_mutation = True
        # compare the charge-compensated end states to decide whether charges
        # need to be transformed on the common core
        for cc1, cc2 in zip(
            self.get_common_core_idx_mol1(), self.get_common_core_idx_mol2()
        ):
            atom1 = self.charge_compensated_ligand1_psf[cc1]
            atom2 = self.charge_compensated_ligand2_psf[cc2]
            if atom1.charge != atom2.charge:
                logger.warning("##############################")
                logger.warning("Charge transformation")
                logger.warning("Charge needs to be transformed on common core")
                logger.warning(f"Atom that needs to be transformed: {atom1}.")
                logger.warning(f"Atom charge of atom in cc1: {atom1.charge}.")
                logger.warning(f"Template atom: {atom2}.")
                logger.warning(f"Atom charge of atom in cc2: {atom2.charge}.")
                charge_mutation = True
        # if necessary transform bonded parameters
        if bonded_terms_mutation or charge_mutation:
            logger.warning(f"Bonded parameters mutation: {bonded_terms_mutation}.")
            logger.warning(f"Charge parameters mutation: {charge_mutation}.")
            t = CommonCoreTransformation(
                self.get_common_core_idx_mol1() + self.dummy_region_cc1.lj_default,
                self.get_common_core_idx_mol2() + self.dummy_region_cc2.lj_default,
                self.psfs["m1"],
                self.psfs["m2"],
                self.s1_tlc,
                self.s2_tlc,
                self.charge_compensated_ligand2_psf,
                charge_mutation=charge_mutation,
                bonded_terms_mutation=bonded_terms_mutation,
            )
            transformations.append(t)
        else:
            logger.info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            logger.info("No transformations needed.")
            logger.info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            transformations = []
        return transformations
@staticmethod
def _find_terminal_atom(cc_idx: list, mol: Chem.Mol) -> Tuple[list, list]:
"""
Find atoms that connect the molecule to the common core.
Args:
cc_idx (list): common core index atoms
mol ([type]): rdkit mol object
"""
terminal_dummy_atoms = []
terminal_real_atoms = []
for atom in mol.GetAtoms():
idx = atom.GetIdx()
if idx not in cc_idx:
neighbors = [x.GetIdx() for x in atom.GetNeighbors()]
if any([n in cc_idx for n in neighbors]):
terminal_dummy_atoms.append(idx)
if idx in cc_idx:
neighbors = [x.GetIdx() for x in atom.GetNeighbors()]
if any([n not in cc_idx for n in neighbors]):
terminal_real_atoms.append(idx)
logger.info(f"Terminal dummy atoms: {str(list(set(terminal_dummy_atoms)))}")
logger.info(f"Terminal real atoms: {str(list(set(terminal_real_atoms)))}")
return (list(set(terminal_dummy_atoms)), list(set(terminal_real_atoms)))
def _mutate_to_common_core(
self, dummy_region: DummyRegion, cc_idx: list, mol_name: str
) -> dict:
"""
Helper function - do not call directly.
Generates the mutation route to the common fore for mol.
"""
mutations = defaultdict(list)
tlc = self.s1_tlc
if self.asfe:
psf = self.psf1["waterbox"]
cc_idx = [] # no CC in ASFE
list_termin_dummy_atoms = []
else:
# copy of the currently used psf
psf = self.psfs[f"{mol_name}"][:, :, :]
# only necessary for relative binding/solvation free energies
# get the atom that connects the common core to the dummy regiom
match_termin_real_and_dummy_atoms = (
dummy_region.match_termin_real_and_dummy_atoms
)
# get the terminal dummy atoms
list_termin_dummy_atoms = []
for m in match_termin_real_and_dummy_atoms.values():
list_termin_dummy_atoms.extend(list(m))
logger.info(f"Terminal dummy atoms: {list_termin_dummy_atoms}")
if mol_name == "m2":
tlc = self.s2_tlc
# iterate through atoms and select atoms that need to be mutated
atoms_to_be_mutated = []
hydrogens = []
for atom in psf.view[f":{tlc}"].atoms:
# idx = atom.idx - self.offset
idx = atom.idx
if idx not in cc_idx:
if atom.name.find("H") == False and idx not in list_termin_dummy_atoms:
hydrogens.append(idx)
atoms_to_be_mutated.append(idx)
logger.info(
"Will be decoupled: Idx:{} Element:{}".format(idx, atom.name)
)
if atoms_to_be_mutated:
############################################
############################################
# charge mutation
############################################
############################################
m = MutationDefinition(
atoms_to_be_mutated=atoms_to_be_mutated,
common_core=cc_idx,
dummy_region=dummy_region,
vdw_atom_idx=[],
steric_mutation_to_default=False,
)
mutations["charge"].append(m)
############################################
############################################
# LJ mutation
############################################
############################################
# start with mutation of LJ of hydrogens
# Only take hydrogens that are not terminal hydrogens
if hydrogens:
m = MutationDefinition(
atoms_to_be_mutated=atoms_to_be_mutated,
common_core=cc_idx,
dummy_region=dummy_region,
vdw_atom_idx=hydrogens,
steric_mutation_to_default=False,
)
mutations["hydrogen-lj"].append(m)
for region in dummy_region.connected_dummy_regions:
for atom_idx in region:
if (
atom_idx in list_termin_dummy_atoms
and atom_idx in dummy_region.lj_default
):
# test if atom is a terminal atom and there is a corresponding atom on the other cc
# in this case the atom needs to become a default lj particle
m = MutationDefinition(
atoms_to_be_mutated=atoms_to_be_mutated,
common_core=cc_idx,
dummy_region=dummy_region,
vdw_atom_idx=[atom_idx],
steric_mutation_to_default=True,
)
mutations["default-lj"].append(m)
elif atom_idx in hydrogens or psf[atom_idx].type == "LPH":
# already mutated
continue
else:
# normal lj mutation
m = MutationDefinition(
atoms_to_be_mutated=atoms_to_be_mutated,
common_core=cc_idx,
dummy_region=dummy_region,
vdw_atom_idx=[atom_idx],
steric_mutation_to_default=False,
)
mutations["lj"].append(m)
else:
logger.critical("No atoms will be decoupled.")
mutations = defaultdict()
return mutations
class CommonCoreTransformation(object):
    """
    Interpolate the parameters of the common core of ligand 1 (cc1) towards
    the parameters of the common core of ligand 2 (cc2).

    Depending on the flags passed to ``__init__`` this covers the charges
    and/or the atom/bond/angle/torsion terms; the interpolation is driven by
    a lambda value in [0, 1] where 1.0 reproduces cc1 and 0.0 reproduces cc2.
    """

    def __init__(
        self,
        cc1_indicies: list,
        cc2_indicies: list,
        ligand1_psf: pm.charmm.CharmmPsfFile,
        ligand2_psf: pm.charmm.CharmmPsfFile,
        tlc_cc1: str,
        tlc_cc2: str,
        charge_compensated_ligand2_psf: pm.charmm.CharmmPsfFile,
        charge_mutation: bool,
        bonded_terms_mutation: bool,
    ):
        """
        Scale the bonded parameters inside the common core.

        Parameters
        ----------
        cc1_indicies : list
            indices of cc1
        cc2_indicies : list
            indices of cc2 (in the same order as cc1)
        ligand1_psf : pm.charmm.CharmmPsfFile
            copy of the ligand-only psf of system 1
        ligand2_psf : pm.charmm.CharmmPsfFile
            copy of the ligand-only psf of system 2; the target psf that is
            used to generate the new bonded parameters
        tlc_cc1 : str
            three letter code of ligand in cc1
        tlc_cc2 : str
            three letter code of ligand in cc2
        charge_compensated_ligand2_psf : pm.charmm.CharmmPsfFile
            ligand 2 psf with the charge redistribution already applied
        charge_mutation : bool
            interpolate the charges from cc1 to cc2
        bonded_terms_mutation : bool
            interpolate atom/bond/angle/torsion terms from cc1 to cc2
        """
        self.cc1_indicies: list = cc1_indicies
        self.cc2_indicies: list = cc2_indicies
        self.ligand2_psf: pm.charmm.CharmmPsfFile = ligand2_psf
        self.ligand1_psf: pm.charmm.CharmmPsfFile = ligand1_psf
        self.tlc_cc1: str = tlc_cc1
        self.tlc_cc2: str = tlc_cc2
        self.atom_names_mapping = self._get_atom_mapping()
        self.charge_mutation: bool = charge_mutation
        self.bonded_terms_mutation: bool = bonded_terms_mutation
        self.charge_compensated_ligand2_psf: pm.charmm.CharmmPsfFile = (
            charge_compensated_ligand2_psf
        )
        logger.info(f"Bonded terms mutation: {bonded_terms_mutation}")
        logger.info(f"Charge mutation: {charge_mutation}")

    def _get_atom_mapping(self) -> dict:
        """
        Match the atom names of the two common cores.

        Returns
        -------
        dict
            maps each cc1 atom name to the corresponding cc2 atom name
        """
        # Prepare variables used elsewhere for restraint common-core checks.
        global cc_names_struc1, cc_names_struc2
        cc_names_struc1 = []
        cc_names_struc2 = []
        # match atoms in common cores
        match_atom_names_cc1_to_cc2 = {}
        for cc1_idx, cc2_idx in zip(self.cc1_indicies, self.cc2_indicies):
            ligand1_atom = self.ligand1_psf[cc1_idx]
            ligand2_atom = self.ligand2_psf[cc2_idx]
            match_atom_names_cc1_to_cc2[ligand1_atom.name] = ligand2_atom.name
            cc_names_struc1.append(ligand1_atom.name)
            cc_names_struc2.append(ligand2_atom.name)
        # use the module logger instead of bare print for consistency
        logger.info(f"CC Struc1: {cc_names_struc1}")
        logger.info(f"CC Struc2: {cc_names_struc2}")
        return match_atom_names_cc1_to_cc2

    def _mutate_charges(self, psf: pm.charmm.CharmmPsfFile, scale: float):
        """
        Interpolate the common-core charges of cc1 towards the
        charge-compensated cc2 charges (scale=1.0 -> pure cc1,
        scale=0.0 -> pure cc2).
        """
        # common core of psf 1 is transformed to psf 2
        for ligand1_atom in psf.view[f":{self.tlc_cc1}"]:
            if ligand1_atom.name not in self.atom_names_mapping:
                continue
            found = False
            # compare to charge compensated psf 2
            for ligand2_atom in self.charge_compensated_ligand2_psf:
                if self.atom_names_mapping[ligand1_atom.name] == ligand2_atom.name:
                    found = True
                    logger.debug(f"Modifying atom: {ligand1_atom}")
                    logger.debug(f"Template atom: {ligand2_atom}")
                    # interpolate the charge
                    modified_charge = (
                        scale * ligand1_atom.charge + (1 - scale) * ligand2_atom.charge
                    )
                    logger.debug(
                        f"Current charge: {ligand1_atom.charge}; target charge: {ligand2_atom.charge}; modified charge: {modified_charge}"
                    )
                    ligand1_atom.charge = modified_charge
            if not found:
                raise RuntimeError("No corresponding atom in cc2 found")

    def _mutate_atoms(self, psf: pm.charmm.CharmmPsfFile, lambda_value: float):
        """
        Interpolate the LJ parameters (epsilon, rmin) of common-core atoms
        whose atom types differ between cc1 and cc2; the result is staged on
        ``atom.mod_type``.

        Raises
        ------
        RuntimeError
            if common core atoms can not be matched
        """
        # what will be changed
        mod_type = namedtuple("Atom", "epsilon, rmin")
        logger.debug("#######################")
        logger.debug("mutate_atoms")
        # iterate through the atoms of the ligand of system1
        for ligand1_atom in psf.view[f":{self.tlc_cc1}"]:
            # continue if not in atom_names_mapping
            if ligand1_atom.name not in self.atom_names_mapping:
                continue
            found = False
            # iterate through the atoms the ligand of system2
            for ligand2_atom in self.ligand2_psf:
                # is there a match up?
                if self.atom_names_mapping[ligand1_atom.name] == ligand2_atom.name:
                    found = True
                    # only atoms with differing types are interpolated
                    if ligand1_atom.type != ligand2_atom.type:
                        if "DDX" in ligand1_atom.type:
                            logger.warning(
                                "This is the terminal LJ atom. If everything went correct, this does not have to change atom types."
                            )
                        else:
                            self._modify_type_in_cc(ligand1_atom, psf)
                            logger.debug(f"Modifying atom: {ligand1_atom}")
                            logger.debug(f"Template atom: {ligand2_atom}")
                            # scale epsilon
                            modified_epsilon = (
                                lambda_value * ligand1_atom.epsilon
                                + (1.0 - lambda_value) * ligand2_atom.epsilon
                            )
                            # scale rmin
                            modified_rmin = (
                                lambda_value * ligand1_atom.rmin
                                + (1.0 - lambda_value) * ligand2_atom.rmin
                            )
                            logger.debug(
                                f"Original LJ: eps: {ligand1_atom.epsilon}; rmin: {ligand1_atom.rmin}"
                            )
                            logger.debug(
                                f"New LJ: eps: {modified_epsilon}; rmin: {modified_rmin}"
                            )
                            ligand1_atom.mod_type = mod_type(
                                modified_epsilon, modified_rmin
                            )
            if not found:
                raise RuntimeError("No corresponding atom in cc2 found")

    def _mutate_bonds(self, psf: pm.charmm.CharmmPsfFile, lambda_value: float):
        """Interpolate matching common-core bond terms (k, req) from cc1 to cc2."""
        logger.debug("#######################")
        logger.debug("mutate_bonds")
        mod_type = namedtuple("Bond", "k, req")
        for ligand1_bond in psf.view[f":{self.tlc_cc1}"].bonds:
            ligand1_atom1_name = ligand1_bond.atom1.name
            ligand1_atom2_name = ligand1_bond.atom2.name
            # all atoms of the bond must be in cc
            # everything outside the cc are bonded terms between dummies or
            # between real atoms and dummies and we can ignore them for now
            if not all(
                elem in self.atom_names_mapping
                for elem in [ligand1_atom1_name, ligand1_atom2_name]
            ):
                continue
            found = False
            for ligand2_bond in self.ligand2_psf.bonds:
                ligand2_atom1_name = ligand2_bond.atom1.name
                ligand2_atom2_name = ligand2_bond.atom2.name
                # all atoms of the bond must be in cc
                if not all(
                    elem in self.atom_names_mapping.values()
                    for elem in [ligand2_atom1_name, ligand2_atom2_name]
                ):
                    continue
                # match the two bonds
                if sorted(
                    [
                        self.atom_names_mapping[e]
                        for e in [ligand1_atom1_name, ligand1_atom2_name]
                    ]
                ) == sorted([ligand2_atom1_name, ligand2_atom2_name]):
                    found = True
                    # skip if the bonds are identical anyway
                    if sorted(
                        [ligand1_bond.atom1.type, ligand1_bond.atom2.type]
                    ) == sorted([ligand2_bond.atom1.type, ligand2_bond.atom2.type]):
                        continue
                    logger.debug(f"Modifying bond: {ligand1_bond}")
                    logger.debug(f"Template bond: {ligand2_bond}")
                    modified_k = (lambda_value * ligand1_bond.type.k) + (
                        (1.0 - lambda_value) * ligand2_bond.type.k
                    )
                    logger.debug(
                        f"Current k: {ligand1_bond.type.k}; target k: {ligand2_bond.type.k}; new k: {modified_k}"
                    )
                    # interpolating from ligand1 (original) to ligand2 (new) bond parameters
                    modified_req = (lambda_value * ligand1_bond.type.req) + (
                        (1.0 - lambda_value) * ligand2_bond.type.req
                    )
                    logger.debug(
                        f"Current req: {ligand1_bond.type.req}; target req: {ligand2_bond.type.req}; new req: {modified_req}"
                    )
                    ligand1_bond.mod_type = mod_type(modified_k, modified_req)
                    logger.debug(ligand1_bond.mod_type)
            if not found:
                logger.critical(ligand1_bond)
                raise RuntimeError(
                    "No corresponding bond in cc2 found: {}".format(ligand1_bond)
                )

    def _mutate_angles(self, psf: pm.charmm.CharmmPsfFile, lambda_value: float):
        """Interpolate matching common-core angle terms (k, theteq) from cc1 to cc2."""
        mod_type = namedtuple("Angle", "k, theteq")
        for cc1_angle in psf.view[f":{self.tlc_cc1}"].angles:
            ligand1_atom1_name = cc1_angle.atom1.name
            ligand1_atom2_name = cc1_angle.atom2.name
            cc1_a3 = cc1_angle.atom3.name
            # only angles in cc
            if not all(
                elem in self.atom_names_mapping
                for elem in [ligand1_atom1_name, ligand1_atom2_name, cc1_a3]
            ):
                continue
            found = False
            for cc2_angle in self.ligand2_psf.angles:
                ligand2_atom1_name = cc2_angle.atom1.name
                ligand2_atom2_name = cc2_angle.atom2.name
                cc2_a3 = cc2_angle.atom3.name
                # only angles in cc
                if not all(
                    elem in self.atom_names_mapping.values()
                    for elem in [ligand2_atom1_name, ligand2_atom2_name, cc2_a3]
                ):
                    continue
                if sorted(
                    [
                        self.atom_names_mapping[e]
                        for e in [ligand1_atom1_name, ligand1_atom2_name, cc1_a3]
                    ]
                ) == sorted([ligand2_atom1_name, ligand2_atom2_name, cc2_a3]):
                    found = True
                    # skip if the angle types are identical anyway
                    if sorted(
                        [
                            cc1_angle.atom1.type,
                            cc1_angle.atom2.type,
                            cc1_angle.atom3.type,
                        ]
                    ) == sorted(
                        [
                            cc2_angle.atom1.type,
                            cc2_angle.atom2.type,
                            cc2_angle.atom3.type,
                        ]
                    ):
                        continue
                    logger.debug(f"Modifying angle: {cc1_angle}")
                    # fixed log label: this is an angle, not a bond
                    logger.debug(f"Template angle: {cc2_angle}")
                    logger.debug("Scaling k and theteq")
                    logger.debug(f"Old k: {cc1_angle.type.k}")
                    modified_k = (
                        lambda_value * cc1_angle.type.k
                        + (1.0 - lambda_value) * cc2_angle.type.k
                    )
                    logger.debug(f"New k: {modified_k}")
                    # fixed: log theteq under its own label and via the
                    # module logger (was `logging.debug` / label "k")
                    logger.debug(f"Old theteq: {cc1_angle.type.theteq}")
                    modified_theteq = (
                        lambda_value * cc1_angle.type.theteq
                        + (1.0 - lambda_value) * cc2_angle.type.theteq
                    )
                    logger.debug(f"New theteq: {modified_theteq}")
                    cc1_angle.mod_type = mod_type(modified_k, modified_theteq)
            if not found:
                logger.critical(cc1_angle)
                raise RuntimeError("No corresponding angle in cc2 found")

    def _mutate_torsions(self, psf: pm.charmm.CharmmPsfFile, lambda_value: float):
        """
        Cross-fade matching common-core torsions: the cc1 terms are switched
        off over the first half of the interpolation (lambda 1.0 -> 0.5) and
        the cc2 terms are switched on over the second half (0.5 -> 0.0).
        """
        mod_type = namedtuple("Torsion", "phi_k, per, phase, scee, scnb")
        # get all torsions present in initial topology
        for original_torsion in psf.view[f":{self.tlc_cc1}"].dihedrals:
            found: bool = False
            original_atom1_name = original_torsion.atom1.name
            original_atom2_name = original_torsion.atom2.name
            original_atom3_name = original_torsion.atom3.name
            original_atom4_name = original_torsion.atom4.name
            # all atoms must be in the cc
            if not all(
                elem in self.atom_names_mapping
                for elem in [
                    original_atom1_name,
                    original_atom2_name,
                    original_atom3_name,
                    original_atom4_name,
                ]
            ):
                continue
            # get corresponding torsion types in the new topology
            for new_torsion in self.ligand2_psf.dihedrals:
                new_atom1_name = new_torsion.atom1.name
                new_atom2_name = new_torsion.atom2.name
                new_atom3_name = new_torsion.atom3.name
                new_atom4_name = new_torsion.atom4.name
                # only torsion in cc
                if not all(
                    elem in self.atom_names_mapping.values()
                    for elem in [
                        new_atom1_name,
                        new_atom2_name,
                        new_atom3_name,
                        new_atom4_name,
                    ]
                ):
                    continue
                if sorted(
                    [
                        self.atom_names_mapping[e]
                        for e in [
                            original_atom1_name,
                            original_atom2_name,
                            original_atom3_name,
                            original_atom4_name,
                        ]
                    ]
                ) == sorted(
                    [new_atom1_name, new_atom2_name, new_atom3_name, new_atom4_name]
                ):
                    found = True
                    # skip if the torsion types are identical anyway
                    if sorted(
                        [
                            original_torsion.atom1.type,
                            original_torsion.atom2.type,
                            original_torsion.atom3.type,
                            original_torsion.atom4.type,
                        ]
                    ) == sorted(
                        [
                            new_torsion.atom1.type,
                            new_torsion.atom2.type,
                            new_torsion.atom3.type,
                            new_torsion.atom4.type,
                        ]
                    ):
                        continue
                    mod_types = []
                    # torsion present at cc1 needs to be turned fully off starting at lambda_value == 1.
                    f = max((1 - ((1 - lambda_value) * 2)), 0.0)
                    if f > 0.0 or lambda_value == 0.5:
                        for torsion_t in original_torsion.type:
                            modified_phi_k = torsion_t.phi_k * f
                            mod_types.append(
                                mod_type(
                                    modified_phi_k,
                                    torsion_t.per,
                                    torsion_t.phase,
                                    torsion_t.scee,
                                    torsion_t.scnb,
                                )
                            )
                    # torsion present at cc2 needs to be fully turned on at lambda_value == 0.0
                    f = 1 - min((lambda_value) * 2, 1.0)
                    if f > 0.0:
                        for torsion_t in new_torsion.type:
                            modified_phi_k = torsion_t.phi_k * f
                            if modified_phi_k >= 0.0:
                                mod_types.append(
                                    mod_type(
                                        modified_phi_k,
                                        torsion_t.per,
                                        torsion_t.phase,
                                        torsion_t.scee,
                                        torsion_t.scnb,
                                    )
                                )
                    original_torsion.mod_type = mod_types
            if not found:
                logger.critical(original_torsion)
                raise RuntimeError("No corresponding torsion in cc2 found")

    def mutate(self, psf: pm.charmm.CharmmPsfFile, lambda_value: float):
        """
        Mutates the bonded parameters of cc1 to cc2.

        Parameters
        ----------
        psf : pm.charmm.CharmmPsfFile
            psf that gets mutated
        lambda_value : float
            1.0 keeps the original cc1 parameters, 0.0 yields the cc2
            parameters
        """
        assert isinstance(psf, pm.charmm.CharmmPsfFile)
        if self.charge_mutation:
            logger.info(" -- Charge parameters from cc1 are transformed to cc2.")
            logger.info(f"Lambda value:{lambda_value}")
            # scale charge
            self._mutate_charges(psf, lambda_value)
        if self.bonded_terms_mutation:
            logger.info(
                " -- Atom/Bond/Angle/Torsion parameters from cc1 are transformed to cc2."
            )
            logger.info(f"Lambda value:{lambda_value}")
            # scale atoms
            self._mutate_atoms(psf, lambda_value)
            # scale bonds
            self._mutate_bonds(psf, lambda_value)
            # scale angles
            self._mutate_angles(psf, lambda_value)
            # scale torsions
            self._mutate_torsions(psf, lambda_value)

    @staticmethod
    def _modify_type_in_cc(atom: pm.Atom, psf: pm.charmm.CharmmPsfFile):
        """
        Give `atom` a unique placeholder type (RRR<n>) the first time its
        parameters are modified; the original type is stashed on
        ``atom.initial_type``.
        """
        if hasattr(atom, "initial_type"):
            # type was already renamed; only the parameters change
            pass
        else:
            logger.info(f"Setting RRR atomtype for atom: {atom}.")
            atom.initial_type = atom.type
            psf.number_of_dummys += 1
            atom.type = f"RRR{psf.number_of_dummys}"
class Mutation(object):
    """
    Turn off the electrostatic and LJ contributions of the dummy-region
    atoms of a ligand. Charge changes are compensated on the real atoms
    that connect each dummy region to the common core.
    """

    def __init__(self, atoms_to_be_mutated: list, dummy_region: DummyRegion):
        assert isinstance(atoms_to_be_mutated, list)
        self.atoms_to_be_mutated = atoms_to_be_mutated
        self.dummy_region = dummy_region
        self.tlc = dummy_region.tlc

    def _mutate_charge(
        self, psf: pm.charmm.CharmmPsfFile, lambda_value: float, offset: int
    ):
        """Scale the charge of every atom to be mutated by `lambda_value`
        and, if applicable, compensate the removed charge."""
        total_charge = int(
            round(sum([atom.initial_charge for atom in psf.view[f":{self.tlc}"].atoms]))
        )
        # scale the charge of all atoms (module logger instead of print)
        logger.info(f"Scaling charge on: {self.atoms_to_be_mutated}")
        for idx in self.atoms_to_be_mutated:
            odx = idx + offset
            atom = psf[odx]
            logger.debug(f"Scale charge on {atom}")
            logger.debug(f"Scaling charge with: {lambda_value}")
            logger.debug(f"Old charge: {atom.charge}")
            atom.charge = atom.initial_charge * lambda_value
            logger.debug(f"New charge: {atom.charge}")
        # check to avoid compensating charges when doing asfe
        if (
            lambda_value != 1
            and len(self.dummy_region.match_termin_real_and_dummy_atoms) != 0
        ):
            # compensate for the total change in charge on the terminal atom
            self._compensate_charge(psf, total_charge, offset)

    def _mutate_vdw(
        self,
        psf: pm.charmm.CharmmPsfFile,
        lambda_value: float,
        vdw_atom_idx: List[int],
        offset: int,
        to_default: bool,
    ):
        """
        Scale the LJ parameters of `vdw_atom_idx` towards zero (dummy, DDD)
        or reset them to a generic default particle (DDX).
        """
        if not set(vdw_atom_idx).issubset(set(self.atoms_to_be_mutated)):
            raise RuntimeError(
                f"Specified atom {vdw_atom_idx} is not in atom_idx list {self.atoms_to_be_mutated}. Aborting."
            )
        logger.info(f"Acting on atoms: {vdw_atom_idx}")
        # NOTE(review): this recomputes the ligand offset and shadows the
        # `offset` argument -- confirm whether the parameter can be removed.
        offset = min([a.idx for a in psf.view[f":{self.tlc.upper()}"].atoms])
        for i in vdw_atom_idx:
            atom = psf[i + offset]
            if to_default:
                logger.info("Mutate to default")
                atom_type_suffix = "DDX"
                atom.rmin = 1.5
                atom.epsilon = -0.15
            else:
                logger.info("Mutate to dummy")
                atom_type_suffix = "DDD"
                self._scale_epsilon(atom, lambda_value)
                self._scale_rmin(atom, lambda_value)
            # NOTE: there is always a type change
            self._modify_type(atom, psf, atom_type_suffix)

    def mutate(
        self,
        psf: pm.charmm.CharmmPsfFile,
        lambda_value_electrostatic: float = 1.0,
        lambda_value_vdw: float = 1.0,
        vdw_atom_idx: List[int] = [],
        steric_mutation_to_default: bool = False,
    ):
        """
        Perform the mutation on `psf`.

        Parameters
        ----------
        psf : pm.charmm.CharmmPsfFile
            psf that gets mutated in place
        lambda_value_electrostatic : float
            1.0 keeps full charges, 0.0 removes them
        lambda_value_vdw : float
            1.0 keeps full LJ terms, 0.0 removes them
        vdw_atom_idx : List[int]
            atoms whose LJ terms are scaled (read-only; safe as a default)
        steric_mutation_to_default : bool
            reset the LJ terms to a default particle instead of scaling
        """
        if lambda_value_electrostatic < 0.0 or lambda_value_electrostatic > 1.0:
            # fixed label: this bound guards the electrostatic lambda
            raise RuntimeError(
                "Lambda value for electrostatics needs to be between 0.0 and 1.0."
            )
        if lambda_value_vdw < 0.0 or lambda_value_vdw > 1.0:
            raise RuntimeError("Lambda value for vdw needs to be between 0.0 and 1.0.")
        logger.debug(f"LJ scaling factor: {lambda_value_electrostatic}")
        logger.debug(f"VDW scaling factor: {lambda_value_vdw}")
        offset = min([a.idx for a in psf.view[f":{self.tlc.upper()}"].atoms])
        if lambda_value_electrostatic < 1.0:
            self._mutate_charge(psf, lambda_value_electrostatic, offset)
        if lambda_value_vdw < 1.0:
            self._mutate_vdw(
                psf, lambda_value_vdw, vdw_atom_idx, offset, steric_mutation_to_default
            )

    def _compensate_charge(
        self, psf: pm.charmm.CharmmPsfFile, total_charge: int, offset: int
    ):
        """
        Compensate the charge removed from each dummy region on the real
        atom that connects that dummy region to the real region, so that
        the total ligand charge stays at `total_charge`.

        Parameters
        ----------
        psf : pm.charmm.CharmmPsfFile
            psf holding the (already scaled) ligand charges
        total_charge : int
            target integer net charge of the ligand
        offset : int
            index offset of the ligand atoms inside `psf`

        Raises
        ------
        RuntimeError
            if no connecting real atom is found, or the compensation fails
            to restore an integer total charge
        """
        # get dummy regions
        connected_dummy_regions = self.dummy_region.connected_dummy_regions
        logger.debug("Compensating charge ...")
        # save the atoms that are used for charge compensation. This is done because if two regions
        # use the same atom, a special handling needs to be invoked
        compensating_on_this_real_atom = []
        # check for each dummy region how much charge has changed and compensate on atom that connects
        # the real region with specific dummy regions
        for dummy_idx in connected_dummy_regions:
            logger.debug(f"Dummy idx region: {dummy_idx}")
            connecting_real_atom_for_this_dummy_region = (
                self.dummy_region.return_connecting_real_atom(dummy_idx)
            )
            logger.debug(
                f"Connecting atom: {connecting_real_atom_for_this_dummy_region}"
            )
            if connecting_real_atom_for_this_dummy_region is None:
                raise RuntimeError(
                    "Something went wrong with the charge compensation. Aborting."
                )
            charge_acceptor = psf[connecting_real_atom_for_this_dummy_region + offset]
            charge_to_compensate_for_region = 0.0
            for atom_idx in dummy_idx:
                charge_to_compensate_for_region += (
                    psf[atom_idx + offset].initial_charge
                    - psf[atom_idx + offset].charge
                )
            logger.debug(f"Charge to compensate: {charge_to_compensate_for_region}")
            # adding charge difference to initial charge on real terminal atom;
            # if this acceptor was already used, accumulate on its current charge
            if (
                connecting_real_atom_for_this_dummy_region
                in compensating_on_this_real_atom
            ):
                charge_acceptor.charge = (
                    charge_acceptor.charge + charge_to_compensate_for_region
                )
            else:
                charge_acceptor.charge = (
                    charge_acceptor.initial_charge + charge_to_compensate_for_region
                )
            compensating_on_this_real_atom.append(
                connecting_real_atom_for_this_dummy_region
            )
        # check if rest charge is missing
        new_charge = sum(
            [atom.charge for atom in psf.view[f":{self.tlc.upper()}"].atoms]
        )
        if not (np.isclose(new_charge, total_charge, rtol=1e-4)):
            raise RuntimeError(
                f"Charge compensation failed. Introducing non integer total charge: {new_charge}. Target total charge: {total_charge}."
            )

    @staticmethod
    def _scale_epsilon(atom, lambda_value: float):
        """Scale the atom's LJ epsilon relative to its stored initial value."""
        logger.debug(atom)
        logger.debug(atom.initial_epsilon)
        atom.epsilon = atom.initial_epsilon * lambda_value

    @staticmethod
    def _scale_rmin(atom, lambda_value: float):
        """Scale the atom's LJ rmin relative to its stored initial value."""
        logger.debug(atom)
        logger.debug(atom.initial_rmin)
        atom.rmin = atom.initial_rmin * lambda_value

    @staticmethod
    def _modify_type(atom, psf, atom_type_suffix: str):
        """Assign a unique dummy (DDD<n>) or default (DDX<n>) atom type,
        stashing the original type on ``atom.initial_type``."""
        if hasattr(atom, "initial_type"):
            # only change parameters
            pass
        else:
            atom.initial_type = atom.type
            if atom_type_suffix == "DDD":
                psf.number_of_dummys += 1
                new_type = f"{atom_type_suffix}{psf.number_of_dummys}"
            elif atom_type_suffix == "DDX":
                psf.mutations_to_default += 1
                new_type = f"{atom_type_suffix}{psf.mutations_to_default}"
            else:
                # guard against a silent NameError for unknown suffixes
                raise RuntimeError(f"Unknown atom type suffix: {atom_type_suffix}")
            atom.type = new_type
def mutate_pure_tautomers(
    s1_to_s2: ProposeMutationRoute,
    system1: SystemStructure,
    system2: SystemStructure,
    configuration,
    single_state=False,
    nr_of_bonded_windows: int = 4,
):
    """
    Write out the intermediate states for a tautomer pair.

    For tautomer 1 the charges are switched off, the hydrogen LJ term is
    removed and the common core is interpolated over
    ``nr_of_bonded_windows`` steps; for tautomer 2 only charges and
    hydrogen LJ are switched off. Each ``write_state`` call writes one
    intermediate-state directory, so the call order matters.

    Parameters
    ----------
    s1_to_s2 : ProposeMutationRoute
        mutation route connecting the two tautomers
    system1, system2 : SystemStructure
        the two tautomer systems
    configuration :
        configuration passed through to the IntermediateStateFactory
        (presumably the parsed settings dict -- confirm with caller)
    single_state : bool
        if True, stop after the charge states of tautomer 1
    nr_of_bonded_windows : int
        number of interpolation windows for the common-core transformation

    Returns
    -------
    tuple
        (output files of tautomer 1, output files of tautomer 2); the
        second list is empty when ``single_state`` is True
    """
    from transformato import (
        IntermediateStateFactory,
    )
    # setup mutation and StateFactory
    mutation_list = s1_to_s2.generate_mutations_to_common_core_for_mol1()
    i_tautomer1 = IntermediateStateFactory(
        system=system1,
        configuration=configuration,
    )
    # write out states
    # start with charge
    charges = mutation_list["charge"]
    for lambda_value in np.linspace(1, 0, 2):
        # turn off charges
        i_tautomer1.write_state(
            mutation_conf=charges,
            lambda_value_electrostatic=lambda_value,
        )
    if single_state:
        return (i_tautomer1.output_files, [])
    # turn off the lj of the hydrogen
    lj = mutation_list["lj"]
    i_tautomer1.write_state(
        mutation_conf=lj,
        lambda_value_vdw=0.0,
    )
    # transform common core: interpolate the cc parameters stepwise
    # (skip lambda=1.0, which is the state already written above)
    for lambda_value in np.linspace(1, 0, nr_of_bonded_windows + 1)[1:]:
        i_tautomer1.write_state(
            mutation_conf=mutation_list["transform"],
            common_core_transformation=lambda_value,
        )
    # setup other tautomer
    mutation_list = s1_to_s2.generate_mutations_to_common_core_for_mol2()
    i_tautomer2 = IntermediateStateFactory(
        system=system2,
        configuration=configuration,
    )
    # write out states
    # start with charge
    charges = mutation_list["charge"]
    for lambda_value in np.linspace(1, 0, 2):
        # turn off charges
        i_tautomer2.write_state(
            mutation_conf=charges,
            lambda_value_electrostatic=lambda_value,
        )
    # turn off the lj of the hydrogen
    lj = mutation_list["lj"]
    i_tautomer2.write_state(
        mutation_conf=lj,
        lambda_value_vdw=0.0,
    )
    return (i_tautomer1.output_files, i_tautomer2.output_files)
| wiederm/transformato | transformato/mutate.py | mutate.py | py | 90,744 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "rdkit.Chem.Draw.IPythonConsole.molSize",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "rdkit.Chem.Draw.IPythonConsole",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "rdkit.Chem.Draw.IPythonConsole.ipython_useSVG",
"line_number": 17,... |
21054969908 | import numpy as np
from scipy.special import logsumexp, gammaln
from astropy import constants, units as au
from astropy.units import Quantity
# 1 Gauss expressed via the SI tesla unit shipped with astropy.
Gauss = 1e-4 * au.T
# Globally allow radian <-> dimensionless conversions for unit arithmetic.
au.set_enabled_equivalencies(au.dimensionless_angles())
def pad_with_absorbing_boundary_conditions(k2, k02, N, *coords, dn_max=0.05):
    """
    Pad `k2` on every axis with an absorbing boundary layer.

    The layer thickness is l = N / alpha, where
    alpha = |dn_max| * sqrt(k02) is the extinction coefficient implied by a
    refractive-index contrast of `dn_max`.

    Args:
        k2: squared-wavenumber field, one axis per entry of `coords`
        k02: squared vacuum wavenumber (scalar)
        N: number of extinction lengths the padding should span
        coords: uniformly spaced coordinate array for each axis of `k2`
        dn_max: refractive-index contrast used to set alpha; if None it is
            estimated from `k2` itself.

    Returns:
        (padded_k2, pad_sizes, padded_coords): the padded field, the number
        of cells added on each side per axis, and the extended coordinates.
    """
    if dn_max is None:
        # estimate the maximal refractive-index contrast from the medium
        dn_max = np.max(np.abs(np.sqrt(k2 / k02) - 1.))
        print("Using the dn_max={}".format(dn_max))
    alpha = np.abs(dn_max)*np.sqrt(k02)#/(np.pi*2.)
    l = N / alpha
    print("Extinction alpha={}".format(alpha))
    print("Extinction l={}".format(l))
    def log_Pn(alpha, x, N):
        # log of the truncated exponential sum sum_{n<=N} (alpha x)^n / n!
        # (used only by the disabled graded absorber below)
        log_res = -np.inf
        for n in range(N + 1):
            log_res = np.logaddexp(n * (np.log(alpha * x)) - gammaln(n + 1.), log_res)
        return np.where(x > 0, log_res, 0.)
    def evaluate_k2(alpha, x):
        # NOTE(review): this returns a *vacuum* profile, so the padding does
        # not currently absorb; the graded complex absorber `_evaluate_k2`
        # below is unused. A dead assignment that computed (and discarded)
        # the complex absorbing k2 was removed here -- confirm whether the
        # absorber should be re-enabled.
        return k02*np.ones(x.shape)
    def _evaluate_k2(alpha, x, N):
        # smoothly graded complex absorber profile (currently unused)
        return alpha**2 * np.exp(np.log(N - alpha * x + 2j * np.sqrt(k02) * x) + (N - 1) * (np.log(alpha * x))
                                 - log_Pn(alpha, x, N) - gammaln(N + 1.)) + k02
    def _add_other_dims(v, shape, i):
        """
        Broadcast a 1-D array over every axis of `shape` except axis `i`.
        Args:
            v: [D]
            shape: (s0,s1,s2,...)
            i: int
        Returns: same shape as `shape` except ith dim which is D.
        """
        dims = list(range(len(shape)))
        del dims[i]
        v = np.expand_dims(v, dims)
        grow = list(shape)
        grow[i] = 1
        return np.tile(v,grow)
    m = []
    out_coords = []
    for i,x in enumerate(coords):
        dx = x[1] - x[0]
        # number of grid cells needed to cover one layer of thickness l
        M = int(l / dx) + 1
        m.append(M)
        print("Dimension {} padded by {}".format(i, M))
        x_pad = np.arange(1,M+1)*dx
        k2_pad = evaluate_k2(alpha, x_pad)
        k2_before = _add_other_dims(k2_pad[::-1], k2.shape, i)
        k2_after = _add_other_dims(k2_pad, k2.shape, i)
        k2 = np.concatenate([k2_before, k2, k2_after], axis=i)
        x_out = np.concatenate([x[0] - np.arange(1,M+1)[::-1]*dx, x, x[-1]+np.arange(1,M+1)*dx])
        out_coords.append(x_out)
    return k2, m, tuple(out_coords)
def pad_with_vacuum_conditions(k2, k02, pad_size, *coords):
    """
    Pad `k2` with `pad_size` cells of vacuum (k^2 = k0^2) on both sides of
    every axis.

    Args:
        k2: squared-wavenumber field, one axis per entry of `coords`
        k02: squared vacuum wavenumber (scalar)
        pad_size: number of cells added on each side of every axis
        coords: uniformly spaced coordinate array for each axis of `k2`

    Returns:
        (padded_k2, pad_sizes, padded_coords)
    """
    def _vacuum_profile(x):
        # vacuum: the squared wavenumber equals the free-space value
        return k02 * np.ones(x.shape)

    def _broadcast_along(values, shape, axis):
        """Tile 1-D `values` so it spans `shape` on all axes except `axis`."""
        other_axes = [d for d in range(len(shape)) if d != axis]
        expanded = np.expand_dims(values, other_axes)
        reps = [1 if d == axis else shape[d] for d in range(len(shape))]
        return np.tile(expanded, reps)

    pad_counts = []
    padded_coords = []
    for axis, x in enumerate(coords):
        print("Dimension {} padded by {}".format(axis, pad_size))
        dx = x[1] - x[0]
        offsets = np.arange(1, pad_size + 1) * dx
        profile = _vacuum_profile(offsets)
        pad_counts.append(pad_size)
        before = _broadcast_along(profile[::-1], k2.shape, axis)
        after = _broadcast_along(profile, k2.shape, axis)
        k2 = np.concatenate([before, k2, after], axis=axis)
        padded_coords.append(
            np.concatenate([x[0] - offsets[::-1], x, x[-1] + offsets]))
    return k2, pad_counts, tuple(padded_coords)
def appleton_hartree(ne, nu):
    """
    Refractive index of an unmagnetized, collisionless plasma
    (Appleton-Hartree formula without B-field and collision terms, in the
    first-order form n = 1 - omega_p^2 / omega^2).

    Args:
        ne: free electron density
        nu: wave frequency

    Returns:
        the (real) refractive index
    """
    # squared plasma frequency: omega_p^2 = ne * e^2 / (eps0 * m_e)
    omega_p_squared = ne * (constants.e.si ** 2 / constants.eps0 / constants.m_e)
    # omega = 2 * pi * nu
    return 1. - omega_p_squared / (2 * np.pi * nu) ** 2
def partial_blockage(N, nu, sinusoidal_blockage=False):
    """
    Build a 2-D squared-wavenumber field for a half-infinite slab of
    ionospheric plasma ("partial blockage"):

    | * source
    |
    |        _________________
    |        |  n = 1 - dn
    |        |________________
    |
    |
    | x receiver
    |(0,0)

    Args:
        N: number of grid points per axis; the grid spacing is wavelength/4.
           NOTE(review): for odd N, `np.arange(-N//2, N-N//2, 1)` yields
           N+1 points because (-N)//2 floors -- confirm N is always even.
        nu: observing frequency (astropy Quantity)
        sinusoidal_blockage: modulate the slab refractive index with a
            cosine of period 10 wavelengths instead of a constant slab.

    Returns:
        (x, z, k2, k0^2): grid coordinates, squared wavenumber field and
        the squared vacuum wavenumber.
    """
    # representative ionospheric electron density
    ne = 2e12 / au.m ** 3
    wavelength = constants.c.si / nu
    x = np.arange(-N//2, N-N//2,1) * 0.25 * wavelength
    z = np.arange(-N//2, N-N//2,1) * 0.25 * wavelength
    n_ionosphere = appleton_hartree(ne, nu)
    k0 = 2. * np.pi / wavelength
    X, Z = np.meshgrid(x, z, indexing='ij')
    # the slab occupies the upper half of the z range, is 10 wavelengths
    # thick and starts at the left edge in x
    z_bar_bottom = z.min() + 0.5 * (z.max() - z.min())
    z_bar_top = z_bar_bottom + 10. * wavelength
    x_bar_left = x.min() + 0. * (x.max() - x.min())
    where_bar = (X > x_bar_left) & (Z > z_bar_bottom) & (Z < z_bar_top)
    if sinusoidal_blockage:
        refractive_index = np.where(where_bar, 1. - (1. - n_ionosphere) * np.cos(2 * np.pi * X / (10. * wavelength)),
                                    1.)
    else:
        refractive_index = np.where(where_bar, n_ionosphere, 1.)
    k2 = 4. * np.pi ** 2 * refractive_index ** 2 / wavelength ** 2
    return x, z, k2, k0 ** 2
def single_blob(N, nu, l):
    """
    Build a 2-D squared-wavenumber field containing a single Gaussian
    ionospheric blob of reduced refractive index, centred in the domain.
    (The previous docstring showed the partial-blockage slab diagram, which
    does not describe this function.)

    Args:
        N: number of grid points per axis; the grid spacing is wavelength/4
        nu: observing frequency (astropy Quantity)
        l: 1-sigma width of the Gaussian blob (length units)

    Returns:
        (x, z, k2, k0^2): grid coordinates, squared wavenumber field and
        the squared vacuum wavenumber.
    """
    # representative ionospheric electron density
    ne = 2e12 / au.m ** 3
    wavelength = constants.c.si / nu
    x = np.arange(-N//2, N-N//2,1) * 0.25 * wavelength
    z = np.arange(-N//2, N-N//2,1) * 0.25 * wavelength
    n_ionosphere = appleton_hartree(ne, nu)
    k0 = 2. * np.pi / wavelength
    X, Z = np.meshgrid(x, z, indexing='ij')
    # blob centre: middle of the domain in both coordinates
    z_blob = z.min() + 0.5 * (z.max() - z.min())
    x_blob = x.min() + 0.5 * (x.max() - x.min())
    # Gaussian interpolation between vacuum (n=1) and the ionospheric index
    refractive_index = (n_ionosphere - 1) * np.exp(-0.5*((X-x_blob)**2 + (Z-z_blob)**2)/l**2) + 1.
    k2 = 4. * np.pi ** 2 * refractive_index ** 2 / wavelength ** 2
    return x, z, k2, k0 ** 2
def test_partial_blockage():
    """Visual smoke test: plot the partial-blockage scattering potentials.

    Not an automated assertion -- it opens matplotlib windows showing the
    plain and sinusoidal blockage potentials and the effect of the
    absorbing-boundary padding.
    """
    import pylab as plt
    nu = 100e6 / au.s
    N = 1000
    x, z, k2, k02 = partial_blockage(N, nu)
    # the scattering potential is k^2(x) - k0^2 (zero in vacuum)
    scattering_potential = k2 - k02
    plt.imshow(scattering_potential.T.value, interpolation='nearest', origin='lower',
               extent=(x.min().value, x.max().value, z.min().value, z.max().value),
               cmap='bone')
    plt.title(r'Partial blockage potential ($k^2(\mathbf{{x}}) - k_0^2$) at {}'.format(nu.to(au.MHz)))
    plt.colorbar(label='potential [{}]'.format(scattering_potential.unit))
    plt.show()
    x, z, k2, k02 = partial_blockage(N, nu, sinusoidal_blockage=True)
    scattering_potential = k2 - k02
    plt.imshow(scattering_potential.T.value, interpolation='nearest', origin='lower',
               extent=(x.min().value, x.max().value, z.min().value, z.max().value),
               cmap='bone')
    plt.title(r'Sinusoidal partial blockage potential ($k^2(\mathbf{{x}}) - k_0^2$) at {}'.format(nu.to(au.MHz)))
    plt.colorbar(label='potential [{}]'.format(scattering_potential.unit))
    plt.show()
    # pad the sinusoidal case and draw the original domain as a red box
    k2, m, (x,z) = pad_with_absorbing_boundary_conditions(k2, k02, 4, x, z, dn_max=0.01)
    scattering_potential = k2 - k02
    plt.imshow(np.abs(scattering_potential.T.value), interpolation='nearest', origin='lower',
               extent=(x.min().value, x.max().value, z.min().value, z.max().value),
               cmap='bone')
    print(x)
    plt.plot(Quantity([x[m[0]], x[-m[0]], x[-m[0]], x[m[0]], x[m[0]]]).value, Quantity([z[m[1]], z[m[1]],z[-m[1]],z[-m[1]],z[m[1]]]).value, c='red')
    plt.title(r'Sinusoidal partial blockage potential ($k^2(\mathbf{{x}}) - k_0^2$) at {} with boundary'.format(nu.to(au.MHz)))
    plt.colorbar(label='potential [{}]'.format(scattering_potential.unit))
    plt.show()
| Joshuaalbert/born_rime | born_rime/potentials.py | potentials.py | py | 7,396 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "astropy.units.T",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "astropy.units.set_enabled_equivalencies",
"line_number": 7,
"usage_type": "call"
},
{
"api_name"... |
8445188718 |
import operator
import cupy
from cupy._core import internal
from cupy._core._scalar import get_typename
from cupyx.scipy.sparse import csr_matrix
import numpy as np
# C++ value types the templated kernels below are instantiated for.
TYPES = ['double', 'thrust::complex<double>']
# Integer types used for sparse-matrix index buffers.
INT_TYPES = ['int', 'long long']
INTERVAL_KERNEL = r'''
#include <cupy/complex.cuh>
extern "C" {
__global__ void find_interval(
const double* t, const double* x, long long* out,
int k, int n, bool extrapolate, int total_x) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= total_x) {
return;
}
double xp = *&x[idx];
double tb = *&t[k];
double te = *&t[n];
if(isnan(xp)) {
out[idx] = -1;
return;
}
if((xp < tb || xp > te) && !extrapolate) {
out[idx] = -1;
return;
}
int left = k;
int right = n;
int mid;
bool found = false;
while(left < right && !found) {
mid = ((right + left) / 2);
if(xp > *&t[mid]) {
left = mid + 1;
} else if (xp < *&t[mid]) {
right = mid - 1;
} else {
found = true;
}
}
int default_value = left - 1 < k ? k : left - 1;
int result = found ? mid + 1 : default_value + 1;
while(xp >= *&t[result] && result != n) {
result++;
}
out[idx] = result - 1;
}
}
'''
INTERVAL_MODULE = cupy.RawModule(
code=INTERVAL_KERNEL, options=('-std=c++11',),)
# name_expressions=[f'find_interval<{type_name}>' for type_name in TYPES])
D_BOOR_KERNEL = r'''
#include <cupy/complex.cuh>
#include <cupy/math_constants.h>
#define COMPUTE_LINEAR 0x1
template<typename T>
__global__ void d_boor(
const double* t, const T* c, const int k, const int mu,
const double* x, const long long* intervals, T* out,
double* temp, int num_c, int mode, int num_x) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= num_x) {
return;
}
double xp = *&x[idx];
long long interval = *&intervals[idx];
double* h = temp + idx * (2 * k + 1);
double* hh = h + k + 1;
int ind, j, n;
double xa, xb, w;
if(mode == COMPUTE_LINEAR && interval < 0) {
for(j = 0; j < num_c; j++) {
out[num_c * idx + j] = CUDART_NAN;
}
return;
}
/*
* Perform k-m "standard" deBoor iterations
* so that h contains the k+1 non-zero values of beta_{ell,k-m}(x)
* needed to calculate the remaining derivatives.
*/
h[0] = 1.0;
for (j = 1; j <= k - mu; j++) {
for(int p = 0; p < j; p++) {
hh[p] = h[p];
}
h[0] = 0.0;
for (n = 1; n <= j; n++) {
ind = interval + n;
xb = t[ind];
xa = t[ind - j];
if (xb == xa) {
h[n] = 0.0;
continue;
}
w = hh[n - 1]/(xb - xa);
h[n - 1] += w*(xb - xp);
h[n] = w*(xp - xa);
}
}
/*
* Now do m "derivative" recursions
* to convert the values of beta into the mth derivative
*/
for (j = k - mu + 1; j <= k; j++) {
for(int p = 0; p < j; p++) {
hh[p] = h[p];
}
h[0] = 0.0;
for (n = 1; n <= j; n++) {
ind = interval + n;
xb = t[ind];
xa = t[ind - j];
if (xb == xa) {
h[mu] = 0.0;
continue;
}
w = ((double) j) * hh[n - 1]/(xb - xa);
h[n - 1] -= w;
h[n] = w;
}
}
if(mode != COMPUTE_LINEAR) {
return;
}
// Compute linear combinations
for(j = 0; j < num_c; j++) {
out[num_c * idx + j] = 0;
for(n = 0; n < k + 1; n++) {
out[num_c * idx + j] = (
out[num_c * idx + j] +
c[(interval + n - k) * num_c + j] * ((T) h[n]));
}
}
}
'''
D_BOOR_MODULE = cupy.RawModule(
code=D_BOOR_KERNEL, options=('-std=c++11',),
name_expressions=[f'd_boor<{type_name}>' for type_name in TYPES])
DESIGN_MAT_KERNEL = r'''
#include <cupy/complex.cuh>
template<typename U>
__global__ void compute_design_matrix(
const int k, const long long* intervals, double* bspline_basis,
double* data, U* indices, int num_intervals) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= num_intervals) {
return;
}
long long interval = *&intervals[idx];
double* work = bspline_basis + idx * (2 * k + 1);
for(int j = 0; j <= k; j++) {
int m = (k + 1) * idx + j;
data[m] = work[j];
indices[m] = (U) (interval - k + j);
}
}
'''
DESIGN_MAT_MODULE = cupy.RawModule(
code=DESIGN_MAT_KERNEL, options=('-std=c++11',),
name_expressions=[f'compute_design_matrix<{itype}>'
for itype in INT_TYPES])
def _get_module_func(module, func_name, *template_args):
    """Fetch a kernel from *module*, specialized by the dtypes of
    ``template_args`` when any are given.

    Complex dtypes are mapped to their ``thrust::`` C++ type name so the
    requested instantiation matches the compiled template kernels.
    """
    type_names = []
    for arg in template_args:
        name = get_typename(arg.dtype)
        if arg.dtype.kind == 'c':
            name = 'thrust::' + name
        type_names.append(name)
    if type_names:
        kernel_name = '{}<{}>'.format(func_name, ', '.join(type_names))
    else:
        kernel_name = func_name
    return module.get_function(kernel_name)
def _get_dtype(dtype):
    """Return the working dtype for spline data.

    Complex inputs map to ``complex128``; everything else maps to
    ``float64``.  The explicit names replace ``cupy.complex_`` /
    ``cupy.float_``, which follow the NumPy aliases removed in NumPy 2.0;
    the returned dtypes are identical.
    """
    if cupy.issubdtype(dtype, cupy.complexfloating):
        return cupy.complex128
    else:
        return cupy.float64
def _as_float_array(x, check_finite=False):
    """Return *x* as a C-contiguous double-precision (or complex) array.

    Half- and single-precision floats are upcast to double precision.
    When ``check_finite`` is true, raise ``ValueError`` if any element is
    NaN or infinite.
    """
    arr = cupy.ascontiguousarray(x)
    arr = arr.astype(_get_dtype(arr.dtype), copy=False)
    if check_finite:
        if not cupy.isfinite(arr).all():
            raise ValueError("Array must not contain infs or nans.")
    return arr
def _evaluate_spline(t, c, k, xp, nu, extrapolate, out):
    """
    Evaluate a spline in the B-spline basis.
    Parameters
    ----------
    t : ndarray, shape (n+k+1)
        knots
    c : ndarray, shape (n, m)
        B-spline coefficients
    k : int
        B-spline degree
    xp : ndarray, shape (s,)
        Points to evaluate the spline at.
    nu : int
        Order of derivative to evaluate.
    extrapolate : int, optional
        Whether to extrapolate to out-of-bounds points, or to return NaNs.
    out : ndarray, shape (s, m)
        Computed values of the spline at each of the input points.
        This argument is modified in-place.
    """
    n = t.shape[0] - k - 1
    intervals = cupy.empty_like(xp, dtype=cupy.int64)
    # Compute the knot interval containing each evaluation point
    # (one CUDA thread per point, 128 threads per block).
    interval_kernel = _get_module_func(INTERVAL_MODULE, 'find_interval')
    interval_kernel(((xp.shape[0] + 128 - 1) // 128,), (128,),
                    (t, xp, intervals, k, n, extrapolate, xp.shape[0]))
    # Run de Boor's algorithm per point; mode 1 (COMPUTE_LINEAR) makes the
    # kernel also form the linear combination with the coefficients.
    # `temp` is per-point scratch of size 2*k + 1 for the basis values.
    num_c = int(np.prod(c.shape[1:]))
    temp = cupy.empty(xp.shape[0] * (2 * k + 1))
    d_boor_kernel = _get_module_func(D_BOOR_MODULE, 'd_boor', c)
    d_boor_kernel(((xp.shape[0] + 128 - 1) // 128,), (128,),
                  (t, c, k, nu, xp, intervals, out, temp, num_c, 1,
                   xp.shape[0]))
def _make_design_matrix(x, t, k, extrapolate, indices):
    """
    Returns a design matrix in CSR format.
    Note that only indices is passed, but not indptr because indptr is already
    precomputed in the calling Python function design_matrix.
    Parameters
    ----------
    x : array_like, shape (n,)
        Points to evaluate the spline at.
    t : array_like, shape (nt,)
        Sorted 1D array of knots.
    k : int
        B-spline degree.
    extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points.
    indices : ndarray, shape (n * (k + 1),)
        Preallocated indices of the final CSR array.
    Returns
    -------
    data
        The data array of a CSR array of the b-spline design matrix.
        In each row all the basis elements are evaluated at the certain point
        (first row - x[0], ..., last row - x[-1]).
    indices
        The indices array of a CSR array of the b-spline design matrix.
    """
    n = t.shape[0] - k - 1
    intervals = cupy.empty_like(x, dtype=cupy.int64)
    # Locate the knot interval for each point (one thread per point).
    interval_kernel = _get_module_func(INTERVAL_MODULE, 'find_interval')
    interval_kernel(((x.shape[0] + 128 - 1) // 128,), (128,),
                    (t, x, intervals, k, n, extrapolate, x.shape[0]))
    # Evaluate the k+1 nonzero basis functions per point.  Mode 0 tells the
    # kernel to skip the linear-combination step, so `c`/`out` are None and
    # the basis values are left in `bspline_basis` scratch storage.
    bspline_basis = cupy.empty(x.shape[0] * (2 * k + 1))
    d_boor_kernel = _get_module_func(D_BOOR_MODULE, 'd_boor', x)
    d_boor_kernel(((x.shape[0] + 128 - 1) // 128,), (128,),
                  (t, None, k, 0, x, intervals, None, bspline_basis, 0, 0,
                   x.shape[0]))
    # Scatter the basis values and their column indices into the CSR arrays.
    data = cupy.zeros(x.shape[0] * (k + 1), dtype=cupy.float_)
    design_mat_kernel = _get_module_func(
        DESIGN_MAT_MODULE, 'compute_design_matrix', indices)
    design_mat_kernel(((x.shape[0] + 128 - 1) // 128,), (128,),
                      (k, intervals, bspline_basis, data, indices,
                       x.shape[0]))
    return data, indices
def splder(tck, n=1):
    """
    Compute the spline representation of the derivative of a given spline
    Parameters
    ----------
    tck : tuple of (t, c, k)
        Spline whose derivative to compute
    n : int, optional
        Order of derivative to evaluate. Default: 1
    Returns
    -------
    tck_der : tuple of (t2, c2, k2)
        Spline of order k2=k-n representing the derivative
        of the input spline.
    Notes
    -----
    .. seealso:: :class:`scipy.interpolate.splder`
    See Also
    --------
    splantider, splev, spalde
    """
    # A negative order means antidifferentiation.
    if n < 0:
        return splantider(tck, -n)
    t, c, k = tck
    if n > k:
        raise ValueError(("Order of derivative (n = %r) must be <= "
                          "order of spline (k = %r)") % (n, tck[2]))
    # Extra axes for the trailing dims of the `c` array:
    sh = (slice(None),) + ((None,)*len(c.shape[1:]))
    try:
        for j in range(n):
            # See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5
            # Compute the denominator in the differentiation formula.
            # (and append trailing dims, if necessary)
            dt = t[k+1:-1] - t[1:-k-1]
            dt = dt[sh]
            # Compute the new coefficients
            c = (c[1:-1-k] - c[:-2-k]) * k / dt
            # Pad coefficient array to same size as knots (FITPACK
            # convention)
            c = cupy.r_[c, np.zeros((k,) + c.shape[1:])]
            # Adjust knots: each derivative drops one knot at each end.
            t = t[1:-1]
            k -= 1
    except FloatingPointError as e:
        # NOTE(review): a zero `dt` (repeated internal knot) only raises
        # FloatingPointError when the float error state is set to raise --
        # confirm the caller's error configuration.
        raise ValueError(("The spline has internal repeated knots "
                          "and is not differentiable %d times") % n) from e
    return t, c, k
def splantider(tck, n=1):
    """
    Compute the spline for the antiderivative (integral) of a given spline.
    Parameters
    ----------
    tck : tuple of (t, c, k)
        Spline whose antiderivative to compute
    n : int, optional
        Order of antiderivative to evaluate. Default: 1
    Returns
    -------
    tck_ader : tuple of (t2, c2, k2)
        Spline of order k2=k+n representing the antiderivative of the input
        spline.
    See Also
    --------
    splder, splev, spalde
    Notes
    -----
    The `splder` function is the inverse operation of this function.
    Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
    rounding error.
    .. seealso:: :class:`scipy.interpolate.splantider`
    """
    # A negative order means differentiation.
    if n < 0:
        return splder(tck, -n)
    t, c, k = tck
    # Extra axes for the trailing dims of the `c` array:
    sh = (slice(None),) + (None,)*len(c.shape[1:])
    for j in range(n):
        # This is the inverse set of operations to splder.
        # Compute the multiplier in the antiderivative formula.
        dt = t[k+1:] - t[:-k-1]
        dt = dt[sh]
        # Compute the new coefficients via a running (cumulative) sum.
        c = cupy.cumsum(c[:-k-1] * dt, axis=0) / (k + 1)
        c = cupy.r_[cupy.zeros((1,) + c.shape[1:]),
                    c, [c[-1]] * (k+2)]
        # New knots: each antiderivative adds one knot at each end.
        t = cupy.r_[t[0], t, t[-1]]
        k += 1
    return t, c, k
class BSpline:
r"""Univariate spline in the B-spline basis.
.. math::
S(x) = \sum_{j=0}^{n-1} c_j B_{j, k; t}(x)
where :math:`B_{j, k; t}` are B-spline basis functions of degree `k`
and knots `t`.
Parameters
----------
t : ndarray, shape (n+k+1,)
knots
c : ndarray, shape (>=n, ...)
spline coefficients
k : int
B-spline degree
extrapolate : bool or 'periodic', optional
whether to extrapolate beyond the base interval, ``t[k] .. t[n]``,
or to return nans.
If True, extrapolates the first and last polynomial pieces of b-spline
functions active on the base interval.
If 'periodic', periodic extrapolation is used.
Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
t : ndarray
knot vector
c : ndarray
spline coefficients
k : int
spline degree
extrapolate : bool
If True, extrapolates the first and last polynomial pieces of b-spline
functions active on the base interval.
axis : int
Interpolation axis.
tck : tuple
A read-only equivalent of ``(self.t, self.c, self.k)``
Notes
-----
B-spline basis elements are defined via
.. math::
B_{i, 0}(x) = 1, \textrm{if $t_i \le x < t_{i+1}$, otherwise $0$,}
B_{i, k}(x) = \frac{x - t_i}{t_{i+k} - t_i} B_{i, k-1}(x)
+ \frac{t_{i+k+1} - x}{t_{i+k+1} - t_{i+1}} B_{i+1, k-1}(x)
**Implementation details**
- At least ``k+1`` coefficients are required for a spline of degree `k`,
so that ``n >= k+1``. Additional coefficients, ``c[j]`` with
``j > n``, are ignored.
- B-spline basis elements of degree `k` form a partition of unity on the
*base interval*, ``t[k] <= x <= t[n]``.
- Based on [1]_ and [2]_
.. seealso:: :class:`scipy.interpolate.BSpline`
References
----------
.. [1] Tom Lyche and Knut Morken, Spline methods,
http://www.uio.no/studier/emner/matnat/ifi/INF-MAT5340/v05/undervisningsmateriale/
.. [2] Carl de Boor, A practical guide to splines, Springer, 2001.
"""
    def __init__(self, t, c, k, extrapolate=True, axis=0):
        """Build a BSpline, validating and normalizing the inputs.

        Parameters are as documented on the class: knots ``t``,
        coefficients ``c``, degree ``k``, the ``extrapolate`` mode and
        the interpolation ``axis``.  Raises ``ValueError`` on malformed
        or inconsistent input.
        """
        self.k = operator.index(k)
        self.c = cupy.asarray(c)
        # Knots are always stored as a contiguous float64 vector.
        self.t = cupy.ascontiguousarray(t, dtype=cupy.float64)
        if extrapolate == 'periodic':
            self.extrapolate = extrapolate
        else:
            self.extrapolate = bool(extrapolate)
        # Number of basis functions implied by the knot vector.
        n = self.t.shape[0] - self.k - 1
        axis = internal._normalize_axis_index(axis, self.c.ndim)
        # Note that the normalized axis is stored in the object.
        self.axis = axis
        if axis != 0:
            # roll the interpolation axis to be the first one in self.c
            # More specifically, the target shape for self.c is (n, ...),
            # and axis != 0 means that we have c.shape (..., n, ...) with
            # the interpolation axis sitting at position `axis`.
            self.c = cupy.moveaxis(self.c, axis, 0)
        if k < 0:
            raise ValueError("Spline order cannot be negative.")
        if self.t.ndim != 1:
            raise ValueError("Knot vector must be one-dimensional.")
        if n < self.k + 1:
            raise ValueError("Need at least %d knots for degree %d" %
                             (2*k + 2, k))
        if (cupy.diff(self.t) < 0).any():
            raise ValueError("Knots must be in a non-decreasing order.")
        if len(cupy.unique(self.t[k:n+1])) < 2:
            raise ValueError("Need at least two internal knots.")
        if not cupy.isfinite(self.t).all():
            raise ValueError("Knots should not have nans or infs.")
        if self.c.ndim < 1:
            raise ValueError("Coefficients must be at least 1-dimensional.")
        if self.c.shape[0] < n:
            raise ValueError(
                "Knots, coefficients and degree are inconsistent.")
        # Coefficients are promoted to float64/complex128 (see _get_dtype).
        dt = _get_dtype(self.c.dtype)
        self.c = cupy.ascontiguousarray(self.c, dtype=dt)
@classmethod
def construct_fast(cls, t, c, k, extrapolate=True, axis=0):
"""Construct a spline without making checks.
Accepts same parameters as the regular constructor. Input arrays
`t` and `c` must of correct shape and dtype.
"""
self = object.__new__(cls)
self.t, self.c, self.k = t, c, k
self.extrapolate = extrapolate
self.axis = axis
return self
@property
def tck(self):
"""Equivalent to ``(self.t, self.c, self.k)`` (read-only).
"""
return self.t, self.c, self.k
    @classmethod
    def basis_element(cls, t, extrapolate=True):
        """Return a B-spline basis element ``B(x | t[0], ..., t[k+1])``.
        Parameters
        ----------
        t : ndarray, shape (k+2,)
            internal knots
        extrapolate : bool or 'periodic', optional
            whether to extrapolate beyond the base interval,
            ``t[0] .. t[k+1]``, or to return nans.
            If 'periodic', periodic extrapolation is used.
            Default is True.
        Returns
        -------
        basis_element : callable
            A callable representing a B-spline basis element for the knot
            vector `t`.
        Notes
        -----
        The degree of the B-spline, `k`, is inferred from the length of `t` as
        ``len(t)-2``. The knot vector is constructed by appending and
        prepending ``k+1`` elements to internal knots `t`.
        .. seealso:: :class:`scipy.interpolate.BSpline`
        """
        k = len(t) - 2
        t = _as_float_array(t)
        # Pad the knot vector on both sides; only the single coefficient
        # set below is nonzero, which yields exactly one basis element.
        t = cupy.r_[(t[0]-1,) * k, t, (t[-1]+1,) * k]
        c = cupy.zeros_like(t)
        c[k] = 1.
        return cls.construct_fast(t, c, k, extrapolate)
    @classmethod
    def design_matrix(cls, x, t, k, extrapolate=False):
        """
        Returns a design matrix as a CSR format sparse array.
        Parameters
        ----------
        x : array_like, shape (n,)
            Points to evaluate the spline at.
        t : array_like, shape (nt,)
            Sorted 1D array of knots.
        k : int
            B-spline degree.
        extrapolate : bool or 'periodic', optional
            Whether to extrapolate based on the first and last intervals
            or raise an error. If 'periodic', periodic extrapolation is used.
            Default is False.
        Returns
        -------
        design_matrix : `csr_matrix` object
            Sparse matrix in CSR format where each row contains all the basis
            elements of the input row (first row = basis elements of x[0],
            ..., last row = basis elements x[-1]).
        Notes
        -----
        In each row of the design matrix all the basis elements are evaluated
        at the certain point (first row - x[0], ..., last row - x[-1]).
        `nt` is a length of the vector of knots: as far as there are
        `nt - k - 1` basis elements, `nt` should be not less than `2 * k + 2`
        to have at least `k + 1` basis element.
        Out of bounds `x` raises a ValueError.
        .. note::
            This method returns a `csr_matrix` instance as CuPy still does not
            have `csr_array`.
        .. seealso:: :class:`scipy.interpolate.BSpline`
        """
        x = _as_float_array(x, True)
        t = _as_float_array(t, True)
        if extrapolate != 'periodic':
            extrapolate = bool(extrapolate)
        if k < 0:
            raise ValueError("Spline order cannot be negative.")
        if t.ndim != 1 or np.any(t[1:] < t[:-1]):
            raise ValueError(f"Expect t to be a 1-D sorted array_like, but "
                             f"got t={t}.")
        # There are `nt - k - 1` basis elements in a BSpline built on the
        # vector of knots with length `nt`, so to have at least `k + 1` basis
        # elements we need to have at least `2 * k + 2` elements in the vector
        # of knots.
        if len(t) < 2 * k + 2:
            raise ValueError(f"Length t is not enough for k={k}.")
        if extrapolate == 'periodic':
            # With periodic extrapolation we map x to the segment
            # [t[k], t[n]].
            n = t.size - k - 1
            x = t[k] + (x - t[k]) % (t[n] - t[k])
            extrapolate = False
        elif not extrapolate and (
                (min(x) < t[k]) or (max(x) > t[t.shape[0] - k - 1])
        ):
            # Checks from `find_interval` function
            raise ValueError(f'Out of bounds w/ x = {x}.')
        # Compute number of non-zeros of final CSR array in order to determine
        # the dtype of indices and indptr of the CSR array.
        n = x.shape[0]
        nnz = n * (k + 1)
        if nnz < cupy.iinfo(cupy.int32).max:
            int_dtype = cupy.int32
        else:
            int_dtype = cupy.int64
        # Preallocate indptr and indices; every row has exactly k+1 entries.
        indices = cupy.empty(n * (k + 1), dtype=int_dtype)
        indptr = cupy.arange(0, (n + 1) * (k + 1), k + 1, dtype=int_dtype)
        # indptr is not passed to CUDA as it is already fully computed
        data, indices = _make_design_matrix(
            x, t, k, extrapolate, indices
        )
        return csr_matrix(
            (data, indices, indptr),
            shape=(x.shape[0], t.shape[0] - k - 1)
        )
    def __call__(self, x, nu=0, extrapolate=None):
        """
        Evaluate a spline function.
        Parameters
        ----------
        x : array_like
            points to evaluate the spline at.
        nu : int, optional
            derivative to evaluate (default is 0).
        extrapolate : bool or 'periodic', optional
            whether to extrapolate based on the first and last intervals
            or return nans. If 'periodic', periodic extrapolation is used.
            Default is `self.extrapolate`.
        Returns
        -------
        y : array_like
            Shape is determined by replacing the interpolation axis
            in the coefficient array with the shape of `x`.
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        x = cupy.asarray(x)
        # Remember the input shape; evaluation works on a flat 1-D vector.
        x_shape, x_ndim = x.shape, x.ndim
        x = cupy.ascontiguousarray(cupy.ravel(x), dtype=cupy.float_)
        # With periodic extrapolation we map x to the segment
        # [self.t[k], self.t[n]].
        if extrapolate == 'periodic':
            n = self.t.size - self.k - 1
            x = self.t[self.k] + (x - self.t[self.k]) % (self.t[n] -
                                                         self.t[self.k])
            extrapolate = False
        # Output buffer with trailing coefficient dims flattened;
        # _evaluate fills it in place.
        out = cupy.empty(
            (len(x), int(np.prod(self.c.shape[1:]))), dtype=self.c.dtype)
        self._evaluate(x, nu, extrapolate, out)
        out = out.reshape(x_shape + self.c.shape[1:])
        if self.axis != 0:
            # transpose to move the calculated values to the interpolation axis
            dim_order = list(range(out.ndim))
            dim_order = (
                dim_order[x_ndim:x_ndim+self.axis] +
                dim_order[:x_ndim] +
                dim_order[x_ndim+self.axis:])
            out = out.transpose(dim_order)
        return out
def _ensure_c_contiguous(self):
if not self.t.flags.c_contiguous:
self.t = self.t.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def _evaluate(self, xp, nu, extrapolate, out):
_evaluate_spline(self.t, self.c.reshape(self.c.shape[0], -1),
self.k, xp, nu, extrapolate, out)
    def derivative(self, nu=1):
        """
        Return a B-spline representing the derivative.
        Parameters
        ----------
        nu : int, optional
            Derivative order.
            Default is 1.
        Returns
        -------
        b : BSpline object
            A new instance representing the derivative.
        See Also
        --------
        splder, splantider
        """
        c = self.c
        # pad the c array if needed so it matches the knot count
        # (FITPACK convention expected by splder)
        ct = len(self.t) - len(c)
        if ct > 0:
            c = cupy.r_[c, cupy.zeros((ct,) + c.shape[1:])]
        tck = splder((self.t, c, self.k), nu)
        return self.construct_fast(*tck, extrapolate=self.extrapolate,
                                   axis=self.axis)
    def antiderivative(self, nu=1):
        """
        Return a B-spline representing the antiderivative.
        Parameters
        ----------
        nu : int, optional
            Antiderivative order. Default is 1.
        Returns
        -------
        b : BSpline object
            A new instance representing the antiderivative.
        Notes
        -----
        If antiderivative is computed and ``self.extrapolate='periodic'``,
        it will be set to False for the returned instance. This is done because
        the antiderivative is no longer periodic and its correct evaluation
        outside of the initially given x interval is difficult.
        See Also
        --------
        splder, splantider
        """
        c = self.c
        # pad the c array if needed so it matches the knot count
        # (FITPACK convention expected by splantider)
        ct = len(self.t) - len(c)
        if ct > 0:
            c = cupy.r_[c, cupy.zeros((ct,) + c.shape[1:])]
        tck = splantider((self.t, c, self.k), nu)
        # The antiderivative of a periodic spline is not periodic, so
        # periodic extrapolation is dropped (see Notes).
        if self.extrapolate == 'periodic':
            extrapolate = False
        else:
            extrapolate = self.extrapolate
        return self.construct_fast(*tck, extrapolate=extrapolate,
                                   axis=self.axis)
    def integrate(self, a, b, extrapolate=None):
        """
        Compute a definite integral of the spline.
        Parameters
        ----------
        a : float
            Lower limit of integration.
        b : float
            Upper limit of integration.
        extrapolate : bool or 'periodic', optional
            whether to extrapolate beyond the base interval,
            ``t[k] .. t[-k-1]``, or take the spline to be zero outside of the
            base interval. If 'periodic', periodic extrapolation is used.
            If None (default), use `self.extrapolate`.
        Returns
        -------
        I : array_like
            Definite integral of the spline over the interval ``[a, b]``.
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        # Prepare self.t and self.c.
        self._ensure_c_contiguous()
        # Swap integration bounds if needed.
        sign = 1
        if b < a:
            a, b = b, a
            sign = -1
        n = self.t.size - self.k - 1
        if extrapolate != "periodic" and not extrapolate:
            # Shrink the integration interval, if needed: the spline is
            # taken to be zero outside the base interval.
            a = max(a, self.t[self.k].item())
            b = min(b, self.t[n].item())
        # if self.c.ndim == 1:
        #     # Fast path: use FITPACK's routine
        #     # (cf _fitpack_impl.splint).
        #     integral = splint(a, b, self.tck)
        #     return integral * sign
        out = cupy.empty(
            (2, int(np.prod(self.c.shape[1:]))), dtype=self.c.dtype)
        # Compute the antiderivative; the definite integral is then the
        # difference of its values at the two endpoints.
        c = self.c
        ct = len(self.t) - len(c)
        if ct > 0:
            c = cupy.r_[c, cupy.zeros((ct,) + c.shape[1:])]
        ta, ca, ka = splantider((self.t, c, self.k), 1)
        if extrapolate == 'periodic':
            # Split the integral into the part over period (can be several
            # of them) and the remaining part.
            ts, te = self.t[self.k], self.t[n]
            period = te - ts
            interval = b - a
            n_periods, left = divmod(interval, period)
            if n_periods > 0:
                # Evaluate the difference of antiderivatives over one full
                # period, then scale by the period count.
                x = cupy.asarray([ts, te], dtype=cupy.float_)
                _evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
                                 ka, x, 0, False, out)
                integral = out[1] - out[0]
                integral *= n_periods
            else:
                integral = cupy.zeros((1, int(np.prod(self.c.shape[1:]))),
                                      dtype=self.c.dtype)
            # Map a to [ts, te], b is always a + left.
            a = ts + (a - ts) % period
            b = a + left
            # If b <= te then we need to integrate over [a, b], otherwise
            # over [a, te] and from ts to what is remained.
            if b <= te:
                x = cupy.asarray([a, b], dtype=cupy.float_)
                _evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
                                 ka, x, 0, False, out)
                integral += out[1] - out[0]
            else:
                # The remainder wraps around the period boundary: integrate
                # [a, te] plus the wrapped part [ts, ts + b - te].
                x = cupy.asarray([a, te], dtype=cupy.float_)
                _evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
                                 ka, x, 0, False, out)
                integral += out[1] - out[0]
                x = cupy.asarray([ts, ts + b - te], dtype=cupy.float_)
                _evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
                                 ka, x, 0, False, out)
                integral += out[1] - out[0]
        else:
            # Evaluate the difference of antiderivatives.
            x = cupy.asarray([a, b], dtype=cupy.float_)
            _evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
                             ka, x, 0, extrapolate, out)
            integral = out[1] - out[0]
        integral *= sign
        return integral.reshape(ca.shape[1:])
| cupy/cupy | cupyx/scipy/interpolate/_bspline.py | _bspline.py | py | 29,962 | python | en | code | 7,341 | github-code | 36 | [
{
"api_name": "cupy.RawModule",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "cupy.RawModule",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "cupy.RawModule",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "cupy._core._scalar.get... |
29752329020 | # days to seconds, hours to minutes
# to repr an interval of time,create timedelta instance like this
from datetime import timedelta
a = timedelta(days=2, hours=6)
b = timedelta(hours=4.5)
c = a + b
print(c.days)
print(c.seconds)
print(c.seconds / 3600)
print(c.total_seconds() / 3600)
from datetime import datetime
a = datetime(2012, 9, 23)
print(a + timedelta(days=10))
b = datetime(2012, 12, 2)
d = b - a
print('interval days',d.days)
now = datetime.today()
print('Time and Date: ',now)
print(now + timedelta(minutes=10))
# datetime is aware of leap years
a = datetime(2012, 3, 1)
b = datetime(2012, 2, 28)
print(a - b)
c = datetime(2013, 3, 1)
d = datetime(2013, 2, 28)
print(c-d)
a1 = datetime(2012, 9, 23)
# print(a1 + timedelta(months=1)) month is an invalid keyword
from dateutil.relativedelta import relativedelta
print(a1 + relativedelta(months=+1))
print(a1 + relativedelta(months=+4))
# Time between 2 dates
b = datetime(2012, 12, 21)
d = b - a
print(d)
d = relativedelta(b, a)
print(d)
# print(d.months, d.days)
# Determining Last Friday's Date
# you want to find last occurence of a day of the week.Last friday Example.
from datetime import datetime,timedelta
weekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
            'Friday', 'Saturday', 'Sunday']
def get_previous_byday(dayname, start_date=None):
    """Return the most recent *dayname* strictly before *start_date*
    (today when omitted); the same weekday maps to one full week back."""
    if start_date is None:
        start_date = datetime.today()
    # Days back to the wanted weekday; 0 means "same weekday", which
    # must become a full week.
    offset = (7 + start_date.weekday() - weekdays.index(dayname)) % 7
    if not offset:
        offset = 7
    return start_date - timedelta(days=offset)
print(get_previous_byday('Saturday'))
# The same calculation can be done with dateutil's relativedelta()
# combined with the rrule weekday constants (MO..SU, here FR).
from dateutil.rrule import *
d = datetime.now()
# next friday
print(d + relativedelta(weekday=FR))
# last Friday
print(d + relativedelta(weekday=FR(-1)))
| pranavchandran/redtheme_v13b | chapter_2_strings_and_text/days_to_seconds/days_to_seconds_other.py | days_to_seconds_other.py | py | 1,950 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.timedelta",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.timede... |
39253734345 | from functools import reduce
from collections import Counter
import math
import operator
import numpy as np
class SpamHamClassifier(object):
def __init__(self, training_data, vocabulary_size,
compute_mutual_information, lambda_constant=0):
self._num_training_data = len(training_data)
self._lambda_constant = lambda_constant
self._num_ham_documents = 0
self._num_spam_documents = 0
self._ham_counter = Counter()
self._spam_counter = Counter()
vocabulary = Counter()
for data in training_data:
counter = Counter(data.tokens)
vocabulary.update(counter)
vectorized = self._vectorize(counter)
if data.label == 'ham':
self._num_ham_documents += 1
self._ham_counter.update(vectorized)
elif data.label == 'spam':
self._num_spam_documents += 1
self._spam_counter.update(vectorized)
self._probability_ham = np.divide(
self.num_ham_documents,
self.num_training_data
)
self._probability_spam = np.divide(
self.num_spam_documents,
self.num_training_data
)
if compute_mutual_information:
word_mi = {}
for word, frequency in vocabulary.items():
pwordspam = self.spam_counter[word] / len(training_data)
pwordham = self.ham_counter[word] / len(training_data)
pnotwordspam = (len(training_data) - self.spam_counter[word]) / len(training_data)
pnotwordham = (len(training_data) - self.ham_counter[word]) / len(training_data)
pword = frequency / len(training_data)
pnotword = (len(training_data) - frequency) / len(training_data)
mi = np.sum([
np.multiply(
pwordham,
np.log(
np.divide(
pwordham,
np.multiply(pword, self.probability_ham)
)
)
),
np.multiply(
pwordspam,
np.log(
np.divide(
pwordspam,
np.multiply(pword, self.probability_spam)
)
)
),
np.multiply(
pnotwordham,
np.log(
np.divide(
pnotwordspam,
np.multiply(pnotword, self.probability_ham)
)
)
),
np.multiply(
pnotwordspam,
np.log(
np.divide(
pnotwordspam,
np.multiply(pnotword, self.probability_spam)
)
)
)
])
word_mi[word] = mi
word_mi = sorted(
word_mi.items(), key=lambda kv: kv[1], reverse=True)
vocabulary = word_mi[:vocabulary_size]
else:
vocabulary = vocabulary.most_common(vocabulary_size)
self._vocabulary = [v[0]
for v in vocabulary]
self._ham_counter = Counter({
k: v for k, v in self.ham_counter.items() if k in self.vocabulary
})
self._spam_counter = Counter({
k: v for k, v in self.spam_counter.items() if k in self.vocabulary
})
    # Read-only views of the fitted model state.
    @property
    def num_training_data(self):
        """Total number of training documents."""
        return self._num_training_data
    @property
    def num_spam_documents(self):
        """Number of training documents labelled 'spam'."""
        return self._num_spam_documents
    @property
    def num_ham_documents(self):
        """Number of training documents labelled 'ham'."""
        return self._num_ham_documents
    @property
    def lambda_constant(self):
        """Additive smoothing constant used in likelihood estimates."""
        return self._lambda_constant
    @property
    def vocabulary(self):
        """List of words retained by the model."""
        return self._vocabulary
    @property
    def spam_counter(self):
        """Per-word document frequencies among spam documents."""
        return self._spam_counter
    @property
    def ham_counter(self):
        """Per-word document frequencies among ham documents."""
        return self._ham_counter
    @property
    def probability_spam(self):
        """Prior probability P(spam)."""
        return self._probability_spam
    @property
    def probability_ham(self):
        """Prior probability P(ham)."""
        return self._probability_ham
def _vectorize(self, counter):
return Counter({x: 1 for x in counter})
def classify(self, document):
vector = self._vectorize(document.tokens)
document_likelihood_spam = self._compute_likelihood(
vector,
self.num_spam_documents,
self.spam_counter
)
document_likelihood_ham = self._compute_likelihood(
vector,
self.num_ham_documents,
self.ham_counter
)
probability_ham_document = self._compute_bayes(
document_likelihood_ham,
document_likelihood_spam
)
if probability_ham_document >= 0.5:
return 'ham'
return 'spam'
def _compute_likelihood(self, document, label_total, labelled_counter):
tmp = []
vocabulary = self.vocabulary
if self.lambda_constant:
vocabulary = list(document.keys())
for word in vocabulary:
count = labelled_counter[word]
if not document[word]:
count = label_total - labelled_counter[word]
likelihood = np.divide(
np.add(count, self.lambda_constant),
np.add(
label_total,
np.multiply(self.lambda_constant, len(self.vocabulary))
)
)
if likelihood == 0:
return 0.0
tmp.append(np.log(likelihood))
return np.exp(np.sum(tmp), dtype=np.float128)
def _compute_bayes(self, ham_likelihood, spam_likelihood):
return np.divide(
np.multiply(ham_likelihood, self.probability_ham),
np.add(
np.multiply(ham_likelihood, self.probability_ham),
np.multiply(spam_likelihood, self.probability_spam)
)
)
| jvmsangkal/spam-filter-py | spamfilter/classifier.py | classifier.py | py | 6,440 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "collections.Counter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "collectio... |
42886474434 | import logging
from json import JSONDecodeError
from typing import Dict, Any
import requests
from .exceptions import TrefleException
from .models import Result
class RestAdapter:
def __init__(self, token: str,
logger: logging.Logger = None):
"""
Constructor for RestAdapter
:param token:
:param logger: (optional) If your app has a logger,
pass it in here.
"""
self._logger = logger or logging.getLogger(__name__)
self._token = token
    def _make_request(self, http_method: str, url: str, ep_params=None,
                      data: Dict = None, **kwargs) -> (Result, Any):
        """Perform an HTTP request and return ``(Result, body_text)``.

        :param http_method: verb forwarded to ``requests.request``.
        :param url: endpoint URL; ``{placeholder}`` fields are filled
            from ``kwargs`` when any are given.
        :param ep_params: query-string parameters; the API token is
            added here (NOTE: the caller's dict is mutated in place).
        :param data: JSON body for the request.
        :raises TrefleException: on transport errors or non-2xx status.
        """
        if kwargs:
            url = url.format(**kwargs)
        ep_params["token"] = self._token
        log_line_pre = f"method={http_method}, url={url}, params={ep_params.items()}"
        log_line_post = ', '.join((log_line_pre, "success={}, status_code={}, message={}"))
        # Log HTTP params and perform an HTTP request, catching and
        # re-raising any exceptions
        try:
            self._logger.debug(msg=log_line_pre)
            response = requests.request(method=http_method, url=url,
                                        params=ep_params,
                                        json=data,
                                        timeout=None)
        except requests.exceptions.RequestException as exception:
            self._logger.error(msg=(str(exception)))
            raise TrefleException("Request Failed") from exception
        # NOTE(review): `response.text` never raises JSONDecodeError; this
        # guard looks like a leftover from a `response.json()` version --
        # data_out is the raw body text, not parsed JSON.
        try:
            data_out = response.text
        except (ValueError, JSONDecodeError) as exception:
            raise TrefleException("Bad JSON in response") from exception
        # If status_code in 200-299 range, return success Result with data,
        # otherwise raise exception
        is_success = 299 >= response.status_code >= 200  # 200 to 299 is OK
        log_line = log_line_post.format(is_success, response.status_code, response.reason)
        if is_success:
            self._logger.debug(msg=log_line)
            return Result(response.status_code, message=response.reason), data_out
        self._logger.error(msg=log_line)
        raise TrefleException(f"{response.status_code}: {response.reason}")
def get(self, url: str, ep_params=None, **kwargs) -> Result:
if ep_params is None:
ep_params = {}
return self._make_request(http_method='get', url=url, ep_params=ep_params,
kwargs=kwargs)
def post(self, url: str, ep_params=None, data: Dict = None,
**kwargs) -> Result:
if ep_params is None:
ep_params = {}
return self._make_request(http_method='post', url=url, ep_params=ep_params,
data=data, kwargs=kwargs)
| Overlrd/trefle | src/trefleapi/rest_adapter.py | rest_adapter.py | py | 2,929 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.Logger",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "requests.request",
... |
5255273641 | import os
from re import I
import sys
from openpyxl import Workbook
from openpyxl.styles import Border, Side, PatternFill, Font, Alignment
from datetime import datetime
sys.path.insert(0, os.path.abspath('..\\pycatia'))
from pycatia import catia
from pycatia.enumeration.enumeration_types import cat_work_mode_type
caa = catia()
documents = caa.documents
document = caa.active_document
product = document.product
product.apply_work_mode(cat_work_mode_type.index("DESIGN_MODE"))
class excel:
    """Output workbook builder.

    Creates two sheets:
      * ``self.ws`` "开料与加工清单" — cutting & machining list (header row 1).
      * ``self.ds`` "图纸清单" — drawing list (header row 2).
    """

    def __init__(self):
        # Alignment objects are immutable style descriptors in openpyxl,
        # so one shared instance can style every header cell.
        center = Alignment(horizontal="center", vertical="center")
        self.wb = Workbook()
        self.ws = self.wb.create_sheet("开料与加工清单", 0)
        self.ds = self.wb.create_sheet("图纸清单", 1)

        # --- machining-list header row (row 1) ---
        ws_headers = {
            'A1': "编号", 'B1': "图号", 'H1': "类型", 'I1': "材质",
            'J1': "规格", 'K1': "(长)", 'L1': "(宽)", 'M1': "(后)",
            'N1': "重量(kg)", 'O1': "总量", 'U1': "加工方式#1",
            'V1': "加工方式#2", 'W1': "备注",
        }
        for ref, text in ws_headers.items():
            self.ws[ref].value = text
        self.ws.merge_cells('B1:G1')  # 图号 spans columns B-G
        self.ws.merge_cells('O1:T1')  # 总量 spans columns O-T
        for ref in ws_headers:
            self.ws[ref].alignment = center

        # --- machining-list column widths ---
        ws_widths = {'A': 5, 'B': 10, 'C': 10, 'D': 10, 'E': 10, 'F': 10,
                     'G': 10, 'H': 9, 'I': 12, 'J': 30, 'K': 7, 'L': 7,
                     'M': 7, 'N': 10, 'O': 3, 'P': 3, 'Q': 3, 'R': 3,
                     'S': 3, 'T': 3, 'U': 12, 'V': 12, 'W': 12}
        for col, width in ws_widths.items():
            self.ws.column_dimensions[col].width = width

        # --- drawing-list header row (row 2) ---
        ds_headers = {
            'A2': "编号", 'B2': "类型", 'C2': "图号", 'D2': "图",
            'E2': "幅", 'F2': "页数", 'G2': "版本", 'H2': "总生产数量",
        }
        for ref, text in ds_headers.items():
            self.ds[ref].value = text
            # BUGFIX: the original centred self.ws['A1'..'H1'] here (cells it
            # had already styled above) instead of the drawing-list headers
            # it had just written; centre the ds header cells instead.
            self.ds[ref].alignment = center

        ds_widths = {'A': 4.5, 'B': 8, 'C': 15, 'D': 3, 'E': 3,
                     'F': 4.5, 'G': 4.5, 'H': 11}
        for col, width in ds_widths.items():
            self.ds.column_dimensions[col].width = width

    def input(self, input_row, input_column, input_value):
        """Write *input_value* into the machining sheet at (row, column).

        The name shadows the ``input`` builtin but is kept because callers
        throughout this module use ``excel.input(...)``.
        """
        self.ws.cell(row=input_row, column=input_column).value = input_value

    def save_excel(self):
        """Persist the workbook to the working directory."""
        self.wb.save("BOM and process list.xlsx")
class process:
    """Fills the machining/BOM workbook while the module-level loop walks
    the CATIA product tree.

    Attributes:
        iteration: current output row of the machining sheet (header is row 2).
        excel: the `excel` workbook wrapper defined above.
        blank / fillrowno / partlist: initialised but never used below —
            presumably leftovers from an earlier version (kept as-is).
    """
    def __init__(self):
        self.iteration = 2
        self.blank = " "
        self.excel = excel()
        self.fillrowno = 1
        self.partlist = []

    def prod_process(self, obje, current_layer, listofpart, newlistofpart):
        """Append one "APC" part to the sheet and record its quantity.

        *current_layer* is both the column for the part number (B..G area)
        and, via ``current_layer + 13``, the per-depth quantity column
        (O..T).  ``newlistofpart`` is accepted but unused here — the caller
        maintains it.
        """
        if "APC" in obje.part_number:
            listofpart.append(obje.part_number)
            self.iteration += 1
            self.excel.input(self.iteration, 1, self.iteration-1)
            self.excel_write(self.iteration, current_layer, obje)
            self.excel.input(self.iteration, current_layer+13, listofpart.count(obje.part_number))
            # Saving after every row is slow but crash-safe.
            self.excel.save_excel()

    def quantity_update(self, obje1, current_layer1, listofpart1, newlistofpart1, indexlist):
        """Refresh the quantity cell of a part already written to the sheet."""
        self.excel.input(indexlist[newlistofpart1.index(obje1.part_number)], current_layer1+13, listofpart1.count(obje1.part_number))

    def excel_write(self, rowno, columnno, target_obj):
        """Write weight, category (plate / pipe / rod / ...), material and
        size columns for *target_obj*.

        NOTE(review): locals such as ``thkv``/``Wid``/``Len``/``Diav``/
        ``model`` are only bound when the matching CATIA parameter exists.
        The 角钢/槽钢 branches use ``Len`` without first checking for
        parameter "L", so a part lacking it raises NameError — confirm all
        library parts define it.
        """
        weight = round(target_obj.analyze.mass,2)
        partno = target_obj.part_number
        definition = target_obj.definition
        self.excel.input(rowno, columnno, partno)
        self.excel.input(rowno, 14, weight)
        category = " "
        definition_text = " "
        if target_obj.is_catpart():
            part_parameters = target_obj.parameters
            # Pull every optional sizing parameter that exists on the part.
            if part_parameters.is_parameter("Material"):
                materialv = part_parameters.item("Material").value
            if part_parameters.is_parameter("THK"):
                thkv = round(part_parameters.item("THK").value,1)
            if part_parameters.is_parameter("W"):
                Wid = part_parameters.item("W").value
            if part_parameters.is_parameter("L"):
                Len = float(part_parameters.item("L").value)
            if part_parameters.is_parameter("D_in"):
                D_inv = float(part_parameters.item("D_in").value)
            if part_parameters.is_parameter("D_out"):
                Diav = float(part_parameters.item("D_out").value)
            if part_parameters.is_parameter("D"):
                Diav = float(part_parameters.item("D").value)
            if part_parameters.is_parameter("A"):
                Ah = part_parameters.item("A").value
            if part_parameters.is_parameter("B"):
                Bh = part_parameters.item("B").value
            if part_parameters.is_parameter("t"):
                tv = part_parameters.item("t").value
            if part_parameters.is_parameter("model"):
                model = part_parameters.item("model").value
            if part_parameters.is_parameter("Model"):
                model = part_parameters.item("Model").value
            # Classify by which parameter combination is present.
            if part_parameters.is_parameter("W"):
                if part_parameters.is_parameter("L"):
                    if part_parameters.is_parameter("THK"):
                        category = "钢板"
                        definition_text = str(category) + " " + str(int(thkv)) + "THK" + "x" + str(int(Wid)) + "x"+ str(int(Len))
            elif part_parameters.is_parameter("D_in"):
                if part_parameters.is_parameter("D_out"):
                    if part_parameters.is_parameter("L"):
                        category = "圆管"
                        definition_text = str(category) + " " + str(int(Diav)) + "x" + str(int(D_inv)) + "x" + "L=" + str(int(Len))
                    elif part_parameters.is_parameter("THK"):
                        category = "钢板"
                        definition_text = str(category) + " " + str(int(thkv)) + "THK" + "x" + str(int(Diav))
            elif part_parameters.is_parameter("D"):
                if part_parameters.is_parameter("THK"):
                    category = "钢板"
                    definition_text = str(category) + " " + str(int(thkv)) + "THK" + "x" + str(int(Diav))
                elif part_parameters.is_parameter("L"):
                    category = "圆钢"
                    definition_text = str(category) + " " + "D" + str(int(Diav)) + "x" + "L=" + str(int(Len))
            elif part_parameters.is_parameter("D_out"):
                if part_parameters.is_parameter("THK"):
                    category = "钢板"
                    definition_text = str(category) + " " + str(int(thkv)) + "THK" + "x" + str(int(Diav))
                elif part_parameters.is_parameter("L"):
                    category = "圆钢"
                    definition_text = str(category) + " " + "D" + str(int(Diav)) + "x" + "L=" + str(int(Len))
            elif part_parameters.is_parameter("A"):
                if part_parameters.is_parameter("t"):
                    if part_parameters.is_parameter("B"):
                        category = "扁通"
                        definition_text = str(model) + "," + "L=" + str(int(Len))
                    else:
                        category = "方通"
                        definition_text = str(model) + "," + "L=" + str(int(Len))
            elif "角钢" in definition:
                category = "角钢"
                if part_parameters.is_parameter("model"):
                    definition_text = str(model) + "," + "L=" + str(int(Len))
                elif part_parameters.is_parameter("Model"):
                    definition_text = str(model) + "," + "L=" + str(int(Len))
            elif "槽钢" in definition:
                category = "槽钢"
                if part_parameters.is_parameter("model"):
                    definition_text = str(model) + "," + "L=" + str(int(Len))
                elif part_parameters.is_parameter("Model"):
                    definition_text = str(model) + "," + "L=" + str(int(Len))
            else :
                category = "其他"
                definition_text = target_obj.definition
            # Dead alternative classification kept verbatim from the original:
            '''
            elif "扁通" in definition:
                category = "扁通"
                if part_parameters.is_parameter("Model"):
                    definition_text = str(category) + str(model) + "L=" + str(int(Len)) + "mm"
                else:
                    definition_text = target_obj.definition
            elif "圆通" in definition:
                category = "圆通"
                if part_parameters.is_parameter("Model"):
                    definition_text = str(category) + str(model) + "L=" + str(int(Len)) + "mm"
                else:
                    definition_text = target_obj.definition
            elif "方通" in definition:
                category = "方通"
                if part_parameters.is_parameter("Model"):
                    definition_text = str(category) + str(model) + "L=" + str(int(Len)) + "mm"
                else:
                    definition_text = target_obj.definition
            elif "钢板" in definition:
                category = "钣金"
            '''
            # Emit category, dimensions, material and the formatted spec text.
            self.excel.input(rowno, 8, category)
            if part_parameters.is_parameter("L"):
                self.excel.input(rowno, 11, Len)
            if part_parameters.is_parameter("W"):
                self.excel.input(rowno, 12, Wid)
            if part_parameters.is_parameter("THK"):
                self.excel.input(rowno, 13, thkv)
            elif part_parameters.is_parameter("t"):
                self.excel.input(rowno, 13, tv)
            if part_parameters.is_parameter("Material"):
                self.excel.input(rowno, 9, materialv)
            self.excel.input(rowno, 10, definition_text)
        else:
            # Sub-assembly (CATProduct) rather than a single part.
            category = "组装件"
            self.excel.input(rowno, 8, category)
            self.excel.input(rowno, 10, definition_text)
# ---------------------------------------------------------------------------
# Module-level driver: walk the product tree (hand-unrolled to 6 levels of
# nesting), list every "APC" part, then build the drawing list and the
# per-drawing production quantities.
# Per-depth bookkeeping: list_k/newlist_k are passed to prod_process,
# ql_k counts occurrences, pl_k/npl_k track already-seen part numbers and
# index_k remembers which sheet row each part landed on.
# ---------------------------------------------------------------------------
p = process()
list_1 = []
newlist_1 = []
pl1 = []
npl1 = []
ql1 = []
index1 = []
stime = datetime.now()  # wall-clock start, reported at the end
p.excel.input(2,2,product.part_number)  # root product goes on row 2
p.excel.input(2,1,1)
for product1 in product.products:
    if "APC" in product1.part_number:
        ql1.append(product1.part_number)
        if product1.part_number not in pl1:
            npl1.append(product1.part_number)
            p.prod_process(product1, 3, list_1, newlist_1)
            index1.append(p.iteration)
            print("-------------")
            print(index1)
            print(npl1)
            print("-------------")
            if product1.is_catproduct():
                list_2 = []
                newlist_2 = []
                pl2 = []
                npl2 = []
                ql2 = []
                index2 = []
                for product2 in product1.products:
                    if "APC" in product2.part_number:
                        ql2.append(product2.part_number)
                        if product2.part_number not in pl2:
                            npl2.append(product2.part_number)
                            p.prod_process(product2, 4, list_2, newlist_2)
                            index2.append(p.iteration)
                            if product2.is_catproduct():
                                list_3 = []
                                newlist_3 = []
                                pl3 = []
                                npl3 = []
                                ql3 = []
                                index3 = []
                                for product3 in product2.products:
                                    if "APC" in product3.part_number:
                                        ql3.append(product3.part_number)
                                        if product3.part_number not in pl3:
                                            npl3.append(product3.part_number)
                                            p.prod_process(product3, 5, list_3, newlist_3)
                                            index3.append(p.iteration)
                                            if product3.is_catproduct():
                                                list_4 = []
                                                newlist_4 = []
                                                pl4 = []
                                                npl4 = []
                                                ql4 = []
                                                index4 = []
                                                for product4 in product3.products:
                                                    if "APC" in product4.part_number:
                                                        ql4.append(product4.part_number)
                                                        if product4.part_number not in pl4:
                                                            npl4.append(product4.part_number)
                                                            p.prod_process(product4, 6, list_4, newlist_4)
                                                            index4.append(p.iteration)
                                                            if product4.is_catproduct():
                                                                list_5 = []
                                                                newlist_5 = []
                                                                pl5 = []
                                                                npl5 = []
                                                                ql5 = []
                                                                index5 = []
                                                                for product5 in product4.products:
                                                                    if "APC" in product5.part_number:
                                                                        ql5.append(product5.part_number)
                                                                        if product5.part_number not in pl5:
                                                                            npl5.append(product5.part_number)
                                                                            p.prod_process(product5, 7, list_5, newlist_5)
                                                                            index5.append(p.iteration)
                                                                            if product5.is_catproduct():
                                                                                list_6 = []
                                                                                newlist_6 = []
                                                                                pl6 = []
                                                                                npl6 = []
                                                                                ql6 = []
                                                                                index6 = []
                                                                                for product6 in product5.products:
                                                                                    if "APC" in product6.part_number:
                                                                                        ql6.append(product6.part_number)
                                                                                        if product6.part_number not in pl6:
                                                                                            npl6.append(product6.part_number)
                                                                                            p.prod_process(product6, 8, list_6, newlist_6)
                                                                                            index6.append(p.iteration)
                                                                                        elif product6.part_number in npl6:
                                                                                            p.quantity_update(product6, 8, ql6, npl6, index6)
                                                                            #else :
                                                                            #    p.prod_process(product5, 6, list_5, newlist_5)
                                                                            #    index5.append(p.iteration)
                                                                            pl5.append(product5.part_number)
                                                                    # NOTE(review): tests product4 but updates
                                                                    # product5 — looks like a copy-paste slip;
                                                                    # left byte-identical pending confirmation.
                                                                    elif product4.part_number in npl5:
                                                                        p.quantity_update(product5, 7, ql5, npl5, index5)
                                                            #else :
                                                            #    p.prod_process(product4, 5, list_4, newlist_4)
                                                            #    index4.append(p.iteration)
                                                            pl4.append(product4.part_number)
                                                        elif product4.part_number in npl4:
                                                            p.quantity_update(product4, 6, ql4, npl4, index4)
                                            #else :
                                            #    p.prod_process(product3, 4, list_3, newlist_3)
                                            #    index3.append(p.iteration)
                                            pl3.append(product3.part_number)
                                        elif product3.part_number in npl3:
                                            p.quantity_update(product3, 5, ql3, npl3, index3)
                            #else :
                            #    p.prod_process(product2, 3, list_2, newlist_2)
                            #    index2.append(p.iteration)
                            pl2.append(product2.part_number)
                        elif product2.part_number in npl2:
                            p.quantity_update(product2, 4, ql2, npl2, index2)
            #else:
            #    p.prod_process(product1, 2, list_1, newlist_1)
            #    index1.append(p.iteration)
            pl1.append(product1.part_number)
        elif product1.part_number in npl1:
            p.quantity_update(product1, 3, ql1, npl1, index1)
p.excel.save_excel()
# ---- collect the unique drawing numbers from columns B..E of the BOM ----
drawinglist =[]
max = int(p.excel.ws.max_row)  # NOTE(review): shadows the builtin max()
for r in range(2, max):  # NOTE(review): range(2, max) skips the last row
    for c in range(2, 6):
        drawingno = p.excel.ws.cell(r,c).value
        if drawingno not in drawinglist and drawingno != None:
            drawinglist.append(drawingno)
drawinglist.sort()
# Fill the drawing-list sheet (data starts on row 3; "A"/"3" are defaults
# for revision / sheet size).
for i in range(0,len(drawinglist)):
    p.excel.ds.cell(row=i+3,column=1).value = i+1
    p.excel.ds.cell(row=i+3,column=3).value = drawinglist[i]
    p.excel.ds.cell(row=i+3,column=4).value = "A"
    p.excel.ds.cell(row=i+3,column=5).value = "3"
    p.excel.ds.cell(row=i+3,column=7).value = "A"
# ---- count the total production quantity per drawing number by walking
# ---- the whole tree again (same 6-level unrolling as above).
qty=0
max = int(p.excel.ds.max_row)
for ii in range(2, max):  # NOTE(review): with row=ii+1 this reads rows 3..max
    dwgno = str(p.excel.ds.cell(row=ii+1, column=3).value)
    print(dwgno)
    for product1 in product.products:
        if dwgno in product1.part_number:
            qty = qty + 1
        if product1.is_catproduct():
            for product2 in product1.products:
                if dwgno in product2.part_number:
                    qty = qty + 1
                if product2.is_catproduct():
                    for product3 in product2.products:
                        if dwgno in product3.part_number:
                            qty = qty + 1
                        if product3.is_catproduct():
                            for product4 in product3.products:
                                if dwgno in product4.part_number:
                                    qty = qty + 1
                                if product4.is_catproduct():
                                    for product5 in product4.products:
                                        if dwgno in product5.part_number:
                                            qty = qty + 1
                                        if product5.is_catproduct():
                                            for product6 in product5.products:
                                                if dwgno in product6.part_number:
                                                    qty = qty + 1
    p.excel.ds.cell(row=ii+1,column=8).value = qty
    qty=0
p.excel.save_excel()
etime = datetime.now()
print("Start Time: ", stime.strftime("%H:%M:%S"))
print("End Time: ", etime.strftime("%H:%M:%S"))
| kang851216/CATIA_macro | manufacturing and process list_adding drawing list_test.py | manufacturing and process list_adding drawing list_test.py | py | 24,204 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.insert",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
35865754669 | """
no longer needed since pointnet2_ssg_cls can provide this form
"""
import os
import sys

import torch
import torch.nn as nn
import torch.nn.functional as F

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(BASE_DIR))
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'ops','pointnet2_ops_lib', 'pointnet2_ops'))

# PointnetSAModuleMSG added: it is used by PointNet2MSGCls._build_model but
# its import had been commented out, causing a NameError at build time.
from pointnet2_ops.pointnet2_modules import PointnetFPModule, PointnetSAModule, PointnetSAModuleMSG
# from .pointnet2_modules import PointnetSAModule, PointnetSAModuleMSG
from .pointnet2_ssg_cls import PointNet2SSGCls
class PointNet2MSGCls(PointNet2SSGCls):
    """PointNet++ multi-scale-grouping (MSG) classifier.

    Reuses the SSG base model wholesale and only swaps the set-abstraction
    stack for MSG modules (one radii/nsamples/mlps triple per SA level).
    """

    def _build_model(self):
        """Build the network, replacing the SSG SA stack with MSG modules.

        Relies on ``self.radii`` / ``self.npoints`` / ``self.nsamples`` /
        ``self.mlps`` / ``self.use_xyz`` being populated by the base class.
        Requires ``PointnetSAModuleMSG`` to be imported at module top
        (the import was previously commented out, which made this method
        raise NameError).
        """
        # Let the base class build FP modules, the classification head and
        # its SSG SA stack ...
        super()._build_model()
        # ... then overwrite the SA stack with multi-scale grouping modules.
        self.SA_modules = nn.ModuleList()
        for i in range(len(self.radii)):
            self.SA_modules.append(
                PointnetSAModuleMSG(
                    npoint=self.npoints[i],
                    radii=self.radii[i],
                    nsamples=self.nsamples[i],
                    mlps=self.mlps[i],
                    use_xyz=self.use_xyz
                )
            )
        # Final group-all SA layer producing the global feature.
        self.SA_modules.append(
            PointnetSAModule(
                mlp=self.mlps[-1],
                use_xyz=self.use_xyz
            )
        )
| PointCloudYC/PointNet-modern.pytorch | models/pointnet2/pointnet2_msg_cls.py | pointnet2_msg_cls.py | py | 1,339 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line... |
73881080745 | from flask import Flask, request
import json
from jwt.exceptions import JWTException
from jwt.jwt import JWT
from jwt.jwk import OctetJWK
def login(app: Flask):
    """Register POST /api/auth/login on *app* and return the view function.

    Expected JSON body: {"login": ..., "password": ..., "__secure_key": <JWT>}.
    Missing keys yield 400 with the offending key name; an invalid JWT in
    ``__secure_key`` yields 400 with an error payload.
    """
    @app.post("/api/auth/login")
    def test():
        request_data = request.get_json()
        try:
            jwt = JWT()
            # These lookups double as presence checks: a missing key raises
            # KeyError and is turned into a 400 below.  The values are not
            # used yet (authentication is stubbed out).
            login = request_data["login"]
            password = request_data["password"]
            secure_key = request_data["__secure_key"]
            # SECURITY(review): hard-coded signing key b'123' — move to
            # configuration/secrets before production use.
            jwt.decode(secure_key, key=OctetJWK(b'123'))
            return json.dumps({
                "access_token": "None",
                "logout_hash": "None",
                "user_id": 0
            }), 200, {
                'Content-Type': 'application/json'
            }
        except KeyError as e:
            return json.dumps({
                "error": str(e),
                "error_code": 0
            }), 400, {
                'Content-Type': 'application/json'
            }
        except JWTException:
            # BUGFIX: the header value was misspelled "applicaiton/json",
            # so clients would not recognise the response as JSON.
            return '{"error":"secure_key is invalid", "error_code": 0}', 400, {
                'Content-Type': 'application/json'
            }
    return test
| Axime/Aska2.0 | server/routes/auth/login.py | login.py | py | 1,121 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "flask.request.get_json",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "jwt.exceptions",
... |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr  1 21:36:27 2019

@author: Rodrigo

Dump every row of the ``enel`` table from enel.db into output.csv.
"""
import csv
import sqlite3

# BUGFIX: the original opened output.csv without newline='' (the csv module
# requires it; otherwise Windows output gets blank rows) and never closed
# the file handle.  The with-block / try-finally guarantee both the file
# and the database connection are released.
with open('output.csv', 'w', newline='') as out_file:
    writer = csv.writer(out_file)
    writer.writerow(['cpf', 'UC'])

    conn = sqlite3.connect('enel.db')
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM enel;")
        for linha in cursor.fetchall():
            writer.writerow(linha)
            print(linha)  # progress echo, as in the original
    finally:
        conn.close()
| rasiqueira/enel | bd.py | bd.py | py | 409 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "csv.writer",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 13,
"usage_type": "call"
}
] |
31628499109 | import traceback
import sys
from discord.ext import commands
import discord
class ErrorHandler(commands.Cog):
    """Global command-error handler cog for the bot."""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """Route unhandled command errors to a sensible response."""
        # Commands that define a local on_error handler deal with their
        # own errors; don't double-handle here.
        if hasattr(ctx.command, 'on_error'):
            return

        # BUGFIX: the original wrote (commands.CommandNotFound) — parentheses
        # without a trailing comma do not create a tuple.  isinstance happened
        # to accept the bare class, but the intent was a tuple of ignorables.
        ignored = (commands.CommandNotFound,)
        # Unwrap CommandInvokeError to the original cause when present.
        error = getattr(error, 'original', error)

        if isinstance(error, ignored):
            print("Command not found: ", error)
            return
        elif isinstance(error, commands.DisabledCommand):
            return await ctx.send(f'{ctx.command} has been disabled.')
        elif isinstance(error, commands.NoPrivateMessage):
            try:
                return await ctx.author.send(f'{ctx.command} can not be used in Private Messages.')
            except discord.HTTPException:
                # BUGFIX: was a bare `except: pass`, which swallowed every
                # exception (even KeyboardInterrupt).  Only DM delivery
                # failures (closed DMs etc.) are expected here.
                pass
        elif isinstance(error, discord.ext.commands.errors.MissingRequiredArgument):
            return await ctx.send(error)
        else:
            print(error)
            return
def setup(bot):
    """Extension entry point: called by bot.load_extension to add the cog."""
    cog = ErrorHandler(bot)
    bot.add_cog(cog)
| docgonzo2015/Botler-discord-bot | cogs/errors.py | errors.py | py | 1,104 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.CommandNotFound",
"line_number": 15,
"usage_type": "attribute"
},... |
27980759583 | """
Simulated Annealing Class
"""
import pickle
import random
import math
import numpy as np
import sklearn
import pandas as pd
import configparser
import random
from pathlib import Path
import joblib
from Utils.attack_utils import get_constrains
from Models.scikitlearn_wrapper import SklearnClassifier
from Utils.data_utils import split_to_datasets
def get_config(path='configurations.txt'):
    """Read *path* with configparser and return its DEFAULT section.

    The file name was previously hard-coded; it is now a parameter whose
    default preserves the old behaviour, so existing callers are unaffected.

    Note: configparser.read silently ignores missing files, so a bad path
    yields an (almost) empty DEFAULT section rather than an error.
    """
    config = configparser.ConfigParser()
    config.read(path)
    return config['DEFAULT']
def date_change(current):
    """Return candidate dates after *current* within the same year/month.

    *current* is an int encoded YYYYMMDD (e.g. 20180200).  Only the day
    part is varied; year and month are kept fixed.

    BUGFIX: the original called ``current.copy()`` (ints have no ``copy``
    method, so it raised AttributeError) and compared with true division
    ``/``, which stops matching as soon as the day changes; integer
    division ``//`` is what keeps the year+month comparison stable.
    """
    dates = []
    new_date = current
    # Stay in the same year and month; the day may grow one step past 30
    # (the original's `<= 30` guard allowed appending day 31 — preserved).
    while new_date // 100 == current // 100 and new_date % 100 <= 30:
        new_date = new_date + 1
        dates.append(new_date)
    return dates
def time_change(current):
    """Enumerate candidate times derived from *current* (int HHMMSS).

    NOTE(review): this function cannot run as written — ``current.copy()``
    raises AttributeError on an int, ``new_date`` is assigned but never
    used, and the ``/`` comparisons perform true division on ints.  The
    in-line comments ("should be 1", "should be +1") suggest the step
    sizes are provisional.  Left byte-identical pending clarification of
    the intended time schedule.
    """
    new_time = current.copy()
    times = []
    new_date = current.copy()  # 235959
    while (new_time / 10000 < 24):
        while ((new_time / 100) % 100 < 60):
            while (new_time % 100 < 60):
                new_time = new_time + 29  # should be 1
                times.append(new_time)
            new_time = (new_time / 100 + 2) * 100  # add minute #should be +1
            times.append(new_time)
        new_time = (new_time / 10000 + 1) * 10000  # add hour
        times.append(new_time)
    return times
def get_feature_range(dataset_name):
    """Return, per editable feature, the candidate values the annealer may
    assign when generating neighbors.

    Unknown dataset names yield an empty mapping.  Callers only perform
    membership lookups on the result, so key order is not relied upon.
    """
    if dataset_name == "RADCOM":
        lag_prefixes = (
            'previous_previous_previous_previous_',
            'previous_previous_previous_',
            'previous_previous_',
            'previous_',
        )
        ranges = {prefix + 'total_sum_of_data_to_sec': range(0, 100000, 100)
                  for prefix in lag_prefixes}
        ranges['total_sum_of_data_to_sec'] = range(0, 1000, 10)
        return ranges
    if dataset_name == "HATE":
        hate_features = (
            'c_work_empath', 'normal_neigh', 'c_legend_empath',
            'c_cleaning_empath', 'sleep_empath', 'c_furniture_empath',
            'c_ridicule_empath', 'c_fire_empath', 'hate_neigh',
        )
        return {name: np.linspace(0.1, 0.9, 100) for name in hate_features}
    if dataset_name == 'CREDIT':
        continuous = (
            'PREV_ACTIVE_INSTALMENT_PAYMENT_DIFF_MEAN',
            'PREV_Consumer_AMT_CREDIT_SUM',
            'AMT_ANNUITY',
            'PREV_Cash_SIMPLE_INTERESTS_MEAN',
            'CREDIT_TO_GOODS_RATIO',
            'DAYS_EMPLOYED',
            'CREDIT_TO_ANNUITY_RATIO',
        )
        table = {name: np.linspace(0.1, 0.9, 100) for name in continuous}
        # Ordinal education level takes one of four fixed codes.
        table['NAME_EDUCATION_TYPE'] = {0, 0.25, 0.5, 0.75}
        return table
    return {}
class SimulatedAnnealing:
    """Simulated-annealing search for an adversarial example.

    Starting from *initialSolution* (a pandas Series), the search repeatedly
    moves to neighbouring records (one editable feature changed, candidates
    supplied by the module-level globals ``perturbability`` and
    ``feature_range``) trying to minimise the target model's probability
    for the record's true class.  Success is declared when that probability
    drops below 0.5.
    """

    def __init__(self, initialSolution, solutionEvaluator, initialTemp, finalTemp, tempReduction, neighborOperator=None,
                 iterationPerTemp=200, alpha=10, beta=5, record_id=0, record_true_class=0, model_name=""):
        self.solution = initialSolution
        self.evaluate = solutionEvaluator          # typically model.predict_proba
        self.initialTemp = initialTemp
        self.currTemp = initialTemp
        self.finalTemp = finalTemp
        self.iterationPerTemp = iterationPerTemp
        self.alpha = alpha                         # linear cooling step / geometric factor
        self.beta = beta                           # slow-decrease parameter
        # The neighborOperator argument is accepted for API compatibility but
        # the internal operator is always used (as in the original code).
        self.neighborOperator = self.neighbor_operator_func
        self.record_id = record_id
        self.record_true_class = record_true_class
        # Persist the starting record so attack runs can be inspected later.
        df_temp = pd.DataFrame(self.solution).T
        self.path_to_file = "results/" + model_name + f"/solution_{self.record_id}_{self.record_true_class}.csv"
        output_dir = Path("results/" + model_name)
        output_dir.mkdir(parents=True, exist_ok=True)
        df_temp.to_csv(self.path_to_file, index=False)
        # Lowest true-class probability seen so far and its solution.
        self.max_cost = self.evaluate(self.solution.values.reshape(1, -1))[0][self.record_true_class]
        self.best_solution = self.solution
        if tempReduction == "linear":
            self.decrementRule = self.linearTempReduction
        elif tempReduction == "geometric":
            self.decrementRule = self.geometricTempReduction
        elif tempReduction == "slowDecrease":
            self.decrementRule = self.slowDecreaseTempReduction
        else:
            self.decrementRule = tempReduction     # caller-supplied callable

    def linearTempReduction(self):
        """T <- T - alpha."""
        self.currTemp -= self.alpha

    def geometricTempReduction(self):
        """T <- T * alpha (alpha should be < 1 for actual cooling)."""
        self.currTemp *= self.alpha

    def slowDecreaseTempReduction(self):
        """T <- T / (1 + beta * T)."""
        self.currTemp = self.currTemp / (1 + self.beta * self.currTemp)

    def isTerminationCriteriaMet(self):
        """Stop when the temperature floor is reached or no neighbor exists.

        BUGFIX: the original compared the neighbor *list* to 0
        (``... == 0``), which is always False; emptiness is what was meant.
        """
        return self.currTemp <= self.finalTemp or not self.neighborOperator(self.solution)

    def neighbor_operator_func(self, current):
        """Return every neighbor of *current*.

        A neighbor is a copy of *current* with exactly one editable feature
        (per the global ``perturbability`` table) set to one of its
        candidate values (per the global ``feature_range`` mapping).
        """
        editable = perturbability
        neighbors = []
        for feature in editable.Row:
            if editable[editable['Row'] == feature]['Perturbability'].values[0] == 1:
                if feature in feature_range:
                    for change in feature_range[feature]:
                        neighbor = current.copy()
                        if neighbor[feature] != change:
                            neighbor[feature] = change
                            neighbors.append(neighbor)
        return neighbors

    def run(self):
        """Run the annealing loop.

        Returns ``[1, adversarial_record]`` as soon as the true-class
        probability drops below 0.5, otherwise ``[0, None]`` once the
        search is exhausted.
        """
        while not self.isTerminationCriteriaMet():
            for i in range(self.iterationPerTemp):
                neighbors = self.neighborOperator(self.solution)
                if not neighbors:
                    continue
                # Evaluate a random subset in one batch and keep the neighbor
                # with the lowest true-class probability.
                # BUGFIX: random.sample raises ValueError when fewer than 100
                # neighbors exist; cap the sample size at the population size.
                candidates = random.sample(neighbors, min(100, len(neighbors)))
                newSolution = candidates[np.argmin(self.evaluate(candidates), axis=0)[self.record_true_class]]
                curr_sol_val = self.evaluate(self.solution.values.reshape(1, -1))[0][self.record_true_class]
                new_sol_val = self.evaluate(newSolution.values.reshape(1, -1))[0][self.record_true_class]
                if new_sol_val < 0.5:
                    print("find attacked sample!!!")
                    return [1, newSolution]
                cost = curr_sol_val - new_sol_val
                if cost >= 0:
                    # Improvement (or tie): always accept.
                    self.solution = newSolution
                    if new_sol_val < self.max_cost:
                        self.max_cost = new_sol_val
                        self.best_solution = self.solution
                    print("Best Cost: ", self.evaluate(self.solution.values.reshape(1, -1))[0][self.record_true_class])
                else:
                    # Metropolis-style acceptance of a worse neighbor.
                    if random.uniform(0, 0.7) < math.exp(-cost / (self.currTemp * 2)):
                        self.solution = newSolution
                    print("Current Cost: ", self.evaluate(self.solution.values.reshape(1, -1))[0][self.record_true_class])
            self.decrementRule()
        # Search exhausted (temperature floor, or no neighbors) without
        # success.  BUGFIX: the original returned None when the temperature
        # ran out, which crashes the caller's `attack_res[0]` check — always
        # return the documented [0, None] failure shape.
        if not self.neighborOperator(self.solution):
            print('no neighbors')
        return [0, None]
if __name__ == '__main__':
# Set parameters
configurations = get_config()
data_path = configurations["data_path"]
raw_data_path = configurations["raw_data_path"]
perturbability_path = configurations["perturbability_path"]
results_path = configurations["results_path"]
seed = int(configurations["seed"])
exclude = configurations["exclude"]
dataset_name = raw_data_path.split("/")[1]
datasets = split_to_datasets(raw_data_path, save_path=data_path)
x_attack = datasets.get("x_test")
y_attack = datasets.get("y_test")
if ('RADCOM' in dataset_name):
x_attack = pd.read_csv('Datasets/RADCOM/x_test_seed_42_val_size_0.25_surrgate_train_size_0.5.csv')
y_attack = pd.read_csv('Datasets/RADCOM/y_test_seed_42_val_size_0.25_surrgate_train_size_0.5.csv')
# model = pickle.load(open('Models/RADCOM/RADCOM_target_GB_seed-42_lr-0.01_estimators-500_maxdepth-9.pkl', 'rb'))
model = pickle.load(open('Models/RADCOM/RADCOM_target_RF_seed-42_estimators-500_maxdepth-9.pkl', 'rb'))
# model = pickle.load(open('RADCOM_target_XGB_seed-42_lr-0.1_estimators-70_maxdepth-8', 'rb'))
elif ('HATE' in dataset_name):
x_attack = pd.read_csv('Datasets/HATE/x_test_seed_42_val_size_0.25_surrgate_train_size_0.5.csv')
y_attack = pd.read_csv('Datasets/HATE/y_test_seed_42_val_size_0.25_surrgate_train_size_0.5.csv')
#x_attack = pd.read_csv('Datasets/HATE/x_orig_attack.csv')
#y_attack = pd.read_csv('Datasets/HATE/y_orig_attack.csv')
#model = joblib.load('Models/HATE/rf_sota_model.pkl')
#model = pickle.load(open('Models/HATE/HATE_target_RF_seed-42_estimators-100_maxdepth-3.pkl', 'rb'))
#model = pickle.load(open('Models/HATE/HATE_target_XGB_seed-42_lr-0.1_estimators-70_maxdepth-8.pkl', 'rb'))
model = pickle.load(open('Models/HATE/HATE_target_GB_seed-42_lr-1.0_estimators-100_maxdepth-3.pkl', 'rb'))
elif ('CREDIT' in dataset_name):
x_attack = pd.read_csv('Datasets/CREDIT/x_test_seed_42_val_size_0.25_surrgate_train_size_0.5.csv')
y_attack = pd.read_csv('Datasets/CREDIT/y_test_seed_42_val_size_0.25_surrgate_train_size_0.5.csv')
#x_attack = pd.read_csv('Datasets/HATE/x_orig_attack.csv')
#y_attack = pd.read_csv('Datasets/HATE/y_orig_attack.csv')
model = pickle.load(open('Models/CREDIT/CREDIT_target_RF_seed-42_estimators-200_maxdepth-9.pkl', 'rb'))
#model = pickle.load(open('Models/CREDIT/CREDIT_target_GB_seed-42_lr-1.0_estimators-100_maxdepth-3.pkl', 'rb'))
# --- Attack loop: run Simulated Annealing against every test record ---------
perturbability = pd.read_csv(perturbability_path)
feature_range = get_feature_range(dataset_name)
model_name = model.__class__.__name__
print("model name: ", model_name)
# Held-out records to attack and their labels (column 'pred' holds the class).
attack_x = datasets.get("x_test")
attack_y = datasets.get("y_test")
preds = model.predict(attack_x)
eq = np.equal(preds,attack_y['pred'])
i=0
columns_names = list(attack_x.columns)
# Counters: successful attacks, misclassified (skipped) and well-classified records.
num_success = 0
mis = 0
well = 0
attack_set = []
orig = []
a=[]
while i < attack_x.shape[0]: # iterate over every record in the attack set
    record = attack_x.loc[i]
    record_true_class = int(attack_y.pred[i])
    record_pred = int(preds[i])
    prediction_record = model.predict(record.values.reshape(1, -1))[0]
    # Sanity check: batch prediction should agree with single-record prediction.
    if (record_pred != prediction_record):
        print('pred != pred')
    # Only attack records the model already classifies correctly.
    if (record_pred != record_true_class):
        print("record is misclassified")
        mis += 1
        i += 1
        continue
    i += 1
    well +=1
    SA = SimulatedAnnealing(initialSolution=record, solutionEvaluator=model.predict_proba,
                            initialTemp=100, finalTemp=0.01,
                            tempReduction="linear",
                            iterationPerTemp=100, alpha=10, beta=5, record_id=i, record_true_class=int(record_true_class),
                            model_name=model_name)
    attack_res = SA.run()
    # attack_res = (success_flag, adversarial_record) — presumably; TODO confirm against SA.run().
    if (attack_res[0] == 1):
        num_success = num_success+1
        rec = list((attack_res[1].values.reshape(1,-1)).flatten())
        attack_set.append(rec)
        # NOTE(review): this appends the *adversarial* row to `orig` as well —
        # looks like it should append the original `record` instead; confirm intent.
        orig.append(rec)
print("======================+=======================")
print(i, " samples")
print(num_success, " samples success attack")
print(mis, "mis")
print(well, "well")
print('a', a)
# Persist the adversarial examples and the corresponding source rows.
attack_sets = pd.DataFrame(attack_set, columns=columns_names)
origs = pd.DataFrame(orig, columns=columns_names)
attack_sets.to_csv("Datasets/HATE/HATE_adv_"+ model_name +".csv",index=False)
# BUG FIX: the original wrote `attack_sets` to both files, so the originals
# dataframe (`origs`) was never saved; write it to the *_orig_* file instead.
origs.to_csv("Datasets/HATE/HATE_orig_"+ model_name +".csv",index=False)
| adiashk/search_AI_project | Simulated_Annealing.py | Simulated_Annealing.py | py | 17,737 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.linspac... |
9993622660 | import json
from django.core.management import call_command
from django.core.management.base import BaseCommand
from people.models import Person, Address
class Command(BaseCommand):
    """Management command that resets the database and loads sample people."""

    help = 'Loads sample data into the database'

    def handle(self, *args, **options):
        # Wipe existing rows first so the sample data is loaded into a clean DB.
        call_command('flush', '--noinput')

        with open('sample_data.json') as fh:
            records = json.load(fh)

        # Each record embeds its address; create the Address row first, then
        # the Person that points at it.
        for record in records:
            addr_fields = record.pop('address')
            addr = Address.objects.create(**addr_fields)
            Person.objects.create(address=addr, **record)

        self.stdout.write(self.style.SUCCESS('Successfully loaded sample data'))
| finlay422/challenge_project | people/management/commands/load_sample_data.py | load_sample_data.py | py | 734 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.core.management.call_command",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
},
{
... |
36568920733 | from django.urls import path, re_path
from . import views
app_name = 'adminapp'

# Admin-panel routes. `re_path` entries capture a slug-like primary key `pk`
# (and `pid` for gallery/page images). NOTE(review): the regexes are not
# anchored with ^/$, so they match anywhere in the path — confirm intended.
urlpatterns = [
    path('', views.login, name='login'),
    # Category CRUD
    path('category/add/', views.add_category, name='add_category'),
    path('article/add/', views.add_post),
    path('article/list/', views.post_list),
    path('category/list/', views.category_list),
    # Events
    path('event/list/', views.event_list),
    path('event/add/', views.add_event),
    # Pages / menus / banners
    path('page/list/', views.page_list),
    path('menu/list/', views.menu_list),
    path('banner/add/', views.add_banner),
    path('banner/list/', views.banner_list),
    re_path('banner/edit/(?P<pk>[a-zA-Z0-9_-]+)/', views.edit_banner),
    re_path('banner/delete/(?P<pk>[a-zA-Z0-9_-]+)/', views.delete_banner),
    re_path('category/delete/(?P<pk>[a-zA-Z0-9_-]+)/', views.delete_category),
    re_path('article/delete/(?P<pk>[a-zA-Z0-9_-]+)/', views.delete_post),
    re_path('category/edit/(?P<pk>[a-zA-Z0-9_-]+)/', views.edit_category),
    re_path('event/delete/(?P<pk>[a-zA-Z0-9_-]+)/', views.delete_event),
    re_path('article/edit/(?P<pk>[a-zA-Z0-9_-]+)/', views.edit_post),
    re_path('article/change_state/(?P<pk>[a-zA-Z0-9_-]+)/', views.post_state),
    re_path('event/edit/(?P<pk>[a-zA-Z0-9_-]+)/', views.edit_event),
    re_path('event/change_state/(?P<pk>[a-zA-Z0-9_-]+)/', views.event_state),
    path('admin_logout/', views.admin_logout),
    path('menu/add/', views.add_menu),
    re_path('menu/edit/(?P<pk>[a-zA-Z0-9_-]+)/', views.edit_menu),
    re_path('menu/delete/(?P<pk>[a-zA-Z0-9_-]+)/', views.delete_menu),
    re_path('menu/change_state/(?P<pk>[a-zA-Z0-9_-]+)/', views.menu_state),
    # Reorder menu entries up/down one level.
    re_path('menu/lvl-up/(?P<pk>[a-zA-Z0-9_-]+)/', views.menu_lvl_up),
    re_path('menu/lvl-down/(?P<pk>[a-zA-Z0-9_-]+)/', views.menu_lvl_down),
    re_path('delete_gal_img/(?P<pk>[a-zA-Z0-9_-]+)/(?P<pid>[a-zA-Z0-9_-]+)',
            views.delete_gal_image),
    re_path('delete_page_imgs/(?P<pk>[a-zA-Z0-9_-]+)/(?P<pid>[a-zA-Z0-9_-]+)',
            views.delete_page_images),
    path('page/add/', views.add_page),
    re_path('page/edit/(?P<pk>[a-zA-Z0-9_-]+)/', views.edit_page),
    re_path('page/delete/(?P<pk>[a-zA-Z0-9_-]+)/', views.delete_page),
    # AJAX photo endpoints used by the rich-text editor.
    path("ajax/photos/upload/", views.upload_photos, name="upload_photos"),
    path("ajax/photos/recent/", views.recent_photos, name="recent_photos"),
    path('change_password/', views.change_password),
]
| MicroPyramid/ngo-cms | admin/urls.py | urls.py | py | 2,514 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
35648526405 | from src.knowledge_graph import KGEntity, KGProperty
from .kgqa_dataset import KGQADataSet
from .kgqa_data import KGQAData
from typing import List
import logging
import json
class Mintaka(KGQADataSet):
    """Loader for the Mintaka KGQA dataset (JSON export)."""

    def load(self, path: str) -> List[KGQAData]:
        """Parse a Mintaka JSON file into a list of KGQAData records.

        Questions without answer data are skipped. Boolean answers are
        expanded to both textual spellings ("True"/"Yes", "False"/"No")
        so either phrasing counts as a correct answer.
        """
        datasets: List[KGQAData] = []
        with open(path, encoding='utf-8') as f:
            json_dict = json.load(f)
            for mintaka_data in json_dict:
                question_id = mintaka_data["id"]
                raw_question = mintaka_data["question"]
                answer_data = mintaka_data["answer"]["answer"]
                if not answer_data:
                    continue
                answers = []
                for answer in answer_data:
                    # isinstance() instead of `type(x) is ...` (idiomatic type checks).
                    if isinstance(answer, dict):
                        # Entity answer: use its English label.
                        answers.append(KGEntity(answer["label"]["en"]))
                    elif isinstance(answer, bool):
                        # Boolean answer: add both textual forms.
                        if answer:
                            answers.append(KGEntity("True"))
                            answers.append(KGEntity("Yes"))
                        else:
                            answers.append(KGEntity("False"))
                            answers.append(KGEntity("No"))
                    else:
                        # Numbers / strings: store their string form.
                        answers.append(KGEntity(str(answer)))
                datasets.append(KGQAData(question_id, raw_question, answers))
        # Lazy %-formatting so the string is only built when INFO is enabled.
        logging.info("number of parsed questions: %d", len(datasets))
        return datasets
{
"api_name": "kgqa_dataset.KGQADataSet",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "kgqa_data.KGQAData",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "json.load",
... |
3843330309 | import numpy
import numpy as np
import pandas as pd
import pygad
import tlsh
import json
from tools import featurer
import sys
import csv
import tensorflow as tf
from tensorflow import keras
from keras import layers
import filenames_modified as filenames
# Index (into the poison JSON) of the malware sample to craft poison for,
# passed on the command line.
MALWAREIDX = int(sys.argv[1])
BATCH_SIZE = 10

# ARM train/validation/test splits; the last two columns are (name, label).
arm_training = pd.read_csv(filenames.arm_training, header=None, index_col=False)
arm_validation = pd.read_csv(filenames.arm_validation, header=None, index_col=False)
arm_test = pd.read_csv(filenames.arm_test, header=None, index_col=False)

# Feature matrices: drop the trailing (name, label) columns.
dataset_arm_training = np.asarray(arm_training.drop(columns=arm_training.columns[-2:]))
dataset_arm_validation = np.asarray(arm_validation.drop(columns=arm_validation.columns[-2:]))
dataset_arm_test = np.asarray(arm_test.drop(columns=arm_test.columns[-2:]))
# Labels are the last column, names the second-to-last.
labels_arm_training = np.asarray(arm_training[arm_training.columns[-1]])
labels_arm_validation = np.asarray(arm_validation[arm_validation.columns[-1]])
labels_arm_test = np.asarray(arm_test[arm_test.columns[-1]])
names_arm_training = arm_training[arm_training.columns[-2]]
names_arm_validation = arm_validation[arm_validation.columns[-2]]
names_arm_test = arm_test[arm_test.columns[-2]]

# Malware records reserved for poisoning; the row at MALWAREIDX is the target.
df_arm_malware_forpoison = pd.read_csv(filenames.forpoison_arm_malware, header=None, index_col=False)
df_arm_malware_forpoison = df_arm_malware_forpoison.drop(columns=df_arm_malware_forpoison.columns[-2:])
topredict = np.asarray([df_arm_malware_forpoison.iloc[MALWAREIDX],])

# Module-level state consumed by myfunc()/fitness_func(); assigned later in the script.
malwareTLSH = ""
mybytes = ""
def myfunc(solution):
    """TLSH-hash the current benign file (`mybytes`) with the GA genome appended.

    The genome `solution` is serialised to raw bytes before hashing.
    """
    return str(tlsh.hash(mybytes + np.array(solution).tobytes()))
def fitness_func(solution, solution_idx):
    """GA fitness: inverse TLSH distance between the poisoned file and the target malware.

    A smaller TLSH diff (more similar hash) yields a higher fitness.
    """
    return 1 / tlsh.diff(malwareTLSH, myfunc(solution))
# --- GA hyper-parameters --------------------------------------------------
num_generations = 500
num_parents_mating = 8
sol_per_pop = 20
gene_type = numpy.uint8          # genes are raw byte values
init_range_low = 0
init_range_high = 255
stop_criteria = "saturate_200"   # stop after 200 generations without improvement

# Poison sizes as a percentage of each benign file, and how many benign files to use.
percents = [5, 10, 20]
benignnumbers = [30, 40]
BENIGNNUMBER = 50  # NOTE(review): overwritten by the loop below; initial value unused

with open(filenames.poisonJSON) as poison_json:
    poison = json.load(poison_json)

# TLSH of the target malware sample that the poison should mimic.
with open(filenames.dir_malware_arm + str(poison["arm"]["malware"][MALWAREIDX]), "rb") as malware:
    malwareread = malware.read()
    malwareTLSH = str(tlsh.hash(malwareread))

# NOTE(review): the loop variable shadows the module constant BENIGNNUMBER above.
for BENIGNNUMBER in benignnumbers:
    with open("{}genetic_idx-{}_bening-{}_percent-5-10-20.csv".format(filenames.dir_results, MALWAREIDX, BENIGNNUMBER), "w") as results_file:
        csv_writer_r = csv.writer(results_file, lineterminator="\n")
        for percent in percents:
            # One poison CSV per (percent, malware index): features of the poisoned benigns.
            with open(filenames.dir_poison_data_genetic + "percent_" + str(percent) + "_" + str(MALWAREIDX) + ".csv", "w") as f:
                csv_writer = csv.writer(f, lineterminator="\n")
                for i in range(BENIGNNUMBER):
                    print("*{}: {}% - {}*".format(str(MALWAREIDX), percent, i))
                    filename = str(poison["arm"]["benign"][i])
                    # `mybytes` is module-level state read by fitness_func()/myfunc().
                    with open(filenames.dir_bening_arm + filename, "rb") as benign:
                        mybytes = benign.read()
                    lenbytes = len(mybytes)
                    # Number of appended bytes = `percent` % of the benign file size.
                    num_genes = int(lenbytes * percent / 100)
                    ga = pygad.GA(num_generations=num_generations,
                                  num_parents_mating=num_parents_mating,
                                  fitness_func=fitness_func,
                                  sol_per_pop=sol_per_pop,
                                  num_genes=num_genes,
                                  gene_type=gene_type,
                                  init_range_low=init_range_low,
                                  init_range_high=init_range_high,
                                  stop_criteria=stop_criteria
                                  )
                    ga.run()
                    best_solution, best_fitness, best_idx = ga.best_solution()
                    # Persist the feature vector of the best poisoned benign file.
                    csv_writer.writerow(featurer(myfunc(best_solution)))
            # --- Evaluate the poison: fine-tune the base model on it --------
            file_poison_arm_BM = filenames.dir_poison_data_genetic + "percent_" + str(percent) + "_" + str(MALWAREIDX) + ".csv"
            poisoned_arm_training = pd.read_csv(file_poison_arm_BM, index_col=False, header=None)
            poisoned_arm_training_base = poisoned_arm_training.sample(frac=1)
            poisoned_arm_training_new = arm_training.append(poisoned_arm_training, ignore_index=True).sample(frac=1)
            dataset_poisoned_arm_training_base = np.asarray(
                poisoned_arm_training_base.drop(columns=poisoned_arm_training_base.columns[-2:]))
            dataset_poisoned_arm_training_new = np.asarray(
                poisoned_arm_training_new.drop(columns=poisoned_arm_training_new.columns[-2:]))
            labels_poisoned_arm_training_base = np.asarray(poisoned_arm_training_base[poisoned_arm_training_base.columns[-1]])
            labels_poisoned_arm_training_new = np.asarray(poisoned_arm_training_new[poisoned_arm_training_new.columns[-1]])
            # Fine-tune a pre-trained base model on the poison only ("modified" setting).
            base_model = keras.models.load_model(filenames.base_model)
            base_model.fit(dataset_poisoned_arm_training_base, labels_poisoned_arm_training_base, epochs=10, batch_size=BATCH_SIZE,
                           validation_data=(dataset_arm_validation, labels_arm_validation), verbose=0)
            [_, binary_accuracy_appended] = base_model.evaluate(dataset_arm_test, labels_arm_test, verbose=0)
            # Score on the target malware record: success means it now looks benign.
            [[predict_appended]] = base_model.predict(topredict, verbose=0)
            # (A train-from-scratch "NEW" variant existed here; it is currently disabled.)
            results = [percent,
                       binary_accuracy_appended,
                       predict_appended]
            print(results)
            csv_writer_r.writerow(results)
print("{} DONE".format(MALWAREIDX))
{
"api_name": "sys.argv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "filenames_modified.arm_training",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pa... |
23108069207 | from flask import Flask, app
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
# Module-level SQLAlchemy handle; bound to the app in create_app() via
# db.init_app() so models can import it without a circular dependency.
db = SQLAlchemy()
def create_app():
    """Application factory: configure Flask, the database and login handling."""
    flask_app = Flask(__name__)
    flask_app.config.update(
        SECRET_KEY='9OLWxND4o83j4K4iuopO',
        SQLALCHEMY_DATABASE_URI='sqlite:///db.sqlite',
        SQLALCHEMY_TRACK_MODIFICATIONS=False,
    )

    # Bind the module-level SQLAlchemy handle to this app instance.
    db.init_app(flask_app)

    # Session management: unauthenticated users are sent to the auth blueprint.
    login_manager = LoginManager()
    login_manager.login_view = 'auth.login'
    login_manager.init_app(flask_app)

    # Imported here to avoid circular imports at module load time.
    from .models import User
    from . import auth
    flask_app.register_blueprint(auth.bp)

    @login_manager.user_loader
    def load_user(id):
        # The user_id is the primary key of the user table.
        return User.query.get(id)

    return flask_app
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_login.LoginManager",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "models... |
7773371889 | import random
import time
from pathlib import Path
from typing import Any
import numpy as np
from midi.decode import get_array_of_notes
from midi.encode import get_file_from_standard_features
from models.music_model import MusicModel, ProgressCallback, ProgressMetadata
class MarkovChain(MusicModel):
    """n-gram Markov chain over MIDI note tuples.

    Tracks are tokenised into tuples of active note numbers per time step;
    transition probabilities map each n-gram to a distribution over the
    next token.
    """

    n_gram_size: int

    def __init__(self, n_gram_size: int = 1) -> None:
        self.data: list = []                 # per-track lists of note tokens
        self.tokens: set = set()             # unique note tuples seen
        self.n_grams: set = set()            # unique n-grams of tokens
        self.tokens_list: list[tuple] = []
        self.n_grams_list: list[tuple] = []
        # Array of dicts: probabilities[i] maps next-token -> probability
        # for n-gram self.n_grams_list[i]. Populated by train()/load().
        self.probabilities: np.ndarray
        self.n_gram_size = n_gram_size

    def train(self, epochs: int | None, x_train: Any, y_train: Any, progress_callback: ProgressCallback,
              checkpoint_path: Path | None = None) -> None:
        """Count follower tokens for every n-gram and turn counts into probabilities.

        `epochs`, `x_train`, `y_train` and `checkpoint_path` are unused; data
        comes from self.data (filled by prepare_data/create_dataset).
        """
        n = len(self.n_grams_list[0])
        # One follower-list per known n-gram.
        n_gram_next: np.ndarray = np.ndarray(
            (len(self.n_grams_list, )), dtype=object)
        for i in range(n_gram_next.shape[0]):
            n_gram_next[i] = []
        start = time.time()
        time.perf_counter()  # NOTE(review): return value discarded — no effect?
        for i in range(len(self.data)):
            elapsed = time.time() - start
            progress_callback([(elapsed, 100 * i / len(self.data))])
            for j in range(len(self.data[i]) - 1 - self.n_gram_size):
                curr_n_gram = tuple(self.data[i][j:j + n])
                next_note = self.data[i][j + n]
                # O(len) list lookup per step — slow for large corpora.
                n_gram_next[self.n_grams_list.index(
                    curr_n_gram)].append(next_note)
        elapsed = time.time() - start
        progress_callback([(elapsed, 100)])
        self.probabilities = np.ndarray(
            (len(self.n_grams_list, )), dtype=object)
        for i in range(n_gram_next.shape[0]):
            self.probabilities[i] = {}
        len_n_gram_next = len(n_gram_next)
        for i in range(len_n_gram_next):
            for j in range(len(n_gram_next[i])):
                if len(n_gram_next[i]) <= 1:
                    # NOTE(review): indexes `probabilities` with a *list*
                    # (fancy indexing on a copy) — likely intended
                    # self.probabilities[i][n_gram_next[i][j]] = 1; confirm.
                    self.probabilities[n_gram_next[i]][j] = 1
                else:
                    # Probability = relative frequency of this follower.
                    if self.probabilities[i].get(n_gram_next[i][j]) is None:
                        self.probabilities[i][n_gram_next[i][j]] = float(
                            n_gram_next[i].count(n_gram_next[i][j]) / len(n_gram_next[i]))

    def create_dataset(self, dataset: list[tuple[Any, Any]]) -> tuple[Any, Any]:
        """Tokenise loaded data and build n-grams; returns dummy (0, 0)."""
        self.generate_tokens()
        self.generate_n_grams(self.n_gram_size)
        return 0, 0

    def generate_tokens(self) -> None:
        """Convert each 128-entry note-activity row into a tuple of active note numbers."""
        for i in range(len(self.data)):
            for j in range(len(self.data[i])):
                notes = []
                for k in range(128):
                    if self.data[i][j][k]:
                        notes.append(k)
                self.data[i][j] = tuple(notes)
                self.tokens.add(tuple(notes))

    def prepare_data(self, midi_file: Path) -> tuple[Any, Any]:
        """Append every track of `midi_file` to self.data; returns dummy (0, 0)."""
        data_lines = get_array_of_notes(midi_file, False, False)
        for i in range(len(data_lines)):  # serialize tracks
            self.data.append(data_lines[i].tolist())
        return 0, 0

    def save(self, path: Path) -> None:
        """Persist probabilities and tokens to an .npz archive."""
        np.savez(path, probabilities=np.asarray(self.probabilities, dtype=object),
                 tokens=np.asarray(self.tokens_list, dtype=object))

    def load(self, path: Path) -> None:
        """Restore probabilities and tokens saved by save()."""
        path = path if path.name.endswith('.npz') else path.with_suffix('.npz')
        data = np.load(path, allow_pickle=True)
        self.probabilities = data['probabilities']
        self.tokens_list = data['tokens']

    def generate_n_grams(self, n: int) -> None:
        """Collect all length-n windows over the tokenised tracks."""
        print("Generating " + str(n) + "-grams")
        for i in range(len(self.data)):
            for j in range(len(self.data[i]) - n + 1):
                self.n_grams.add(tuple(self.data[i][j:j + n]))
        self.tokens_list = list(self.tokens)
        self.n_grams_list = list(self.n_grams)
        print(str(len(self.n_grams_list)) + " " + str(n) + "-grams generated!")

    def model_summary(self) -> str:
        """Human-readable summary of model size."""
        return ("Markov chain basing on " +
                str(self.n_gram_size) + "-grams:\n" +
                str(len(self.tokens_list)) + " tokens\n" +
                str(len(self.n_grams_list)) + " n_grams\n" +
                str(len(self.data)) + " files")

    def generate(self, path: Path, seed: int | None = None) -> None:
        """Sample a 512-step sequence from a random start token and write a MIDI file."""
        assert len(self.tokens_list) > 0, "Model was not initiated with data"
        if seed is not None:
            random.seed(seed)
        result = self.predict(random.choice(self.tokens_list), 512, False, 0)
        get_file_from_standard_features(
            result, 1000000, path, False, True, False, [8 for _ in result])

    def predict(self, initial_notes: tuple, length: int, deterministic: bool, rand: int) -> np.ndarray:
        """Generate a (length, 128) boolean piano-roll starting from `initial_notes`.

        deterministic - if True, always pick the most probable next token;
                        if False, sample from the follower distribution.
        rand - percent chance [0;100] of picking a uniformly random token instead.
        """
        prediction = []
        previous_n_gram = initial_notes
        for i in range(len(initial_notes)):
            prediction.append(initial_notes[i])
        # Generate the remaining steps after the seed tokens.
        for i in range(length - len(self.tokens_list[0])):
            idx = None
            if tuple(previous_n_gram) in self.n_grams:
                idx = self.n_grams_list.index(previous_n_gram)
            else:
                # Unknown context: fall back to a random known n-gram.
                idx = random.randrange(len(self.probabilities))
            probs = self.probabilities[idx]
            # Skip n-grams that have no recorded followers.
            while len(probs) == 0:
                idx = random.randrange(len(self.probabilities))
                probs = self.probabilities[idx]
            next_note = None
            if random.randrange(100) < rand:
                next_note = random.choice(self.tokens_list)
            elif deterministic:
                next_note = max(probs, key=probs.get)
            else:
                next_note = random.choices(
                    list(probs.keys()), weights=probs.values(), k=1)[0]
            prediction.append(next_note)
            if next_note is not None:
                previous_n_gram = next_note
        # Expand the token sequence into a boolean (steps, 128) piano roll.
        result = np.full((len(prediction), 128), False)
        for i in range(len(prediction)):
            if isinstance(prediction[i], int):
                result[i][prediction[i]] = True
            else:
                for j in range(len(prediction[i])):
                    note = prediction[i][j]
                    result[i][note] = True
        return result

    @staticmethod
    def get_progress_metadata() -> ProgressMetadata:
        return ProgressMetadata(x_label='Time [s]', y_label='Progress [%]', legends=['Markov Chain'])
| piotrowskv/music_generation | models/models/markov_chain/markov_chain.py | markov_chain.py | py | 6,790 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.music_model.MusicModel",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "typing.Any",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "models.mu... |
71877366824 | #!/usr/bin/env python
# coding: utf-8
import sys
import io
import json
import numpy as np
from matplotlib import pyplot as plt
from tensorflow import keras
import tensorflow as tf
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from pathlib import Path
import cv2
import skimage
from tensorflow.keras.applications import ResNet50V2, ResNet50
from tensorflow.keras.regularizers import l2
from tensorflow.keras import layers
from tensorflow.keras.layers import Input, Conv2DTranspose
from tensorflow.keras.layers import concatenate
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Dense, Flatten, MaxPooling2D, BatchNormalization, Conv2D, Dropout, LeakyReLU
from tensorflow.keras.regularizers import l2
from adamp_tf import AdamP
from sgdp_tf import SGDP
from collections import Callable
import time
from tensorflow.keras import backend as K
# Allow TF to grow GPU memory on demand instead of reserving it all up front.
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
# Probe GPU availability (return values intentionally unused; logs device info).
tf.test.is_gpu_available()
tf.config.list_physical_devices('GPU')
# Mixed precision: float16 compute with float32 variables to save GPU memory.
tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
from skimage.transform import resize
import albumentations as A
def augment_img_mask(x, y):
    """Apply the same random flip/elastic augmentation to an image and its mask."""
    pipeline = A.Compose([
        A.VerticalFlip(p=0.5),
        A.HorizontalFlip(p=0.5),
        A.ElasticTransform(p=0.5, alpha=240, sigma=240 * 0.05, alpha_affine=240 * 0.03),
    ])
    # Albumentations keeps image and mask spatially aligned.
    augmented = pipeline(image=x, mask=y)
    return augmented['image'], augmented['mask']
class DataGeneratorDivide(tf.keras.utils.Sequence):
    'Generates (image, mask) batches for Keras, split by slice number into train/val'
    def __init__(self, path_to_dataset, batch_size=32,
                 shuffle=True, use_augmentations=False,
                 mode='train', val_percent=0.3):
        """
        path_to_dataset: directory with 'images/' and 'gt/' subfolders of .npy files
        mode: 'train' or 'val' — selects which slice numbers this generator serves
        val_percent: fraction of slice numbers reserved for validation
        """
        self.batch_size = batch_size
        self.path_to_dataset = path_to_dataset
        self.val_percent = val_percent
        self.mode = mode
        self.initialize()
        self.shuffle = shuffle
        self.on_epoch_end()
        self.use_aug = use_augmentations

    def __len__(self):
        'Denotes the number of batches per epoch (last partial batch is dropped)'
        return int(np.floor(len(self.X) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Indices of the samples in this batch.
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        X, Y = self.__data_generation(indexes)
        return X, Y

    def initialize(self):
        # Slice numbers are the last '_'-separated token of each gt filename.
        slice_nums = list(set(
            int(file.name.split('_')[-1].split('.')[0]) for file in (self.path_to_dataset / 'gt').iterdir()
        ))
        slice_nums = sorted(list(slice_nums))
        num_of_slices = len(slice_nums)
        val_num = int(num_of_slices * self.val_percent)
        # First val_num slice numbers go to validation, the rest to training.
        if self.mode == 'train':
            curr_slices_to_use = slice_nums[val_num:]
        else:
            curr_slices_to_use = slice_nums[:val_num]
        self.curr_slices_to_use = curr_slices_to_use
        # X holds image paths; Y the matching ground-truth paths (same filename).
        self.X, self.Y = [], []
        for file in (self.path_to_dataset / 'images').iterdir():
            slice_num = int(file.name.split('_')[-1].split('.')[0])
            if slice_num in self.curr_slices_to_use:
                self.X.append(file)
                self.Y.append(self.path_to_dataset / 'gt' / file.name)

    def on_epoch_end(self):
        'Updates (and optionally reshuffles) sample indexes after each epoch'
        self.indexes = np.arange(len(self.X))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, indexes):
        'Generates data containing batch_size samples'
        # Load .npy image/mask pairs; assumed 2-D (H, W) arrays — TODO confirm.
        X = [np.load(self.X[ind]) for ind in indexes]
        Y = [np.load(self.Y[ind]) for ind in indexes]
        max_w, max_h = 256, 256
        for i, img in enumerate(X):
            w, h = X[i].shape  # NOTE(review): w/h unused after the resize below
            # Resize to the fixed model input size, keeping original value range.
            X[i] = resize(X[i], (256, 256), preserve_range=True)
            Y[i] = resize(Y[i], (256, 256), preserve_range=True)
            if self.use_aug:
                X[i], Y[i] = augment_img_mask(X[i], Y[i])
        # Stack and add the trailing channel axis -> (batch, 256, 256, 1).
        X, Y = np.array(X)[:, :, :, np.newaxis], np.array(Y)[:, :, :, np.newaxis]
        return X, Y
def get_model(
        weight_decay=0.0001,
        start_neuron_number=16
):
    """Build a U-Net-style encoder/decoder for 256x256 single-channel images.

    weight_decay: L2 regularisation factor applied to most conv kernels.
    start_neuron_number: filters in the first level; doubled at each depth.
    Returns an uncompiled keras.Model with a linear float32 output head
    (float32 so the head is numerically safe under mixed precision).
    """
    keras.backend.clear_session()
    wd_reg = l2(weight_decay)
    inputs = Input((256, 256, 1))
    x = inputs
    # --- Encoder -----------------------------------------------------------
    c1 = Conv2D(start_neuron_number * 1, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (x)
    c1 = BatchNormalization()(c1)
    c1 = Conv2D(start_neuron_number * 1, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (c1)
    p1 = MaxPooling2D((2, 2)) (c1)

    c2 = Conv2D(start_neuron_number * 2, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (p1)
    c2 = BatchNormalization()(c2)
    c2 = Conv2D(start_neuron_number * 2, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (c2)
    p2 = MaxPooling2D((2, 2)) (c2)

    c3 = Conv2D(start_neuron_number * 4, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (p2)
    c3 = BatchNormalization()(c3)
    c3 = Conv2D(start_neuron_number * 4, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (c3)
    p3 = MaxPooling2D((2, 2)) (c3)

    c4 = Conv2D(start_neuron_number * 8, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (p3)
    c4 = BatchNormalization()(c4)
    c4 = Conv2D(start_neuron_number * 8, (3, 3), activation='relu', kernel_initializer='he_normal',kernel_regularizer=wd_reg, padding='same') (c4)
    p4 = MaxPooling2D(pool_size=(2, 2)) (c4)

    # --- Bottleneck --------------------------------------------------------
    c5 = Conv2D(start_neuron_number * 8, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (p4)
    c5 = BatchNormalization()(c5)
    c5 = Conv2D(start_neuron_number * 8, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (c5)

    # --- Decoder with skip connections ------------------------------------
    u6 = Conv2DTranspose(start_neuron_number * 8, (4, 4), strides=(2, 2), padding='same', kernel_regularizer=wd_reg) (c5)
    u6 = concatenate([u6, c4])
    c6 = Conv2D(start_neuron_number * 8, (3, 3), activation='relu', kernel_initializer='he_normal',kernel_regularizer=wd_reg, padding='same') (u6)
    c6 = BatchNormalization()(c6)
    c6 = Conv2D(start_neuron_number * 8, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (c6)

    u7 = Conv2DTranspose(start_neuron_number * 4, (4, 4), strides=(2, 2), padding='same', kernel_regularizer=wd_reg) (c6)
    u7 = concatenate([u7, c3])
    u7 = Dropout(0.2)(u7)
    c7 = Conv2D(start_neuron_number * 4, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (u7)
    c7 = Conv2D(start_neuron_number * 4, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (c7)

    u8 = Conv2DTranspose(start_neuron_number * 2, (4, 4), strides=(2, 2), padding='same', kernel_regularizer=wd_reg) (c7)
    u8 = concatenate([u8, c2])
    u8 = Dropout(0.2)(u8)
    c8 = Conv2D(start_neuron_number * 2, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (u8)
    c8 = Conv2D(start_neuron_number * 2, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (c8)

    u9 = Conv2DTranspose(start_neuron_number, (4, 4), strides=(2, 2), padding='same') (c8)
    u9 = concatenate([u9, c1], axis=3)
    u9 = Dropout(0.2)(u9)
    c9 = Conv2D(start_neuron_number, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (u9)
    c9 = BatchNormalization()(c9)
    c9 = Conv2D(start_neuron_number, (3, 3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=wd_reg, padding='same') (c9)

    # Linear float32 regression head (inpainting output, not a probability map).
    outputs = Conv2D(1, (1, 1), activation='linear', dtype='float32') (c9)

    model = keras.Model(inputs=[inputs], outputs=[outputs])
    return model
def l1(y_true, y_pred):
    """Per-sample mean absolute error over all non-batch axes.

    Accepts 4-D (B, H, W, C) or 3-D (B, H, W) tensors; anything else raises.
    """
    rank = K.ndim(y_true)
    if rank not in (3, 4):
        raise NotImplementedError("Calculating L1 loss on 1D tensors? should not occur for this network")
    reduce_axes = [1, 2, 3] if rank == 4 else [1, 2]
    return K.mean(K.abs(y_pred - y_true), axis=reduce_axes)
def compute_perceptual(vgg_out, vgg_gt):
    """Perceptual loss: summed L1 distance between paired feature maps (eq. 3)."""
    return sum(l1(out_feat, gt_feat) for out_feat, gt_feat in zip(vgg_out, vgg_gt))
def gram_matrix(x, norm_by_channels=False):
    """Gram matrix of a (B, H, W, C) feature tensor, used by the style loss.

    NOTE(review): the `norm_by_channels` flag is never read — normalisation
    by C*H*W is always applied below; confirm whether the flag can be dropped.
    """
    assert K.ndim(x) == 4, 'Input tensor should be a 4d (B, H, W, C) tensor'
    assert K.image_data_format() == 'channels_last', "Please use channels-last format"

    # Move channels first so each channel becomes one row of the feature matrix.
    x = K.permute_dimensions(x, (0, 3, 1, 2))
    shape = K.shape(x)
    B, C, H, W = shape[0], shape[1], shape[2], shape[3]

    # Flatten spatial dims and take the batched outer product of channel rows.
    features = K.reshape(x, K.stack([B, C, H*W]))
    gram = K.batch_dot(features, features, axes=2)

    # Normalize with channels, height and width.
    gram = gram / K.cast(C * H * W, x.dtype)

    return gram
def compute_style(vgg_out, vgg_gt):
    """Style loss: summed L1 distance between Gram matrices of paired features (eq. 4/5)."""
    return sum(l1(gram_matrix(out_feat), gram_matrix(gt_feat))
               for out_feat, gt_feat in zip(vgg_out, vgg_gt))
def get_extracted_values(feature_extractor, y_true, y_pred):
    """Run the extractor on both tensors and cast the features to float32.

    Returns (features of y_pred, features of y_true) — i.e. prediction
    features first — matching the order the loss functions expect.
    """
    from_true = feature_extractor(y_true)
    from_pred = feature_extractor(y_pred)
    # Single-output extractors return a tensor; normalise to a list.
    if not isinstance(from_true, list):
        from_true = [from_true]
        from_pred = [from_pred]
    # Under mixed precision the extractor may emit float16; losses want float32.
    from_true = [K.cast(t, 'float32') for t in from_true]
    from_pred = [K.cast(t, 'float32') for t in from_pred]
    return from_pred, from_true
def compute_loss_tv(P):
    """Total variation loss: L1 of vertical and horizontal neighbour differences."""
    vertical = l1(P[:, 1:, :, :], P[:, :-1, :, :])
    horizontal = l1(P[:, :, 1:, :], P[:, :, :-1, :])
    return vertical + horizontal
def loss_total(
    feature_extractor_content,
    feature_extractor_style
):
    """
    Build an inpainting loss closure from content/style feature extractors.

    NOTE(review): despite the original description ("sums all the loss
    components and multiplies by their weights"), the returned closure
    yields the *individual* components as a 5-tuple — weighting/summing is
    presumably done by the caller; confirm.
    """
    def loss(y_true, y_pred):
        y_true = K.cast(y_true, 'float32')
        # Hole mask: a fixed rectangle (plus a 20-px border) is assumed to be
        # the inpainting region for every sample — TODO confirm coordinates.
        mask = np.zeros(y_true.shape)
        xmin, xmax, ymin, ymax = (55, 200, 86, 169)
        mask[:, xmin-20:xmax+20, ymin-20:ymax+20] = 1
        mask = K.cast(mask, 'float32')

        # Content/style features of prediction (…_gt_*) and ground truth (…_out_*).
        vgg_gt_c, vgg_out_c = get_extracted_values(feature_extractor_content, y_true, y_pred)
        vgg_gt_s, vgg_out_s = get_extracted_values(feature_extractor_style, y_true, y_pred)

        loss_mae_hole = l1(mask * y_true, mask * y_pred)
        loss_mae_valid = l1((1 - mask) * y_true, (1 - mask) * y_pred)
        loss_perceptual = compute_perceptual(vgg_out_c, vgg_gt_c)
        loss_style = compute_style(vgg_out_s, vgg_gt_s)
        loss_tv_val = compute_loss_tv(P=mask * y_pred)

        # Return the individual components (valid, hole, perceptual, style, TV).
        return loss_mae_valid, loss_mae_hole, loss_perceptual, loss_style, loss_tv_val
    return loss
def make_linear_lr(min_lr, max_lr, number_of_steps):
    """Return a schedule ramping linearly from min_lr (step 0) to max_lr (step number_of_steps)."""
    slope = (max_lr - min_lr) / number_of_steps

    def gen_lr(step):
        return slope * step + min_lr
    return gen_lr
def make_cosine_anneal_lr(learning_rate, alpha, decay_steps):
    """Return ``gen_lr(step)``: cosine decay from ``learning_rate`` down to
    ``alpha * learning_rate`` over ``decay_steps`` steps, constant afterwards."""
    def gen_lr(global_step):
        # Clamp so the schedule stays flat once decay_steps is reached.
        step = tf.cast(tf.minimum(global_step, decay_steps), tf.float32)
        # 3.1415926 kept verbatim (the author's note: changed np.pi to 3.14...).
        cosine_decay = 0.5 * (1 + tf.math.cos(3.1415926 * step / decay_steps))
        decayed = (1 - alpha) * cosine_decay + alpha
        return learning_rate * decayed
    return gen_lr
def make_cosine_annealing_with_warmup(min_lr, max_lr, number_of_steps, alpha, decay_steps):
    """Compose a linear warm-up (first ``number_of_steps`` steps) with a
    cosine-annealing tail; the blend is done with a 0/1 indicator so it
    works on tensor steps."""
    warmup_fn = make_linear_lr(min_lr, max_lr, number_of_steps)
    anneal_fn = make_cosine_anneal_lr(max_lr, alpha, decay_steps)

    def gen_lr(global_step):
        in_warmup = tf.cast(global_step < number_of_steps, tf.float32)
        return (in_warmup * warmup_fn(global_step)
                + (1. - in_warmup) * anneal_fn(global_step - number_of_steps))

    return gen_lr
class CosineAnnealingWithWarmUP(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Keras learning-rate schedule: linear warm-up, then cosine annealing."""

    def __init__(self, min_lr, max_lr, number_of_steps, alpha, decay_steps):
        super().__init__()
        # Stored so get_config() can serialize/restore the schedule.
        self.min_lr = min_lr
        self.max_lr = max_lr
        self.number_of_steps = number_of_steps
        self.alpha = alpha
        self.decay_steps = decay_steps
        self.gen_lr_ca = make_cosine_annealing_with_warmup(
            min_lr, max_lr, number_of_steps, alpha, decay_steps)

    def __call__(self, step):
        # Delegate to the composed warm-up + annealing function.
        return self.gen_lr_ca(step)

    def get_config(self):
        return {
            'min_lr': self.min_lr,
            'max_lr': self.max_lr,
            'number_of_steps': self.number_of_steps,
            'alpha': self.alpha,
            'decay_steps': self.decay_steps,
        }
def choose_optimizer(
        optimizer_name='Adam',
        learning_rate_fn=0.001
):
    """Map ``optimizer_name`` to an optimizer instance constructed with
    ``learning_rate_fn`` (a float or an LR schedule). Unknown names fall
    back to SGDP."""
    if optimizer_name == 'Adam':
        optimizer_cls = tf.keras.optimizers.Adam
    elif optimizer_name == 'SGD':
        optimizer_cls = tf.keras.optimizers.SGD
    elif optimizer_name == 'AdamP':
        optimizer_cls = AdamP
    else:
        print('Choosing SGDP')
        optimizer_cls = SGDP
    return optimizer_cls(learning_rate_fn)
def choose_learning_rate_func(
        type_lr_func='constant', max_lr = 0.001,
        warmup_steps = 900, max_number_of_steps = 60_000,
        epochs=60
):
    """Return either a constant learning rate (``max_lr``) or a
    warm-up + cosine-annealing schedule. ``epochs`` is accepted for
    signature compatibility but not used here."""
    if type_lr_func == 'constant':
        return max_lr
    return CosineAnnealingWithWarmUP(.0000001, max_lr, warmup_steps, 0, max_number_of_steps)
def plot_to_image(figure):
    """Converts the matplotlib plot specified by 'figure' to a PNG image and
    returns it. The supplied figure is closed and inaccessible after this call.

    Returns a uint8 tensor of shape (1, height, width, 4) — RGBA with a
    leading batch dimension, ready for tf.summary.image.
    NOTE(review): plt.savefig writes the *current* figure; this works when
    ``figure`` is the most recently created one — confirm at the call sites.
    """
    # Save the plot to a PNG in memory.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    # Closing the figure prevents it from being displayed directly inside
    # the notebook.
    plt.close(figure)
    buf.seek(0)
    # Convert PNG buffer to TF image
    image = tf.image.decode_png(buf.getvalue(), channels=4)
    # Add the batch dimension
    image = tf.expand_dims(image, 0)
    return image
def main(params):
    """End-to-end inpainting training loop driven by a JSON ``params`` dict.

    Loads a frozen pretrained autoencoder as the perceptual/style feature
    extractor, builds the trainable model, then per epoch: logs one batch of
    validation predictions to TensorBoard, trains over ``dg_train`` with a
    manual GradientTape loop, and evaluates loss + correlation on ``dg_val``.
    """
    # --- unpack hyper-parameters from the config dict ---------------------
    weight_decay = params['weight_decay']
    start_neuron_number = params['start_neuron_number']
    optimizer_name = params['optimizer_name']
    type_lr_func = params['type_lr_func']
    max_lr = params['max_lr']
    warmup_steps = params['warmup_steps']
    max_number_of_steps = params['max_number_of_steps']
    epochs = params['epochs']
    save_model_tensorboard = params['save_model_tensorboard']
    style_layer_names = params['style_layer_names']
    content_layer_name = params['content_layer_name']
    mae_valid_weight = params['mae_valid_weight']
    mae_hole_weight = params['mae_hole_weight']
    perceptual_weight = params['perceptual_weight']
    style_weight = params['style_weight']
    tv_weight = params['tv_weight']
    model = get_model(weight_decay, start_neuron_number)
    path_to_dataset = Path('./dataset')
    # Frozen pretrained autoencoder used only as a feature extractor.
    autoencoder = tf.keras.models.load_model('./best_weights_24.h5', compile=False)
    autoencoder.trainable = False
    feature_extractor_style = keras.Model(
        inputs=autoencoder.input,
        outputs=[autoencoder.get_layer(l).output for l in style_layer_names]
    )
    feature_extractor_content = keras.Model(
        inputs=autoencoder.input,
        outputs=[autoencoder.get_layer(content_layer_name).output]
    )
    optimizer = choose_optimizer(
        optimizer_name,
        choose_learning_rate_func(type_lr_func, max_lr, warmup_steps, max_number_of_steps, epochs)
    )
    dg_train = DataGeneratorDivide(
        path_to_dataset, mode='train',
        val_percent=0.2, use_augmentations=True,
        batch_size=6
    )
    dg_val = DataGeneratorDivide(path_to_dataset, mode='val', val_percent=0.2, batch_size=6)
    writer = tf.summary.create_file_writer(save_model_tensorboard)
    global_step = 0
    for ind in range(epochs):
        # Checkpoint at the START of the epoch, i.e. before this epoch's updates.
        model.save(f'./{save_model_tensorboard}.h5')
        print(f'{ind} epoch')
        dg_train.on_epoch_end()
        # NOTE(review): the inner loops below reuse the name ``ind``; the epoch
        # counter is re-bound by ``range`` each iteration so the print above
        # stays correct, but renaming the inner index would aid readability.
        for ind, (x, y) in enumerate(dg_val):
            # Only the first validation batch is visualized.
            if ind == 1:
                break
            prediction = model.predict(x)
            fig, axes = plt.subplots(1, 3, figsize=(10, 5))
            for pred, x_, y_ in zip(prediction, x, y):
                axes[0].imshow(pred, cmap='gray')
                axes[1].imshow(x_, cmap='gray')
                axes[2].imshow(y_, cmap='gray')
            # plt.show()
            with writer.as_default():
                tf.summary.image("Val data", plot_to_image(fig), step=global_step)
        start = time.time()
        # --- training loop ------------------------------------------------
        for step_num, (inputs, targets) in enumerate(dg_train):
            global_step += 1
            with tf.GradientTape() as tape:
                predictions = model(inputs)
                func = loss_total(feature_extractor_content, feature_extractor_style)
                loss_value_list = func(targets, predictions)
                # Weighted sum of the five raw components (eq. 7 of the paper).
                loss_value =\
                    mae_valid_weight * loss_value_list[0] +\
                    mae_hole_weight * loss_value_list[1] +\
                    perceptual_weight * loss_value_list[2] +\
                    style_weight * loss_value_list[3] +\
                    tv_weight * loss_value_list[4]
            gradients = tape.gradient(loss_value, model.trainable_weights)
            optimizer.apply_gradients(zip(gradients, model.trainable_weights))
            # Log the individual components every 10 steps.
            if step_num % 10 == 0:
                with writer.as_default():
                    tf.summary.scalar("loss_train", loss_value.numpy().mean(), step=global_step)
                    tf.summary.scalar("loss_train_mae_valid", loss_value_list[0].numpy().mean(), step=global_step)
                    tf.summary.scalar("loss_train_mae_hole", loss_value_list[1].numpy().mean(), step=global_step)
                    tf.summary.scalar("loss_train_percept" , loss_value_list[2].numpy().mean(), step=global_step)
                    tf.summary.scalar("loss_train_style", loss_value_list[3].numpy().mean(), step=global_step)
                    tf.summary.scalar("loss_train_tv", loss_value_list[4].numpy().mean(), step=global_step)
                    # The LR may be a schedule (callable) or a plain variable.
                    if isinstance(optimizer.lr, Callable):
                        cur_lr = optimizer.lr(global_step).numpy()
                    else:
                        cur_lr = optimizer.lr.numpy()
                    tf.summary.scalar("learning_rate", cur_lr, step=global_step)
                    writer.flush()
        end = time.time()
        print(f'Training took {end - start}')
        start = time.time()
        # --- validation loop ----------------------------------------------
        val_loss_value = 0
        corr_coef_value = 0
        batch_num = 0
        for step_num, (inputs, targets) in enumerate(dg_val):
            predictions = model(inputs)
            corr_coefs = []
            for pred, x_, y_ in zip(predictions, inputs, targets):
                # The hole is located where the input is ~0; correlation is
                # measured only on the hole region (padded by 10 px).
                xmin, xmax = min(np.where(x_ < 0.001)[0]), max(np.where(x_ < 0.001)[0])
                ymin, ymax = min(np.where(x_ < 0.001)[1]), max(np.where(x_ < 0.001)[1])
                y_ = y_[xmin-10:xmax+10, ymin-10:ymax+10]
                pred = pred[xmin-10:xmax+10, ymin-10:ymax+10]
                corr_coef = np.corrcoef(y_.ravel(), pred.numpy().ravel())[0, 1]
                corr_coefs.append(corr_coef)
            corr_coef_value += np.mean(corr_coefs)
            func = loss_total(feature_extractor_content, feature_extractor_style)
            loss_value_list = func(targets, predictions)
            loss_value =\
                mae_valid_weight * loss_value_list[0] +\
                mae_hole_weight * loss_value_list[1] +\
                perceptual_weight * loss_value_list[2] +\
                style_weight * loss_value_list[3] +\
                tv_weight * loss_value_list[4]
            val_loss_value += loss_value.numpy().mean()
            batch_num += 1
        with writer.as_default():
            tf.summary.scalar("loss_val", val_loss_value / batch_num, step=global_step)
            tf.summary.scalar("corr_coeff_val", corr_coef_value / batch_num, step=global_step)
            writer.flush()
        end = time.time()
        print(f'Val took {end - start}')
if __name__ == '__main__':
    # Usage: python <script>.py <config.json>
    # All hyper-parameters for main() come from the JSON file.
    path_to_json = sys.argv[1]
    with open(path_to_json, 'r') as f:
        params = json.load(f)
    main(params)
| DanilKonon/Seismic_Data_Inpainting | unet_autoencoder.py | unet_autoencoder.py | py | 22,825 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.compat.v1.ConfigProto",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.InteractiveSession",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tensorflow.test.is_gpu_available",
"line_number": 34,
"usage_typ... |
2286169944 | """
Created on Sat Sep 25 00:00:00 2018
@author: Nikhil
"""
"""
If you have any questions or suggestions regarding this script,
feel free to contact me via nikhil.ss4795@gmail.com
"""
# Polynomial Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset: position level (column 1) as the single feature,
# salary (column 2) as the target.
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# Quick look at the raw data
plt.scatter(X, y, color = 'red')
plt.title('Salary vs Experience')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Fitting Linear Regression to the dataset
from sklearn.linear_model import LinearRegression
linear_reg = LinearRegression()
linear_reg.fit(X, y)
# Visualising the Linear Regression results
plt.scatter(X, y, color = 'red')
plt.plot(X, linear_reg.predict(X), color = 'blue')
plt.title('Salary vs Experience')
plt.xlabel('Experience')
plt.ylabel('Salary')
plt.show()
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
poly_features = PolynomialFeatures(degree = 4)
X_polynomial = poly_features.fit_transform(X)
# (A redundant ``poly_features.fit(X_polynomial, y)`` call was removed here:
# fit_transform above already fits the transformer, and refitting it on the
# expanded matrix would corrupt its learned input width.)
polynomial_regression = LinearRegression()
polynomial_regression.fit(X_polynomial, y)
# Visualising the Polynomial Regression results
plt.scatter(X, y, color = 'red')
plt.plot(X, polynomial_regression.predict(poly_features.fit_transform(X)), color = 'blue')
plt.title('Salary vs Experience')
plt.xlabel('Experience')
plt.ylabel('Salary')
plt.show()
# Predicting a new result with Linear Regression.
# sklearn estimators expect a 2-D array of shape (n_samples, n_features),
# so the scalar 6.5 must be wrapped as [[6.5]].
linear_reg.predict([[6.5]])
# Predicting a new result with Polynomial Regression
polynomial_regression.predict(poly_features.fit_transform([[6.5]]))
"""
If you have any questions or suggestions regarding this script,
feel free to contact me via nikhil.ss4795@gmail.com
"""
| Nikhil4795/Polynomial_Linear_Regression | Polynomial_regression_2/polynomial_regression.py | polynomial_regression.py | py | 1,854 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "matplotli... |
8460276839 | from time import sleep
from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
from appium.webdriver.extensions.android.gsm import GsmCallActions
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
class TestBrowser():
    """Appium smoke test against the Xueqiu Android app on a local emulator."""
    def setup(self):
        # Desired capabilities for the Appium session.
        des_caps = {
            'platformName':'android',
            'platformVersion':'6.0',
            'appPackage':'com.xueqiu.android',
            'appActivity':'com.xueqiu.android.common.MainActivity',
            # 'browserName':'Browser',
            # Do not stop the app, do not clear app data, do not uninstall the app
            'noReset':True,
            # Stop the app, clear app data and uninstall the app
            # 'fullReset':True,
            # Do not stop the process of the app under test
            'dontStopAppOnReset':True,
            'deviceName':'127.0.0.1:7555',
            'autoGrantPermissions':True,
            # Auto-start the emulator named Pixel_23_6 from `emulator -list-avds`
            # Only works with the stock Android emulator, not third-party ones
            # 'avd':'Pixel_23_6'
            'newCommandTimeout':300
        }
        # Connect to the local Appium server.
        self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', des_caps)
        self.driver.implicitly_wait(10)
    def teardown(self):
        self.driver.quit()
    def test_mobile(self):
        # Placeholder test; the interaction-API calls below are kept for reference.
        pass
        # self.driver.make_gsm_call('15910852286',GsmCallActions.CALL)
        # self.driver.send_sms('15910852286','hello appium api')
        # # Screen recording works on Android 8.0+, but not on Huawei devices
        # self.driver.start_recording_screen()
        # # Turn on airplane mode
        # self.driver.set_network_connection(1)
        # self.driver.get_screenshot_as_file('./photos/img.png')
        # sleep(3)
        # self.driver.set_network_connection(4)
        # sleep(3)
        # self.driver.stop_recording_screen()
| yyw15910852287/hogwarts_appium | 交互api/test_jiaohu.py | test_jiaohu.py | py | 1,876 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "appium.webdriver.Remote",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "appium.webdriver",
"line_number": 32,
"usage_type": "name"
}
] |
40961448639 | # coding: utf-8
import itertools
import re
from simpleai.search import (backtrack, CspProblem, LEAST_CONSTRAINING_VALUE,
min_conflicts, MOST_CONSTRAINED_VARIABLE)
largos = {
'1H': 2, '2H': 3, '4H': 2, '5H': 2, '7H': 2, '8H': 2, '10H': 3, '11H': 2,
'1V': 2, '2V': 2, '3V': 3, '4V': 2, '6V': 3, '7V': 2, '8V': 2, '9V': 2,
}
palabras = set(re.sub(r'[^\w] ', '', '''Este es un texto para sacar palabras y asi
emular las claves del diccionario expuesto en el ejercicio.
Artificial Intelligence (AI) is a big field, and this is a big book. We have tried to explore the
full breadth of the field, which encompasses logic, probability, and continuous mathematics;
perception, reasoning, learning, and action; and everything from microelectronic devices to
robotic planetary explorers. The book is also big because we go into some depth.
The subtitle of this book is “A Modern Approach.” The intended meaning of this rather
empty phrase is that we have tried to synthesize what is now known into a common frame-
work, rather than trying to explain each subfield of AI in its own historical context. We
apologize to those whose subfields are, as a result, less recognizable.
How to use Machine Learning on a Very Complicated Problem
So far in Part 1, 2 and 3, we’ve used machine learning to solve isolated problems that have only
one step — estimating the price of a house, generating new data based on existing data and telling
if an image contains a certain object. All of those problems can be solved by choosing one machine
learning algorithm, feeding in data, and getting the result.
But face recognition is really a series of several related problems:
First, look at a picture and find all the faces in it
Second, focus on each face and be able to understand that even if a face is turned in a weird
direction or in bad lighting, it is still the same person.
Third, be able to pick out unique features of the face that you can use to tell it apart from other
people— like how big the eyes are, how long the face is, etc.
Finally, compare the unique features of that face to all the people you already know to determine
the person’s name.
As a human, your brain is wired to do all of this automatically and instantly. In fact, humans are
too good at recognizing faces and end up seeing faces in everyday objects:
Computers are not capable of this kind of high-level generalization (at least not yet…), so we have
to teach them how to do each step in this process separately.
We need to build a pipeline where we solve each step of face recognition separately and pass the
result of the current step to the next step. In other words, we will chain together several machine
learning algorithms:
''').lower().split())
variables = []
dominios = {}
for var, largo in largos.items():
    # register the variable
    variables.append(var)
    # We restrict each domain to only the words whose length matches the
    # slot. (The alternative would be to add length constraints instead.)
    dominios[var] = [x for x in palabras if len(x) == largo]
restricciones = []
def distinto_valor(variables, valores):
    """Binary CSP constraint: the two assigned values must be different."""
    first, second = valores[0], valores[1]
    return first != second
# All variables must take different values. This vocabulary is not large
# enough to satisfy that requirement; to actually see a result, comment out
# this constraint or feed in a text containing more words.
for var1, var2 in itertools.combinations(variables, 2):
    restricciones.append(((var1, var2), distinto_valor))
def interseccion(pos1, pos2):
    """Build a crossing constraint: character ``pos1`` of the first word must
    equal character ``pos2`` of the second word."""
    def restriccion(variables, valores):
        palabra_a, palabra_b = valores[0], valores[1]
        return palabra_a[pos1] == palabra_b[pos2]
    return restriccion
# Add the crossings where the characters of two words must coincide.
restricciones.append((('1H', '1V'), interseccion(0, 0)))
restricciones.append((('2H', '2V'), interseccion(0, 0)))
restricciones.append((('2H', '3V'), interseccion(2, 0)))
restricciones.append((('4H', '4V'), interseccion(0, 0)))
restricciones.append((('4H', '2V'), interseccion(1, 1)))
restricciones.append((('5H', '4V'), interseccion(1, 1)))
restricciones.append((('7H', '7V'), interseccion(0, 0)))
restricciones.append((('8H', '8V'), interseccion(0, 0)))
restricciones.append((('8H', '7V'), interseccion(1, 1)))
restricciones.append((('6V', '10H'), interseccion(2, 0)))
restricciones.append((('10H', '8V'), interseccion(2, 1)))
restricciones.append((('11H', '9V'), interseccion(1, 1)))
problem = CspProblem(variables, dominios, restricciones)
print('backtrack:')
result = backtrack(problem,
                   variable_heuristic=MOST_CONSTRAINED_VARIABLE,
                   value_heuristic=LEAST_CONSTRAINING_VALUE,
                   inference=True)
# (row, column) of the first letter of each slot on the 6x6 grid.
posiciones = {
    '1H': (0, 0), '2H': (0, 3), '4H': (1, 2), '5H': (2, 1), '7H': (3, 3),
    '8H': (4, 2), '10H': (5, 0), '11H': (5, 4),
    '1V': (0, 0), '2V': (0, 3), '3V': (0, 5), '4V': (1, 2), '6V': (3, 0),
    '7V': (3, 3), '8V': (4, 2), '9V': (4, 5),
}
posiciones_letras = {}
# Grid initialised with black squares (U+25A0); letters overwrite them below.
crucigrama = [['\u25A0'] * 6 for x in range(6)]
for palabra, (fila, columna) in posiciones.items():
    for letra in range(largos[palabra]):
        fila_letra = fila
        columna_letra = columna
        # 'H' slots grow to the right, 'V' slots grow downwards.
        if palabra.endswith('H'):
            columna_letra += letra
        else:
            fila_letra += letra
        crucigrama[fila_letra][columna_letra] = result[palabra][letra]
print(result)
print('\n'.join(['| ' + ' | '.join(palabra) + ' |' for palabra in crucigrama]))
| ucse-ia/ucse_ia | practicas/crucigramas.py | crucigramas.py | py | 5,727 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "re.sub",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "simpleai.search.CspProblem",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "simpleai.se... |
27040895007 | import argparse
import auxil.mydata as mydata
import auxil.mymetrics as mymetrics
import gc
import tensorflow as tf
import keras.backend as K
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
from keras.losses import categorical_crossentropy
from keras.layers import *
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras import regularizers
from keras.models import Model
from keras.utils import to_categorical as keras_to_categorical
import numpy as np
import sys
class AttentionBlock(Layer):
    """Channel-attention block: squeezes a 3-D feature map down to a
    per-channel descriptor (3D->2D->1D conv + GAP + softmax-gated dense),
    expands it back through transposed convs, and multiplies the result
    element-wise with the input.
    """
    def __init__(self, filters):
        super(AttentionBlock, self).__init__()
        # Number of channels produced by every conv stage of the block.
        self.filters = filters
        #self.init = RandomNormal()
    def call(self, x):
        # --- contracting path: 3D conv, then fold depth into channels -----
        conv_3d = Conv3D(filters = self.filters, kernel_size=3, strides = 1, padding = 'same')(x)
        # NOTE(review): ``_keras_shape`` is a legacy standalone-Keras
        # attribute; it does not exist on TF2/tf.keras tensors — confirm the
        # targeted Keras version. The prints look like leftover debug output.
        conv_3d_shape = conv_3d._keras_shape
        print(conv_3d_shape)
        conv_3d = Reshape((conv_3d_shape[1], conv_3d_shape[2], conv_3d_shape[3]*conv_3d_shape[4]))(conv_3d)
        conv_2d = Conv2D(filters = self.filters, kernel_size=3, strides = 1, padding = 'same')(conv_3d)
        conv_2d_shape = conv_2d._keras_shape
        print(conv_2d_shape)
        conv_2d = Reshape((conv_2d_shape[1],conv_2d_shape[2]*conv_2d_shape[3]))(conv_2d)
        conv_1d = Conv1D(filters = self.filters, kernel_size=3, strides = 1, padding = 'same')(conv_2d)
        conv_1d_shape = conv_1d._keras_shape
        print(conv_1d_shape)
        # Per-channel descriptor gated by a softmax over channels.
        gap = GlobalAveragePooling1D()(conv_1d)
        fc = Dense(self.filters, use_bias = True)(gap)
        softmax = Activation('softmax')(fc)
        # --- expanding path: mirror the contraction with transposed convs
        # until the descriptor broadcasts against the input feature map -----
        reshape_1d = Reshape((1, self.filters))(softmax)
        deconv_1d = Conv1D(filters = self.filters, kernel_size = 3, strides = 1, padding = 'same')(reshape_1d)
        reshape_2d = Reshape((1,1, self.filters))(deconv_1d)
        deconv_2d = Conv2DTranspose(filters = self.filters, kernel_size=3, strides = 1, padding = 'same')(reshape_2d)
        reshape_3d = Reshape((1,1,1, self.filters))(deconv_2d)
        deconv_3d = Conv3DTranspose(filters = self.filters, kernel_size = 3, strides = 1, padding = 'same')(reshape_3d)
        # Gate the input by the learned attention weights (broadcast multiply).
        x = tf.multiply(deconv_3d, x)
        return x
def set_params(args):
    """Overwrite ``args`` with the fixed training hyper-parameters and
    return the same namespace object."""
    args.batch_size, args.epochs = 64, 200
    return args
def get_model_compiled(shapeinput, num_class, w_decay=0):
    """Build and compile the 3-D CNN with dual attention branches.

    shapeinput: (rows, cols, bands) of one input cube; a trailing channel
    dim of 1 is added. num_class: number of softmax outputs.
    NOTE(review): ``w_decay`` is accepted but not used anywhere in the body.
    """
    inputs = Input((shapeinput[0],shapeinput[1],shapeinput[2],1))
    # Channel count for each of the four attention stages.
    filters = [4,4,4,8]
    # Stem: valid 3x3x5 conv, so spatial/spectral dims shrink every stage.
    x = Conv3D(filters=4,use_bias=False,kernel_size=(3,3,5), padding = 'valid',strides = 1)(inputs)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    for i in range(4):
        x = Conv3D(filters=filters[i],use_bias=False, kernel_size=(3,3,5),padding = 'valid',strides = 1)(x)
        # Two parallel attention branches over the same features, summed.
        a1 = AttentionBlock(filters[i])(x)
        #a1 = LeakyReLU()(a1)
        b1 = AttentionBlock(filters[i])(x)
        #b1 = LeakyReLU()(b1)
        x = Add()([a1,b1])
        x = Dropout(0.4)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU()(x)
    # Heavy dropout around the flatten, then a small dense classifier head.
    x = Dropout(0.85)(x)
    x = Flatten()(x)
    x = Dropout(0.85)(x)
    x = Dense(units=128, use_bias=True)(x)
    x = LeakyReLU()(x)
    x = Dense(units=64, use_bias=True)(x)
    x = LeakyReLU()(x)
    output_layer = Dense(units=num_class, activation='softmax')(x)
    clf = Model(inputs=inputs, outputs=output_layer)
    clf.compile(loss='categorical_crossentropy',
                optimizer=Adam(learning_rate=0.001), metrics=['accuracy'])
    return clf
def main():
    """CLI entry point: parse arguments, load/split the hyperspectral dataset,
    then train and evaluate the model ``args.repeat`` times, collecting
    OA/AA/kappa/per-class accuracies into ``stats``."""
    parser = argparse.ArgumentParser(description='Algorithms traditional ML')
    parser.add_argument('--dataset', type=str, required=True,
                        choices=["IP", "UP", "SV", "UH",
                                 "DIP", "DUP", "DIPr", "DUPr"],
                        help='dataset (options: IP, UP, SV, UH, DIP, DUP, DIPr, DUPr)')
    parser.add_argument('--repeat', default=1, type=int, help='Number of runs')
    parser.add_argument('--components', default=None,
                        type=int, help='dimensionality reduction')
    parser.add_argument('--spatialsize', default=9,
                        type=int, help='windows size')
    parser.add_argument('--wdecay', default=0.02, type=float,
                        help='apply penalties on layer parameters')
    parser.add_argument('--preprocess', default="standard",
                        type=str, help='Preprocessing')
    parser.add_argument('--splitmethod', default="sklearn",
                        type=str, help='Method for split datasets')
    parser.add_argument('--random_state', default=42, type=int,
                        help='The seed of the pseudo random number generator to use when shuffling the data')
    parser.add_argument('--tr_percent', default=0.1,
                        type=float, help='samples of train set')
    parser.add_argument('--use_val', action='store_true',
                        help='Use validation set')
    parser.add_argument('--val_percent', default=0.1,
                        type=float, help='samples of val set')
    parser.add_argument(
        '--verbosetrain', action='store_true', help='Verbose train')
    #########################################
    # NOTE(review): store_false means --set_parameters DISABLES set_params();
    # by default the fixed batch_size/epochs overrides ARE applied.
    parser.add_argument('--set_parameters', action='store_false',
                        help='Set some optimal parameters')
    ############## CHANGE PARAMS ############
    parser.add_argument('--batch_size', default=64, type=int,
                        help='Number of training examples in one forward/backward pass.')
    parser.add_argument('--epochs', default=100, type=int,
                        help='Number of full training cycle on the training set')
    #########################################
    args = parser.parse_args()
    state = {k: v for k, v in args._get_kwargs()}
    if args.set_parameters:
        args = set_params(args)
    pixels, labels, num_class = \
        mydata.loadData(args.dataset, num_components=args.components,
                        preprocessing=args.preprocess)
    pixels, labels = mydata.createImageCubes(
        pixels, labels, windowSize=args.spatialsize, removeZeroLabels=False)
    # One row per run; columns: OA, AA, K, then per-class accuracies.
    stats = np.ones((args.repeat, num_class+3)) * -1000.0  # OA, AA, K, Aclass
    for pos in range(args.repeat):
        # Different seed per run (None disables seeding entirely).
        rstate = args.random_state+pos if args.random_state != None else None
        if args.dataset in ["UH", "DIP", "DUP", "DIPr", "DUPr"]:
            # These datasets ship with a fixed train/test split.
            x_train, x_test, y_train, y_test = \
                mydata.load_split_data_fix(
                    args.dataset, pixels)  # , rand_state=args.random_state+pos)
        else:
            # Drop background pixels and shift labels to start at 0.
            pixels = pixels[labels != 0]
            labels = labels[labels != 0] - 1
            x_train, x_test, y_train, y_test = \
                mydata.split_data(
                    pixels, labels, args.tr_percent, rand_state=rstate)
        if args.use_val:
            x_val, x_test, y_val, y_test = \
                mydata.split_data(
                    x_test, y_test, args.val_percent, rand_state=rstate)
        inputshape = x_train.shape[1:]
        clf = get_model_compiled(inputshape, num_class, w_decay=args.wdecay)
        # Monitor validation accuracy; without --use_val the test set doubles
        # as validation data.
        valdata = (x_val, keras_to_categorical(y_val, num_class)) if args.use_val else (
            x_test, keras_to_categorical(y_test, num_class))
        clf.fit(x_train, keras_to_categorical(y_train, num_class),
                batch_size=args.batch_size,
                epochs=args.epochs,
                verbose=args.verbosetrain,
                validation_data=valdata,
                callbacks=[ModelCheckpoint("/tmp/best_model.h5", monitor='val_accuracy', verbose=0, save_best_only=True)])
        # Restore the best checkpoint before evaluating.
        clf.load_weights("/tmp/best_model.h5")
        clf.compile(loss='categorical_crossentropy',
                    optimizer=Adam(learning_rate=0.001), metrics=['accuracy'])
        print("PARAMETERS", clf.count_params())
        stats[pos, :] = mymetrics.reports(
            np.argmax(clf.predict(x_test), axis=1), y_test)[2]
        print(args.dataset, list(stats[-1]))
# Script entry point — all configuration comes from the command line.
if __name__ == '__main__':
    main()
| deeplearning2020/comparison | algorithms/proposed.py | proposed.py | py | 7,971 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.multiply",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "keras.optimizers.Adam",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "argparse... |
39974821125 |
# USAGE
# python align_faces.py --shape-predictor shape_predictor_68_face_landmarks.dat --image images/example_01.jpg
# import the necessary packages
from imutils.face_utils import FaceAligner
from imutils.face_utils import rect_to_bb
import argparse
import imutils
import dlib
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
                help="path to facial landmark predictor")
ap.add_argument("-i", "--image", required=True,
                help="path to input image")
args = vars(ap.parse_args())

# uuid is only needed to generate unique output file names; import it once
# up front instead of re-importing inside the detection loop (the original
# executed `import uuid` on every detected face).
import uuid

# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor and the face aligner
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
# desiredLeftEye controls the zoom; 0.25 is the default
fa = FaceAligner(predictor, desiredLeftEye=(0.25, 0.25), desiredFaceWidth=112)

# load the input image, resize it, and convert it to grayscale
image = cv2.imread(args["image"])
image = imutils.resize(image, width=800)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# second argument = number of image-pyramid upsamples before detecting
rects = detector(gray, 2)

# loop over the face detections (rects contains the bounding boxes)
for rect in rects:
    # extract the ROI of the *original* face, then align the face
    # using facial landmarks
    (x, y, w, h) = rect_to_bb(rect)
    faceAligned = fa.align(image, gray, rect)
    #faceAligned = cv2.resize(faceAligned, (224, 224))
    # write each aligned face under a fresh random name
    # NOTE(review): the output directory is hard-coded — consider an
    # --output argument so the script is reusable.
    f = str(uuid.uuid4())
    cv2.imwrite("/home/monete/monete@gmail.com/studying/IA/thesis/deeplearning/dataset/fer2013/output/7-surprise/" + f + ".png", faceAligned)

# display the output images
#cv2.imshow("Aligned", faceAligned)
#cv2.waitKey(0)
| juanluisrosaramos/dataset_tuning | align_faces.py | align_faces.py | py | 1,729 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "dlib.get_frontal_face_detector",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "dlib.shape_predictor",
"line_number": 24,
"usage_type": "call"
},
{
"api_n... |
41907946588 | import torch
import torch.nn as nn
class CNN(nn.Module):
    """Binary classifier built from depthwise-separable conv blocks.

    Expects input of shape (N, 3, 125, 125): three blocks of
    (depthwise 4x4 conv -> pointwise 1x1 conv -> ReLU -> 2x2 max-pool)
    reduce it to 64 x 13 x 13 = 10816 features, followed by a two-layer
    MLP head with dropout and a sigmoid output in [0, 1] per sample.
    """

    def __init__(self):
        super(CNN, self).__init__()
        self.conv_1 = self._con_dw_sep(3, 16)
        self.conv_2 = self._con_dw_sep(16, 32)
        self.conv_3 = self._con_dw_sep(32, 64)
        self.fc1 = nn.Linear(10816, 512)
        self.fc2 = nn.Linear(512, 1)
        self.dropout = nn.Dropout(0.5)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def _con_dw_sep(self, C_in, C_out):
        """Depthwise-separable block: per-channel 4x4 conv (groups=C_in),
        1x1 channel-mixing conv, ReLU, then 2x2 max-pool."""
        return nn.Sequential(
            nn.Conv2d(C_in, C_in, kernel_size=4, groups=C_in),
            nn.Conv2d(C_in, C_out, kernel_size=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

    def forward(self, x):
        """Return per-sample probabilities with shape (N,)."""
        out = self.conv_1(x)
        out = self.conv_2(out)
        out = self.conv_3(out)
        out = out.view(-1, 10816)
        out = self.dropout(out)
        out = self.fc1(out)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.fc2(out)
        # Squeeze only the feature dim: a plain .squeeze() would also drop
        # the batch dim when N == 1 and return a 0-d tensor.
        out = out.squeeze(1)
        out = self.sigmoid(out)
        return out.float()
| CSID-DGU/2022-2-SCS4031-EZ_SW | age_prediction_model/model.py | model.py | py | 1,212 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
14566552628 | from django.contrib.auth.models import User
from django.db import models
import cover.models
from documents.models import (Book, Chunk, Image, BookPublishRecord,
ImagePublishRecord)
from documents.signals import post_publish
from dvcs.signals import post_publishable
# Cache-invalidation listeners: a Book/Chunk/Image save "touches" the
# affected objects; publish events and cover changes cascade similarly.
def book_changed(sender, instance, created, **kwargs):
    # A saved book invalidates itself and every chunk it contains.
    instance.touch()
    for c in instance:
        c.touch()
models.signals.post_save.connect(book_changed, sender=Book)
def chunk_changed(sender, instance, created, **kwargs):
    # A saved chunk invalidates both its parent book and itself.
    instance.book.touch()
    instance.touch()
models.signals.post_save.connect(chunk_changed, sender=Chunk)
def image_changed(sender, instance, created, **kwargs):
    instance.touch()
models.signals.post_save.connect(image_changed, sender=Image)
def publish_listener(sender, *args, **kwargs):
    # Publishing a book touches the book and all its chunks; publishing an
    # image touches just the image.
    if isinstance(sender, BookPublishRecord):
        sender.book.touch()
        for c in sender.book:
            c.touch()
    elif isinstance(sender, ImagePublishRecord):
        sender.image.touch()
post_publish.connect(publish_listener)
def chunk_publishable_listener(sender, *args, **kwargs):
    # Connected without a sender filter, so it fires for every publishable
    # tree; the extra book touch applies only when the tree is a Chunk.
    sender.tree.touch()
    if isinstance(sender.tree, Chunk):
        sender.tree.book.touch()
post_publishable.connect(chunk_publishable_listener)
def publishable_listener(sender, *args, **kwargs):
    sender.tree.touch()
post_publishable.connect(publishable_listener, sender=Image)
def listener_create(sender, instance, created, **kwargs):
    # Every newly created book starts with a single default chunk.
    if created:
        instance.chunk_set.create(number=1, slug='1')
models.signals.post_save.connect(listener_create, sender=Book)
def cover_changed(sender, instance, created, **kwargs):
    # Changing a cover image rebuilds the cover of every book using it.
    for book in instance.book_set.all():
        book.build_cover()
models.signals.post_save.connect(cover_changed, sender=cover.models.Image)
| fnp/redakcja | src/documents/models/listeners.py | listeners.py | py | 1,794 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "django.db.models.signals.post_save.connect",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 14,
"usage_type": "name"
},
... |
11476729859 | """AD&D Second Edition Combat Simulator"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
    name='adnd2e-combat-simulator',
    version='1.0.2',
    description='A tool to simulate combat in AD&D 2nd Edition',
    # Long description was read from README.rst above.
    long_description=long_description,
    url='https://github.com/gene1wood/adnd2e-combat-simulator',
    author='Gene Wood',
    author_email='gene_wood@cementhorizon.com',
    license='GPL-3.0',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: End Users/Desktop',
        'Topic :: Games/Entertainment :: Role-Playing',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
    ],
    keywords='ad&d d&d adnd dnd combat',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=['PyYAML', 'dice', 'colorama'],
    # Ship the example combatants file inside the installed package.
    package_data={
        'adnd2e_combat_simulator': ['combatants.example.yaml'],
    },
    # Installs a ``battle`` console command pointing at main().
    entry_points={
        'console_scripts': [
            'battle=adnd2e_combat_simulator:main',
        ],
    },
)
| gene1wood/adnd2e-combat-simulator | setup.py | setup.py | py | 1,409 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number":... |
17884750945 | import pprint
import threading
from typing import Dict, TYPE_CHECKING
from PySide2.QtWidgets import QTabWidget, QTextBrowser, QWidget
from lib.comm import get_var, set_var
from widgets import PMTableView, PMGTableWidget, PMDockObject, PMGTableViewer, PMGJsonTree
if TYPE_CHECKING:
from lib.extensions.extensionlib.extension_lib import extension_lib
class AbstractViewer(object):
    """Base class for all variable viewers.

    Concrete viewers override ``is_valid`` to claim a data type and
    ``set_data`` to render the value.
    """
    @staticmethod
    def is_valid(data) -> bool:
        """Return True when *data* can be displayed by this viewer.

        The base implementation accepts any value.
        """
        return True

    def set_data(self, data: object, metadata: dict):
        """Render *data* together with its *metadata*; no-op in the base class."""
        return None
class PDDataViewer(PMGTableViewer, AbstractViewer):
    """Table viewer dedicated to pandas ``DataFrame`` values."""
    def __init__(self, parent=None):
        PMGTableViewer.__init__(self, parent, table_view=PMTableView())
        AbstractViewer.__init__(self)

    def split_by_columns(self):
        # Placeholder: extracting the current column is not implemented yet.
        print('splitted!')

    @staticmethod
    def is_valid(data):
        from pandas import DataFrame
        return isinstance(data, DataFrame)

    def set_data(self, data: object, metadata: dict = None):
        # Delegate rendering to the table-viewer base; metadata is unused here.
        super().set_data(data)
class NPDataViewer(PMGTableViewer, AbstractViewer):
    """Table viewer dedicated to ``numpy.ndarray`` values."""
    def __init__(self, parent=None):
        PMGTableViewer.__init__(self, parent, table_view=PMTableView())
        AbstractViewer.__init__(self)

    @staticmethod
    def is_valid(data):
        from numpy import ndarray
        return isinstance(data, ndarray)

    def set_data(self, data: object, metadata: dict = None):
        # Metadata is ignored; the base table viewer renders the array.
        super().set_data(data)
class JsonViewer(PMGJsonTree, AbstractViewer):
    """Tree view dedicated to ``dict`` values."""
    def __init__(self, parent=None):
        PMGJsonTree.__init__(self, parent)

    @staticmethod
    def is_valid(data) -> bool:
        return isinstance(data, dict)

    def set_data(self, data: Dict[str, object], metadata: dict = None) -> None:
        # Wrap the value under a translatable root node, then open one level.
        root_label = self.tr('Data:')
        self.set_data_dic({root_label: data})
        self.expandToDepth(1)
class GeneralIterableViewer(PMGTableWidget, AbstractViewer):
    """
    Table view for generic iterable objects.

    The value may be a list, or a nested 2-D list whose rows have unequal
    lengths. Parsing walks the first iterable dimension, taking data[0],
    data[1], ... data[len(data)-1] and showing one element per row. If an
    element is not iterable it goes into the first column of its row; if it
    is iterable, its items are laid out across that row's columns
    (data[0][1], data[0][2], ...).
    """
    def __init__(self, parent=None):
        PMGTableWidget.__init__(self, parent)
        AbstractViewer.__init__(self)
    @staticmethod
    def is_valid(data: object):
        # numpy arrays and DataFrames have dedicated viewers, so reject them
        # here even though they are iterable.
        import numpy
        import pandas
        if isinstance(data, numpy.ndarray) or isinstance(
                data, pandas.DataFrame):
            return False
        return PMGTableWidget.check_data_can_be_displayed_by_table(data=data)
    def set_data(self, data: 'np.ndarray', metadata: dict = None):
        # NOTE(review): the annotation says np.ndarray but is_valid rejects
        # ndarrays; in practice this receives a plain iterable -- confirm.
        super().set_data_2d(data)
class GeneralObjectViewer(QTextBrowser, AbstractViewer):
    """Fallback text viewer: pretty-prints any object plus its metadata."""
    def __init__(self, parent=None):
        QTextBrowser.__init__(self, parent)
        AbstractViewer.__init__(self)

    @staticmethod
    def is_valid(data: object):
        # Accept anything that no specialised table viewer claims.
        import numpy
        import pandas
        if isinstance(data, (numpy.ndarray, pandas.DataFrame)):
            return False
        return not GeneralIterableViewer.is_valid(data)

    def set_data(self, data: object, metadata: dict = None):
        value_text = self.tr('value:') + '\n\n ' + pprint.pformat(data)
        meta_text = self.tr('meta data:') + '\n\n' + pprint.pformat(metadata)
        self.setText(value_text + '\n\n\n' + meta_text)
# Viewer registry, probed in order by build_viewer()/get_viewer_class(); the
# first class whose is_valid() accepts the value wins, so the catch-all
# GeneralObjectViewer must stay last.
# NOTE(review): GeneralIterableViewer precedes JsonViewer -- verify that
# check_data_can_be_displayed_by_table rejects dicts, otherwise dicts would
# never reach JsonViewer.
viewer_classes = [
    PDDataViewer,
    NPDataViewer,
    GeneralIterableViewer,
    JsonViewer,
    GeneralObjectViewer]
def build_viewer(data: object, metadata: object) -> 'QWidget':
    """Factory: instantiate the first registered viewer that accepts *data*.

    The new widget is populated via set_data(); returns None when no
    registered viewer claims the value.
    """
    matching_cls = next(
        (cls for cls in viewer_classes if cls.is_valid(data)), None)
    if matching_cls is None:
        return None
    widget = matching_cls()
    widget.set_data(data, metadata)
    return widget
def get_viewer_class(data):
    """Return the first registered viewer class accepting *data*, else None."""
    return next((cls for cls in viewer_classes if cls.is_valid(data)), None)
class PMVariableViewerWidget(QTabWidget, PMDockObject):
    """
    Tab widget showing one viewer tab per inspected workspace variable.

    Multiple inheritance is used here; note that PMDockObject must be the
    right-hand base so the Qt class stays first in the MRO.
    """
    if TYPE_CHECKING:
        lib = extension_lib
    def __init__(self, parent=None):
        super().__init__(parent)
        self.setTabsClosable(True)
        # Maps variable name -> the viewer widget currently displaying it.
        self.var_view_tables: Dict[str, object] = {}
        self.tabCloseRequested.connect(self.on_tab_close_request)
        self.variable_view_factory = None
    def is_temporary(self) -> bool:
        # Temporary dock: created on demand rather than restored at startup.
        return True
    def get_widget_text(self) -> str:
        return self.tr('Variable Viewer')
    def set_lib(self, lib):
        '''
        Store the extension library and register data callbacks. Note: the
        UI may only be refreshed from the main thread, otherwise the
        application crashes -- hence the thread checks inside both callbacks.
        :param lib: extension library exposing the Data/UI services.
        :return: None
        '''
        self.lib = lib
        def on_changed(varname: str, variable, source: str):
            # Refresh an open tab when its variable changed (main thread only).
            if threading.current_thread() is threading.main_thread():
                if varname in self.var_view_tables:
                    self.show_data(varname, raise_window=False)
        def on_deletion(varname: str, provider: str):
            # Close the tab of a deleted variable (main thread only).
            if threading.current_thread() is threading.main_thread():
                if varname in self.var_view_tables:
                    tab = self.var_view_tables.pop(varname)
                    index = self.indexOf(tab)
                    self.removeTab(index)
        self.lib.Data.add_data_changed_callback(on_changed)
        self.lib.Data.add_data_deleted_callback(on_deletion)
    def show_data(self, dataname: str, raise_window=True):
        """
        Display a variable; afterwards the containing dock can be raised into
        view, which matters when several dock widgets are stacked together.
        If the existing tab's viewer type no longer matches the data's type,
        the old viewer is removed and a new one is built in its place.
        :param dataname: name of the variable to display
        :param raise_window: when True, raise the dock into view afterwards
        """
        from lib.comm.base import DataDesc
        desc: DataDesc = self.lib.Data.get_data_desc(dataname)
        # Large values are fetched as a preview to limit transfer size.
        if desc.big:
            data = get_var(dataname, preview=True)
        else:
            data = get_var(dataname)
        try:
            dataview: 'QWidget' = self.var_view_tables.get(dataname)
            metadata = self.lib.Data.get_metadata(dataname)
        except BaseException:
            import traceback
            traceback.print_exc()
            return
        last_index = self.count()
        if dataview is not None:
            # Viewer class no longer matches the value's type: rebuild in place.
            if not isinstance(dataview, get_viewer_class(data)):
                index = self.indexOf(dataview)
                self.removeTab(index)
                last_index = index
                self.var_view_tables.pop(dataname)
                dataview = None
        if dataview is None:
            dataview = build_viewer(data, metadata)
            # NOTE(review): the widget is both inserted and appended here;
            # addTab on an already-inserted widget looks redundant -- confirm.
            self.insertTab(last_index, dataview, dataname)
            self.addTab(dataview, dataname)
            self.var_view_tables[dataname] = dataview
        dataview.set_data(data, metadata)
        if hasattr(dataview, 'data_modified_signal'):
            def set_var_data_modified():
                # Push edits made in the viewer back into the workspace.
                set_var(dataname, dataview.get_data())
            dataview.data_modified_signal.connect(set_var_data_modified)
        dataview.setWindowTitle(dataname)
        # NOTE(review): this connect runs on every show_data() call for the
        # same viewer, so the slot may end up connected multiple times.
        dataview.windowTitleChanged.connect(self.on_tab_window_title_changed)
        self.setCurrentWidget(dataview)
        if raise_window:
            self.lib.UI.raise_dock_into_view('data_view_table')
    def on_tab_window_title_changed(self, title: str):
        # Keep the tab label in sync with the viewer's window title.
        widget = self.sender()
        self.setTabText(self.indexOf(widget), title)
    def on_tab_close_request(self, close_index: int):
        # Drop bookkeeping first, then delete and remove the tab widget.
        self.var_view_tables.pop(self.tabText(close_index))
        tab_to_close: 'QTextBrowser' = self.widget(close_index)
        tab_to_close.deleteLater()
        self.removeTab(close_index)
| pyminer/pyminer | pyminer/packages/workspace_inspector/data_viewer.py | data_viewer.py | py | 8,746 | python | en | code | 77 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "widgets.PMGTableViewer",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "widgets.PMGTableViewer.__init__",
"line_number": 38,
"usage_type": "call"
},
{
"api_na... |
23701537076 | import argparse
import os
import shutil
import numpy as np
import torch
import torchvision
from torch import nn as nn
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
import helpers
from dcgan import generators, discriminators
from dcgan.train_config import TrainConfig
def train(dataset: Dataset, train_config: TrainConfig,
          generator, discriminator):
    """Adversarial (DC)GAN training loop; runs until interrupted.

    Alternates one discriminator update and one generator update per batch,
    logging losses and image grids to TensorBoard. When
    train_config.conditional_dim > 0, the class label is concatenated onto
    both the generator noise and the discriminator input (conditional GAN).
    """
    global_step = epoch = 0
    # Optionally wipe a previous run's logs before writing new ones.
    if train_config.overwrite and os.path.exists(train_config.experiment_dirpath):
        shutil.rmtree(train_config.experiment_dirpath)
    real_images_writer = SummaryWriter(f"{train_config.experiment_dirpath}/real")
    fake_images_writer = SummaryWriter(f"{train_config.experiment_dirpath}/fake")
    stats_writer = SummaryWriter(f"{train_config.experiment_dirpath}/stats")
    dataloader = DataLoader(
        dataset=dataset, batch_size=train_config.batch_size, shuffle=True, num_workers=train_config.num_workers
    )
    num_iterations_per_epoch = len(dataset) // train_config.batch_size
    generator = generator.to(device=train_config.device).train()
    discriminator = discriminator.to(device=train_config.device).train()
    criterion = torch.nn.BCELoss()
    # betas=(0.5, 0.999) matches the DCGAN paper's Adam settings.
    gen_opt = torch.optim.Adam(params=generator.parameters(), lr=train_config.lr, betas=(0.5, 0.999))
    disc_opt = torch.optim.Adam(params=discriminator.parameters(), lr=train_config.lr, betas=(0.5, 0.999))
    while True:
        for batch_idx, (real_img_batch, labels) in tqdm(enumerate(dataloader), total=num_iterations_per_epoch, leave=False):
            # Map images from [0, 1] to [-1, 1] before the discriminator.
            img_batch = normalize(real_img_batch)
            if train_config.conditional_dim > 0:
                conditional_input = helpers.conditional_input_encoder_discriminator(
                    labels=labels, cardinality=train_config.conditional_dim, spatial_size=train_config.image_size
                )
                img_batch = torch.cat([img_batch, conditional_input], dim=1)
            img_batch = img_batch.to(device=train_config.device)
            # train discriminator
            noise = torch.randn(size=(len(labels), train_config.z_dim))
            if train_config.conditional_dim > 0:
                conditional_input = helpers.conditional_input_encoder_generator(
                    labels=labels, cardinality=train_config.conditional_dim
                )
                noise = torch.cat([noise, conditional_input], dim=1)
            noise = noise.to(device=train_config.device)
            fake_img_batch = generator(noise)
            if train_config.conditional_dim > 0:
                conditional_input = helpers.conditional_input_encoder_discriminator(
                    labels=labels, cardinality=train_config.conditional_dim, spatial_size=train_config.image_size
                ).to(device=train_config.device)
                fake_img_batch = torch.cat([fake_img_batch, conditional_input], dim=1)
            real_proba = discriminator(img_batch)
            # detach() keeps the generator out of the discriminator's backward.
            fake_proba = discriminator(fake_img_batch.detach())
            disc_loss = (criterion(real_proba, torch.ones_like(real_proba)) +
                         criterion(fake_proba, torch.zeros_like(fake_proba)))
            disc_loss = disc_loss / 2
            disc_opt.zero_grad()
            disc_loss.backward()
            disc_opt.step()
            # train generator
            fake_proba = discriminator(fake_img_batch)
            # Non-saturating loss: push the discriminator to call fakes real.
            gen_loss = criterion(fake_proba, torch.ones_like(fake_proba))
            gen_opt.zero_grad()
            gen_loss.backward()
            gen_opt.step()
            if global_step % train_config.send_every == 0:
                stats_writer.add_scalar("generator loss", gen_loss, global_step=global_step)
                stats_writer.add_scalar("discriminator loss", disc_loss, global_step=global_step)
                stats_writer.add_scalar("total loss", gen_loss + disc_loss, global_step=global_step)
            if global_step % train_config.show_every == 0:
                # visualize
                # NOTE(review): real images are logged with global_step=epoch
                # while fakes use global_step -- verify this is intended.
                real_images_grid = torchvision.utils.make_grid(
                    real_img_batch, normalize=True
                )
                real_images_writer.add_image("real images", real_images_grid, global_step=epoch)
                generated_images = generate(train_config=train_config, generator=generator)
                generated_images = torchvision.utils.make_grid(
                    generated_images, normalize=True
                )
                fake_images_writer.add_image("fake images", generated_images, global_step=global_step)
            global_step += 1
        epoch += 1
def normalize(x):
    """Linearly map values from [0, 1] to [-1, 1]."""
    return x * 2 - 1
def generate(train_config: TrainConfig, generator: nn.Module) -> torch.Tensor:
    """Sample a batch of images from the generator without tracking gradients.

    For conditional models (conditional_dim > 0) one random class label is
    drawn and shared by the whole batch. Output is reshaped to
    (batch, channels, image_size, image_size).
    """
    noise = torch.randn(train_config.batch_size, train_config.z_dim)
    if train_config.conditional_dim > 0:
        # One label shared by the entire generated batch.
        label = np.random.randint(low=0, high=train_config.conditional_dim)
        labels = np.asarray([label] * train_config.batch_size)
        labels = torch.from_numpy(labels)
        conditional_input = helpers.conditional_input_encoder_generator(
            labels=labels, cardinality=train_config.conditional_dim
        )
        noise = torch.cat([noise, conditional_input], dim=1)
    noise = noise.to(device=train_config.device)
    with torch.no_grad():
        generated_images = generator(noise).view(train_config.batch_size, -1, train_config.image_size, train_config.image_size)
    return generated_images
def get_dataset_and_in_channels(dataset_name: str, image_size: int):
    """Build the requested torchvision training set and report its channel count.

    Downloads into ``data/`` on first use; raises KeyError for an unknown
    dataset name. Returns (dataset, in_channels).
    """
    registry = {
        "mnist": (torchvision.datasets.MNIST, 1),
        "cifar-10": (torchvision.datasets.CIFAR10, 3),
    }
    dataset_cls, in_channels = registry[dataset_name]
    preprocessing = torchvision.transforms.Compose([
        torchvision.transforms.Resize(image_size),
        torchvision.transforms.ToTensor(),
    ])
    dataset = dataset_cls(
        root="data/",
        train=True,
        transform=preprocessing,
        download=True,
    )
    return dataset, in_channels
def parse_args():
    """Parse the training CLI options from sys.argv."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--exp_name")
    arg_parser.add_argument("--dataset", default="mnist", choices=["mnist", "cifar-10"])
    arg_parser.add_argument("--image_size", default=32, type=int)
    return arg_parser.parse_args()
def main():
    """CLI entry point: build dataset, models and config, then train forever."""
    args = parse_args()
    exp_dir = "../experiments"
    # Nest this run under its experiment name when one is given.
    if args.exp_name is not None:
        exp_dir = f"{exp_dir}/{args.exp_name}"
    dataset, in_channels = get_dataset_and_in_channels(dataset_name=args.dataset, image_size=args.image_size)
    config = TrainConfig(
        experiment_dirpath=exp_dir,
        image_size=args.image_size,
        in_channels=in_channels
    )
    generator = generators.DCGenerator.from_train_config(config)
    discriminator = discriminators.DCDiscriminator.from_train_config(config)
    train(
        dataset=dataset, train_config=config, generator=generator, discriminator=discriminator
    )
if __name__ == '__main__':
    main()
| dfridman1/GANs | dcgan/train.py | train.py | py | 7,028 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "dcgan.train_config.TrainConfig",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 22,
"usage_type": "call"
},
{
"api_name":... |
5515862018 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
from vgg import load_pretrained_VGG16_pool5
import cifar10_utils
import tensorflow as tf
import numpy as np
LEARNING_RATE_DEFAULT = 1e-4
BATCH_SIZE_DEFAULT = 128
MAX_STEPS_DEFAULT = 15000
EVAL_FREQ_DEFAULT = 1000
CHECKPOINT_FREQ_DEFAULT = 5000
PRINT_FREQ_DEFAULT = 10
OPTIMIZER_DEFAULT = 'ADAM'
REFINE_AFTER_K_STEPS_DEFAULT = 0
DATA_DIR_DEFAULT = './cifar10/cifar-10-batches-py'
LOG_DIR_DEFAULT = './logs/cifar10'
CHECKPOINT_DIR_DEFAULT = './checkpoints'
def train_step(loss):
    """
    Defines the ops to conduct an optimization step. You can set a learning
    rate scheduler or pick your favorite optimizer here. This set of operations
    should be applicable to both ConvNet() and Siamese() objects.
    Args:
        loss: scalar float Tensor, full loss = cross_entropy + reg_loss
    Returns:
        train_op: Ops for optimization.
    """
    ########################
    # PUT YOUR CODE HERE  #
    ########################
    # Adam with the command-line learning rate; FLAGS is the module-level
    # argparse namespace populated in the __main__ block.
    train_op = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss)
    ########################
    # END OF YOUR CODE    #
    ########################
    return train_op
def fully_connected_layers(vgg_output):
    """Classification head on VGG pool5: flatten -> 384 -> 192 -> n_classes.

    Weights use Xavier initialization with L2 regularization (registered in
    the REGULARIZATION_LOSSES collection); biases start at zero. Returns the
    unnormalized class logits.
    """
    # dense layers
    with tf.name_scope('dense'):
        flat = tf.reshape(vgg_output, [vgg_output.get_shape()[0].value, -1], name='flat_out')
    xavier = tf.contrib.layers.xavier_initializer()
    const0 = tf.constant_initializer(0.)
    l2_reg = tf.contrib.layers.l2_regularizer(0.1)
    n_classes = 10
    with tf.name_scope('dense1'):
        w1 = tf.get_variable('w1', shape=[flat.get_shape()[1], 384], dtype=tf.float32,
                             initializer=xavier, regularizer=l2_reg)
        b1 = tf.get_variable('b1', shape=[384], dtype=tf.float32,
                             initializer=const0)
        fc1 = tf.nn.relu(tf.matmul(flat, w1) + b1, name='d1_out')
    # fc2 Multiplication [384, 192]
    # ReLU
    with tf.name_scope('dense2'):
        w2 = tf.get_variable('w2', shape=[384, 192], dtype=tf.float32,
                             initializer=xavier, regularizer=l2_reg)
        b2 = tf.get_variable('b2', shape=[192], dtype=tf.float32,
                             initializer=const0)
        fc2 = tf.nn.relu(tf.matmul(fc1, w2) + b2, name='d2_out')
    # fc3 Multiplication [192, 10] -- final logits, no activation.
    with tf.name_scope('dense3'):
        w3 = tf.get_variable('w3', shape=[192, n_classes], dtype=tf.float32,
                             initializer=xavier, regularizer=l2_reg)
        b3 = tf.get_variable('b3', shape=[n_classes], dtype=tf.float32,
                             initializer=const0)
        fc3 = tf.matmul(fc2, w3) + b3
    return fc3
def vgg_loss(logits, labels):
    """Full training loss: mean softmax cross-entropy plus L2 regularization.

    Args:
        logits: [batch, n_classes] unnormalized class scores.
        labels: [batch, n_classes] one-hot target labels.
    Returns:
        Scalar float Tensor ce_loss + reg_loss; also emits scalar summaries.
    """
    from functools import reduce  # `reduce` is not a builtin on Python 3
    ce_loss = tf.nn.softmax_cross_entropy_with_logits(logits, labels)
    ce_loss = tf.reduce_mean(ce_loss)
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    reg_loss = tf.to_float(0.)
    # Fold in regularization terms only when the collection is usable; on an
    # empty collection reduce() would raise TypeError, so guard that too.
    if reg_losses and None not in reg_losses:  # this IS meant to switch while building the graph
        reg_loss = reduce(lambda x, y: tf.add(x, y), reg_losses)
    loss = ce_loss + reg_loss
    tf.scalar_summary('ce_loss', ce_loss)
    tf.scalar_summary('reg_loss', reg_loss)
    tf.scalar_summary('full_loss', loss)
    return loss
def accuracy(logits, labels):
    """Fraction of samples whose arg-max prediction matches the one-hot label.

    NOTE(review): tf.reduce_sum(score) / tf.size(score) divides two int32
    tensors; under this TF version's div semantics that may truncate to 0/1
    instead of a float fraction -- verify on a real batch.
    """
    guesses = tf.argmax(logits, dimension=1)
    targets = tf.argmax(labels, dimension=1)
    score = tf.to_int32(tf.equal(guesses, targets))
    acc = tf.reduce_sum(score) / tf.size(score)
    tf.scalar_summary('accuracy', acc)
    return acc
def train():
    """
    Performs training and evaluation of your model.
    First define your graph using vgg.py with your fully connected layer.
    Then define necessary operations such as trainer (train_step in this case),
    savers and summarizers. Finally, initialize your model within a
    tf.Session and do the training.
    ---------------------------------
    How often to evaluate your model:
    ---------------------------------
    - on training set every PRINT_FREQ iterations
    - on test set every EVAL_FREQ iterations
    ---------------------------
    How to evaluate your model:
    ---------------------------
    Evaluation on test set should be conducted over full batch, i.e. 10k images,
    while it is alright to do it over minibatch for train set.

    Fix vs. the previous revision: the "--- TEST ---" log line now reports
    the averaged test_err/test_acc instead of the last train_loss/train_acc.
    """
    # Set the random seeds for reproducibility. DO NOT CHANGE.
    tf.set_random_seed(42)
    np.random.seed(42)
    cifar10 = cifar10_utils.get_cifar10(FLAGS.data_dir)
    data_dims = list(cifar10.train.images.shape[1:])
    n_classes = 10
    with tf.Graph().as_default():
        # Placeholders: image batch, one-hot labels, and a switch that stops
        # gradients into the pretrained VGG trunk until refine_after_k steps.
        x_pl = tf.placeholder(dtype=tf.float32, shape=[FLAGS.batch_size] + data_dims)
        y_pl = tf.placeholder(dtype=tf.float32, shape=[FLAGS.batch_size, n_classes])
        stopgrads = tf.placeholder(dtype=tf.bool)
        pool5, assign_ops = load_pretrained_VGG16_pool5(x_pl, scope_name='vgg')
        pool5 = tf.cond(stopgrads, lambda: tf.stop_gradient(pool5), lambda: pool5)
        logits = fully_connected_layers(pool5)
        loss = vgg_loss(logits, y_pl)
        acc = accuracy(logits, y_pl)
        train_op = train_step(loss)
        summary_op = tf.merge_all_summaries()
        init_op = tf.initialize_all_variables()
        with tf.Session() as sess:
            saver = tf.train.Saver()
            sess.run(init_op)
            # Load the pretrained VGG weights into the graph.
            sess.run(assign_ops)
            train_summary_writer = tf.train.SummaryWriter(FLAGS.log_dir + '/train', sess.graph)
            test_summary_writer = tf.train.SummaryWriter(FLAGS.log_dir + '/test', sess.graph)
            for step in range(FLAGS.max_steps):
                x, y = cifar10.train.next_batch(FLAGS.batch_size)
                # Keep the VGG trunk frozen for the first refine_after_k steps.
                switch = True if step < FLAGS.refine_after_k else False
                feed = {x_pl: x, y_pl: y, stopgrads: switch}
                train_loss, train_acc, summary_str, _ = sess.run([loss, acc, summary_op, train_op], feed_dict=feed)
                if step == 0 or (step + 1) % FLAGS.print_freq == 0 or step + 1 == FLAGS.max_steps:
                    print('TRAIN step: ', str(step), ' err: ', str(train_loss), ' acc: ', str(train_acc))
                    train_summary_writer.add_summary(summary_str, step)
                    train_summary_writer.flush()
                if step == 0 or (step + 1) % FLAGS.eval_freq == 0 or step + 1 == FLAGS.max_steps:
                    # Full test-set evaluation, averaged over whole minibatches.
                    x, y = cifar10.test.images, cifar10.test.labels
                    num_batches = int(np.floor(x.shape[0] / FLAGS.batch_size))
                    test_err = 0.
                    test_acc = 0.
                    for idx in range(num_batches):
                        x_batch = x[idx * FLAGS.batch_size:(idx + 1) * FLAGS.batch_size, :, :, :]
                        y_batch = y[idx * FLAGS.batch_size:(idx + 1) * FLAGS.batch_size, :]
                        feed = {x_pl: x_batch, y_pl: y_batch, stopgrads: True}
                        batch_err, batch_acc = sess.run([loss, acc], feed_dict=feed)
                        test_err += batch_err
                        test_acc += batch_acc
                        summary_str = sess.run(summary_op, feed_dict=feed)  # possibly incorrect. should pool summaries
                        test_summary_writer.add_summary(summary_str, step)
                        test_summary_writer.flush()
                    test_err /= num_batches
                    test_acc /= num_batches
                    # BUGFIX: previously printed train_loss/train_acc here.
                    print('--- TEST --- step: ', str(step), ' err: ', str(test_err), ' acc: ', str(test_acc))
                if (step + 1) % FLAGS.checkpoint_freq == 0 or step + 1 == FLAGS.max_steps:
                    checkpoint_file = os.path.join(FLAGS.checkpoint_dir, 'ckpt')
                    saver.save(sess, checkpoint_file, global_step=(step + 1))
def initialize_folders():
    """
    Initializes all folders in FLAGS variable.
    """
    # Create each configured directory (log, data, checkpoint) if missing.
    for folder in (FLAGS.log_dir, FLAGS.data_dir, FLAGS.checkpoint_dir):
        if not tf.gfile.Exists(folder):
            tf.gfile.MakeDirs(folder)
def print_flags():
    """
    Prints all entries in FLAGS variable.
    """
    for name, val in vars(FLAGS).items():
        print('%s : %s' % (name, str(val)))
def main(_):
    """Entry point invoked by tf.app.run(): print config, make dirs, train."""
    print_flags()
    initialize_folders()
    train()
if __name__ == '__main__':
    # Command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--learning_rate', type = float, default = LEARNING_RATE_DEFAULT,
                        help='Learning rate')
    parser.add_argument('--max_steps', type = int, default = MAX_STEPS_DEFAULT,
                        help='Number of steps to run trainer.')
    parser.add_argument('--batch_size', type = int, default = BATCH_SIZE_DEFAULT,
                        help='Batch size to run trainer.')
    parser.add_argument('--print_freq', type = int, default = PRINT_FREQ_DEFAULT,
                        help='Frequency of evaluation on the train set')
    parser.add_argument('--eval_freq', type = int, default = EVAL_FREQ_DEFAULT,
                        help='Frequency of evaluation on the test set')
    parser.add_argument('--refine_after_k', type = int, default = REFINE_AFTER_K_STEPS_DEFAULT,
                        help='Number of steps after which to refine VGG model parameters (default 0).')
    parser.add_argument('--checkpoint_freq', type = int, default = CHECKPOINT_FREQ_DEFAULT,
                        help='Frequency with which the model state is saved.')
    parser.add_argument('--data_dir', type = str, default = DATA_DIR_DEFAULT,
                        help='Directory for storing input data')
    parser.add_argument('--log_dir', type = str, default = LOG_DIR_DEFAULT,
                        help='Summaries log directory')
    parser.add_argument('--checkpoint_dir', type = str, default = CHECKPOINT_DIR_DEFAULT,
                        help='Checkpoint directory')
    # FLAGS is read as a module-level global by train(), train_step() etc.
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run()
| frhrdr/dlc2016 | practical_3/retrain_vgg.py | retrain_vgg.py | py | 10,700 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "tensorflow.train.AdamOptimizer",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 50,
"usage_type": "call"
},
{
"api_na... |
410387277 | """gistandard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.views.static import serve
from gistandard.settings import MEDIA_ROOT
import xadmin
from users.views_user import LoginView, IndexView, LogoutView
from system.views import SystemView
from adm.views import AdmView
from personal import views as personal_views
from personal import views_work_order as order
urlpatterns = [
    # Admin backend and media file serving.
    url(r'^xadmin/', xadmin.site.urls),
    url(r'^media/(?P<path>.*)$', serve, {"document_root": MEDIA_ROOT}),
    url(r'^$', IndexView.as_view(), name='index'),
    # User login
    url(r'^login/$', LoginView.as_view(), name='login'),
    url(r'^logout/$', LogoutView.as_view(), name="logout"),
    # System management section.
    url(r'^system/$', SystemView.as_view(), name="system"),
    url(r'^system/basic/', include('users.urls', namespace='system-basic')),
    url(r'^system/rbac/', include('rbac.urls', namespace='system-rbac')),
    url(r'^system/tools/', include('system.urls', namespace='system-tools')),
    # Administration (adm) section.
    url(r'^adm/$', AdmView.as_view(), name="adm-main"),
    url(r'^adm/bsm/', include('adm.urls', namespace='adm-bsm')),
    url(r'^adm/equipment/', include('adm.urls_equipment', namespace='adm-equipment')),
    url(r'^adm/asset/', include('adm.urls_asset', namespace='adm-asset')),
    # Personal workspace: profile, avatar, password, phone book.
    url(r'^personal/$', personal_views.PersonalView.as_view(), name="personal"),
    url(r'^personal/userinfo', personal_views.UserInfoView.as_view(), name="personal-user_info"),
    url(r'^personal/uploadimage', personal_views.UploadImageView.as_view(), name="personal-uploadimage"),
    url(r'^personal/passwordchange', personal_views.PasswdChangeView.as_view(), name="personal-passwordchange"),
    url(r'^personal/phonebook', personal_views.PhoneBookView.as_view(), name="personal-phonebook"),
    # Work orders created by the current user (Icrt -- presumably "I created";
    # confirm with the project's naming convention).
    url(r'^personal/workorder_Icrt/$', order.WorkOrderView.as_view(), name="personal-workorder_Icrt"),
    url(r'^personal/workorder_Icrt/list', order.WorkOrderListView.as_view(), name="personal-workorder-list"),
    url(r'^personal/workorder_Icrt/create', order.WorkOrderCreateView.as_view(), name="personal-workorder-create"),
    url(r'^personal/workorder_Icrt/detail', order.WorkOrderDetailView.as_view(), name="personal-workorder-detail"),
    url(r'^personal/workorder_Icrt/delete', order.WorkOrderDeleteView.as_view(), name="personal-workorder-delete"),
    url(r'^personal/workorder_Icrt/update', order.WorkOrderUpdateView.as_view(), name="personal-workorder-update"),
    # Work orders pending approval/dispatch.
    url(r'^personal/workorder_app/$', order.WorkOrderView.as_view(), name="personal-workorder_app"),
    url(r'^personal/workorder_app/send', order.WrokOrderSendView.as_view(), name="personal-workorder-send"),
    # Work orders received for execution.
    url(r'^personal/workorder_rec/$', order.WorkOrderView.as_view(), name="personal-workorder_rec"),
    url(r'^personal/workorder_rec/execute', order.WorkOrderExecuteView.as_view(), name="personal-workorder-execute"),
    url(r'^personal/workorder_rec/finish', order.WorkOrderFinishView.as_view(), name="personal-workorder-finish"),
    url(r'^personal/workorder_rec/upload', order.WorkOrderUploadView.as_view(), name="personal-workorder-upload"),
    url(r'^personal/workorder_rec/return', order.WorkOrderReturnView.as_view(), name="personal-workorder-return"),
    url(r'^personal/workorder_Icrt/upload', order.WorkOrderProjectUploadView.as_view(),
        name="personal-workorder-project-upload"),
    url(r'^personal/workorder_all/$', order.WorkOrderView.as_view(), name="personal-workorder_all"),
    # Work-order document archive.
    url(r'^personal/document/$', order.WorkOrderDocumentView.as_view(), name="personal-document"),
    url(r'^personal/document/list', order.WorkOrderDocumentListView.as_view(), name="personal-document-list"),
]
| RobbieHan/gistandard | gistandard/urls.py | urls.py | py | 4,296 | python | en | code | 546 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "xadmin.site",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "django.conf.urls.url",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.vie... |
34846072669 | #!/usr/bin/env python
# coding: utf-8
# refer to https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/
#
# to tune parameters
# refer to http://yangguang2009.github.io/2017/01/08/deeplearning/grid-search-hyperparameters-for-deep-learning/
# In[1]:
from __future__ import print_function
import json
import numpy as np
import os
import pandas as pd
import urllib
import math
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
# connect to poloniex's API
# BUGFIX: '&currencyPair' had been corrupted to '\xa4cyPair' by an HTML-entity
# decoding pass ('&curren' -> U+00A4); restored so the query string is valid.
url = 'https://poloniex.com/public?command=returnChartData&currencyPair=USDT_BTC&start=1546300800&end=9999999999&period=300&resolution=auto'
# parse json returned from the API to Pandas DF
openUrl = urllib.request.urlopen(url)
r = openUrl.read()
openUrl.close()
d = json.loads(r.decode())
df = pd.DataFrame(d)
# Rename the API's lowercase OHLCV columns to the capitalized scheme used below.
original_columns=[u'date', u'close', u'high', u'low', u'open', u'volume']
new_columns = ['Timestamp','Close','High','Low','Open','Volume']
df = df.loc[:,original_columns]
df.columns = new_columns
# Keep a local CSV snapshot of the fetched candles.
df.to_csv('bitcoin201901to201905.csv',index=None)
# In[2]:
df = df.set_index('Timestamp')
df.head()
# In[3]:
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import seaborn as sns
import numpy as np
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Frame a (multivariate) time series as a supervised-learning table.

    Columns are named var<j>(t-<i>) for the n_in lag steps and var<j>(t),
    var<j>(t+<i>) for the n_out forecast steps. Rows containing NaN from the
    shifting are dropped when dropnan is True.
    """
    n_vars = 1 if type(data) is list else data.shape[1]
    frame = DataFrame(data)
    shifted, headers = [], []
    # input sequence (t-n, ... t-1)
    for lag in range(n_in, 0, -1):
        shifted.append(frame.shift(lag))
        headers.extend('var%d(t-%d)' % (var + 1, lag) for var in range(n_vars))
    # forecast sequence (t, t+1, ... t+n)
    for step in range(n_out):
        shifted.append(frame.shift(-step))
        if step:
            headers.extend('var%d(t+%d)' % (var + 1, step) for var in range(n_vars))
        else:
            headers.extend('var%d(t)' % (var + 1) for var in range(n_vars))
    # put it all together
    result = concat(shifted, axis=1)
    result.columns = headers
    # drop rows with NaN values
    if dropnan:
        result.dropna(inplace=True)
    return result
# In[4]:
# Quick look at the raw closing-price series.
pyplot.plot(df['Close'].values, label='price')
pyplot.legend()
pyplot.show()
# In[5]:
# Correlation heatmap across the OHLCV columns.
sns.heatmap(df.corr(), annot=True, cmap='RdYlGn', linewidths=0.1, vmin=0)
# In[6]:
# load dataset
#dataset = read_csv('update_20190301_bitbank_f.csv', header=0, index_col=0)
#values = dataset.values
#dataset.head()
# Use only the closing price as the single feature, shaped (n, 1).
values = df['Close'].values
values = values.reshape(-1, 1)
print(values)
# In[7]:
# ensure all data is float
values = values.astype('float32')
# normalize features
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
# frame as supervised learning
reframed = series_to_supervised(scaled, 1, 1)
#test = series_to_supervised(values, 1, 1)
#print(test.head())
#print(test.shape)
# In[8]:
print(values.shape)
print(reframed.shape)
print('---------')
#print(reframed.columes)
# split into train and test sets
values = reframed.values
print(values.shape)
# First 70% of the timeline for training, the rest for testing.
n_train_rate = 0.7
n_train = values.shape[0] * n_train_rate
n_train = math.floor(n_train)
print(n_train)
train = values[:n_train, :]
test = values[n_train:, :]
# split into input and outputs
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
# In[9]:
import math
# drop columns we don't want to predict
# keep only the close column
#reframed.drop(reframed.columns[[6, 7, 8, 10, 11]], axis=1, inplace=True)
#print(reframed.head())
# NOTE(review): this cell repeats the split above verbatim (notebook export
# artifact); harmless but redundant.
# split into train and test sets
values = reframed.values
print(values.shape)
n_train_rate = 0.7
n_train = values.shape[0] * n_train_rate
n_train = math.floor(n_train)
print(n_train)
train = values[:n_train, :]
test = values[n_train:, :]
# split into input and outputs
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
# In[10]:
#!pip install tqdm --upgrade
#!pip install hyperopt --upgrade
#!pip install hyperas --upgrade
type(train_X)
# In[16]:
def data():
global train_X, test_X, train_y, test_y
return train_X, test_X, train_y, test_y
# design network
def model(train_X, train_Y, test_X, test_Y):
    """Hyperas search template: build, fit and score a one-layer LSTM.

    NOTE(review): the {{choice(...)}} markers are hyperas template syntax and
    are rewritten by optim.minimize before execution; this is not runnable as
    plain Python.  The body also reads the lower-case globals train_y/test_y
    instead of its train_Y/test_Y parameters -- confirm that is intended.
    """
    model = Sequential()
    model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
    model.add(Dense(1))
    model.compile(loss='mae', optimizer='adam')
    history = model.fit(train_X, train_y, epochs={{choice([10, 25, 50])}}, batch_size={{choice([8, 16, 32,50])}}, validation_data=(test_X, test_y), verbose=2, shuffle=False)
    # NOTE(review): evaluate() with a single loss returns a scalar, so this
    # two-way unpacking looks fragile -- verify against the Keras version used.
    score, acc = model.evaluate(test_X, test_y, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
# Run the hyperas/hyperopt search over the template above and report the
# best model found.
best_run, best_model = optim.minimize(model=model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials(),
                                      notebook_name='LSTMsinKeras-VirtualCurrency-Simple')
print("Evalutation of best performing model:")
print(best_model.evaluate(test_X, test_y))


# In[ ]:


# plot history
# NOTE(review): `history` is local to model() above; this cell only works in
# the original notebook execution context -- verify.
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()


# In[ ]:


# make a prediction
yhat = model.predict(test_X)
print('yhat.shape', yhat.shape, yhat[0:5, :])
test_X_reshape = test_X.reshape((test_X.shape[0], test_X.shape[2]))
print(test_X_reshape.shape, test_X_reshape[0:5, -7:])
# invert scaling for forecast
inv_yhat = concatenate((yhat, test_X_reshape[:, 1:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
print('inv_yhat.shape', inv_yhat.shape, inv_yhat[0:5, :])
inv_yhat = inv_yhat[:,0]
# invert scaling for actual
test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_X_reshape[:, 1:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
inv_y = inv_y[:,0]
# calculate RMSE
# inv_y holds next-step targets, so shift inv_yhat by one step before scoring
rmse = sqrt(mean_squared_error(inv_y[:-1], inv_yhat[1:]))
print('Test RMSE: %.3f' % rmse)


# In[ ]:


print(test_X.shape)
#print(range(test_X.shape))
#pyplot.plot( inv_y[-100:-1], label='predict')
#pyplot.plot( inv_yhat[-99:], label='actual')
pyplot.plot( inv_y, label='predict')
pyplot.plot( inv_yhat, label='actual')
pyplot.legend()
pyplot.show()
# directional accuracy of the forecast:
# sign (>0) of each first difference, for actual and predicted series
a = np.diff(inv_y) > 0
b = np.diff(inv_yhat) > 0
# fraction of positions where the signs agree
print(sum(a ==b)/a.shape[0])
# In[14]:
x = 6


def func():
    """Print and return the module-level ``x`` (reading needs no `global`)."""
    print(x)
    return x


func()
# In[ ]:
| dxcv/TradingAlgo | Multi-LSTM/LSTMsinKeras-VirtualCurrency-Simple.py | LSTMsinKeras-VirtualCurrency-Simple.py | py | 7,561 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pandas.DataFram... |
15868980621 | from collections import deque
import sys
# 4-neighbourhood offsets: (dx[i], dy[i]) steps down/up/left/right.
dx = [1,-1,0,0]
dy = [0,0,-1,1]
def iswall(x, y):
    """Return True when (x, y) lies inside the n x m grid and the cell is
    still walkable (a visited/blocked cell holds 0)."""
    inside = 0 <= x < n and 0 <= y < m
    # Short-circuit keeps the matrix access safe when out of bounds.
    return inside and matrix[x][y] != 0
def bfs(x, y):
    """Breadth-first flood from (x, y): every reachable walkable cell is
    stamped in-place with its step count from the start (start cell keeps
    its initial 1).  Returns the value written at the exit (n-1, m-1).

    Fix: removed two leftover debug `print(queue)` statements and seeds the
    deque directly with the start coordinate.
    """
    queue = deque([(x, y)])
    while queue:
        x, y = queue.popleft()
        for i in range(4):
            nx = x + dx[i]
            ny = y + dy[i]
            # Only step onto in-bounds cells still holding their initial 1.
            if iswall(nx, ny) and matrix[nx][ny] == 1:
                matrix[nx][ny] = matrix[x][y] + 1
                queue.append((nx, ny))
    return matrix[n - 1][m - 1]
# Read the grid dimensions, then run the search from the top-left corner.
n,m = map(int,input().split())
# NOTE(review): the maze itself is hard-coded to 3x3 below, so the n/m read
# here must be "3 3" -- presumably leftover from testing; verify.
matrix = [[1, 1, 0], [0, 1, 0], [0, 1, 1]]
print(bfs(0,0))
| HYEONAH-SONG/Algorithms | 파이썬 알고리즘 인터뷰/미로탈출.py | 미로탈출.py | py | 815 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 17,
"usage_type": "call"
}
] |
74491447145 | import time
import json
import datetime
# Shared "try again" message and the JSON files used for persistence.
invalid = "\n--Invalid response, please try again.--"
scheduleFile = "schedule.json"
assignmentFile = "assignment.json"
def load():
    """Draw a 40-dash divider one dash at a time, then end the line."""
    drawn = 0
    while drawn < 40:
        time.sleep(0.00000000000001)  # tiny pause so the bar looks animated
        print("-", end='', flush=True)
        drawn += 1
    print()
def unload():
    """Print a shrinking dash bar (40 dashes down to 1) and say goodbye."""
    for width in range(40, 0, -1):
        time.sleep(0.00000000000001)
        print('-' * width)
    print("--Goodbye!--")
def anythingElse():
    """Ask whether the user wants to keep going.

    Returns 1 when the user chooses to quit (after printing the goodbye
    banner); returns None when they choose to continue.
    """
    load()
    while True:
        userInput = input("Anything else?\n[0] - Yes\n[1] - No\n\nPlease choose an option: ")
        if userInput == "0":
            load()
            break
        elif userInput == "1":
            unload()
            return 1
        else:
            print(invalid)
def createSchedule():
    """Interactively build a named schedule and persist it to scheduleFile.

    Prompts for a unique schedule name, a class count between 1 and 7, then
    name/teacher/description for each period, and rewrites the JSON file.
    """
    with open(scheduleFile) as f:
        data = json.load(f)
    # Loop until the user supplies a name not already present.
    while True:
        sched = input("Schedule Name: ")
        if sched not in data:
            break
        print(f'"{sched}" is already a schedule. Please enter a different name.')
    data[sched] = {}
    # Loop until a valid integer in [1, 7] is entered.
    while True:
        number = input("Number of Classes (1 - 7): ")
        try:
            number = int(number)
            if number > 7 or number < 1:
                print(invalid)
            else:
                break
        except Exception:
            print(invalid)
    for i in range(1, number + 1):
        name = input(f"\nPeriod {i}: ")
        teacher = input("Teacher: ")
        description = input("Description: ")
        data[sched][name] = {}
        data[sched][name]["teacher"] = teacher
        data[sched][name]["description"] = description
    with open(scheduleFile, "w") as f:
        json.dump(data, f, indent=2)
    load()
    print(f'--Schedule "{sched}" created!--')
def seeSchedule():
    """List stored schedules and show the periods of the one selected.

    Loops until the user presses "e"; `num` doubles as the menu index and,
    after a selection, the period counter.
    NOTE(review): `num` is not reset at the top of the while loop, so on a
    second pass the menu indices start from the previous period count --
    looks like a latent bug; confirm.
    """
    with open(scheduleFile) as f:
        data = json.load(f)
    if len(data) < 1:
        print("--There are currently no schedules.--")
        return
    num = 0
    while True:
        lister = []
        for i in data:
            lister += [i]
            print(f"[{num}] {i}\nPeriods: {len(data[i])}\n")
            num += 1
        num -= 1
        userInput = input("Please choose a schedule (or press e to exit): ")
        if userInput == "e":
            return
        else:
            try:
                userInput = int(userInput)
                if userInput > -1 and userInput <= num:
                    num = 0
                    load()
                    for i in data[lister[userInput]]:
                        print(f"Period {num + 1}: {i}\nTeacher: {data[lister[userInput]][i]['teacher']}\nDescription: {data[lister[userInput]][i]['description']}\n")
                        num += 1
                    userInput = input("Enter any key to return: ")
                    load()
                else:
                    print(invalid)
            except Exception:
                print(invalid)
def deleteSchedule():
    """List stored schedules and delete the one the user selects, after a
    "13579" confirmation, rewriting scheduleFile.

    BUG FIX: previously this executed `del data[i]`, where `i` was the last
    key from the listing loop -- so it always deleted the *last* schedule
    instead of the selected one.  It now deletes `data[lister[userInput]]`.
    """
    with open(scheduleFile) as f:
        data = json.load(f)
    if len(data) < 1:
        print("--There are currently no schedules.--")
        return
    num = 0
    while True:
        lister = []
        for i in data:
            lister += [i]
            print(f"[{num}] {i}\nPeriods: {len(data[i])}\n")
            num += 1
        num -= 1
        userInput = input("Please choose a schedule to delete (or press e to exit): ")
        if userInput == "e":
            return
        else:
            print()
            try:
                userInput = int(userInput)
                if userInput > -1 and userInput <= num:
                    num = 0
                    confirm = input(f'Are you sure you want to delete "{lister[userInput]}"?\nEnter "13579" to confirm, or enter anything else to cancel: ')
                    if confirm == "13579":
                        load()
                        del data[lister[userInput]]
                        with open(scheduleFile, "w") as f:
                            json.dump(data, f, indent=2)
                        userInput = input("--Schedule has been deleted.--\n\nEnter any key to return: ")
                        print()
                        break
                    else:
                        return
                else:
                    print(invalid)
            except Exception:
                print(invalid)
def createAssignment():
    """Interactively create an assignment (unique name, class, future due
    date, description) and persist it to assignmentFile.

    BUG FIX: the prompt asks for mm/dd/yyyy but the date was constructed as
    datetime.date(parts[2], parts[1], parts[0]) -- i.e. dd/mm/yyyy, so valid
    US-style dates were rejected or misread.  It now parses month=parts[0],
    day=parts[1], year=parts[2], matching the prompt.
    """
    with open(assignmentFile) as f:
        data = json.load(f)
    while True:
        name = input("Assignment Name: ")
        if name not in data:
            break
        else:
            print(f'"{name}" is already an assignment. Please enter a different name.')
    classname = input("Class: ")
    # Loop until a well-formed date strictly in the future is entered; any
    # parse failure falls into the generic "invalid" message.
    while True:
        due = input('Due Date (mm/dd/yyyy): ')
        try:
            parts = due.split("/")
            s = datetime.date(int(parts[2]), int(parts[0]), int(parts[1]))
            n = datetime.datetime.now().date()
            if s > n and len(parts) == 3:
                break
            elif(s <= n):
                print("\n--That date has already passed. Please enter a different response.--")
            else:
                print(invalid)
        except Exception:
            print(invalid)
    description = input("Description: ")
    data[name] = {}
    data[name]["class"] = classname
    data[name]["due"] = due
    data[name]["description"] = description
    with open(assignmentFile, "w") as f:
        json.dump(data, f, indent=2)
    load()
    print(f'--Assignment "{name}" created!--')
def seeAssignment():
    """Print every stored assignment with its class, due date, description.

    NOTE(review): "{i}{len(data[i])}" glues the field count directly onto the
    assignment name in the output -- presumably leftover debugging (the same
    pattern appears in deleteAssignment); confirm whether it is intended.
    """
    with open(assignmentFile) as f:
        data = json.load(f)
    if len(data) < 1:
        print("--There are currently no assignments.--")
        return
    num = 0
    for i in data:
        print(f"[{num}] Assignment: {i}{len(data[i])}\n{' ' * len(str(len(data[i])))} Class: {data[i]['class']}\n{' ' * len(str(len(data[i])))} Due Date: {data[i]['due']}\n{' ' * len(str(len(data[i])))} Description: {data[i]['description']}\n")
        num += 1
    userInput = input("Press any key to return: ")
def deleteAssignment():
    """List assignments and delete the one the user selects (after a
    "13579" confirmation), rewriting assignmentFile."""
    with open(assignmentFile) as f:
        data = json.load(f)
    lister = [x for x in data]
    if len(data) < 1:
        print("--There are currently no assignments.--")
        return
    num = 0
    for i in data:
        print(f"[{num}] Assignment: {i}{len(data[i])}\n{' ' * len(str(len(data[i])))} Class: {data[i]['class']}\n{' ' * len(str(len(data[i])))} Due Date: {data[i]['due']}\n{' ' * len(str(len(data[i])))} Description: {data[i]['description']}\n")
        num += 1
    num -= 1
    while True:
        try:
            userInput = input("Please choose an assignment to delete (or press e to exit): ")
            if userInput == "e":
                return
            elif int(userInput) > -1 and int(userInput) <= num:
                confirm = input(f'\nAre you sure you want to delete "{lister[int(userInput)]}"?\nEnter "13579" to confirm, or enter anything else to cancel: ')
                if confirm == "13579":
                    del data[lister[int(userInput)]]
                    with open(assignmentFile, "w") as f:
                        json.dump(data, f, indent=2)
                    userInput = input("--Assignment has been deleted.--\n\nEnter any key to return: ")
                    print()
                    break
            else:
                print(invalid)
        except Exception as e:
            # Surfaces unexpected errors (e.g. non-numeric input) and reloops.
            print(e)
def programChoice():
    """Main menu loop: dispatch to the schedule/assignment actions until the
    user declines to continue.

    BUG FIX: menu entries [4] and [5] were mislabelled ("Create an
    assignment" / "Delete a schedule") although they dispatch to
    seeAssignment and deleteAssignment; the labels now match the actions.
    """
    # Option -> handler table replaces the repetitive if/elif chain; the
    # flow per choice is unchanged: load(), run, then offer to continue.
    actions = {
        "0": createSchedule,
        "1": seeSchedule,
        "2": deleteSchedule,
        "3": createAssignment,
        "4": seeAssignment,
        "5": deleteAssignment,
    }
    while True:
        userInput = input("[0] - Create a schedule\n[1] - See existing schedules\n[2] - Delete a schedule\n[3] - Create an assignment\n[4] - See existing assignments\n[5] - Delete an assignment\n\nPlease choose the program you would like to use: ")
        action = actions.get(userInput)
        if action is None:
            print(invalid)
            continue
        load()
        action()
        if anythingElse() == 1:
            break
def main():
    """Entry point: greet the user, then either start the menu or quit."""
    print("\n\n-----Welcome to Scheduler.py, a program made to schedule classes and assignments.-----")
    while True:
        userInput = input("[0] - Begin\n[1] - Quit\n\nPlease choose an option: ")
        if userInput == "0":
            load()
            programChoice()
            break
        elif userInput == "1":
            unload()
            break
        else:
            print(invalid)


# Run immediately on import/execution (no __main__ guard in the original).
main()
| BenVN123/PythonScheduler | scheduler.py | scheduler.py | py | 8,943 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "time.sleep",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 61,
... |
32766043528 | #!/urs/bin/python
#-*- coding:utf8 -*-
from bs4 import BeautifulSoup as bs
import urllib
import re
import json
import os
def get_musicid(url):
    """Fetch *url* and return the set of song ids found in /song/<id> links.

    (Python 2 code: uses urllib.urlopen directly.)
    """
    #url='http://music.baidu.com/top/dayhot'
    page = urllib.urlopen(url).read()
    soup = bs(page, 'lxml', from_encoding='utf8')
    anchors = soup.findAll('a', href=re.compile(r'/song/(\d+)'))
    # The song id is the last path component of each matching href.
    return {a['href'].split('/')[-1] for a in anchors}
def parser(api):
    """Call the ting API endpoint and return (title, format, download link).

    The endpoint answers with JSONP; the payload between the outer
    parentheses is extracted with a regex and decoded as JSON.
    Assumes the response carries 'songinfo' and 'bitrate' sections with
    these exact keys -- TODO confirm against the live API.
    """
    #api="http://musicapi.qianqian.com/v1/restserver/ting?method=baidu.ting.song.play&format=jsonp&callback=jQuery17208098337996053833_1513859108469&songid=%s&_=1513859109906" % musicid
    html=urllib.urlopen(api).read()
    # Strip the JSONP callback wrapper: keep what is inside the parentheses.
    data = re.findall(r'\((.*)\)',html)[0]
    jsondata = json.loads(data)
    songtitle=jsondata['songinfo']['title']
    songdownloadlink=jsondata['bitrate']['file_link']
    songformat=jsondata['bitrate']['file_extension']
    #print(jsondata)
    return songtitle,songformat,songdownloadlink
def music_download(filename, downloadlink):
    """Download *downloadlink* into ./music/<filename>, skipping files that
    already exist.

    BUG FIX: the original put directory creation and the download in one
    if/elif chain, so on the very first run (when the music directory had to
    be created) the download was silently skipped.  Directory creation and
    the existence check are now independent steps.
    """
    dir = os.getcwd() + '/music/'
    path = dir + filename
    if os.path.exists(dir) == False:
        os.makedirs(dir)
    if os.path.isfile(path) == False:
        urllib.urlretrieve(downloadlink, path)
# Crawl the daily hot chart, then resolve and download each song; any
# per-song failure is swallowed so the loop keeps going.
url='http://music.baidu.com/top/dayhot'
musicidlist = get_musicid(url)
# num = 1
for songid in musicidlist:
    try:
        api = "http://musicapi.qianqian.com/v1/restserver/ting?method=baidu.ting.song.play&format=jsonp&callback=jQuery17208098337996053833_1513859108469&songid=%s&_=1513859109906"%songid
        songtitle,songformat,songdownloadlink=parser(api)
        filename=songtitle+'.'+songformat
        music_download(filename,songdownloadlink)
        print(songtitle+' downloaded successfully!')
        # num+=1
        # if num>10:
        #     break
    except:
        print('download fail')
#parser(api)
#print(musicidlist)
#print(musicidlist)
| carloszo/Carlos_python | Crawler/BaiduMusicCrawler.py | BaiduMusicCrawler.py | py | 1,916 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib.urlopen",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "urllib.urlopen",
"lin... |
27037909109 | import torch
from torch import nn
from fuxictr.pytorch.models import MultiTaskModel
from fuxictr.pytorch.layers import FeatureEmbedding, MLP_Block
class SharedBottom(MultiTaskModel):
    """Shared-Bottom multi-task model: one MLP trunk shared by all tasks over
    the flattened feature embeddings, plus one small MLP tower (scalar
    output) per task.

    NOTE(review): several defaults (task, bottom_hidden_units,
    tower_hidden_units) are mutable lists; they are only read here, but
    consider None-sentinels if this API evolves.
    """

    def __init__(self,
                 feature_map,
                 model_id="SharedBottom",
                 gpu=-1,
                 task=["binary_classification"],
                 num_tasks=1,
                 loss_weight='EQ',
                 learning_rate=1e-3,
                 embedding_dim=10,
                 bottom_hidden_units=[64, 64, 64],
                 tower_hidden_units=[64, ],
                 hidden_activations="ReLU",
                 net_dropout=0,
                 batch_norm=False,
                 embedding_regularizer=None,
                 net_regularizer=None,
                 **kwargs):
        # kwargs must supply "optimizer" and "loss" (consumed by compile()).
        super(SharedBottom, self).__init__(feature_map,
                                           task=task,
                                           loss_weight=loss_weight,
                                           num_tasks=num_tasks,
                                           model_id=model_id,
                                           gpu=gpu,
                                           embedding_regularizer=embedding_regularizer,
                                           net_regularizer=net_regularizer,
                                           **kwargs)
        self.embedding_layer = FeatureEmbedding(feature_map, embedding_dim)
        # Shared trunk over the flattened (num_fields * embedding_dim) input.
        self.bottom = MLP_Block(input_dim=embedding_dim * feature_map.num_fields,
                                hidden_units=bottom_hidden_units,
                                hidden_activations=hidden_activations,
                                output_activation=None,
                                dropout_rates=net_dropout,
                                batch_norm=batch_norm)
        # One tower per task, each mapping the trunk output to a single logit.
        self.tower = nn.ModuleList([MLP_Block(input_dim=bottom_hidden_units[-1],
                                              output_dim=1,
                                              hidden_units=tower_hidden_units,
                                              hidden_activations=hidden_activations,
                                              output_activation=None,
                                              dropout_rates=net_dropout,
                                              batch_norm=batch_norm)
                                    for _ in range(num_tasks)])
        self.compile(kwargs["optimizer"], kwargs["loss"], learning_rate)
        self.reset_parameters()
        self.model_to_device()

    def forward(self, inputs):
        """Return {"<label>_pred": tensor} for every task label, applying the
        per-task output activation to each tower's logit."""
        X = self.get_inputs(inputs)
        feature_emb = self.embedding_layer(X)
        bottom_output = self.bottom(feature_emb.flatten(start_dim=1)) # (?, bottom_hidden_units[-1])
        tower_output = [self.tower[i](bottom_output) for i in range(self.num_tasks)]
        y_pred = [self.output_activation[i](tower_output[i]) for i in range(self.num_tasks)]
        return_dict = {}
        labels = self.feature_map.labels
        for i in range(self.num_tasks):
            return_dict["{}_pred".format(labels[i])] = y_pred[i]
        return return_dict
| xue-pai/FuxiCTR | model_zoo/multitask/SharedBottom/src/SharedBottom.py | SharedBottom.py | py | 3,155 | python | en | code | 671 | github-code | 36 | [
{
"api_name": "fuxictr.pytorch.models.MultiTaskModel",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "fuxictr.pytorch.layers.FeatureEmbedding",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "fuxictr.pytorch.layers.MLP_Block",
"line_number": 35,
"usage... |
473540501 | #!/usr/bin/env python
import musescore_parser as mp
import sys
from fractions import Fraction
from dataclasses import dataclass, field
from typing import Optional
import re
#https://github.com/OpenLilyPondFonts/lilyjazz/blob/master/JazzSampler.pdf
@dataclass
class Base:
    """Dataclass mixin that echoes every freshly built instance to stdout as
    a LilyPond comment ("%% ...") for tracing."""

    def __post_init__(self):
        # "%%" makes the echoed repr a comment in the generated LilyPond.
        print("%%", self)
@dataclass
class LyricHandler(Base):
    """Pairs one note with its lyric syllable plus hyphen/melisma
    bookkeeping, as filled in by LilypondGenerator.get_lyric."""

    note_duration: Optional[Fraction] = None      # length of the carrying note
    text: Optional[str] = None                    # lyric syllable (None = none)
    note_pitch: Optional[str] = None              # "c" for a sung note, "r" for a rest
    extender_line: Optional[str] = None           # "--" (hyphen) or "__" (melisma)
    extender_duration: Optional[Fraction] = None  # total melisma length, from ticks_f
    slur: Optional[str] = None                    # "(" / ")" bracketing a melisma
# MuseScore key-signature accidental count -> LilyPond (German) key name.
parser_key_signature = {
    '-7' : 'ces',
    '-6' : 'ges',
    '-5' : 'des',
    '-4' : 'as',
    '-3' : 'es',
    '-2' : 'b',
    '-1' : 'f',
    '0' : 'c',
    '1' : 'g',
    '2' : 'd',
    '3' : 'a',
    '4' : 'e',
    '5' : 'h',
    '6' : 'fis',
    '7' : 'cis',
}

# Time signature -> duration filling one whole measure.
parser_key_signature_duration = {
    '4/4': "1",
    '3/4': "2.",
    '2/4': "2",
}

# MuseScore durationType name -> fraction of a whole note (as a string).
parser_duration_fractions = {
    'whole' : "4/4",
    'half' : "2/4",
    'quarter' : "1/4",
    'eighth' : "1/8",
    '16th' : "1/16",
    '32nd' : "1/32",
    '64th' : "1/64"
}

# MuseScore tonal pitch class (tpc) -> LilyPond (German) note name;
# '' maps to the skip "s".
parser_tpc = {
    '' : 's',
    '-1' : 'feses',
    '0' : 'ceses',
    '1' : 'geses',
    '2' : 'deses',
    '3' : 'ases',
    '4' : 'eses',
    '5' : 'bes',
    '6' : 'fes',
    '7' : 'ces',
    '8' : 'ges',
    '9' : 'des',
    '10' : 'as',
    '11' : 'es',
    '12' : 'b',
    '13' : 'f',
    '14' : 'c',
    '15' : 'g',
    '16' : 'd',
    '17' : 'a',
    '18' : 'e',
    '19' : 'h',
    '20' : 'fis',
    '21' : 'cis',
    '22' : 'gis',
    '23' : 'dis',
    '24' : 'ais',
    '25' : 'eis',
    '26' : 'his',
    '27' : 'fisis',
    '28' : 'cisis',
    '29' : 'gisis',
    '30' : 'disis',
    '31' : 'aisis',
    '32' : 'eisis',
    '33' : 'hisis'
}

# MuseScore BarLine subtype -> LilyPond \bar string.
parser_barline = {
    "startRepeat" : ".|:",
    "endRepeat" : ":|.",
    "double" : "||",
    "end" : "|."
}

# MuseScore clef type -> LilyPond clef name ('' defaults to treble).
parser_clefs = {
    "G8vb" : "tenorG",
    "F" : "bass",
    '' : "treble",
    'G' : "treble"
}

# Staff id / lyric number -> spelled-out suffix used in LilyPond identifiers
# (LilyPond variable names cannot contain digits).
parser_name = {
    "": "Zero",
    "0": "Zero",
    "1": "One",
    "2": "Two",
    "3": "Three",
    "4": "Four",
    "5": "Five",
    "6": "Six",
}

# Dot count -> duration multiplier (each dot adds half the previous value).
parser_dots_fractions = {
    "": 1,
    "1": 1 + 1/2,
    "2": 1 + 1/2 + 1/2/2,
    "3": 1 + 1/2 + 1/2/2 + 1/2/2/2,
    "4": 1 + 1/2 + 1/2/2 + 1/2/2/2 + 1/2/2/2/2,
}

# Fraction of a whole note (str(Fraction)) -> LilyPond duration token.
parser_fraction_to_duration = {
    "1": "1",
    "1/1": "1",
    "1/2": "2",
    "1/4": "4",
    "2/4": "2",
    "3/4": "2.",
    "1/8": "8",
    "3/8": "4.",
    "7/8": "2..",
    "1/16": "16",
    "3/16": "8.",
    "7/16": "4..",
    "15/16": "2...",
    "1/32": "32",
    "3/32": "16.",
    "7/32": "8..",
    "15/32": "4...",
    "1/64": "64",
    "3/64": "32.",
}

# Measure endRepeat attribute -> bar glyph.
parse_measure_end_repeat = {
    "2": ":|."
}

#https://github.com/OpenLilyPondFonts/lilyjazz/blob/master/JazzSampler.pdf
# MuseScore chord-symbol text -> LilyPond \chordmode suffix (lilyjazz names).
parse_chord_names = {
    "m7": "m7",
    "(add9)": "9^7",
    "7": "7",
    "m6": "m6",
    "dim6": "dim6",
    "dim7": "dim7",
    "dim": "dim",
    "m7(11)": "m7.11",
    "6": "6",
    "Maj9": "maj9",
    "7(b9)": "9-",
    "m": "m",
    "0": "m7.5-",
    "7(#9)": "9+",
    "o7": "dim7",
    "7(#5)": "7.5+",
    "(b5)": "dim",
    "sus4": "sus4",
    "7sus4": "sus4.7"
}
# Relative-pitch state for get_pitch: the previously emitted note.
last_pitch = 60   # starting reference pitch (presumably MIDI middle C -- confirm)
last_tpc = 14     # tonal pitch class of the previous note (parser_tpc['14'] == 'c')

def get_pitch(pitch, tpc):
    """Convert a MuseScore pitch/tpc pair into a LilyPond \\relative token.

    Emits the note name from parser_tpc plus as many octave marks (' or ,)
    as the interval from the previous note requires in relative mode.  The
    6-semitone/6-tpc-step special cases handle tritone spellings that sit
    exactly on an octave-mark boundary.  Updates the module-level
    last_pitch/last_tpc as a side effect.
    """
    global last_pitch, last_tpc
    line = parser_tpc[tpc]
    pitch_diff = int(pitch) - int(last_pitch)
    tcp_diff = int(tpc) - int(last_tpc)
    last_pitch = pitch
    last_tpc = tpc
    #print("%%%% pitch_diff %s, last_pitch %s, pitch %s, tcp_diff %s" % (pitch_diff, last_pitch, pitch, tcp_diff))
    #TODO: clean up this mess
    if (pitch_diff >= 6 and pitch_diff < 18):
        if (pitch_diff == 6 and tcp_diff == 6):
            #print("%% pitch_diff > but exception")
            line += ""
        else:
            #print("%% pitch_diff >")
            line += "'"
    elif (pitch_diff >= 18 and pitch_diff < 30):
        if (pitch_diff == 18 and tcp_diff == 6):
            #print("%% pitch_diff >> but exception")
            line += "'"
        else:
            #print("%% pitch_diff >>")
            line += "''"
    elif (pitch_diff >= 30):
        if (pitch_diff == 30 and tcp_diff == 6):
            #print("%% pitch_diff >>> but exception")
            line += "''"
        else:
            #print("%% pitch_diff >>>")
            line += "'''"
    elif (pitch_diff <= -6 and pitch_diff > -18):
        if (pitch_diff == -6 and tcp_diff == -6):
            #print("%% pitch_diff < but exception")
            line += ""
        else:
            #print("%% pitch_diff <")
            line += ","
    elif (pitch_diff <= -18 and pitch_diff > -30):
        if (pitch_diff == -18 and tcp_diff == -6):
            #print("%% pitch_diff << but exception")
            line += ","
        else:
            #print("%% pitch_diff <<")
            line += ",,"
    elif (pitch_diff <= -30):
        if (pitch_diff == -30 and tcp_diff == -6):
            #print("%% pitch_diff <<< but exception")
            line += ",,"
        else:
            #print("%% pitch_diff <<<")
            line += ",,,"
    return line
class LilypondGenerator(mp.MuseScoreParser):
    """Walk a parsed MuseScore document (via musescore_parser) and render it
    as LilyPond source: prologue, header, per-staff notes, chord names,
    aligned lyrics, score block and an optional stanza text frame.

    All emitters return lists of output lines; get_file() concatenates them.
    """

    def get_head(self):
        """Return the fixed LilyPond prologue (version, includes, helpers)."""
        string = []
        string.append("\\version \"2.24.1\"")
        string.append("\\include \"deutsch.ly\"")
        string.append("jazzChords = { \\semiGermanChords }")
        string.append("aFourL = {}")
        string.append("%\\include \"../config/include.ily\"")
        string.append("markMoj = #(define-music-function (letter) (string?) #{ \\mark \\markup { \\box \\bold #letter } #})")
        string.append("")
        string.append("\layout {")
        string.append(" indent = 0")
        string.append("}")
        return string

    def get_header(self):
        """Build the \\header block from the first staff's VBox frames
        (title, composer, lyricist, part number), with defaults when the
        lyricist/part frames are absent."""
        string = []
        string.append("\header {")
        string.append(" titlex = \"Pjevajte Jahvi\"")
        poet_found = False
        part_found = False
        for e in self.staffs[0].children:
            if isinstance(e, mp.VBox):
                if e.style == "Title":
                    string.append(f" title = \"%s\"" % e.text.upper())
                elif e.style == "Composer":
                    string.append(" composer = \"%s\"" % e.text)
                elif e.style == "Lyricist":
                    string.append(" %%poet = \"%s\"" % e.text)
                    string.append(" style = \"%s\"" % e.text)
                    poet_found = True
                elif e.style == "Instrument Name (Part)":
                    string.append(" %%meter = \"%s\"" % e.text)
                    string.append(" broj = \"%s\"" % e.text)
                    part_found = True
        if not poet_found:
            string.append(" style = \"\"")
        if not part_found:
            string.append(" broj = \"1\"")
        string.append(" %tagline = \\markup { \\override #'(font-name . \"JohnSans White Pro\") \\override #'(font-size . -3) { Izvorno: Name, Album } }")
        string.append("}")
        return string

    def get_paper(self):
        """Return the (mostly commented-out) \\paper block."""
        string = []
        string.append("\\paper {")
        string.append(" \\aFourL")
        string.append(" %min-systems-per-page = #7")
        string.append(" %annotate-spacing = ##t")
        string.append(" %system-system-spacing.padding = #3.2")
        string.append(" %page-breaking = #ly:one-page-breaking")
        string.append(" %last-bottom-spacing.minimum-distance = #8")
        string.append("}")
        return string

    def get_staff_start(self, staff):
        """Open the per-staff music variable (staffOne = \\relative c' {)."""
        string = []
        string.append("staff%s = \\relative c' {" % parser_name[staff.id])
        return string

    def get_staff_end(self):
        """Close the per-staff music variable."""
        string = []
        string.append("}")
        return string

    def fractions_add_missing(self, bar, time_signature):
        """Pad *bar* with one extra Fraction so its durations sum to the
        time signature (fills gaps left by harmony-only bars)."""
        fraction_sum = Fraction(0)
        for e in bar:
            if isinstance(e, Fraction):
                fraction_sum += e
        if fraction_sum != time_signature:
            bar.append(time_signature - fraction_sum)
        return bar

    def fractions_sum_neighbor(self, bar):
        """Collapse runs of adjacent Fractions into their sum, leaving
        non-Fraction elements (e.g. Harmony objects) in place."""
        summed_bar = []
        fraction = None
        for e in bar:
            if isinstance(e, Fraction):
                if fraction is not None:
                    fraction += e
                else:
                    fraction = e
            else:
                if fraction is not None:
                    summed_bar.append(fraction)
                    fraction = None
                summed_bar.append(e)
        if fraction is not None:
            summed_bar.append(fraction)
            fraction = None
        return summed_bar

    def fractions_add_skip_if_bar_starts_with_fraction(self, bar):
        """Prefix a skip token "s" when the bar opens with a bare duration
        (a duration must attach to some pitch/skip in LilyPond)."""
        if len(bar) > 0 and isinstance(bar[0], Fraction):
            bar.insert(0, "s")
        return bar

    def fractions_convert_bar_with_fractions_to_ly(self, bar, lyrics=False):
        """Serialize a mixed bar (strings + Fractions) into one LilyPond
        line; Fractions become duration tokens unless *lyrics* is set."""
        line = ""
        for e in bar:
            if isinstance(e, Fraction):
                if not lyrics:
                    line += parser_fraction_to_duration[str(e)]
                    line += " "
            else:
                line += e
                if lyrics:
                    line += " "
                # Keywords need a separating space; pitches glue to durations.
                if "bar" in e or "mark" in e or "clef" in e or "repeat" in e:
                    line += " "
                if "{" in e or "}" in e:
                    line += " "
        return line

    def fractions_convert_harmony_bar_with_fractions_to_ly(self, bar):
        """Serialize a harmony bar: each Fraction closes the pending Harmony
        (root + duration + chord name + bass), bare Fractions become plain
        durations (for the leading skip)."""
        line = ""
        harmony = None
        for e in bar:
            if isinstance(e, Fraction):
                if harmony is not None:
                    line += parser_tpc[harmony.root]
                line += parser_fraction_to_duration[str(e)]
                if harmony is not None:
                    if harmony.name:
                        line += ":" + parse_chord_names[harmony.name]
                    if harmony.base:
                        line += "/" + parser_tpc[harmony.base]
                line += " "
                harmony = None
            elif isinstance(e, mp.Harmony):
                harmony = e
            else:
                line += e
        return line

    def get_staff_data(self, staff):
        """Render every Measure of *staff* to LilyPond note lines, handling
        time/key/tempo changes, rests, chords, ties, slurs, barlines,
        rehearsal marks, clefs, voltas, tuplets and pickup bars."""
        string = []
        for sc in staff.children:
            if isinstance(sc, mp.Measure):
                bar = []
                line = " "
                has_break = False
                for e in sc.children:
                    if isinstance(e, mp.TimeSig):
                        string.append(" \\time %s/%s" % (e.sig_n, e.sig_d))
                    elif isinstance(e, mp.Tempo):
                        # MuseScore stores tempo in beats per second.
                        string.append(" \\tempo 4 = %s" % int((60 * float(e.tempo))))
                    elif isinstance(e, mp.Rest):
                        if e.duration_type == "measure":
                            bar.append("r")
                            predicted_duration = Fraction(e.duration)
                            bar.append(predicted_duration)
                        else:
                            bar.append("r")
                            predicted_duration = Fraction(parser_duration_fractions[e.duration_type])
                            predicted_duration *= Fraction(parser_dots_fractions[e.dots])
                            bar.append(predicted_duration)
                    elif isinstance(e, mp.Chord):
                        bar.append(get_pitch(e.note_pitch, e.note_tpc))
                        predicted_duration = Fraction(parser_duration_fractions[e.duration_type])
                        predicted_duration *= Fraction(parser_dots_fractions[e.dots])
                        bar.append(predicted_duration)
                    elif isinstance(e, mp.KeySig):
                        # tpc 14 is C; the accidental count shifts along the
                        # circle of fifths.
                        tpc_value = str(14 + int(e.accidental))
                        string.append(" \\key %s \\major" % parser_tpc[tpc_value])
                    elif isinstance(e, mp.ChordNoteSpanner):
                        if e.type == "Tie":
                            if e.next_location_fractions or e.next_location_measures:
                                bar.append("~")
                    elif isinstance(e, mp.ChordSpanner):
                        if e.type == "Slur":
                            if e.next_location_fractions or e.next_location_measures:
                                bar.append("(")
                            elif e.prev_location_fractions or e.prev_location_measures:
                                bar.append(")")
                    elif isinstance(e, mp.BarLine):
                        bar.append("\\bar \"%s\"" % parser_barline[e.subtype])
                    elif isinstance(e, mp.RehearsalMark):
                        #text = "\\mark \\markup { \\box \\bold %s }" % e.text
                        #bar.append(text)
                        text = "\\markMoj \"%s\"" % e.text
                        #text = "\\markMoj"
                        bar.append(text)
                        #text = "%\\markMojPonn"
                        #bar.append(text)
                    elif isinstance(e, mp.Clef):
                        if e.concert_clef_type:
                            text = "\\clef %s" % parser_clefs[e.concert_clef_type]
                            bar.append(text)
                        elif e.transposing_clef_type:
                            text = "\\clef %s" % parser_clefs[e.transposing_clef_type]
                            bar.append(text)
                    elif isinstance(e, mp.LayoutBreak):
                        if e.subtype == "line":
                            has_break = True
                    elif isinstance(e, mp.VoltaSpanner):
                        if e.next_location_measures:
                            text = "\\set Score.repeatCommands = #\'((volta \"%s\"))" % e.begin_text
                            bar.append(text)
                        elif e.prev_location_measures:
                            text = "\\set Score.repeatCommands = #\'((volta #f))"
                            bar.append(text)
                    elif isinstance(e, mp.Tuplet):
                        text = "\\tuplet %s/%s {" % (e.actual_notes, e.normal_notes)
                        bar.append(text)
                    elif isinstance(e, mp.EndTuplet):
                        text = "}"
                        bar.append(text)
                #line += str(bar) + "\n "
                if sc.len:
                    # sc.len is set for pickup (anacrusis) measures.
                    line += "\\partial %s" % parser_fraction_to_duration[sc.len]
                    line += "\n "
                line += self.fractions_convert_bar_with_fractions_to_ly(bar)
                if sc.end_repeat:
                    line += "\\bar \"%s\"" % parse_measure_end_repeat[sc.end_repeat]
                    line += " "
                line += "|"
                #if has_break:
                #    line += " \\break"
                string.append(line)
        return string

    def get_harmony(self, staff):
        """Render the chord-name track of *staff* as a \\chordmode variable;
        chord durations are derived from the notes between Harmony marks."""
        string = []
        #harmony_found = False
        #for sc in staff.children:
        #    if isinstance(sc, mp.Measure):
        #        for e in sc.children:
        #            if isinstance(e, mp.Harmony):
        #                harmony_found = True
        #if not harmony_found:
        #    return string
        string.append("harmony%s = \chordmode {" % parser_name[staff.id])
        time_signature = None
        for sc in staff.children:
            if isinstance(sc, mp.Measure):
                bar = []
                line = " "
                for e in sc.children:
                    if isinstance(e, mp.TimeSig):
                        time_signature = Fraction(f"{e.sig_n}/{e.sig_d}")
                    elif isinstance(e, mp.Harmony):
                        bar.append(e)
                    elif isinstance(e, mp.Chord):
                        predicted_duration = Fraction(parser_duration_fractions[e.duration_type])
                        predicted_duration *= Fraction(parser_dots_fractions[e.dots])
                        bar.append(predicted_duration)
                    elif isinstance(e, mp.Rest):
                        if e.duration_type == "measure":
                            predicted_duration = Fraction(e.duration)
                            bar.append(predicted_duration)
                        else:
                            predicted_duration = Fraction(parser_duration_fractions[e.duration_type])
                            predicted_duration *= Fraction(parser_dots_fractions[e.dots])
                            bar.append(predicted_duration)
                    elif isinstance(e, mp.Location):
                        predicted_duration = Fraction(e.fractions)
                        bar.append(predicted_duration)
                if sc.len:
                    bar = self.fractions_add_missing(bar, Fraction(sc.len))
                else:
                    bar = self.fractions_add_missing(bar, time_signature)
                bar = self.fractions_sum_neighbor(bar)
                bar = self.fractions_add_skip_if_bar_starts_with_fraction(bar)
                line += self.fractions_convert_harmony_bar_with_fractions_to_ly(bar)
                #line += str(bar)
                line += "|"
                string.append(line)
        # force end bar
        string.append(" \\bar \"|.\"")
        string.append("}")
        return(string)

    def get_lyric_nos(self, staff):
        """Return the sorted list of distinct lyric verse numbers in *staff*."""
        nos = []
        for sc in staff.children:
            if isinstance(sc, mp.Measure):
                for e in sc.children:
                    if isinstance(e, mp.Lyrics):
                        if e.no not in nos:
                            nos.append(e.no)
        return sorted(nos)

    def fractions_swap_with_elements(self, bar):
        """Reorder a bar so each pending Fraction is emitted *after* the
        element that follows it (currently unused by get_file)."""
        swaped_bar = []
        fraction = None
        for e in bar:
            if isinstance(e, Fraction):
                if fraction is None:
                    fraction = e
                else:
                    swaped_bar.append(fraction)
                    fraction = e
            else:
                swaped_bar.append(e)
                if fraction is not None:
                    swaped_bar.append(fraction)
                    fraction = None
        if fraction is not None:
            swaped_bar.append(fraction)
            fraction = None
        return swaped_bar

    def get_lyric(self, staff, no):
        """Render verse *no* of *staff* as two variables: an invisible
        aligner voice (c/r placeholders carrying the rhythm, with slurs over
        melismas) and the \\lyricmode text with -- / __ extenders."""
        # Pass 1: pair each Chord/Rest with the lyric syllable (if any) into
        # LyricHandler records, one list per measure.
        bars = []
        for sc in staff.children:
            if isinstance(sc, mp.Measure):
                bar = []
                lyric_handler = LyricHandler()
                for e in sc.children:
                    if isinstance(e, mp.Lyrics):
                        if e.no == no:
                            #print(repr(e.text))
                            # Non-breaking space means a multi-word syllable;
                            # quote it so LilyPond keeps it together.
                            if "\xa0" in e.text:
                                lyric_handler.text = "\"%s\"" % e.text
                            else:
                                lyric_handler.text = e.text
                            if e.syllabic in ["begin", "middle"]:
                                lyric_handler.extender_line = "--"
                            if e.ticks_f and e.ticks:
                                predicted_duration = - Fraction(e.ticks_f)
                                lyric_handler.extender_line = "__"
                                lyric_handler.extender_duration = abs(predicted_duration)
                    elif isinstance(e, mp.Chord):
                        # A new note closes the previous handler.
                        if lyric_handler.note_duration is not None:
                            bar.append(lyric_handler)
                            lyric_handler = LyricHandler()
                        predicted_duration = Fraction(parser_duration_fractions[e.duration_type])
                        predicted_duration *= Fraction(parser_dots_fractions[e.dots])
                        lyric_handler.note_pitch = "c"
                        lyric_handler.note_duration = predicted_duration
                    elif isinstance(e, mp.Rest):
                        if e.duration_type == "measure":
                            if lyric_handler.note_duration is not None:
                                bar.append(lyric_handler)
                                lyric_handler = LyricHandler()
                            predicted_duration = Fraction(e.duration)
                            lyric_handler.note_pitch = "r"
                            lyric_handler.note_duration = predicted_duration
                        else:
                            if lyric_handler.note_duration is not None:
                                bar.append(lyric_handler)
                                lyric_handler = LyricHandler()
                            predicted_duration = Fraction(parser_duration_fractions[e.duration_type])
                            predicted_duration *= Fraction(parser_dots_fractions[e.dots])
                            lyric_handler.note_pitch = "r"
                            lyric_handler.note_duration = predicted_duration
                if lyric_handler.note_duration is not None and lyric_handler.text is not None:
                    bar.append(lyric_handler)
                    lyric_handler = LyricHandler()
                if lyric_handler.note_duration is not None:
                    bar.append(lyric_handler)
                    lyric_handler = LyricHandler()
                bars.append(bar)
        # add slurs for extender line and replace non text notes to rests
        extender_duration = None
        for bar in bars:
            #print("|")
            for b in bar:
                #print(" ", b)
                if b.text is not None:
                    if b.extender_duration:
                        extender_duration = b.extender_duration - b.note_duration
                        #print(extender_duration, "adding (")
                        b.slur = "("
                else:
                    if extender_duration is None:
                        b.note_pitch = "r"
                    else:
                        extender_duration -= b.note_duration
                        #print(extender_duration, "calculating")
                        if extender_duration < 0:
                            extender_duration = None
                            #print("adding )")
                            b.slur = ")"
        string = []
        #string.append("%%test%s%s = {" % (parser_name[staff.id], parser_name[no]))
        #for bar in bars:
        #    for b in bar:
        #        line = "% "
        #        line += str(b)
        #        string.append(line)
        #    string.append("% |")
        #string.append("%}")
        #string.append("")
        string.append("aligner%s%s = \\relative {" % (parser_name[staff.id], parser_name[no]))
        for bar in bars:
            line = " "
            for b in bar:
                line += b.note_pitch + parser_fraction_to_duration[str(b.note_duration)]
                if b.slur:
                    line += b.slur
                line += " "
            line += "|"
            if len(line.strip()):
                string.append(line)
        string.append("}")
        string.append("")
        string.append("lyric%s%s = \\lyricmode {" % (parser_name[staff.id], parser_name[no]))
        for bar in bars:
            line = " "
            for b in bar:
                if b.text is not None:
                    line += b.text
                    line += " "
                    if b.extender_line is not None:
                        line += b.extender_line
                        line += " "
            line += "%|"
            if len(line.strip()):
                string.append(line)
        string.append("}")
        return string

    def get_tbox(self):
        """Render the first staff's "Frame" text boxes as a two-column
        markup: stanza numbers (bold, lines matching "N.") on the left and
        the lyric lines on the right, with matching vertical spacing."""
        string = []
        #tbox_found = False
        #for e in self.staffs[0].children:
        #    if isinstance(e, mp.TBox):
        #        tbox_found = True
        #        break
        #if not tbox_found:
        #    return string
        stanzas = []
        lyrics = []
        for e in self.staffs[0].children:
            if isinstance(e, mp.TBox):
                if e.style == "Frame":
                    line_count = 0
                    for line in e.text.split("\n"):
                        line = line.strip()
                        if len(line) > 0:
                            if re.match("\\d\\.", line):
                                stanzas.append(" \\line { \\bold %s }" % line)
                            else:
                                line_count += 1
                                lyrics.append(" \\line { %s }" % line)
                        else:
                            # Blank line: pad the stanza column to stay level
                            # with the lyric lines just emitted.
                            stanzas.append(" \\vspace #%s" % (line_count))
                            line_count = 0
                            lyrics.append(" \\vspace #1")
        string.append("\\markup {")
        string.append(" \\column {")
        string += stanzas
        string.append(" }")
        string.append(" \\hspace #1")
        string.append(" \\column {")
        string += lyrics
        string.append(" }")
        string.append("}")
        return string

    def get_score(self):
        """Assemble the \\score block: per staff, a ChordNames context plus a
        Staff with the music voice and one NullVoice/Lyrics pair per verse."""
        string = []
        string.append("\\score {")
        string.append(" <<")
        for staff in self.staffs:
            string.append(" \\new ChordNames { \\jazzChords \\harmony%s }" % parser_name[staff.id])
            string.append(" \\new Staff {")
            string.append(" <<")
            string.append(" \\new Voice { \\staff%s }" % parser_name[staff.id])
            for no in self.get_lyric_nos(staff):
                string.append(" \\new NullVoice = \"aligner%s%s\" { \\aligner%s%s }" % (parser_name[staff.id], parser_name[no], parser_name[staff.id], parser_name[no]))
                string.append(" \\new Lyrics \\lyricsto \"aligner%s%s\" { \\lyric%s%s }" % (parser_name[staff.id], parser_name[no], parser_name[staff.id], parser_name[no]))
            string.append(" >>")
            string.append(" }")
            #string.append(" \\new Staff {")
            #for no in self.get_lyric_nos(staff):
            #    string.append(" \\new Voice = \"aligner%s%s\" { \\transpose c c'' \\aligner%s%s }" % (parser_name[staff.id], parser_name[no], parser_name[staff.id], parser_name[no]))
            #string.append(" }")
        string.append(" >>")
        string.append("}")
        return(string)

    def get_file(self):
        """Return the complete LilyPond file as a list of lines, in order:
        head, header, paper, per-staff music + harmony + lyrics, score,
        text frame."""
        string = []
        string += self.get_head()
        string.append("")
        string += self.get_header()
        string.append("")
        string += self.get_paper()
        string.append("")
        for s in self.staffs:
            string += self.get_staff_start(s)
            string += self.get_staff_data(s)
            string += self.get_staff_end()
            string.append("")
            string += self.get_harmony(s)
            string.append("")
            for no in self.get_lyric_nos(s):
                string += self.get_lyric(s, no)
                string.append("")
        string += self.get_score()
        string.append("")
        string += self.get_tbox()
        return(string)
if __name__ == "__main__":
    # CLI entry point: argv[1] is the input score file path (presumably a
    # MuseScore/MusicXML file -- confirm against LilypondGenerator); the
    # generated LilyPond source is written to stdout.
    lg = LilypondGenerator(sys.argv[1])
    print("\n".join(lg.get_file()))
| duhovniprojekt/duhovne_pjesme_novi_sad_1966 | scripts/new/lilypond_generator.py | lilypond_generator.py | py | 27,495 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dataclasses.dataclass",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "fractions.Fraction",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Optio... |
25209486610 | #!/usr/local/bin/python3
import socket
import struct
import crcmod
#from dataservice.datawave_produce.waveproduce import sin_wave,triangle_wave
import random
def crccreate(b, length):
    """Compute the CRC-16 (poly 0x8005, reflected, init 0xFFFF, xor-out 0)
    over the first *length* bytes of *b* and return it as an int."""
    checksum = crcmod.mkCrcFun(0x18005, initCrc=0xFFFF, rev=True, xorOut=0x0000)
    return checksum(b[:length])
def crccheckhole(b,length):
    # Verify the CRC of b[:length] against its hex-string form.
    # NOTE(review): bytesToHex is not defined anywhere in this module, so
    # calling this function raises NameError. Presumably the helper lived
    # in an earlier revision or another module -- confirm before use.
    crc16_func = crcmod.mkCrcFun(0x18005, initCrc=0xFFFF, rev=True, xorOut=0x0000)
    return hex(crc16_func(b[0:length]))==bytesToHex(b[length],b[length+1])
def crccheck(b,length):
    # Verify that the two bytes at b[length] and b[length+1] match the CRC-16
    # computed over b[:length].
    # NOTE(review): bytesToInt is not defined in this module -- this raises
    # NameError when called; confirm where the helper is meant to come from.
    print('传过来的b,和lenght',b,' ',length)  # debug output (runtime string left as-is)
    crc16_func = crcmod.mkCrcFun(0x18005, initCrc=0xFFFF, rev=True, xorOut=0x0000)
    return crc16_func(b[0:length]) == bytesToInt(b[length], b[length + 1])
def get_send_msgflowbytes(slave, func, register, length, data):
    """Build a Modbus-like response frame: header + float payload + CRC-16.

    Layout: slave, func, register, length packed big-endian as signed bytes,
    then *data* as a 4-byte big-endian float, followed by the CRC-16 of those
    8 bytes packed as a native unsigned short (see crccreate).

    Raises:
        ValueError: if length != 4 (only a 4-byte float payload is supported).
    """
    if length != 4:
        # The original code fell through with the frame variable unbound,
        # raising UnboundLocalError at `return`; fail explicitly instead.
        raise ValueError("only length == 4 (float payload) is supported, got %r" % (length,))
    frame = struct.pack('!bbbbf', slave, func, register, length, data)
    crc = struct.pack('H', crccreate(frame[0:8], length=8))
    return frame + crc
if __name__ == '__main__':
    # Simple measurement simulator: bind a local TCP server socket, then
    # continuously generate a random "flow" value.  The original Modbus
    # request/response handling is kept below (commented) for reference.
    tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # create the server socket
    tcp_server_socket.bind(('127.0.0.1', 5000))  # bind local address and listening port
    tcp_server_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
    print('Waiting connecting')
    # tcp_server_socket.listen(1)
    # client_socket, client_addr = tcp_server_socket.accept()
    # print('Someone has connected to this sever')
    data = 0.0
    while True:
        # Bug fix: the loop body previously started with a stray bare name
        # `tim`, which raised NameError on the first iteration; removed.
        # NOTE(review): the loop now busy-prints with no delay -- if the
        # removed line was meant to be e.g. time.sleep(...), restore that.
        data = random.uniform(10, 11)
        print(data)
        # b = client_socket.recv(10)
        # if b[1] == 0x03: handle setting command
        # elif b[2] == 0x01:
        #     slave, func, register, length = struct.unpack('!bbbb', b[0:4])
        #     msg = get_send_msgflowbytes(slave, func, register, length, data)
        #     client_socket.send(msg)
| Scottars/nis_website | dataservice/epicsrelated/simulate2.py | simulate2.py | py | 2,560 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "crcmod.mkCrcFun",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "crcmod.mkCrcFun",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "crcmod.mkCrcFun",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"lin... |
1925887546 | #!/bin/python
import collections
import os
import re
import subprocess
import time
# Path to the game server binary and to the bot builds under comparison.
GHOSTLY_PATH = '/usr/bin/ghostly'
ALLIE_DBG = '../target/debug/allie'  # current debug build

# Old versions
ALLIE_1_1 = './bin/allie_v1.1'
ALLIE_1_0 = './bin/allie_v1.0'
ALLIE_0_9 = './bin/allie_v0.9'
ALLIE_0_8 = './bin/allie_v0.8'
ALLIE_0_7 = './bin/allie_v0.7'
ALLIE_0_6 = './bin/allie_v0.6'
ALLIE_0_5 = './bin/allie_v0.5'
ALLIE_0_4 = './bin/allie_v0.4'
ALLIE_0_3 = './bin/allie_v0.3'
ALLIE_0_2 = './bin/allie_v0.2'
ALLIE_0_1 = './bin/allie_v0.1'
# One result line per bot, e.g. "name:allie;wins:3;score:100".
RESULT_RE = re.compile(r'^name:(?P<name>[^;]+);wins:(?P<wins>\d+);score:(?P<score>\d+)$')
ROUNDS = 25

Score = collections.namedtuple('Score', ['wins', 'score'])


def parse_result(server_output):
    """Parse the game server's stdout (bytes) into {bot name: Score}.

    Lines that do not match RESULT_RE are ignored.
    """
    matches = (RESULT_RE.match(line)
               for line in server_output.decode("utf-8").split('\n'))
    return {
        m.group('name'): Score(int(m.group('wins')), int(m.group('score')))
        for m in matches if m is not None
    }
def benchmark():
    """Run one ROUNDS-round match between the two configured bots and print
    each bot's absolute and relative share of wins and score."""
    # Start the game server; its stdout carries the final results.
    server = subprocess.Popen([GHOSTLY_PATH
                              # , '--headless'
                              , '--start-at', '2'
                              , '--tickless'
                              , '--rounds', str(ROUNDS)]
                              , stdout=subprocess.PIPE
                              , stderr=subprocess.PIPE)
    time.sleep(1)  # give the server a moment to start listening

    # Start the bots.  v1.0's output is discarded; subprocess.DEVNULL replaces
    # the original open(os.devnull) handle, which was never closed (leak).
    subprocess.Popen([ALLIE_1_0], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    subprocess.Popen([ALLIE_1_1])

    # Block until the match is finished, then parse the server's report.
    out, _ = server.communicate()
    results = parse_result(out)

    total_wins = sum(t.wins for t in results.values())
    total_score = sum(t.score for t in results.values())

    # Print the result, guarding against division by zero.
    for name, result in results.items():
        print(name + ":")
        print('\tWins: {}/{} {:.2f}%'
              .format(result.wins
                      , total_wins
                      , result.wins / total_wins * 100 if total_wins > 0 else 0))
        print('\tScore: {}/{} {:.2f}%'
              .format(result.score
                      , total_score
                      , result.score / total_score * 100 if total_score > 0 else 0))
if __name__ == '__main__':
    # Script entry point: run a single benchmark match.
    benchmark()
| Kwarf/Allie-2017 | benchmarker/bench.py | bench.py | py | 2,332 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
... |
40978312177 | # pylint: disable=E0401,E0611
import os
import json
script_dir = os.path.dirname(__file__)
from helpers.DataService import DataService
from models.InputData import InputData
from models.OutputData import OutputData
from models.DataResult import DataResult
from models.Encoder import Encoder
from models.Decoder import Decoder
from callbacks.BatchSaver import BatchSaver
from Config import BATCH_SIZE, EPOCHS, LIMIT_GPU_USAGE
from generator.TrainingGenerator import TrainingGenerator
from tensorflow.keras.models import Model, save_model
from tensorflow.keras.callbacks import LambdaCallback
from tensorflow.keras.callbacks import ModelCheckpoint
import tensorflow as tf
from tensorflow.keras import backend as ktf
def get_session(gpu_fraction=0.3):
    """Create a TF1-style Session capped at *gpu_fraction* of GPU memory."""
    opts = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    config = tf.ConfigProto(gpu_options=opts)
    return tf.Session(config=config)
if (LIMIT_GPU_USAGE):
    # Pin to GPU 1 and cap TF's GPU memory use via the custom session above.
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    ktf.set_session(get_session())

# Load the preprocessed dataset produced by the data-preparation step.
# NOTE(review): script_dir + './temp/...' only resolves correctly when the
# script is run from its own directory (dirname('') == '') -- confirm.
print('STARTING: loading_data')
data_result = DataResult(None, None)
with open(script_dir + './temp/processed_data.json', 'r') as output:
    json_data = json.load(output)
    data_result.loadJSON(json_data)
print('END: loading_data')
print('')

# Create the encoder
print('STARTING: create encoder')
encoder = Encoder(data_result.input_data)
print('END: create encoder')
print('')

# Create the decoder
print('STARTING: create decoder')
decoder = Decoder(data_result.output_data, encoder)
# Bug fix: this previously printed 'STARTING: create decoder' a second time.
print('END: create decoder')
print('')

# Create the seq2seq model from the encoder/decoder halves.
print('STARTING: create model')
model = Model([encoder.inputs, decoder.inputs], decoder.outputs)
print('END: create model')
print('')

# Compile the model
print('STARTING: compile model')
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
print('END: compile model')
print('')

# Train with a generator so the whole dataset never has to fit in memory.
print('STARTING: train model')
print('    Training with ' + str(data_result.input_data.num_lines) + ' lines')
generator = TrainingGenerator(data_result, BATCH_SIZE)
model.fit_generator(generator, epochs=EPOCHS, verbose=1, callbacks=[BatchSaver()])
# model.fit([token_result.encoder_input, token_result.decoder_input], token_result.decoder_output, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_split=0.2)
print('END: train model')
print('')

# Save the entire model
save_model(model, 'model.h5')

# Save the weights separately for CPU-only compatibility
model.save_weights('model_weights.h5')
| AtLeastITry/seq2seq-keras-chatBot | train.py | train.py | py | 2,470 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.GPUOptions",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",... |
38449435175 | #!/usr/bin/env python
import rospy
from std_msgs.msg import String
from move_base_msgs.msg import MoveBaseGoal
from move_base_msgs.msg import MoveBaseAction
import re
from Command import Command
from Queue import Queue
import actionlib
from tf import transformations
from geometry_msgs.msg import Quaternion
from sound_play.libsoundplay import SoundClient
import genpy
class CommandScheduler:
    """
    Scheduler for multi-step speech commands.

    Subscribes to /autospeech/run for messages of the form
    "<verb>///<argument>", converts them into Command objects, and executes
    them in FIFO order -- either as move_base action goals or as
    text-to-speech requests.
    """

    def __init__(self):
        rospy.init_node('command_scheduler', anonymous=True)
        self.rate = rospy.Rate(10)  # 10hz
        self.command_listener = rospy.Subscriber('/autospeech/run', String, self.received_command)
        # Maps the command verb to the factory that builds its Command.
        self.typeSwitch = {
            'go': self.navigate,
            'turn': self.turn,
            'say': self.say
        }
        self.queue = Queue()
        self.sound_client = SoundClient()
        while not rospy.is_shutdown():
            # NOTE(review): Queue.not_empty is a Condition object and is
            # always truthy; the blocking get() below is what actually waits
            # for the next command.
            if self.queue.not_empty:
                current = self.queue.get()
                if current.get_data_type() == SoundClient:
                    # Speech command: say the text, then pause briefly.
                    rospy.loginfo("Saying " + current.get_data())
                    self.sound_client.say(current.get_data())
                    rospy.sleep(2)
                else:
                    # Motion command: forward the goal to the action server
                    # and wait for it to finish before the next command.
                    ac = actionlib.SimpleActionClient(current.get_path(), current.get_data_type())
                    ac.wait_for_server()
                    ac.send_goal_and_wait(current.get_data())
        rospy.spin()

    def received_command(self, data):
        # Messages arrive as "<verb>///<argument>".
        split = re.split('///', data.data)
        command = self.typeSwitch[split[0]](split[1])
        self.queue.put(command)

    @staticmethod
    def navigate(location):
        """Build a move_base goal that drives 1 m forward or backward."""
        goal = MoveBaseGoal()
        goal.target_pose.header.stamp = genpy.Time()
        goal.target_pose.header.frame_id = "/base_link"
        dirs = {
            'forward': 1.0,
            'backward': -1.0
        }
        goal.target_pose.pose.position.x = dirs[location]
        goal.target_pose.pose.orientation.w = 1.0
        return Command('/move_base', MoveBaseAction, goal)

    @staticmethod
    def say(string):
        """Wrap a text-to-speech request as a Command."""
        return Command('', SoundClient, string)

    @staticmethod
    def turn(direction):
        """Build a move_base goal that rotates 90 degrees left or right."""
        from math import radians  # local import: math is not imported at file level

        goal = MoveBaseGoal()
        goal.target_pose.header.stamp = genpy.Time()
        goal.target_pose.header.frame_id = "/base_link"
        dirs = {
            'left': 90,
            'right': -90
        }
        # Bug fix: quaternion_from_euler expects radians; the original passed
        # the raw degree values (90 / -90) directly.
        quaternion = transformations.quaternion_from_euler(0, 0, radians(dirs[direction]))
        goal.target_pose.pose.orientation.x = quaternion[0]
        goal.target_pose.pose.orientation.y = quaternion[1]
        goal.target_pose.pose.orientation.z = quaternion[2]
        goal.target_pose.pose.orientation.w = quaternion[3]
        return Command('/move_base', MoveBaseAction, goal)
if __name__ == '__main__':
    # Constructing the scheduler starts the ROS node and its processing loop;
    # a ROSInterruptException (e.g. shutdown via Ctrl-C) exits quietly.
    try:
        CommandScheduler()
    except rospy.ROSInterruptException:
        pass
| elmdecoste/ros_advanced_voice | scripts/speech_queue.py | speech_queue.py | py | 2,950 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rospy.init_node",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "rospy.Rate",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "rospy.Subscriber",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "std_msgs.msg.String",
... |
15871926331 | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
def read_image(image):
    # Thin wrapper: decode the image file at path *image* into a pixel array.
    return mpimg.imread(image)
def format_image(image):
    # Add a batch dimension, resize to the model's 224x224 input size, and
    # scale pixel values to [0, 1].  Assumes *image* is an HxWxC array with
    # 0-255 pixel values -- TODO confirm against callers.
    return tf.image.resize(image[tf.newaxis, ...], [224, 224]) / 255.0
def get_category(img):
    """Classify *img* as rock, paper or scissors using the bundled TFLite model.

    Args:
        img: path to the image file to classify.

    Returns:
        str: the predicted class name.
    """
    model_path = 'static/model/' + 'converted_model.tflite'
    with open(model_path, 'rb') as fid:
        model_bytes = fid.read()

    # Build the TFLite interpreter around the serialized model.
    interpreter = tf.lite.Interpreter(model_content=model_bytes)
    interpreter.allocate_tensors()

    # Indices of the (single) input and output tensors.
    input_index = interpreter.get_input_details()[0]["index"]
    output_index = interpreter.get_output_details()[0]["index"]

    # Load and normalise the image, then run a single inference pass.
    batch = format_image(read_image(img))
    interpreter.set_tensor(input_index, batch)
    interpreter.invoke()

    scores = interpreter.get_tensor(output_index)
    labels = ['rock', 'paper', 'scissors']
    return labels[np.argmax(scores)]
def plot_category(img, current_time):
    """Save a copy of the input image under static/images/.

    Args:
        img: path to the image file to copy.
        current_time: timestamp used to make the output filename unique.
    """
    pixels = mpimg.imread(img)
    root_dir = os.path.dirname(os.path.abspath(__file__))
    # Bug fix: os.path.join was called with one pre-concatenated string
    # (ROOT_DIR + '/...'), defeating its purpose; pass components separately.
    file_path = os.path.join(root_dir, 'static', 'images', f'output_{current_time}.png')
    print(file_path)
    # imsave overwrites anyway; the explicit remove mirrors the original
    # behaviour (and clears a stale file even if imsave later fails).
    if os.path.exists(file_path):
        os.remove(file_path)
    plt.imsave(file_path, pixels)
| FourthBrain/Intro-to-Flask | inference.py | inference.py | py | 1,815 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.image.imread",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.image",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "tensorflow.image.resize",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tensor... |
35623428711 | from django.urls import path
from .views import *
# URL namespace for reversing these routes (e.g. "Mentor:<route name>").
app_name = "Mentor"
urlpatterns = [
    # Mentor endpoints: list/create, lookup by authenticated user, and RUD by pk.
    path("", view=MentorListView.as_view(), name="listar y crear mentores"),
    path("user/", view=MentorByUserRUD.as_view(), name="traer mentor por id de usuario"),
    path("<int:pk>/", view=MentorRUDView.as_view(), name="Obtener, actualizar y eliminar mentor"),
    # Mentoria (mentorship) endpoints: list/create and RUD by pk.
    path("mentoria/", view=MentoriaListView.as_view(), name="listar y crear mentorias"),
    path("mentoria/<int:pk>/", view=MentoriaRUDView.as_view(), name="Obtener, actualizar y eliminar mentoria")
]
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
36887287548 | from flask import Flask, request
from . import db
app = Flask(__name__)


@app.route("/api/message", methods=["GET"])
def get_random_message():
    """Return a random message to play the part of 'message in a bottle'."""
    message = db.get_random_message()
    return {"content": message}
@app.route("/api/message", methods=["POST"])
def create_message():
    """Store a new message; body must be JSON like {"content": "<2-1023 chars>"}."""
    payload = request.get_json()
    # get_json() returns None for a missing/non-JSON body; the original then
    # crashed with AttributeError (or KeyError on a missing field).
    if payload is None or "content" not in payload:
        raise ValueError("Request body must be JSON with a 'content' field.")
    content = payload["content"]
    if not 2 <= len(content) <= 1023:
        raise ValueError(f"Message must be between 2 and 1023 characters. It was {len(content)} characters.")
    db.create_message(content)
    return "", 201
| mshenfield/swellnote | swellnote/__init__.py | __init__.py | py | 584 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 15,
"usage_type": "name"
}
] |
29772321096 | import unittest
import HtmlTestRunner
from selenium import webdriver
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class LoginTest(unittest.TestCase):
    """End-to-end UI test: create a report in the test-bitdef web app.

    NOTE(review): the Chrome driver is created at class-definition (import)
    time, not in setUpClass -- confirm this is intentional.
    """

    baseURL = "https://test-bitdef.web.app"
    driver = webdriver.Chrome(executable_path = "..\drivers\chromedriver.exe")

    @classmethod
    def setUpClass(cls):
        # Open the application once for the whole test class.
        cls.driver.get(cls.baseURL)
        cls.driver.maximize_window()

    def test_createReport(self):
        """Fill in and save a new report, then assert the success toast."""
        wait = WebDriverWait(self.driver, 15)
        #Assert Title Page
        assert self.driver.title == "TestFrontend"
        #Create Report
        wait.until(EC.presence_of_element_located((By.XPATH,"//span[text()=' CREATE REPORT ']"))).click()
        #Details: the type/company dropdowns are driven via keyboard (open, move, select)
        detailsType = wait.until(EC.presence_of_element_located((By.XPATH,"//input[@placeholder = 'Select type']")))
        detailsType.send_keys(Keys.ENTER,Keys.ARROW_DOWN,Keys.ENTER)
        detailsCompany = wait.until(EC.presence_of_element_located((By.XPATH,"//input[@placeholder = 'Select Company']")))
        detailsCompany.send_keys(Keys.ENTER,Keys.ARROW_DOWN,Keys.ENTER)
        wait.until(EC.presence_of_element_located((By.XPATH,"//input[@placeholder = 'Enter name']"))).send_keys("Bogdan Eugen")
        #Settings: pick the scheduled-report radio, then reccurence/day/interval
        wait.until(EC.presence_of_element_located((By.XPATH,"//label[@for = 'mat-radio-2-input']//span[@class='mat-radio-container']"))).click()
        settingsReccurance = wait.until(EC.presence_of_element_located((By.XPATH,"//input[@placeholder = 'Select reccurence']")))
        settingsReccurance.send_keys(Keys.ENTER,Keys.ARROW_DOWN,Keys.ENTER)
        settingsOn = wait.until(EC.presence_of_element_located((By.XPATH,"//input[@placeholder = 'Select day']")))
        settingsOn.send_keys(Keys.ENTER,Keys.ARROW_DOWN,Keys.ARROW_DOWN,Keys.ENTER)
        settingInterval = wait.until(EC.presence_of_element_located((By.XPATH,"//input[@placeholder = 'Select interval']")))
        settingInterval.send_keys(Keys.ENTER,Keys.ARROW_DOWN,Keys.ENTER)
        wait.until(EC.presence_of_element_located((By.XPATH,"//label[@for = 'mat-checkbox-1-input']"))).click()
        wait.until(EC.presence_of_element_located((By.XPATH,"//span[text()=' SAVE ']"))).click()
        #Assert Raport SAVE
        # NOTE(review): fixed sleep before reading the toast; an explicit wait
        # on the toast element would be less flaky.
        time.sleep(1)
        successSave = self.driver.find_element_by_xpath("//div[text()=' Successfully saved the report ']").text
        self.assertEqual("Successfully saved the report", successSave)
        #Sleep to see ending
        time.sleep(3)

    @classmethod
    def tearDownClass(cls):
        # Close the browser window when all tests in the class have run.
        cls.driver.close()
if __name__== "__main__":
    # Run via HtmlTestRunner so an HTML report is written to ..\reports.
    unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(output='..\\reports'))
| degea78/Bitdefender | test-bitdef/testCases/testBitdef.py | testBitdef.py | py | 2,917 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "s... |
18405451718 | #!/usr/bin/env python3
import boto3
import argparse
import os
import base64
from common_functions import getAllInstances, getDynamoDBItems
from common_jenkins import triggerJob
from common_kms import get_plaintext_key
# CLI: -e selects the environment (staging / pe-prestaging / production);
# -t performs a dry run (report orphans without triggering teardown jobs).
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--env", help="Staging or Production", type=str, required=True)
parser.add_argument("-t", "--dryrun", help="Displaying orphanedInstances only", required=False, action='store_true', default=False, dest='DryRun')
args = parser.parse_args()
jenkins_server_url = 'https://jenkins.cicd.cloud.fpdev.io'

# Per-environment configuration: DynamoDB tenant table, AWS profile, Jenkins
# job path, and the KMS-encrypted Jenkins API token (decrypted via
# get_plaintext_key; the ciphertext itself is safe to keep in source only as
# long as KMS access is restricted).
if ((args.env).lower()).startswith("s"):
    tableName = 'dyn-use1-cpt-s-tenant-service-stage'
    os.system("okta-login pe-stg")
    # Assuming your AWS profile is pe-stg for DEP staging account
    session = boto3.Session(profile_name='pe-stg')
    jobname = 'GHE-CLDOPS/cpt-staging-deployment-pipelines/edge-ngfw'
    tokenHashed = 'AQICAHhYGEB1OYp+r8QB00qX9ggImKyc5paoUPZIsm20O94PvAEvbX7EICaGbwSMNqJIzaksAAAAfjB8BgkqhkiG9w0BBwagbzBtAgEAMGgGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQMYyMVNddcwLg/UsV9AgEQgDvkF7+q8nVEEh+94gDS9VxigULgdJE8mv/pQxK4ye/CkFyy5/Woo5QIQSS1J+2AEPc/iRGHxpomG71RwA=='
    token = str(base64.b64decode(get_plaintext_key(tokenHashed, 'us-east-2', session)), 'utf-8')
elif ((args.env).lower()).startswith("pe-pre"):
    tableName = 'dyn-use1-cpt-s-tenant-service-prestaging'
    os.system("okta-login pe-prestg")
    # Assuming your AWS profile is pe-stg for DEP staging account
    session = boto3.Session(profile_name='pe-prestg')
    jobname = 'GHE-CLDOPS/cpt-prestaging-deployment-pipelines/edge-ngfw'
    # tokenHashed = to be added once we have pre-staging setup
    # NOTE(review): tokenHashed is never assigned in this branch, so the next
    # line raises NameError for pre-staging runs -- confirm before use.
    token = str(base64.b64decode(get_plaintext_key(tokenHashed, 'us-east-2', session)), 'utf-8')
else:
    tableName = 'dyn-use1-cpt-p-tenant-service-production'
    os.system("okta-login pe-prod")
    # Assuming your AWS profile is pe-prod for DEP staging account
    session = boto3.Session(profile_name='pe-prod')
    jobname = 'GHE-CLDOPS/cpt-prod-deployment-pipelines/edge-ngfw'
    tokenHashed = 'AQICAHgNWkrfbqMq3gyhFfHoJjENYsopnb7sN2lR2l5wDhJHNgFxjPyfbu4LqjAKUAAX0vvKAAAAgDB+BgkqhkiG9w0BBwagcTBvAgEAMGoGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQMZmrQxioap7ALvPhEAgEQgD3zHoykyjj95EfzsiIn7GJWEPai+JAkmBKNEufifNOafTXMG0JVDT8KZW4ThV0km1Jx/0pCaqe8z7Bj16t3'
    token = str(base64.b64decode(get_plaintext_key(tokenHashed, 'us-east-2', session)), 'utf-8')
table = getDynamoDBItems(tableName, 'us-east-1', session)
instances = getAllInstances(session)

orphanedInstances = []
isValidTenant = False

# An instance is "valid" when its fp-tenant-id tag matches some tenant row in
# the DynamoDB table; any unmatched instance whose Name tag contains
# "edge-ngfw670" is collected as an orphan.
for reservation in instances["Reservations"]:
    for instance in reservation["Instances"]:
        for item in table['Items']:
            try:
                # NOTE(review): this break only exits the tag loop, not the
                # items loop -- the remaining tenant rows are still scanned.
                for tag in instance['Tags']:
                    if tag['Key'] == 'fp-tenant-id' and tag['Value'] == item['tenantId']:
                        isValidTenant = True
                        break
            except KeyError as error:
                # e.g. an instance with no 'Tags' key at all.
                print(error)
        if not isValidTenant:
            orphanedInstance = {}
            isEdge = False
            for tag in instance['Tags']:
                if "fp-tenant-id" in tag["Key"]:
                    orphanedInstance["fp_tenant_id"] = tag['Value']
                    orphanedInstance["InstanceId"] = instance["InstanceId"]
                    orphanedInstance["LaunchTime"] = instance["LaunchTime"]
                    # Region = availability zone minus its trailing letter.
                    orphanedInstance["Region"] = instance["Placement"]["AvailabilityZone"][:-1]
                if "fp-edge-id" == tag["Key"]:
                    orphanedInstance["fp-edge-id"] = tag["Value"]
                if "Name" == tag["Key"] and "edge-ngfw670" in tag["Value"]:
                    isEdge = True
            if isEdge:
                orphanedInstances.append(orphanedInstance)
        # Reset the flag for the next instance.
        isValidTenant = False
# Report the scan result and, unless --dryrun, trigger one destroy job per
# orphaned instance.
# Bug fix: the original tested `orphanedInstance` (the last single dict, which
# is unbound when the loop found nothing) instead of the accumulated list.
if len(orphanedInstances) == 0:
    print("No orphaned Instances found")
else:
    print(f"There are {len(orphanedInstances)} orphaned instances")
    for inst in orphanedInstances:
        if not args.DryRun:
            params = {'REGION': inst["Region"], 'JOB_TYPE': 'destroy', 'TENANT_ID': inst["fp_tenant_id"], 'EDGE_ID': inst["fp-edge-id"]}
            triggerJob('trung.truong@forcepoint.com', token, jenkins_server_url, jobname, params)
        else:
            print("DryRun only. No Action was taken")
        print(inst["InstanceId"] + " " + inst["fp_tenant_id"] + " " + inst["fp-edge-id"] + " " + inst["Region"])
| trtruong/utilities | scripts/python/checkOrphanedInstances.py | checkOrphanedInstances.py | py | 4,165 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "boto3.Session",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
... |
35855718282 | from __future__ import print_function
import scrapy
from scrapy.http.cookies import CookieJar
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from scrapy.http import Request,FormRequest
from mytest.items import myItem
class mySpider(scrapy.Spider):
    """Scrape an Amazon search-results page for each item's title and rating."""

    name = "myspider"
    allowed_domains = ["www.amazon.com"]
    start_urls = [
        "https://www.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Daps&field-keywords=milk"
    ]
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
        "Referer": "https://www.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Daps&field-keywords=milk"
    }
    formdata = {
        'url': 'search-alias=aps',
        'field-keywords': 'milk'
    }

    def parse(self, response):
        """Yield one myItem per search-result container on the page."""
        containers = Selector(response).css('.s-item-container')
        for container in containers:
            title = container.css('h2::text').extract_first()
            # Star text presumably reads like "4.5 out of 5 stars"; dropping
            # the last 15 characters (" out of 5 stars") keeps the number --
            # TODO confirm against live markup.
            stars = str(container.css('.a-icon-alt::text').extract_first())[:-15]
            yield myItem(
                title=title,
                stars=stars,
            )
        # Bug fix: the original built an empty myItem() before the loop and
        # then read myitem['title'] here, raising KeyError on every page.
        # Only the page URL is logged now.
        print(response.url)
{
"api_name": "scrapy.Spider",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "mytest.items.myItem",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "scrapy.selector.Selector",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "mytes... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.