Schema (column | dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
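A minimal sketch of how rows with this schema can be inspected, assuming the shard has been exported to a Parquet file (the file name below is hypothetical):

```python
# Load one row of the shard and print a few of the columns listed above.
# "stack_shard.parquet" is a placeholder; point it at your actual file.
import pandas as pd

df = pd.read_parquet("stack_shard.parquet")
row = df.iloc[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"])
print(row["size"], "bytes |", row["lang"])
print(row["content"][:200])  # first 200 characters of the stored source file
```

Each record below is shown as its metadata row (columns 1-25), then the `content` field as source code, then the remaining quality-signal row (columns 27-113), all in the column order above.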
75d7637d4de985450afeaf8267ea59deab8e6e61 | 478 | py | Python | Module_04/ex00/test.py | CristinaFdezBornay/PythonPiscine | 143968c2e26f5ddddb5114f3bcdddd0b1f00d153 | ["MIT"] | 1 | 2021-11-17T10:04:30.000Z | 2021-11-17T10:04:30.000Z | Module_04/ex00/test.py | CristinaFdezBornay/PythonPiscine | 143968c2e26f5ddddb5114f3bcdddd0b1f00d153 | ["MIT"] | null | null | null | Module_04/ex00/test.py | CristinaFdezBornay/PythonPiscine | 143968c2e26f5ddddb5114f3bcdddd0b1f00d153 | ["MIT"] | null | null | null |
from FileLoader import FileLoader
tests = [
"non_existing_file.csv",
"empty_file.csv",
"../data/athlete_events.csv",
]
if __name__ == "__main__":
for test in tests:
print(f"==> TESTING {test}")
fl = FileLoader()
print(f"\n=> Loading file")
df = fl.load(test)
print(f"\n=> Display first 3 rows")
fl.display(df, 3)
print(f"\n=> Display lasts 3 rows")
fl.display(df, -3)
input("====>\n\n")
| 20.782609 | 43 | 0.541841 | 63 | 478 | 3.920635 | 0.507937 | 0.097166 | 0.08502 | 0.11336 | 0.137652 | 0.137652 | 0 | 0 | 0 | 0 | 0 | 0.01173 | 0.286611 | 478 | 23 | 44 | 20.782609 | 0.71261 | 0 | 0 | 0 | 0 | 0 | 0.340292 | 0.098121 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.058824 | 0 | 0.058824 | 0.235294 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
75db1e4b6ac368d1004f97e5c6edf9221b06b01a | 7,631 | py | Python | lagen/nu/regeringenlegacy.py | redhog/ferenda | 6935e26fdc63adc68b8e852292456b8d9155b1f7 | ["BSD-2-Clause"] | 18 | 2015-03-12T17:42:44.000Z | 2021-12-27T10:32:22.000Z | lagen/nu/regeringenlegacy.py | redhog/ferenda | 6935e26fdc63adc68b8e852292456b8d9155b1f7 | ["BSD-2-Clause"] | 13 | 2016-01-27T10:19:07.000Z | 2021-12-13T20:24:36.000Z | lagen/nu/regeringenlegacy.py | redhog/ferenda | 6935e26fdc63adc68b8e852292456b8d9155b1f7 | ["BSD-2-Clause"] | 6 | 2016-11-28T15:41:29.000Z | 2022-01-08T11:16:48.000Z |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
# this repo overrides ferenda.sources.legal.se.Regeringen to work
# against old downloaded files
import re
import codecs
# from urllib.parse import urljoin
from rdflib import URIRef
from rdflib.namespace import SKOS
from ferenda.sources.legal.se import Regeringen, RPUBL
from ferenda.sources.legal.se.direktiv import DirRegeringen
from ferenda.sources.legal.se.sou import SOURegeringen
from ferenda.sources.legal.se.ds import Ds
from ferenda.sources.legal.se.propositioner import PropRegeringen
from ferenda.compat import urljoin
from . import SameAs
class RegeringenLegacy(Regeringen):
source_encoding = "iso-8859-1"
def download(self, basefile=None):
return False
def downloaded_to_intermediate(self, basefile, attachment=None):
return codecs.open(self.store.downloaded_path(basefile), encoding=self.source_encoding)
# override just some of the methods to parse the HTML index page
def extract_metadata(self, rawhead, basefile):
content = rawhead
title = content.find("h1").string
identifier_node = content.find("p", "lead")
if identifier_node:
identifier = identifier_node.text
else:
identifier = "" # infer_metadata calls infer_identifier
# if this is falsy, which will be good
# enough. No need to warn.
definitions = content.find("dl", "definitions")
ansvarig = None
if definitions:
for dt in definitions.find_all("dt"):
key = dt.get_text(strip=True)
value = dt.find_next_sibling("dd").get_text(strip=True)
if key in ("Utgiven:", "Publication date:"):
utgiven = self.parse_swedish_date(value)
elif key in ("Avsändare:",):
ansvarig = value
sammanfattning = None
if content.find("h2", text="Sammanfattning"):
sums = content.find("h2", text="Sammanfattning").find_next_siblings("p")
# "\n\n" doesn't seem to survive being stuffed in a rdfa
# content attribute. Replace with simple space.
sammanfattning = " ".join([x.get_text(strip=True) for x in sums])
# find related documents
re_basefile = re.compile(r'\d{4}(|/\d{2,4}):\d+')
# legStep1=Kommittedirektiv, 2=Utredning, 3=lagrådsremiss,
# 4=proposition. Assume that relationships between documents
# are reciprocal (ie if the page for a Kommittedirektiv
# references a Proposition, the page for that Proposition
        # references the Kommittedirektiv).
elements = {self.KOMMITTEDIREKTIV: [],
self.DS: ["legStep1"],
self.PROPOSITION: ["legStep1", "legStep2"],
self.SOU: ["legStep1"]}[self.document_type]
utgarFran = []
for elementid in elements:
box = content.find(id=elementid)
if not box:
continue
for listitem in box.find_all("li"):
if not listitem.find("span", "info"):
continue
infospans = [x.text.strip(
) for x in listitem.find_all("span", "info")]
rel_basefile = None
rel_identifier = None
for infospan in infospans:
if re_basefile.search(infospan):
# scrub rel_identifier ("Dir. 2008:50" -> "2008:50" etc)
rel_basefile = re_basefile.search(infospan).group()
rel_identifier = infospan
if not rel_basefile:
# this often means that a non-standard document
# type is used as preparatory work for this
# document (eg department memos not published in
# Ds, like "S2013/8074/PBB" -- seems to be common
# in Socialdepartementet and Finansdepartementet)
self.log.warning(
"%s: Couldn't find rel_basefile (elementid #%s) among %r" % (basefile, elementid, infospans))
continue
attribs = {"rpubl:arsutgava": basefile.split(":")[0],
"rpubl:lopnummer": basefile.split(":")[1]}
if elementid == "legStep1":
attribs["rdf:type"] = RPUBL.Kommittedirektiv
elif elementid == "legStep2":
attribs["rdf:type"] = RPUBL.Utredningsbetankande
if rel_identifier.startswith("SOU"):
altlabel = "SOU"
elif rel_identifier.startswith(("Ds", "DS")):
altlabel = "Ds"
else:
self.log.warning(
"%s: Cannot find out what type of document the linked %s is (#%s)" % (basefile, rel_identifier, elementid))
continue
attribs["rpubl:utrSerie"] = self.lookup_resource(altlabel, SKOS.altLabel)
elif elementid == "legStep3":
attribs["rdf:type"] = RPUBL.Proposition
uri = self.minter.space.coin_uri(self.attributes_to_resource(attribs))
utgarFran.append(uri)
# find related pages
related = content.find("h2", text="Relaterat")
seealso = []
if related:
for link in related.findParent("div").find_all("a"):
r = urljoin("http://www.regeringen.se/", link["href"])
seealso.append(URIRef(r))
a = self.metadata_from_basefile(basefile)
a.update({'dcterms:title': title,
'dcterms:identifier': identifier,
'dcterms:issued': utgiven,
'rpubl:utgarFran': utgarFran
})
if ansvarig:
a["rpubl:departement"] = ansvarig
if seealso:
a["rdfs:seeAlso"] = seealso
if sammanfattning:
a['dcterms:abstract'] = sammanfattning
return a
def find_doc_links(self, soup, basefile):
files = []
docsection = soup.find('div', 'doc')
if docsection:
for li in docsection.find_all("li", "pdf"):
link = li.find('a')
m = re.match(r'/download/(\w+\.pdf).*', link['href'], re.IGNORECASE)
if not m:
continue
pdfbasefile = m.group(1)
files.append((pdfbasefile, link.string))
selected = self.select_files(files)
self.log.debug("selected %s out of %d pdf files" % (", ".join([x[0] for x in selected]), len(files)))
return selected
def source_url(self, basefile):
# as the old site is gone, there is no possible URL we can
# return here.
return None
class DirRegeringenLegacy(RegeringenLegacy, SameAs, DirRegeringen):
alias = "dirregeringen.legacy"
class SOURegeringenLegacy(RegeringenLegacy, SameAs, SOURegeringen):
alias = "souregeringen.legacy"
def sanitize_identifier(self, identifier):
from ferenda.sources.legal.se.sou import sou_sanitize_identifier
return sou_sanitize_identifier(identifier)
class DsRegeringenLegacy(RegeringenLegacy, SameAs, Ds):
alias = "dsregeringen.legacy"
class PropRegeringenLegacy(RegeringenLegacy, SameAs, PropRegeringen):
alias = "propregeringen.legacy"
| 40.375661 | 135 | 0.576727 | 798 | 7,631 | 5.438596 | 0.349624 | 0.022581 | 0.030645 | 0.033871 | 0.052995 | 0.015668 | 0.015668 | 0 | 0 | 0 | 0 | 0.009301 | 0.32368 | 7,631 | 188 | 136 | 40.590426 | 0.831622 | 0.139169 | 0 | 0.068182 | 0 | 0 | 0.106728 | 0.006575 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.106061 | 0.022727 | 0.272727 | 0.007576 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
75e2178969612f0c7284d059eb5edd0c7915d7e5 | 2,850 | py | Python | lambda_assistant/mysql/client_handler.py | matiasvallejosdev/py-aws-lambda-handlers | 4643042bc02e557bb4a2953118de5f4eb5320d70 | ["Apache-2.0"] | null | null | null | lambda_assistant/mysql/client_handler.py | matiasvallejosdev/py-aws-lambda-handlers | 4643042bc02e557bb4a2953118de5f4eb5320d70 | ["Apache-2.0"] | null | null | null | lambda_assistant/mysql/client_handler.py | matiasvallejosdev/py-aws-lambda-handlers | 4643042bc02e557bb4a2953118de5f4eb5320d70 | ["Apache-2.0"] | null | null | null |
import pymysql
import logging
from lambda_assistant.handlers.event_handler import EventHandler
from lambda_assistant.errors import *
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class Select():
def Select(self, handler: EventHandler, conn: pymysql.connections.Connection, sql: str):
try:
result = {}
# Execute SQL command
with conn.cursor() as cur:
cur.execute(sql)
                row_headers = [x[0] for x in cur.description]  # extract column headers
for index, row in enumerate(cur):
result[index] = dict(zip(row_headers, row))
# Commit changes
conn.commit()
return result
except Exception as e:
handler.performError(GetDataFailedError())
logger.error(handler.lambdaError.toPrint())
logger.error(e)
return handler.lambdaError.toDict()
class Delete():
def Delete(self, handler: EventHandler, conn:pymysql.connections.Connection, sql: str, sql_recheckidentity: str):
try:
result = {}
# Execute SQL command
with conn.cursor() as cur:
cur.execute(sql)
cur.execute(sql_recheckidentity)
# Commit changes
conn.commit()
return result
except Exception as e:
handler.performError(DeleteDataFailedError())
logger.error(handler.lambdaError.toPrint())
logger.error(e)
return handler.lambdaError.toDict()
class Insert():
def Insert(self, handler: EventHandler, conn: pymysql.connections.Connection, sql: str, get_id=False):
try:
result = {}
# Execute SQL command
with conn.cursor() as cur:
cur.execute(sql)
if get_id:
id = int(cur.lastrowid)
result['id_inserted'] = id
# Commit changes
conn.commit()
return result
except Exception as e:
handler.performError(PutDataFailedError())
logger.error(handler.lambdaError.toPrint())
logger.error(e)
return handler.lambdaError.toDict()
class MySqlHandler(Select, Delete, Insert):
def __init__(self, db_name, rds_host, db_username, db_password):
self.rds_host = rds_host
self.db_name = db_name
self.db_username = db_username
self.db_password = db_password
def Connect(self):
conn = pymysql.connect(host=self.rds_host, user=self.db_username, passwd=self.db_password, db=self.db_name, connect_timeout=5)
return conn
| 35.185185 | 135 | 0.565965 | 288 | 2,850 | 5.493056 | 0.288194 | 0.044248 | 0.03287 | 0.051201 | 0.521492 | 0.521492 | 0.521492 | 0.521492 | 0.521492 | 0.407712 | 0 | 0.001081 | 0.350526 | 2,850 | 80 | 136 | 35.625 | 0.853593 | 0.05193 | 0 | 0.491803 | 0 | 0 | 0.004215 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081967 | false | 0.04918 | 0.065574 | 0 | 0.327869 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
75e72372c73d69ec71d6ae230b03dd3710c4e2a3 | 2,506 | py | Python | examples/building.py | jbermudezcabrera/campos | df34f93dd37b435a82663fb72ef37f669832af22 | ["MIT"] | null | null | null | examples/building.py | jbermudezcabrera/campos | df34f93dd37b435a82663fb72ef37f669832af22 | ["MIT"] | null | null | null | examples/building.py | jbermudezcabrera/campos | df34f93dd37b435a82663fb72ef37f669832af22 | ["MIT"] | null | null | null |
"""This example demonstrates the basics on building complete forms using campos.
It creates several fields, marking some of them as required and adding some
custom validation.
Finally, fields are added to a CreationForm, which has several buttons and a
custom callback connected to one of them. Once added, some related fields
are grouped.
"""
__author__ = 'Juan Manuel Bermúdez Cabrera'
def fake_create_person():
if form.valid:
msg = 'ID: {}<br/>'.format(form.id)
msg += 'Name: {}<br/>'.format(form.name)
msg += 'Last name: {}<br/>'.format(form.last_name)
msg += 'Phone: {}<br/>'.format(form.phone)
msg += 'Address: {}<br/>'.format(form.address)
msg += 'Country: {}<br/>'.format(form.country[0])
msg = 'New person created correctly with values:<br/>{}'.format(msg)
msg = '<html>{}</html>'.format(msg)
QMessageBox.information(None, 'Created', msg)
form.close()
def create_form():
id = campos.StringField(name='id', text='Personal ID', max_length=11,
required=True)
name = campos.StringField(name='name', text='Name', required=True)
last = campos.StringField(name='last_name', text='Last name', required=True)
val = campos.RegExp(r'\+?\d+', message='Invalid phone number')
phone = campos.StringField(name='phone', text='Phone number',
validators=[val])
address = campos.StringField(name='address', text='Home address')
country = campos.SelectField(name='country', text='Country', blank=True,
blank_text='Other', choices=['Cuba', 'EE.UU'],
default='Cuba')
fields = (id, name, last, phone, address, country)
global form
form = campos.CreationForm(on_save=fake_create_person, fields=fields)
form.setWindowTitle('Create Person')
# group some fields
form.group('Very personal info', ('phone', 'address'), layout='grid')
form.group('Identification', ['id', 'name', 'last_name'])
return form
if __name__ == '__main__':
import os
import sys
# set gui api to use
os.environ['QT_API'] = 'pyside'
from qtpy.QtWidgets import QMessageBox, QApplication
import campos
# set global settings for validation type and label positions
campos.Validation.set_current('instant')
campos.Labelling.set_current('top')
app = QApplication(sys.argv)
dialog = create_form()
sys.exit(dialog.exec_())
| 33.413333 | 80 | 0.634876 | 306 | 2,506 | 5.107843 | 0.441176 | 0.035829 | 0.046065 | 0.020473 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001544 | 0.224661 | 2,506 | 74 | 81 | 33.864865 | 0.802882 | 0.173982 | 0 | 0 | 0 | 0 | 0.206211 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.093023 | 0 | 0.162791 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
75e9882a624cfcf705ab7744f64aca22cda52bfb | 8,452 | py | Python | ai.py | LHGames-2017/nospace | 1f36fb980ee51cdc576b765eff2c4ad5533ea0e3 | ["MIT"] | null | null | null | ai.py | LHGames-2017/nospace | 1f36fb980ee51cdc576b765eff2c4ad5533ea0e3 | ["MIT"] | null | null | null | ai.py | LHGames-2017/nospace | 1f36fb980ee51cdc576b765eff2c4ad5533ea0e3 | ["MIT"] | null | null | null |
from flask import Flask, request
from structs import *
import json
import numpy as np
import sys
import math  # needed by distance()
import random, time
app = Flask(__name__)
dx=0
dy=0
def create_action(action_type, target):
actionContent = ActionContent(action_type, target.__dict__)
#print(actionContent)
return json.dumps(actionContent.__dict__)
def create_move_action(target):
return create_action("MoveAction", Point(target.X-dx,target.Y-dy))
def create_attack_action(target):
return create_action("AttackAction", Point(target.X-dx,target.Y-dy))
def create_collect_action(target):
return create_action("CollectAction", Point(target.X-dx,target.Y-dy))
def create_steal_action(target):
return create_action("StealAction", Point(target.X-dx,target.Y-dy))
def create_heal_action():
return create_action("HealAction", "")
def create_purchase_action(item):
return create_action("PurchaseAction", item)
def deserialize_map(serialized_map):
"""
Fonction utilitaire pour comprendre la map
"""
serialized_map = serialized_map[1:]
rows = serialized_map.split('[')
column = rows[0].split('{')
deserialized_map = [[Tile() for x in range(40)] for y in range(40)]
for i in range(len(rows) - 1):
column = rows[i + 1].split('{')
for j in range(len(column) - 1):
infos = column[j + 1].split(',')
end_index = infos[2].find('}')
content = int(infos[0])
x = int(infos[1])
y = int(infos[2][:end_index])
deserialized_map[i][j] = Tile(content, x, y)
return deserialized_map
#customs
def visual(lines,x,y):
for i in lines:
line = ''
for j in i[:20]:
#Empty, Wall, House, Lava, Resource, Shop, Player
#0 1 2 3 4 5 6
line+=str(j.Content).replace('None','N').replace('0', ' ').replace('1','#').replace('2','^').replace('3','L').replace('4','$').replace('5','S').replace('6','o')
print(line)
def distance(p1, p2):
return math.sqrt((p2[0]-p1[0])**2+(p2[1]-p1[1])**2)
'''
def searchg(x,y,grid,target, at):
if grid[x][y] == target:
at.append([x,y]) #found
return True
elif grid[x][y] == 1 or grid[x][y] == 3:
return False #wall or lava
elif grid[x][y] == 9:
return False #been here
at.append([x,y])
grid[x][y] == 9
if ((x<len(grid)-1 and search(x+1,y,grid,target, at))
or (y > 0 and search(x, y-1,grid,target, at))
or (x > 0 and search(x-1,y,grid,target, at))
or (y < len(grid)-1 and search(x, y+1,grid,target, at))):
return True
return False
'''
def search_next(me, target,m,dx,dy):
x=me.Position.X
y=me.Position.Y
if me.CarriedRessources==me.CarryingCapacity:
print('resource')
target=me.HouseLocation
#if distance([target.X,target.Y],[x,y])==0:
# return create_collect_action(Point(x+dx, x+dy))
neighbors = [[x+1,y],[x-1,y],[x,y+1],[x,y-1]]
tNeighbors = []
for neighbor in neighbors:
tNeighbors.append([distance(neighbor,[target.X, target.Y]),neighbor])
sortedNeighbors=sorted(tNeighbors, key=lambda x:x[0])
print(sortedNeighbors)
for n in sortedNeighbors:
#print(target.__dict__)
#print(x,y)
#print('----------',n,'--------')
#Empty, Wall, House, Lava, Resource, Shop, Player
#0 1 2 3 4 5 6
tile = m[n[1][0]-dx][n[1][1]-dy]
#print(tile.__dict__)
content = tile.Content
point = Point(n[1][0],n[1][1])
if content==0 or content==2:
print('----move----',point)
return create_move_action(point)
elif content==1 or content == 6:
print('attack',point)
return create_attack_action(point)
elif content==4:
return create_collect_action(point)
else:# content==3:
print('skip')
continue
def route(start, end, at, best=[]):
best.append(end)
for i in range(len(at)-1,-1,-1):
if compare(at[i], best[-1]):
best.append(at[i])
best=best[::-1]
return best
def compare(a,b):
if (a[0]==b[0]) and (abs(a[1]-b[1])==1):
return True
elif (a[1]==b[1]) and (abs(a[0]-b[0])==1):
return True
else:
return False
def arr2action(c,d):
if c[0]==d[0]:
if c[1]<d[1]:
return create_move_action(Point(x+1,y))
else:
return create_move_action(Point(x-1,y))
elif c[0]<d[0]:
return create_move_action(Point(x,y-1))
else:
return create_move_action(Point(x,y+1))
def findTargets(mapmatrix, me):
resources = []
enemyhouses = []
shops = []
for row in mapmatrix:
for tile in row:
if tile.Content==4:
resources.append(tile)
elif tile.Content==2 and tile.Content!=me.HouseLocation:
enemyhouses.append(tile)
elif tile.Content==5:
shops.append(tile)
else:
continue
return [resources, enemyhouses, shops]
def decide(me, closestEnemies, targets, grid, dx=0, dy=0):
try:
        distEn = closestEnemies[0][0]
        enemy = closestEnemies[0][1]
except:
distEn=0
enemy = []
distTarget = targets[0][0]
target = targets[0][1]
best=[]
at=[]
if distEn==1:
#print('------1-------')
return create_attack_action(Point(enemy.X,enemy.Y))
elif distTarget==1 and target.Content==2:
#print('------2-------')
return create_collect_action(Point(target.X,target.Y))
elif distTarget==0 and target.Content==4:
#print('------3-------')
return create_collect_action(Point(target.X,target.Y))
else:
#print('------4-------')
#t = random.choice([1,0])
#u = (t+1)%2
#return create_move_action(Point(me.Position.X+t,me.Position.Y+u))
        return search_next(me, target, grid, dx, dy)
def bot():
"""
Main de votre bot.
"""
map_json = request.form["map"]
# Player info
encoded_map = map_json.encode()
map_json = json.loads(encoded_map)
p = map_json["Player"]
pos = p["Position"]
x = pos["X"]
y = pos["Y"]
house = p["HouseLocation"]
player = Player(p["Health"], p["MaxHealth"], Point(x,y),
Point(house["X"], house["Y"]), p["Score"],
p["CarriedResources"], p["CarryingCapacity"])
# Map
serialized_map = map_json["CustomSerializedMap"]
deserialized_map=deserialize_map(serialized_map)
transposed=np.transpose(deserialized_map)
targets = findTargets(deserialized_map, player)
visual(transposed[:20][::-1],x,y)
otherPlayers = []
'''
#print(map_json)
for player_dict in map_json["OtherPlayers"]:
#print(player_dict)
for player_name in player_dict.keys():
player_info = player_dict[player_name]
#print('---------')
#print(player_info)
#print('---------')
p_pos = player_info["Position"]
player_info = PlayerInfo(player_info["Health"],
player_info["MaxHealth"],
Point(p_pos["X"], p_pos["Y"]))
otherPlayers.append({player_name: player_info })
'''
# return decision
#targets =
tTargets = []
for target in targets[0]:#+targets[1]:
tTargets.append([distance([x,y],[target.X,target.Y]),target])
sortedTargets = sorted(tTargets, key=lambda x:x[0])
tEnemies = []
for enemy in otherPlayers:
tEnemies.append([distance([x,y],[enemy.X,enemy.Y]),enemy])
sortedEnemies = sorted(tEnemies, key=lambda x:x[0])
dx,dy=0,0
for i,line in enumerate(deserialized_map):
for j,tile in enumerate(line):
if tile.X==x and tile.Y==y:
dx = x-i
dy = y-j
#return decide(player, sortedEnemies, sortedTargets, deserialized_map)
print(player.__dict__,player.Position.__dict__)
return search_next(player, sortedTargets[0][1], deserialized_map,dx,dy)
@app.route("/", methods=["POST"])
def reponse():
"""
Point d'entree appelle par le GameServer
"""
sys.stdout.flush()
return bot()
if __name__ == "__main__":
app.run(host="0.0.0.0", port=3000)
| 30.959707 | 172 | 0.565192 | 1,129 | 8,452 | 4.116918 | 0.177148 | 0.010327 | 0.024096 | 0.028399 | 0.20525 | 0.1179 | 0.114458 | 0.112737 | 0.076592 | 0.03012 | 0 | 0.024801 | 0.270114 | 8,452 | 272 | 173 | 31.073529 | 0.728643 | 0.096545 | 0 | 0.069767 | 0 | 0 | 0.038992 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104651 | false | 0 | 0.034884 | 0.040698 | 0.296512 | 0.040698 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
75ee6ab2f29331c5f95dba4b6e05f4612d407042 | 3,004 | py | Python | sierra_adapter/sierra_progress_reporter/src/interval_arithmetic.py | wellcomecollection/catalogue-pipeline | 360fa432a006f5e197a5b22d72cced7d6735d222 | ["MIT"] | 8 | 2019-08-02T09:48:40.000Z | 2019-12-20T14:06:58.000Z | sierra_adapter/sierra_progress_reporter/src/interval_arithmetic.py | wellcomecollection/catalogue | 17dcf7f1977f953fbaf35c60aa166aaa1413fdd2 | ["MIT"] | 329 | 2020-02-18T07:43:08.000Z | 2021-04-23T10:45:33.000Z | sierra_adapter/sierra_progress_reporter/src/interval_arithmetic.py | wellcomecollection/catalogue-pipeline | 360fa432a006f5e197a5b22d72cced7d6735d222 | ["MIT"] | 1 | 2019-08-22T11:44:34.000Z | 2019-08-22T11:44:34.000Z |
import datetime as dt
import os
import attr
@attr.s(repr=False)
class Interval:
start = attr.ib()
end = attr.ib()
key = attr.ib()
def __repr__(self):
return "%s(start=%r, end=%r, key=%r)" % (
type(self).__name__,
self.start.isoformat(),
self.end.isoformat(),
self.key,
)
__str__ = __repr__
def strip_timestamp(timestamp):
# The timezone offset may or may not be present, remove it if it's there
return timestamp.strip("Z").replace("+00-00", "")
def get_intervals(keys):
"""
Generate the intervals completed for a particular resource type.
:param keys: A generator of S3 key names.
"""
for k in keys:
name = os.path.basename(k)
start, end = name.split("__")
start = strip_timestamp(start)
end = strip_timestamp(end)
try:
yield Interval(
start=dt.datetime.strptime(start, "%Y-%m-%dT%H-%M-%S.%f"),
end=dt.datetime.strptime(end, "%Y-%m-%dT%H-%M-%S.%f"),
key=k,
)
except ValueError:
yield Interval(
start=dt.datetime.strptime(start, "%Y-%m-%dT%H-%M-%S"),
end=dt.datetime.strptime(end, "%Y-%m-%dT%H-%M-%S"),
key=k,
)
def combine_overlapping_intervals(sorted_intervals):
"""
Given a generator of sorted open intervals, generate the covering set.
It produces a series of 2-tuples: (interval, running), where ``running``
is the set of sub-intervals used to build the overall interval.
:param sorted_intervals: A generator of ``Interval`` instances.
"""
lower = None
running = []
for higher in sorted_intervals:
if not lower:
lower = higher
running.append(higher)
else:
# We treat these as open intervals. This first case is for the
# two intervals being wholly overlapping, for example:
#
# ( -- lower -- )
# ( -- higher -- )
#
if higher.start < lower.end:
upper_bound = max(lower.end, higher.end)
lower = Interval(start=lower.start, end=upper_bound, key=None)
running.append(higher)
# Otherwise the two intervals are disjoint. Note that this
# includes the case where lower.end == higher.start, because
# we can't be sure that point has been included.
#
# ( -- lower -- )
# ( -- higher -- )
#
# or
#
# ( -- lower -- )
# ( -- higher -- )
#
else:
yield (lower, running)
lower = higher
running = [higher]
# And spit out the final interval
if lower is not None:
yield (lower, running)
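A short usage sketch for `combine_overlapping_intervals` (hypothetical data; not part of the original file): two overlapping intervals collapse into one covering interval, while a disjoint one is yielded separately.

```python
import datetime as dt

ivs = [
    Interval(start=dt.datetime(2020, 1, 1), end=dt.datetime(2020, 1, 3), key="a"),
    Interval(start=dt.datetime(2020, 1, 2), end=dt.datetime(2020, 1, 5), key="b"),
    Interval(start=dt.datetime(2020, 1, 9), end=dt.datetime(2020, 1, 10), key="c"),
]
for covering, parts in combine_overlapping_intervals(iter(ivs)):
    print(covering.start, covering.end, [p.key for p in parts])
# "a" and "b" merge into one interval spanning Jan 1-5; "c" stays on its own.
```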
| 29.165049 | 78 | 0.508655 | 338 | 3,004 | 4.434911 | 0.366864 | 0.036691 | 0.048032 | 0.013342 | 0.106738 | 0.106738 | 0.106738 | 0.105404 | 0.105404 | 0.105404 | 0 | 0.003215 | 0.378828 | 3,004 | 102 | 79 | 29.45098 | 0.800107 | 0.314248 | 0 | 0.222222 | 0 | 0 | 0.055723 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.055556 | 0.037037 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
75f1634c6e371274f9060f7f9a480ee9c930fa89 | 1,082 | py | Python | userbot/plugins/hpdiwali.py | yu9ohde/Marshmellow | 145c90470701c972ab458483ac1b9320d1a44e8e | ["MIT"] | 2 | 2020-12-06T03:46:08.000Z | 2022-02-19T20:34:52.000Z | userbot/plugins/hpdiwali.py | pro-boy/Marshmello | 4cf6d96b69a7e0617ba5ced96eb5ee557b318b4c | ["MIT"] | 4 | 2020-11-07T07:39:51.000Z | 2020-11-10T03:46:41.000Z | userbot/plugins/hpdiwali.py | pro-boy/Marshmello | 4cf6d96b69a7e0617ba5ced96eb5ee557b318b4c | ["MIT"] | 9 | 2020-11-28T11:30:44.000Z | 2021-06-01T07:11:57.000Z |
# Plugin made by Dark cobra
# For Dark cobra
# Made by Shivam Patel(Team Cobra)
# Kang with credits..
import random
from userbot import CMD_HELP
from userbot.events import register
from userbot.utils import admin_cmd
from telethon import events, types, functions, utils
import asyncio
def choser(cmd, pack, blacklist={}):
docs = None
@borg.on(events.NewMessage(pattern=rf'\.{cmd}', outgoing=True))
async def handler(event):
if event.fwd_from:
return
animation_interval = 2
animation_ttl = range(0,8)
nonlocal docs
for i in animation_ttl:
await asyncio.sleep(animation_interval)
if docs is None:
docs = [
utils.get_input_document(x)
for x in (await borg(functions.messages.GetStickerSetRequest(types.InputStickerSetShortName(pack)))).documents
]
await event.respond(file=random.choice(docs))
choser('hpdiwali', 'a929138153_by_Shivam_Patel_1_anim')
| 28.473684 | 134 | 0.621072 | 126 | 1,082 | 5.222222 | 0.579365 | 0.050152 | 0.039514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017173 | 0.30037 | 1,082 | 37 | 135 | 29.243243 | 0.852048 | 0.086876 | 0 | 0 | 0 | 0 | 0.04883 | 0.033571 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.25 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
75f227cf59ba67118be0d4f419b2d0cc15fd93df | 1,024 | py | Python | scripts/parse-weka-results.py | jholewinski/ics-12-overlapped-tiling | af2b39bc957d33f68d4617865431ca731b18430a | ["MIT"] | 3 | 2015-12-31T11:19:50.000Z | 2017-11-30T03:14:56.000Z | scripts/parse-weka-results.py | jholewinski/ics-12-overlapped-tiling | af2b39bc957d33f68d4617865431ca731b18430a | ["MIT"] | null | null | null | scripts/parse-weka-results.py | jholewinski/ics-12-overlapped-tiling | af2b39bc957d33f68d4617865431ca731b18430a | ["MIT"] | null | null | null |
#!/usr/bin/env python
import sys
maximum = 0.0
selected = 0.0
results = []
for line in sys.stdin.readlines()[5:]:
line = line.strip()
if len(line) == 0:
continue
(inst, actual, predicted, error) = line.split()
results.append([inst, actual, predicted, error])
predicted = float(predicted)
if predicted > maximum:
maximum = predicted
selected = float(actual)
by_predicted = sorted(results, key=lambda entry: float(entry[2]))
by_predicted.reverse()
by_actual = sorted(results, key=lambda entry: float(entry[1]))
by_actual.reverse()
best_of_actuals = float(by_actual[0][1])
sys.stdout.write('Best of Actuals: %f\n' % best_of_actuals)
sys.stdout.write('Maximum Prediction: %s\n' %
str([x[2] for x in by_predicted[0:5]]))
sys.stdout.write('Selected Actual: %s\n' %
str([x[1] for x in by_predicted[0:5]]))
sys.stdout.write('Percentages: %s\n' %
str([float(x[1])/best_of_actuals for x in by_predicted[0:5]]))
| 26.947368 | 79 | 0.630859 | 150 | 1,024 | 4.213333 | 0.313333 | 0.087025 | 0.082278 | 0.037975 | 0.251582 | 0.251582 | 0.251582 | 0.10443 | 0.10443 | 0.10443 | 0 | 0.023632 | 0.214844 | 1,024 | 37 | 80 | 27.675676 | 0.762438 | 0.019531 | 0 | 0 | 0 | 0 | 0.095904 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.038462 | 0 | 0.038462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
75f3476923aa5142454f8d9f4ed05a21bd8875d9 | 941 | py | Python | symtuner/logger.py | audxo14/symtuner | 741e4e14cfcf09b7c7a71ce34cf28f1858f1f476 | ["MIT"] | null | null | null | symtuner/logger.py | audxo14/symtuner | 741e4e14cfcf09b7c7a71ce34cf28f1858f1f476 | ["MIT"] | 1 | 2022-01-26T12:51:32.000Z | 2022-01-26T12:51:32.000Z | symtuner/logger.py | audxo14/symtuner | 741e4e14cfcf09b7c7a71ce34cf28f1858f1f476 | ["MIT"] | 1 | 2022-01-26T12:42:24.000Z | 2022-01-26T12:42:24.000Z |
'''Logging module for symtuner library
Logging module for symtuner library. All loggings in symtuner library use this module.
'''
import logging as _logging
_LOGGER = None
def get_logger():
'''Get a logger.
Get a singleton `Logger`. If `Logger` not defined make one and return. If `get_logger` called
previously, returns a `Logger` object created previously.
Returns:
A `Logger` object.
'''
global _LOGGER
if not _LOGGER:
_LOGGER = _logging.getLogger('symtuner')
if not _logging.getLogger().handlers:
formatter = _logging.Formatter(fmt='%(asctime)s symtuner [%(levelname)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
stderr_handler = _logging.StreamHandler()
stderr_handler.setFormatter(formatter)
_LOGGER.addHandler(stderr_handler)
_LOGGER.setLevel('INFO')
return _LOGGER
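A usage sketch (hypothetical call site): the module hands back one shared logger that writes timestamped records to stderr.

```python
# First call configures the "symtuner" logger; later calls reuse it.
log = get_logger()
log.info("tuning started")  # e.g. "2022-01-26 12:42:24 symtuner [INFO] tuning started"
```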
| 28.515152 | 98 | 0.631243 | 106 | 941 | 5.443396 | 0.481132 | 0.07799 | 0.055459 | 0.083189 | 0.211438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.268863 | 941 | 32 | 99 | 29.40625 | 0.838663 | 0.343252 | 0 | 0 | 0 | 0 | 0.131624 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
75f57c3ebdfa5b1c58a1a40cbcfe56a933e80e69 | 3,326 | py | Python | config/eval.py | XiaLiPKU/RESCAN-for-Deraining | e28d1d7cd3d8b276ce88de730de1603bafa30e23 | ["MIT"] | 292 | 2018-07-17T01:11:53.000Z | 2022-03-31T13:06:50.000Z | config/eval.py | XiaLiPKU/RESCAN-for-Deraining | e28d1d7cd3d8b276ce88de730de1603bafa30e23 | ["MIT"] | 18 | 2018-08-02T13:33:06.000Z | 2022-01-26T15:54:27.000Z | config/eval.py | XiaLiPKU/RESCAN-for-Deraining | e28d1d7cd3d8b276ce88de730de1603bafa30e23 | ["MIT"] | 87 | 2018-07-17T18:02:09.000Z | 2021-12-19T08:21:57.000Z |
import os
import sys
import cv2
import argparse
import numpy as np
import torch
from torch import nn
from torch.nn import MSELoss
from torch.optim import Adam
from torch.optim.lr_scheduler import MultiStepLR
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
import settings
from dataset import TestDataset
from model import RESCAN
from cal_ssim import SSIM
logger = settings.logger
torch.cuda.manual_seed_all(66)
torch.manual_seed(66)
torch.cuda.set_device(settings.device_id)
def ensure_dir(dir_path):
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
class Session:
def __init__(self):
self.log_dir = settings.log_dir
self.model_dir = settings.model_dir
ensure_dir(settings.log_dir)
ensure_dir(settings.model_dir)
logger.info('set log dir as %s' % settings.log_dir)
logger.info('set model dir as %s' % settings.model_dir)
self.net = RESCAN().cuda()
self.crit = MSELoss().cuda()
self.ssim = SSIM().cuda()
self.dataloaders = {}
def get_dataloader(self, dataset_name):
dataset = TestDataset(dataset_name)
        if dataset_name not in self.dataloaders:
self.dataloaders[dataset_name] = \
DataLoader(dataset, batch_size=1,
shuffle=False, num_workers=1, drop_last=False)
return self.dataloaders[dataset_name]
def load_checkpoints(self, name):
ckp_path = os.path.join(self.model_dir, name)
try:
obj = torch.load(ckp_path)
logger.info('Load checkpoint %s' % ckp_path)
except FileNotFoundError:
logger.info('No checkpoint %s!!' % ckp_path)
return
self.net.load_state_dict(obj['net'])
def inf_batch(self, name, batch):
O, B = batch['O'].cuda(), batch['B'].cuda()
O, B = Variable(O, requires_grad=False), Variable(B, requires_grad=False)
R = O - B
with torch.no_grad():
O_Rs = self.net(O)
loss_list = [self.crit(O_R, R) for O_R in O_Rs]
ssim_list = [self.ssim(O - O_R, O - R) for O_R in O_Rs]
losses = {
'loss%d' % i: loss.item()
for i, loss in enumerate(loss_list)
}
ssimes = {
'ssim%d' % i: ssim.item()
for i, ssim in enumerate(ssim_list)
}
losses.update(ssimes)
return losses
def run_test(ckp_name):
sess = Session()
sess.net.eval()
sess.load_checkpoints(ckp_name)
dt = sess.get_dataloader('test')
all_num = 0
all_losses = {}
for i, batch in enumerate(dt):
losses = sess.inf_batch('test', batch)
batch_size = batch['O'].size(0)
all_num += batch_size
for key, val in losses.items():
if i == 0:
all_losses[key] = 0.
all_losses[key] += val * batch_size
logger.info('batch %d mse %s: %f' % (i, key, val))
for key, val in all_losses.items():
logger.info('total mse %s: %f' % (key, val / all_num))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', default='latest')
args = parser.parse_args(sys.argv[1:])
run_test(args.model)
| 28.672414 | 81 | 0.613049 | 459 | 3,326 | 4.259259 | 0.270153 | 0.027621 | 0.021483 | 0.017391 | 0.011253 | 0.011253 | 0.011253 | 0 | 0 | 0 | 0 | 0.004971 | 0.274203 | 3,326 | 115 | 82 | 28.921739 | 0.804888 | 0 | 0 | 0 | 0 | 0 | 0.046917 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.182796 | 0 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
75f5ca0e1019fe3f64db390c86a601c2f8792420 | 6,371 | py | Python | FastEMRIWaveforms/few/utils/modeselector.py | basuparth/ICERM_Workshop | ebabce680fc87e90ff1de30246dcda9beb384bb4 | ["MIT"] | null | null | null | FastEMRIWaveforms/few/utils/modeselector.py | basuparth/ICERM_Workshop | ebabce680fc87e90ff1de30246dcda9beb384bb4 | ["MIT"] | null | null | null | FastEMRIWaveforms/few/utils/modeselector.py | basuparth/ICERM_Workshop | ebabce680fc87e90ff1de30246dcda9beb384bb4 | ["MIT"] | null | null | null |
# Online mode selection for FastEMRIWaveforms Packages
# Copyright (C) 2020 Michael L. Katz, Alvin J.K. Chua, Niels Warburton, Scott A. Hughes
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from few.utils.citations import *
# check for cupy
try:
import cupy as xp
except (ImportError, ModuleNotFoundError) as e:
import numpy as xp
class ModeSelector:
"""Filter teukolsky amplitudes based on power contribution.
This module takes teukolsky modes, combines them with their associated ylms,
and determines the power contribution from each mode. It then filters the
    modes based on the fractional accuracy of the total power (eps) parameter.
The mode filtering is a major contributing factor to the speed of these
    waveforms as it removes large numbers of useless modes from the final
summation calculation.
Be careful as this is built based on the construction that input mode arrays
    will be in order of :math:`m=0`, :math:`m>0`, and then :math:`m<0`.
args:
m0mask (1D bool xp.ndarray): This mask highlights which modes have
:math:`m=0`. Value is False if :math:`m=0`, True if not.
This only includes :math:`m\geq0`.
use_gpu (bool, optional): If True, allocate arrays for usage on a GPU.
Default is False.
"""
def __init__(self, m0mask, use_gpu=False):
if use_gpu:
self.xp = xp
else:
self.xp = np
        # store information related to m values
# the order is m = 0, m > 0, m < 0
self.m0mask = m0mask
self.num_m_zero_up = len(m0mask)
self.num_m_1_up = len(self.xp.arange(len(m0mask))[m0mask])
self.num_m0 = len(self.xp.arange(len(m0mask))[~m0mask])
def attributes_ModeSelector(self):
"""
attributes:
xp: cupy or numpy depending on GPU usage.
num_m_zero_up (int): Number of modes with :math:`m\geq0`.
num_m_1_up (int): Number of modes with :math:`m\geq1`.
num_m0 (int): Number of modes with :math:`m=0`.
"""
pass
@property
def citation(self):
"""Return citations related to this class."""
return few_citation + few_software_citation
def __call__(self, teuk_modes, ylms, modeinds, eps=1e-5):
"""Call to sort and filer teukolsky modes.
This is the call function that takes the teukolsky modes, ylms,
mode indices and fractional accuracy of the total power and returns
filtered teukolsky modes and ylms.
args:
teuk_modes (2D complex128 xp.ndarray): Complex teukolsky amplitudes
from the amplitude modules.
Shape: (number of trajectory points, number of modes).
ylms (1D complex128 xp.ndarray): Array of ylm values for each mode,
including m<0. Shape is (num of m==0,) + (num of m>0,)
+ (num of m<0). Number of m<0 and m>0 is the same, but they are
ordered as (m==0 first then) m>0 then m<0.
modeinds (list of int xp.ndarrays): List containing the mode index arrays. If in an
equatorial model, need :math:`(l,m,n)` arrays. If generic,
:math:`(l,m,k,n)` arrays. e.g. [l_arr, m_arr, n_arr].
eps (double, optional): Fractional accuracy of the total power used
to determine the contributing modes. Lowering this value will
                calculate more modes, slowing the waveform down, but generally
improving accuracy. Increasing this value removes modes from
                consideration and can have a considerable effect on the speed of
the waveform, albeit at the cost of some accuracy (usually an
acceptable loss). Default that gives good mismatch qualities is
1e-5.
"""
# get the power contribution of each mode including m < 0
power = (
self.xp.abs(
self.xp.concatenate(
[teuk_modes, self.xp.conj(teuk_modes[:, self.m0mask])], axis=1
)
* ylms
)
** 2
)
# sort the power for a cumulative summation
inds_sort = self.xp.argsort(power, axis=1)[:, ::-1]
power = self.xp.sort(power, axis=1)[:, ::-1]
cumsum = self.xp.cumsum(power, axis=1)
# initialize and indices array for keeping modes
inds_keep = self.xp.full(cumsum.shape, True)
# keep modes that add to within the fractional power (1 - eps)
inds_keep[:, 1:] = cumsum[:, :-1] < cumsum[:, -1][:, self.xp.newaxis] * (
1 - eps
)
# finds indices of each mode to be kept
temp = inds_sort[inds_keep]
# adjust the index arrays to make -m indices equal to +m indices
# if +m or -m contributes, we keep both because of structure of CUDA kernel
temp = temp * (temp < self.num_m_zero_up) + (temp - self.num_m_1_up) * (
temp >= self.num_m_zero_up
)
# if +m or -m contributes, we keep both because of structure of CUDA kernel
keep_modes = self.xp.unique(temp)
# set ylms
        # adjust temp arrays specific to ylm setup
temp2 = keep_modes * (keep_modes < self.num_m0) + (
keep_modes + self.num_m_1_up
) * (keep_modes >= self.num_m0)
# ylm duplicates the m = 0 unlike teuk_modes
ylmkeep = self.xp.concatenate([keep_modes, temp2])
# setup up teuk mode and ylm returns
out1 = (teuk_modes[:, keep_modes], ylms[ylmkeep])
# setup up mode values that have been kept
out2 = tuple([arr[keep_modes] for arr in modeinds])
return out1 + out2
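A standalone numpy sketch of the eps-based filtering idea used in `__call__` above (illustrative numbers only; the class itself is the actual implementation):

```python
import numpy as np

power = np.array([[5.0, 3.0, 1.5, 0.4, 0.1]])   # per-mode power at one trajectory point
order = np.argsort(power, axis=1)[:, ::-1]      # strongest modes first
sorted_power = np.sort(power, axis=1)[:, ::-1]
cumsum = np.cumsum(sorted_power, axis=1)

eps = 0.05
keep = np.full(cumsum.shape, True)
keep[:, 1:] = cumsum[:, :-1] < cumsum[:, -1][:, np.newaxis] * (1 - eps)
print(order[keep])  # indices of the modes needed to reach (1 - eps) of total power
```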
| 38.149701 | 95 | 0.622822 | 909 | 6,371 | 4.293729 | 0.331133 | 0.010249 | 0.009224 | 0.010249 | 0.149372 | 0.110684 | 0.070202 | 0.048424 | 0.029208 | 0.029208 | 0 | 0.017153 | 0.295401 | 6,371 | 166 | 96 | 38.379518 | 0.852306 | 0.624706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0.02 | 0.1 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
75f9e56ae6c6a091caa3997bff09abbf8201e9db | 2,803 | py | Python | source/hsicbt/model/vgg.py | tongjian121/PK-HBaR | c564e0f08c2c09e0023384adecfcf25e2d53a8a3 | ["MIT"] | 9 | 2021-11-04T16:53:04.000Z | 2022-03-28T10:27:44.000Z | source/hsicbt/model/vgg.py | tongjian121/PK-HBaR | c564e0f08c2c09e0023384adecfcf25e2d53a8a3 | ["MIT"] | null | null | null | source/hsicbt/model/vgg.py | tongjian121/PK-HBaR | c564e0f08c2c09e0023384adecfcf25e2d53a8a3 | ["MIT"] | null | null | null |
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models
defaultcfg = {
11 : [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
13 : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
16 : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512],
19 : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512],
}
def conv_layer(chann_in, chann_out, k_size, p_size):
layer = nn.Sequential(
nn.Conv2d(chann_in, chann_out, kernel_size=k_size, padding=p_size),
nn.BatchNorm2d(chann_out),
nn.ReLU()
)
return layer
def vgg_conv_block(in_list, out_list, k_list, p_list, pooling_k, pooling_s):
layers = [ conv_layer(in_list[i], out_list[i], k_list[i], p_list[i]) for i in range(len(in_list)) ]
layers += [ nn.MaxPool2d(kernel_size = pooling_k, stride = pooling_s)]
return nn.Sequential(*layers)
def vgg_fc_layer(size_in, size_out):
layer = nn.Sequential(
nn.Linear(size_in, size_out),
nn.BatchNorm1d(size_out),
nn.ReLU()
)
return layer
class VGG16(nn.Module):
def __init__(self, **kwargs):
super(VGG16, self).__init__()
self.rob = kwargs['robustness'] if 'robustness' in kwargs else False
# Conv blocks (BatchNorm + ReLU activation added in each block)
self.layer1 = vgg_conv_block([3,64], [64,64], [3,3], [1,1], 2, 2)
self.layer2 = vgg_conv_block([64,128], [128,128], [3,3], [1,1], 2, 2)
self.layer3 = vgg_conv_block([128,256,256], [256,256,256], [3,3,3], [1,1,1], 2, 2)
self.layer4 = vgg_conv_block([256,512,512], [512,512,512], [3,3,3], [1,1,1], 2, 2)
self.layer5 = vgg_conv_block([512,512,512], [512,512,512], [3,3,3], [1,1,1], 2, 2)
# FC layers
self.layer6 = vgg_fc_layer(512, 4096)
self.layer7 = vgg_fc_layer(4096, 4096)
# Final layer
self.layer8 = nn.Linear(4096, 10)
def forward(self, x):
output_list = []
out = self.layer1(x)
output_list.append(out)
out = self.layer2(out)
output_list.append(out)
out = self.layer3(out)
output_list.append(out)
out = self.layer4(out)
output_list.append(out)
vgg16_features = self.layer5(out)
out = vgg16_features.view(out.size(0), -1)
#print(out.shape)
out = self.layer6(out)
output_list.append(out)
out = self.layer7(out)
output_list.append(out)
out = self.layer8(out)
if self.rob:
return out
else:
return out, output_list
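A usage sketch, assuming CIFAR-10-sized input (3x32x32) so the five 2x2 poolings reduce the feature map to 1x1 before the fully connected layers:

```python
import torch

model = VGG16(robustness=True)  # robustness=True makes forward() return logits only
model.eval()                    # eval mode so BatchNorm accepts a batch of one
logits = model(torch.randn(1, 3, 32, 32))
print(logits.shape)             # torch.Size([1, 10])
```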
| 32.593023 | 108 | 0.567249 | 424 | 2,803 | 3.591981 | 0.214623 | 0.090611 | 0.076822 | 0.055154 | 0.286277 | 0.241628 | 0.224557 | 0.105056 | 0.099803 | 0.06172 | 0 | 0.148495 | 0.276846 | 2,803 | 86 | 109 | 32.593023 | 0.602861 | 0.035319 | 0 | 0.193548 | 0 | 0 | 0.013333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080645 | false | 0 | 0.080645 | 0 | 0.258065 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
75faae9bc5c91a63ded3c9f4f2e51213df5e1730 | 11,555 | py | Python | src/out/ICFP18evaluation/evaluationTreeLSTM/PyTorch/scripts/preprocess-sst.py | faradaym/Lantern | 536e48da79ee374527c669f77ad9e0a0776a0bb8 | ["BSD-3-Clause"] | 158 | 2018-03-28T21:58:07.000Z | 2022-02-22T00:49:46.000Z | src/out/ICFP18evaluation/evaluationTreeLSTM/PyTorch/scripts/preprocess-sst.py | douxiansheng/Lantern | f453de532da638c1f467953b32bbe49a3dedfa45 | ["BSD-3-Clause"] | 35 | 2018-09-03T21:27:15.000Z | 2019-05-11T02:17:49.000Z | src/out/ICFP18evaluation/evaluationTreeLSTM/PyTorch/scripts/preprocess-sst.py | douxiansheng/Lantern | f453de532da638c1f467953b32bbe49a3dedfa45 | ["BSD-3-Clause"] | 36 | 2017-06-30T00:28:59.000Z | 2022-01-24T12:20:42.000Z |
"""
Preprocessing script for Stanford Sentiment Treebank data.
"""
import os
import glob
#
# Trees and tree loading
#
class ConstTree(object):
def __init__(self):
self.left = None
self.right = None
def size(self):
self.size = 1
if self.left is not None:
self.size += self.left.size()
if self.right is not None:
self.size += self.right.size()
return self.size
def set_spans(self):
if self.word is not None:
self.span = self.word
return self.span
self.span = self.left.set_spans()
if self.right is not None:
self.span += ' ' + self.right.set_spans()
return self.span
def get_labels(self, spans, labels, dictionary):
if self.span in dictionary:
spans[self.idx] = self.span
labels[self.idx] = dictionary[self.span]
if self.left is not None:
self.left.get_labels(spans, labels, dictionary)
if self.right is not None:
self.right.get_labels(spans, labels, dictionary)
class DepTree(object):
def __init__(self):
self.children = []
self.lo, self.hi = None, None
def size(self):
self.size = 1
for c in self.children:
self.size += c.size()
return self.size
def set_spans(self, words):
self.lo, self.hi = self.idx, self.idx + 1
if len(self.children) == 0:
self.span = words[self.idx]
return
for c in self.children:
c.set_spans(words)
self.lo = min(self.lo, c.lo)
self.hi = max(self.hi, c.hi)
self.span = ' '.join(words[self.lo : self.hi])
def get_labels(self, spans, labels, dictionary):
if self.span in dictionary:
spans[self.idx] = self.span
labels[self.idx] = dictionary[self.span]
for c in self.children:
c.get_labels(spans, labels, dictionary)
def load_trees(dirpath):
const_trees, dep_trees, toks = [], [], []
with open(os.path.join(dirpath, 'parents.txt')) as parentsfile, \
open(os.path.join(dirpath, 'dparents.txt')) as dparentsfile, \
open(os.path.join(dirpath, 'sents.txt')) as toksfile:
parents, dparents = [], []
for line in parentsfile:
parents.append(map(int, line.split()))
for line in dparentsfile:
dparents.append(map(int, line.split()))
for line in toksfile:
toks.append(line.strip().split())
for i in xrange(len(toks)):
const_trees.append(load_constituency_tree(parents[i], toks[i]))
dep_trees.append(load_dependency_tree(dparents[i]))
return const_trees, dep_trees, toks
def load_constituency_tree(parents, words):
trees = []
root = None
size = len(parents)
for i in xrange(size):
trees.append(None)
word_idx = 0
for i in xrange(size):
if not trees[i]:
idx = i
prev = None
prev_idx = None
word = words[word_idx]
word_idx += 1
while True:
tree = ConstTree()
parent = parents[idx] - 1
tree.word, tree.parent, tree.idx = word, parent, idx
word = None
if prev is not None:
if tree.left is None:
tree.left = prev
else:
tree.right = prev
trees[idx] = tree
if parent >= 0 and trees[parent] is not None:
if trees[parent].left is None:
trees[parent].left = tree
else:
trees[parent].right = tree
break
elif parent == -1:
root = tree
break
else:
prev = tree
prev_idx = idx
idx = parent
return root
def load_dependency_tree(parents):
trees = []
root = None
size = len(parents)
for i in xrange(size):
trees.append(None)
for i in xrange(size):
if not trees[i]:
idx = i
prev = None
prev_idx = None
while True:
tree = DepTree()
parent = parents[idx] - 1
# node is not in tree
if parent == -2:
break
tree.parent, tree.idx = parent, idx
if prev is not None:
tree.children.append(prev)
trees[idx] = tree
if parent >= 0 and trees[parent] is not None:
trees[parent].children.append(tree)
break
elif parent == -1:
root = tree
break
else:
prev = tree
prev_idx = idx
idx = parent
return root
#
# Various utilities
#
def make_dirs(dirs):
for d in dirs:
if not os.path.exists(d):
os.makedirs(d)
def load_sents(dirpath):
sents = []
with open(os.path.join(dirpath, 'SOStr.txt')) as sentsfile:
for line in sentsfile:
sent = ' '.join(line.split('|'))
sents.append(sent.strip())
return sents
def load_splits(dirpath):
splits = []
with open(os.path.join(dirpath, 'datasetSplit.txt')) as splitfile:
splitfile.readline()
for line in splitfile:
idx, split = line.split(',')
splits.append(int(split))
return splits
def load_parents(dirpath):
parents = []
with open(os.path.join(dirpath, 'STree.txt')) as parentsfile:
for line in parentsfile:
p = ' '.join(line.split('|'))
parents.append(p.strip())
return parents
def load_dictionary(dirpath):
labels = []
with open(os.path.join(dirpath, 'sentiment_labels.txt')) as labelsfile:
labelsfile.readline()
for line in labelsfile:
idx, rating = line.split('|')
idx = int(idx)
rating = float(rating)
if rating <= 0.2:
label = -2
elif rating <= 0.4:
label = -1
elif rating > 0.8:
label = +2
elif rating > 0.6:
label = +1
else:
label = 0
labels.append(label)
d = {}
with open(os.path.join(dirpath, 'dictionary.txt')) as dictionary:
for line in dictionary:
s, idx = line.split('|')
d[s] = labels[int(idx)]
return d
def build_vocab(filepaths, dst_path, lowercase=True):
vocab = set()
for filepath in filepaths:
with open(filepath) as f:
for line in f:
if lowercase:
line = line.lower()
vocab |= set(line.split())
with open(dst_path, 'w') as f:
for w in sorted(vocab):
f.write(w + '\n')
def split(sst_dir, train_dir, dev_dir, test_dir):
sents = load_sents(sst_dir)
splits = load_splits(sst_dir)
parents = load_parents(sst_dir)
with open(os.path.join(train_dir, 'sents.txt'), 'w') as train, \
open(os.path.join(dev_dir, 'sents.txt'), 'w') as dev, \
open(os.path.join(test_dir, 'sents.txt'), 'w') as test, \
open(os.path.join(train_dir, 'parents.txt'), 'w') as trainparents, \
open(os.path.join(dev_dir, 'parents.txt'), 'w') as devparents, \
open(os.path.join(test_dir, 'parents.txt'), 'w') as testparents:
for sent, split, p in zip(sents, splits, parents):
if split == 1:
train.write(sent)
train.write('\n')
trainparents.write(p)
trainparents.write('\n')
elif split == 2:
test.write(sent)
test.write('\n')
testparents.write(p)
testparents.write('\n')
else:
dev.write(sent)
dev.write('\n')
devparents.write(p)
devparents.write('\n')
def get_labels(tree, dictionary):
size = tree.size()
spans, labels = [], []
for i in xrange(size):
labels.append(None)
spans.append(None)
tree.get_labels(spans, labels, dictionary)
return spans, labels
def write_labels(dirpath, dictionary):
print('Writing labels for trees in ' + dirpath)
with open(os.path.join(dirpath, 'labels.txt'), 'w') as labels, \
open(os.path.join(dirpath, 'dlabels.txt'), 'w') as dlabels:
# load constituency and dependency trees
const_trees, dep_trees, toks = load_trees(dirpath)
# write span labels
for i in xrange(len(const_trees)):
const_trees[i].set_spans()
dep_trees[i].set_spans(toks[i])
# const tree labels
s, l = [], []
for j in xrange(const_trees[i].size()):
s.append(None)
l.append(None)
const_trees[i].get_labels(s, l, dictionary)
labels.write(' '.join(map(str, l)) + '\n')
# dep tree labels
dep_trees[i].span = const_trees[i].span
s, l = [], []
for j in xrange(len(toks[i])):
s.append(None)
l.append('#')
dep_trees[i].get_labels(s, l, dictionary)
dlabels.write(' '.join(map(str, l)) + '\n')
def dependency_parse(filepath, cp='', tokenize=True):
print('\nDependency parsing ' + filepath)
dirpath = os.path.dirname(filepath)
filepre = os.path.splitext(os.path.basename(filepath))[0]
tokpath = os.path.join(dirpath, filepre + '.toks')
parentpath = os.path.join(dirpath, 'dparents.txt')
relpath = os.path.join(dirpath, 'rels.txt')
tokenize_flag = '-tokenize - ' if tokenize else ''
cmd = ('java -cp %s DependencyParse -tokpath %s -parentpath %s -relpath %s %s < %s'
% (cp, tokpath, parentpath, relpath, tokenize_flag, filepath))
os.system(cmd)
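# Hedged note: DependencyParse is an external Java class expected on the
# classpath assembled in __main__ below; the command writes <name>.toks,
# dparents.txt and rels.txt next to the input file.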
if __name__ == '__main__':
print('=' * 80)
print('Preprocessing Stanford Sentiment Treebank')
print('=' * 80)
base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
data_dir = os.path.join(base_dir, 'data')
lib_dir = os.path.join(base_dir, 'lib')
sst_dir = os.path.join(data_dir, 'sst')
train_dir = os.path.join(sst_dir, 'train')
dev_dir = os.path.join(sst_dir, 'dev')
test_dir = os.path.join(sst_dir, 'test')
make_dirs([train_dir, dev_dir, test_dir])
# produce train/dev/test splits
split(sst_dir, train_dir, dev_dir, test_dir)
sent_paths = glob.glob(os.path.join(sst_dir, '*/sents.txt'))
# produce dependency parses
classpath = ':'.join([
lib_dir,
os.path.join(lib_dir, 'stanford-parser/stanford-parser.jar'),
os.path.join(lib_dir, 'stanford-parser/stanford-parser-3.5.1-models.jar')])
for filepath in sent_paths:
dependency_parse(filepath, cp=classpath, tokenize=False)
# get vocabulary
build_vocab(sent_paths, os.path.join(sst_dir, 'vocab.txt'))
build_vocab(sent_paths, os.path.join(sst_dir, 'vocab-cased.txt'), lowercase=False)
# write sentiment labels for nodes in trees
dictionary = load_dictionary(sst_dir)
write_labels(train_dir, dictionary)
write_labels(dev_dir, dictionary)
write_labels(test_dir, dictionary)
| 32.457865
| 87
| 0.539766
| 1,429
| 11,555
| 4.26662
| 0.121763
| 0.036411
| 0.049205
| 0.036739
| 0.409218
| 0.30802
| 0.206331
| 0.170248
| 0.149582
| 0.124651
| 0
| 0.004721
| 0.340026
| 11,555
| 355
| 88
| 32.549296
| 0.794781
| 0.02804
| 0
| 0.29932
| 0
| 0.003401
| 0.051035
| 0.007405
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.006803
| 0
| 0.129252
| 0.017007
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75fc997c30736fa87f40fddc061010fa3c1f2c9f
| 12,703
|
py
|
Python
|
models/relevance/relevance_google_net.py
|
sanglee/XAI-threshold-calibration
|
24ddd5213b02d4fb919bca191392fe8b1a30aa88
|
[
"Apache-2.0"
] | null | null | null |
models/relevance/relevance_google_net.py
|
sanglee/XAI-threshold-calibration
|
24ddd5213b02d4fb919bca191392fe8b1a30aa88
|
[
"Apache-2.0"
] | null | null | null |
models/relevance/relevance_google_net.py
|
sanglee/XAI-threshold-calibration
|
24ddd5213b02d4fb919bca191392fe8b1a30aa88
|
[
"Apache-2.0"
] | null | null | null |
import warnings
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
import torch.utils.model_zoo as model_zoo
from typing import Optional, Tuple, List, Callable, Any
from modules.layers import *
__all__ = ['GoogLeNet', 'googlenet', "GoogLeNetOutputs", "_GoogLeNetOutputs"]
model_urls = {
# GoogLeNet ported from TensorFlow
'googlenet': 'https://download.pytorch.org/models/googlenet-1378be20.pth',
}
GoogLeNetOutputs = namedtuple('GoogLeNetOutputs', ['logits', 'aux_logits2', 'aux_logits1'])
GoogLeNetOutputs.__annotations__ = {'logits': Tensor, 'aux_logits2': Optional[Tensor],
'aux_logits1': Optional[Tensor]}
# TorchScript annotations failed with ``_GoogLeNetOutputs = namedtuple(...)``,
# so _GoogLeNetOutputs is set below for backwards compatibility.
_GoogLeNetOutputs = GoogLeNetOutputs
def googlenet(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> "GoogLeNet":
r"""GoogLeNet (Inception v1) model architecture from
`"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
aux_logits (bool): If True, adds two auxiliary branches that can improve training.
Default: *False* when pretrained is True otherwise *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
if 'aux_logits' not in kwargs:
kwargs['aux_logits'] = False
if kwargs['aux_logits']:
warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
'so make sure to train them')
original_aux_logits = kwargs['aux_logits']
kwargs['aux_logits'] = True
kwargs['init_weights'] = False
model = GoogLeNet(**kwargs)
model.load_state_dict(model_zoo.load_url(model_urls['googlenet'], progress=progress))
# strip the untrained aux heads again if the caller did not ask for them
if not original_aux_logits:
model.aux_logits = False
model.aux1 = None
model.aux2 = None
return model
return GoogLeNet(**kwargs)
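# A hedged usage sketch (not part of the original file); the 3x224x224 input
# size is assumed from the shape comments in GoogLeNet.forward below:
#   model = googlenet(pretrained=True)
#   model.eval()
#   logits = model(torch.randn(1, 3, 224, 224))  # -> shape [1, 1000]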
class GoogLeNet(nn.Module):
__constants__ = ['aux_logits', 'transform_input']
def __init__(
self,
num_classes: int = 1000,
aux_logits: bool = True,
transform_input: bool = False,
init_weights: Optional[bool] = None,
blocks: Optional[List[Callable[..., nn.Module]]] = None
) -> None:
super(GoogLeNet, self).__init__()
if blocks is None:
blocks = [BasicConv2d, Inception, InceptionAux]
if init_weights is None:
warnings.warn('The default weight initialization of GoogLeNet will be changed in future releases of '
'torchvision. If you wish to keep the old behavior (which leads to long initialization times'
' due to scipy/scipy#11299), please set init_weights=True.', FutureWarning)
init_weights = True
assert len(blocks) == 3
conv_block = blocks[0]
inception_block = blocks[1]
inception_aux_block = blocks[2]
self.aux_logits = aux_logits
self.transform_input = transform_input
self.conv1 = conv_block(3, 64, kernel_size=7, stride=2, padding=3)
self.maxpool1 = MaxPool2d(3, stride=2, ceil_mode=True)
self.conv2 = conv_block(64, 64, kernel_size=1)
self.conv3 = conv_block(64, 192, kernel_size=3, padding=1)
self.maxpool2 = MaxPool2d(3, stride=2, ceil_mode=True)
self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32)
self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64)
self.maxpool3 = MaxPool2d(3, stride=2, ceil_mode=True)
self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64)
self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64)
self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64)
self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64)
self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128)
self.maxpool4 = MaxPool2d(2, stride=2, ceil_mode=True)
self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128)
self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128)
if aux_logits:
self.aux1 = inception_aux_block(512, num_classes)
self.aux2 = inception_aux_block(528, num_classes)
else:
self.aux1 = None # type: ignore[assignment]
self.aux2 = None # type: ignore[assignment]
self.avgpool = AdaptiveAvgPool2d((1, 1))
self.dropout = Dropout(0.2)
self.fc = Linear(1024, num_classes)
self.gradients = dict()
self.activations = dict()
# Cache the activations and gradients of inception3b so that CAM-style
# relevance maps can be computed after a backward pass.
def forward_hook(module, input, output):
self.activations['value'] = output
return None
def backward_hook(module, input, output):
self.gradients['value'] = output[0]
self.inception3b.register_forward_hook(forward_hook)
self.inception3b.register_backward_hook(backward_hook)
def forward(self, x):
# N x 3 x 224 x 224
x = self.conv1(x)
# N x 64 x 112 x 112
x = self.maxpool1(x)
# N x 64 x 56 x 56
x = self.conv2(x)
# N x 64 x 56 x 56
x = self.conv3(x)
# N x 192 x 56 x 56
x = self.maxpool2(x)
# N x 192 x 28 x 28
x = self.inception3a(x)
# N x 256 x 28 x 28
x = self.inception3b(x)
# N x 480 x 28 x 28
x = self.maxpool3(x)
# N x 480 x 14 x 14
x = self.inception4a(x)
# N x 512 x 14 x 14
x = self.inception4b(x)
# N x 512 x 14 x 14
x = self.inception4c(x)
# N x 512 x 14 x 14
x = self.inception4d(x)
# N x 528 x 14 x 14
x = self.inception4e(x)
# N x 832 x 14 x 14
x = self.maxpool4(x)
# N x 832 x 7 x 7
x = self.inception5a(x)
# N x 832 x 7 x 7
x = self.inception5b(x)
# N x 1024 x 7 x 7
x = self.avgpool(x)
# N x 1024 x 1 x 1
x = torch.flatten(x, 1)
# N x 1024
x = self.dropout(x)
x = self.fc(x)
# N x 1000 (num_classes)
# R = self.CLRP(x)
#
# logit = x[:, x.max(1)[-1]].sum()
# logit.backward()
# R = self.fc.relprop(R)
# R = self.dropout.relprop(R)
# R = R.reshape_as(self.avgpool.Y)
# R = self.avgpool.relprop(R)
# R = self.inception5b.relprop(R)
# R = self.inception5a.relprop(R)
# R = self.maxpool4.relprop(R)
# R = self.inception4e.relprop(R)
# R = self.inception4d.relprop(R)
# R = self.inception4c.relprop(R)
# R = self.inception4b.relprop(R)
# R = self.inception4a.relprop(R)
# R = self.maxpool3.relprop(R)
# R = self.inception3b.relprop(R)
# R = self.inception3a.relprop(R)
#
# r_weight = torch.mean(R,dim=(2,3),keepdim=True)
# r_cam = t*r_weight
# r_cam = torch.sum(r_cam,dim=(0,1))
#
# a = self.activations['value']
# g = self.gradients['value']
# g_ = torch.mean(g,dim=(2,3),keepdim=True)
# grad_cam = a * g_
# grad_cam = torch.sum(grad_cam,dim=(0,1))
#
# g_2 = g ** 2
# g_3 = g ** 3
# alpha_numer = g_2
# alpha_denom = 2 * g_2 + torch.sum(a * g_3, dim=(0, 1), keepdim=True) # + 1e-2
#
# alpha = alpha_numer / alpha_denom
#
# w = torch.sum(alpha * torch.clamp(g, min =0), dim=(0, 1), keepdim=True)
#
# grad_cam_pp = torch.clamp(w * a, min=0)
# grad_cam_pp = torch.sum(grad_cam_pp, dim=-1)
return x
def CLRP(self,x):
maxindex = torch.argmax(x)
R = torch.ones(x.shape).cuda()
R /= -1000
R[:, maxindex] = 1
return R
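# Hedged note: CLRP builds the initial relevance for a contrastive LRP pass;
# the argmax class gets relevance 1 while every other logit gets -1/1000, so
# relprop() propagates evidence for the prediction against all other classes.
# Sketch (assumes CUDA logits x from a forward pass):
#   R = model.relprop(model.CLRP(x))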
def relprop(self,R):
R = self.fc.relprop(R)
R = self.dropout.relprop(R)
R = R.reshape_as(self.avgpool.Y)
R = self.avgpool.relprop(R)
R = self.inception5b.relprop(R)
R = self.inception5a.relprop(R)
R = self.maxpool4.relprop(R)
R = self.inception4e.relprop(R)
R = self.inception4d.relprop(R)
R = self.inception4c.relprop(R)
R = self.inception4b.relprop(R)
R = self.inception4a.relprop(R)
# R = self.maxpool3.relprop(R)
# R = self.inception3b.relprop(R)
# R = self.inception3a.relprop(R)
# R = self.maxpool2.relprop(R)
# R = self.conv3.relprop(R)
# R = self.conv2.relprop(R)
# R = self.maxpool1.relprop(R)
# R = self.conv1.relprop(R)
return R
class InceptionAux(nn.Module):
def __init__(
self,
in_channels: int,
num_classes: int,
conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super(InceptionAux, self).__init__()
if conv_block is None:
conv_block = BasicConv2d
self.conv = conv_block(in_channels, 128, kernel_size=1)
self.fc1 = nn.Linear(2048, 1024)
self.fc2 = nn.Linear(1024, num_classes)
def forward(self, x: Tensor) -> Tensor:
# aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
x = F.adaptive_avg_pool2d(x, (4, 4))
# aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
x = self.conv(x)
# N x 128 x 4 x 4
x = torch.flatten(x, 1)
# N x 2048
x = F.relu(self.fc1(x), inplace=True)
# N x 1024
x = F.dropout(x, 0.7, training=self.training)
# N x 1024
x = self.fc2(x)
# N x 1000 (num_classes)
return x
class Inception(nn.Module):
def __init__(
self,
in_channels: int,
ch1x1: int,
ch3x3red: int,
ch3x3: int,
ch5x5red: int,
ch5x5: int,
pool_proj: int,
conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super(Inception, self).__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1)
self.channel1 = ch1x1
self.branch2 = Sequential(
conv_block(in_channels, ch3x3red, kernel_size=1),
conv_block(ch3x3red, ch3x3, kernel_size=3, padding=1)
)
self.channel2 = ch3x3
self.branch3 = Sequential(
conv_block(in_channels, ch5x5red, kernel_size=1),
# Here, kernel_size=3 instead of kernel_size=5 is a known bug.
# Please see https://github.com/pytorch/vision/issues/906 for details.
conv_block(ch5x5red, ch5x5, kernel_size=3, padding=1)
)
self.channel3 = ch5x5
self.branch4 = Sequential(
MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),
conv_block(in_channels, pool_proj, kernel_size=1)
)
self.channel4 = pool_proj
def _forward(self, x: Tensor) -> List[Tensor]:
branch1 = self.branch1(x)
branch2 = self.branch2(x)
branch3 = self.branch3(x)
branch4 = self.branch4(x)
outputs = [branch1, branch2, branch3, branch4]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
def relprop(self,R):
R1 = R[:,:self.channel1]
R2 = R[:, self.channel1:self.channel1+self.channel2]
R3 = R[:, self.channel1+self.channel2:self.channel1+self.channel2+self.channel3]
R4 = R[:, self.channel1+self.channel2+self.channel3:]
R1 = self.branch1.relprop(R1)
R2 = self.branch2.relprop(R2)
R3 = self.branch3.relprop(R3)
R4 = self.branch4.relprop(R4)
R = R1+R2+R3+R4
return R
class BasicConv2d(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
**kwargs: Any
) -> None:
super(BasicConv2d, self).__init__()
self.conv = Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = BatchNorm2d(out_channels, eps=0.001)
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
def relprop(self,R):
R = self.bn.relprop(R)
R = self.conv.relprop(R)
return R
| 32.994805
| 119
| 0.581831
| 1,704
| 12,703
| 4.220657
| 0.181338
| 0.027809
| 0.041296
| 0.054227
| 0.29644
| 0.241518
| 0.179227
| 0.16713
| 0.13668
| 0.13668
| 0
| 0.073857
| 0.302921
| 12,703
| 385
| 120
| 32.994805
| 0.73834
| 0.215855
| 0
| 0.163717
| 0
| 0
| 0.067507
| 0
| 0
| 0
| 0
| 0
| 0.004425
| 1
| 0.070796
| false
| 0
| 0.039823
| 0
| 0.185841
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75fe4ffed842895823f432c3592116337d923fac
| 8,457
|
py
|
Python
|
polyglotdb/client/client.py
|
michaelhaaf/PolyglotDB
|
7640212c7062cf44ae911081241ce83a26ced2eb
|
[
"MIT"
] | 25
|
2016-01-28T20:47:07.000Z
|
2021-11-29T16:13:07.000Z
|
polyglotdb/client/client.py
|
michaelhaaf/PolyglotDB
|
7640212c7062cf44ae911081241ce83a26ced2eb
|
[
"MIT"
] | 120
|
2016-04-07T17:55:09.000Z
|
2022-03-24T18:30:10.000Z
|
polyglotdb/client/client.py
|
PhonologicalCorpusTools/PolyglotDB
|
7640212c7062cf44ae911081241ce83a26ced2eb
|
[
"MIT"
] | 10
|
2015-12-03T20:06:58.000Z
|
2021-02-11T03:02:48.000Z
|
import requests
from ..exceptions import ClientError
class PGDBClient(object):
"""
Simple client for interacting with ISCAN servers.
"""
def __init__(self, host, token=None, corpus_name=None):
self.host = host
self.token = token
if self.host.endswith('/'):
self.host = self.host[:-1]
self.corpus_name = corpus_name
self.query_behavior = 'speaker'
def login(self, user_name, password):
"""
Get an authentication token from the ISCAN server using the specified credentials
Parameters
----------
user_name : str
User name
password : str
Password
Returns
-------
str
Authentication token to use in future requests
"""
end_point = '/'.join([self.host, 'api', 'rest-auth', 'login', ''])
resp = requests.post(end_point, {'username': user_name, 'password': password})
token = resp.json()['key']
self.token = token
return token
def create_database(self, database_name):
"""
Create a new database with the specified name
Parameters
----------
database_name : str
Name of the database to be created
Returns
-------
dict
Database information
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
raise ClientError('Could not create database, already exists.')
end_point = '/'.join([self.host, 'api', 'databases', ''])
data = {'name': database_name}
resp = requests.post(end_point, data=data, headers={'Authorization': 'Token {}'.format(self.token)})
if resp.status_code not in [200, 201, 202]:
raise ClientError('Could not create database: {}'.format(resp.text))
return resp.json()
def delete_database(self, database_name):
"""
Delete a database and all associated content
Parameters
----------
database_name : str
Name of database to be deleted
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not delete database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), ''])
resp = requests.delete(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
if resp.status_code != 204:
raise ClientError('Could not delete database.')
def database_status(self, database_name=None):
"""
Get the current status of a specified database, or all databases on the server.
Parameters
----------
database_name : str
Name of database to get status of, if not specified, will get status of all databases
Returns
-------
dict
Database status JSON
"""
if database_name is not None:
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
return resp.json()
else:
end_point = '/'.join([self.host, 'api', 'databases', ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
return resp.json()
def get_directory(self, database_name):
"""
Get the directory of a local database
Parameters
----------
database_name : str
Name of database
Returns
-------
str
Database data directory
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'data_directory', ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
return resp.json()
def get_ports(self, database_name):
"""
Get the ports of a locally running database
Parameters
----------
database_name : str
Name of database
Returns
-------
dict
Ports of the database
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'ports', ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
return resp.json()
def list_databases(self):
"""
Get a list of all databases
Returns
-------
list
Database information
"""
end_point = '/'.join([self.host, 'api', 'databases', ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
if resp.status_code != 200:
raise ClientError('Encountered error getting list of databases: {}'.format(resp.json()))
return resp.json()
def list_corpora(self, database_name=None):
"""
Get a list of all corpora
Parameters
----------
database_name : str
Name of the database to restrict corpora list to, optional
Returns
-------
list
Corpora information
"""
if database_name is not None:
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'corpora', ''])
else:
end_point = '/'.join([self.host, 'api', 'corpora', ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
return resp.json()
def start_database(self, database_name):
"""
Start a database
Parameters
----------
database_name : str
Database to start
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'start', ''])
resp = requests.post(end_point, data={}, headers={'Authorization': 'Token {}'.format(self.token)})
if resp.status_code not in [200, 201, 202]:
raise ClientError('Could not start database: {}'.format(resp.text))
def stop_database(self, database_name):
"""
Stop a database
Parameters
----------
database_name : str
Database to stop
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'stop', ''])
resp = requests.post(end_point, data={}, headers={'Authorization': 'Token {}'.format(self.token)})
if resp.status_code not in [200, 201, 202]:
raise ClientError('Could not stop database: {}'.format(resp.text))
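# Hedged usage sketch (host and credentials are illustrative):
#   client = PGDBClient('http://localhost:8080')
#   client.login('user', 'password')       # stores the token for later calls
#   client.create_database('my_database')
#   client.start_database('my_database')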
| 33.295276
| 108
| 0.537898
| 895
| 8,457
| 4.973184
| 0.130726
| 0.072793
| 0.032352
| 0.043136
| 0.680521
| 0.644125
| 0.611099
| 0.596945
| 0.558751
| 0.514716
| 0
| 0.006033
| 0.33357
| 8,457
| 253
| 109
| 33.426877
| 0.783712
| 0.184936
| 0
| 0.661157
| 0
| 0
| 0.157215
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0.016529
| 0.016529
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75fefd40d863da1697a3900b9bc8d32e531394bf
| 2,745
|
py
|
Python
|
python/plotCSV.py
|
lrquad/LoboScripts
|
04d2de79d2d83e781e3f4a3de2531dc48e4013a6
|
[
"MIT"
] | null | null | null |
python/plotCSV.py
|
lrquad/LoboScripts
|
04d2de79d2d83e781e3f4a3de2531dc48e4013a6
|
[
"MIT"
] | null | null | null |
python/plotCSV.py
|
lrquad/LoboScripts
|
04d2de79d2d83e781e3f4a3de2531dc48e4013a6
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
from matplotlib import rcParams
import matplotlib.patches as patches
rcParams['font.family'] = 'Times New Roman'
rcParams['font.size'] = 20
rcParams['axes.edgecolor'] = (0.0, 0.0, 0.0)
rcParams['axes.linewidth'] = 2
hfont = {'fontname': 'Times New Roman'}
folderpath = "./testdata/"
def format_e(n):
# Scientific-notation label like "1.0E-03"; note the rstrip trick assumes the
# mantissa carries no significant decimals (1.5 would render as "1.5.0E+00").
a = '%E' % n
return a.split('E')[0].rstrip('0').rstrip('.') + '.0E' + a.split('E')[1]
def loadData(path, logscale=True, floor=1e-16):
# Clamp values to a small floor so log10 is defined, then optionally move to
# log10 space.
data = np.array(np.loadtxt(path, delimiter=',', unpack=True))
data[data < floor] = floor
if logscale:
data = np.log10(data)
return data
def loadlabel(path):
data = np.array(np.loadtxt(path, delimiter=' ', unpack=True))
return data
def init(ax,xlabel_list,y_min,y_max,num_x,x_labels_num = 16,y_labels_num=18):
ax.set_xlim(0, num_x)
print(y_min,y_max)
ax.set_ylim(y_min, y_max)
ax.set_xlabel("Perturbation size",**hfont)
ax.set_ylabel("Relative error ", **hfont)
y_labels_tuples = ()
ax.yaxis.set_major_locator(plt.MaxNLocator(y_labels_num))
x_labels_tuples = ()
ax.xaxis.set_major_locator(plt.MaxNLocator(x_labels_num+1))
for i in range(0,y_labels_num):
y_value = i/(y_labels_num-1)*(y_max-y_min)+y_min
y_value = format_e(10**int(y_value))
y_labels_tuples = y_labels_tuples+(y_value,)
ax.set_yticklabels(y_labels_tuples,size = 10)
for i in range(0,x_labels_num):
index = int(i/(x_labels_num-1)*(num_x-1))
x_labels_tuples = x_labels_tuples + (format_e(xlabel_list[index]),)
ax.set_xticklabels(x_labels_tuples,size = 15)
plt.xticks(rotation=45)
return
def plotData(data, labels,names):
num_lines = data.shape[0]
num_x = data.shape[1]
y_max = np.max(data)
y_min = np.min(data)
fig, ax = plt.subplots()
init(ax,labels,y_min,y_max,num_x,x_labels_num=16,y_labels_num=18)
fig.set_figheight(8)
fig.set_figwidth(8)
plt.grid(True)
ydata = np.arange(num_x)
for i in range(num_lines):
print(ydata.tolist())
print(data[i,:].tolist())
plt.plot(ydata.tolist(),data[i,:].tolist(),'-', animated=False,antialiased=True,markersize=5,color = '#FF5C5C',label = names[i],linewidth = 6)
#ln, = plt.plot(ydata.tolist(),data[0,:].tolist(),'-', animated=True,antialiased=True,markersize=5,color = '#FF5C5C',label = "te",linewidth = 6)
plt.subplots_adjust(bottom=0.22)
plt.show()
return
if __name__ == "__main__":
data = loadData(folderpath+"ttmath_error.csv")
labels = loadlabel(folderpath+"h_list.csv")
plotData(data, labels,["ttmath","FD","CD","CFD"])
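# Hedged note on the expected inputs (inferred from plotData, not documented):
# ttmath_error.csv is comma-separated with one column per method (loadtxt with
# unpack=True transposes it to one row per method, matching the four names
# passed above), and h_list.csv holds the perturbation sizes for the x labels.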
| 30.5
| 151
| 0.665938
| 428
| 2,745
| 4.063084
| 0.299065
| 0.051754
| 0.017251
| 0.018401
| 0.230017
| 0.154112
| 0.13916
| 0.090857
| 0.090857
| 0.041403
| 0
| 0.025121
| 0.173406
| 2,745
| 89
| 152
| 30.842697
| 0.741296
| 0.052095
| 0
| 0.060606
| 0
| 0
| 0.075
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075758
| false
| 0
| 0.090909
| 0
| 0.242424
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f01f5d13c019c855d7b51b2b4f48b63f6f7275b
| 12,327
|
py
|
Python
|
wrappers/python/virgil_crypto_lib/foundation/_c_bridge/_vscf_rsa.py
|
odidev/virgil-crypto-c
|
3d5d5cb19fdcf81eab08cdc63647f040117ecbd8
|
[
"BSD-3-Clause"
] | 26
|
2018-12-17T13:45:25.000Z
|
2022-01-16T20:00:04.000Z
|
wrappers/python/virgil_crypto_lib/foundation/_c_bridge/_vscf_rsa.py
|
odidev/virgil-crypto-c
|
3d5d5cb19fdcf81eab08cdc63647f040117ecbd8
|
[
"BSD-3-Clause"
] | 4
|
2019-01-03T12:08:52.000Z
|
2021-12-02T05:21:13.000Z
|
wrappers/python/virgil_crypto_lib/foundation/_c_bridge/_vscf_rsa.py
|
odidev/virgil-crypto-c
|
3d5d5cb19fdcf81eab08cdc63647f040117ecbd8
|
[
"BSD-3-Clause"
] | 8
|
2019-01-24T08:22:06.000Z
|
2022-02-07T11:37:00.000Z
|
# Copyright (C) 2015-2021 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
from virgil_crypto_lib._libs import *
from ctypes import *
from ._vscf_impl import vscf_impl_t
from ._vscf_error import vscf_error_t
from ._vscf_raw_public_key import vscf_raw_public_key_t
from ._vscf_raw_private_key import vscf_raw_private_key_t
from virgil_crypto_lib.common._c_bridge import vsc_data_t
from virgil_crypto_lib.common._c_bridge import vsc_buffer_t
class vscf_rsa_t(Structure):
pass
class VscfRsa(object):
"""RSA implementation."""
# Defines whether a public key can be imported or not.
CAN_IMPORT_PUBLIC_KEY = True
# Defines whether a public key can be exported or not.
CAN_EXPORT_PUBLIC_KEY = True
# Defines whether a private key can be imported or not.
CAN_IMPORT_PRIVATE_KEY = True
# Defines whether a private key can be exported or not.
CAN_EXPORT_PRIVATE_KEY = True
def __init__(self):
"""Create underlying C context."""
self._ll = LowLevelLibs()
self._lib = self._ll.foundation
def vscf_rsa_new(self):
vscf_rsa_new = self._lib.vscf_rsa_new
vscf_rsa_new.argtypes = []
vscf_rsa_new.restype = POINTER(vscf_rsa_t)
return vscf_rsa_new()
def vscf_rsa_delete(self, ctx):
vscf_rsa_delete = self._lib.vscf_rsa_delete
vscf_rsa_delete.argtypes = [POINTER(vscf_rsa_t)]
vscf_rsa_delete.restype = None
return vscf_rsa_delete(ctx)
def vscf_rsa_use_random(self, ctx, random):
vscf_rsa_use_random = self._lib.vscf_rsa_use_random
vscf_rsa_use_random.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t)]
vscf_rsa_use_random.restype = None
return vscf_rsa_use_random(ctx, random)
def vscf_rsa_generate_ephemeral_key(self, ctx, key, error):
"""Generate ephemeral private key of the same type.
Note, this operation might be slow."""
vscf_rsa_generate_ephemeral_key = self._lib.vscf_rsa_generate_ephemeral_key
vscf_rsa_generate_ephemeral_key.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), POINTER(vscf_error_t)]
vscf_rsa_generate_ephemeral_key.restype = POINTER(vscf_impl_t)
return vscf_rsa_generate_ephemeral_key(ctx, key, error)
def vscf_rsa_import_public_key(self, ctx, raw_key, error):
"""Import public key from the raw binary format.
Return public key that is adopted and optimized to be used
with this particular algorithm.
Binary format must be defined in the key specification.
For instance, RSA public key must be imported from the format defined in
RFC 3447 Appendix A.1.1."""
vscf_rsa_import_public_key = self._lib.vscf_rsa_import_public_key
vscf_rsa_import_public_key.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_raw_public_key_t), POINTER(vscf_error_t)]
vscf_rsa_import_public_key.restype = POINTER(vscf_impl_t)
return vscf_rsa_import_public_key(ctx, raw_key, error)
def vscf_rsa_export_public_key(self, ctx, public_key, error):
"""Export public key to the raw binary format.
Binary format must be defined in the key specification.
For instance, RSA public key must be exported in format defined in
RFC 3447 Appendix A.1.1."""
vscf_rsa_export_public_key = self._lib.vscf_rsa_export_public_key
vscf_rsa_export_public_key.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), POINTER(vscf_error_t)]
vscf_rsa_export_public_key.restype = POINTER(vscf_raw_public_key_t)
return vscf_rsa_export_public_key(ctx, public_key, error)
def vscf_rsa_import_private_key(self, ctx, raw_key, error):
"""Import private key from the raw binary format.
Return private key that is adopted and optimized to be used
with this particular algorithm.
Binary format must be defined in the key specification.
For instance, RSA private key must be imported from the format defined in
RFC 3447 Appendix A.1.2."""
vscf_rsa_import_private_key = self._lib.vscf_rsa_import_private_key
vscf_rsa_import_private_key.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_raw_private_key_t), POINTER(vscf_error_t)]
vscf_rsa_import_private_key.restype = POINTER(vscf_impl_t)
return vscf_rsa_import_private_key(ctx, raw_key, error)
def vscf_rsa_export_private_key(self, ctx, private_key, error):
"""Export private key in the raw binary format.
Binary format must be defined in the key specification.
For instance, RSA private key must be exported in format defined in
RFC 3447 Appendix A.1.2."""
vscf_rsa_export_private_key = self._lib.vscf_rsa_export_private_key
vscf_rsa_export_private_key.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), POINTER(vscf_error_t)]
vscf_rsa_export_private_key.restype = POINTER(vscf_raw_private_key_t)
return vscf_rsa_export_private_key(ctx, private_key, error)
def vscf_rsa_can_encrypt(self, ctx, public_key, data_len):
"""Check if algorithm can encrypt data with a given key."""
vscf_rsa_can_encrypt = self._lib.vscf_rsa_can_encrypt
vscf_rsa_can_encrypt.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_size_t]
vscf_rsa_can_encrypt.restype = c_bool
return vscf_rsa_can_encrypt(ctx, public_key, data_len)
def vscf_rsa_encrypted_len(self, ctx, public_key, data_len):
"""Calculate required buffer length to hold the encrypted data."""
vscf_rsa_encrypted_len = self._lib.vscf_rsa_encrypted_len
vscf_rsa_encrypted_len.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_size_t]
vscf_rsa_encrypted_len.restype = c_size_t
return vscf_rsa_encrypted_len(ctx, public_key, data_len)
def vscf_rsa_encrypt(self, ctx, public_key, data, out):
"""Encrypt data with a given public key."""
vscf_rsa_encrypt = self._lib.vscf_rsa_encrypt
vscf_rsa_encrypt.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), vsc_data_t, POINTER(vsc_buffer_t)]
vscf_rsa_encrypt.restype = c_int
return vscf_rsa_encrypt(ctx, public_key, data, out)
def vscf_rsa_can_decrypt(self, ctx, private_key, data_len):
"""Check if algorithm can decrypt data with a given key.
However, success result of decryption is not guaranteed."""
vscf_rsa_can_decrypt = self._lib.vscf_rsa_can_decrypt
vscf_rsa_can_decrypt.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_size_t]
vscf_rsa_can_decrypt.restype = c_bool
return vscf_rsa_can_decrypt(ctx, private_key, data_len)
def vscf_rsa_decrypted_len(self, ctx, private_key, data_len):
"""Calculate required buffer length to hold the decrypted data."""
vscf_rsa_decrypted_len = self._lib.vscf_rsa_decrypted_len
vscf_rsa_decrypted_len.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_size_t]
vscf_rsa_decrypted_len.restype = c_size_t
return vscf_rsa_decrypted_len(ctx, private_key, data_len)
def vscf_rsa_decrypt(self, ctx, private_key, data, out):
"""Decrypt given data."""
vscf_rsa_decrypt = self._lib.vscf_rsa_decrypt
vscf_rsa_decrypt.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), vsc_data_t, POINTER(vsc_buffer_t)]
vscf_rsa_decrypt.restype = c_int
return vscf_rsa_decrypt(ctx, private_key, data, out)
def vscf_rsa_can_sign(self, ctx, private_key):
"""Check if algorithm can sign data digest with a given key."""
vscf_rsa_can_sign = self._lib.vscf_rsa_can_sign
vscf_rsa_can_sign.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t)]
vscf_rsa_can_sign.restype = c_bool
return vscf_rsa_can_sign(ctx, private_key)
def vscf_rsa_signature_len(self, ctx, private_key):
"""Return length in bytes required to hold signature.
Return zero if a given private key can not produce signatures."""
vscf_rsa_signature_len = self._lib.vscf_rsa_signature_len
vscf_rsa_signature_len.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t)]
vscf_rsa_signature_len.restype = c_size_t
return vscf_rsa_signature_len(ctx, private_key)
def vscf_rsa_sign_hash(self, ctx, private_key, hash_id, digest, signature):
"""Sign data digest with a given private key."""
vscf_rsa_sign_hash = self._lib.vscf_rsa_sign_hash
vscf_rsa_sign_hash.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_int, vsc_data_t, POINTER(vsc_buffer_t)]
vscf_rsa_sign_hash.restype = c_int
return vscf_rsa_sign_hash(ctx, private_key, hash_id, digest, signature)
def vscf_rsa_can_verify(self, ctx, public_key):
"""Check if algorithm can verify data digest with a given key."""
vscf_rsa_can_verify = self._lib.vscf_rsa_can_verify
vscf_rsa_can_verify.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t)]
vscf_rsa_can_verify.restype = c_bool
return vscf_rsa_can_verify(ctx, public_key)
def vscf_rsa_verify_hash(self, ctx, public_key, hash_id, digest, signature):
"""Verify data digest with a given public key and signature."""
vscf_rsa_verify_hash = self._lib.vscf_rsa_verify_hash
vscf_rsa_verify_hash.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_int, vsc_data_t, vsc_data_t]
vscf_rsa_verify_hash.restype = c_bool
return vscf_rsa_verify_hash(ctx, public_key, hash_id, digest, signature)
def vscf_rsa_setup_defaults(self, ctx):
"""Setup predefined values to the uninitialized class dependencies."""
vscf_rsa_setup_defaults = self._lib.vscf_rsa_setup_defaults
vscf_rsa_setup_defaults.argtypes = [POINTER(vscf_rsa_t)]
vscf_rsa_setup_defaults.restype = c_int
return vscf_rsa_setup_defaults(ctx)
def vscf_rsa_generate_key(self, ctx, bitlen, error):
"""Generate new private key.
Note, this operation might be slow."""
vscf_rsa_generate_key = self._lib.vscf_rsa_generate_key
vscf_rsa_generate_key.argtypes = [POINTER(vscf_rsa_t), c_size_t, POINTER(vscf_error_t)]
vscf_rsa_generate_key.restype = POINTER(vscf_impl_t)
return vscf_rsa_generate_key(ctx, bitlen, error)
def vscf_rsa_shallow_copy(self, ctx):
vscf_rsa_shallow_copy = self._lib.vscf_rsa_shallow_copy
vscf_rsa_shallow_copy.argtypes = [POINTER(vscf_rsa_t)]
vscf_rsa_shallow_copy.restype = POINTER(vscf_rsa_t)
return vscf_rsa_shallow_copy(ctx)
def vscf_rsa_impl(self, ctx):
vscf_rsa_impl = self._lib.vscf_rsa_impl
vscf_rsa_impl.argtypes = [POINTER(vscf_rsa_t)]
vscf_rsa_impl.restype = POINTER(vscf_impl_t)
return vscf_rsa_impl(ctx)
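# Hedged usage sketch (illustrative; assumes the foundation shared library is
# discoverable by LowLevelLibs and that `error` is a vscf_error_t instance):
#   rsa = VscfRsa()
#   ctx = rsa.vscf_rsa_new()
#   rsa.vscf_rsa_setup_defaults(ctx)
#   key = rsa.vscf_rsa_generate_key(ctx, 2048, byref(error))
#   rsa.vscf_rsa_delete(ctx)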
| 49.705645
| 124
| 0.736108
| 1,875
| 12,327
| 4.4672
| 0.1264
| 0.136223
| 0.023878
| 0.04298
| 0.671442
| 0.53713
| 0.435769
| 0.368792
| 0.306232
| 0.24797
| 0
| 0.00352
| 0.193397
| 12,327
| 247
| 125
| 49.906883
| 0.838882
| 0.297883
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.180451
| false
| 0.007519
| 0.150376
| 0
| 0.548872
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f03da5c972d890701aa5588b07be7bd754ca560
| 5,268
|
py
|
Python
|
bulk-image-optimizer.py
|
carzam87/python-bulk-image-optimizer
|
1e9e9396de84de3651b963fc3b8b569893296dde
|
[
"MIT"
] | 8
|
2020-01-28T10:33:28.000Z
|
2022-01-28T12:51:50.000Z
|
bulk-image-optimizer.py
|
carzam87/python-bulk-image-optimizer
|
1e9e9396de84de3651b963fc3b8b569893296dde
|
[
"MIT"
] | null | null | null |
bulk-image-optimizer.py
|
carzam87/python-bulk-image-optimizer
|
1e9e9396de84de3651b963fc3b8b569893296dde
|
[
"MIT"
] | 5
|
2020-09-29T08:26:35.000Z
|
2021-11-15T20:07:20.000Z
|
import os
import shutil
from pathlib import Path
from PIL import Image
import errno
import time
from re import search
CONVERT_PNG_TO_JPG = False
TOTAL_ORIGINAL = 0
TOTAL_COMPRESSED = 0
TOTAL_GAIN = 0
TOTAL_FILES = 0
QUALITY = 85
def compress(location):
for r, d, f in os.walk(location):
for item in d:
compress(location + os.sep + item)
for image in f:
path = location
input_path = path + os.sep + image
out_path = path.replace(r'input', r'output')
if image.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif', '.webp')):
if os.path.isfile(input_path):
global TOTAL_GAIN
global TOTAL_ORIGINAL
global TOTAL_COMPRESSED
global TOTAL_FILES
global QUALITY
opt = None
try:
opt = Image.open(input_path)
except Exception:
# cannot be opened as an image; report and skip it
print(f'skipping file, cannot open: {input_path}')
continue
original_size = os.stat(input_path).st_size / 1024 / 1024
TOTAL_ORIGINAL += original_size
print(input_path)
print("Original size: " + f'{original_size:,.2f}' + ' Megabytes')
if not os.path.exists(out_path):
try:
os.makedirs(out_path, exist_ok=True)
except OSError as e:
#wait for race condition to settle
time.sleep(1)
# try to create the folder again
os.makedirs(out_path, exist_ok=True)
if e.errno != errno.EEXIST:
raise
out_file= out_path + os.sep + image
# Convert .png to .jpg
if CONVERT_PNG_TO_JPG and image.lower().endswith('.png'):
im = opt
rgb_im = im.convert('RGB')
out_file = out_file.replace(".png", ".jpg")
rgb_im.save(out_file)
opt = Image.open(out_file)
opt.save(out_file, optimize=True, quality=QUALITY)
opt = Image.open(out_file)
compressed_size = os.stat(out_file).st_size / 1024 / 1024
TOTAL_COMPRESSED += compressed_size
gain = original_size - compressed_size
TOTAL_GAIN += gain
TOTAL_FILES +=1
print("Compressed size: " + f'{compressed_size:,.2f}' + " megabytes")
print("Gain : " + f'{gain:,.2f}' + " megabytes")
opt.close()
else:
if os.path.isdir(out_path) and not os.path.exists(out_path):
try:
os.makedirs(out_path, exist_ok=True)
except OSError as e:
#wait for race condition to settle
time.sleep(1)
# try to create the folder again
os.makedirs(out_path, exist_ok=True)
if e.errno != errno.EEXIST:
raise
if os.path.isfile(input_path):
if not os.path.exists(out_path):
try:
os.makedirs(out_path, exist_ok=True)
except OSError as e:
#wait for race condition to settle
time.sleep(1)
# try to create the folder again
os.makedirs(out_path, exist_ok=True)
if e.errno != errno.EEXIST:
raise
input_file = input_path
output_file= input_file.replace('input','output')
print('File not an image, copying instead: ' + input_path)
shutil.copy2(input_file, output_file)
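# Hedged summary: compress() mirrors the directory tree rooted at ./input
# into ./output, re-saving recognized image types at QUALITY, converting
# .png to .jpg when enabled, and copying any other file verbatim.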
if __name__ == '__main__':
start_path = os.path.dirname(os.path.abspath(__file__)) + os.sep + r"input"
# ask if .png images should be automatically converted to .jpg
CONVERT_PNG_TO_JPG = input('Would you like to convert .png images to .jpg? (y/n): ') == 'y'
TOTAL_GAIN = 0
compress(start_path)
print("---------------------------------------------------------------------------------------------")
print('-------------------------------------------SUMMARY-------------------------------------------')
print('Files: ' + f'{TOTAL_FILES}')
print(
"Original: " + f'{TOTAL_ORIGINAL:,.2f}' + " megabytes || " + "New Size: " + f'{TOTAL_COMPRESSED:,.2f}' +
" megabytes" + " || Gain: " + f'{TOTAL_GAIN:,.2f}' + " megabytes ~" + f'{(TOTAL_GAIN / TOTAL_ORIGINAL) * 100:,.2f}'
+ "% reduction")
| 44.644068
| 123
| 0.44609
| 525
| 5,268
| 4.297143
| 0.234286
| 0.037234
| 0.034574
| 0.045213
| 0.300532
| 0.266844
| 0.246454
| 0.246454
| 0.246454
| 0.246454
| 0
| 0.01235
| 0.431283
| 5,268
| 117
| 124
| 45.025641
| 0.740654
| 0.058846
| 0
| 0.309278
| 0
| 0
| 0.145282
| 0.050919
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010309
| false
| 0
| 0.072165
| 0
| 0.082474
| 0.103093
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f03ebf048e5859cb54e5897517da48e3b0f38d0
| 16,968
|
py
|
Python
|
interpretdl/interpreter/lime.py
|
Tyihou/InterpretDL
|
df8894f8703634df4bfcbdcc495a3d12b220028c
|
[
"Apache-2.0"
] | 1
|
2021-03-11T02:38:51.000Z
|
2021-03-11T02:38:51.000Z
|
interpretdl/interpreter/lime.py
|
Tyihou/InterpretDL
|
df8894f8703634df4bfcbdcc495a3d12b220028c
|
[
"Apache-2.0"
] | null | null | null |
interpretdl/interpreter/lime.py
|
Tyihou/InterpretDL
|
df8894f8703634df4bfcbdcc495a3d12b220028c
|
[
"Apache-2.0"
] | null | null | null |
import os
import typing
from typing import Any, Callable, List, Tuple, Union
import numpy as np
from ..data_processor.readers import preprocess_image, read_image, restore_image
from ..data_processor.visualizer import show_important_parts, visualize_image, save_image
from ..common.paddle_utils import init_checkpoint, to_lodtensor
from ._lime_base import LimeBase
from .abc_interpreter import Interpreter
class LIMECVInterpreter(Interpreter):
"""
LIME Interpreter for CV tasks.
More details regarding the LIME method can be found in the original paper:
https://arxiv.org/abs/1602.04938
"""
def __init__(self,
paddle_model: Callable,
trained_model_path: str,
model_input_shape=[3, 224, 224],
use_cuda=True) -> None:
"""
Initialize the LIMECVInterpreter.
Args:
paddle_model (callable): A user-defined function that gives access to model predictions.
It takes the following arguments:
- data: Data inputs.
and outputs predictions. See the example at the end of ``interpret()``.
trained_model_path (str): The pretrained model directory.
model_input_shape (list, optional): The input shape of the model. Default: [3, 224, 224]
use_cuda (bool, optional): Whether or not to use cuda. Default: True
"""
Interpreter.__init__(self)
self.paddle_model = paddle_model
self.trained_model_path = trained_model_path
self.model_input_shape = model_input_shape
self.use_cuda = use_cuda
self.paddle_prepared = False
# use the default LIME setting
self.lime_base = LimeBase()
self.lime_intermediate_results = {}
def interpret(self,
data,
interpret_class=None,
num_samples=1000,
batch_size=50,
visual=True,
save_path=None):
"""
Main function of the interpreter.
Args:
data (str): The input file path.
interpret_class (int, optional): The index of class to interpret. If None, the most likely label will be used. Default: None
num_samples (int, optional): LIME sampling numbers. Larger number of samples usually gives more accurate interpretation. Default: 1000
batch_size (int, optional): Number of samples to forward each time. Default: 50
visual (bool, optional): Whether or not to visualize the processed image. Default: True
save_path (str, optional): The path to save the processed image. If None, the image will not be saved. Default: None
:return: LIME Prior weights: {interpret_label_i: weights on features}
:rtype: dict
Example::
import interpretdl as it
def paddle_model(data):
import paddle.fluid as fluid
class_num = 1000
model = ResNet50()
logits = model.net(input=image_input, class_dim=class_num)
probs = fluid.layers.softmax(logits, axis=-1)
return probs
lime = it.LIMECVInterpreter(paddle_model, "assets/ResNet50_pretrained")
lime_weights = lime.interpret(
'assets/catdog.png',
num_samples=1000,
batch_size=100,
save_path='assets/catdog_lime.png')
"""
if isinstance(data, str):
data_instance = read_image(
data, crop_size=self.model_input_shape[1])
else:
if len(data.shape) == 3:
data = np.expand_dims(data, axis=0)
if np.issubdtype(data.dtype, np.integer):
data_instance = data
else:
data_instance = restore_image(data.copy())
self.input_type = type(data_instance)
self.data_type = np.array(data_instance).dtype
if not self.paddle_prepared:
self._paddle_prepare()
# only one example here
probability = self.predict_fn(data_instance)[0]
# only interpret top 1
if interpret_class is None:
pred_label = np.argsort(probability)
interpret_class = pred_label[-1:]
interpret_class = np.array(interpret_class)
lime_weights, r2_scores = self.lime_base.interpret_instance(
data_instance[0],
self.predict_fn,
interpret_class,
num_samples=num_samples,
batch_size=batch_size)
interpretation = show_important_parts(
data_instance[0],
lime_weights,
interpret_class[0],
self.lime_base.segments,
visual=visual,
save_path=save_path)
self.lime_intermediate_results['probability'] = probability
self.lime_intermediate_results['input'] = data_instance[0]
self.lime_intermediate_results[
'segmentation'] = self.lime_base.segments
self.lime_intermediate_results['r2_scores'] = r2_scores
return lime_weights
def _paddle_prepare(self, predict_fn=None):
if predict_fn is None:
import paddle.fluid as fluid
startup_prog = fluid.Program()
main_program = fluid.Program()
with fluid.program_guard(main_program, startup_prog):
with fluid.unique_name.guard():
data_op = fluid.data(
name='data',
shape=[None] + self.model_input_shape,
dtype='float32')
probs = self.paddle_model(data_op)
if isinstance(probs, tuple):
probs = probs[0]
main_program = main_program.clone(for_test=True)
if self.use_cuda:
gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
place = fluid.CUDAPlace(gpu_id)
else:
place = fluid.CPUPlace()
self.place = place
exe = fluid.Executor(place)
fluid.io.load_persistables(exe, self.trained_model_path,
main_program)
def predict_fn(data_instance):
data = preprocess_image(
data_instance
) # transpose to [N, 3, H, W], scaled to [0.0, 1.0]
[result] = exe.run(main_program,
fetch_list=[probs],
feed={'data': data})
return result
self.predict_fn = predict_fn
self.paddle_prepared = True
class LIMENLPInterpreter(Interpreter):
"""
LIME Interpreter for NLP tasks.
More details regarding the LIME method can be found in the original paper:
https://arxiv.org/abs/1602.04938
"""
def __init__(self,
paddle_model: Callable,
trained_model_path: str,
use_cuda=True) -> None:
"""
Initialize the LIMENLPInterpreter.
Args:
paddle_model (callable): A user-defined function that gives access to model predictions.
It takes the following arguments:
- data: Data inputs.
and outputs predictions. See the example at the end of ``interpret()``.
trained_model_path (str): The pretrained model directory.
use_cuda (bool, optional): Whether or not to use cuda. Default: True
"""
Interpreter.__init__(self)
self.paddle_model = paddle_model
self.trained_model_path = trained_model_path
self.use_cuda = use_cuda
self.paddle_prepared = False
# use the default LIME setting
self.lime_base = LimeBase()
self.lime_intermediate_results = {}
def interpret(self,
data,
preprocess_fn,
unk_id,
pad_id=None,
interpret_class=None,
num_samples=1000,
batch_size=50,
lod_levels=None,
return_pred=False,
visual=True):
"""
Main function of the interpreter.
Args:
data (str): The raw string for analysis.
preprocess_fn (Callable): A user-defined function that takes the raw string and outputs a tuple of inputs to feed into the NLP model.
unk_id (int): The word id to replace occluded words. Typical choices include "", <unk>, and <pad>.
pad_id (int or None): The word id used to pad the sequences. If None, it means there is no padding. Default: None.
interpret_class (list or numpy.ndarray, optional): The index of class to interpret. If None, the most likely label will be used. Default: None
num_samples (int, optional): LIME sampling numbers. Larger number of samples usually gives more accurate interpretation. Default: 1000
batch_size (int, optional): Number of samples to forward each time. Default: 50
lod_levels (list or tuple or numpy.ndarray or None, optional): The lod levels for model inputs. It should have a length equal to the number of outputs given by preprocess_fn.
If None, lod levels are all zeros. Default: None.
visual (bool, optional): Whether or not to visualize. Default: True
:return: LIME Prior weights: {interpret_label_i: weights on features}
:rtype: dict
Example::
from assets.bilstm import bilstm
import io
import interpretdl as it
from interpretdl.data_processor.visualizer import VisualizationTextRecord, visualize_text
def load_vocab(file_path):
vocab = {}
with io.open(file_path, 'r', encoding='utf8') as f:
wid = 0
for line in f:
if line.strip() not in vocab:
vocab[line.strip()] = wid
wid += 1
vocab["<unk>"] = len(vocab)
return vocab
MODEL_PATH = "assets/senta_model/bilstm_model"
VOCAB_PATH = os.path.join(MODEL_PATH, "word_dict.txt")
PARAMS_PATH = os.path.join(MODEL_PATH, "params")
DICT_DIM = 1256606
def paddle_model(data, seq_len):
probs = bilstm(data, seq_len, None, DICT_DIM, is_prediction=True)
return probs
MAX_SEQ_LEN = 256
def preprocess_fn(data):
word_ids = []
sub_word_ids = [word_dict.get(d, unk_id) for d in data.split()]
seq_lens = [len(sub_word_ids)]
if len(sub_word_ids) < MAX_SEQ_LEN:
sub_word_ids += [0] * (MAX_SEQ_LEN - len(sub_word_ids))
word_ids.append(sub_word_ids[:MAX_SEQ_LEN])
return word_ids, seq_lens
#https://baidu-nlp.bj.bcebos.com/sentiment_classification-dataset-1.0.0.tar.gz
word_dict = load_vocab(VOCAB_PATH)
unk_id = word_dict[""] #word_dict["<unk>"]
lime = it.LIMENLPInterpreter(paddle_model, PARAMS_PATH)
reviews = [
'交通 方便 ;环境 很好 ;服务态度 很好 房间 较小',
'这本书 实在 太烂 了 , 什么 朗读 手册 , 一点 朗读 的 内容 都 没有 . 看 了 几页 就 不 想 看 下去 了 .'
]
true_labels = [1, 0]
recs = []
for i, review in enumerate(reviews):
pred_class, pred_prob, lime_weights = lime.interpret(
review,
preprocess_fn,
num_samples=200,
batch_size=10,
unk_id=unk_id,
pad_id=0,
return_pred=True)
id2word = dict(zip(word_dict.values(), word_dict.keys()))
for y in lime_weights:
print([(id2word[t[0]], t[1]) for t in lime_weights[y]])
words = review.split()
interp_class = list(lime_weights.keys())[0]
word_importances = [t[1] for t in lime_weights[interp_class]]
word_importances = np.array(word_importances) / np.linalg.norm(
word_importances)
true_label = true_labels[i]
if interp_class == 0:
word_importances = -word_importances
rec = VisualizationTextRecord(words, word_importances, true_label,
pred_class[0], pred_prob[0],
interp_class)
recs.append(rec)
visualize_text(recs)
"""
model_inputs = preprocess_fn(data)
if not isinstance(model_inputs, tuple):
self.model_inputs = (np.array(model_inputs), )
else:
self.model_inputs = tuple(np.array(inp) for inp in model_inputs)
if lod_levels is None:
lod_levels = [0] * len(self.model_inputs)
self.lod_levels = lod_levels
if not self.paddle_prepared:
self._paddle_prepare()
# only one example here
probability = self.predict_fn(*self.model_inputs)[0]
# only interpret top 1
if interpret_class is None:
pred_label = np.argsort(probability)
interpret_class = pred_label[-1:]
lime_weights, r2_scores = self.lime_base.interpret_instance_text(
self.model_inputs,
classifier_fn=self.predict_fn,
interpret_labels=interpret_class,
unk_id=unk_id,
pad_id=pad_id,
num_samples=num_samples,
batch_size=batch_size)
data_array = self.model_inputs[0]
data_array = data_array.reshape((np.prod(data_array.shape), ))
for c in lime_weights:
weights_c = lime_weights[c]
weights_new = [(data_array[tup[0]], tup[1]) for tup in weights_c]
lime_weights[c] = weights_new
if return_pred:
return (interpret_class, probability[interpret_class],
lime_weights)
return lime_weights
def _paddle_prepare(self, predict_fn=None):
if predict_fn is None:
import paddle.fluid as fluid
startup_prog = fluid.Program()
main_program = fluid.Program()
with fluid.program_guard(main_program, startup_prog):
with fluid.unique_name.guard():
data_ops = ()
for i, inp in enumerate(self.model_inputs):
if self.lod_levels[i] > 0:
op_ = fluid.data(
name='op_%d' % i,
shape=[None],
dtype=inp.dtype,
lod_level=self.lod_levels[i])
else:
op_ = fluid.data(
name='op_%d' % i,
shape=(None, ) + inp.shape[1:],
dtype=inp.dtype)
data_ops += (op_, )
probs = self.paddle_model(*data_ops)
if isinstance(probs, tuple):
probs = probs[0]
main_program = main_program.clone(for_test=True)
if self.use_cuda:
gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
place = fluid.CUDAPlace(gpu_id)
else:
place = fluid.CPUPlace()
self.place = place
exe = fluid.Executor(self.place)
#exe.run(startup_prog)
#fluid.io.load_persistables(exe, self.trained_model_path,
# main_program)
init_checkpoint(exe, self.trained_model_path, main_program)
#fluid.load(main_program, self.trained_model_path, exe)
def predict_fn(*params):
params = self._format_model_inputs(params)
[result] = exe.run(
main_program,
fetch_list=[probs],
feed={'op_%d' % i: d
for i, d in enumerate(params)})
return result
self.predict_fn = predict_fn
self.paddle_prepared = True
def _format_model_inputs(self, model_inputs):
out = ()
for i, inp in enumerate(model_inputs):
if self.lod_levels[i] == 0:
out += (inp, )
else:
out += (to_lodtensor(inp, self.place), )
return out
| 39.277778
| 184
| 0.552393
| 1,919
| 16,968
| 4.665451
| 0.181866
| 0.019658
| 0.021445
| 0.013403
| 0.501731
| 0.486317
| 0.463755
| 0.449012
| 0.425109
| 0.379984
| 0
| 0.013825
| 0.369107
| 16,968
| 431
| 185
| 39.36891
| 0.822513
| 0.405646
| 0
| 0.475962
| 0
| 0
| 0.011678
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043269
| false
| 0
| 0.057692
| 0
| 0.139423
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f042e06fed341e6137967c14ffb3b319a432271
| 2,106
|
py
|
Python
|
opendc-web/opendc-web-api/opendc/api/v2/portfolios/portfolioId/scenarios/endpoint.py
|
Koen1999/opendc
|
f9b43518d2d50f33077734537a477539fca9f5b7
|
[
"MIT"
] | null | null | null |
opendc-web/opendc-web-api/opendc/api/v2/portfolios/portfolioId/scenarios/endpoint.py
|
Koen1999/opendc
|
f9b43518d2d50f33077734537a477539fca9f5b7
|
[
"MIT"
] | 4
|
2020-11-27T16:27:58.000Z
|
2020-12-28T23:00:08.000Z
|
opendc-web/opendc-web-api/opendc/api/v2/portfolios/portfolioId/scenarios/endpoint.py
|
Koen1999/opendc
|
f9b43518d2d50f33077734537a477539fca9f5b7
|
[
"MIT"
] | null | null | null |
from opendc.models.portfolio import Portfolio
from opendc.models.scenario import Scenario
from opendc.models.topology import Topology
from opendc.util.rest import Response
def POST(request):
"""Add a new Scenario for this Portfolio."""
request.check_required_parameters(path={'portfolioId': 'string'},
body={
'scenario': {
'name': 'string',
'trace': {
'traceId': 'string',
'loadSamplingFraction': 'float',
},
'topology': {
'topologyId': 'string',
},
'operational': {
'failuresEnabled': 'bool',
'performanceInterferenceEnabled': 'bool',
'schedulerName': 'string',
},
}
})
portfolio = Portfolio.from_id(request.params_path['portfolioId'])
portfolio.check_exists()
portfolio.check_user_access(request.google_id, True)
scenario = Scenario(request.params_body['scenario'])
topology = Topology.from_id(scenario.obj['topology']['topologyId'])
topology.check_exists()
topology.check_user_access(request.google_id, True)
scenario.set_property('portfolioId', portfolio.get_id())
scenario.set_property('simulation', {'state': 'QUEUED'})
scenario.set_property('topology.topologyId', topology.get_id())
scenario.insert()
portfolio.obj['scenarioIds'].append(scenario.get_id())
portfolio.update()
return Response(200, 'Successfully added Scenario.', scenario.obj)
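# Added illustration (hypothetical values; the shape mirrors the
# check_required_parameters schema above):
EXAMPLE_BODY = {
    'scenario': {
        'name': 'My scenario',
        'trace': {'traceId': 'trace-1', 'loadSamplingFraction': 1.0},
        'topology': {'topologyId': 'topology-1'},
        'operational': {
            'failuresEnabled': False,
            'performanceInterferenceEnabled': False,
            'schedulerName': 'DEFAULT',
        },
    }
}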
| 42.12
| 91
| 0.45679
| 144
| 2,106
| 6.541667
| 0.402778
| 0.042463
| 0.050955
| 0.046709
| 0.089172
| 0.089172
| 0.089172
| 0.089172
| 0
| 0
| 0
| 0.00258
| 0.447768
| 2,106
| 49
| 92
| 42.979592
| 0.807395
| 0.018044
| 0
| 0
| 0
| 0
| 0.151309
| 0.014549
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.108108
| 0
| 0.162162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f92e0d9330578dc947fa3c0cecc40a9523ecca24
| 1,906
|
py
|
Python
|
Python/pymd/md/core/box.py
|
ryanlopezzzz/ABPTutorial
|
923fa89f1959cd71b28ecf4628ecfbfce6a6206c
|
[
"MIT"
] | 8
|
2020-05-05T00:41:50.000Z
|
2021-11-04T20:54:43.000Z
|
Python/pymd/md/core/box.py
|
ryanlopezzzz/ABPTutorial
|
923fa89f1959cd71b28ecf4628ecfbfce6a6206c
|
[
"MIT"
] | null | null | null |
Python/pymd/md/core/box.py
|
ryanlopezzzz/ABPTutorial
|
923fa89f1959cd71b28ecf4628ecfbfce6a6206c
|
[
"MIT"
] | 5
|
2020-05-04T16:37:13.000Z
|
2021-08-18T07:53:58.000Z
|
# Copyright 2020 Rastko Sknepnek, University of Dundee, r.skepnek@dundee.ac.uk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# Class handling the simulation box
class Box:
def __init__(self, Lx, Ly = None):
"""
Construct simulation box.
Parameters
----------
Lx : float
Size of the simulation box in x direction
Ly : float
Size of the simulation box in y direction (if None, same as Lx, i.e., square box)
Note
----
        Simulation box is centred at (0,0), i.e., x is in (-Lx/2,Lx/2] and y is in (-Ly/2,Ly/2]
"""
        if Lx <= 0.0:
            raise ValueError('Simulation box has to have length larger than 0.')
        self.Lx = Lx
        self.Ly = Lx if (Ly is None or Ly < 0.0) else Ly
self.xmin = -0.5*self.Lx
self.xmax = 0.5*self.Lx
self.ymin = -0.5*self.Ly
self.ymax = 0.5*self.Ly
self.A = self.Lx*self.Ly
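if __name__ == '__main__':
    # Minimal usage sketch (added; not part of the original file):
    # a 10 x 5 box centred at the origin.
    box = Box(10.0, 5.0)
    assert (box.xmin, box.xmax) == (-5.0, 5.0)
    assert (box.ymin, box.ymax) == (-2.5, 2.5)
    assert box.A == 50.0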
| 45.380952
| 114
| 0.693599
| 306
| 1,906
| 4.30719
| 0.454248
| 0.066768
| 0.018209
| 0.021244
| 0.080425
| 0.044006
| 0.044006
| 0
| 0
| 0
| 0
| 0.015562
| 0.224554
| 1,906
| 42
| 115
| 45.380952
| 0.876184
| 0.764953
| 0
| 0
| 0
| 0
| 0.125654
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f92f4eea713aeec6532cc3eed5da737cef8d020e
| 884
|
py
|
Python
|
dot_vim/plugged/vim-devicons/pythonx/vim_devicons/powerline/segments.py
|
gabefgonc/san-francisco-rice-dotfiles
|
60ff3539f34ecfff6d7bce895497e2a3805910d4
|
[
"MIT"
] | 4,897
|
2015-07-12T17:52:02.000Z
|
2022-03-31T16:07:01.000Z
|
dot_vim/plugged/vim-devicons/pythonx/vim_devicons/powerline/segments.py
|
gabefgonc/san-francisco-rice-dotfiles
|
60ff3539f34ecfff6d7bce895497e2a3805910d4
|
[
"MIT"
] | 337
|
2015-07-12T17:14:35.000Z
|
2022-03-05T17:27:24.000Z
|
dot_vim/plugged/vim-devicons/pythonx/vim_devicons/powerline/segments.py
|
gabefgonc/san-francisco-rice-dotfiles
|
60ff3539f34ecfff6d7bce895497e2a3805910d4
|
[
"MIT"
] | 365
|
2015-07-20T07:51:11.000Z
|
2022-02-22T05:00:56.000Z
|
# -*- coding: utf-8 -*-
# vim:se fenc=utf8 noet:
from __future__ import (unicode_literals, division, absolute_import, print_function)
try:
import vim
except ImportError:
vim = {}
from powerline.bindings.vim import (vim_get_func, buffer_name)
from powerline.theme import requires_segment_info
@requires_segment_info
def webdevicons(pl, segment_info):
webdevicons = vim_get_func('WebDevIconsGetFileTypeSymbol')
name = buffer_name(segment_info)
return [] if not webdevicons else [{
'contents': webdevicons(name),
'highlight_groups': ['webdevicons', 'file_name'],
}]
@requires_segment_info
def webdevicons_file_format(pl, segment_info):
webdevicons_file_format = vim_get_func('WebDevIconsGetFileFormatSymbol')
return [] if not webdevicons_file_format else [{
'contents': webdevicons_file_format(),
'highlight_groups': ['webdevicons_file_format', 'file_format'],
}]
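# Added illustration (an assumption, not from the source): both segments follow
# the same "empty list when the vim helper is missing" pattern, which could be
# factored as:
#
#   def _segment_or_empty(func, contents, groups):
#       return [] if not func else [{'contents': contents, 'highlight_groups': groups}]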
| 30.482759
| 84
| 0.777149
| 107
| 884
| 6.065421
| 0.411215
| 0.101695
| 0.161787
| 0.067797
| 0.101695
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002548
| 0.111991
| 884
| 28
| 85
| 31.571429
| 0.824204
| 0.049774
| 0
| 0.181818
| 0
| 0
| 0.191159
| 0.096774
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.227273
| 0
| 0.409091
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9306dd6abfdca80dd6982ef5b08247263dd7576
| 5,530
|
py
|
Python
|
src/gui_occluder/custom/sr_occluder.py
|
hgiesel/anki-multiple-choice
|
1a9a22480eb6c0e7f421dc08d36d14920e43dd3e
|
[
"MIT"
] | 5
|
2019-12-26T08:08:52.000Z
|
2021-11-21T03:34:27.000Z
|
src/gui_occluder/custom/sr_occluder.py
|
hgiesel/anki-set-randomizer
|
1a9a22480eb6c0e7f421dc08d36d14920e43dd3e
|
[
"MIT"
] | 84
|
2019-08-01T20:36:17.000Z
|
2019-10-26T16:16:33.000Z
|
src/gui_occluder/custom/sr_occluder.py
|
hgiesel/anki_set_randomizer
|
1a9a22480eb6c0e7f421dc08d36d14920e43dd3e
|
[
"MIT"
] | null | null | null |
import os
import enum
from aqt.qt import QDialog, QGraphicsScene, QGraphicsRectItem, QGraphicsEllipseItem, QApplication
from aqt.qt import Qt, QPen, QGraphicsItem, QPixmap, QRectF, QPainter
from aqt.qt import QPointF, QBrush, QColor, QPainterPath, QIcon, QSize, QPalette
from aqt.utils import showInfo
from ..sr_occluder_ui import Ui_SROccluder
from .sr_rect import SRRect
from .sr_occlusion_view import SROcclusionView
from .sr_occlusion_scene import SROcclusionScene
class ToolMode(enum.Enum):
Select = 1
Move = 2
Zoom = 3
Rect = 4
Ellipse = 5
Polygon = 6
Line = 7
Arrow = 8
Darrow = 9
Text = 10
class SROccluder(QDialog):
def __init__(self, parent):
super().__init__(parent=parent)
self.ui = Ui_SROccluder()
self.ui.setupUi(self)
self.toolMode = ToolMode.Select
self.setupButtons()
def setupButtons(self):
main_path = f'{os.path.dirname(os.path.realpath(__file__))}/../icons'
self.ui.selectButton.setIcon(QIcon(f"{main_path}/select.png"))
self.ui.moveButton.setIcon(QIcon(f"{main_path}/move.png"))
self.ui.zoomButton.setIcon(QIcon(f"{main_path}/zoom.png"))
self.ui.rectButton.setIcon(QIcon(f"{main_path}/rect.png"))
self.ui.ellipseButton.setIcon(QIcon(f"{main_path}/ellipse.png"))
self.ui.polygonButton.setIcon(QIcon(f"{main_path}/polygon.png"))
self.ui.lineButton.setIcon(QIcon(f"{main_path}/line.png"))
self.ui.arrowButton.setIcon(QIcon(f"{main_path}/arrow.png"))
self.ui.darrowButton.setIcon(QIcon(f"{main_path}/darrow.png"))
self.ui.textButton.setIcon(QIcon(f"{main_path}/text.png"))
self.ui.selectButton.clicked.connect(self.selectTool)
self.ui.moveButton.clicked.connect(self.moveTool)
self.ui.zoomButton.clicked.connect(self.zoomTool)
self.ui.rectButton.clicked.connect(self.rectTool)
self.ui.ellipseButton.clicked.connect(self.ellipseTool)
self.ui.polygonButton.clicked.connect(self.polygonTool)
self.ui.lineButton.clicked.connect(self.lineTool)
self.ui.arrowButton.clicked.connect(self.arrowTool)
self.ui.darrowButton.clicked.connect(self.darrowTool)
self.ui.textButton.clicked.connect(self.textTool)
def selectTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Select)
def moveTool(self):
QApplication.setOverrideCursor(Qt.SizeAllCursor)
self.changeMode(ToolMode.Move)
def zoomTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Zoom)
def rectTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Rect)
def ellipseTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Ellipse)
def polygonTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Polygon)
def lineTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Line)
def arrowTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Arrow)
def darrowTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Darrow)
def textTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Text)
def changeMode(self, mode):
self.resetButton(mode, True)
self.resetButton(self.toolMode, False)
self.toolMode = mode
def resetButton(self, mode, state):
if mode == ToolMode.Select:
self.ui.selectButton.setChecked(state)
self.ui.selectButton.repaint()
elif mode == ToolMode.Move:
self.ui.moveButton.setChecked(state)
self.ui.moveButton.repaint()
elif mode == ToolMode.Zoom:
self.ui.zoomButton.setChecked(state)
self.ui.zoomButton.repaint()
elif mode == ToolMode.Rect:
self.ui.rectButton.setChecked(state)
self.ui.rectButton.repaint()
elif mode == ToolMode.Ellipse:
self.ui.ellipseButton.setChecked(state)
self.ui.ellipseButton.repaint()
elif mode == ToolMode.Polygon:
self.ui.polygonButton.setChecked(state)
self.ui.polygonButton.repaint()
elif mode == ToolMode.Line:
self.ui.lineButton.setChecked(state)
self.ui.lineButton.repaint()
elif mode == ToolMode.Arrow:
self.ui.arrowButton.setChecked(state)
self.ui.arrowButton.repaint()
elif mode == ToolMode.Darrow:
self.ui.darrowButton.setChecked(state)
self.ui.darrowButton.repaint()
elif mode == ToolMode.Text:
self.ui.textButton.setChecked(state)
self.ui.textButton.repaint()
def setupUi(self):
theScene = SROcclusionScene(self, 'skull.jpg')
self.ui.graphicsView.setScene(theScene)
outlinePen = QPen()
rect = theScene.addRect(10, 10, 50, 50, outlinePen, Qt.green)
rect.setFlag(QGraphicsItem.ItemIsMovable)
rect.setFlag(QGraphicsItem.ItemIsSelectable)
rect.setFlag(QGraphicsItem.ItemIsFocusable)
rect2 = SRRect(0, 0, 50, 30)
rect2.setFlag(QGraphicsItem.ItemIsMovable)
rect2.setFlag(QGraphicsItem.ItemIsSelectable)
theScene.addItem(rect2)
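    # Added sketch (not part of the source): resetButton's if/elif ladder is
    # equivalent to a ToolMode -> button-name lookup, e.g.:
    #
    #   _BUTTON_NAMES = {ToolMode.Select: 'selectButton', ToolMode.Move: 'moveButton',
    #                    ToolMode.Zoom: 'zoomButton', ToolMode.Rect: 'rectButton'}
    #   def resetButton(self, mode, state):
    #       button = getattr(self.ui, _BUTTON_NAMES[mode])
    #       button.setChecked(state)
    #       button.repaint()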
| 34.5625
| 97
| 0.674141
| 613
| 5,530
| 6.029364
| 0.205546
| 0.069805
| 0.035173
| 0.045996
| 0.222403
| 0.165584
| 0.165584
| 0.165584
| 0
| 0
| 0
| 0.006679
| 0.214828
| 5,530
| 159
| 98
| 34.779874
| 0.844542
| 0
| 0
| 0.070866
| 0
| 0
| 0.049548
| 0.029837
| 0
| 0
| 0
| 0
| 0
| 1
| 0.11811
| false
| 0
| 0.07874
| 0
| 0.291339
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f930eb0037c9a1f7c847f03ac1f6289fad3453d4
| 13,371
|
py
|
Python
|
gen3config/config.py
|
uc-cdis/gen3config
|
fe340c0ce8ef3367f13c4f6040ec605e5fa7bc0c
|
[
"Apache-2.0"
] | null | null | null |
gen3config/config.py
|
uc-cdis/gen3config
|
fe340c0ce8ef3367f13c4f6040ec605e5fa7bc0c
|
[
"Apache-2.0"
] | null | null | null |
gen3config/config.py
|
uc-cdis/gen3config
|
fe340c0ce8ef3367f13c4f6040ec605e5fa7bc0c
|
[
"Apache-2.0"
] | null | null | null |
"""
Configuration class for handling configs with a given default.
If you need custom functionality or need to apply post_processing to parsed config,
simply extend this class.
Example:
```
class FenceConfig(Config):
def __init__(self, *args, **kwargs):
super(FenceConfig, self).__init__(*args, **kwargs)
def post_process(self):
# allow authlib traffic on http for development if enabled. By default
# it requires https.
#
# NOTE: use when fence will be deployed in such a way that fence will
# only receive traffic from internal clients, and can safely use HTTP
if (
self._configs.get("AUTHLIB_INSECURE_TRANSPORT")
and "AUTHLIB_INSECURE_TRANSPORT" not in os.environ
):
os.environ["AUTHLIB_INSECURE_TRANSPORT"] = "true"
# if we're mocking storage, ignore the storage backends provided
# since they'll cause errors if misconfigured
if self._configs.get("MOCK_STORAGE", False):
self._configs["STORAGE_CREDENTIALS"] = {}
cirrus.config.config.update(**self._configs.get("CIRRUS_CFG", {}))
```
Recommended use:
- Create a `config-default.yaml` and `config.py` in the top-level folder your app
- Inside `config-default.yaml` add keys and reasonable default values
- Inside `config.py`, create a class that inherits from this Config class
- See above example
- Add a final line to your `config.py` that instantiates your custom class:
- Ensure that you provide the default config path
- If placed in same directory as `config.py` you can use something like:
```
default_cfg_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "config-default.yaml"
)
config = FenceConfig(default_cfg_path)
```
- Import your instantiated object whenever you need to get configuration
- Example: `from fence.config import config`
- Load in application configuration during init of your app
- Example: `config.load('path/to/fence-config.yaml')`
- Now you can safely access anything that was in your `config-default.yaml` from this
object as if it were a dictionary
- Example: `storage_creds = config["STORAGE_CREDENTIALS"]`
- Example: `if config["SOME_BOOLEAN"]: ...`
    - Example: `nested_value = config["TOP_LEVEL"]["nested"]`
- And of course you can import that into any file you want and will have access to
keys/values
- Example: `from fence.config import config`
"""
from __future__ import division, absolute_import, print_function, unicode_literals
import os
import glob
from yaml import safe_load as yaml_load
from yaml.scanner import ScannerError
from jinja2 import Template, TemplateSyntaxError
import six
from cdislogging import get_logger
from gen3config.errors import NotFoundError, ParsingError
logger = get_logger(__name__, log_level="info")
class Config(dict):
"""
Configuration singleton that's instantiated on module load.
Allows updating from a config file by using .update()
"""
def __init__(self, default_cfg_path):
self._configs = {}
self.default_cfg_path = default_cfg_path
logger.debug("Checking if provided cfg path is an actual file...")
if not os.path.isfile(default_cfg_path):
raise FileNotFoundError(
"Default configuration file provided {} does not exist.".format(
default_cfg_path
)
)
logger.debug("Attempting to parse provided cfg as yaml file...")
try:
yaml_load(open(self.default_cfg_path))
except Exception as exc:
logger.exception(exc)
raise ParsingError(
"Could not parse provided file {} as YAML. See logs for details.".format(
default_cfg_path
)
)
def get(self, key, default=None):
return self._configs.get(key, default)
def set(self, key, value):
self._configs.__setitem__(key, value)
def setdefault(self, key, default=None):
self._configs.setdefault(key, default)
def __setitem__(self, key, value):
self._configs.__setitem__(key, value)
def __contains__(self, key):
return key in self._configs
def __iter__(self):
for key, value in six.iteritems(self._configs):
yield key, value
def __getitem__(self, key):
return self._configs[key]
def __delitem__(self, key):
del self._configs[key]
def __len__(self):
return len(self._configs)
def __str__(self):
return str(self._configs)
def update(self, *args, **kwargs):
"""
update configuration properties
support passing dictionary or keyword args
"""
if len(args) > 1:
raise TypeError(
"update expected at most 1 arguments, got {}".format(len(args))
)
if args:
self._configs.update(dict(args[0]))
self._configs.update(kwargs)
def load(self, config_path=None, search_folders=None, file_name=None):
if not config_path and not search_folders:
raise AttributeError(
"Cannot find configuration with given information. "
"You must either provide `search_folders` arg so load knows where to "
"look OR provide `config_path` as full path to config."
)
config_path = config_path or get_config_path(search_folders, file_name)
if config_path:
self.load_configuration_file(config_path)
self.post_process()
return self
def load_configuration_file(self, provided_cfg_path):
logger.info("Opening default configuration...")
# treat default cfg as template and replace nested vars, returning an updated dict
config = nested_render(
yaml_load(open(self.default_cfg_path)), {}, {}
)
logger.info("Applying configuration: {}".format(provided_cfg_path))
# treat provided cfg as template and replace nested vars, returning an updated dict
provided_configurations = nested_render(
yaml_load(open(provided_cfg_path)), {}, {}
)
# only update known configuration values. In the situation
# where the provided config does not have a certain value,
# the default will be used.
common_keys = {
key: value
for (key, value) in six.iteritems(config)
if key in provided_configurations
}
keys_not_provided = {
key: value
for (key, value) in six.iteritems(config)
if key not in provided_configurations
}
keys_to_update = {
key: value
for (key, value) in six.iteritems(provided_configurations)
if key in common_keys
}
unknown_keys = {
key: value
for (key, value) in six.iteritems(provided_configurations)
if key not in common_keys
}
config.update(keys_to_update)
if keys_not_provided:
logger.warning(
"Did not provide key(s) {} in {}. Will be set to default value(s) from {}.".format(
keys_not_provided.keys(), provided_cfg_path, self.default_cfg_path
)
)
if unknown_keys:
logger.warning(
"Unknown key(s) {} found in {}. Will be ignored.".format(
unknown_keys.keys(), provided_cfg_path
)
)
self._configs.update(config)
def post_process(self):
"""
Do some post processing to the configuration (set env vars if necessary,
do more complex modifications/changes to vars, etc.)
Called after loading the configuration and doing the template-replace.
"""
pass
def force_default_if_none(self, key, default_cfg=None, default_cfg_path=None):
"""
Set the key in the configuration to the default value if it either
1) doesn't exist (this is mostly for backwards-compatibility with previous
configuration methods)
2) is None
"""
default_cfg = default_cfg or yaml_load(open(default_cfg_path))
if key not in self._configs or self._configs[key] is None:
self._configs[key] = default_cfg.get(key)
def nested_render(cfg, fully_rendered_cfgs, replacements):
"""
    Template render the provided cfg by recursively replacing {{var}}'s with values
    from the current "namespace".
The nested config is treated like nested namespaces where the inner variables
are only available in current block and further nested blocks.
Said the opposite way: the namespace with available vars that can be used
includes the current block's vars and parent block vars.
This means that you can do replacements for top-level
(global namespaced) config vars anywhere, but you can only use inner configs within
that block or further nested blocks.
An example is worth a thousand words:
---------------------------------------------------------------------------------
fence-config.yaml
--------------------------------------------------------------------------------
BASE_URL: 'http://localhost/user'
OPENID_CONNECT:
fence:
api_base_url: 'http://other_fence/user'
client_kwargs:
redirect_uri: '{{BASE_URL}}/login/fence/login'
authorize_url: '{{api_base_url}}/oauth2/authorize'
THIS_WONT_WORK: '{{api_base_url}}/test'
--------------------------------------------------------------------------------
"redirect_uri" will become "http://localhost/user/login/fence/login"
- BASE_URL is in the global namespace so it can be used in this nested cfg
"authorize_url" will become "http://other_fence/user/oauth2/authorize"
- api_base_url is in the current namespace, so it is available
"THIS_WONT_WORK" will become "/test"
- Why? api_base_url is not in the current namespace and so we cannot use that
as a replacement. the configuration (instead of failing) will replace with
an empty string
Args:
cfg (TYPE): Description
fully_rendered_cfgs (TYPE): Description
replacements (TYPE): Description
Returns:
dict: Configurations with template vars replaced
"""
if isinstance(cfg, dict):
for key, value in six.iteritems(cfg):
replacements.update(cfg)
fully_rendered_cfgs[key] = {}
fully_rendered_cfgs[key] = nested_render(
value,
fully_rendered_cfgs=fully_rendered_cfgs[key],
replacements=replacements,
)
# new namespace, remove current vars (no longer available as replacements)
for old_cfg, value in six.iteritems(cfg):
replacements.pop(old_cfg, None)
return fully_rendered_cfgs
else:
# it's not a dict, so lets try to render it. But only if it's
# truthy (which means there's actually something to replace)
if cfg:
try:
t = Template(str(cfg))
rendered_value = t.render(**replacements)
except TemplateSyntaxError:
rendered_value = cfg
try:
cfg = yaml_load(rendered_value)
except ScannerError:
# it's not loading into yaml, so let's assume it's a string with special
# chars such as: {}[],&*#?|:-<>=!%@\)
#
# in YAML, we have to "quote" a string with special chars.
#
# since yaml_load isn't loading from a file, we need to wrap the Python
# str in actual quotes.
cfg = yaml_load('"{}"'.format(rendered_value))
return cfg
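# Worked trace (added) of the docstring example above, assuming the YAML has
# already been loaded into a dict:
#   nested_render({'BASE_URL': 'http://localhost/user',
#                  'OPENID_CONNECT': {'fence': {
#                      'api_base_url': 'http://other_fence/user',
#                      'client_kwargs': {'redirect_uri': '{{BASE_URL}}/login/fence/login'},
#                      'authorize_url': '{{api_base_url}}/oauth2/authorize'},
#                      'THIS_WONT_WORK': '{{api_base_url}}/test'}}, {}, {})
# renders redirect_uri -> 'http://localhost/user/login/fence/login',
# authorize_url -> 'http://other_fence/user/oauth2/authorize', and
# THIS_WONT_WORK -> '/test' (api_base_url is out of scope at that level).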
def get_config_path(search_folders, file_name="*config.yaml"):
"""
Return the path of a single configuration file ending in config.yaml
from one of the search folders.
NOTE: Will return the first match it finds. If multiple are found,
this will error out.
"""
possible_configs = []
file_name = file_name or "*config.yaml"
for folder in search_folders:
config_path = os.path.join(folder, file_name)
possible_files = glob.glob(config_path)
possible_configs.extend(possible_files)
if len(possible_configs) == 1:
return possible_configs[0]
elif len(possible_configs) > 1:
raise IOError(
"Multiple config.yaml files found: {}. Please specify which "
"configuration to use by providing `config_path` instead of "
"`search_folders` to Config.load(). Alternatively, ensure that only a "
"single valid *config.yaml exists in the search folders: {}.".format(
str(possible_configs), search_folders
)
)
else:
raise NotFoundError(
"Could not find config file {}. Searched in the following locations: "
"{}".format(file_name, str(search_folders))
)
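# Minimal subclass sketch (added), following the "Recommended use" notes in the
# module docstring; `MyConfig`, the paths and `SOME_KEY` are illustrative:
#
#   class MyConfig(Config):
#       def post_process(self):
#           # e.g. normalize values after load
#           pass
#
#   default_cfg_path = os.path.join(
#       os.path.dirname(os.path.abspath(__file__)), "config-default.yaml")
#   config = MyConfig(default_cfg_path)
#   config.load('path/to/my-config.yaml')
#   value = config["SOME_KEY"]  # dict-style access after load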
| 35.943548
| 99
| 0.615362
| 1,615
| 13,371
| 4.928173
| 0.237771
| 0.029024
| 0.022867
| 0.016711
| 0.11421
| 0.088328
| 0.067345
| 0.051263
| 0.051263
| 0.039955
| 0
| 0.001262
| 0.28861
| 13,371
| 371
| 100
| 36.040431
| 0.835471
| 0.442974
| 0
| 0.112426
| 0
| 0.005917
| 0.134393
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106509
| false
| 0.005917
| 0.053254
| 0.029586
| 0.218935
| 0.005917
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f931949a583110bdf77e537bf67ef0dfdd9aeae4
| 8,150
|
py
|
Python
|
src/ReinforcementLearning/Modules/carlaUtils.py
|
B-C-WANG/ReinforcementLearningInAutoPilot
|
8d3c0b81e3db2fb4be0e52e25b700c54f5e569dc
|
[
"MIT"
] | 27
|
2019-05-14T01:06:05.000Z
|
2022-03-06T03:12:40.000Z
|
src/ReinforcementLearning/Modules/carlaUtils.py
|
B-C-WANG/ReinforcementLearningInAutoPilot
|
8d3c0b81e3db2fb4be0e52e25b700c54f5e569dc
|
[
"MIT"
] | null | null | null |
src/ReinforcementLearning/Modules/carlaUtils.py
|
B-C-WANG/ReinforcementLearningInAutoPilot
|
8d3c0b81e3db2fb4be0e52e25b700c54f5e569dc
|
[
"MIT"
] | 10
|
2020-01-20T09:39:51.000Z
|
2022-03-31T18:30:53.000Z
|
# coding:utf-8
# Type: Public
import numpy as np
import common.Math as cMath
import math
class CarlrUtils(object):
Author = "BaoChuan Wang"
AllowImport = False
@staticmethod
def get_direction_vector_series_and_car_to_next_waypoint_ratio(
carla_engine,
start_waypoint_xy_array,
target_waypoint_xy_array,
draw_in_UE=False
):
        '''
        State computation for the WaypointsTarget environment.
        # The code below is kept for reference.
        Obtain the waypoint closest to the vehicle plus the next n waypoints
        (currently changed to the final two waypoints, which do NOT update with
        the vehicle's position!), then return the parameters related to these
        two waypoints, namely:
        1. distance from the vehicle to the midpoint of the two waypoints
        2. heading of the waypoint segment
        3. heading from the vehicle to the waypoint midpoint
        4. heading of the vehicle itself
        # Fetching waypoints with real-time updates like this is unsuitable: the
        # resulting rewards are not continuous with respect to the actions.
        # The original method fetched the waypoint nearest to the vehicle and then
        # derived the next waypoints; this was changed to fixing the waypoints at
        the start, because the nearest-waypoint method may lead to lane changes.
        Code of the original method:
        # # Get the xy coordinates of the vehicle's next two waypoints
        # next_center_waypoints = self.engine.map.get_waypoint(
        #     # location
        #     self.engine.vehicle.get_location()
        # )
        # # Take the point 5 m further on as the next waypoint
        # next_next_center_waypoints = next_center_waypoints.next(5)[0]
        #
        # waypoint_list =((
        #     next_center_waypoints.transform.location.x,
        #     next_center_waypoints.transform.location.y
        # ), (
        #     next_next_center_waypoints.transform.location.x,
        #     next_next_center_waypoints.transform.location.y
        # ))
        #
        # # Draw the waypoints in carla
        # self.engine.draw_waypoint_list(
        #     [next_center_waypoints,next_next_center_waypoints],life_time=1)
        #
        # return waypoint_list
        # Notes:
        The final computation needs the two waypoints to obtain the distance to
        the vehicle, and the angle between the vehicle-to-midpoint direction and
        the direction of the two waypoints, so the midpoint of the waypoints must
        lie ahead of the vehicle (otherwise the car would drive backwards).
        Keeping the waypoint spacing large enough is sufficient! One can also
        take the two following points here instead of a single one.
        # The code here used to find the point nearest to the vehicle and then
        # look 3 points ahead; it is now updated to use points fixed at the start!
        # # Find the index of the nearest waypoint, then take the next one. What
        # # happens at the end of the waypoint list?
        # distance = np.sqrt(
        #     np.sum(np.square(self.car_waypoints_xy_array - np.array([self.engine.vehicle.get_location().x,
        #                                                              self.engine.vehicle.get_location().y])), axis=1))
        #
        # # print(distance)
        # # Largest index
        # index_max = distance.shape[0] - 1
        # # Find the index of the nearest waypoint
        # index = int(np.argmin(distance))
        #
        #
        # index = index_max - 1
        #
        # # Take the point a bit further ahead
        # next_point_index = index + 3
        # if next_point_index > index_max: next_point_index = index_max
        # if draw_in_UE:
        #     # Draw the segment between the two waypoints
        #     start = self.car_waypoints_list[index]
        #     end = self.car_waypoints_list[next_point_index]
        #     self.engine.draw_line(start, end, life_time=1, color=(0, 255, 0))
        # center_point = (self.car_waypoints_xy_array[index, :].reshape(-1) +
        #                 self.car_waypoints_xy_array[next_point_index, :].reshape(-1)) / 2
        '''
        # Vehicle position
vehicle_location = carla_engine.vehicle.get_location()
car_point = np.array([vehicle_location.x, vehicle_location.y])
if draw_in_UE:
            # Midpoint of the two waypoints
center_point = (start_waypoint_xy_array + target_waypoint_xy_array) / 2
center_point_transform = carla_engine.make_transform(
x=center_point[0],
y=center_point[1],
z=vehicle_location.z
)
carla_engine.draw_point_xyz(center_point[0], center_point[1], carla_engine.vehicle.get_location().z + 0.25,
color=(0, 255, 255), thickness=0.1)
carla_engine.draw_line_location(
vehicle_location,
center_point_transform.location,
life_time=1, color=(0, 0, 255)
)
        # Unit direction vector along the waypoint segment
way_unit_direction = target_waypoint_xy_array - start_waypoint_xy_array
way_unit_direction /= np.linalg.norm(way_unit_direction, 2)
        # Unit direction vector from the vehicle to the target waypoint
car_to_way_unit_direction = (target_waypoint_xy_array - car_point)
car_to_way_unit_direction /= np.linalg.norm(car_to_way_unit_direction, 2)
        # Unit direction vector of the vehicle itself
car_unit_direction = carla_engine.vehicle.get_transform().get_forward_vector()
car_unit_direction = np.array([car_unit_direction.x, car_unit_direction.y])
        # Ratio of the vehicle-to-target distance over the total distance
total_distance = np.linalg.norm(target_waypoint_xy_array - start_waypoint_xy_array, 2)
now_distance = np.linalg.norm(target_waypoint_xy_array - car_point, 2)
car_to_target_distance_ratio = now_distance / total_distance
        # Vehicle yaw angle in radians
car_yaw = math.radians(carla_engine.vehicle_yaw)
        # Added: x and y of the target waypoint in the vehicle's coordinate frame
target_xy_array_relate_to_car = cMath.convert_point_into_relative_coordinate(
target_waypoint_xy_array,
car_point,
original_yaw_radius=car_yaw)
return way_unit_direction, car_to_way_unit_direction, car_unit_direction, car_to_target_distance_ratio, target_xy_array_relate_to_car
@staticmethod
def get_car_target_waypoints(engine, vehicle, n_waypoint=2, waypoint_spacing=15, draw_waypoints=True):
if n_waypoint < 2:
raise ValueError("At least 2 waypoints will return!")
# List<Waypoints>
car_waypoints_list = []
# Array2D
car_waypoints_xy_array = None
# List<List>
car_waypoints_xy_list = []
        # Starting waypoint
next_center_waypoints = engine.map.get_waypoint(vehicle.get_location())
        # Starting point of the vehicle
start_waypoint_xy_array = np.array([next_center_waypoints.transform.location.x,
next_center_waypoints.transform.location.y])
car_waypoints_list.append(next_center_waypoints)
car_waypoints_xy_list.append([next_center_waypoints.transform.location.x,
next_center_waypoints.transform.location.y])
if n_waypoint == 2:
next_center_waypoints = next_center_waypoints.next(waypoint_spacing)[0]
car_waypoints_list.append(next_center_waypoints)
car_waypoints_xy_list.append([next_center_waypoints.transform.location.x,
next_center_waypoints.transform.location.y])
else:
for i in range(n_waypoint - 1):
next_center_waypoints = next_center_waypoints.next(waypoint_spacing)[0]
car_waypoints_list.append(next_center_waypoints)
car_waypoints_xy_list.append([next_center_waypoints.transform.location.x,
next_center_waypoints.transform.location.y])
car_waypoints_xy_array = np.array(car_waypoints_xy_list)
        # Target (end) point
target_waypoint_xy_array = np.array([next_center_waypoints.transform.location.x,
next_center_waypoints.transform.location.y])
        # Draw the waypoints
if draw_waypoints:
engine.draw_waypoint_list(car_waypoints_list, life_time=99999)
return car_waypoints_list, car_waypoints_xy_list, car_waypoints_xy_array, target_waypoint_xy_array
@staticmethod
def get_velocity_accel_relative_to_car_and_their_scalar(engine):
velocity_vector = engine.get_velocity()
velocity_to_car_x, velocity_to_car_y = cMath.convert_point_into_relative_coordinate(
target_xy=[velocity_vector.x, velocity_vector.y],
original_xy=[0, 0],
original_yaw_radius=math.radians(engine.vehicle_yaw))
velocity = engine.get_velocity_scalar()
accel_vector = engine.get_accel()
accel_to_car_x, accel_to_car_y = cMath.convert_point_into_relative_coordinate(
target_xy=[accel_vector.x, accel_vector.y],
original_xy=[0, 0],
original_yaw_radius=math.radians(engine.vehicle_yaw))
        # NOTE: kept as in the source, which reuses get_velocity_scalar() here;
        # an acceleration magnitude is presumably intended.
        accel = engine.get_velocity_scalar()
return velocity, velocity_to_car_x, velocity_to_car_y, accel, accel_to_car_x, accel_to_car_y
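# Added numeric sketch (standalone numpy, not from the source) of the
# distance-ratio computed in get_direction_vector_series_and_car_to_next_waypoint_ratio:
#   start  = np.array([0.0, 0.0]); target = np.array([10.0, 0.0])
#   car    = np.array([4.0, 0.0])
#   total  = np.linalg.norm(target - start, 2)  # 10.0
#   now    = np.linalg.norm(target - car, 2)    # 6.0
#   ratio  = now / total                        # 0.6 -> 60% of the route remains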
| 40.346535
| 141
| 0.640613
| 900
| 8,150
| 5.385556
| 0.194444
| 0.055705
| 0.105839
| 0.080875
| 0.467918
| 0.374458
| 0.322468
| 0.285125
| 0.238704
| 0.215185
| 0
| 0.011974
| 0.282699
| 8,150
| 201
| 142
| 40.547264
| 0.81714
| 0.283313
| 0
| 0.247191
| 0
| 0
| 0.00866
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033708
| false
| 0
| 0.044944
| 0
| 0.146067
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f932dbe3d5afcee0aae3f946f59a3b66e3f2fb59
| 2,413
|
py
|
Python
|
models.py
|
abhishekyana/CycleGANs-PyTorch
|
ebbd7d6dbed642577cc37a3e741f4233b9cbbd7a
|
[
"MIT"
] | 12
|
2019-07-27T09:54:57.000Z
|
2021-04-23T23:34:25.000Z
|
models.py
|
abhishekyana/CycleGANs-PyTorch
|
ebbd7d6dbed642577cc37a3e741f4233b9cbbd7a
|
[
"MIT"
] | 5
|
2020-11-13T15:40:12.000Z
|
2022-03-11T23:53:51.000Z
|
models.py
|
abhishekyana/CycleGANs-PyTorch
|
ebbd7d6dbed642577cc37a3e741f4233b9cbbd7a
|
[
"MIT"
] | 2
|
2021-03-11T10:45:33.000Z
|
2021-04-23T23:34:29.000Z
|
import torch.nn as nn
import torch.nn.functional as F
class ResBlock(nn.Module):
def __init__(self, inFeatures):
super(ResBlock, self).__init__()
self.conv = nn.Sequential(nn.ReflectionPad2d(1),
nn.Conv2d(inFeatures, inFeatures, 3),
nn.InstanceNorm2d(inFeatures),
nn.ReLU(inplace=True),
nn.ReflectionPad2d(1),
nn.Conv2d(inFeatures, inFeatures, 3),
nn.InstanceNorm2d(inFeatures))
def forward(self, X):
out = X + self.conv(X)
return out
class Generator(nn.Module):
def __init__(self, inputnc, outputnc, nResBlocks=9):
super(Generator, self).__init__()
layers = [nn.ReflectionPad2d(3),
nn.Conv2d(inputnc, 64, 7),
nn.InstanceNorm2d(64),
nn.ReLU(inplace=True)]
#To downsample the Image
inFeatures = 64
outFeatures = 2*inFeatures
for i in range(2):
layers += [nn.Conv2d(inFeatures, outFeatures, 3, stride=2, padding=1),
nn.InstanceNorm2d(outFeatures),
nn.ReLU(inplace=True)]
inFeatures = outFeatures
outFeatures = 2*inFeatures
for i in range(nResBlocks):
layers += [ResBlock(inFeatures)]
#To upsample the Image
outFeatures = inFeatures//2
for i in range(2):
layers += [nn.ConvTranspose2d(inFeatures, outFeatures, 3, stride=2, padding=1, output_padding=1),
nn.InstanceNorm2d(outFeatures),
nn.ReLU(inplace=True)]
inFeatures = outFeatures
outFeatures = inFeatures//2
layers += [nn.ReflectionPad2d(3),
nn.Conv2d(64, outputnc, 7),
nn.Tanh()]
self.model = nn.Sequential(*layers)
def forward(self, X):
        out = self.model(X)
return out
class Discriminator(nn.Module):
def __init__(self, inputnc):
super(Discriminator, self).__init__()
layers = [nn.Conv2d(inputnc, 64, 4, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(64, 128, 4, stride=2, padding=1),
nn.InstanceNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(128, 256, 4, stride=2, padding=1),
nn.InstanceNorm2d(256),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(256, 512, 4, padding=1),
nn.InstanceNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(512, 1, 4, padding=1)]
self.model = nn.Sequential(*layers)
def forward(self, X):
out = self.model(X)
out = F.avg_pool2d(out, out.size()[2:]).view(out.size()[0], -1)
return out
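if __name__ == '__main__':
    # Added smoke test (not part of the source): check I/O shapes on a dummy batch.
    import torch
    G = Generator(3, 3)
    D = Discriminator(3)
    x = torch.randn(1, 3, 256, 256)
    print(G(x).shape)  # torch.Size([1, 3, 256, 256])
    print(D(x).shape)  # torch.Size([1, 1]) after global average pooling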
| 30.544304
| 100
| 0.642768
| 317
| 2,413
| 4.810726
| 0.201893
| 0.052459
| 0.039344
| 0.04918
| 0.613115
| 0.588852
| 0.502295
| 0.361967
| 0.278033
| 0.278033
| 0
| 0.058017
| 0.214256
| 2,413
| 79
| 101
| 30.544304
| 0.746308
| 0.018235
| 0
| 0.439394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.030303
| 0
| 0.212121
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9347e37b52fec0692880a203b911075b279ecba
| 5,194
|
py
|
Python
|
file-io-and-other-io/modfile/test_ip_gen.py
|
eda-ricercatore/python-sandbox
|
741d23e15f22239cb5df8af6e695cd8e3574be50
|
[
"MIT"
] | null | null | null |
file-io-and-other-io/modfile/test_ip_gen.py
|
eda-ricercatore/python-sandbox
|
741d23e15f22239cb5df8af6e695cd8e3574be50
|
[
"MIT"
] | null | null | null |
file-io-and-other-io/modfile/test_ip_gen.py
|
eda-ricercatore/python-sandbox
|
741d23e15f22239cb5df8af6e695cd8e3574be50
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
This is written by Zhiyang Ong to modify text (non-binary) files.
Synopsis:
Script to modify text (non-binary) files.
Revision History:
1) November 11, 2014. Initial working version.
The MIT License (MIT)
Copyright (c) <2014> <Zhiyang Ong>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Email address: echo "cukj -wb- 23wU4X5M589 TROJANS cqkH wiuz2y 0f Mw Stanford" | awk '{ sub("23wU4X5M589","F.d_c_b. ") sub("Stanford","d0mA1n"); print $5, $2, $8; for (i=1; i<=1; i++) print "6\b"; print $9, $7, $6 }' | sed y/kqcbuHwM62z/gnotrzadqmC/ | tr 'q' ' ' | tr -d [:cntrl:] | tr -d 'ir' | tr y "\n"
"""
# Import packages and functions from the Python Standard Library.
#from os import listdir, system
from os import system
#from os.path import isfile, join, splitext
#from os.subprocess import call
#import subprocess
# ============================================================
"""
Create an output file object.
Assume that the specified filename does not belong to an important file.
Assume that the specified file can be overwritten.
"""
f_object = open("input-file.txt", "w")
# Lists to generate data for the input test file.
# List of universities that are good in EDA.
universities = ["Berkeley", "Stanford", "MIT", "UT Austin", "Carnegie Mellon", "Georgia Tech", "Columbia", "Northwestern", "Purdue", "UCSD", "UCLA"]
# List of other universities in EDA.
other_unis = ["UIUC", "Brown", "Boston University", "UC Irvine", "UC Riverside", "UCSB", "USC", "University of Minnesota at Twin Cities", "Utah", "University of Wisconsin-Madison"]
# List of VLSI topics.
vlsi_topics = ["RTL design", "TLM design", "processor design", "SRAM design", "DRAM design", "low-power VLSI design", "decoder design", "DFM", "VLSI verification", "VLSI design flow", "NoC", "asynchronous VLSI design", "VLSI architecture", "digitally-assisted analog IC design", "VLSI signal processing", "microarchitecture"]
# List of EDA topics.
eda_topics = ["model checking", "equivalence checking", "high-level synthesis", "hardware/software partitioning", "hardware-accelerated emulation", "logic synthesis", "RTL synthesis", "static timing analysis", "statistical STA", "power optimization", "DVFS", "logic simulation", "fault simulation", "ATPG", "DFT", "BIST", "memory compiler", "gate sizing", "threshold voltage assignment", "buffer insertion", "crosstalk analysis", "signal integrity analysis", "noise analysis", "thermal analysis", "floorplanning", "partitioning", "detailed placement", "detailed routing", "global placement", "global routing", "clock network synthesis", "power and ground routing", "layout compaction", "layout extraction", "parasitic extraction", "interconnect modeling", "design rule check", "layout versus schematic check", "electric rule check", "computational lithography", "optical proximity correction", "resolution enhancement technologies", "mask data preparation", "circuit simulation"]
# Lists of numbers to be fixed.
list_of_hundreds = range(1500, 5000, 100)
list_of_10s = range(1234560, 1234767, 10)
# References:
# http://eecs_ece-and-cs.quora.com/Choosing-a-Graduate-Program-in-VLSI-Design-Related-Areas-Things-to-Consider
# http://www.quora.com/What-are-the-best-VLSI-CAD-research-groups-in-US-universities
# Write text to the input test file.
#f_object.write("Ciao Mondo")
# Pointer to currently enumerated index of EDA topics.
ptr = 0
# ============================================================
# Generate test data for the test input file.
# Enumerate all universities that are good in EDA.
for gd_uni in universities:
#temp_str = "%S %S %S", gd_uni, eda_topics[ptr], eda_topics[ptr+1]
temp_str = gd_uni + "; " + str(list_of_hundreds[ptr]) + "; " + eda_topics[ptr]
ptr = ptr + 1
temp_str = temp_str + "; " + str(list_of_10s[ptr]) + "; " + eda_topics[ptr] + ".\n"
if ptr < len(universities):
ptr = ptr + 1
f_object.write(temp_str)
temp_str = "Stanford" + "; " + "326748027" + "; " + "statistical STA"
temp_str = temp_str + "; " + "7289" + "; " + "hardware-accelerated emulation" + ".\n"
f_object.write(temp_str)
# ============================================================
# Close the file object
f_object.close()
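# Added round-trip check (an assumption, not in the original script): read the
# generated file back; each record is "; "-separated and ends with ".".
with open("input-file.txt") as f_in:
    for line in f_in:
        fields = line.rstrip().rstrip(".").split("; ")
        # e.g. ['Berkeley', '1500', 'model checking', '1234570', 'equivalence checking']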
| 57.076923
| 980
| 0.701579
| 709
| 5,194
| 5.090268
| 0.510578
| 0.017456
| 0.016625
| 0.012469
| 0.040454
| 0.029925
| 0
| 0
| 0
| 0
| 0
| 0.019811
| 0.144782
| 5,194
| 90
| 981
| 57.711111
| 0.792661
| 0.527147
| 0
| 0.2
| 0
| 0
| 0.587192
| 0
| 0.05
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f934993945194bcd3e81f89c7b932f03bda5ad14
| 8,771
|
py
|
Python
|
aux_lib.py
|
paulokuriki/dcmtag2table
|
e9f7f366ffe64653aa2fab9bffd88669f1ed7f3f
|
[
"Apache-2.0"
] | null | null | null |
aux_lib.py
|
paulokuriki/dcmtag2table
|
e9f7f366ffe64653aa2fab9bffd88669f1ed7f3f
|
[
"Apache-2.0"
] | null | null | null |
aux_lib.py
|
paulokuriki/dcmtag2table
|
e9f7f366ffe64653aa2fab9bffd88669f1ed7f3f
|
[
"Apache-2.0"
] | null | null | null |
import pydicom
from tqdm import tqdm
import pandas as pd
import os
import time
import glob
import numpy as np
from pydicom import _dicom_dict as dc
from constants import *
import string
def dcmtag2df(folder: str, list_of_tags: list):
"""
# Create a Pandas DataFrame with the <list_of_tags> DICOM tags
# from the DICOM files in <folder>
# Parameters:
# folder (str): folder to be recursively walked looking for DICOM files.
# list_of_tags (list of strings): list of DICOM tags with no whitespaces.
# Returns:
# df (DataFrame): table of DICOM tags from the files in folder.
"""
list_of_tags = list_of_tags.copy()
table = []
start = time.time()
# checks if folder exists
if not os.path.isdir(folder):
print(f'{folder} is not a valid folder.')
return None
# joins ** to the folder name for using at the glob function
print("Searching files recursively...")
search_folder = os.path.join(folder, '**')
try:
filelist = glob.glob(search_folder, recursive=True)
print(f"{len(list(filelist))} files/folders found ")
except Exception as e:
print(e)
return None
    print("Reading files...")
for _f in tqdm(filelist):
try:
dataset = pydicom.dcmread(_f, stop_before_pixels=True)
items = []
items.append(_f)
for _tag in list_of_tags:
if _tag in dataset:
if dataset.data_element(_tag) is not None:
items.append(str(dataset.data_element(_tag).value))
                    else:
                        # fall back to a lookup by the numeric tag; the source used
                        # tag_number here before it was ever assigned (a latent NameError)
                        tag_number = tag_number_to_base_16(_tag)
                        if tag_number in dataset and dataset[tag_number] is not None:
                            items.append(str(dataset[tag_number].value))
                        else:
                            items.append("NaN")
else:
series_description = dataset.get('SeriesDescription')
if _tag == 'IOP_Plane':
IOP = dataset.get('ImageOrientationPatient')
_plano = IOP_Plane(IOP)
items.append(_plano)
elif _tag == "Primary":
try:
image_type = ' '.join(dataset.get('ImageType'))
except:
image_type = ''
found_word = search_words_in_serie(image_type, PRIMARY)
items.append(found_word)
elif _tag == "Gad":
found_word = search_words_in_serie(series_description, GAD, GAD_EXCLUSION)
items.append(found_word)
elif _tag == "T1":
found_word = search_words_in_serie(series_description, T1, FLAIR + T2)
items.append(found_word)
elif _tag == "T2":
found_word = search_words_in_serie(series_description, T2)
items.append(found_word)
elif _tag == "FLAIR":
found_word = search_words_in_serie(series_description, FLAIR, T1)
items.append(found_word)
elif _tag == "SWI":
found_word = search_words_in_serie(series_description, SWI)
items.append(found_word)
elif _tag == "FIESTA":
found_word = search_words_in_serie(series_description, FIESTA)
items.append(found_word)
elif _tag == "TOF":
found_word = search_words_in_serie(series_description, TOF)
items.append(found_word)
elif _tag == "DWI":
found_word = search_words_in_serie(series_description, DWI, DWI_EXCLUSION)
items.append(found_word)
elif _tag == "Angio":
found_word = search_words_in_serie(series_description, ANGIO)
items.append(found_word)
elif _tag == "MPR":
found_word = search_words_in_serie(series_description, MPR)
items.append(found_word)
elif _tag == "Others":
found_word = search_words_in_serie(series_description, OTHERS)
items.append(found_word)
else:
# checks if a tag number was informed
tag_number = tag_number_to_base_16(_tag)
if tag_number in dataset:
if dataset[tag_number] is not None:
items.append(str(dataset[tag_number].value))
else:
items.append("NaN")
else:
items.append("NaN")
table.append((items))
except (FileNotFoundError, PermissionError):
pass
except Exception as e:
pass
list_of_tags.insert(0, "Filename")
test = list(map(list, zip(*table)))
dictone = {}
if len(table) == 0:
print(f'0 DICOM files found at folder: {folder}')
return None
for i, _tag in enumerate(list_of_tags):
dictone[_tag] = test[i]
df = pd.DataFrame(dictone)
time.sleep(2)
print("Finished.")
return df
def IOP_Plane(IOP: list) -> str:
"""
This function takes IOP of an image and returns its plane (Sagittal, Coronal, Transverse)
['1', '0', '0', '0', '0', '-1'] you are dealing with Coronal plane view
['0', '1', '0', '0', '0', '-1'] you are dealing with Sagittal plane view
['1', '0', '0', '0', '1', '0'] you are dealing with Axial plane view
"""
try:
IOP_round = [round(x) for x in IOP]
plane = np.cross(IOP_round[0:3], IOP_round[3:6])
plane = [abs(x) for x in plane]
if plane[0] == 1:
return "SAG"
elif plane[1] == 1:
return "COR"
elif plane[2] == 1:
return "AXI"
else:
return "UNK"
except:
return "UNK"
def dicomtagnumber_to_tagname(dicom_tag_number: str) -> str:
# if receives int, convert to str
dicom_tag_base_16 = tag_number_to_base_16(dicom_tag_number)
try:
        dicom_tag_name = dc.DicomDictionary.get(dicom_tag_base_16, (0, 0, 0, 0, dicom_tag_number))[4]
        # when the lookup falls through, get() returns the original tag-number
        # string; remap 0008103E to its name manually
        if dicom_tag_name == "0008103E":
            dicom_tag_name = "SeriesDescription"
except Exception as e:
        print(f'Error converting DICOM tag {dicom_tag_number}\n{e}')
return dicom_tag_name
def dicomtagname_to_tagnumber(dicom_tag_name: str) -> str:
    tag_number_8_digits = dicom_tag_name
    tag_number = None  # guard: avoids a NameError when no match is found below
    try:
# searches for Contracted Name
for key, value in dc.DicomDictionary.items():
if dicom_tag_name == value[4]:
tag_number = key
break
# searches for Expanded Name if not found Contracted Form
if not tag_number:
for key, value in dc.DicomDictionary.items():
if dicom_tag_name == value[2]:
tag_number = key
break
        hex_number = hex(tag_number)[2:]  # the source hardcoded hex(1048592) here
        tag_number_8_digits = f"{hex_number:>08}"
except Exception as e:
        print(f'Error converting DICOM tag {dicom_tag_name}\n{e}')
return tag_number_8_digits
def tag_number_to_base_16(dicom_tag_number: str) -> str:
# if receives int, convert to str
hx = string.hexdigits
if type(dicom_tag_number) == int:
dicom_tag_number = str(dicom_tag_number)
only_hexdigits_tag = ''.join(i for i in dicom_tag_number if i in hx)
dicom_tag_base_16 = int(only_hexdigits_tag, 16)
return dicom_tag_base_16
def search_words_in_serie(series_description: str, search_words: list, exclusion_words: list = []) -> bool:
try:
search_flag = False
for word in search_words:
if word.upper() in series_description.upper():
search_flag = True
break
except Exception as e:
print(f"Erro ao procurar a lista de palavras de inclusao {search_words} na descricao {series_description}")
return "NaN"
try:
exclusion_flag = False
for word in exclusion_words:
if word.upper() in series_description.upper():
exclusion_flag = True
break
except Exception as e:
print(f"Erro ao procurar a lista de palavras de exclusao {search_words} na descricao {series_description}")
return "NaN"
    found = search_flag and not exclusion_flag
return found
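if __name__ == "__main__":
    # Added self-check (not part of the source), mirroring the IOP_Plane
    # docstring; numeric IOP values are used since round() requires numbers:
    assert IOP_Plane([1, 0, 0, 0, 0, -1]) == "COR"
    assert IOP_Plane([0, 1, 0, 0, 0, -1]) == "SAG"
    assert IOP_Plane([1, 0, 0, 0, 1, 0]) == "AXI"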
| 37.165254
| 115
| 0.551248
| 1,032
| 8,771
| 4.452519
| 0.196705
| 0.048966
| 0.036779
| 0.050925
| 0.411099
| 0.386289
| 0.331665
| 0.296844
| 0.133624
| 0.133624
| 0
| 0.014674
| 0.3629
| 8,771
| 235
| 116
| 37.323404
| 0.807623
| 0.104663
| 0
| 0.335165
| 0
| 0
| 0.083162
| 0.011311
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032967
| false
| 0.010989
| 0.054945
| 0
| 0.17033
| 0.06044
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f93933ebd7cddbd101cc7daf0772e4787528a6a9
| 2,965
|
py
|
Python
|
server/swagger_server/models/people_patch.py
|
fabric-testbed/fabric-core-api
|
8ce79fd16e1020271487967743a89b7a2346bf45
|
[
"MIT"
] | null | null | null |
server/swagger_server/models/people_patch.py
|
fabric-testbed/fabric-core-api
|
8ce79fd16e1020271487967743a89b7a2346bf45
|
[
"MIT"
] | null | null | null |
server/swagger_server/models/people_patch.py
|
fabric-testbed/fabric-core-api
|
8ce79fd16e1020271487967743a89b7a2346bf45
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.preferences import Preferences # noqa: F401,E501
from swagger_server import util
class PeoplePatch(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, email: str=None, name: str=None, preferences: Preferences=None): # noqa: E501
"""PeoplePatch - a model defined in Swagger
:param email: The email of this PeoplePatch. # noqa: E501
:type email: str
:param name: The name of this PeoplePatch. # noqa: E501
:type name: str
:param preferences: The preferences of this PeoplePatch. # noqa: E501
:type preferences: Preferences
"""
self.swagger_types = {
'email': str,
'name': str,
'preferences': Preferences
}
self.attribute_map = {
'email': 'email',
'name': 'name',
'preferences': 'preferences'
}
self._email = email
self._name = name
self._preferences = preferences
@classmethod
def from_dict(cls, dikt) -> 'PeoplePatch':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The people_patch of this PeoplePatch. # noqa: E501
:rtype: PeoplePatch
"""
return util.deserialize_model(dikt, cls)
@property
def email(self) -> str:
"""Gets the email of this PeoplePatch.
:return: The email of this PeoplePatch.
:rtype: str
"""
return self._email
@email.setter
def email(self, email: str):
"""Sets the email of this PeoplePatch.
:param email: The email of this PeoplePatch.
:type email: str
"""
self._email = email
@property
def name(self) -> str:
"""Gets the name of this PeoplePatch.
:return: The name of this PeoplePatch.
:rtype: str
"""
return self._name
@name.setter
def name(self, name: str):
"""Sets the name of this PeoplePatch.
:param name: The name of this PeoplePatch.
:type name: str
"""
self._name = name
@property
def preferences(self) -> Preferences:
"""Gets the preferences of this PeoplePatch.
:return: The preferences of this PeoplePatch.
:rtype: Preferences
"""
return self._preferences
@preferences.setter
def preferences(self, preferences: Preferences):
"""Sets the preferences of this PeoplePatch.
:param preferences: The preferences of this PeoplePatch.
:type preferences: Preferences
"""
self._preferences = preferences
| 25.560345
| 101
| 0.601349
| 329
| 2,965
| 5.334347
| 0.206687
| 0.054701
| 0.154986
| 0.039886
| 0.333333
| 0.191453
| 0.17094
| 0
| 0
| 0
| 0
| 0.013685
| 0.309949
| 2,965
| 115
| 102
| 25.782609
| 0.844086
| 0.412142
| 0
| 0.214286
| 0
| 0
| 0.049443
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.190476
| false
| 0
| 0.142857
| 0
| 0.452381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9393f537340aad0fcc03fb7b4478b7455578c86
| 14,649
|
py
|
Python
|
Code/src/models/optim/DMSAD_trainer.py
|
antoine-spahr/Contrastive-Deep-Semi-Supervised-Anomaly-Detection
|
e84c28ce4dd28671d39752a7d21c674e05fcb495
|
[
"MIT"
] | 8
|
2021-02-19T17:30:00.000Z
|
2022-02-21T05:55:06.000Z
|
Code/src/models/optim/DMSAD_trainer.py
|
antoine-spahr/Contrastive-Deep-Semi-Supervised-Anomaly-Detection
|
e84c28ce4dd28671d39752a7d21c674e05fcb495
|
[
"MIT"
] | 1
|
2021-05-03T14:04:53.000Z
|
2021-05-03T14:48:01.000Z
|
Code/src/models/optim/DMSAD_trainer.py
|
antoine-spahr/Contrastive-Deep-Semi-Supervised-Anomaly-Detection
|
e84c28ce4dd28671d39752a7d21c674e05fcb495
|
[
"MIT"
] | 5
|
2021-02-18T22:43:40.000Z
|
2021-05-03T14:01:49.000Z
|
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import time
import logging
from sklearn.metrics import roc_auc_score
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
from src.models.optim.Loss_Functions import DMSADLoss
from src.utils.utils import print_progessbar
class DMSAD_trainer:
"""
Trainer for the DMSAD.
"""
def __init__(self, c, R, eta=1.0, gamma=0.05, n_sphere_init=100, n_epoch=150,
lr=1e-4, lr_milestone=(), batch_size=64, weight_decay=1e-6,
device='cuda', n_job_dataloader=0, print_batch_progress=False):
"""
Constructor of the DMSAD trainer.
----------
INPUT
|---- c (array like N_sphere x Embed dim) the centers of the hyperspheres.
| If None, the centers are initialized using Kmeans.
|---- R (1D array) the radii associated with the centers.
|---- eta (float) the weight of semi-supervised labels in the loss.
|---- gamma (float) the fraction of allowed outlier when setting the
| radius of each sphere in the end.
        |---- n_sphere_init (int) the number of initial hyperspheres.
|---- n_epoch (int) the number of epoch.
|---- lr (float) the learning rate.
|---- lr_milestone (tuple) the lr update steps.
|---- batch_size (int) the batch_size to use.
|---- weight_decay (float) the weight_decay for the Adam optimizer.
|---- device (str) the device to work on ('cpu' or 'cuda').
|---- n_job_dataloader (int) number of workers for the dataloader.
        |---- print_batch_progress (bool) whether to display the batch
| progress bar.
OUTPUT
|---- None
"""
# learning parameters
self.n_epoch = n_epoch
self.lr = lr
self.lr_milestone = lr_milestone
self.batch_size = batch_size
self.weight_decay = weight_decay
self.device = device
self.n_job_dataloader = n_job_dataloader
self.print_batch_progress = print_batch_progress
# DMSAD parameters
self.c = torch.tensor(c, device=self.device) if c is not None else None
self.R = torch.tensor(R, device=self.device) if R is not None else None
self.eta = eta
self.gamma = gamma
self.n_sphere_init = n_sphere_init
# Optimization parameters
self.eps = 1e-6
# Results
self.train_time = None
self.train_loss = None
self.eval_auc = None
self.eval_time = None
self.eval_scores = None
def train(self, dataset, net, valid_dataset=None):
"""
Train the DMSAD network on the provided dataset.
----------
INPUT
|---- dataset (torch.utils.data.Dataset) the dataset on which the
| network is trained. It must return an image, label, mask
        |      semi-supervised labels and the index.
        |---- net (nn.Module) The DMSAD to train.
|---- valid_dataset (torch.utils.data.Dataset) the dataset on which
| to validate the network at each epoch. Not validated if
| not provided.
OUTPUT
        |---- net (nn.Module) The trained DMSAD.
"""
logger = logging.getLogger()
# make the train dataloader
train_loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, \
shuffle=True, num_workers=self.n_job_dataloader)
# put net to device
net = net.to(self.device)
# initialize hypersphere center
if self.c is None:
logger.info(' Initializing the hypersphere centers.')
self.initialize_centers(train_loader, net)
logger.info(f' {self.c.shape[0]} centers successfully initialized.')
# define loss criterion
loss_fn = DMSADLoss(self.eta, eps=self.eps)
# define optimizer
optimizer = optim.Adam(net.parameters(), lr=self.lr, weight_decay=self.weight_decay)
# define scheduler
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestone, gamma=0.1)
# Start training
logger.info('Start Training the DMSAD.')
start_time = time.time()
epoch_loss_list = []
n_batch = len(train_loader)
for epoch in range(self.n_epoch):
net.train()
epoch_loss = 0.0
epoch_start_time = time.time()
n_k = torch.zeros(self.c.shape[0], device=self.device)
for b, data in enumerate(train_loader):
                # get input and semi-supervised labels
input, _, _, semi_label, _ = data
# put them to device
input = input.to(self.device).float().requires_grad_(True)
semi_label = semi_label.to(self.device)
# zero the network's gradients
optimizer.zero_grad()
# optimize by backpropagation
_, embed = net(input)
loss = loss_fn(embed, self.c, semi_label)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
# get the closest sphere and count the number of normal samples per sphere
idx = torch.argmin(torch.norm(self.c.unsqueeze(0) - embed.unsqueeze(1), p=2, dim=2), dim=1)
for i in idx[semi_label != -1]:
n_k[i] += 1
if self.print_batch_progress:
print_progessbar(b, len(train_loader), Name='\t\tTrain Batch', Size=40, erase=True)
# remove centers with less than gamma fraction of largest hypersphere number of sample
self.c = self.c[n_k >= self.gamma * torch.max(n_k)]
# validate if required
valid_auc = ''
if valid_dataset:
auc = self.evaluate(net, valid_dataset, return_auc=True, print_to_logger=False, save_tSNE=False)
valid_auc = f' Valid AUC {auc:.3%} |'
# log the epoch statistics
logger.info(f'----| Epoch: {epoch + 1:03}/{self.n_epoch:03} '
f'| Train Time: {time.time() - epoch_start_time:.3f} [s] '
f'| Train Loss: {epoch_loss / n_batch:.6f} '
f'| N sphere {self.c.shape[0]:03} |' + valid_auc)
epoch_loss_list.append([epoch+1, epoch_loss/n_batch])
# update scheduler
scheduler.step()
if epoch + 1 in self.lr_milestone:
logger.info(f'---- LR Scheduler : new learning rate {scheduler.get_lr()[0]:g}')
# Set the radius of each sphere as 1-gamma quantile of normal samples distances
logger.info(f'---- Setting the hyperspheres radii as the {1-self.gamma:.1%} quantiles of normal sample distances.')
self.set_radius(train_loader, net)
        logger.info(f'---- {self.R.shape[0]} radii successfully defined.')
# End training
self.train_loss = epoch_loss_list
self.train_time = time.time() - start_time
logger.info(f'---- Finished Training DMSAD in {self.train_time:.3f} [s]')
return net
def evaluate(self, net, dataset, return_auc=False, print_to_logger=True, save_tSNE=True):
"""
Evaluate the DMSAD network on the provided dataset.
----------
INPUT
|---- net (nn.Module) The DMSAD network to validate.
|---- dataset (torch.utils.data.Dataset) the dataset on which the
| network is evaluated.
|---- return_auc (bool) whether to return the computed auc or not.
|---- print_to_logger (bool) whether to print in the logger.
|---- save_tSNE (bool) whether to save a 2D t-SNE representation of
| the embedded data points.
OUTPUT
|---- (auc) (float) the validation auc if required.
"""
if print_to_logger:
logger = logging.getLogger()
# make dataloader
loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size,
shuffle=True, num_workers=self.n_job_dataloader)
# put net on device
net = net.to(self.device)
# Evaluating
if print_to_logger:
logger.info('Start Evaluating the DMSAD.')
start_time = time.time()
idx_label_score = []
net.eval()
with torch.no_grad():
for b, data in enumerate(loader):
# get data on device
input, label, _, semi_label, idx = data
input = input.to(self.device).float()
label = label.to(self.device)
semi_label = semi_label.to(self.device)
idx = idx.to(self.device)
# Embed input and compute anomaly score
_, embed = net(input)
# find closest sphere
score, sphere_idx = torch.min(torch.norm(self.c.unsqueeze(0) - embed.unsqueeze(1), p=2, dim=2), dim=1)
# append indices, labels, scores, sphere indices, and embeddings
idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
label.cpu().data.numpy().tolist(),
score.cpu().data.numpy().tolist(),
sphere_idx.cpu().data.numpy().tolist(),
embed.cpu().data.numpy().tolist()))
if self.print_batch_progress:
print_progessbar(b, len(loader), Name='\t\t Evaluation Batch', Size=40, erase=True)
# compute AUCs
index, label, score, sphere_index, embed = zip(*idx_label_score)
label, score = np.array(label), np.array(score)
auc = roc_auc_score(label, score)
if save_tSNE:
embed = np.array(embed)
embed = TSNE(n_components=2).fit_transform(embed)
idx_label_score = list(zip(index, label.tolist(), score.tolist(), sphere_index, embed.tolist()))
self.eval_time = time.time() - start_time
self.eval_scores = idx_label_score
self.eval_auc = auc
if print_to_logger:
logger.info(f'Evaluation Time : {self.eval_time:.3f} [s]')
logger.info(f'Evaluation AUC : {self.eval_auc:.3%}')
logger.info('Finished Evaluating the DMSAD.')
if return_auc:
return auc
def initialize_centers(self, loader, net, eps=0.1):
"""
Initialize the multiple centers using the K-Means algorithm on the
embedding of all the normal samples.
----------
INPUT
|---- loader (torch.utils.data.Dataloader) the loader of the data.
|---- net (nn.Module) the DMSAD network. The output must be a vector
| embedding of the input. The network should be an
| autoencoder for which the forward pass returns both the
| reconstruction and the embedding of the input.
|---- eps (float) minimal absolute value for center coordinates, to avoid
| centers too close to zero.
OUTPUT
|---- None
"""
# Get sample embedding
repr = []
net.eval()
with torch.no_grad():
for b, data in enumerate(loader):
# get data
input, _, _, semi_label, _ = data
input = input.to(self.device).float()
semi_label = semi_label.to(self.device)
# keep only normal samples
input = input[semi_label != -1]
# get the embedding of the batch
_, embed = net(input)
repr.append(embed)
if self.print_batch_progress:
print_progessbar(b, len(loader), Name='\t\tBatch', Size=40, erase=True)
repr = torch.cat(repr, dim=0).cpu().numpy()
# Apply Kmeans algorithm on embedding
kmeans = KMeans(n_clusters=self.n_sphere_init).fit(repr)
self.c = torch.tensor(kmeans.cluster_centers_).to(self.device)
# push coordinates within eps of zero to +/- eps so centers cannot be trivially matched by a near-zero embedding
self.c[(torch.abs(self.c) < eps) & (self.c < 0)] = -eps
self.c[(torch.abs(self.c) < eps) & (self.c > 0)] = eps
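# A small worked example of the clamp above (hypothetical values, eps=0.1):
# a center with coordinates [-0.03, 0.20, 0.05] becomes [-0.10, 0.20, 0.10],
# so no non-zero coordinate can lie inside (-eps, eps) and be trivially
# matched by a near-zero embedding.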
def set_radius(self, loader, net):
"""
Compute each radius as the (1-gamma) quantile of the normal samples'
distances to their center. The anomaly score is then
||net(x) - c_j||^2 - R_j^2 (negative inside the sphere, positive outside).
----------
INPUT
|---- loader (torch.utils.data.Dataloader) the loader of the data.
|---- net (nn.Module) the DMSAD network. The output must be a vector
| embedding of the input. The network should be an
| autoencoder for which the forward pass returns both the
| reconstruction and the embedding of the input.
OUTPUT
|---- None
"""
dist_list = [[] for _ in range(self.c.shape[0])] # initialize N_sphere lists
net.eval()
with torch.no_grad():
for b, data in enumerate(loader):
# get data
input, _, _, semi_label, _ = data
input = input.to(self.device).float()
semi_label = semi_label.to(self.device)
# keep only normal samples
input = input[semi_label != -1]
# get the embedding of the batch
_, embed = net(input)
# get the distance to the closest sphere's center
dist, idx = torch.min(torch.norm(self.c.unsqueeze(0) - embed.unsqueeze(1), p=2, dim=2), dim=1)
for i, d in zip(idx, dist):
dist_list[i].append(d)
if self.print_batch_progress:
print_progessbar(b, len(loader), Name='\t\tBatch', Size=40, erase=True)
# compute each radius as the (1-gamma) quantile of that sphere's normal-sample distances
self.R = torch.zeros(self.c.shape[0], device=self.device)
for i, dist in enumerate(dist_list):
dist = torch.stack(dist, dim=0)
self.R[i] = torch.kthvalue(dist, k=max(1, int((1 - self.gamma) * dist.shape[0])))[0] # k must be at least 1
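# Minimal usage sketch of this trainer (hypothetical names: `trainer` is an
# instance of this class, `encoder` is any nn.Module whose forward pass returns
# (reconstruction, embedding), and the datasets follow the 5-tuple convention
# (image, label, mask, semi_label, index) assumed above):
#
#     encoder = trainer.train(train_set, encoder)
#     auc = trainer.evaluate(encoder, test_set, return_auc=True)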
| 42.708455
| 123
| 0.558263
| 1,806
| 14,649
| 4.403101
| 0.170543
| 0.013204
| 0.019618
| 0.013833
| 0.34419
| 0.303949
| 0.265342
| 0.246605
| 0.246605
| 0.235538
| 0
| 0.009384
| 0.338044
| 14,649
| 342
| 124
| 42.833333
| 0.810663
| 0.309031
| 0
| 0.247059
| 0
| 0.005882
| 0.081267
| 0.009536
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.064706
| 0
| 0.111765
| 0.094118
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f93b09d7873482279865a3e138f9e289b66d1ef0
| 7,600
|
py
|
Python
|
escher/tests/test_plots.py
|
phantomas1234/escher
|
47f3291beefd7cc90207755c717e83f385262956
|
[
"MIT"
] | null | null | null |
escher/tests/test_plots.py
|
phantomas1234/escher
|
47f3291beefd7cc90207755c717e83f385262956
|
[
"MIT"
] | null | null | null |
escher/tests/test_plots.py
|
phantomas1234/escher
|
47f3291beefd7cc90207755c717e83f385262956
|
[
"MIT"
] | null | null | null |
from __future__ import print_function, unicode_literals
from escher import __schema_version__
import escher.server
from escher import Builder, get_cache_dir, clear_cache
from escher.plots import (_load_resource, local_index, server_index,
model_json_for_name, map_json_for_name)
from escher.urls import get_url
import os
import sys
from os.path import join
import json
from pytest import raises, mark
try:
from urllib.error import URLError
except ImportError:
from urllib2 import URLError
if sys.version < '3':
unicode_type = unicode
else:
unicode_type = str
# cache
def test_get_cache_dir():
d = get_cache_dir()
assert os.path.isdir(d)
d = get_cache_dir(name='maps')
assert os.path.isdir(d)
def test_clear_cache(tmpdir, request):
(tmpdir.mkdir('maps').mkdir('Escherichia coli')
.join('iJO1366.Central metabolism.json').write('temp'))
(tmpdir.mkdir('models').mkdir('Escherichia coli')
.join('iJO1366.json').write('temp'))
clear_cache(str(tmpdir))
assert os.listdir(str(tmpdir)) == []
def fin():
tmpdir.remove()
request.addfinalizer(fin)
def test_local_index(tmpdir, request):
maps = tmpdir.mkdir('maps')
maps.mkdir('Escherichia coli').join('iJO1366.Central metabolism.json').write('temp')
# ignore these
maps.join('ignore_md.json').write('ignore')
tmpdir.mkdir('models').mkdir('Escherichia coli').join('iJO1366.json').write('temp')
assert local_index(str(tmpdir)) == { 'maps': [ { 'organism': 'Escherichia coli',
'map_name': 'iJO1366.Central metabolism' } ],
'models': [ { 'organism': 'Escherichia coli',
'model_name': 'iJO1366' } ] }
def fin():
tmpdir.remove()
request.addfinalizer(fin)
# server
@mark.web
def test_server_index():
index = server_index()
map_0 = index['maps'][0]
assert 'organism' in map_0
assert 'map_name' in map_0
model_0 = index['models'][0]
assert 'organism' in model_0
assert 'model_name' in model_0
# model and maps
def test_model_json_for_name(tmpdir):
models = tmpdir.mkdir('models')
models.mkdir('Escherichia coli').join('iJO1366.json').write('"temp"')
json = model_json_for_name('iJO1366', cache_dir=str(tmpdir))
assert json == '"temp"'
@mark.web
def test_model_json_for_name_web(tmpdir):
data = model_json_for_name('iJO1366', cache_dir=str(tmpdir))
assert 'reactions' in data
assert 'metabolites' in data
def test_map_json_for_name(tmpdir):
maps = tmpdir.mkdir('maps')
maps.mkdir('Escherichia coli').join('iJO1366.Central metabolism.json').write('"temp"')
json = map_json_for_name('iJO1366.Central metabolism', cache_dir=str(tmpdir))
assert json == '"temp"'
@mark.web
def test_map_json_for_name_web(tmpdir):
data = map_json_for_name('iJO1366.Central metabolism', cache_dir=str(tmpdir))
root = get_url('escher_root', protocol='https').rstrip('/')
assert json.loads(data)[0]['schema'] == '/'.join([root, 'escher', 'jsonschema',
__schema_version__ + '#'])
# helper functions
def test__load_resource(tmpdir):
assert _load_resource('{"r": "val"}', 'name') == '{"r": "val"}'
directory = os.path.abspath(os.path.dirname(__file__))
assert _load_resource(join(directory, 'example.json'), 'name').strip() == '{"r": "val"}'
with raises(ValueError) as err:
p = join(str(tmpdir), 'dummy')
with open(p, 'w') as f:
f.write('dummy')
_load_resource(p, 'name')
assert 'not a valid json file' in str(err.value)
@mark.web
def test__load_resource_web(tmpdir):
url = '/'.join([get_url('map_download', protocol='https'),
'Escherichia%20coli/iJO1366.Central%20metabolism.json'])
_ = json.loads(_load_resource(url, 'name'))
def test_Builder(tmpdir):
b = Builder(map_json='{"r": "val"}', model_json='{"r": "val"}')
# Cannot load dev/local version without an explicit css string property.
# TODO include a test where these do not raise.
with raises(Exception):
b.display_in_notebook(js_source='dev')
with raises(Exception):
b.display_in_notebook(js_source='local')
# ok with embedded_css arg
b = Builder(map_json='{"r": "val"}', model_json='{"r": "val"}', embedded_css='')
b.display_in_notebook(js_source='dev')
b.save_html(join(str(tmpdir), 'Builder.html'), js_source='dev')
# test options
with raises(Exception):
b._get_html(js_source='devv')
with raises(Exception):
b._get_html(menu='')
with raises(Exception):
b._get_html(scroll_behavior='asdf')
b._get_html(js_source='local')
b._get_html(menu='all')
b._get_html(scroll_behavior='zoom')
@mark.web
def test_Builder_download():
# download
b = Builder(map_name='iJO1366.Central metabolism',
model_name='iJO1366')
assert b.loaded_map_json is not None
assert b.loaded_model_json is not None
b._get_html(js_source='web')
b.display_in_notebook(height=200)
# data
b = Builder(map_name='iJO1366.Central metabolism',
model_name='iJO1366',
reaction_data=[{'GAPD': 123}, {'GAPD': 123}])
b = Builder(map_name='iJO1366.Central metabolism',
model_name='iJO1366',
metabolite_data=[{'nadh_c': 123}, {'nadh_c': 123}])
b = Builder(map_name='iJO1366.Central metabolism',
model_name='iJO1366',
gene_data=[{'gapA': 123}, {'adhE': 123}])
assert type(b.the_id) is unicode_type
assert len(b.the_id) == 10
def test_Builder_options():
b = Builder(embedded_css='')
b.set_metabolite_no_data_color('white')
assert b.metabolite_no_data_color=='white'
html = b._get_html(js_source='local')
assert 'metabolite_no_data_color: "white"' in html
def test__draw_js():
b = Builder(map_json='"useless_map"', model_json='"useless_model"',
embedded_css='')
def look_for_string(st, substring):
"""Look for the string in the substring. This solves a bug in py.test
for these cases"""
try:
found = st.find(substring)
assert found > -1
except AssertionError:
raise AssertionError('Could not find\n\n%s\n\nin\n\n%s' % (substring, st))
# no static parse, dev
ijs = b._initialize_javascript('id', 'local')
js = b._draw_js('id', True, 'all', True, True, True, 'pan', True, None)
look_for_string(ijs, 'var map_data_id = "useless_map";')
look_for_string(ijs, 'var model_data_id = "useless_model";')
look_for_string(js, 'Builder(map_data_id, model_data_id, embedded_css_id, d3.select("#id"), options);')
# static parse, not dev
ijs = b._initialize_javascript('id', 'local')
static_index = '{"my": ["useless", "index"]}'
js = b._draw_js('id', True, 'all', True, False, True, 'pan', True, static_index)
look_for_string(ijs, 'var map_data_id = "useless_map";')
look_for_string(ijs, 'var model_data_id = "useless_model";')
look_for_string(js, 'escher.static.load_map_model_from_url("%s/maps/", "%s/models/",' % (__schema_version__, __schema_version__))
look_for_string(js, static_index)
look_for_string(js, 'options, function(map_data_id, model_data_id, options) {')
look_for_string(js, 'escher.Builder(map_data_id, model_data_id, embedded_css_id, d3.select("#id"), options);')
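# Note: the @mark.web tests above hit the network. Assuming a standard pytest
# marker configuration, the offline tests alone can be run with something like:
#
#     pytest escher/tests/test_plots.py -m "not web"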
| 37.073171
| 133
| 0.644342
| 1,018
| 7,600
| 4.540275
| 0.191552
| 0.021203
| 0.023799
| 0.042406
| 0.44894
| 0.391173
| 0.338598
| 0.299654
| 0.290134
| 0.259844
| 0
| 0.020858
| 0.211447
| 7,600
| 204
| 134
| 37.254902
| 0.750375
| 0.046447
| 0
| 0.246835
| 0
| 0
| 0.22313
| 0.027147
| 0
| 0
| 0
| 0.004902
| 0.158228
| 1
| 0.107595
| false
| 0
| 0.088608
| 0
| 0.196203
| 0.006329
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f94563e81861f76b57c556bc8928617eb8ac0410
| 19,471
|
py
|
Python
|
symbol.py
|
LizhengMathAi/symbol_FEM
|
a2679ff90cfffa40316e33102be1a802e210768a
|
[
"Apache-2.0"
] | 1
|
2021-02-07T00:53:51.000Z
|
2021-02-07T00:53:51.000Z
|
symbol.py
|
LizhengMathAi/symbol_FEM
|
a2679ff90cfffa40316e33102be1a802e210768a
|
[
"Apache-2.0"
] | null | null | null |
symbol.py
|
LizhengMathAi/symbol_FEM
|
a2679ff90cfffa40316e33102be1a802e210768a
|
[
"Apache-2.0"
] | null | null | null |
from functools import reduce
import numpy as np
from sparse import SparseTensor
def reduce_prod(seq): return reduce(lambda item_1, item_2: item_1 * item_2, seq)
class Polynomial:
def __init__(self, coeff, indices, merge=True):
"""\\sum_{i=0}^{N-1} coeff[i] \\Pi_{j=0}^{NV-1} x_j^{indices[i, j]}"""
self.degree = np.max(np.sum(indices, axis=-1))
self.n_elements = indices.shape[-1]
if merge:
self.coeff, self.indices = SparseTensor.merge(coeff, indices)
else:
self.coeff, self.indices = coeff, indices
def __call__(self, x):
coeff = np.reshape(self.coeff, newshape=(1, -1))
x = np.reshape(x, newshape=(-1, 1, self.n_elements))
indices = np.reshape(self.indices, newshape=(1, -1, self.n_elements))
return np.sum(coeff * np.prod(np.power(x, indices), axis=2), axis=1)
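# Worked example of the representation above (hypothetical values):
# coeff = [2., 3.] with indices = [[1, 0], [0, 2]] encodes
# p(x0, x1) = 2*x0 + 3*x1**2, so __call__ on x = (1, 2) returns 2*1 + 3*4 = 14.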
def __str__(self): return '\n'.join(["{:.2f}\t{}".format(c, index) for c, index in zip(self.coeff, self.indices)])
def __neg__(self): return Polynomial(-self.coeff, self.indices, merge=False)
def __add__(self, other):
if type(other).__name__ in ["bool", "int", "float", "int64", "float64"]:
other = Polynomial(np.array([other, ]), np.zeros(shape=(1, self.n_elements), dtype=self.indices.dtype), merge=False)
return self.__add__(other)
elif isinstance(other, Polynomial):
assert self.n_elements == other.n_elements
return Polynomial(np.hstack([self.coeff, other.coeff]), np.vstack([self.indices, other.indices]), merge=True)
else:
raise ValueError
def __sub__(self, other): return self.__add__(other.__neg__())
def __mul__(self, other):
if type(other).__name__ in ["bool", "int", "float", "int64", "float64"]:
return Polynomial(self.coeff * other, self.indices, merge=False)
elif isinstance(other, Polynomial):
assert self.n_elements == other.n_elements
coeff = np.expand_dims(self.coeff, axis=0) * np.expand_dims(other.coeff, axis=1)
coeff = coeff.flatten()
indices = np.expand_dims(self.indices, axis=0) + np.expand_dims(other.indices, axis=1)
indices = np.reshape(indices, newshape=(-1, self.n_elements))
return Polynomial(coeff, indices, merge=True)
else:
raise ValueError
def derivative(self, order=1):
"""
+----------+-----------------+--------------+
| item | data type | shape |
+----------+-----------------+--------------+
| order | int | [] |
| return | PolynomialArray | [ND] * order |
+----------+-----------------+--------------+
"""
array = [self]
for _ in range(order):
collection = []
for poly in array:
for i in range(self.indices.shape[1]):
coeff = poly.coeff * poly.indices[:, i]
indices = np.maximum(poly.indices - np.eye(poly.n_elements, dtype=poly.indices.dtype)[[i], :], 0)
collection.append(Polynomial(coeff, indices, merge=True))
array = collection
return PolynomialArray(array, shape=[self.indices.shape[1]] * order)
def directional_derivative(self, c, order=1):
"""
+----------+---------------+--------------+
| item | data type | shape |
+----------+---------------+--------------+
| c | numpy.ndarray | [ND] * order |
| order | int | [] |
| return | Polynomial | [] |
+----------+---------------+--------------+
return: \\sum_{ij...} c_{ij...} \\frac{\\partial^{order} self}{\\partial \\lambda_i \\partial \\lambda_j ...}
"""
coeff = self.coeff
indices = self.indices
dim = self.n_elements
for axis in range(order):
coeff = np.expand_dims(coeff, axis=0) * np.transpose(indices, axes=[-1] + list(range(axis+1)))
indices = np.expand_dims(indices, axis=0) - np.expand_dims(np.eye(dim, dtype=int), axis=list(range(1, axis + 2)))
indices = np.maximum(indices, 0)
coeff = (np.expand_dims(c, axis=-1) * coeff).flatten()
indices = np.reshape(indices, newshape=(-1, dim))
return Polynomial(coeff, indices, merge=True)
class PolynomialArray:
def __init__(self, array, shape): self.array, self.shape = array, list(shape)
def reshape(self, shape):
shape = list(shape)
for axis in range(shape.__len__()):
if shape[axis] == -1:
shape[axis] = -reduce_prod(self.shape) // reduce_prod(shape)
break
return PolynomialArray(self.array, shape)
def transpose(self, axes):
transpose_indices = np.transpose(np.reshape(np.arange(self.array.__len__()), newshape=self.shape), axes=axes)
array = [self.array[index] for index in transpose_indices.flatten()]
shape = [self.shape[axis] for axis in axes]
return PolynomialArray(array, shape)
def sum(self, axis, keep_dim=False):
axes = [axis] + [ax for ax in range(self.shape.__len__()) if ax != axis]
transpose_array = self.transpose(axes)
result = reduce(lambda u, v: u + v, [transpose_array[k] for k in range(transpose_array.shape[0])])
if keep_dim:
result.shape.insert(axis, 1)
return result
def __call__(self, x): return np.reshape(np.stack([poly(x) for poly in self.array], axis=1), newshape=[-1] + self.shape)
def __getitem__(self, item):
valid_indices = np.reshape(np.arange(self.array.__len__()), newshape=self.shape)[item]
array = [self.array[index] for index in valid_indices.flatten()]
shape = valid_indices.shape
return array[0] if shape == () else PolynomialArray(array, shape)
def __eq__(self, other): return (self.shape == other.shape) and sum([sp != op for sp, op in zip(self.array, other.array)]) == 0
def __neg__(self): return PolynomialArray([-array for array in self.array], self.shape)
def __add__(self, other): # TODO: in large-scale calculations, this operator runs slowly in serial mode.
if type(other).__name__ in ["bool", "int", "float", "Polynomial"]:
array = PolynomialArray([sa + other for sa in self.array], self.shape)
return array.reshape(self.shape)
elif isinstance(other, np.ndarray):
n_elements, dtype = self.array[0].n_elements, self.array[0].indices.dtype
arr = [Polynomial(np.array([item, ]), np.zeros(shape=(1, n_elements), dtype=dtype)) for item in other.flatten()]
return self.__add__(PolynomialArray(arr, shape=other.shape))
elif isinstance(other, PolynomialArray):
self_indices = np.reshape(np.arange(np.prod(self.shape)), self.shape)
o_indices = np.reshape(np.arange(np.prod(other.shape)), other.shape)
self_indices, o_indices = self_indices + np.zeros_like(o_indices), o_indices + np.zeros_like(self_indices)
array = [self.array[si] + other.array[oi] for si, oi in zip(self_indices.flatten(), o_indices.flatten())]
return PolynomialArray(array, shape=self_indices.shape)
else:
raise ValueError
def __sub__(self, other): return self.__add__(other.__neg__())
def __mul__(self, other): # TODO: in large-scale calculations, this operator runs slowly in serial mode.
if type(other).__name__ in ["bool", "int", "float", "Polynomial"]:
array = PolynomialArray([sa * other for sa in self.array], self.shape)
return array.reshape(self.shape)
elif isinstance(other, np.ndarray):
n_elements, dtype = self.array[0].n_elements, self.array[0].indices.dtype
arr = [Polynomial(np.array([item, ]), np.zeros(shape=(1, n_elements), dtype=dtype)) for item in other.flatten()]
return self.__mul__(PolynomialArray(arr, shape=other.shape))
elif isinstance(other, PolynomialArray):
self_indices = np.reshape(np.arange(np.prod(self.shape)), self.shape)
o_indices = np.reshape(np.arange(np.prod(other.shape)), other.shape)
self_indices, o_indices = self_indices + np.zeros_like(o_indices), o_indices + np.zeros_like(self_indices)
array = [self.array[si] * other.array[oi] for si, oi in zip(self_indices.flatten(), o_indices.flatten())]
return PolynomialArray(array, shape=self_indices.shape)
else:
raise ValueError
@classmethod
def stack(cls, arrays, axis):
axis %= arrays[0].shape.__len__() + 1
array = sum([item.array for item in arrays], [])
shape = [arrays.__len__()] + list(arrays[0].shape)
axes = [i for i in range(shape.__len__()) if i != axis]
axes.insert(axis, 0)
return PolynomialArray(array, shape).transpose(axes)
@classmethod
def concat(cls, arrays, axis):
axes = [axis] + [i for i in range(arrays[0].shape.__len__()) if i != axis]
shape = [-1] + [dim for i, dim in enumerate(arrays[0].shape) if i != axis]
arrays = sum([cls.transpose(array, axes).array for array in arrays], [])
arrays = cls(arrays, shape=(arrays.__len__(), ))
arrays = arrays.reshape(shape)
axes = list(range(1, shape.__len__()))
axes.insert(axis, 0)
return arrays.transpose(axes)
def derivative(self, order=1):
"""
+----------+-----------------+---------------------------+
| item | data type | shape |
+----------+-----------------+---------------------------+
| order | int | [] |
| return | PolynomialArray | self.shape + [ND] * order |
+----------+-----------------+---------------------------+
"""
array = PolynomialArray.stack([poly.derivative(order) for poly in self.array], axis=0)
return array.reshape(self.shape + array.shape[1:])
def directional_derivative(self, c, order=1):
"""
+----------+-----------------+---------------------------+
| item | data type | shape |
+----------+-----------------+---------------------------+
| c | numpy.ndarray | self.shape + [ND] * order |
| order | int | [] |
| return | PolynomialArray | self.shape |
+----------+-----------------+---------------------------+
return: \\sum_{ij...} c_{ij...}^{uv...} \\frac{\\partial^{order} self_{uv...}}{\\partial \\lambda_i \\partial \\lambda_j ...}
"""
ni = max([p.coeff.__len__() for p in self.array])
dim = self.array[0].n_elements
coeff = [np.concatenate([p.coeff, np.zeros(shape=(ni - p.coeff.__len__(), ))], axis=0) for p in self.array]
coeff = np.stack(coeff, axis=1) # shape = [NI, ?]
indices = [np.concatenate([p.indices, np.zeros(shape=(ni - p.coeff.__len__(), dim), dtype=int)], axis=0) for p in self.array]
indices = np.stack(indices, axis=2) # shape = [NI, ND, ?]
for axis in range(order):
axes = [axis + 1] + [i for i in range(axis + 3) if i != axis + 1]
coeff = np.expand_dims(coeff, axis=0) * np.transpose(indices, axes=axes)
axes = list(range(1, axis + 2)) + [axis + 3]
indices = np.expand_dims(indices, axis=0) - np.expand_dims(np.eye(dim, dtype=int), axis=axes)
indices = np.maximum(indices, 0)
c = np.reshape(c, newshape=[-1, 1] + [dim] * order)
c = np.transpose(c, axes=list(range(2, order + 2)) + [1, 0]) # shape = [ND] * order + [1] + [?]
coeff = np.reshape((c * coeff), newshape=(dim ** order * ni, -1)) # shape = [ND] * order + [NI] + [?]
indices = np.reshape(indices, newshape=(dim ** order * ni, dim, -1)) # shape = [ND] * order + [NI] + [ND] + [?]
return PolynomialArray([Polynomial(coeff[:, i], indices[:, :, i], merge=True) for i in range(coeff.shape[-1])], shape=self.shape)
def integral(self, dim, determinant):
"""
Only valid on a triangulation (simplex) grid!
\int_K \prod_i \lambda_i^{\alpha_i} dx = (\prod_i \alpha_i!) / (dim + \sum_i \alpha_i)! * determinant
"""
ni = max([p.coeff.__len__() for p in self.array])
nd = self.array[0].n_elements
coeff = [np.concatenate([p.coeff, np.zeros(shape=(ni - p.coeff.__len__(), ))], axis=0) for p in self.array]
coeff = np.stack(coeff, axis=1) # shape = [NI, ?]
indices = [np.concatenate([p.indices, np.zeros(shape=(ni - p.coeff.__len__(), nd), dtype=int)], axis=0) for p in self.array]
indices = np.stack(indices, axis=2) # shape = [NI, ND, ?]
degree = np.max(indices)
if degree == 0:
numerator = np.ones_like(indices) # shape = [NI, ND, ?]
else:
numerator = reduce_prod([np.maximum(indices - i, 1) for i in range(degree)]) # shape = [NI, ND, ?]
numerator = np.prod(numerator, axis=1) # shape = [NI, ?]
denominator = np.sum(indices, axis=1) + dim # shape = [NI, ?]
denominator = reduce_prod([np.maximum(denominator - i, 1) for i in range(degree + dim)]) # shape = [NI, ?]
return np.reshape(np.sum(coeff * numerator / denominator, axis=0), newshape=self.shape) * determinant
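# Worked example of the simplex integral above: on a triangle (dim = 2), the
# monomial \lambda_1 \lambda_2 has \prod_i \alpha_i! = 1 and
# (dim + \sum_i \alpha_i)! = 4! = 24, so \int_K \lambda_1 \lambda_2 dx
# = determinant / 24 (for an affine map from the reference triangle,
# determinant = 2 * area of the triangle).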
def unit_test():
np.set_printoptions(precision=2)
x = np.random.rand(4, 3)
const_array = np.random.rand(8, 7)
# item 6, degree 2, elements 3
poly = Polynomial(coeff=np.random.rand(6), indices=np.random.randint(0, 3, size=(6, 3)))
polys_1 = [Polynomial(coeff=np.random.rand(5), indices=np.random.randint(0, 5, size=(5, 3))) for _ in range(56)]
polys_1 = PolynomialArray(polys_1, [8, 7])
polys_2 = [Polynomial(coeff=np.random.rand(4), indices=np.random.randint(0, 5, size=(4, 3))) for i in range(56)]
polys_2 = PolynomialArray(polys_2, [8, 7])
polys_3 = [Polynomial(coeff=np.random.rand(3), indices=np.random.randint(0, 5, size=(3, 3))) for i in range(7*8*9)]
polys_3 = PolynomialArray(polys_3, [9, 8, 7])
# four fundamental rules
print("polys_1(x) + np.pi - (polys_1 + np.pi)(x):")
print(np.max(np.abs(polys_1(x) + np.pi - (polys_1 + np.pi)(x))))
print("polys_1(x) + poly(x) - (polys_1 + poly)(x):")
print(np.max(np.abs(polys_1(x) + np.reshape(poly(x), (-1, 1, 1)) - (polys_1 + poly)(x))))
print("polys_1(x) + np.expand_dims(const_array, axis=0) - (polys_1 + const_array)(x):")
print(np.max(np.abs(polys_1(x) + np.expand_dims(const_array, axis=0) - (polys_1 + const_array)(x))))
print("polys_1(x) + polys_2(x) - (polys_1 + polys_2)(x):")
print(np.max(np.abs(polys_1(x) + polys_2(x) - (polys_1 + polys_2)(x))))
print("polys_1[:, [1]](x) + polys_2[[-1], :](x) - (polys_1[:, [1]] + polys_2[[-1], :])(x):")
print(np.max(np.abs(polys_1[:, [1]](x) + polys_2[[-1], :](x) - (polys_1[:, [1]] + polys_2[[-1], :])(x))))
print("polys_1(x) - np.pi - (polys_1 - np.pi)(x):")
print(np.max(np.abs(polys_1(x) - np.pi - (polys_1 - np.pi)(x))))
print("polys_1(x) - poly(x) - (polys_1 - poly)(x):")
print(np.max(np.abs(polys_1(x) - np.reshape(poly(x), (-1, 1, 1)) - (polys_1 - poly)(x))))
print("polys_1(x) - np.expand_dims(const_array, axis=0) - (polys_1 - const_array)(x):")
print(np.max(np.abs(polys_1(x) - np.expand_dims(const_array, axis=0) - (polys_1 - const_array)(x))))
print("polys_1(x) - polys_2(x) - (polys_1 - polys_2)(x):")
print(np.max(np.abs(polys_1(x) - polys_2(x) - (polys_1 - polys_2)(x))))
print("polys_1[:, [1]](x) - polys_2[[-1], :](x) - (polys_1[:, [1]] - polys_2[[-1], :])(x):")
print(np.max(np.abs(polys_1[:, [1]](x) - polys_2[[-1], :](x) - (polys_1[:, [1]] - polys_2[[-1], :])(x))))
print("polys_1(x) * np.pi - (polys_1 * np.pi)(x):")
print(np.max(np.abs(polys_1(x) * np.pi - (polys_1 * np.pi)(x))))
print("polys_1(x) * poly(x) - (polys_1 * poly)(x):")
print(np.max(np.abs(polys_1(x) * np.reshape(poly(x), (-1, 1, 1)) - (polys_1 * poly)(x))))
print("polys_1(x) * np.expand_dims(const_array, axis=0) - (polys_1 * const_array)(x):")
print(np.max(np.abs(polys_1(x) * np.expand_dims(const_array, axis=0) - (polys_1 * const_array)(x))))
print("polys_1(x) * polys_2(x) - (polys_1 * polys_2)(x):")
print(np.max(np.abs(polys_1(x) * polys_2(x) - (polys_1 * polys_2)(x))))
print("polys_1[:, [1]](x) * polys_2[[-1], :](x) - (polys_1[:, [1]] * polys_2[[-1], :])(x):")
print(np.max(np.abs(polys_1[:, [1]](x) * polys_2[[-1], :](x) - (polys_1[:, [1]] * polys_2[[-1], :])(x))))
print(np.max(np.abs(polys_1.reshape(shape=[2, 4, 7])(x) - np.reshape(polys_1(x), newshape=(-1, 2, 4, 7)))))
# check concat
print("PolynomialArray.concat([polys_1, polys_2], axis=1)(x) - np.concatenate([polys_1(x), polys_2(x)], axis=1):")
print(np.max(np.abs(PolynomialArray.concat([polys_1, polys_2], axis=1)(x) - np.concatenate([polys_1(x), polys_2(x)], axis=2))))
# check sum
print(np.max(np.abs(polys_3.sum(axis=0, keep_dim=True)(x) - np.sum(polys_3(x), axis=0 + 1, keepdims=True))))
print(np.max(np.abs(polys_3.sum(axis=1, keep_dim=True)(x) - np.sum(polys_3(x), axis=1 + 1, keepdims=True))))
print(np.max(np.abs(polys_3.sum(axis=2, keep_dim=True)(x) - np.sum(polys_3(x), axis=2 + 1, keepdims=True))))
# check integral
poly_1 = Polynomial(
coeff=np.array([
1,
3,
]),
indices=np.array([
[1, 2, 3, 4],
[1, 1, 1, 1],
])
)
poly_2 = Polynomial(
coeff=np.array([
2,
4,
]),
indices=np.array([
[4, 3, 2, 1],
[0, 0, 0, 0],
])
)
poly = PolynomialArray(array=[poly_1, poly_2], shape=(2, ))
ans_1 = 0.5 * 1 * (1 * 2 * 6 * 24) / reduce_prod(list(range(1, 14)))
ans_1 += 0.5 * 3 * (1 * 1 * 1 * 1) / reduce_prod(list(range(1, 8)))
ans_2 = 2 * 2 * (1 * 2 * 6 * 24) / reduce_prod(list(range(1, 14)))
ans_2 += 2 * 4 * (1 * 1 * 1 * 1) / reduce_prod(list(range(1, 4)))
print(poly.integral(dim=3, determinant=np.array([0.5, 2])) - np.array([ans_1, ans_2]))
# check derivative
poly = poly.derivative(order=1)
print(poly[0, 1])
# check derivative in Polynomial
c = np.random.rand(3, 3)
coeff = np.random.randint(100, size=(4, )) / 100
indices = np.random.randint(10, size=(4, 3))
poly = Polynomial(coeff, indices)
type_1 = (poly.derivative(order=2) * c).sum(axis=0).sum(axis=0)
type_2 = poly.directional_derivative(c, order=2)
error = type_1 - type_2
error = Polynomial(error.coeff, error.indices, merge=True)
print("error:", error)
# check derivative in PolynomialArray
poly = PolynomialArray([poly, poly+1, poly-1, poly*2], shape=(2, 2))
c = np.random.rand(2, 2, 3, 3)
type_1 = (poly.derivative(order=2) * c).sum(axis=2).sum(axis=2)
type_2 = poly.directional_derivative(c, order=2)
for item in (type_1 - type_2).array:
item = Polynomial(item.coeff, item.indices, merge=True)
print("error:", item)
if __name__ == "__main__":
unit_test()
| 49.544529
| 137
| 0.558472
| 2,684
| 19,471
| 3.889717
| 0.069672
| 0.039655
| 0.018103
| 0.022989
| 0.590805
| 0.517241
| 0.483621
| 0.456513
| 0.449425
| 0.435824
| 0
| 0.029213
| 0.240512
| 19,471
| 392
| 138
| 49.670918
| 0.676765
| 0.126547
| 0
| 0.271062
| 0
| 0.025641
| 0.066955
| 0.008414
| 0
| 0
| 0
| 0.002551
| 0.007326
| 1
| 0.098901
| false
| 0
| 0.010989
| 0.029304
| 0.197802
| 0.150183
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f94e553843e7ec006e6711f29cd3c8bedc298b1e
| 18,184
|
py
|
Python
|
pfstats.py
|
altinukshini/pfstats
|
90137cdfdc7c5ae72b782c3fc113d56231e2667d
|
[
"MIT"
] | 18
|
2017-09-03T19:59:08.000Z
|
2022-02-02T11:59:48.000Z
|
pfstats.py
|
altinukshini/pfstats
|
90137cdfdc7c5ae72b782c3fc113d56231e2667d
|
[
"MIT"
] | 3
|
2018-04-23T14:09:47.000Z
|
2020-09-30T10:26:16.000Z
|
pfstats.py
|
altinukshini/pfstats
|
90137cdfdc7c5ae72b782c3fc113d56231e2667d
|
[
"MIT"
] | 14
|
2017-09-03T19:59:10.000Z
|
2022-03-15T12:19:57.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Postfix mail log parser and filter.
This script filters and parses Postfix logs based on provided filter parameters.
Example:
To use this script, type 'python pfstats.py -h'. Below is an example
that filters a Postfix log file (even gzipped) based on date,
email sender, and email status::
$ python pfstats.py -d 'Jul 26' -t 'bounced' -s 'info@altinukshini.com'
Todo:
* Filter and parse logs from a year ago
* Add receiver filter
* Maybe provide from-to date filtering option
"""
__author__ = "Altin Ukshini"
__copyright__ = "Copyright (c) 2017, Altin Ukshini"
__license__ = "MIT License"
__version__ = "1.0"
__maintainer__ = "Altin Ukshini"
__email__ = "altin.ukshini@gmail.com"
__status__ = "Production"
import re
import os
import sys
import gzip
import time
import argparse
import datetime
from random import randint
from argparse import RawTextHelpFormatter
from collections import defaultdict
########################################################
# Config
########################################################
default_log_file = r'/var/log/postfix/mail.log'
default_log_dir = r'/var/log/postfix/' # Must end with slash '/'
########################################################
# Predefined variables
########################################################
sender_lines = []
status_lines = []
status_lines_by_type = {'bounced' : [], 'deferred' : [], 'sent' : [], 'rejected' : []}
status_types = ['bounced', 'deferred', 'sent', 'rejected']
file_random_no = randint(100000, 999990)
generated_results = defaultdict(dict)
working_dir = os.getcwd() + '/'
start_time = time.time()
# All this formatting is needed because of the Postfix date format :)
date = datetime.datetime.now()
date_today_year = date.strftime("%Y")
date_today_month = date.strftime("%b")
date_today_day = date.strftime("%d").lstrip('0')
date_today = date_today_month + " " + date_today_day
if int(date_today_day) < 10:
date_today = date_today_month + "  " + date_today_day # Postfix pads single-digit days with two spaces
########################################################
# Functions
########################################################
def get_receiver(line):
"""Return a string
Filter line and get the email receiver to=<>.
"""
receiver = re.search('(?<=to=<).*?(?=>)', line)
return receiver.group()
def get_sender(line):
"""Return a string
Filter line and get the email sender from=<>.
"""
sender = re.search('(?<=from=<).*?(?=>)', line)
return sender.group()
def get_email_subject(line):
"""Return a string
Filter line and get the email subject Subject:.
"""
subject = re.search('(?<=Subject: ).*?(?=\sfrom)', line)
return subject.group()
def get_email_status(line):
"""Return a string
Filter line and get the email status (sent, bounced, deferred, rejected).
"""
status = re.search('(?<=status=).*?(?=\s)', line)
return status.group()
def get_host_message(line, status):
"""Return a string
Filter line and get the host message located after status.
"""
message = re.search('status=' + status + ' (.*)', line)
return message.group(1)
def get_message_id(line):
"""Return a string
Filter line and get the email/message id.
"""
return line.split()[5].replace(":","")
def get_line_date(line):
"""Return a string
Filter line and get the email date (beginning of the line).
"""
return line.split()[0] + " " + str(line.split()[1])
def check_sender_line(line):
"""Return a boolean
Check if line contains specific words to validate if that's the line
we want.
"""
return 'cleanup' in line and 'from=' in line and 'Subject' in line
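# For reference, hypothetical examples of the two Postfix line shapes the
# helpers above parse (the Subject field requires the customized log format
# mentioned in the --message help text):
#   "Jul 26 10:00:01 host postfix/cleanup[123]: 4ABC12: Subject: Hello from local; from=<info@example.com>"
#   "Jul 26 10:00:02 host postfix/smtp[456]: 4ABC12: to=<user@example.com>, dsn=2.0.0, status=sent (250 ok)"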
def filter_line_sender_subject(line):
"""Return void
Filter line based on sender and subject message and
append it to predefined dicts.
"""
global args, sender_lines
if args.sender is not None and args.message is not None:
if args.sender in line and args.message in line:
sender_lines.append(line)
elif args.sender is not None and args.message is None:
if args.sender in line:
sender_lines.append(line)
elif args.message is not None and args.sender is None:
if args.message in line:
sender_lines.append(line)
else:
sender_lines.append(line)
def filter_line(line):
"""Return void
Filter line based on check_sender_line() and email status type and append to
corresponding predefined dicts
"""
global sender_lines, status_lines, status_lines_by_type, status_types
if check_sender_line(line):
filter_line_sender_subject(line)
elif args.type in status_types:
if str('status='+args.type) in line and 'to=' in line and 'dsn=' in line:
status_lines.append(line)
else:
if 'status=' in line and 'to=' in line and 'dsn=' in line :
line_email_status = get_email_status(line)
if line_email_status in status_types:
status_lines_by_type[line_email_status].append(line)
def check_if_gz(file_name):
"""Return a boolean
Check if filename ends with gz extension
"""
return file_name.endswith('.gz')
def filter_log_file(log_file):
"""Return a string
Open file and start filtering line by line.
Apply date filtering as well.
"""
global date_today, date_filter
if check_if_gz(log_file):
with gzip.open(log_file, 'rt') as log_file:
for line in log_file:
if date_filter in line:
filter_line(line)
else:
with open(log_file,'r') as log_file:
for line in log_file:
if date_filter in line:
filter_line(line)
log_file.close()
def process_line(sender_line, status_lines, status_type, file):
"""Return void
For each sender, check corresponding message status by message id, extract the required
parameters from lines and write them to generated file.
"""
global args, generated_results
message_id = get_message_id(sender_line)
sender = get_sender(sender_line)
subject = get_email_subject(sender_line)
for status_line in status_lines:
if message_id in status_line:
receiver = get_receiver(status_line)
host_message = get_host_message(status_line, status_type)
line_date = get_line_date(status_line)
generated_results[status_type] += 1
file.write(
line_date + args.output_delimiter +
sender + args.output_delimiter +
receiver + args.output_delimiter +
message_id + args.output_delimiter +
subject + args.output_delimiter +
host_message + "\n")
def write_file_header(file):
"""Return void
Writes file header that represent columns.
"""
global args
file.write(
"date" + args.output_delimiter +
"sender" + args.output_delimiter +
"receiver" + args.output_delimiter +
"message_id" + args.output_delimiter +
"subject" + args.output_delimiter +
"host_message\n")
def date_filter_formated(date_filter):
"""Return datetime
Returns the date provided to a specific format '%Y %b %d'.
"""
return datetime.datetime.strptime(datetime.datetime.now().strftime('%Y ') + date_filter, '%Y %b %d')
def date_filter_int(date_filter):
"""Return int
Returns the datetime provided to a specific format '%Y%b%d' as integer.
"""
return int(date_filter_formated(date_filter).strftime('%Y%m%d'))
def get_files_in_log_dir(default_log_dir):
"""Return list
Returns a list of files from provided directory path.
"""
all_log_files = [f for f in os.listdir(default_log_dir) if os.path.isfile(os.path.join(default_log_dir, f))]
if not all_log_files:
sys.exit("Default log directory has no files in it!")
return all_log_files
def generate_files_to_check(date_filter):
"""Return list
Based on the date filter provided as an argument (or today's date), generate the expected filenames (with their specific date format)
to look for in the log directory. Returns a list of up to two filenames.
"""
today_plusone = datetime.datetime.now() + datetime.timedelta(days = 1)
today_minusone = datetime.datetime.now() - datetime.timedelta(days = 1)
date_filter_plusone = date_filter_formated(date_filter) + datetime.timedelta(days = 1)
if (date_filter_int(date_filter) < int(datetime.datetime.now().strftime('%Y%m%d')) and
date_filter_int(date_filter) == int(today_minusone.strftime('%Y%m%d'))):
return [
'mail.log-' + datetime.datetime.now().strftime('%Y%m%d'),
'mail.log-' + date_filter_formated(date_filter).strftime('%Y%m%d') + '.gz'
]
elif (date_filter_int(date_filter) < int(datetime.datetime.now().strftime('%Y%m%d')) and
date_filter_int(date_filter) < int(today_minusone.strftime('%Y%m%d'))):
return [
'mail.log-' + date_filter_formated(date_filter).strftime('%Y%m%d') + '.gz',
'mail.log-' + date_filter_plusone.strftime('%Y%m%d') + '.gz'
]
return []
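# Hypothetical example: if today were 2017-07-28, generate_files_to_check('Jul 26')
# falls into the second branch above and returns
# ['mail.log-20170726.gz', 'mail.log-20170727.gz'].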
def populate_temp_log_file(file_name, temp_log_file):
"""Return void
Populates the combined temporary log file from the provided log in the log directory.
"""
if check_if_gz(file_name):
with gzip.open(file_name, 'rt') as gz_mail_log:
for line in gz_mail_log:
temp_log_file.write(line)
gz_mail_log.close()
else:
with open(file_name, 'r') as mail_log:
for line in mail_log:
temp_log_file.write(line)
mail_log.close()
def generate_working_log(date_filter):
"""Return void
Generates a combined working log from the different logs in the Postfix log directory, based on the date filter.
"""
global args, log_file, working_dir
log_dir_files = get_files_in_log_dir(args.log_dir)
selected_files = generate_files_to_check(date_filter)
temp_log_file = open(working_dir + 'temp-' + str(date_filter_formated(date_filter).strftime('%Y%m%d')) + '.log', 'w')
for selected_file in selected_files:
if selected_file in log_dir_files:
populate_temp_log_file(args.log_dir + selected_file, temp_log_file)
else:
print("File not found: " + selected_file)
temp_log_file.close()
log_file = working_dir + 'temp-' + str(date_filter_formated(date_filter).strftime('%Y%m%d')) + '.log'
def print_results(results):
"""Return void
Prints the end results of the file processing
"""
global args, file_random_no
print("\n************************* RESULTS *************************\n")
if results:
total = 0
for result in results:
total += results[result]
if result == 'sent':
print(result + ": \t" + str(results[result]) \
+ "\t\t" + result + "-" + str(file_random_no) \
+ "." + args.output_filetype)
else:
print(result + ":\t" + str(results[result]) + "\t\t" \
+ result + "-" + str(file_random_no) \
+ "." + args.output_filetype)
print("\n-----\nTotal:\t\t" + str(total))
else:
print('Results could not be printed')
print("\n***********************************************************")
if __name__ == "__main__":
########################################################
# Argument(s) Parser
########################################################
parser = argparse.ArgumentParser(description='Filter and parse Postfix log files.', formatter_class=RawTextHelpFormatter)
parser.add_argument('-d', '--date',
dest='date',
default=date_today,
metavar='',
help='''Specify different date. Default is current date.\nFormat: Jan 20 (note one space) &
Jan 2 (note two spaces).\nDefault is todays date: ''' + date_today + '\n\n')
parser.add_argument('-t', '--type',
dest='type',
default='all',
metavar='',
help='Type of email status: bounced, sent, rejected, deferred.\nDefault is all.\n\n')
parser.add_argument('-s', '--sender',
dest='sender',
metavar='',
help='Specify senders address in order to query logs matching this parameter\n\n')
parser.add_argument('-m', '--message',
dest='message',
metavar='',
help='''Postfix default log format must be changed for this option to work.
Add subject message in logs, and then you can use this option to query\nthose emails with specific subject message.\n\n''')
parser.add_argument('-l', '--log',
dest='log',
default=default_log_file,
metavar='',
help='Specify the log file you want to use.\nDefault is: ' + default_log_file + '\n\n')
parser.add_argument('--log-dir',
dest='log_dir',
default=default_log_dir,
metavar='',
help='Specify the log directory.\nDefault is: ' + default_log_dir + '\n\n')
parser.add_argument('--output-directory',
dest='output_directory',
default=working_dir,
metavar='',
help='Specify the generated file(s) directory.\nDefault is current working directory: ' + working_dir + '\n\n')
parser.add_argument('--output-delimiter',
dest='output_delimiter',
default=';',
metavar='',
help='Specify the generated output delimiter.\nDefault is ";"\n\n')
parser.add_argument('--output-filetype',
dest='output_filetype',
default='csv',
metavar='',
help='Specify the generated output file type.\nDefault is "csv"\n\n')
args = parser.parse_args()
## Validate arguments
log_file = default_log_file
date_filter = date_today
# Check if provided parameters are valid
if os.path.isfile(args.log) is not True:
parser.error('Provided log file does not exist: ' + args.log)
if args.output_directory != working_dir and args.output_directory.endswith('/') is not True:
parser.error('Generated output file(s) directory must end with slash "/"')
if args.log_dir != default_log_dir and args.log_dir.endswith('/') is not True:
parser.error('Log directory must end with slash "/"')
if os.path.exists(args.output_directory) is not True:
parser.error('Generated output file(s) directory does not exist: ' + args.output_directory)
if os.path.exists(args.log_dir) is not True:
parser.error('This log directory does not exist in this system: ' + args.log_dir + '\nMaybe provide a different log dir with --log-dir')
# If date provided, change date filter to the provided one
if args.date != date_filter:
date_filter = args.date
# If log provided, change default log file to provided one
if args.log != log_file:
log_file = args.log
########################################################
# Execution / Log parsing and filtering
########################################################
# Check if provided date is valid
if int(date_filter_formated(date_filter).strftime('%Y%m%d')) > int(datetime.datetime.now().strftime('%Y%m%d')):
sys.exit("Provided date format is wrong or higher than today's date!")
# In case the date filter is provided, and it is different from today,
# it means that we will have to generate a temp log which contains
# combined logs from default log dir (gzip logrotated files included)
if date_filter != date_today and log_file == default_log_file:
generate_working_log(date_filter)
# Start filtering log file based on provided filters
filter_log_file(log_file)
# If there were no senders/filter matches, exit
if not sender_lines:
sys.exit("No matching lines found to be processed with provided filters in log file (" + log_file + "). Exiting...")
# Start parsing
# If message status type provided, filter only those messages
if args.type in status_types:
generated_results[args.type] = 0
with open(args.output_directory + args.type + '-' \
+ str(file_random_no) + '.' \
+ args.output_filetype, 'w') as generated_file:
write_file_header(generated_file)
for sender_line in sender_lines:
process_line(sender_line, status_lines, args.type, generated_file)
generated_file.close()
# Else, filter all status types (bounced, sent, rejected, deferred)
else:
for status_type in status_types:
generated_results[status_type] = 0
with open(args.output_directory + status_type + '-' \
+ str(file_random_no) + '.' \
+ args.output_filetype, 'w') as generated_file:
write_file_header(generated_file)
for sender_line in sender_lines:
process_line(sender_line, status_lines_by_type[status_type], \
status_type, generated_file)
generated_file.close()
# Generate and print results
print_results(generated_results)
print("--- %s seconds ---" % (time.time() - start_time))
| 31.460208
| 144
| 0.592444
| 2,245
| 18,184
| 4.606236
| 0.138976
| 0.042549
| 0.012571
| 0.013828
| 0.372208
| 0.290204
| 0.247268
| 0.213616
| 0.165458
| 0.158689
| 0
| 0.002851
| 0.267048
| 18,184
| 577
| 145
| 31.514731
| 0.773034
| 0.179333
| 0
| 0.165493
| 0
| 0.003521
| 0.163503
| 0.013132
| 0
| 0
| 0
| 0.001733
| 0
| 1
| 0.073944
| false
| 0
| 0.035211
| 0
| 0.161972
| 0.038732
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9576382290725337f6455bafa4ade3618c4bd12
| 8,349
|
py
|
Python
|
pod.py
|
ddh0/pod
|
5c630f609db6d4e2d6704874144faf9fe64ee15b
|
[
"MIT"
] | 1
|
2020-11-20T16:35:07.000Z
|
2020-11-20T16:35:07.000Z
|
pod.py
|
ddh0/pod
|
5c630f609db6d4e2d6704874144faf9fe64ee15b
|
[
"MIT"
] | null | null | null |
pod.py
|
ddh0/pod
|
5c630f609db6d4e2d6704874144faf9fe64ee15b
|
[
"MIT"
] | null | null | null |
# Program that downloads all episodes of a podcast
# Features:
# - Functions: add, remove, update
# - Run the file directly to update, without having to use the Python interpreter
# - Download all episodes of a podcast and put them into the correct folder
# - Tag each file with metadata from the feed and the stored config
import os
import sys
import pickle
import requests
import datetime
import feedparser
import subprocess
STORAGE_DIR = "C:\\Users\\Dylan\\Python\\pod\\storage\\"
LOGFILE = "C:\\Users\\Dylan\\Python\\pod\\log.txt"
FFMPEG_PATH = "C:\\Users\\Dylan\\Python\\pod\\ffmpeg.exe"
TEMP_DIR = "C:\\Users\\Dylan\\AppData\\Local\\Temp\\"
debug = False
class Podcast:
"""For internal use. Class to hold attributes of a podcast."""
def __init__(self, name: str, feed: str, storage_dir: str, prefix: str,
album: str, artist: str, year: str, art: str):
self.name = name
self.feed = feed
self.storage_dir = storage_dir
self.prefix = prefix
self.album = album
self.artist = artist
self.year = year
self.art = art
def log(text):
"""For internal use. Easily log events.
To display these events onscreen as they occur, set pod.debug = True."""
if debug: print("--debug: " + text)
with open(LOGFILE, 'a') as log:
log.write(datetime.datetime.now().isoformat() + ': ' + str(text) + '\n')
def add():
"""Creates a stored configuration file for the given feed, "*.pod", so that
the feed can be checked quickly without having to specify the URL or metadata again."""
podcast_obj = Podcast(
input("Podcast name: "),
input("Feed URL: "),
input("Storage dir: "),
input("Prefix: "),
input("Album: "),
input("Artist: "),
input("Release year: "),
input("Album art URL: ")
)
with open(STORAGE_DIR + podcast_obj.name + '.pod', 'wb') as file:
pickle.dump(podcast_obj, file)
def remove():
"""Removes the configuration file associated with the given podcast."""
name = input("Name of podcast to remove: ")
if os.path.exists(STORAGE_DIR + name + '.pod'):
os.remove(STORAGE_DIR + name + '.pod')
else:
print('-- %s does not exist' % name)
def update():
"""Checks for new entries from all feeds, download and tag new episodes."""
# For each stored podcast config
for file in os.listdir(STORAGE_DIR):
with open(STORAGE_DIR + file, 'rb') as f:
podcast_obj = pickle.load(f)
log("Updating podcast: %s" % podcast_obj.name)
print('Updating "%s":' % podcast_obj.name)
# Get feed
feed = feedparser.parse(podcast_obj.feed)
length = len(feed.entries)
# Create storage dir if it does not exist
if not os.path.exists(podcast_obj.storage_dir):
os.mkdir(podcast_obj.storage_dir)
# Download image if it does not exist
image_path = podcast_obj.storage_dir + podcast_obj.prefix + "_Album_Art.png"
if not os.path.exists(image_path):
print("Downloading podcast cover art...")
log("Downloading image")
response = requests.get(podcast_obj.art)
with open(image_path, 'wb') as imgfile:
imgfile.write(response.content)
# Set podcast-specific metadata
# image_path set above, title set per-episode
album = podcast_obj.album
artist = podcast_obj.artist
year = podcast_obj.year
# Get episodes from feed in chronological order
for i in range(length-1, -1, -1):
# Get current episode number
ep_num = length - i
display_prefix = podcast_obj.prefix + "_" + str(ep_num).zfill(3)
# Get episode title
title = feed.entries[i].title
# Get episode URL
episode_url = "" # Variables for
x = 0 # the while loop
skip_this_item = False
while ('.mp3' not in episode_url and
'.wav' not in episode_url and
'.m4a' not in episode_url):
try:
episode_url = feed.entries[i]['links'][x]['href']
except (IndexError, KeyError): # ran out of feed links without finding an audio URL
skip_this_item = True
break
log("episode_url: %s" % episode_url)
x += 1
if ".mp3" in episode_url:
ext = ".mp3"
if ".wav" in episode_url:
ext = ".wav"
if ".m4a" in episode_url:
ext = ".m4a"
# Get full episode destination path
# xpath is the temporary file as it was downloaded with only the name changed
# path is the final file
xpath = TEMP_DIR + display_prefix + "X" + ext
path = podcast_obj.storage_dir + display_prefix + ext
# Skip this episode if already downloaded
if os.path.exists(path):
continue
if skip_this_item:
print(display_prefix + ": Skipped due to file extension (item likely not audio)")
log(display_prefix + ": Skipped due to file extension (item likely not audio)")
skip_this_item = False
continue
# Show which episode is in progress
print(display_prefix + ': Downloading...')
log('In progress: %s' % display_prefix)
# Download episode
HEADER_STRING = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.66'}
response = requests.get(episode_url, headers=HEADER_STRING)
# Fail if the downloaded content is smaller than 1MB
if len(response.content) < 1000000:
log("FATAL ERROR: response.content = %s bytes" % len(response.content))
raise IOError("-- response.content was only %s bytes" % len(response.content))
# Fail upon bad HTTP status code
if not response.ok:
log("FATAL ERROR: Bad response: status code %s" % response.status_code)
raise ConnectionError("-- Response not ok, status code %s" % response.status_code)
# Write mp3 data to file
# Since this is done after the download is complete, interruptions will only break episodes
# if they occur during the file being written to disk. If the script is interrupted during download,
# the script will simply restart the download of the interrupted episode on the next run.
with open(xpath, 'wb') as f:
f.write(response.content)
# Write correct metadata to clean file
# Force using ID3v2.3 tags for best results
# Only fatal errors will be displayed
print(display_prefix + ": Writing correct metadata...")
log("Writing metadata")
subprocess.run([FFMPEG_PATH, "-i", xpath, "-i", image_path, "-map", "0:0", "-map", "1:0", "-codec", "copy",
"-id3v2_version", "3", "-metadata:s:v", 'title="Album cover"',"-metadata:s:v", 'comment="Cover (front)"',
"-metadata", "track=" + str(ep_num),
"-metadata", "title=" + title,
"-metadata", "album=" + album,
"-metadata", "album_artist=" + artist,
"-metadata", "artist=" + artist,
"-metadata", "year=" + year,
"-metadata", "genre=Podcast",
"-loglevel", "fatal", path])
# Delete temporary file
os.remove(xpath)
log("Download complete: %s" % path)
log("Update complete.")
print("Files located in the following folder: %s" % podcast_obj.storage_dir)
if __name__ == '__main__':
update()
| 39.947368
| 177
| 0.548928
| 979
| 8,349
| 4.587334
| 0.28907
| 0.04008
| 0.016032
| 0.022267
| 0.114006
| 0.046315
| 0.024939
| 0.024939
| 0.024939
| 0.024939
| 0
| 0.01247
| 0.346868
| 8,349
| 208
| 178
| 40.139423
| 0.811113
| 0.21811
| 0
| 0.03125
| 0
| 0.007813
| 0.207493
| 0.025456
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039063
| false
| 0
| 0.054688
| 0
| 0.101563
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9577ac9ab9b2574ecfc469b539a86e4c283b783
| 1,954
|
py
|
Python
|
threading_ext/RecordingThread.py
|
Antoine-BL/chess-ai.py
|
c68ca76063c14b1b8b91d338c8cead9f411521ca
|
[
"MIT"
] | 2
|
2019-08-21T15:52:29.000Z
|
2021-09-11T23:07:17.000Z
|
threading_ext/RecordingThread.py
|
Antoine-BL/chess-ai.py
|
c68ca76063c14b1b8b91d338c8cead9f411521ca
|
[
"MIT"
] | 5
|
2020-09-25T23:15:31.000Z
|
2022-02-10T00:07:33.000Z
|
threading_ext/RecordingThread.py
|
Antoine-BL/EuroTruck-ai.py
|
c68ca76063c14b1b8b91d338c8cead9f411521ca
|
[
"MIT"
] | null | null | null |
import time
import numpy as np
import cv2
from mss import mss
from threading_ext.GameRecorder import GameRecorder
from threading_ext.PausableThread import PausableThread
class RecordingThread(PausableThread):
def __init__(self, training_data_path: str, session_number: int, recorder: GameRecorder):
super(RecordingThread, self).__init__()
self.recorder = recorder
self.training_data = []
self.training_data_path = training_data_path
self.session_number = session_number
def run(self):
monitor = {"top": 40, "left": 0, "width": 1024, "height": 728}
s_to_ms = 1000
with mss() as sct:
while not self.killed:
start_time_ms = round(time.time() * s_to_ms, 0)
screen = np.asarray(sct.grab(monitor))
screen = cv2.resize(screen, (480, 270))
screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
self.training_data.append([screen, np.asarray(self.recorder.flattened_state())])
self.sleep_if_paused()
self.save_if_necessary()
self.wait(start_time_ms)
def save_if_necessary(self):
if len(self.training_data) % 100 == 0:
print(len(self.training_data))
if len(self.training_data) == 500:
np.save(self.training_data_path.format(self.session_number), self.training_data)
print('saved_data in file nb {}'.format(self.session_number))
self.session_number += 1
self.training_data = []
def wait(self, start_time_ms: int):
delay_ms = 1000 / 6 # target interval for roughly 6 frames per second
end_time_ms = round(time.time() * 1000, 0)
duration_ms = end_time_ms - start_time_ms
print('loop time {}ms'.format(duration_ms))
time.sleep(max((delay_ms - duration_ms)/1000, 0))
def rewind(self):
self.session_number -= 1
self.training_data = []
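# Minimal usage sketch (hypothetical: assumes GameRecorder can be constructed
# with no arguments and that PausableThread exposes the usual start/pause API;
# adjust to the actual classes):
#
#     recorder = GameRecorder()
#     thread = RecordingThread('data/session_{}.npy', 0, recorder)
#     thread.start()  # grabs ~6 frames/s and saves every 500 samples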
| 33.689655 | 96 | 0.619754 | 244 | 1,954 | 4.704918 | 0.336066 | 0.125436 | 0.15331 | 0.052265 | 0.172474 | 0.059233 | 0.059233 | 0 | 0 | 0 | 0 | 0.035361 | 0.276356 | 1,954 | 57 | 97 | 34.280702 | 0.776521 | 0 | 0 | 0.069767 | 0 | 0 | 0.028659 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116279 | false | 0 | 0.139535 | 0 | 0.27907 | 0.069767 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f957da3a4215ef9104b40d885730febc525fd16f | 638 | py | Python | multicast/mtc_recv.py | Tatchakorn/Multi-threaded-Server- | d5502a3da942e06736d07efc8d64186bc03a23d7 | ["Beerware"] | 2 | 2021-11-11T12:14:35.000Z | 2021-12-07T15:03:41.000Z | multicast/mtc_recv.py | Tatchakorn/Multi-threaded-Server- | d5502a3da942e06736d07efc8d64186bc03a23d7 | ["Beerware"] | null | null | null | multicast/mtc_recv.py | Tatchakorn/Multi-threaded-Server- | d5502a3da942e06736d07efc8d64186bc03a23d7 | ["Beerware"] | null | null | null |
#! /usr/bin/python3
import threading
import socket
from test import create_upd_clients
from client import multicast_receive
def test_multicast_receive():
clients = create_upd_clients(3)
def run(client: socket.socket) -> None:
multicast_receive(client)
threads = [threading.Thread(name=f'client_[{i+1}]', target=run, args=(client,))
for i, client in enumerate(clients)]
for t in threads: t.start()
for t in threads: t.join()
if __name__ == '__main__':
try:
test_multicast_receive()
except Exception as e:
print(e)
finally:
input('Press [ENTER]...')
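create_upd_clients() and multicast_receive() live in this project's test and client modules and are not shown here. As a rough sketch of what a UDP multicast receiver typically looks like with only the standard library (the group address and port are made up for illustration):

import socket
import struct

GROUP, PORT = "224.1.1.1", 5007  # hypothetical multicast group and port

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("", PORT))
# Join the group on all interfaces so the kernel delivers its datagrams
mreq = struct.pack("4sl", socket.inet_aton(GROUP), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
data, addr = sock.recvfrom(1024)  # blocks until a datagram arrives
print(addr, data)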
| 24.538462 | 84 | 0.652038 | 83 | 638 | 4.783133 | 0.53012 | 0.161209 | 0.080605 | 0.065491 | 0.070529 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006135 | 0.233542 | 638 | 26 | 85 | 24.538462 | 0.805726 | 0.028213 | 0 | 0 | 0 | 0 | 0.06129 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.210526 | 0 | 0.315789 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f958f1f208280fca2b61c5a648551399de305a52 | 2,135 | py | Python | train/name_test.py | csgwon/dl-pipeline | 5ac2cdafe0daac675d3f3e810918133de3466f8a | ["Apache-2.0"] | 7 | 2018-06-26T13:09:12.000Z | 2020-07-15T18:18:38.000Z | train/name_test.py | csgwon/dl-pipeline | 5ac2cdafe0daac675d3f3e810918133de3466f8a | ["Apache-2.0"] | null | null | null | train/name_test.py | csgwon/dl-pipeline | 5ac2cdafe0daac675d3f3e810918133de3466f8a | ["Apache-2.0"] | 1 | 2018-08-30T19:51:08.000Z | 2018-08-30T19:51:08.000Z |
from tools import *
from model import *
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
class NamesDataset(Dataset):
"""Name Classification dataset"""
def __init__(self, path):
self.data = pd.read_csv(path, sep='\t').dropna()
self.X = self.data['name']
self.y = self.data['label']
def __len__(self):
return len(self.X)
def __getitem__(self, index):
content = torch.from_numpy(encode_input(self.data['name'][index])).float()
label = label_to_number[self.data['label'][index]]
sample = {'X': content, 'y': label}
return sample
name_dataset = NamesDataset('data/names/names_train_new.csv')
dataloader = DataLoader(name_dataset, batch_size=32, shuffle=True, num_workers=0)
charcnn = CharCNN(n_classes=len(set(name_dataset.data['label'])), vocab_size=len(chars), max_seq_length=max_name_len)
criterion = nn.CrossEntropyLoss()
from tqdm import tqdm_notebook
def train(model, dataloader, num_epochs):
cuda = torch.cuda.is_available()
if cuda:
model.cuda()
optimizer = torch.optim.Adam(model.parameters())
loss_history_avg = []
loss_history = []
#bar = tqdm_notebook(total=len(dataloader))
for i in range(num_epochs):
per_epoch_losses = []
for batch in dataloader:
X = Variable(batch['X'])
y = Variable(batch['y'])
if cuda:
X = X.cuda()
y = y.cuda()
model.zero_grad()
outputs = model(X)
loss = criterion(outputs, y)
loss.backward()
optimizer.step()
per_epoch_losses.append(loss.item())  # .data[0] indexing was removed in modern PyTorch
#bar.set_postfix(loss=loss.data[0])
#bar.update(1)
loss_history_avg.append(np.mean(per_epoch_losses))
loss_history.append(loss.item())
print('epoch[%d] loss: %.4f' % (i, loss.item()))
return loss_history, loss_history_avg
loss_history, loss_history_avg = train(charcnn, dataloader, 100)
torch.save(charcnn, 'charcnn.pth')
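encode_input(), label_to_number, chars and max_name_len come from the project's tools/model modules (imported with *) and are not shown. A plausible sketch of the character-level one-hot encoding a CharCNN typically consumes; the alphabet and max length here are assumptions, not the project's actual values:

import numpy as np

ALPHABET = "abcdefghijklmnopqrstuvwxyz"  # hypothetical vocabulary
MAX_LEN = 32                             # hypothetical max sequence length
CHAR_TO_IDX = {c: i for i, c in enumerate(ALPHABET)}

def one_hot_encode(name: str) -> np.ndarray:
    # Build a (vocab_size, max_seq_length) one-hot matrix, truncating long names
    out = np.zeros((len(ALPHABET), MAX_LEN), dtype=np.float32)
    for pos, ch in enumerate(name.lower()[:MAX_LEN]):
        if ch in CHAR_TO_IDX:
            out[CHAR_TO_IDX[ch], pos] = 1.0
    return out

print(one_hot_encode("Smith").shape)  # (26, 32)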
| 32.348485 | 109 | 0.635597 | 281 | 2,135 | 4.633452 | 0.377224 | 0.067588 | 0.043011 | 0.02765 | 0.057604 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007348 | 0.235129 | 2,135 | 65 | 110 | 32.846154 | 0.789957 | 0.054801 | 0 | 0.039216 | 0 | 0 | 0.044776 | 0.014925 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078431 | false | 0 | 0.156863 | 0.019608 | 0.313725 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f95de109f7f76174c635351d3c9d2f28ebfb7d06 | 3,651 | py | Python | descartes_rpa/fetch/descartes.py | reactome/descartes | 7e7f21c5ccdf42b867db9e68fe0cb7a17d06fb25 | ["Apache-2.0"] | 2 | 2021-08-02T18:09:07.000Z | 2022-01-18T08:29:59.000Z | descartes_rpa/fetch/descartes.py | reactome/descartes | 7e7f21c5ccdf42b867db9e68fe0cb7a17d06fb25 | ["Apache-2.0"] | 5 | 2021-06-22T22:27:23.000Z | 2021-08-04T02:04:09.000Z | descartes_rpa/fetch/descartes.py | reactome/descartes_rpa | 7e7f21c5ccdf42b867db9e68fe0cb7a17d06fb25 | ["Apache-2.0"] | null | null | null |
import requests
import shutil
import pandas as pd
from typing import Dict, List
def fetch_descartes_human_tissue(out_file: str, verbose: bool = True) -> None:
"""Function to fetch Loom Single-Cell tissue data from
Descartes human database.
Args:
out_file: Output file that is going to store .loom data
verbose: If True (default), print statements about download
Examples:
>>> fetch_descartes_human_tissue("Human_Tissue.loom")
"""
url = (
"https://shendure-web.gs.washington.edu/content/members/cao1025/"
"public/FCA_RNA_supp_files/scanpy_cells_all/"
"Human_RNA_processed.loom"
)
if verbose:
print("Downloading Human Single-Cell data from Descartes database")
print(f"data url: {url}")
with requests.get(url, stream=True, timeout=60) as data:
with open(out_file, 'wb') as out:
shutil.copyfileobj(data.raw, out)
if verbose:
print(f"Downloaded data to {out_file}")
def fetch_descartes_by_tissue(
list_tissues: List[str],
out_dir: str,
verbose: bool = True
) -> None:
"""Function to fetch Loom Single-Cell tissue data from
Descartes human database by choosing which tissues will be downloaded.
Args:
list_tissues: List of tissues names to be downloaded.
out_dir: Output directory that is going to store .loom data.
verbose: If True (default), print statements about download.
Examples:
>>> fetch_descartes_by_tissue(
list_tissues=["Thymus", "Heart"],
out_dir="data"
)
"""
base_url = (
"https://shendure-web.gs.washington.edu/content/members/cao1025/"
"public/FCA_RNA_supp_files/scanpy_cells_by_tissue"
)
for tissue in list_tissues:
url = f"{base_url}/{tissue}_processed.loom"
if verbose:
print((
f"Downloading {tissue} tissue Human Single-Cell data "
"from Descartes database"
))
print(f"data url: {url}")
file_name = f"{out_dir}/{tissue}_data.loom"
with requests.get(url, stream=True, timeout=60) as data:
with open(file_name, 'wb') as out:
shutil.copyfileobj(data.raw, out)
if verbose:
print(f"Downloaded {file_name} to {out_dir}")
def fetch_de_genes_for_cell_type(
verbose: bool = False
) -> Dict[str, List[str]]:
"""Function to fetch Differentially Expressed (DE) genes from Descartes
Human Atlas from 77 Main Cell types found in 15 Organs.
Args:
verbose: If True, print statements about download (defaults to False)
Returns:
Dictionary mapping each main cell type to its differentially
expressed genes. Example: {
"Acinar cells": ["MIR1302-11", "FAM138A", ...],
"Myeloid cells": ["CU459201.1", "OR4G4P", ...] ...
}
"""
url = (
"https://atlas.fredhutch.org/data/bbi/descartes/human_gtex/"
"downloads/data_summarize_fetus_data/DE_gene_77_main_cell_type.csv"
)
if verbose:
print((
"Downloading Human Single-Cell Differentially Expressed"
"genes for 77 Main Cell types found in 15 Organs."
))
print(f"data url: {url}")
de_df = pd.read_csv(url)
cell_types = de_df["max.cluster"].unique()
de_mapping = {}
for cell_type in cell_types:
list_genes = de_df[
de_df["max.cluster"] == cell_type
]["gene_id"].tolist()
list_genes = [gene.replace("'", "") for gene in list_genes]
de_mapping[cell_type] = list_genes
return de_mapping
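A short usage sketch, assuming the module is importable under the path shown in its repository (descartes_rpa/fetch/descartes.py); tissue names must match the Descartes file names, and the output directory must already exist:

from descartes_rpa.fetch.descartes import (
    fetch_descartes_by_tissue,
    fetch_de_genes_for_cell_type,
)

fetch_descartes_by_tissue(list_tissues=["Thymus"], out_dir="data")
de_genes = fetch_de_genes_for_cell_type(verbose=True)
print(list(de_genes)[:3])  # a few of the 77 main cell types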
| 30.940678 | 78 | 0.621747 | 462 | 3,651 | 4.751082 | 0.292208 | 0.038269 | 0.031891 | 0.027335 | 0.525285 | 0.499772 | 0.476082 | 0.446469 | 0.397267 | 0.397267 | 0 | 0.015072 | 0.273076 | 3,651 | 117 | 79 | 31.205128 | 0.811982 | 0.314434 | 0 | 0.3125 | 0 | 0 | 0.338962 | 0.102153 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046875 | false | 0 | 0.0625 | 0 | 0.125 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f96072c2c69a90c36b742be295db7b6791bf37ec | 1,173 | py | Python | src/Support/Scripts/AlignOperators.py | bobthecow/ManipulateCoda | 5a0e03fb535cfc623070ddd44a9e09d34d313193 | ["MIT"] | 5 | 2015-01-05T21:44:18.000Z | 2017-09-08T09:31:44.000Z | src/Support/Scripts/AlignOperators.py | bobthecow/ManipulateCoda | 5a0e03fb535cfc623070ddd44a9e09d34d313193 | ["MIT"] | 3 | 2015-01-06T15:21:58.000Z | 2019-04-09T12:03:13.000Z | src/Support/Scripts/AlignOperators.py | bobthecow/ManipulateCoda | 5a0e03fb535cfc623070ddd44a9e09d34d313193 | ["MIT"] | null | null | null |
'''Line up operators...'''
import cp_actions as cp
import re
def act(controller, bundle, options):
'''
Required action method
'''
context = cp.get_context(controller)
line_ending = cp.get_line_ending(context)
lines, range = cp.lines_and_range(context)
newlines = line_ending.join(balance_operators(lines.split(line_ending)))
cp.insert_text_and_select(context, newlines, range, cp.new_range(range.location, len(newlines)))
def balance_operators(lines):
r = re.compile(r"^(.*[^\s])\s*((?:==?|<<|>>|&|\||\^)=|=[>&\*]|(?<![\.\+\-\*\/~%])[\.\+\-\*\/~%]?=)\s*([^\s].*)$")
vars = []
ops = []
vals = []
ret = []
for line in lines:
result = r.match(line)
if result:
vars.append(result.group(1))
ops.append(result.group(2))
vals.append(result.group(3))
for line in lines:
result = r.match(line)
if result:
ret.append(" ".join((result.group(1).ljust(len(max(vars, key=len))), result.group(2).rjust(len(max(ops, key=len))), result.group(3))))
else:
ret.append(line)
return ret
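A quick illustration of what balance_operators() does when run in the same module against a few assignment lines; left-hand sides are padded to the longest name and operators to the widest operator:

sample = [
    "x = 1",
    "total += x",
    "flags |= 0x4",
]
for aligned in balance_operators(sample):
    print(aligned)
# Prints the three assignments with their operators lined up in one column.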
| 27.27907 | 146 | 0.546462 | 142 | 1,173 | 4.408451 | 0.387324 | 0.105431 | 0.08147 | 0.044728 | 0.121406 | 0.121406 | 0.121406 | 0.121406 | 0.121406 | 0.121406 | 0 | 0.006818 | 0.249787 | 1,173 | 43 | 147 | 27.27907 | 0.704545 | 0.036658 | 0 | 0.222222 | 0 | 0 | 0.085586 | 0.084685 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.074074 | 0 | 0.185185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f9635ddcc69eb4603c2a017bb384ecbb61ddeafe | 1,333 | py | Python | continuous-variables/literature-code-in-python/random_reaction_gen.py | YANGZ001/OrganicChem-LabMate-AI | fb826d85dd852aab987b9bef6856d8da6a4bd9be | ["MIT"] | null | null | null | continuous-variables/literature-code-in-python/random_reaction_gen.py | YANGZ001/OrganicChem-LabMate-AI | fb826d85dd852aab987b9bef6856d8da6a4bd9be | ["MIT"] | null | null | null | continuous-variables/literature-code-in-python/random_reaction_gen.py | YANGZ001/OrganicChem-LabMate-AI | fb826d85dd852aab987b9bef6856d8da6a4bd9be | ["MIT"] | null | null | null |
import numpy as np
import pandas as pd
import itertools
'''
Section below creates lists for your reaction parameters. Change names of lists where appropriate
'''
#For bigger lists use np.arange(min_value, max_value, step)
Pyridine = [0.1, 0.2, 0.3] # in mmol
Aldehyde = [0.1, 0.2, 0.3] # in mmol
Isocyanide = [0.1, 0.2, 0.3] # in mmol
Temperature = [10, 20, 40, 60, 80] # in C
Solvent = [0.1, 0.25, 0.5, 1, 1.5] # in mL
Catalyst = [0, 1, 2, 3, 4, 5, 7.5, 10] # in mol%
Time = [5, 10, 15, 30, 60] # in minutes
'''
The following lines create all possible combinations of the values listed above and save them as a text file. Change names where appropriate.
'''
combinations = list(itertools.product(Pyridine, Aldehyde, Isocyanide, Temperature, Solvent, Catalyst, Time))
df = pd.DataFrame(combinations)
df.to_csv('all_combos716.txt', sep = '\t', header = ['Pyridine', 'Aldehyde', 'Isocyanide', 'Temperature', 'Solvent', 'Catalyst', 'Time'])
'''
Below, 10 random reactions are selected from all possible combinations and stored in a text file. Change the header names as appropriate.
'''
random_data = df.sample(n=10, random_state=1)
df_random_data = pd.DataFrame(random_data)
df_random_data.to_csv('train_data716.txt', sep= '\t', header = ['Pyridine', 'Aldehyde', 'Isocyanide', 'Temperature', 'Solvent', 'Catalyst', 'Time'])
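The full factorial above grows multiplicatively, so it is worth checking the grid size before writing it out. If appended to the script, this check reports 3*3*3*5*5*8*5 = 27,000 combinations:

import math

n_combos = math.prod(map(len, [Pyridine, Aldehyde, Isocyanide,
                               Temperature, Solvent, Catalyst, Time]))
print(n_combos)  # 27000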
| 37.027778 | 150 | 0.701425 | 212 | 1,333 | 4.349057 | 0.443396 | 0.010846 | 0.013015 | 0.013015 | 0.249458 | 0.249458 | 0.249458 | 0.18872 | 0.149675 | 0.149675 | 0 | 0.060124 | 0.151538 | 1,333 | 35 | 151 | 38.085714 | 0.755084 | 0.084021 | 0 | 0 | 0 | 0 | 0.184729 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1875 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f9686a6e64b3ada450c52aa9db27ba394fa0f073 | 2,241 | py | Python | mechroutines/es/newts/_fs.py | keceli/mechdriver | 978994ba5c77b6df00078b639c4482dacf269440 | ["Apache-2.0"] | null | null | null | mechroutines/es/newts/_fs.py | keceli/mechdriver | 978994ba5c77b6df00078b639c4482dacf269440 | ["Apache-2.0"] | null | null | null | mechroutines/es/newts/_fs.py | keceli/mechdriver | 978994ba5c77b6df00078b639c4482dacf269440 | ["Apache-2.0"] | 8 | 2019-12-18T20:09:46.000Z | 2020-11-14T16:37:28.000Z |
""" rpath task function
"""
from mechlib import filesys
from mechlib.filesys import build_fs
from mechlib.filesys import root_locs
def rpath_fs(ts_dct, tsname,
mod_ini_thy_info,
es_keyword_dct,
run_prefix, save_prefix):
""" reaction path filesystem
"""
# Set up coordinate name
rxn_coord = es_keyword_dct.get('rxncoord')
# Get the zma and ts locs
zma_locs = (ts_dct['zma_idx'],)
ts_locs = (int(tsname.split('_')[-1]),)
# Build filesys object down to TS FS
ts_fs = build_fs(
run_prefix, save_prefix, 'TRANSITION STATE',
thy_locs=mod_ini_thy_info[1:],
**root_locs(ts_dct, saddle=True))
ini_ts_run_fs, ini_ts_save_fs = ts_fs
# Defaults for the non-IRC branch so the return statement below never hits a NameError
cnf_fs = None
ini_min_locs = ()
# generate fs
if rxn_coord == 'irc':
# Try and locate a minimum-energy conformer
cnf_fs = build_fs(
run_prefix, save_prefix, 'CONFORMER',
thy_locs=mod_ini_thy_info[1:],
**root_locs(ts_dct, saddle=True, name=tsname))
ini_cnf_run_fs, ini_cnf_save_fs = cnf_fs
ini_loc_info = filesys.mincnf.min_energy_conformer_locators(
ini_cnf_save_fs, mod_ini_thy_info)
ini_min_locs, ini_pfx_save_path = ini_loc_info
if any(ini_min_locs):
# Run IRC from saddle point minimum-energy conformer
ini_pfx_run_path = ini_cnf_run_fs[-1].path(ini_min_locs)
ini_pfx_save_path = ini_cnf_save_fs[-1].path(ini_min_locs)
scn_alg = 'irc-sadpt'
else:
# Run IRC from series of points {Rmax, Rmax-1, ...}
ini_pfx_run_path = ini_ts_run_fs[-1].path(ts_locs)
ini_pfx_save_path = ini_ts_save_fs[-1].path(ts_locs)
scn_alg = 'irc-rmax'
else:
# Run a scan along the requested reaction coordinates
# Have an auto option that just selects the coordinate?
ini_pfx_run_path = ini_ts_run_fs[-1].path(ts_locs)
ini_pfx_save_path = ini_ts_save_fs[-1].path(ts_locs)
scn_alg = 'drp'
# Set up the scan filesystem objects using the predefined prefix
scn_fs = build_fs(
ini_pfx_run_path, ini_pfx_save_path, 'SCAN',
zma_locs=zma_locs)
return scn_alg, scn_fs, cnf_fs, ini_min_locs
| 33.954545 | 70 | 0.646586 | 345 | 2,241 | 3.805797 | 0.26087 | 0.053313 | 0.031988 | 0.053313 | 0.309216 | 0.284844 | 0.266565 | 0.223915 | 0.182788 | 0.182788 | 0 | 0.006072 | 0.26506 | 2,241 | 65 | 71 | 34.476923 | 0.791135 | 0.203481 | 0 | 0.2 | 0 | 0 | 0.038593 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.075 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f968be6f1cca8629346c90e2699c898d9571ac20 | 1,361 | py | Python | computation/listallfiles.py | thirschbuechler/didactic-barnacles | 88d0a2b572aacb2cb45e68bb4f05fa5273224439 | ["MIT"] | null | null | null | computation/listallfiles.py | thirschbuechler/didactic-barnacles | 88d0a2b572aacb2cb45e68bb4f05fa5273224439 | ["MIT"] | null | null | null | computation/listallfiles.py | thirschbuechler/didactic-barnacles | 88d0a2b572aacb2cb45e68bb4f05fa5273224439 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 29 14:38:54 2020
@author: https://stackoverflow.com/questions/18262293/how-to-open-every-file-in-a-folder
"""
import os #os module imported here
location = os.getcwd() # get present working directory location here
counter = 0 #keep a count of all files found
csvfiles = [] #list to store all csv files found at location
filebeginwithhello = [] # list to keep all files that begin with 'hello'
otherfiles = [] #list to keep any other file that do not match the criteria
for file in os.listdir(location):
try:
if file.endswith(".csv"):
print( "csv file found:\t", file)
csvfiles.append(str(file))
counter = counter+1
elif file.endswith(".csv"): #because some files may start with hello and also be a csv file
print( "csv file found:\t", file)
csvfiles.append(str(file))
counter = counter+1
elif file.startswith("hello"):
print( "hello files found: \t", file)
filebeginwithhello.append(file)
counter = counter+1
else:
otherfiles.append(file)
print(file)
counter = counter+1
except Exception as e:
print("Could not process file:\t", file)
raise e
print("Total files found:\t", counter)
| 34.897436 | 100 | 0.603968 | 179 | 1,361 | 4.592179 | 0.502793 | 0.060827 | 0.087591 | 0.092457 | 0.160584 | 0.160584 | 0.160584 | 0.160584 | 0.160584 | 0.160584 | 0 | 0.026971 | 0.291697 | 1,361 | 39 | 101 | 34.897436 | 0.825726 | 0.336517 | 0 | 0.285714 | 0 | 0 | 0.12691 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.035714 | 0 | 0.035714 | 0.214286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f96a73f7ebbc6d2b474f86a30e29cb3233db9724 | 5,317 | py | Python | rootfs/api/apps_extra/social_core/actions.py | jianxiaoguo/controller | 8cc1e11601e5725e583f0fa82cdb2c10872ca485 | ["Apache-2.0"] | null | null | null | rootfs/api/apps_extra/social_core/actions.py | jianxiaoguo/controller | 8cc1e11601e5725e583f0fa82cdb2c10872ca485 | ["Apache-2.0"] | 19 | 2020-07-30T06:31:29.000Z | 2022-03-14T07:33:44.000Z | rootfs/api/apps_extra/social_core/actions.py | jianxiaoguo/controller | 8cc1e11601e5725e583f0fa82cdb2c10872ca485 | ["Apache-2.0"] | 9 | 2020-07-30T02:50:12.000Z | 2020-12-11T06:44:19.000Z |
from urllib.parse import quote
from social_core.utils import sanitize_redirect, user_is_authenticated, \
user_is_active, partial_pipeline_data, setting_url
def do_auth(backend, redirect_name='next'):
# Save any defined next value into session
data = backend.strategy.request_data(merge=False)
# Save extra data into session.
for field_name in backend.setting('FIELDS_STORED_IN_SESSION', []):
if field_name in data:
backend.strategy.session_set(field_name, data[field_name])
else:
backend.strategy.session_set(field_name, None)
# uri = None
if redirect_name in data:
# Check and sanitize a user-defined GET/POST next field value
redirect_uri = data[redirect_name]
if backend.setting('SANITIZE_REDIRECTS', True):
allowed_hosts = backend.setting('ALLOWED_REDIRECT_HOSTS', []) + \
[backend.strategy.request_host()]
redirect_uri = sanitize_redirect(allowed_hosts, redirect_uri)
backend.strategy.session_set(
redirect_name,
redirect_uri or backend.setting('LOGIN_REDIRECT_URL')
)
response = backend.start()
url = response.url.split('?')[1]
def form2json(form_data):
from urllib.parse import parse_qs, urlparse
query = urlparse('?' + form_data).query
params = parse_qs(query)
return {key: params[key][0] for key in params}
from django.core.cache import cache
cache.set("oidc_key_" + data.get('key', ''), form2json(url).get('state'), 60 * 10)
return response
def do_complete(backend, login, user=None, redirect_name='next',
*args, **kwargs):
data = backend.strategy.request_data()
is_authenticated = user_is_authenticated(user)
user = user if is_authenticated else None
partial = partial_pipeline_data(backend, user, *args, **kwargs)
if partial:
user = backend.continue_pipeline(partial)
# clean partial data after usage
backend.strategy.clean_partial_pipeline(partial.token)
else:
user = backend.complete(user=user, *args, **kwargs)
# pop redirect value before the session is trashed on login(), but after
# the pipeline so that the pipeline can change the redirect if needed
redirect_value = backend.strategy.session_get(redirect_name, '') or \
data.get(redirect_name, '')
# check if the output value is something else than a user and just
# return it to the client
user_model = backend.strategy.storage.user.user_model()
if user and not isinstance(user, user_model):
return user
if is_authenticated:
if not user:
url = setting_url(backend, redirect_value, 'LOGIN_REDIRECT_URL')
else:
url = setting_url(backend, redirect_value,
'NEW_ASSOCIATION_REDIRECT_URL',
'LOGIN_REDIRECT_URL')
elif user:
if user_is_active(user):
# catch is_new/social_user in case login() resets the instance
is_new = getattr(user, 'is_new', False)
social_user = user.social_user
login(backend, user, social_user)
# store last login backend name in session
backend.strategy.session_set('social_auth_last_login_backend',
social_user.provider)
if is_new:
url = setting_url(backend,
'NEW_USER_REDIRECT_URL',
redirect_value,
'LOGIN_REDIRECT_URL')
else:
url = setting_url(backend, redirect_value,
'LOGIN_REDIRECT_URL')
else:
if backend.setting('INACTIVE_USER_LOGIN', False):
social_user = user.social_user
login(backend, user, social_user)
url = setting_url(backend, 'INACTIVE_USER_URL', 'LOGIN_ERROR_URL',
'LOGIN_URL')
else:
url = setting_url(backend, 'LOGIN_ERROR_URL', 'LOGIN_URL')
if redirect_value and redirect_value != url:
redirect_value = quote(redirect_value)
url += ('&' if '?' in url else '?') + \
'{0}={1}'.format(redirect_name, redirect_value)
if backend.setting('SANITIZE_REDIRECTS', True):
allowed_hosts = backend.setting('ALLOWED_REDIRECT_HOSTS', []) + \
[backend.strategy.request_host()]
url = sanitize_redirect(allowed_hosts, url) or \
backend.setting('LOGIN_REDIRECT_URL')
response = backend.strategy.redirect(url)
social_auth = user.social_auth.filter(provider='drycc').\
order_by('-modified').last()
response.set_cookie("name", user.username,
max_age=social_auth.extra_data.get('expires_in'))
response.set_cookie("id_token", social_auth.extra_data.get('id_token'),
max_age=social_auth.extra_data.get('expires_in'))
from django.core.cache import cache
cache.set("oidc_state_" + data.get('state'),
{'token': social_auth.extra_data.get('id_token', 'fail'),
'username': user.username},
60 * 10)
return response
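The nested form2json() helper above is plain querystring parsing and works standalone; lifted out here for illustration:

from urllib.parse import parse_qs, urlparse

def form2json(form_data):
    # Parse 'a=1&b=2' into {'a': '1', 'b': '2'}, keeping the first value per key
    query = urlparse('?' + form_data).query
    params = parse_qs(query)
    return {key: params[key][0] for key in params}

print(form2json("state=abc123&code=xyz"))  # {'state': 'abc123', 'code': 'xyz'}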
| 42.536 | 86 | 0.618018 | 623 | 5,317 | 5.025682 | 0.213483 | 0.05749 | 0.030661 | 0.038326 | 0.331523 | 0.292239 | 0.261897 | 0.261897 | 0.210157 | 0.159693 | 0 | 0.00369 | 0.28644 | 5,317 | 124 | 87 | 42.879032 | 0.82156 | 0.094414 | 0 | 0.27551 | 0 | 0 | 0.106808 | 0.030606 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030612 | false | 0 | 0.05102 | 0 | 0.122449 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f96c7c015c3ad71d48f4085619b3a3dcae5954cc | 1,667 | py | Python | Part_1/src/manual_split_test/make_paired_cases.py | Bhaskers-Blu-Org2/datascience4managers | 2410182fe6913a8c986d2f28f5db6850cddb75f2 | ["MIT"] | 8 | 2019-11-24T08:23:12.000Z | 2021-01-19T02:48:46.000Z | Part_1/src/manual_split_test/make_paired_cases.py | Bhaskers-Blu-Org2/datascience4managers | 2410182fe6913a8c986d2f28f5db6850cddb75f2 | ["MIT"] | 1 | 2021-06-02T02:05:15.000Z | 2021-06-02T02:05:15.000Z | Part_1/src/manual_split_test/make_paired_cases.py | microsoft/datascience4managers | 7c332bf23a85f281237c841e1981ab21ed4ca072 | ["MIT"] | 9 | 2019-10-29T18:45:36.000Z | 2021-03-27T07:23:13.000Z |
#!/usr/bin/python
# Oct 2019 JMA
# make_samples.py Use the splits_aggregator module to create samples
'''
Write a short description of what the program does here.
Usage:
$ ./make_samples.py [-v] [-d ROOT_DIR] [-c pair_cnt]
-v verbose output
-d data directory to read from
-c number of randomly paired cases to generate
'''
import os, sys
import glob
import pprint
import re
import time
from pathlib import Path
import pyarrow
import pandas as pd
import splits_aggregator as sa
### config constants
VERBOSE = False
ROOT_DIR = Path('D:/OneDrive - Microsoft/data/20news')
PAIR_CNT = 1
########################################################################
class x(object):
''
pass
###############################################################################
def main(input_dir, pair_cnt):
cs = sa.BinaryComparisons(input_dir)
pairs_df = cs.random_pairs(pair_cnt)
if VERBOSE:
print("Pairs: ", pairs_df.shape)
cs.embed_in_excel(pairs_df)
########################################################################
if __name__ == '__main__':
if '-v' in sys.argv:
k = sys.argv.index('-v')
VERBOSE = True
## Inputs
if '-d' in sys.argv:
d = sys.argv.index('-d')
ROOT_DIR = Path(sys.argv[d+1])
# else:
if '-c' in sys.argv:
g = sys.argv.index('-c')
PAIR_CNT = int(sys.argv[g+1])
main(ROOT_DIR, PAIR_CNT)
print(sys.argv, "\nDone in ",
'%5.3f' % time.process_time(),
" secs! At UTC: ",
time.asctime(time.gmtime()), file=sys.stderr)
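The hand-rolled sys.argv scanning above could also be expressed with argparse; a sketch mirroring the flags documented in the module docstring (-v, -d ROOT_DIR, -c pair_cnt):

import argparse
from pathlib import Path

parser = argparse.ArgumentParser(description="Create randomly paired cases.")
parser.add_argument("-v", dest="verbose", action="store_true", help="verbose output")
parser.add_argument("-d", dest="root_dir", metavar="ROOT_DIR", type=Path,
                    help="data directory to read from")
parser.add_argument("-c", dest="pair_cnt", metavar="pair_cnt", type=int, default=1,
                    help="number of randomly paired cases to generate")
args = parser.parse_args()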
| 24.880597 | 80 | 0.515297 | 207 | 1,667 | 4 | 0.497585 | 0.076087 | 0.032609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008765 | 0.247151 | 1,667 | 67 | 81 | 24.880597 | 0.650996 | 0.214757 | 0 | 0 | 0 | 0 | 0.091542 | 0.020896 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0.027778 | 0.25 | 0 | 0.305556 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f96ed484656fab8971f82e7e48fafd3dcd557e30 | 2,393 | py | Python | aerosandbox/tools/miscellaneous.py | SzymonSzyszko/AeroSandbox | d4084899b665f735c1ec218282b2e4aee08eacff | ["MIT"] | null | null | null | aerosandbox/tools/miscellaneous.py | SzymonSzyszko/AeroSandbox | d4084899b665f735c1ec218282b2e4aee08eacff | ["MIT"] | null | null | null | aerosandbox/tools/miscellaneous.py | SzymonSzyszko/AeroSandbox | d4084899b665f735c1ec218282b2e4aee08eacff | ["MIT"] | null | null | null |
import math
import numpy as np
def eng_string(x, format='%.3g', si=True):
'''
Taken from: https://stackoverflow.com/questions/17973278/python-decimal-engineering-notation-for-mili-10e-3-and-micro-10e-6/40691220
Returns float/int value <x> formatted in a simplified engineering format -
using an exponent that is a multiple of 3.
format: printf-style string used to format the value before the exponent.
si: if true, use SI suffix for exponent, e.g. k instead of e3, n instead of
e-9 etc.
E.g. with format='%.2f':
1.23e-08 => 12.30e-9
123 => 123.00
1230.0 => 1.23e3
-1230000.0 => -1.23e6
and with si=True:
1230.0 => 1.23k
-1230000.0 => -1.23M
'''
sign = ''
if x < 0:
x = -x
sign = '-'
exp = int(math.floor(math.log10(x)))
exp3 = exp - (exp % 3)
x3 = x / (10 ** exp3)
if si and exp3 >= -24 and exp3 <= 24 and exp3 != 0:
exp3_text = 'yzafpnum kMGTPEZY'[(exp3 + 24) // 3]
elif exp3 == 0:
exp3_text = ''
else:
exp3_text = 'e%s' % exp3
return ('%s' + format + '%s') % (sign, x3, exp3_text)
# Return a copy of the array with NaN entries dropped
remove_nans = lambda x: x[~np.isnan(x)]
import sys
import os
from contextlib import contextmanager
@contextmanager
def stdout_redirected(to=os.devnull):
'''
From StackOverflow: https://stackoverflow.com/questions/5081657/how-do-i-prevent-a-c-shared-library-to-print-on-stdout-in-python
Usage:
import os
with stdout_redirected(to=filename):
print("from Python")
os.system("echo non-Python applications are also supported")
'''
fd = sys.stdout.fileno()
##### assert that Python and C stdio write using the same file descriptor
####assert libc.fileno(ctypes.c_void_p.in_dll(libc, "stdout")) == fd == 1
def _redirect_stdout(to):
sys.stdout.close() # + implicit flush()
os.dup2(to.fileno(), fd) # fd writes to 'to' file
sys.stdout = os.fdopen(fd, 'w') # Python writes to fd
with os.fdopen(os.dup(fd), 'w') as old_stdout:
with open(to, 'w') as file:
_redirect_stdout(to=file)
try:
yield # allow code to be run with the redirected stdout
finally:
_redirect_stdout(to=old_stdout) # restore stdout.
# buffering and flags such as
# CLOEXEC may be different
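Usage of eng_string(), matching the examples already given in its docstring:

print(eng_string(1230.0))              # '1.23k'  (si=True is the default)
print(eng_string(-1230000.0))          # '-1.23M'
print(eng_string(1.23e-08, si=False))  # '12.3e-9'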
| 28.488095 | 136 | 0.600501 | 349 | 2,393 | 4.063037 | 0.467049 | 0.005642 | 0.03385 | 0.042313 | 0.01763 | 0 | 0 | 0 | 0 | 0 | 0 | 0.070366 | 0.269536 | 2,393 | 83 | 137 | 28.831325 | 0.740847 | 0.497284 | 0 | 0 | 0 | 0 | 0.029493 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.142857 | 0 | 0.257143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f974a73c7d07887b66165d4b3f68128150448a37 | 530 | py | Python | setup.py | Moi-Teaching-Referral-Hospital/ERPNextMTRHModifications | 393cef3294d6b07f2c7ff21899c99a82276be43f | ["MIT"] | null | null | null | setup.py | Moi-Teaching-Referral-Hospital/ERPNextMTRHModifications | 393cef3294d6b07f2c7ff21899c99a82276be43f | ["MIT"] | 1 | 2021-01-09T20:00:38.000Z | 2021-01-09T20:00:38.000Z | setup.py | Moi-Teaching-Referral-Hospital/mtrh_dev | 367af3922d0fe0c19e35b0edd999dfc42f9a225b | ["MIT"] | 2 | 2020-07-28T22:22:04.000Z | 2020-08-16T16:12:56.000Z |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
# get version from __version__ variable in mtrh_dev/__init__.py
from mtrh_dev import __version__ as version
setup(
name='mtrh_dev',
version=version,
description='For all MTRH dev Frappe and ERPNext modifications',
author='MTRH',
author_email='erp@mtrh.go.ke',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| 25.238095 | 65 | 0.762264 | 76 | 530 | 5 | 0.644737 | 0.073684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002137 | 0.116981 | 530 | 20 | 66 | 26.5 | 0.809829 | 0.156604 | 0 | 0 | 0 | 0 | 0.209459 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f974ccb0279e3323702f280c06f3f6d71a27a8f5 | 23,062 | py | Python | tools/programController.py | brewpi-remix/uno-test | a153a5277bea2a8e58ee479792d6977f0beb853e | ["MIT"] | null | null | null | tools/programController.py | brewpi-remix/uno-test | a153a5277bea2a8e58ee479792d6977f0beb853e | ["MIT"] | null | null | null | tools/programController.py | brewpi-remix/uno-test | a153a5277bea2a8e58ee479792d6977f0beb853e | ["MIT"] | 1 | 2021-07-31T15:23:07.000Z | 2021-07-31T15:23:07.000Z |
#!/usr/bin/env python3
import subprocess as sub
import time
import simplejson as json
import os
from sys import stderr
import subprocess
import platform
import sys
import stat
import pwd
import grp
import BrewPiUtil as util
import brewpiVersion
import expandLogMessage
from packaging import version
from MigrateSettings import MigrateSettings
from ConvertBrewPiDevice import ConvertBrewPiDevice
msg_map = {"a": "Arduino"}
def printStdErr(*objs):
# Log to stderr.txt
print(*objs, file=sys.stderr)
sys.stderr.flush()
def printStdOut(*objs):
# Log to stdout.txt
print(*objs, file=sys.stdout)
sys.stderr.flush()
def asbyte(v):
return chr(v & 0xFF)
class LightYModem:
"""
Receive_Packet
- first byte SOH/STX (for 128/1024 byte size packets)
- EOT (end)
- CA CA abort
- ABORT1 or ABORT2 is abort
Then 2 bytes for seqno (although the sequence number isn't checked)
Then the packet data
Then CRC16?
First packet sent is a filename packet:
- zero-terminated filename
- file size (ascii) followed by space?
"""
packet_len = 1024
stx = 2
eot = 4
ack = 6
nak = 0x15
ca = 0x18
crc16 = 0x43
abort1 = 0x41
abort2 = 0x61
def __init__(self):
self.seq = None
self.ymodem = None
def _read_response(self):
ch1 = ''
while not ch1:
ch1 = self.ymodem.read(1)
ch1 = ord(ch1)
if ch1 == LightYModem.ack and self.seq == 0: # may send also a crc16
ch2 = self.ymodem.read(1)
elif ch1 == LightYModem.ca: # cancel, always sent in pairs
ch2 = self.ymodem.read(1)
return ch1
def _send_ymodem_packet(self, data):
# pad string to 1024 chars
data = data.ljust(LightYModem.packet_len)
seqchr = asbyte(self.seq & 0xFF)
seqchr_neg = asbyte((-self.seq-1) & 0xFF)
crc16 = '\x00\x00'
packet = asbyte(LightYModem.stx) + seqchr + seqchr_neg + data + crc16
if len(packet) != 1029:
raise Exception("packet length is wrong!")
self.ymodem.write(packet)
self.ymodem.flush()
response = self._read_response()
if response == LightYModem.ack:
printStdErr("Sent packet nr %d " % (self.seq))
self.seq += 1
return response
def _send_close(self):
self.ymodem.write(asbyte(LightYModem.eot))
self.ymodem.flush()
response = self._read_response()
if response == LightYModem.ack:
self.send_filename_header("", 0)
self.ymodem.close()
def send_packet(self, file, output):
response = LightYModem.eot
data = file.read(LightYModem.packet_len)
if len(data):
response = self._send_ymodem_packet(data)
return response
def send_filename_header(self, name, size):
self.seq = 0
packet = name + asbyte(0) + str(size) + ' '
return self._send_ymodem_packet(packet)
def transfer(self, file, ymodem, output):
self.ymodem = ymodem
# file: the file to transfer via ymodem
# ymodem: the ymodem endpoint (a file-like object supporting write)
# output: a stream for output messages
file.seek(0, os.SEEK_END)
size = file.tell()
file.seek(0, os.SEEK_SET)
response = self.send_filename_header("binary", size)
while response == LightYModem.ack:
response = self.send_packet(file, output)
file.close()
if response == LightYModem.eot:
self._send_close()
return response
def fetchBoardSettings(boardsFile, boardType):
boardSettings = {}
for line in boardsFile:
line = line.decode()
if line.startswith(boardType):
# strip board name, period and \n
setting = line.replace(boardType + '.', '', 1).strip()
[key, sign, val] = setting.rpartition('=')
boardSettings[key] = val
return boardSettings
def loadBoardsFile(arduinohome):
boardsFileContent = None
try:
boardsFileContent = open(
arduinohome + 'hardware/arduino/boards.txt', 'rb').readlines()
except IOError:
printStdErr(
"Could not read boards.txt from Arduino, probably because Arduino has not been installed.")
printStdErr("Please install it with: 'sudo apt install arduino-core'")
return boardsFileContent
def programController(config, boardType, hexFile, restoreWhat):
programmer = SerialProgrammer.create(config, boardType)
return programmer.program(hexFile, restoreWhat)
def json_decode_response(line):
try:
return json.loads(line[2:])
except json.decoder.JSONDecodeError as e:
printStdErr("\nJSON decode error: {0}".format(str(e)))
printStdErr("\nLine received was: {0}".format(line))
class SerialProgrammer:
@staticmethod
def create(config, boardType):
if boardType == 'arduino':
msg_map["a"] = "Arduino"
programmer = ArduinoProgrammer(config, boardType)
elif boardType == 'uno':
msg_map["a"] = "Arduino"
programmer = ArduinoProgrammer(config, boardType)
else:
msg_map["a"] = "Arduino"
programmer = ArduinoProgrammer(config, boardType)
return programmer
def __init__(self, config):
self.config = config
self.restoreSettings = False
self.restoreDevices = False
self.ser = None
self.versionNew = None
self.versionOld = None
self.oldSettings = {}
def program(self, hexFile, restoreWhat):
printStdErr("\n%(a)s program script started." % msg_map)
self.parse_restore_settings(restoreWhat)
if self.restoreSettings or self.restoreDevices:
printStdErr("Checking old version before programming.\n")
if not self.open_serial(self.config, 57600, 0.2):
return 0
self.delay_serial_open()
# request all settings from board before programming
if self.fetch_current_version():
self.retrieve_settings_from_serial()
self.save_settings_to_file()
if not self.ser:
if not self.open_serial(self.config, 57600, 0.2):
return 0
self.delay_serial_open()
if(hexFile):
if not self.flash_file(hexFile):
return 0
self.fetch_new_version()
self.reset_settings()
if self.restoreSettings or self.restoreDevices:
printStdErr(
"\nChecking which settings and devices may be restored.")
if self.versionNew is None:
printStdErr("\nWarning: Cannot receive version number from controller after programming.",
"\nRestoring settings/devices settings failed.")
return 0
if not self.versionOld and (self.restoreSettings or self.restoreDevices):
printStdErr("\nCould not receive valid version number from old board, no settings/devices",
"\nhave been restored.")
return 0
if self.restoreSettings:
printStdErr("\nTrying to restore compatible settings from {0} to {1}".format(self.versionOld.toString(), self.versionNew.toString()))
if(self.versionNew.isNewer("0.2")):
printStdErr(
"\nSettings may only be restored when updating to BrewPi 0.2.0 or higher")
self.restoreSettings = False
if self.restoreSettings:
self.restore_settings()
if self.restoreDevices:
self.restore_devices()
printStdErr("\n%(a)s program script complete." % msg_map)
self.ser.close()
self.ser = None
return 1
def parse_restore_settings(self, restoreWhat):
restoreSettings = False
restoreDevices = False
if 'settings' in restoreWhat:
if restoreWhat['settings']:
if version.parse(self.versionNew) >= version.parse(self.versionOld): # Only restore settings on same or newer
restoreSettings = True
if 'devices' in restoreWhat:
if restoreWhat['devices']:
if version.parse(self.versionNew) >= version.parse(self.versionOld): # Only restore devices on same or newer
restoreDevices = True
# Even when restoreSettings and restoreDevices are set to True here,
# they might be set to false due to version incompatibility later
printStdErr("\nSettings will {0}be restored{1}.".format(("" if restoreSettings else "not "), (" if possible" if restoreSettings else "")))
printStdErr("\nDevices will {0}be restored{1}.\n".format(("" if restoreDevices else "not "), (" if possible" if restoreDevices else "")))
self.restoreSettings = restoreSettings
self.restoreDevices = restoreDevices
def open_serial(self, config, baud, timeout):
if self.ser:
self.ser.close()
self.ser = None
self.ser = util.setupSerial(config, baud, timeout, 1.0, True)
if self.ser is None:
return False
return True
def open_serial_with_retry(self, config, baud, timeout):
# reopen serial port
retries = 30
self.ser = None
while retries:
time.sleep(1)
if self.open_serial(config, baud, timeout):
return True
retries -= 1
return False
def delay_serial_open(self):
pass
def fetch_version(self, msg):
version = brewpiVersion.getVersionFromSerial(self.ser)
if version is None:
printStdErr("\nWarning: Cannot receive version number from controller. Your controller is",
"\neither not programmed yet or running a very old version of BrewPi. It will",
"\nbe reset to defaults.")
else:
printStdErr("{0}\nFound:\n{1}\non port:{2}".format(msg, version.toExtendedString(), self.ser.name))
return version
def fetch_current_version(self):
self.versionOld = self.fetch_version("\nChecking current version:\n")
return self.versionOld
def fetch_new_version(self):
self.versionNew = self.fetch_version("\nChecking new version:\n")
return self.versionNew
def retrieve_settings_from_serial(self):
ser = self.ser
self.oldSettings.clear()
printStdErr("\nRequesting old settings from %(a)s." % msg_map)
expected_responses = 2
# versions older than 2.0.0 did not have a device manager
if not self.versionOld.isNewer("0.2.0"):
expected_responses += 1
ser.write("d{}".encode()) # installed devices
time.sleep(1)
ser.write("c".encode()) # control constants
ser.write("s".encode()) # control settings
time.sleep(2)
while expected_responses:
line = ser.readline().decode()
if line:
line = util.asciiToUnicode(str(line))
if line[0] == 'C':
expected_responses -= 1
self.oldSettings['controlConstants'] = json_decode_response(
line)
elif line[0] == 'S':
expected_responses -= 1
self.oldSettings['controlSettings'] = json_decode_response(
line)
elif line[0] == 'd':
expected_responses -= 1
self.oldSettings['installedDevices'] = json_decode_response(
line)
def save_settings_to_file(self):
# This is format" "2019-01-08-16-50-15"
oldSettingsFileName = 'settings-{0}.json'.format(time.strftime("%Y-%m-%dT%H-%M-%S"))
settingsBackupDir = '{0}settings/controller-backup/'.format(util.addSlash(util.scriptPath()))
if not os.path.exists(settingsBackupDir):
os.makedirs(settingsBackupDir)
# Set owner and permissions for directory
fileMode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH # 775
owner = 'brewpi'
group = 'brewpi'
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
os.chown(settingsBackupDir, uid, gid) # chown dir
os.chmod(settingsBackupDir, fileMode) # chmod dir
oldSettingsFilePath = os.path.join(
settingsBackupDir, oldSettingsFileName)
oldSettingsFile = open(oldSettingsFilePath, 'w')
oldSettingsFile.write(json.dumps(self.oldSettings))
oldSettingsFile.truncate()
oldSettingsFile.close()
# Set owner and permissions for file
fileMode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP # 660
owner = 'brewpi'
group = 'brewpi'
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
os.chown(oldSettingsFilePath, uid, gid) # chown file
os.chmod(oldSettingsFilePath, fileMode) # chmod file
printStdErr("\nSaved old settings to file {0}.".format(oldSettingsFileName))
def delay(self, countDown):
printStdErr("")
while countDown > 0:
time.sleep(1)
printStdErr("Back up in {0}.".format(str(countDown)))
countDown -= 1
def reset_settings(self, setTestMode=False):
printStdErr("\nResetting EEPROM to default settings.")
self.ser.write('E\n'.encode())
if setTestMode:
self.ser.write('j{mode:t}'.encode())
time.sleep(5) # resetting EEPROM takes a while, wait 5 seconds
# read log messages from arduino
while 1: # read all lines on serial interface
line = self.ser.readline().decode()  # decode bytes so the 'D' prefix test below works on str
if line: # line available?
if line[0] == 'D':
self.print_debug_log(line)
else:
break
def print_debug_log(self, line):
try: # debug message received
expandedMessage = expandLogMessage.expandLogMessage(line[2:])
printStdErr(expandedMessage)
except Exception as e: # catch all exceptions, because out of date file could cause errors
printStdErr("\nError while expanding log message: {0}".format(str(e)))
printStdErr(("%(a)s debug message: " % msg_map) + line[2:])
def restore_settings(self):
oldSettingsDict = self.get_combined_settings_dict(self.oldSettings)
ms = MigrateSettings()
restored, omitted = ms.getKeyValuePairs(oldSettingsDict,
self.versionOld.toString(),
self.versionNew.toString())
printStdErr("\nMigrating these settings:\n{0}".format(json.dumps(dict(restored.items()))))
printStdErr("\nOmitting these settings:\n{0}".format(json.dumps(dict(omitted.items()))))
self.send_restored_settings(restored)
def get_combined_settings_dict(self, oldSettings):
# copy keys/values from controlConstants
combined = oldSettings.get('controlConstants').copy()
# add keys/values from controlSettings
combined.update(oldSettings.get('controlSettings'))
return combined
def send_restored_settings(self, restoredSettings):
for key in restoredSettings:
setting = restoredSettings[key]
command = "j{" + json.dumps(key) + ":" + \
json.dumps(setting) + "}\n"
self.ser.write(command.encode())
# make readline blocking for max 5 seconds to give the controller time to respond after every setting
oldTimeout = self.ser.timeout
self.ser.timeout = 5
# read all replies
while 1:
line = self.ser.readline().decode()  # decode bytes so the 'D' prefix test below works on str
if line: # line available?
if line[0] == 'D':
self.print_debug_log(line)
if self.ser.inWaiting() == 0:
#if self.ser.readline() == 0: # WiFi Change
break
self.ser.timeout = 5
def restore_devices(self):
ser = self.ser
oldDevices = self.oldSettings.get('installedDevices')
if oldDevices:
printStdErr("\nNow trying to restore previously installed devices:\n{0}".format(oldDevices))
else:
printStdErr("\nNo devices to restore.")
return
detectedDevices = None
for device in oldDevices:
printStdErr("\nRestoring device:\n{0}".format(json.dumps(device)))
if "a" in device.keys(): # check for sensors configured as first on bus
if int(device['a'], 16) == 0:
printStdErr("A OneWire sensor was configured to be autodetected as the first sensor on the",
"\nbus, but this is no longer supported. We'll attempt to automatically find the",
"\naddress and add the sensor based on its address.")
if detectedDevices is None:
ser.write("h{}".encode()) # installed devices
time.sleep(1)
# get list of detected devices
for line in ser:
line = line.decode()  # serial lines arrive as bytes
if line[0] == 'h':
detectedDevices = json_decode_response(line)
for detectedDevice in detectedDevices:
if device['p'] == detectedDevice['p']:
# get address from sensor that was first on bus
device['a'] = detectedDevice['a']
_temp = "U" + json.dumps(device)
ser.write(_temp.encode())
requestTime = time.time()
# read log messages from arduino
while 1: # read all lines on serial interface
line = ser.readline().decode()
if line: # line available?
if line[0] == 'D':
self.print_debug_log(line)
elif line[0] == 'U':
printStdErr(
("%(a)s reports: device updated to: " % msg_map) + line[2:])
break
if time.time() > requestTime + 5: # wait max 5 seconds for an answer
break
printStdErr("\nRestoring installed devices done.")
class ArduinoProgrammer(SerialProgrammer):
def __init__(self, config, boardType):
SerialProgrammer.__init__(self, config)
self.boardType = boardType
def delay_serial_open(self):
if self.boardType == "uno":
# Allow Arduino UNO to restart
time.sleep(10)
def flash_file(self, hexFile):
config, boardType = self.config, self.boardType
printStdErr("\nLoading programming settings from board.txt.")
# Location of Arduino sdk
arduinohome = config.get('arduinoHome', '/usr/share/arduino/')
# Location of avr tools
avrdudehome = config.get(
'avrdudeHome', arduinohome + 'hardware/tools/')
# Default to empty string because avrsize is on path
avrsizehome = config.get('avrsizeHome', '')
# Location of global avr conf
avrconf = config.get('avrConf', avrdudehome + 'avrdude.conf')
boardsFile = loadBoardsFile(arduinohome)
if not boardsFile:
return False
boardSettings = fetchBoardSettings(boardsFile, boardType)
# Parse the Arduino board file to get the right program settings
for line in boardsFile:
line = line.decode()
if line.startswith(boardType):
# strip board name, period and \n
_boardType = (boardType + '.').encode()
setting = line.encode().replace(_boardType, ''.encode(), 1).strip()
[key, sign, val] = setting.rpartition('='.encode())
boardSettings[key] = val
printStdErr("\nChecking hex file size with avr-size.")
# Start programming the Arduino
avrsizeCommand = avrsizehome + 'avr-size ' + "\"" + hexFile + "\""
# Check program size against maximum size
p = sub.Popen(avrsizeCommand, stdout=sub.PIPE,
stderr=sub.PIPE, shell=True)
output, errors = p.communicate()
programSize = output.split()[7]
printStdErr('\nProgram size: {0} bytes out of max {1}.'.format(programSize.decode(), boardSettings['upload.maximum_size']))
# Another check just to be sure
if int(programSize.decode()) > int(boardSettings['upload.maximum_size']):
printStdErr(
"\nERROR: Program size is bigger than maximum size for your Arduino {0}.".format(boardType))
return False
hexFileDir = os.path.dirname(hexFile)
hexFileLocal = os.path.basename(hexFile)
time.sleep(1)
# Get serial port while in bootloader
# Convert udev rule based port to /dev/tty*
if not config['port'].startswith("/dev/tty"):
convert = ConvertBrewPiDevice()
config['port'] = convert.get_device_from_brewpidev(config['port'])
bootLoaderPort = util.findSerialPort(bootLoader=True, my_port=config['port'])
if not bootLoaderPort:
printStdErr("\nERROR: Could not find port in bootloader.")
return False
programCommand = (avrdudehome + 'avrdude' +
' -F' + # override device signature check
' -e' + # erase flash and eeprom before programming. This prevents issues with corrupted EEPROM
' -p ' + boardSettings['build.mcu'] +
' -c ' + boardSettings['upload.protocol'] +
' -b ' + boardSettings['upload.speed'] +
' -P ' + bootLoaderPort +
' -U ' + 'flash:w:' + "\"" + hexFileLocal + "\"" +
' -C ' + avrconf)
print("DEBUG: Programming command: {}".format(programCommand))
printStdErr("\nProgramming Arduino with avrdude.")
p = sub.Popen(programCommand, stdout=sub.PIPE,
stderr=sub.PIPE, shell=True, cwd=hexFileDir)
output, errors = p.communicate()
output = output.decode()
errors = errors.decode()
# avrdude only uses stderr, append its output to the returnString
printStdErr("\nResult of invoking avrdude:{0}".format(errors))
if("bytes of flash verified" in errors):
printStdErr("Avrdude done, programming successful.")
else:
printStdErr("There was an error while programming.")
return False
printStdErr("\nGiving the Arduino 10 seconds to reset.")
self.delay(10)
return True
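The YModem framing in _send_ymodem_packet() above is fixed-size: 3 header bytes (STX, sequence number, its complement), a payload padded to 1024 bytes, and a 2-byte CRC placeholder, 1029 bytes in all. A standalone sketch of the same framing:

def frame(seq: int, data: bytes) -> bytes:
    # STX, sequence number, complemented sequence, space-padded payload, dummy CRC16
    payload = data.ljust(1024)
    return bytes([2, seq & 0xFF, (-seq - 1) & 0xFF]) + payload + b"\x00\x00"

pkt = frame(0, b"binary\x00" + b"1954 ")
assert len(pkt) == 1029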
| 37.683007 | 146 | 0.588674 | 2,485 | 23,062 | 5.396378 | 0.222938 | 0.013572 | 0.004027 | 0.008203 | 0.190231 | 0.15384 | 0.125653 | 0.108352 | 0.085533 | 0.085533 | 0 | 0.013253 | 0.312939 | 23,062 | 611 | 147 | 37.744681 | 0.83307 | 0.109314 | 0 | 0.258206 | 0 | 0 | 0.13824 | 0.002791 | 0 | 0 | 0.001567 | 0 | 0 | 1 | 0.078775 | false | 0.002188 | 0.037199 | 0.002188 | 0.210066 | 0.113786 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f975435861ec73bfce0399c6d6ca18e0c1beb891 | 10,086 | py | Python | common/hil_slurm_helpers.py | mghpcc-projects/user_level_slurm_reservations | eae56588bb00abfe043714317a27481e036fcc29 | ["MIT"] | null | null | null | common/hil_slurm_helpers.py | mghpcc-projects/user_level_slurm_reservations | eae56588bb00abfe043714317a27481e036fcc29 | ["MIT"] | 11 | 2017-09-14T17:21:31.000Z | 2021-06-01T21:48:47.000Z | common/hil_slurm_helpers.py | mghpcc-projects/user_level_slurm_reservations | eae56588bb00abfe043714317a27481e036fcc29 | ["MIT"] | 3 | 2017-08-16T13:54:40.000Z | 2018-01-10T19:26:59.000Z |
"""
MassOpenCloud / Hardware Isolation Layer (MOC/HIL)
Slurm and *NX Subprocess Command Helpers
May 2017, Tim Donahue tpd001@gmail.com
"""
import os
from pwd import getpwnam, getpwuid
from subprocess import Popen, PIPE
from time import time
from hil_slurm_constants import (HIL_RESNAME_PREFIX, HIL_RESNAME_FIELD_SEPARATOR,
HIL_RESERVATION_OPERATIONS, RES_CREATE_FLAGS,
HIL_RESERVE, HIL_RELEASE)
from hil_slurm_settings import SLURM_INSTALL_DIR
from hil_slurm_logging import log_debug, log_info, log_error
def _output_debug_info(fname, stdout_data, stderr_data):
log_debug('%s: Stdout %s' % (fname, stdout_data))
log_debug('%s: Stderr %s' % (fname, stderr_data))
def _exec_subprocess_cmd(cmd):
'''
Execute a command in a subprocess and wait for completion
'''
debug = False
p = None
try:
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
(stdout_data, stderr_data) = p.communicate()
except Exception as e:
stdout_data = None
stderr_data = 'error: Exception on Popen or communicate'
log_debug('Exception on Popen or communicate')
log_debug('Exception: %s' % e)
if debug:
f = _exec_subprocess_cmd.__name__
log_debug('%s: cmd is %s' % (f, cmd))
log_debug('%s: stdout is %s' % (f, stdout_data))
log_debug('%s: stderr is %s' % (f, stderr_data))
return stdout_data, stderr_data
def _scontrol_show_stdout_to_dict_list(stdout_data, stderr_data, debug=False):
'''
Convert the 'scontrol show' stdout data to a list of dicts
Nearly all params are of the form "keyword=value".
If they all were, a neat functional one-liner would do...
'''
stdout_dict_list = []
if len(stderr_data):
return []
# Split the output and remove the trailing None from the subprocess output
stdout_lines = stdout_data.split(os.linesep)
stdout_lines = filter(None, stdout_lines)
# Convert the output to a list of dicts
for line in stdout_lines:
stdout_line_dict = {}
for kv_pair in line.split(' '):
kv = kv_pair.split('=')
if (len(kv) == 2):
stdout_line_dict[kv[0]] = kv[1]
elif debug:
log_debug('Failed to convert `%s`' % kv_pair)
stdout_dict_list.append(stdout_line_dict)
return stdout_dict_list
def exec_scontrol_cmd(action, entity, entity_id=None, debug=True, **kwargs):
'''
Build an 'scontrol <action> <entity>' command and pass to an executor
Specify single-line output to support stdout postprocessing
'''
cmd = [os.path.join(SLURM_INSTALL_DIR, 'scontrol'), action]
if entity:
cmd.append(entity)
if entity_id:
cmd.append(entity_id)
cmd.append('-o')
if kwargs:
for k, v in kwargs.items():  # .iteritems() is Python 2 only
cmd.append('%s=%s' % (k,v))
if debug:
log_debug('exec_scontrol_cmd(): Command %s' % cmd)
stdout_data, stderr_data = _exec_subprocess_cmd(cmd)
if debug:
log_debug('exec_scontrol_cmd(): Stdout %s' % stdout_data)
log_debug('exec_scontrol_cmd(): Stderr %s' % stderr_data)
return stdout_data, stderr_data
def exec_scontrol_show_cmd(entity, entity_id, debug=False, **kwargs):
'''
Run the 'scontrol show' command on the entity and ID
Convert standard output data to a list of dictionaries, one per line
'''
stdout_data, stderr_data = exec_scontrol_cmd('show', entity, entity_id, debug=debug, **kwargs)
# Check for errors.
# If anything in stderr, return it
# Next, check if stdout includes various error strings - 'scontrol show'
# writes error output to stdout.
# Failure indications:
# Reservation: stdout includes 'not found'
# Job: stdout includes 'Invalid job id'
# Copy stdout to stderr if found.
# If stderr is empty, and stdout does not contain an error string,
# convert stdout to a list of dicts and return that
stdout_dict_list = []
entity_error_dict = {
'reservation': 'not found',
'job': 'Invalid job id'
}
cmd = 'scontrol show ' + entity
if (len(stderr_data) != 0):
log_debug('Command `%s` failed' % cmd)
log_debug(' stderr: %s' % stderr_data)
elif (entity in entity_error_dict) and (entity_error_dict[entity] in stdout_data):
if debug:
log_debug('Command `%s` failed' % cmd)
log_debug(' stderr: %s' % stderr_data)
stderr_data = stdout_data
stdout_data = None
else:
stdout_dict_list = _scontrol_show_stdout_to_dict_list(stdout_data, stderr_data)
return stdout_dict_list, stdout_data, stderr_data
def create_slurm_reservation(name, user, t_start_s, t_end_s, nodes=None,
flags=RES_CREATE_FLAGS, features=None, debug=False):
'''
Create a Slurm reservation via 'scontrol create reservation'
'''
if nodes is None:
nodes = 'ALL'
t_end_arg = {'duration': 'UNLIMITED'} if t_end_s is None else {'endtime': t_end_s}
return exec_scontrol_cmd('create', 'reservation', entity_id=None, debug=debug,
ReservationName=name, starttime=t_start_s,
user=user, nodes=nodes, flags=flags, features=features,
**t_end_arg)
def delete_slurm_reservation(name, debug=False):
'''
Delete a Slurm reservation via 'scontrol delete reservation=<name>'
'''
return exec_scontrol_cmd('delete', None, debug=debug, reservation=name)
def update_slurm_reservation(name, debug=False, **kwargs):
'''
Update a Slurm reservation via 'scontrol update reservation=<name> <kwargs>'
'''
return exec_scontrol_cmd('update', None, reservation=name, debug=debug, **kwargs)
def get_hil_reservation_name(env_dict, restype_s, t_start_s):
'''
Create a reservation name, combining the HIL reservation prefix,
the username, the job ID, and the ToD (YMD_HMS)
Structure:
NamePrefix _ [release|reserve] _ uname _ job_UID _ str(int(time()))
'''
resname = HIL_RESNAME_PREFIX + restype_s + HIL_RESNAME_FIELD_SEPARATOR
resname += env_dict['username'] + HIL_RESNAME_FIELD_SEPARATOR
resname += env_dict['job_uid'] + HIL_RESNAME_FIELD_SEPARATOR
resname += str(int(time()))
return resname
def parse_hil_reservation_name(resname):
'''
Attempt to split a reservation name into HIL reservation name components:
HIL reservation prefix, reservation type, user name, uid, and time
This looks like overkill, except for the presence of other reservations in the
system, with semi-arbitrary names.
'''
prefix = None
restype = None
user = None
uid = None
time_s = None
if resname.startswith(HIL_RESNAME_PREFIX):
resname_partitions = resname.partition(HIL_RESNAME_PREFIX)
prefix = resname_partitions[1]
try:
restype, user, uid, time_s = resname_partitions[2].split(HIL_RESNAME_FIELD_SEPARATOR)
except:
pass
return prefix, restype, user, uid, time_s
def is_hil_reservation(resname, restype_in):
'''
Check if the passed reservation name:
- Starts with the HIL reservation prefix
- Is a HIL reserve or release reservation
- Contains a valid user name and UID
- Optionally, is specifically a reserve or release reservation
- $$$ Could verify nodes have HIL property set
'''
prefix, restype, uname, uid, _ = parse_hil_reservation_name(resname)
if (prefix != HIL_RESNAME_PREFIX):
# log_error('No HIL reservation prefix')
return False
if restype_in:
if (restype != restype_in):
# log_error('Reservation type mismatch')
return False
elif restype not in HIL_RESERVATION_OPERATIONS:
log_error('Unknown reservation type')
return False
try:
pwdbe1 = getpwnam(uname)
pwdbe2 = getpwuid(int(uid))
if pwdbe1 != pwdbe2:
# log_error('Reservation `%s`: User and UID inconsistent' % resname)
return False
except KeyError:
# log_error('Key error')
return False
return True
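# Usage sketch (hypothetical name; same assumed constants): the check passes
# only when the name parses cleanly and the embedded user/UID pair resolves
# to the same account in the password database:
#   is_hil_reservation('flexalloc_MOC_reserve_alice_1000_1506953700', 'reserve')
#   -> True iff getpwnam('alice') and getpwuid(1000) name the same account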
def get_object_data(what_obj, obj_id, debug=False):
'''
Get a list of dictionaries of information on the object, via
'scontrol show <what_object> <object_id>'
'''
objdata_dict_list, stdout_data, stderr_data = exec_scontrol_show_cmd(what_obj,
obj_id, debug=debug)
if (len(stderr_data) != 0):
if debug:
log_debug('Failed to retrieve data for %s `%s`' % (what_obj, obj_id))
log_debug(' %s' % stderr_data)
return objdata_dict_list
def get_partition_data(partition_id):
'''
Get a list of dictionaries of information on the partition(s),
via 'scontrol show partition'
'''
return get_object_data('partition', partition_id, debug=False)
def get_job_data(job_id):
'''
Get a list of dictionaries of information on the job(s),
via 'scontrol show job'
'''
return get_object_data('job', job_id, debug=False)
def get_hil_reservations():
'''
Get a list of all Slurm reservations, return that subset which are HIL reservations
'''
resdata_dict_list, stdout_data, stderr_data = exec_scontrol_show_cmd('reservation', None)
# Filter into a new list; removing items from a list while iterating
# over it skips the element after each removal
resdata_dict_list = [resdata_dict for resdata_dict in resdata_dict_list if
resdata_dict and is_hil_reservation(resdata_dict['ReservationName'], None)]
return resdata_dict_list
def log_hil_reservation(resname, stderr_data, t_start_s=None, t_end_s=None):
if len(stderr_data):
log_error('Error creating reservation `%s`' % resname)
log_error(' Error string: %s' % stderr_data.strip('\n'), separator=False)
else:
log_info('Created HIL reservation `%s`' % resname)
# EOF
| 31.716981
| 98
| 0.65348
| 1,327
| 10,086
| 4.731726
| 0.186134
| 0.039815
| 0.026756
| 0.035037
| 0.216117
| 0.137761
| 0.113235
| 0.091575
| 0.065934
| 0.059564
| 0
| 0.002394
| 0.254412
| 10,086
| 317
| 99
| 31.817035
| 0.83258
| 0.262443
| 0
| 0.1875
| 0
| 0
| 0.092642
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0.00625
| 0.04375
| 0
| 0.26875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f97830507a9e81ba352de5e77becd93d7de239ce
| 1,112
|
py
|
Python
|
tests/test_statsutils.py
|
ofek/boltons
|
395f690f4a24331c4554e2169ac18a15955a4eab
|
[
"BSD-3-Clause"
] | 6,058
|
2015-03-18T16:44:39.000Z
|
2022-03-28T08:42:16.000Z
|
tests/test_statsutils.py
|
ofek/boltons
|
395f690f4a24331c4554e2169ac18a15955a4eab
|
[
"BSD-3-Clause"
] | 289
|
2015-04-09T23:09:24.000Z
|
2022-03-30T00:29:33.000Z
|
tests/test_statsutils.py
|
ofek/boltons
|
395f690f4a24331c4554e2169ac18a15955a4eab
|
[
"BSD-3-Clause"
] | 407
|
2015-04-09T20:09:15.000Z
|
2022-03-30T10:43:22.000Z
|
# -*- coding: utf-8 -*-
from boltons.statsutils import Stats
def test_stats_basic():
da = Stats(range(20))
assert da.mean == 9.5
assert round(da.std_dev, 2) == 5.77
assert da.variance == 33.25
assert da.skewness == 0
assert round(da.kurtosis, 1) == 1.9
assert da.median == 9.5
def _test_pearson():
import random
from boltons.statsutils import pearson_type
def get_pt(dist):
vals = [dist() for x in range(10000)]
pt = pearson_type(vals)
return pt
for x in range(3):
# pt = get_pt(dist=lambda: random.normalvariate(15, 5)) # expect 0, normal
# pt = get_pt(dist=lambda: random.weibullvariate(2, 3)) # gets 1, beta, weibull not specifically supported
# pt = get_pt(dist=lambda: random.gammavariate(2, 3)) # expect 3, gamma
# pt = get_pt(dist=lambda: random.betavariate(2, 3)) # expect 1, beta
# pt = get_pt(dist=lambda: random.expovariate(0.2)) # expect 3, beta
pt = get_pt(dist=lambda: random.uniform(0.0, 10.0)) # gets 2
print('pearson type:', pt)
# import pdb;pdb.set_trace()
| 32.705882
| 115
| 0.615108
| 168
| 1,112
| 3.982143
| 0.392857
| 0.052317
| 0.09417
| 0.098655
| 0.218236
| 0.218236
| 0.080717
| 0
| 0
| 0
| 0
| 0.056355
| 0.25
| 1,112
| 33
| 116
| 33.69697
| 0.745803
| 0.393885
| 0
| 0
| 0
| 0
| 0.019697
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 1
| 0.157895
| false
| 0
| 0.157895
| 0
| 0.368421
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f979feef783a84ff7f70e9da364235d7c960d2cb
| 1,018
|
py
|
Python
|
funcs.py
|
pgDora56/shinyintro
|
15cc153106ebd88a5f73801f2bf0bef52d37cdab
|
[
"MIT"
] | null | null | null |
funcs.py
|
pgDora56/shinyintro
|
15cc153106ebd88a5f73801f2bf0bef52d37cdab
|
[
"MIT"
] | null | null | null |
funcs.py
|
pgDora56/shinyintro
|
15cc153106ebd88a5f73801f2bf0bef52d37cdab
|
[
"MIT"
] | null | null | null |
import os
import pprint
import json
import random
accept = False
colors = {
"Vo": "#e05ab4",
"Da": "#59afe1",
"Vi": "#e0e05a"
}
with open('idols.json') as f:
idols = json.load(f)["idols"] # load the 23 idol entries
def pick(msg):
global accept
if not accept:
print("Not accept")
return
for i in range(5):
pick_one(msg)
accept = False
def pick_one(msg):
global idols
idolno = random.randrange(len(idols))
idol = idols[idolno]
print(f"{msg.user['real_name']} gets {idol['unit']} {idol['name']}")
attachments = [
{
'title': idol['name'],
'text': f"所属ユニット: {idol['unit']}",
'color': colors[idol["type"]],
'image_url': idol["url"]
}]
msg.send_webapi('', json.dumps(attachments))
def command():
global accept
ipt = input()
print(f"Get input: {ipt}")
if ipt == "a":
accept = True
print("Set accept")
elif ipt == "d":
accept = False
print("Set deny")
| 18.851852
| 72
| 0.54224
| 128
| 1,018
| 4.273438
| 0.523438
| 0.060329
| 0.036563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016506
| 0.285855
| 1,018
| 53
| 73
| 19.207547
| 0.735901
| 0.013752
| 0
| 0.116279
| 0
| 0
| 0.201798
| 0.022977
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.093023
| 0
| 0.186047
| 0.139535
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f97c2537579109b781456eb2fe785026c3ea5e59
| 10,782
|
py
|
Python
|
UWBsim/interface/plot_widgets.py
|
kianheus/uwb-simulator
|
888cdcae0d4ca101970971afbdf0113ba3bb1480
|
[
"MIT"
] | 2
|
2021-08-25T03:27:06.000Z
|
2021-09-26T05:08:19.000Z
|
UWBsim/interface/plot_widgets.py
|
kianheus/uwb-simulator
|
888cdcae0d4ca101970971afbdf0113ba3bb1480
|
[
"MIT"
] | null | null | null |
UWBsim/interface/plot_widgets.py
|
kianheus/uwb-simulator
|
888cdcae0d4ca101970971afbdf0113ba3bb1480
|
[
"MIT"
] | 1
|
2021-07-17T10:59:15.000Z
|
2021-07-17T10:59:15.000Z
|
"""Plot Widgets for the UWB Simulation GUI
This file contains several plot widgets that can be used to plot
simulation data in real time and redraw the plots with matplotlib for
better quality.
Classes:
QLivePlot: Base class for real time plots
QLivePlot_Groundtrack: Real time plot for groundtrack
QLivePlot_Position: Real time plot for x, y, z positions
QLivePlot_Velocity: Real time plot for x, y, z velocities
QLivePlot_Attitude: Real time plot for attitude
"""
from PyQt5 import QtWidgets
import pyqtgraph as pg
import matplotlib.pyplot as plt
import numpy as np
from UWBsim.utils import dataTypes
class QLivePlot(QtWidgets.QWidget):
"""Base Class for real time plots using pyqtgraph
Methods:
reset: clear the plot area and data
update_data: Pass new data to the plot widget
update_plot: Update the plot with the most recent data
"""
def __init__(self, *args, **kwargs):
"""Initialize the QLivePlot class
Initializes the widget and creates the basic elements required
for plotting.
"""
super(QLivePlot,self).__init__(*args, **kwargs)
self.layout = QtWidgets.QVBoxLayout()
self.canvas = pg.GraphicsLayoutWidget()
self.canvas.setBackground('#FAFAFA')
self.layout.addWidget(self.canvas)
self.export_button = QtWidgets.QPushButton('plot with matplotlib')
self.export_button.clicked.connect(self._export_button_clicked)
self.layout.addWidget(self.export_button)
self.setLayout(self.layout)
# same colors used by matplotlib
#self.data_colors = ['#1F77B4','#FF7F0E','#2CA02C','#D62728','#9467BD','#8C564B']
# Switch true and ekf color for publication
self.data_colors = ['#2CA02C','#FF7F0E','#1F77B4','#D62728','#9467BD','#8C564B']
self.n_subplots = 1
self.data = [{}]
self.lines = [{}]
def reset(self):
for i in range(self.n_subplots):
self.plot_area[i].clear()
self.plot_area[i].legend.items = []
self.data = [{} for _ in range(self.n_subplots)]
self.lines = [{} for _ in range(self.n_subplots)]
def update_data(self, **estimates):
return NotImplemented
def update_plot(self):
for i in range(self.n_subplots):
for key, values in self.data[i].items():
if key in self.lines[i]:
self.lines[i][key].setData(values[0], values[1])
else:
color_i = len(self.lines[i])
self.lines[i][key] = self.plot_area[i].plot(values[0], values[1], name=key, pen=self.data_colors[color_i])
def _export_button_clicked(self):
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.serif": ["Computer Modern Roman"],
})
fig = plt.figure()
ax = []
for i in range(self.n_subplots):
ax.append(fig.add_subplot(self.n_subplots,1,i+1))
legend = []
for key, values in self.data[i].items():
ax[i].plot(values[0], values[1])
legend.append(key)
ax[i].legend(legend)
ax[i].grid(b=True)
plt.show()
class QLivePlot_GroundTrack(QLivePlot):
def __init__(self, *args, **kwargs):
super(QLivePlot_GroundTrack, self).__init__(*args, **kwargs)
self.n_subplots = 1
self.plot_area = []
self.plot_area.append(self.canvas.addPlot())
self.plot_area[0].setTitle('Ground Track')
self.plot_area[0].setLabels(left='y [m]', bottom='x [m]')
self.plot_area[0].setXRange(-4,4,padding=0)
self.plot_area[0].setYRange(-4,4,padding=0)
self.plot_area[0].showGrid(x=True, y=True)
self.plot_area[0].addLegend()
self.color_i = 0
def update_data(self, **drone_state_data):
for key, state in drone_state_data.items():
if key == 'time':
continue
else:
x = state[0]
y = state[1]
if key in self.data[0]:
self.data[0][key][0].append(x)
self.data[0][key][1].append(y)
else:
self.data[0][key] = [[x],[y]]
self.color_i += 1
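# Data-shape sketch (series names are hypothetical): update_data() takes one
# state vector per estimator, keyed by name; the 'time' entry is skipped here:
#   plot.update_data(time=t, groundtruth=[x, y, z, vx, vy, vz], ekf=[...])
# Each named series accumulates (x, y) ground-track points for update_plot().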
def _export_button_clicked(self):
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.serif": ["Computer Modern Roman"],
})
fig = plt.figure()
ax = []
for i in range(self.n_subplots):
ax.append(fig.add_subplot(self.n_subplots,1,i+1))
legend = []
j = 0
for key, values in self.data[i].items():
ax[i].plot(values[0], values[1], color=self.data_colors[j])
legend.append(key)
j += 1
#ax[i].legend(legend)
# Publication legend
ax[i].legend(["Ground truth","MHE", "EKF"])
ax[i].grid(b=True)
ax[i].set_xlabel('x [m]')
ax[i].set_ylabel('y [m]')
#ax[i].set_title('Groundtrack')
plt.show()
class QLivePlot_Position(QLivePlot):
def __init__(self, *args, **kwargs):
super(QLivePlot_Position, self).__init__(*args, **kwargs)
self.n_subplots = 3
self.data = [{},{},{}]
self.lines = [{},{},{}]
self.plot_area = []
for i in range(self.n_subplots):
self.plot_area.append(self.canvas.addPlot())
self.plot_area[i].showGrid(x=True, y=True)
self.plot_area[i].addLegend()
self.canvas.nextRow()
self.plot_area[0].setLabels(left='x [m]', bottom='t [s]')
self.plot_area[1].setLabels(left='y [m]', bottom='t [s]')
self.plot_area[2].setLabels(left='z [m]', bottom='t [s]')
self.color_i = 0
def update_data(self, **drone_state_data):
for key,state in drone_state_data.items():
if key == 'time':
continue
else:
x = state[0]
y = state[1]
z = state[2]
t = drone_state_data['time']
if key in self.data[0]:
self.data[0][key][0].append(t)
self.data[0][key][1].append(x)
self.data[1][key][0].append(t)
self.data[1][key][1].append(y)
self.data[2][key][0].append(t)
self.data[2][key][1].append(z)
else:
self.data[0][key] = [[t],[x]]
self.data[1][key] = [[t],[y]]
self.data[2][key] = [[t],[z]]
def _export_button_clicked(self):
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.serif": ["Computer Modern Roman"],
})
ylabels = ['x [m]', 'y [m]', 'z [m]']
fig = plt.figure()
ax = []
for i in range(self.n_subplots):
ax.append(fig.add_subplot(self.n_subplots,1,i+1))
legend = []
for key, values in self.data[i].items():
ax[i].plot(values[0], values[1])
legend.append(key)
ax[i].legend(legend)
ax[i].grid(b=True)
ax[i].set_xlabel('t [s]')
ax[i].set_ylabel(ylabels[i])
plt.show()
class QLivePlot_Velocity(QLivePlot):
def __init__(self, *args, **kwargs):
super(QLivePlot_Velocity, self).__init__(*args, **kwargs)
self.n_subplots = 3
self.data = [{},{},{}]
self.lines = [{},{},{}]
self.plot_area = []
for i in range(self.n_subplots):
self.plot_area.append(self.canvas.addPlot())
self.plot_area[i].showGrid(x=True, y=True)
self.plot_area[i].addLegend()
self.canvas.nextRow()
self.plot_area[0].setLabels(left='vx [m/s]', bottom='t [s]')
self.plot_area[1].setLabels(left='vy [m/s]', bottom='t [s]')
self.plot_area[2].setLabels(left='vz [m/s]', bottom='t [s]')
self.color_i = 0
def update_data(self, **drone_state_data):
for key,state in drone_state_data.items():
if key == 'time':
continue
else:
vx = state[3]
vy = state[4]
vz = state[5]
t = drone_state_data['time']
if key in self.data[0]:
self.data[0][key][0].append(t)
self.data[0][key][1].append(vx)
self.data[1][key][0].append(t)
self.data[1][key][1].append(vy)
self.data[2][key][0].append(t)
self.data[2][key][1].append(vz)
else:
self.data[0][key] = [[t],[vx]]
self.data[1][key] = [[t],[vy]]
self.data[2][key] = [[t],[vz]]
class QLivePlot_Attitude(QLivePlot):
def __init__(self, *args, **kwargs):
super(QLivePlot_Attitude, self).__init__(*args, **kwargs)
self.n_subplots = 3
self.data = [{},{},{}]
self.lines = [{},{},{}]
self.plot_area = []
for i in range(self.n_subplots):
self.plot_area.append(self.canvas.addPlot())
self.plot_area[i].showGrid(x=True, y=True)
self.plot_area[i].addLegend()
self.canvas.nextRow()
self.plot_area[0].setLabels(left='Roll [rad]', bottom='t [s]')
self.plot_area[1].setLabels(left='Pitch [rad]', bottom='t [s]')
self.plot_area[2].setLabels(left='Yaw [rad]', bottom='t [s]')
self.color_i = 0
def update_data(self, **kwargs):
for key,value in kwargs.items():
if isinstance(value, dataTypes.State_XVQW):
r = value.q.get_roll()
p = value.q.get_pitch()
y = value.q.get_yaw()
t = value.timestamp
if key in self.data[0]:
self.data[0][key][0].append(t)
self.data[0][key][1].append(r)
self.data[1][key][0].append(t)
self.data[1][key][1].append(p)
self.data[2][key][0].append(t)
self.data[2][key][1].append(y)
else:
self.data[0][key] = [[t],[r]]
self.data[1][key] = [[t],[p]]
self.data[2][key] = [[t],[y]]
else:
pass
#print('Plot can only be updated with State_XVQW data type.')
| 34.66881
| 126
| 0.516602
| 1,363
| 10,782
| 3.96405
| 0.142333
| 0.069591
| 0.071072
| 0.026652
| 0.634277
| 0.602258
| 0.566907
| 0.544512
| 0.486026
| 0.433463
| 0
| 0.021712
| 0.333612
| 10,782
| 311
| 127
| 34.66881
| 0.730271
| 0.099518
| 0
| 0.578947
| 0
| 0
| 0.046413
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065789
| false
| 0.004386
| 0.02193
| 0.004386
| 0.114035
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f97ca4c83d65c548b29075ec69330e20d6ca30b3
| 1,018
|
py
|
Python
|
scripts/plot_summary_stats.py
|
JackKelly/slicedpy
|
c2fa7eb4c7b7374f8192a43d8e617b63c9e25e62
|
[
"Apache-2.0"
] | 3
|
2017-02-03T22:05:25.000Z
|
2017-08-29T19:06:17.000Z
|
scripts/plot_summary_stats.py
|
JackKelly/slicedpy
|
c2fa7eb4c7b7374f8192a43d8e617b63c9e25e62
|
[
"Apache-2.0"
] | null | null | null |
scripts/plot_summary_stats.py
|
JackKelly/slicedpy
|
c2fa7eb4c7b7374f8192a43d8e617b63c9e25e62
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from pda.dataset import init_aggregate_and_appliance_dataset_figure
import matplotlib.pyplot as plt
from scipy.stats import *
import numpy as np
subplots, chan = init_aggregate_and_appliance_dataset_figure(
start_date='2013/6/4 10:00', end_date='2013/6/4 13:30',
n_subplots=2, date_format='%H:%M:%S', alpha=0.6,
plot_appliance_ground_truth=False)
DISPLAY = ['mean', 'std', 'ptp', 'gmean', 'skew']
WINDOW = 60
n = chan.series.size - WINDOW
labels = ['mean', 'std', 'ptp', 'gmean', 'skew']
summary_stats = np.empty((n,len(labels)))
print("Calculating...")
for i in range(1,n):
chunk = chan.series.values[i:i+WINDOW]
summary_stats[i] = (chunk.mean(), chunk.std(), chunk.ptp(),
gmean(chunk), skew(chunk))
print("Plotting...")
for i, label in enumerate(labels):
if label in DISPLAY:
subplots[1].plot(chan.series.index[WINDOW:], summary_stats[:,i],
label=label)
plt.legend()
plt.grid()
plt.show()
print("Done!")
| 28.277778
| 73
| 0.656189
| 151
| 1,018
| 4.291391
| 0.509934
| 0.037037
| 0.049383
| 0.07716
| 0.175926
| 0.117284
| 0
| 0
| 0
| 0
| 0
| 0.032105
| 0.17387
| 1,018
| 35
| 74
| 29.085714
| 0.738407
| 0.019646
| 0
| 0
| 0
| 0
| 0.104313
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.148148
| 0
| 0.148148
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f97cacad56a833075fdbf1486e99e188f8024b55
| 2,691
|
py
|
Python
|
sciibo/network/connection.py
|
fdev/sciibo
|
984ec1945cd0f371bce148c1eb1e811befadb478
|
[
"MIT"
] | 14
|
2017-06-16T14:16:57.000Z
|
2021-02-26T13:53:56.000Z
|
sciibo/network/connection.py
|
fdev/sciibo
|
984ec1945cd0f371bce148c1eb1e811befadb478
|
[
"MIT"
] | 1
|
2018-06-27T16:11:48.000Z
|
2019-01-23T12:02:17.000Z
|
sciibo/network/connection.py
|
fdev/sciibo
|
984ec1945cd0f371bce148c1eb1e811befadb478
|
[
"MIT"
] | null | null | null |
import socket
import json
import struct
from sciibo.core.helpers import Queue
from .thread import SocketThread
class ConnectionThread(SocketThread):
def __init__(self, sock):
super(ConnectionThread, self).__init__()
self.sock = sock
# The number of bytes we are expecting
self.expecting = None
# Partial message we received so far
self.partial = None
# Outgoing message queue
self.queue = Queue()
# Player id this connection belongs to
self.player = None
def disconnected(self):
self.stop()
self.trigger('receive', self, {'type': 'disconnect'})
def action(self):
if not self.expecting:
# Send messages
while not self.queue.empty():
data = self.queue.get()
message = json.dumps(data)
self.sock.sendall(struct.pack("i", len(message)) + message.encode())
self.queue.task_done()
# Receive message size
data = self.sock.recv(struct.calcsize("i"))
if not data:
self.disconnected()
return
# We are now looking for a message
self.expecting = struct.unpack("i", data)[0]
self.partial = ""
return
# Receive at most what we are expecting
data = self.sock.recv(self.expecting)
if not data:
self.disconnected()
return
# Bytes to string
data = data.decode()
self.partial += data
self.expecting -= len(data)
# Received complete message
if not self.expecting:
try:
data = json.loads(self.partial)
except ValueError:
return
if not isinstance(data, dict):
return
type = data.get('type')
if not type:
return
self.trigger('receive', self, data)
self.expecting = None
self.partial = None
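# Wire-format sketch (illustrative, mirroring the send/recv logic above):
# each frame is a native-endian struct 'i' length prefix (normally 4 bytes)
# followed by the UTF-8 encoded JSON payload:
#   payload = json.dumps({'type': 'ping'})
#   frame = struct.pack('i', len(payload)) + payload.encode()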
def send(self, data):
self.queue.put(data)
def on_error(self):
# Trigger disconnect when an error occurs, but not
# when the connection was stopped (using after_actions).
self.disconnected()
def after_actions(self):
# Send out queued messages before closing socket
try:
while not self.queue.empty():
data = self.queue.get()
message = json.dumps(data)
self.sock.sendall(struct.pack("i", len(message)) + message.encode())
self.queue.task_done()
except socket.error:
return
| 27.459184
| 84
| 0.544036
| 290
| 2,691
| 5.003448
| 0.341379
| 0.060648
| 0.033081
| 0.030324
| 0.213646
| 0.213646
| 0.170917
| 0.170917
| 0.170917
| 0.170917
| 0
| 0.000587
| 0.36715
| 2,691
| 97
| 85
| 27.742268
| 0.851439
| 0.159792
| 0
| 0.47619
| 0
| 0
| 0.016021
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.079365
| 0
| 0.301587
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f97d02d723f0a4441c6c06372a7158427073778d
| 2,651
|
py
|
Python
|
libs/PieMeter.py
|
lionheart/TimeTracker-Linux
|
64405d53fd12d2593ef4879b867ff38a4d5b9ca9
|
[
"MIT"
] | 12
|
2015-02-06T19:06:49.000Z
|
2019-09-24T17:58:17.000Z
|
libs/PieMeter.py
|
lionheart/TimeTracker-Linux
|
64405d53fd12d2593ef4879b867ff38a4d5b9ca9
|
[
"MIT"
] | null | null | null |
libs/PieMeter.py
|
lionheart/TimeTracker-Linux
|
64405d53fd12d2593ef4879b867ff38a4d5b9ca9
|
[
"MIT"
] | 6
|
2015-11-22T01:58:31.000Z
|
2019-11-04T22:56:38.000Z
|
# Copyright (C) 2008 Jimmy Do <jimmydo@users.sourceforge.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import math
import gobject
import gtk
class PieMeter(gtk.Image):
_DEFAULT_SIZE = 24
def __init__(self):
gtk.Image.__init__(self)
self._progress = 0.0
self._fill_color = (0.0, 1.0, 0.0)
def set_progress(self, progress):
assert progress >= 0.0
assert progress <= 1.0
self._progress = progress
if self.window is not None:
self.window.invalidate_rect(self.allocation, True)
def set_fill_color(self, red, green, blue):
assert 0.0 <= red <= 1.0
assert 0.0 <= green <= 1.0
assert 0.0 <= blue <= 1.0
self._fill_color = (red, green, blue)
if self.window is not None:
self.window.invalidate_rect(self.allocation, True)
def do_size_request(self, requisition):
requisition.width = PieMeter._DEFAULT_SIZE
requisition.height = PieMeter._DEFAULT_SIZE
def do_expose_event(self, event):
context = event.window.cairo_create()
rect = self.allocation
x = rect.x + (rect.width / 2)
y = rect.y + (rect.height / 2)
radius = (min(rect.width, rect.height) / 2)
# Draw background circle
context.arc(x, y, radius, 0, 2 * math.pi)
context.set_source_rgba(0.8, 0.8, 0.8)
context.fill()
# Draw pie
context.arc(x, y, radius, (-0.5 * math.pi) + self._progress * 2 * math.pi, 1.5 * math.pi)
context.line_to(x, y)
context.close_path()
(red, green, blue) = self._fill_color
context.set_source_rgb(red, green, blue)
context.fill()
# Draw circle outline
context.arc(x, y, radius, 0, 2 * math.pi)
context.set_source_rgba(1, 1, 1)
context.set_line_width(1.0)
context.stroke()
gobject.type_register(PieMeter)
| 33.987179
| 97
| 0.626556
| 378
| 2,651
| 4.277778
| 0.378307
| 0.009895
| 0.029685
| 0.03525
| 0.213358
| 0.18491
| 0.138528
| 0.138528
| 0.138528
| 0.138528
| 0
| 0.035454
| 0.276499
| 2,651
| 77
| 98
| 34.428571
| 0.807612
| 0.296115
| 0
| 0.177778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.111111
| false
| 0
| 0.066667
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f97d4fd046debdeff0094ec80a682b86eb50db54
| 6,192
|
py
|
Python
|
examples/pinball.py
|
jgrigonis/arcade
|
9b624da7da52e3909f6e82c552446b90249041f1
|
[
"MIT"
] | 1
|
2021-05-23T20:30:46.000Z
|
2021-05-23T20:30:46.000Z
|
examples/pinball.py
|
jgrigonis/arcade
|
9b624da7da52e3909f6e82c552446b90249041f1
|
[
"MIT"
] | null | null | null |
examples/pinball.py
|
jgrigonis/arcade
|
9b624da7da52e3909f6e82c552446b90249041f1
|
[
"MIT"
] | null | null | null |
import arcade
import timeit
BALL_DRAG = 0.001
NO_FLIPPER = 0
FLIPPER_UP = 1
class MyApplication(arcade.Window):
""" Main application class. """
def __init__(self, width, height, resizable):
super().__init__(width, height, resizable=resizable)
self.sprite_list = arcade.SpriteList()
self.left_flipper_list = arcade.SpriteList()
self.right_flipper_list = arcade.SpriteList()
self.left_flipper_state = NO_FLIPPER
self.right_flipper_state = NO_FLIPPER
self.time = 0
arcade.set_background_color(arcade.color.DARK_SLATE_GRAY)
# Top wall
for x in range(20, 800, 40):
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [x, 980], [40, 40], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
# Left wall
for y in range(260, 980, 40):
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [20, y], [40, 40], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
# Right wall
for y in range(260, 980, 40):
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [780, y], [40, 40], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
# Left bottom slope
y = 260
for x in range(40, 280, 10):
y -= 5
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [x, y], [10, 10], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
# Right bottom slope
y = 260
for x in range(760, 520, -10):
y -= 5
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [x, y], [10, 10], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
# Left flipper
y = 135
for x in range(280, 350, 10):
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [x, y], [10, 10], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
self.left_flipper_list.append(wall)
y -= 5
# Right flipper
y = 135
for x in range(520, 440, -10):
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [x, y], [10, 10], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
self.right_flipper_list.append(wall)
y -= 5
# Bumpers
for row in range(2):
for column in range(2):
bumper = arcade.PhysicsCircle("images/bumper.png", [250 + 300 * column, 450 + 300 * row], 35, [0, 0], 1.5, 100, BALL_DRAG)
bumper.static = True
self.sprite_list.append(bumper)
wall = arcade.PhysicsAABB("images/python_logo.png", [400, 600], [150, 150], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
self.sprite_list.draw()
start_x = 20
start_y = 10
arcade.draw_text("Processing time: {:.3f}".format(self.time), start_x, start_y, arcade.color.BLACK, 12)
def update(self, x):
""" Move everything """
start_time = timeit.default_timer()
arcade.process_2d_physics_movement(self.sprite_list, gravity=0.08)
arcade.process_2d_physics_collisions(self.sprite_list)
# -- Left flipper control
if self.left_flipper_state == FLIPPER_UP and self.left_flipper_list[0].center_y < 145:
y = 2
y_change = 2
for sprite in self.left_flipper_list:
sprite.change_y = y
y += y_change
sprite.frozen = False
elif self.left_flipper_state == NO_FLIPPER and self.left_flipper_list[0].center_y > 135:
y = -2
y_change = -2
for sprite in self.left_flipper_list:
sprite.change_y = y
y += y_change
sprite.frozen = False
else:
for sprite in self.left_flipper_list:
sprite.change_y = 0
sprite.frozen = True
# -- Right flipper control
if self.right_flipper_state == FLIPPER_UP and self.right_flipper_list[0].center_y < 145:
y = 2
y_change = 2
for sprite in self.right_flipper_list:
sprite.change_y = y
y += y_change
sprite.frozen = False
elif self.right_flipper_state == NO_FLIPPER and self.right_flipper_list[0].center_y > 135:
y = -2
y_change = -2
for sprite in self.right_flipper_list:
sprite.change_y = y
y += y_change
sprite.frozen = False
else:
for sprite in self.right_flipper_list:
sprite.change_y = 0
sprite.frozen = True
for sprite in self.sprite_list:
if sprite.center_y < -20:
sprite.kill()
self.time = timeit.default_timer() - start_time
def on_key_press(self, key, modifiers):
"""
Called whenever the mouse moves.
"""
if key == arcade.key.LEFT:
self.left_flipper_state = FLIPPER_UP
elif key == arcade.key.RIGHT:
self.right_flipper_state = FLIPPER_UP
elif key == arcade.key.SPACE:
x = 720
y = 300
ball = arcade.PhysicsCircle("images/pool_cue_ball.png", [x, y], 15, [0, +20], 1, .25, BALL_DRAG)
self.sprite_list.append(ball)
def on_key_release(self, key, modifiers):
"""
Called when the user presses a mouse button.
"""
if key == arcade.key.LEFT:
self.left_flipper_state = NO_FLIPPER
elif key == arcade.key.RIGHT:
self.right_flipper_state = NO_FLIPPER
window = MyApplication(800, 1000, resizable=False)
window.set_size(700, 700)
arcade.run()
| 33.652174
| 138
| 0.55491
| 797
| 6,192
| 4.134253
| 0.186951
| 0.045524
| 0.063733
| 0.060698
| 0.618209
| 0.607587
| 0.511077
| 0.497724
| 0.443399
| 0.384522
| 0
| 0.066049
| 0.337371
| 6,192
| 183
| 139
| 33.836066
| 0.737022
| 0.055071
| 0
| 0.542636
| 0
| 0
| 0.046536
| 0.03959
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03876
| false
| 0
| 0.015504
| 0
| 0.062016
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f97e5968772769d07d1c5c3519564d5e93b96cb9
| 2,350
|
py
|
Python
|
pygomas/pack.py
|
sfp932705/pygomas
|
8cdd7e973b8b4e8de467803c106ec44ca6b8bd03
|
[
"MIT"
] | 3
|
2019-06-20T08:55:36.000Z
|
2019-07-04T14:10:40.000Z
|
pygomas/pack.py
|
sfp932705/pygomas
|
8cdd7e973b8b4e8de467803c106ec44ca6b8bd03
|
[
"MIT"
] | null | null | null |
pygomas/pack.py
|
sfp932705/pygomas
|
8cdd7e973b8b4e8de467803c106ec44ca6b8bd03
|
[
"MIT"
] | null | null | null |
import json
from loguru import logger
from .config import PERFORMATIVE, PERFORMATIVE_PACK, PERFORMATIVE_PACK_TAKEN, TEAM, X, Y, Z, NAME, ACTION, CREATE, \
TYPE
from .agent import AbstractAgent, LONG_RECEIVE_WAIT
from .vector import Vector3D
from spade.message import Message
from spade.behaviour import OneShotBehaviour, CyclicBehaviour
from spade.template import Template
from spade.agent import Agent
PACK_NONE: int = 1000
PACK_MEDICPACK: int = 1001
PACK_AMMOPACK: int = 1002
PACK_OBJPACK: int = 1003
PACK_NAME = {
PACK_NONE: 'NONE',
PACK_MEDICPACK: 'MEDIC',
PACK_AMMOPACK: 'AMMO',
PACK_OBJPACK: 'OBJ'
}
PACK_AUTODESTROY_TIMEOUT: int = 25
class Pack(AbstractAgent, Agent):
def __str__(self):
return "P(" + str(PACK_NAME[self.type]) + "," + str(self.position) + ")"
def __init__(self, name, passwd="secret", manager_jid="cmanager@localhost", x=0, z=0, team=0):
Agent.__init__(self, name, passwd)
AbstractAgent.__init__(self, name, team)
self.type = PACK_NONE
self.manager = manager_jid
self.position = Vector3D()
self.position.x = x
self.position.y = 0
self.position.z = z
async def setup(self):
self.add_behaviour(self.CreatePackBehaviour())
t = Template()
t.set_metadata(PERFORMATIVE, PERFORMATIVE_PACK_TAKEN)
self.add_behaviour(self.PackTakenResponderBehaviour(), t)
class CreatePackBehaviour(OneShotBehaviour):
async def run(self):
msg = Message(to=self.agent.manager)
msg.set_metadata(PERFORMATIVE, PERFORMATIVE_PACK)
msg.body = json.dumps({
NAME: self.agent.name,
TEAM: self.agent.team,
ACTION: CREATE,
TYPE: self.agent.type,
X: self.agent.position.x,
Y: self.agent.position.y,
Z: self.agent.position.z
})
await self.send(msg)
logger.info("CreatePack msg sent: {}".format(msg))
class PackTakenResponderBehaviour(CyclicBehaviour):
async def run(self):
msg = await self.receive(timeout=LONG_RECEIVE_WAIT)
if msg is not None:
content = msg.body
await self.agent.perform_pack_taken(content)
# await self.agent.stop()
| 31.333333
| 116
| 0.635745
| 277
| 2,350
| 5.223827
| 0.306859
| 0.055978
| 0.058051
| 0.024879
| 0.078784
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013897
| 0.265106
| 2,350
| 74
| 117
| 31.756757
| 0.823972
| 0.009787
| 0
| 0.033898
| 0
| 0
| 0.028817
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033898
| false
| 0.033898
| 0.152542
| 0.016949
| 0.254237
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f97e89c0eb4e106c1ec357be4b95f0207161d996
| 2,178
|
py
|
Python
|
Utils/initialize.py
|
soshishimada/PhysCap_demo_release
|
542756ed9ecdca77eda8b6b44ba2348253b999c3
|
[
"Unlicense"
] | 62
|
2021-09-05T19:36:06.000Z
|
2022-03-29T11:47:09.000Z
|
Utils/initialize.py
|
soshishimada/PhysCap_demo_release
|
542756ed9ecdca77eda8b6b44ba2348253b999c3
|
[
"Unlicense"
] | 4
|
2021-09-21T09:52:02.000Z
|
2022-03-27T09:08:30.000Z
|
Utils/initialize.py
|
soshishimada/PhysCap_demo_release
|
542756ed9ecdca77eda8b6b44ba2348253b999c3
|
[
"Unlicense"
] | 10
|
2021-09-05T00:27:17.000Z
|
2022-03-22T13:25:57.000Z
|
import numpy as np
import pybullet as p
class Initializer():
def __init__(self,floor_known=None,floor_frame_path=None,):
if floor_known:
self.RT = np.load(floor_frame_path)
else:
self.RT =np.eye(4)
self.rbdl2bullet = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 20, 21, 22]
r_knee_id = 26
r_ankle_id = 28
r_foot_id = 30
r_toe_id = 34
r_heel_id = 35
self.r_kafth_ids = [r_knee_id, r_ankle_id, r_foot_id, r_toe_id, r_heel_id]
l_knee_id = 9
l_ankle_id = 11
l_foot_id = 13
l_toe_id = 17
l_heel_id = 18
self.l_kafth_ids = [l_knee_id, l_ankle_id, l_foot_id, l_toe_id, l_heel_id]
self.params1={
"scale":1000,"iter":8,"delta_t":0.001,"j_kp":117497,"j_kd":3300,"bt_kp":155000,
"bt_kd":2300,"br_kp":50000,"br_kd":2800}
self.params2={
"scale":1000,"iter":8,"delta_t":0.01,"j_kp":300,"j_kd":150,"bt_kp":600,
"bt_kd":300,"br_kp":300,"br_kd":150}
self.con_j_ids_bullet = {"r_toe_id":34,"r_heel_id":35,"l_toe_id":17,"l_heel_id":18}
def get_params(self):
return self.params2 # alternatively self.params1
def get_con_j_idx_bullet(self):
return self.con_j_ids_bullet
def remove_collisions(self,id_a,id_b):
### turn off collisions between humanoids ###
for i in range(p.getNumJoints(id_a)):
for j in range(p.getNumJoints(id_b)):
p.setCollisionFilterPair(id_a, id_b, i, j, 0)
return 0
def get_knee_ankle_foot_toe_heel_ids_rbdl(self):
return self.l_kafth_ids,self.r_kafth_ids
def get_rbdl2bullet(self):
return self.rbdl2bullet
def change_humanoid_color(self,id_robot,color):
for j in range(p.getNumJoints(id_robot)):
p.changeVisualShape(id_robot, j, rgbaColor=color)
return 0
def get_R_T(self):
R = self.RT[:3, :3]
T = self.RT[:-1, 3:].reshape(3)
return R,T
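# Usage sketch (no floor file is needed in this case): RT is a 4x4 homogeneous
# floor-frame transform, and get_R_T() splits it into rotation and translation:
#   init = Initializer(floor_known=False)
#   R, T = init.get_R_T()  # for np.eye(4): R is the 3x3 identity, T is zeros(3)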
| 34.571429
| 166
| 0.573462
| 363
| 2,178
| 3.129477
| 0.333333
| 0.026408
| 0.049296
| 0.052817
| 0.191901
| 0.142606
| 0.142606
| 0.059859
| 0
| 0
| 0
| 0.112058
| 0.299357
| 2,178
| 62
| 167
| 35.129032
| 0.632372
| 0.022039
| 0
| 0.040816
| 0
| 0
| 0.059223
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.163265
| false
| 0
| 0.040816
| 0.081633
| 0.367347
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f97e91890c0cdcab8847df722787798324fca2ec
| 3,220
|
py
|
Python
|
nlptasks/padding.py
|
ulf1/nlptasks
|
07d36448b517a18f76088f5d9cfb853e7602b079
|
[
"Apache-2.0"
] | 2
|
2020-12-30T13:11:09.000Z
|
2021-11-04T19:40:31.000Z
|
nlptasks/padding.py
|
ulf1/nlptasks
|
07d36448b517a18f76088f5d9cfb853e7602b079
|
[
"Apache-2.0"
] | 99
|
2020-11-02T14:58:04.000Z
|
2021-04-09T18:01:34.000Z
|
nlptasks/padding.py
|
ulf1/nlptasks
|
07d36448b517a18f76088f5d9cfb853e7602b079
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow.keras as keras # pad_sequences
from pad_sequences import pad_sequences_adjacency
from pad_sequences import pad_sequences_sparse
def pad_idseqs(func):
def wrapper(*args, **kwargs):
# read and remove padding settings
maxlen = kwargs.pop('maxlen', None)
padding = kwargs.pop('padding', 'pre')
truncating = kwargs.pop('truncating', 'pre')
# run the NLP task
idseqs, VOCAB = func(*args, **kwargs)
# padding and update vocabulary
if maxlen is not None:
if "[PAD]" not in VOCAB:
VOCAB.append("[PAD]")
idseqs = keras.preprocessing.sequence.pad_sequences(
idseqs, maxlen=maxlen, value=VOCAB.index("[PAD]"),
padding=padding, truncating=truncating).tolist()
return idseqs, VOCAB
return wrapper
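# Usage sketch (my_tokenizer is hypothetical): any task function returning
# (idseqs, VOCAB) gains maxlen/padding/truncating kwargs via the decorator:
#   @pad_idseqs
#   def my_tokenizer(texts):
#       ...  # -> (idseqs, VOCAB)
#   idseqs, VOCAB = my_tokenizer(texts, maxlen=32, padding='post')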
def pad_adjacmatrix(func):
def wrapper(*args, **kwargs):
# read and remove padding settings
maxlen = kwargs.pop('maxlen', None)
padding = kwargs.pop('padding', 'pre')
truncating = kwargs.pop('truncating', 'pre')
# run the NLP task
adjac_matrix, seqs_lens = func(*args, **kwargs)
# pad adjacency matrix of children relationships
if maxlen is not None:
adjac_matrix = pad_sequences_adjacency(
sequences=adjac_matrix, seqlen=seqs_lens,
maxlen=maxlen, padding=padding, truncating=truncating)
return adjac_matrix, seqs_lens
return wrapper
def pad_maskseqs(func):
def wrapper(*args, **kwargs):
# read and remove padding settings
maxlen = kwargs.pop('maxlen', None)
padding = kwargs.pop('padding', 'pre')
truncating = kwargs.pop('truncating', 'pre')
# run the NLP task
maskseqs, seqs_lens, VOCAB = func(*args, **kwargs)
# pad sparse mask sequence
if maxlen is not None:
maskseqs = pad_sequences_sparse(
sequences=maskseqs, seqlen=seqs_lens,
maxlen=maxlen, padding=padding, truncating=truncating)
return maskseqs, seqs_lens, VOCAB
return wrapper
def pad_merge_adjac_maskseqs(func):
def wrapper(*args, **kwargs):
# read and remove padding settings
maxlen = kwargs.pop('maxlen', None)
padding = kwargs.pop('padding', 'pre')
truncating = kwargs.pop('truncating', 'pre')
# run the NLP task
adjac, onehot, seqs_lens, n_classes = func(*args, **kwargs)
# pad adjacency matrix of children relationships
if maxlen is not None:
adjac = pad_sequences_adjacency(
sequences=adjac, seqlen=seqs_lens,
maxlen=maxlen, padding=padding, truncating=truncating)
onehot = pad_sequences_sparse(
sequences=onehot, seqlen=seqs_lens,
maxlen=maxlen, padding=padding, truncating=truncating)
# shift index of adjac matrix
adjac = [[(i + n_classes, j) for i, j in sent] for sent in adjac]
# merge both sparse matrices
maskseqs = [adjac[k] + onehot[k] for k in range(len(adjac))]
# done
return maskseqs, seqs_lens
return wrapper
| 34.255319
| 73
| 0.617081
| 367
| 3,220
| 5.310627
| 0.188011
| 0.055413
| 0.06157
| 0.087224
| 0.638276
| 0.560287
| 0.525398
| 0.525398
| 0.525398
| 0.463828
| 0
| 0
| 0.287578
| 3,220
| 93
| 74
| 34.623656
| 0.849608
| 0.131056
| 0
| 0.482759
| 0
| 0
| 0.047105
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.137931
| false
| 0
| 0.051724
| 0
| 0.327586
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f983fe925ecae418e3ac67726cae140e97825556
| 6,594
|
py
|
Python
|
RainbowGrades/parsexml.py
|
hifiadi/Submitty
|
62a8239313cff7e3f841ff66aeda6b0557e9c15b
|
[
"BSD-3-Clause"
] | 2
|
2017-10-11T17:48:33.000Z
|
2020-12-15T16:05:05.000Z
|
RainbowGrades/parsexml.py
|
hifiadi/Submitty
|
62a8239313cff7e3f841ff66aeda6b0557e9c15b
|
[
"BSD-3-Clause"
] | 4
|
2019-04-25T02:47:34.000Z
|
2020-03-31T18:56:45.000Z
|
RainbowGrades/parsexml.py
|
hifiadi/Submitty
|
62a8239313cff7e3f841ff66aeda6b0557e9c15b
|
[
"BSD-3-Clause"
] | 1
|
2020-02-07T19:19:20.000Z
|
2020-02-07T19:19:20.000Z
|
#!/usr/bin/env python3
import csv
import xml.etree.ElementTree as ET
import sys
import os.path
class QuestionData:
final_answer = ""
final_answer_time = 0
first_answer = ""
attempts = 0
first_answer_time = 0
def __init__(self,final_answer,final_answer_time,attempts,first_answer,first_answer_time):
self.final_answer = final_answer
self.final_answer_time = final_answer_time
self.first_answer = first_answer
self.first_answer_time = first_answer_time
self.attempts = attempts
def xml_to_csv(xml_filename):
"""
Parses .xml files generated by newer versions of iClicker software in SessionData
A CSV file will be written to the same path as the XML file, so it is important that any path, be it
absolute or relative, is included in the xml_filename argument. The CSV file is not a perfect replica of
older (i.e. iClicker 6) CSV files, but is our best approximation at this time. It should be enough for
Rainbow Grades to function properly.
"""
csv_filename = xml_filename[:-3] + "csv"
try:
with open(xml_filename,"r") as readfile:
tree = ET.parse(xml_filename)
root = tree.getroot()
questions_in_order = []
start_times = {}
stop_times = {}
user_question_data = {}
for child in root:
if child.tag == "p": # This is a polling tag
question = child.attrib["qn"]
start_times[question] = child.attrib["strt"]
stop_times[question] = child.attrib["stp"]
questions_in_order.append(question)
question_votes = {}
for qchild in child:
if qchild.tag == "v": # This is a voting tag
clicker_id = qchild.attrib["id"]
if clicker_id not in user_question_data:
user_question_data[clicker_id] = {}
user_question_data[clicker_id][question] = {}
if "fans" in qchild.attrib:
user_question_data[clicker_id][question] = QuestionData(qchild.attrib["ans"],
qchild.attrib["fanst"],
qchild.attrib["att"],
qchild.attrib["fans"],
qchild.attrib["tm"])
question_votes[clicker_id] = qchild.attrib["ans"]
with open(csv_filename, 'w') as writefile:
csvwriter = csv.writer(writefile) # Need to change dialect to be iclicker compliant
# Write the header
# Right now we don't have min reply/min correct in XML land, instead we have MinPart_S
next_row = ["Scoring"]
if "perf" in root.attrib:
performance = root.attrib["perf"]
else:
performance = -1
if "part" in root.attrib:
participation = root.attrib["part"]
else:
participation = 1
csvwriter.writerow(["Scoring", "Performance = " + performance,
"Participation = " + participation, "Min Reply = 2",
"Min Correct = 0",
" "])
next_row = ["Question", " ", " "]
for i in range(len(questions_in_order)):
next_row = next_row + ["Question " + str(i + 1), "Score", "Final Answer Time", "Number of Attempts",
"First Response", "Time"]
csvwriter.writerow(next_row)
next_row = ["Start Time", " ", " "]
for question in questions_in_order:
next_row = next_row + [" " + start_times[question], " ", " ", " ", " ", " "]
csvwriter.writerow(next_row)
next_row = ["Stop Time", " ", " "]
first_stop = True
for question in questions_in_order:
if not first_stop:
next_row = next_row + [" " + stop_times[question], " ", " ", " ", " ", " "]
else:
next_row = next_row + [stop_times[question], " ", " ", " ", " ", " "]
first_stop = False
csvwriter.writerow(next_row)
next_row = ["Correct Answer", " ", " "]
first_stop = True
for question in questions_in_order:
if not first_stop:
next_row = next_row + [" ", " ", " ", " ", " ", " "]
else:
next_row = next_row + ["", " ", " ", " ", " ", " "]
first_stop = False
csvwriter.writerow(next_row)
for user in sorted(user_question_data.keys()):
next_row = [user, "", "0"]
for question in questions_in_order:
if question in user_question_data[user]:
qd = user_question_data[user][question]
next_row = next_row + [qd.final_answer, 0, qd.final_answer_time, qd.attempts,
qd.first_answer, qd.first_answer_time]
else:
next_row = next_row + ["", "", "", "", "", ""]
csvwriter.writerow(next_row)
except IOError as e:
print("File I/O error: {}".format(e))
exit(-1)
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Correct usage is {} [file with iclicker {\"file\":...} entries]".format(sys.argv[0]))
exit(-1)
files = []
try:
with open(sys.argv[1]) as json_file:
for line in json_file:
# Extract just the filenames of the session data
files += [x.strip()[1:-1] for x in line.split("[")[1].split("]")[0].split(",")]
except IOError as e:
print("Error reading JSON excerpt: {}".format(e))
for filename in files:
if len(filename) >= 4 and filename[-4:] == ".xml":
xml_to_csv(filename)
| 44.554054
| 120
| 0.474826
| 665
| 6,594
| 4.506767
| 0.278195
| 0.063063
| 0.040374
| 0.051385
| 0.258258
| 0.167835
| 0.118452
| 0.046713
| 0.046713
| 0.046713
| 0
| 0.006307
| 0.42296
| 6,594
| 147
| 121
| 44.857143
| 0.78134
| 0.104489
| 0
| 0.252174
| 0
| 0
| 0.067326
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017391
| false
| 0
| 0.034783
| 0
| 0.104348
| 0.026087
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f98449b95d48df636ca504bf4073160f56093406
| 2,255
|
py
|
Python
|
header.py
|
yufernando/inDelphi-app
|
37938f7aaa1630fb80e7568d3d13472eedb76a6d
|
[
"FSFAP"
] | 13
|
2018-11-18T21:53:46.000Z
|
2021-03-01T16:14:21.000Z
|
header.py
|
yufernando/inDelphi-app
|
37938f7aaa1630fb80e7568d3d13472eedb76a6d
|
[
"FSFAP"
] | 2
|
2020-02-11T22:34:41.000Z
|
2020-06-05T18:16:10.000Z
|
header.py
|
yufernando/inDelphi-app
|
37938f7aaa1630fb80e7568d3d13472eedb76a6d
|
[
"FSFAP"
] | 3
|
2018-12-03T05:20:01.000Z
|
2021-07-28T22:33:54.000Z
|
import dash
import dash_core_components as dcc
import dash_html_components as html
divider_text = ' • '
def get_navigation_header(page_nm):
font_size_param = 16
dot_style = dict(
color = 'gray',
fontSize = '%spx' % (font_size_param),
)
default_style = dict(
position = 'relative',
textDecoration = 'none',
textTransform = 'uppercase',
fontFamily = 'sans-serif',
color = 'black',
marginBottom = '2px',
fontSize = '%spx' % (font_size_param),
)
import copy
selected_style = copy.copy(default_style)
selected_style['borderBottom'] = '1px solid'
styles = dict()
for nm in ['single', 'batch', 'gene', 'guide', 'about']:
if page_nm == nm:
styles[nm] = selected_style
else:
styles[nm] = default_style
return html.Div(
[
html.H4(
'inDelphi',
style = dict(
textAlign = 'center',
),
),
html.Div(
[
html.A(
'Single mode',
href = 'single',
style = styles['single'],
className = 'dynamicunderline',
),
html.Span(
divider_text,
),
html.A(
'Batch mode',
href = 'batch',
style = styles['batch'],
className = 'dynamicunderline',
),
html.Span(
divider_text,
),
html.A(
'Gene mode',
href = 'gene',
style = styles['gene'],
className = 'dynamicunderline',
),
html.Span(
divider_text,
),
html.A(
'User guide',
href = 'guide',
style = styles['guide'],
className = 'dynamicunderline',
),
html.Span(
divider_text,
),
html.A(
'About',
href = 'about',
style = styles['about'],
className = 'dynamicunderline',
),
],
style = dict(
marginBottom = 20,
textAlign = 'center',
),
className = 'row',
),
],
)
| 23.010204
| 59
| 0.446563
| 184
| 2,255
| 5.336957
| 0.380435
| 0.056008
| 0.118126
| 0.13442
| 0.248473
| 0.199593
| 0.199593
| 0.199593
| 0
| 0
| 0
| 0.005495
| 0.435033
| 2,255
| 98
| 60
| 23.010204
| 0.764521
| 0
| 0
| 0.438202
| 0
| 0
| 0.13988
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011236
| false
| 0
| 0.044944
| 0
| 0.067416
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f986bf3f2c420c696f6c53ea84f10ad7ccfa26ea
| 1,526
|
py
|
Python
|
demos/pandas/pandas_concat&append.py
|
szj2ys/deal_with_the_tasks_and_challenges
|
94b9f4aad26c7e2ec5a59cf67e9e977bfa3d5221
|
[
"Apache-2.0"
] | null | null | null |
demos/pandas/pandas_concat&append.py
|
szj2ys/deal_with_the_tasks_and_challenges
|
94b9f4aad26c7e2ec5a59cf67e9e977bfa3d5221
|
[
"Apache-2.0"
] | null | null | null |
demos/pandas/pandas_concat&append.py
|
szj2ys/deal_with_the_tasks_and_challenges
|
94b9f4aad26c7e2ec5a59cf67e9e977bfa3d5221
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
# Define the datasets
df1 = pd.DataFrame(np.ones((3, 4)) * 0, columns=['a', 'b', 'c', 'd'])
df2 = pd.DataFrame(np.ones((3, 4)) * 1, columns=['a', 'b', 'c', 'd'])
df3 = pd.DataFrame(np.ones((3, 4)) * 2, columns=['a', 'b', 'c', 'd'])
# concat merges frames; axis=0 stacks vertically, axis=1 joins horizontally
res = pd.concat([df1, df2, df3], axis=1)
# Print the result
print(res)
# With ignore_index=True the row index is renumbered in ascending order (0-8)
res = pd.concat([df1, df2, df3], axis=0, ignore_index=True)
# Print the result
print(res)
# Define the datasets
df1 = pd.DataFrame(np.ones((3, 4)) * 0,
columns=['a', 'b', 'c', 'd'],
index=[1, 2, 3])
df2 = pd.DataFrame(np.ones((3, 4)) * 1,
columns=['b', 'c', 'd', 'e'],
index=[2, 3, 4])
# Vertical "outer" merge of df1 and df2; join defaults to 'outer': shared
# columns line up, non-shared columns are filled with NaN
res = pd.concat([df1, df2], axis=0, join='outer')
# Print the result
print(res)
# Vertical "inner" merge of df1 and df2; with join='inner', columns not
# shared by both frames are dropped
res = pd.concat([df1, df2], axis=0, join='inner')
# Print the result
print(res)
# Define the datasets
df1 = pd.DataFrame(np.ones((3, 4)) * 0, columns=['a', 'b', 'c', 'd'])
df2 = pd.DataFrame(np.ones((3, 4)) * 1, columns=['a', 'b', 'c', 'd'])
df3 = pd.DataFrame(np.ones((3, 4)) * 1, columns=['a', 'b', 'c', 'd'])
s1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
# Append df2 below df1, resetting the index, and print the result
res = df1.append(df2, ignore_index=True) # note: append only merges vertically
print(res)
# Append several frames: merge df2 and df3 below df1, reset the index, print
res = df1.append([df2, df3], ignore_index=True)
print(res)
# Append a Series: merge s1 into df1, reset the index, print
res = df1.append(s1, ignore_index=True)
print(res)
| 26.310345
| 69
| 0.593054
| 244
| 1,526
| 3.688525
| 0.25
| 0.022222
| 0.03
| 0.151111
| 0.598889
| 0.505556
| 0.438889
| 0.385556
| 0.327778
| 0.294444
| 0
| 0.062205
| 0.167759
| 1,526
| 57
| 70
| 26.77193
| 0.646457
| 0.201835
| 0
| 0.37931
| 0
| 0
| 0.038238
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.068966
| 0
| 0.068966
| 0.241379
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9873ed126ec1fdd07f8e281595ede0f1e5dfbf6
| 4,128
|
py
|
Python
|
python-django/oauth2demo/oauth/core/oauthclient.py
|
SequencingDOTcom/oAuth2-demo
|
609bd138cff07643a0c3e1df48f4f2e4adc9be34
|
[
"MIT"
] | 1
|
2020-11-05T22:16:37.000Z
|
2020-11-05T22:16:37.000Z
|
python-django/oauth2demo/oauth/core/oauthclient.py
|
SequencingDOTcom/oAuth2-demo
|
609bd138cff07643a0c3e1df48f4f2e4adc9be34
|
[
"MIT"
] | 3
|
2018-02-24T15:01:20.000Z
|
2021-11-29T17:29:02.000Z
|
python-django/oauth2demo/oauth/core/oauthclient.py
|
SequencingDOTcom/oAuth2-demo
|
609bd138cff07643a0c3e1df48f4f2e4adc9be34
|
[
"MIT"
] | 3
|
2017-04-06T01:38:20.000Z
|
2017-05-17T09:44:35.000Z
|
import urllib
import sched
import time
from threading import Thread
from .token import Token # explicit relative import; avoids shadowing the stdlib 'token' module
from ..utils.http import do_basic_secure_post
from ..exceptions.exceptions import BasicAuthenticationFailedException
class DefaultSequencingOAuth2Client(object):
# Attribute for value of redirect url
ATTR_REDIRECT_URL = "redirect_uri"
# Attribute for value of response type
ATTR_RESPONSE_TYPE = "response_type"
# Attribute for value state
ATTR_STATE = "state"
# Attribute for value client id
ATTR_CLIENT_ID = "client_id"
# Attribute for value scope
ATTR_SCOPE = "scope"
# Attribute for value code
ATTR_CODE = "code"
# Attribute for value refresh token
ATTR_REFRESH_TOKEN = "refresh_token"
# Attribute for access token
ATTR_ACCESS_TOKEN = "access_token"
# Attribute for value grant type
ATTR_GRANT_TYPE = "grant_type"
# Attribute for value expires in
ATTR_EXPIRES_IN = "expires_in"
def __init__(self, auth_parameters):
self.auth_parameters = auth_parameters
self.token = None
self._token_refresher = None
def http_redirect_parameters(self):
attributes = {
self.ATTR_REDIRECT_URL: self.auth_parameters.redirect_uri,
self.ATTR_RESPONSE_TYPE: self.auth_parameters.response_type,
self.ATTR_STATE: self.auth_parameters.state,
self.ATTR_CLIENT_ID: self.auth_parameters.client_id,
self.ATTR_SCOPE: self.auth_parameters.scope
}
return attributes
def login_redirect_url(self):
params = urllib.urlencode(self.http_redirect_parameters())
return '%s?%s' % (self.auth_parameters.oauth_authorization_uri, params)
def authorize(self, response_code, response_state):
if response_state != self.auth_parameters.state:
raise ValueError("Invalid state parameter")
uri = self.auth_parameters.oauth_token_uri
params = {
self.ATTR_GRANT_TYPE: self.auth_parameters.grant_type,
self.ATTR_CODE: response_code,
self.ATTR_REDIRECT_URL: self.auth_parameters.redirect_uri
}
result = do_basic_secure_post(uri, self.auth_parameters, params)
if result is None:
raise BasicAuthenticationFailedException("Failure authentication.")
access_token = result[self.ATTR_ACCESS_TOKEN]
refresh_token = result[self.ATTR_REFRESH_TOKEN]
timelife = int(result[self.ATTR_EXPIRES_IN])
self.token = Token(access_token, refresh_token, timelife)
self._token_refresher = self.__TokenRefresher(self, timelife - 60)
self._token_refresher.start()
return self.token
def is_authorized(self):
return (self.token is not None) and (self.token.lifetime != 0)
def _refresh_token(self):
uri = self.auth_parameters.oauth_token_refresh_uri
params = {
self.ATTR_GRANT_TYPE: self.auth_parameters.grant_type_refresh_token,
self.ATTR_REFRESH_TOKEN: self.token.refresh_token
}
result = do_basic_secure_post(uri, self.auth_parameters, params)
if result is None:
# 'result' is None here, so concatenating it into the message would
# itself raise a TypeError; report the failure directly instead
raise BasicAuthenticationFailedException(
"Authentication against backend failed: token refresh returned no result.")
access_token = result[self.ATTR_ACCESS_TOKEN]
refresh_token = self.token.refresh_token
timelife = int(result[self.ATTR_EXPIRES_IN])
self.token = Token(access_token, refresh_token, timelife)
class __TokenRefresher(Thread):
def __init__(self, outer, frequency):
Thread.__init__(self)
self.outer = outer
self.frequency = frequency
self.scheduler = sched.scheduler(time.time, time.sleep)
def run(self):
self.scheduler.enter(self.frequency, 1, self.__run_refresh_token, ())
self.scheduler.run()
def __run_refresh_token(self):
self.outer._refresh_token()
self.scheduler.enter(self.frequency, 1, self.__run_refresh_token, ())
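# Flow sketch (auth_parameters and callback values are hypothetical):
#   client = DefaultSequencingOAuth2Client(auth_parameters)
#   redirect_to = client.login_redirect_url()  # send the user's browser here
#   token = client.authorize(code, state)      # on the OAuth callback
#   client.is_authorized()                     # True while the token is live
# A background __TokenRefresher thread then renews the token ~60s before expiry.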
| 34.115702
| 96
| 0.678537
| 480
| 4,128
| 5.520833
| 0.18125
| 0.076981
| 0.108679
| 0.031698
| 0.344906
| 0.323774
| 0.28
| 0.28
| 0.28
| 0.207547
| 0
| 0.001929
| 0.246609
| 4,128
| 120
| 97
| 34.4
| 0.850161
| 0.073401
| 0
| 0.148148
| 0
| 0
| 0.053487
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.08642
| 0.012346
| 0.395062
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f987b372c8da570186369c27352bfdc8a2dc0b25
| 1,019
|
py
|
Python
|
commands/elastic/utils.py
|
surfedushare/search-portal
|
f5486d6b07b7b04a46ce707cee5174db4f8da222
|
[
"MIT"
] | 2
|
2021-08-19T09:40:59.000Z
|
2021-12-14T11:08:20.000Z
|
commands/elastic/utils.py
|
surfedushare/search-portal
|
708a0d05eee13c696ca9abd7e84ab620d3900fbe
|
[
"MIT"
] | 159
|
2020-05-14T14:17:34.000Z
|
2022-03-23T10:28:13.000Z
|
commands/elastic/utils.py
|
nppo/search-portal
|
aedf21e334f178c049f9d6cf37cafd6efc07bc0d
|
[
"MIT"
] | 1
|
2021-11-11T13:37:22.000Z
|
2021-11-11T13:37:22.000Z
|
from elasticsearch import Elasticsearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
import boto3
def get_es_client(conn, silent=False):
"""
Returns the elasticsearch client connected through port forwarding settings
"""
elastic_url = "https://localhost:9222"
protocol_config = {
"scheme": "https",
"port": 9222,
"use_ssl": True,
"verify_certs": False,
}
credentials = boto3.Session(profile_name=conn.aws.profile_name).get_credentials()
http_auth = AWS4Auth(credentials.access_key, credentials.secret_key, "eu-central-1", "es",
session_token=credentials.token)
es_client = Elasticsearch(
[elastic_url],
http_auth=http_auth,
connection_class=RequestsHttpConnection,
**protocol_config
)
# test if it works
if not silent and not es_client.cat.health(request_timeout=30):
raise ValueError('Credentials do not work for Elastic search')
return es_client
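# Usage sketch ('conn' is assumed to expose conn.aws.profile_name, as used above):
#   es = get_es_client(conn)  # raises ValueError if the tunnelled cluster is down
#   print(es.cat.indices())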
| 31.84375
| 94
| 0.684004
| 115
| 1,019
| 5.86087
| 0.591304
| 0.047478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02033
| 0.227674
| 1,019
| 31
| 95
| 32.870968
| 0.836086
| 0.091266
| 0
| 0
| 0
| 0
| 0.123077
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.130435
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f98c3635dfd0d3ae569222c031b018e24dab8ea9
| 1,322
|
py
|
Python
|
sendemail/views.py
|
sami-sinnari/MilestoneProject4
|
4a66f5cd5e44e9ff4dbaeeb3e8733c0e2db6629e
|
[
"W3C",
"PostgreSQL"
] | null | null | null |
sendemail/views.py
|
sami-sinnari/MilestoneProject4
|
4a66f5cd5e44e9ff4dbaeeb3e8733c0e2db6629e
|
[
"W3C",
"PostgreSQL"
] | null | null | null |
sendemail/views.py
|
sami-sinnari/MilestoneProject4
|
4a66f5cd5e44e9ff4dbaeeb3e8733c0e2db6629e
|
[
"W3C",
"PostgreSQL"
] | 1
|
2021-08-31T03:29:02.000Z
|
2021-08-31T03:29:02.000Z
|
from django.core.mail import send_mail, BadHeaderError
from django.http import HttpResponse
from django.shortcuts import render, redirect
from .forms import ContactForm
from profiles.models import UserProfile
def contactView(request):
if request.user.is_authenticated:
try:
profile = UserProfile.objects.get(user=request.user)
form = ContactForm(initial={
'from_email': profile.user.email,
})
except UserProfile.DoesNotExist:
form = ContactForm()
else:
form = ContactForm()
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
subject = form.cleaned_data['subject']
from_email = form.cleaned_data['from_email']
message = form.cleaned_data['message']
try:
send_mail(
subject, message, from_email, ['elsinnarisami@gmail.com'])
except BadHeaderError:
return HttpResponse('Invalid header found.')
return redirect('success')
context = {
'contact_page': 'active',
'form': form,
}
return render(request, "sendemail/contact.html", context)
def successView(request):
return render(request, "sendemail/contact_success.html")
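# Hedged wiring sketch (assumption, not from the original app): these views would
# typically be routed in the app's urls.py roughly like this; the route names
# 'contact' and 'success' match the redirect('success') call above.
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.contactView, name='contact'),
#     path('success/', views.successView, name='success'),
# ]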
| 31.47619
| 78
| 0.621785
| 132
| 1,322
| 6.128788
| 0.424242
| 0.074166
| 0.055624
| 0.069221
| 0.086527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.281392
| 1,322
| 41
| 79
| 32.243902
| 0.851579
| 0
| 0
| 0.114286
| 0
| 0
| 0.123298
| 0.056732
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.142857
| 0.028571
| 0.314286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f98cbeedab7f46e8e4601542568092c0c0c15c19
| 592
|
py
|
Python
|
tests/test_accounts.py
|
edgeee/buycoins-python
|
72a3130cf43d0c618e58418b3d8cb7ce73b0f133
|
[
"MIT"
] | 55
|
2021-02-02T22:09:37.000Z
|
2022-02-24T12:17:23.000Z
|
tests/test_accounts.py
|
edgeee/buycoins-python
|
72a3130cf43d0c618e58418b3d8cb7ce73b0f133
|
[
"MIT"
] | 2
|
2021-03-24T20:11:02.000Z
|
2021-04-27T13:13:27.000Z
|
tests/test_accounts.py
|
edgeee/buycoins-python
|
72a3130cf43d0c618e58418b3d8cb7ce73b0f133
|
[
"MIT"
] | 8
|
2021-02-08T17:06:53.000Z
|
2022-02-13T09:38:59.000Z
|
from tests.utils import _mock_gql
create_deposit_response = dict(
createDepositAccount=dict(
accountNumber="123",
accountName="john doe",
accountType="deposit",
bankName="Providus",
accountReference="ref",
)
)
def test_create_deposit():
from buycoins import accounts
_mock_gql(create_deposit_response)
acc = accounts.create_deposit("john doe")
    assert isinstance(acc, accounts.VirtualDepositAccountType)
assert acc.account_number == "123"
assert acc.account_reference == "ref"
assert acc.account_name == "john doe"
| 23.68
| 58
| 0.689189
| 63
| 592
| 6.253968
| 0.52381
| 0.13198
| 0.121827
| 0.101523
| 0.142132
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012876
| 0.212838
| 592
| 24
| 59
| 24.666667
| 0.832618
| 0
| 0
| 0
| 0
| 0
| 0.086149
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.055556
| false
| 0
| 0.111111
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f98fe6f86f42085c174f1fcd733ce2400cb3d904
| 2,696
|
py
|
Python
|
system_monitor_script/build_network.py
|
carabri/carabri
|
c8b94080331ab66ee116ee6e87e13e295e4f9604
|
[
"Apache-2.0"
] | null | null | null |
system_monitor_script/build_network.py
|
carabri/carabri
|
c8b94080331ab66ee116ee6e87e13e295e4f9604
|
[
"Apache-2.0"
] | null | null | null |
system_monitor_script/build_network.py
|
carabri/carabri
|
c8b94080331ab66ee116ee6e87e13e295e4f9604
|
[
"Apache-2.0"
] | null | null | null |
import re
import networkx
import itertools
import argparse
import json
def build_network(filename,outfile,mb_threshold):
    regex = re.compile(r'([0-9]+) kB: (.*)(?=\()')  # raw string avoids the invalid "\(" escape warning
network_list = []
current_network = []
with open(filename, 'r') as memlog:
ignore_line = True
for line in memlog.readlines():
if not ignore_line:
result = regex.search(line)
if (result is not None):
current_network.append((result.group(1), result.group(2),))
if 'Total PSS by process:' in line:
ignore_line = False
if 'Total PSS by OOM adjustment:' in line:
ignore_line = True
if 'SAMPLE_TIME:' in line:
edges = itertools.combinations(current_network,2)
g = networkx.Graph()
g.add_nodes_from(current_network)
g.add_edges_from(edges)
current_network = []
network_list.append(g)
G = networkx.Graph()
for n in network_list:
for i in n.nodes():
            if int(i[0]) > mb_threshold:  # if it's using more than the memory threshold
if i[1] not in G.nodes():
#include it in the summary graph
G.add_node(i[1])
for j in n.neighbors(i):
if int(j[0]) > mb_threshold:
w = int(i[0])+int(j[0])
                        # the original wrapped the node names in single-element lists,
                        # so the membership test never matched; compare plain node pairs
                        if (i[1], j[1]) in G.edges():
                            G[i[1]][j[1]]['weight'] += w
                        elif (j[1], i[1]) in G.edges():
                            G[j[1]][i[1]]['weight'] += w
else:
G.add_edge(i[1],j[1],weight=w)
# write result to edge list (CSV-type file)
networkx.write_edgelist(G, outfile, data=['weight'])
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='Build a network from the given usage log file, then write it to an edge list.')
argparser.add_argument('--filename',type=str,help='provide the memory log file for building the network. Defaults to ./memory.log.sample',default='./memory.log.sample')
argparser.add_argument('--outfile',type=str,help='specify the desired path/name for the output edge list. Defaults to ./example.edgelist',default='./example.edgelist')
argparser.add_argument('--threshold',type=int,help='specify the minimum memory threshold (in MB) of the processes used in the final network. Defaults to 1000',default=1000)
args = argparser.parse_args()
build_network(args.filename,args.outfile,args.threshold)
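# Hedged follow-up sketch (assumption, not in the original script): the edge list
# written above can be read back with networkx for downstream analysis, assuming
# the node labels contain no whitespace that would confuse the default parser.
#
# import networkx
# G = networkx.read_edgelist('./example.edgelist', data=[('weight', int)])
# top_pairs = sorted(G.edges(data='weight'), key=lambda e: e[2], reverse=True)[:10]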
| 42.125
| 177
| 0.555267
| 346
| 2,696
| 4.219653
| 0.33237
| 0.009589
| 0.006164
| 0.008219
| 0.028767
| 0.015068
| 0
| 0
| 0
| 0
| 0
| 0.015899
| 0.323442
| 2,696
| 64
| 178
| 42.125
| 0.784539
| 0.043398
| 0
| 0.08
| 0
| 0.02
| 0.207218
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.1
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f98fff79925e8d8be7997e37273c0b32991d86de
| 1,428
|
py
|
Python
|
BigO/insertion_sort.py
|
jeff-lund/CS350
|
7227b53275c62d45e899a531136a96a866c32a16
|
[
"MIT"
] | 2
|
2019-10-16T01:59:42.000Z
|
2019-11-13T19:25:00.000Z
|
BigO/insertion_sort.py
|
jeff-lund/CS350
|
7227b53275c62d45e899a531136a96a866c32a16
|
[
"MIT"
] | null | null | null |
BigO/insertion_sort.py
|
jeff-lund/CS350
|
7227b53275c62d45e899a531136a96a866c32a16
|
[
"MIT"
] | 2
|
2019-10-16T01:59:49.000Z
|
2019-11-15T01:19:18.000Z
|
from sys import argv
from random import randint
from time import time
import matplotlib.pyplot as plt
def insertion_sort(arr):
n = len(arr)
for i in range(1, n):
v = arr[i]
j = i - 1
while j >= 0 and arr[j] > v:
arr[j + 1] = arr[j]
j -= 1
arr[j + 1] = v
if __name__ == '__main__':
if len(argv) > 1:
sz = int(argv[1])
arr = [randint(1, 1000) for _ in range(sz)]
#print(arr)
start = time()
insertion_sort(arr)
end = time()
#print(arr)
print(end - start)
else:
# performs automated testing
x = []
y = []
sizes = [10, 50, 100, 200, 500, 1000, 1200, 1500, 2000, 2500, 3000, 5000, 6000, 7000, 8000, 9000, 10000, 20000, 30000, 40000]
for sz in sizes:
t = 0
print("running size", sz)
for _ in range(10):
arr = [randint(1, 10000) for _ in range(sz)]
start = time()
insertion_sort(arr)
end = time()
t += (end - start) * 1000
x.append(sz)
y.append(t // 10)
# Plot results of tests
plt.plot(x, y)
plt.xlabel("n (size of array)")
plt.ylabel("time (ms)")
plt.show()
#plt.savefig("python_running_times.png", format='png')
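# Hedged correctness check (not in the original): insertion_sort works in place,
# so comparing against Python's built-in sorted() on a copy is a quick sanity test.
#
# data = [randint(1, 1000) for _ in range(100)]
# expected = sorted(data)
# insertion_sort(data)
# assert data == expected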
| 28.56
| 134
| 0.459384
| 182
| 1,428
| 3.516484
| 0.43956
| 0.04375
| 0.075
| 0.01875
| 0.1
| 0.1
| 0.1
| 0
| 0
| 0
| 0
| 0.126202
| 0.417367
| 1,428
| 49
| 135
| 29.142857
| 0.643029
| 0.085434
| 0
| 0.15
| 0
| 0
| 0.036741
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.1
| 0
| 0.125
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f994d4e53b4b2fdca4fdd7080892fff0acd2645e
| 638
|
py
|
Python
|
tasks.py
|
RevolutionTech/revolutiontech.ca
|
a3f0f1526812554938674c4fc9e7ea90ed4ffe6d
|
[
"0BSD"
] | null | null | null |
tasks.py
|
RevolutionTech/revolutiontech.ca
|
a3f0f1526812554938674c4fc9e7ea90ed4ffe6d
|
[
"0BSD"
] | 171
|
2017-11-02T05:39:37.000Z
|
2022-03-07T01:13:53.000Z
|
tasks.py
|
RevolutionTech/carrier-owl
|
f72f47e39ea819681fa7b50de2b52e393edeeb96
|
[
"0BSD"
] | 1
|
2018-01-13T08:11:26.000Z
|
2018-01-13T08:11:26.000Z
|
from invoke import Collection, task
from opstrich.invoke import check, openssl
@task
def deploy(c):
"""
Build and run a Docker container to deploy.
"""
c.run("docker build -t zappa-lambda .")
c.run("docker run -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY zappa-lambda")
c.run(
"DJANGO_CONFIGURATION=ProdCollectStaticConfig poetry run python manage.py collectstatic --noinput"
)
@task
def ci_deploy(c):
"""
Perform pre-deploy steps needed in CI and then deploy.
"""
openssl.decrypt(c, "zappa_settings.json")
deploy(c)
namespace = Collection(check, openssl, deploy, ci_deploy)
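# Hedged usage note (assumption about the surrounding setup): with Invoke
# installed, these tasks are normally run from the shell as `invoke deploy` or
# `invoke ci-deploy`; the equivalent programmatic call would look roughly like:
#
# from invoke import Context
# ci_deploy(Context())  # decrypts zappa_settings.json, then runs deploy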
| 23.62963
| 106
| 0.694357
| 88
| 638
| 4.920455
| 0.534091
| 0.064665
| 0.046189
| 0.069284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.202194
| 638
| 26
| 107
| 24.538462
| 0.850688
| 0.153605
| 0
| 0.142857
| 0
| 0
| 0.420432
| 0.127701
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f994d8e1e290868506a6fadbde009edec090ad8b
| 2,490
|
py
|
Python
|
bear/scripts/lunar_lander/remove_data.py
|
junmokane/AI602_Project
|
59c132ae04751f9fb6cf6ebb491042cbf4de003d
|
[
"Apache-2.0"
] | 1
|
2020-10-14T05:51:36.000Z
|
2020-10-14T05:51:36.000Z
|
bear/scripts/lunar_lander/remove_data.py
|
junmokane/AI602_Project
|
59c132ae04751f9fb6cf6ebb491042cbf4de003d
|
[
"Apache-2.0"
] | null | null | null |
bear/scripts/lunar_lander/remove_data.py
|
junmokane/AI602_Project
|
59c132ae04751f9fb6cf6ebb491042cbf4de003d
|
[
"Apache-2.0"
] | null | null | null |
import torch
import numpy as np
import copy
def remove(path):
data = torch.load(path)
location_list, action_list = [np.reshape(st[0], (1, 8)) for st in data], [st[1] for st in data]
location_list = np.concatenate(location_list, axis=0)
action_list = np.asarray(action_list)
action_0 = action_list == 0
action_1 = action_list == 1
action_2 = action_list == 2
action_3 = action_list == 3
location_0 = location_list[action_0, :]
location_1 = location_list[action_1, :]
location_2 = location_list[action_2, :]
location_3 = location_list[action_3, :]
action_0 = action_list[action_list == 0]
action_1 = action_list[action_list == 1]
action_2 = action_list[action_list == 2]
action_3 = action_list[action_list == 3]
action_l = copy.deepcopy([action_0, action_1, action_2, action_3])
location_l = copy.deepcopy([location_0, location_1, location_2, location_3])
a_hori, l_hori = [], []
for a, l in zip(action_l, location_l):
a = a[l[:, 0] > 0.1]
l = l[l[:, 0] > 0.1]
a_hori.append(a)
l_hori.append(l)
location_hori = np.concatenate(l_hori, axis=0)
action_hori = np.concatenate(a_hori, axis=0)
print("horizontal : ", location_hori.shape, action_hori.shape)
# Vertical
action_l = copy.deepcopy([action_0, action_1, action_2, action_3])
location_l = copy.deepcopy([location_0, location_1, location_2, location_3])
a_verti, l_verti = [], []
for a, l in zip(action_l, location_l):
a = a[l[:, 1] < 0.8]
l = l[l[:, 1] < 0.8]
a_verti.append(a)
l_verti.append(l)
location_verti = np.concatenate(l_verti, axis=0)
action_verti = np.concatenate(a_verti, axis=0)
print("vertical : ", location_verti.shape, action_verti.shape)
# Save
for i, (a, l) in enumerate(zip(a_verti, l_verti)):
torch.save([l, a], f"/home/seungjae/Desktop/lunarlander/replay_buffer_vertical_{i}.pt")
for i, (a, l) in enumerate(zip(a_hori, l_hori)):
torch.save([l, a], f"/home/seungjae/Desktop/lunarlander/replay_buffer_horizontal_{i}.pt")
# torch.save([location_hori, action_hori], "/home/seungjae/Desktop/lunarlander/replay_buffer_horizontal.pt")
# torch.save([location_verti, action_verti], "/home/seungjae/Desktop/lunarlander/replay_buffer_vertical.pt")
if __name__ == "__main__":
path = "/home/seungjae/Desktop/lunarlander/replay_buffer.pt"
remove(path)
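# Hedged follow-up sketch (assumption, not in the original): each saved .pt file
# holds a [locations, actions] pair, so loading one back for inspection would be:
#
# locations, actions = torch.load("/home/seungjae/Desktop/lunarlander/replay_buffer_vertical_0.pt")
# print(locations.shape, actions.shape)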
| 35.070423
| 112
| 0.65743
| 376
| 2,490
| 4.06383
| 0.135638
| 0.098168
| 0.058901
| 0.098168
| 0.47644
| 0.47644
| 0.448953
| 0.27356
| 0.246073
| 0.246073
| 0
| 0.030839
| 0.205622
| 2,490
| 71
| 113
| 35.070423
| 0.741658
| 0.091165
| 0
| 0.122449
| 0
| 0
| 0.09429
| 0.080124
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020408
| false
| 0
| 0.061224
| 0
| 0.081633
| 0.040816
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9990629b2219f3dd1a7da2e2230e3d0eb99c9a8
| 2,435
|
py
|
Python
|
flex/http/negotiators.py
|
centergy/flex
|
4fc11d3ad48e4b5016f53256015e3eed2157daae
|
[
"MIT"
] | null | null | null |
flex/http/negotiators.py
|
centergy/flex
|
4fc11d3ad48e4b5016f53256015e3eed2157daae
|
[
"MIT"
] | null | null | null |
flex/http/negotiators.py
|
centergy/flex
|
4fc11d3ad48e4b5016f53256015e3eed2157daae
|
[
"MIT"
] | null | null | null |
"""
Content negotiation deals with selecting an appropriate renderer given the
incoming request. Typically this will be based on the request's Accept header.
"""
import flask as fl
from . import exc
class BaseContentNegotiator(object):
def get_accept_mimetypes(self, request=None):
"""Given the incoming request, return a list of mimetypes this client
supports as :class:`~werkzeug.datastructures.MIMEAccept` object.
"""
return (request or fl.request).accept_mimetypes
def select_renderer(self, renderers, mimetype=None, prefer=None, request=None):
raise NotImplementedError('.select_renderer() must be implemented')
class DefaultContentNegotiator(BaseContentNegotiator):
def _get_precedence(self, accepts, mimetype):
ix = accepts.find(mimetype)
if ix < 0: return 0
v = accepts[ix][0]
if '/' not in v: return 0
vtype, vsubtype = v == '*' and ('*', '*') or v.split('/', 1)
vtype, vsubtype = vtype.strip(), vsubtype.strip()
if not vtype or not vsubtype or (vtype == '*' and vsubtype != '*'):
return 0
elif vtype == '*':
return 1
elif vsubtype == '*':
return 2
else:
return 3
def _filter_renderers(self, accepts, renderers):
best_quality = -1
for r in renderers:
q = accepts.quality(r.mimetype)
if q > 0 and q >= best_quality:
p = self._get_precedence(accepts, r.mimetype)
best_quality = q
yield r, q, p
def best_matches(self, accepts, renderers, limit=None):
rv = []
best_quality = -1
renderers = self._filter_renderers(accepts, renderers)
for renderer, quality, precedence in sorted(renderers, key=lambda i: i[1:], reverse=True):
if quality < best_quality or (limit and len(rv) >= limit):
break
best_quality = quality
rv.append((renderer, renderer.mimetype))
return rv
def select_renderer(self, renderers, mimetype=None, prefer=None, request=None):
"""Given the incoming request and a list of renderers, return a
two-tuple of: (renderer, mimetype).
"""
accepts = self.get_accept_mimetypes(request)
if mimetype:
if accepts.quality(mimetype) > 0:
for renderer in renderers:
if renderer.mimetype == mimetype:
return renderer, mimetype
raise exc.NotAcceptable()
renderers = self.best_matches(accepts, renderers)
if renderers:
if prefer:
for renderer, mimetype in renderers:
if mimetype == prefer:
return renderer, mimetype
return renderers[0]
raise exc.NotAcceptable()
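# Hedged usage sketch (not part of the original module): `DummyRenderer` and
# `DummyRequest` are hypothetical stand-ins; werkzeug's MIMEAccept mimics a
# parsed Accept header so the negotiator can be exercised without a Flask app.
#
# from werkzeug.datastructures import MIMEAccept
#
# class DummyRenderer:
#     def __init__(self, mimetype):
#         self.mimetype = mimetype
#
# class DummyRequest:
#     accept_mimetypes = MIMEAccept([("application/json", 1.0), ("text/html", 0.5)])
#
# negotiator = DefaultContentNegotiator()
# renderer, mimetype = negotiator.select_renderer(
#     [DummyRenderer("text/html"), DummyRenderer("application/json")],
#     request=DummyRequest())
# # picks the JSON renderer, since it carries the highest quality value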
| 27.988506
| 92
| 0.70308
| 321
| 2,435
| 5.258567
| 0.299065
| 0.0391
| 0.028436
| 0.040877
| 0.108412
| 0.108412
| 0.074645
| 0.074645
| 0.074645
| 0.074645
| 0
| 0.00758
| 0.187269
| 2,435
| 86
| 93
| 28.313953
| 0.845376
| 0.15729
| 0
| 0.140351
| 0
| 0
| 0.023221
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.035088
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f99b06a71dceb4f5f766c0212d0a17f6f162c70a
| 539
|
py
|
Python
|
uva_answers/10018/main.py
|
andriisoldatenko/fan
|
e7ed6ea0f39bd71af4e286af8d81ebc137ae8ff4
|
[
"MIT"
] | 6
|
2018-11-18T15:00:02.000Z
|
2022-03-23T21:32:24.000Z
|
uva_answers/10018/main.py
|
andriisoldatenko/leetcode
|
8fef4da00234f8acbea9b71ee730b2267b70395f
|
[
"MIT"
] | null | null | null |
uva_answers/10018/main.py
|
andriisoldatenko/leetcode
|
8fef4da00234f8acbea9b71ee730b2267b70395f
|
[
"MIT"
] | null | null | null |
import pprint
import sys
import re
FILE = sys.stdin
#FILE = open('sample.in')
def is_palindrome(n):
k = str(n)
return list(k) == list(reversed(k))
def reverse_add(n):
return n + int(str(n)[::-1])
#import ipdb;ipdb.set_trace()
test_cases = range(int(FILE.readline()))
#import ipdb; ipdb.set_trace()
for tc in test_cases:
n = int(FILE.readline().strip())
total_sum_count = 1
n = reverse_add(n)
while not is_palindrome(n):
n = reverse_add(n)
total_sum_count += 1
print(total_sum_count, n)
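# Hedged worked example (not in the original): starting from 87, the
# reverse-and-add loop reaches a palindrome in 4 steps:
# 87 + 78 = 165 -> 165 + 561 = 726 -> 726 + 627 = 1353 -> 1353 + 3531 = 4884
# so the program would print "4 4884" for the input value 87.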
| 20.730769
| 40
| 0.64564
| 88
| 539
| 3.784091
| 0.431818
| 0.09009
| 0.099099
| 0.102102
| 0.132132
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007026
| 0.207792
| 539
| 25
| 41
| 21.56
| 0.772834
| 0.152134
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.166667
| 0.055556
| 0.388889
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f99fbafb536fdd0dcee45c41f8a1a58e47ef7f46
| 1,086
|
py
|
Python
|
2dtree/bin/bpy_validation.py
|
LeanderSilur/Snippets
|
3ff718f91439450bd5aff13342aa98a9d4957e85
|
[
"MIT"
] | 9
|
2020-02-04T05:41:09.000Z
|
2022-03-08T06:14:54.000Z
|
2dtree/bin/bpy_validation.py
|
LeanderSilur/Snippets
|
3ff718f91439450bd5aff13342aa98a9d4957e85
|
[
"MIT"
] | 2
|
2020-06-14T19:58:01.000Z
|
2021-07-04T14:21:33.000Z
|
2dtree/bin/bpy_validation.py
|
LeanderSilur/Snippets
|
3ff718f91439450bd5aff13342aa98a9d4957e85
|
[
"MIT"
] | 2
|
2020-07-29T19:54:44.000Z
|
2020-07-29T20:00:24.000Z
|
import bpy
import subprocess
REBUILD = 0
if REBUILD:
subprocess.call([
"g++",
bpy.path.abspath('//../main.cpp'),
bpy.path.abspath('//../PtTree.cpp'),
"-o",
bpy.path.abspath('//PtTree')
])
# Collect the input data.
verts = bpy.data.meshes['PointCloud'].vertices
query_amount = 5
query_obj = bpy.data.objects['Search']
query_pos = query_obj.location
query_radius = query_obj.dimensions[0] / 2
points = [str(v.co.x) + ',' + str(v.co.y) for v in verts]
args = [
bpy.path.abspath('//PtTree.exe'),
str(query_amount),
str(query_radius),
str(query_pos.x) + ',' + str(query_pos.y),
*points
]
# Make the call.
proc = subprocess.run(args, encoding='utf-8', stdout=subprocess.PIPE)
stdout = proc.stdout.split('\n')
[print(line) for line in stdout]
ids = [int(line.split(" ")[0]) for line in stdout]
# Visualize the output.
bpy.ops.object.mode_set(mode="OBJECT")
for i in range(len(verts)):
verts[i].select = False
if i in ids:
verts[i].select = True
bpy.ops.object.mode_set(mode="EDIT")
| 22.163265
| 69
| 0.621547
| 159
| 1,086
| 4.169811
| 0.440252
| 0.042232
| 0.084465
| 0.090498
| 0.069382
| 0.069382
| 0
| 0
| 0
| 0
| 0
| 0.006912
| 0.200737
| 1,086
| 49
| 70
| 22.163265
| 0.756912
| 0.055249
| 0
| 0
| 0
| 0
| 0.086999
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9a216fdaa67e5fc6913f5aa98c379d5a25e120e
| 10,274
|
py
|
Python
|
zgrab2_schemas/zgrab2/ssh.py
|
aspacewalz/zgrab2
|
d9ed4f141dae102d65ba1e08bf2eb4179678d172
|
[
"Apache-2.0"
] | 1,031
|
2016-11-29T15:12:05.000Z
|
2022-03-31T05:02:50.000Z
|
zgrab2_schemas/zgrab2/ssh.py
|
vl4deee11/zgrab2
|
c859e9ef1173955dadae88416289ef8cc8910495
|
[
"Apache-2.0"
] | 191
|
2017-07-24T17:27:57.000Z
|
2022-03-16T04:59:59.000Z
|
zgrab2_schemas/zgrab2/ssh.py
|
vl4deee11/zgrab2
|
c859e9ef1173955dadae88416289ef8cc8910495
|
[
"Apache-2.0"
] | 230
|
2017-11-14T07:25:57.000Z
|
2022-03-31T04:20:46.000Z
|
# zschema sub-schema for zgrab2's ssh module (modules/ssh.go)
# Registers zgrab2-ssh globally, and ssh with the main zgrab2 schema.
from zschema.leaves import *
from zschema.compounds import *
import zschema.registry
import zcrypto_schemas.zcrypto as zcrypto
from . import zgrab2
# NOTE: Despite the fact that we have e.g. "supportedHostKeyAlgos",
# "allSupportedCiphers", etc, including a different value is not syntactically
# incorrect...so all of the following algorithm identifiers are Strings with
# examples=[...], rather than Enums with values=[...].
# lib/ssh/common.go -- allSupportedKexAlgos
KexAlgorithm = String.with_args(
doc="An ssh key exchange algorithm identifier, named according to section 6 of https://www.ietf.org/rfc/rfc4251.txt; see https://www.iana.org/assignments/ssh-parameters/ssh-parameters.xhtml#ssh-parameters-15 for standard values.",
examples=[
"diffie-hellman-group1-sha1",
"diffie-hellman-group14-sha1",
"ecdh-sha2-nistp256",
"ecdh-sha2-nistp384",
"ecdh-sha2-nistp521",
"curve25519-sha256@libssh.org",
"diffie-hellman-group-exchange-sha1",
"diffie-hellman-group-exchange-sha256",
]
)
KexAlgorithms = ListOf.with_args(KexAlgorithm())
# Defined in lib/ssh/common.go -- supportedHostKeyAlgos, though they are
# generated via PublicKey.Type()
KeyAlgorithm = String.with_args(
doc="An ssh public key algorithm identifier, named according to section 6 of https://www.ietf.org/rfc/rfc4251.txt; see https://www.iana.org/assignments/ssh-parameters/ssh-parameters.xhtml#ssh-parameters-19 for standard values.",
examples=[
"ssh-rsa-cert-v01@openssh.com",
"ssh-dss-cert-v01@openssh.com",
"ecdsa-sha2-nistp256-cert-v01@openssh.com",
"ecdsa-sha2-nistp384-cert-v01@openssh.com",
"ecdsa-sha2-nistp521-cert-v01@openssh.com",
"ssh-ed25519-cert-v01@openssh.com",
"ssh-rsa",
"ssh-dss",
"ecdsa-sha2-nistp256",
"ecdsa-sha2-nistp384",
"ecdsa-sha2-nistp521",
"ssh-ed25519",
]
)
KeyAlgorithms = ListOf.with_args(KeyAlgorithm())
# From lib/ssh/common.go -- allSupportedCiphers
CipherAlgorithm = String.with_args(
doc="An ssh cipher algorithm identifier, named according to section 6 of https://www.ietf.org/rfc/rfc4251.txt; see https://www.iana.org/assignments/ssh-parameters/ssh-parameters.xhtml#ssh-parameters-16 for standard values.",
examples=[
"aes128-ctr", "aes192-ctr", "aes256-ctr", "aes128-gcm@openssh.com",
"aes128-cbc", "3des-cbc", "arcfour256", "arcfour128", "arcfour",
]
)
CipherAlgorithms = ListOf.with_args(CipherAlgorithm())
# From lib/ssh/common.go -- supportedMACs.
MACAlgorithm = String.with_args(
doc="An ssh MAC algorithm identifier, named according to section 6 of https://www.ietf.org/rfc/rfc4251.txt; see https://www.iana.org/assignments/ssh-parameters/ssh-parameters.xhtml#ssh-parameters-18 for standard values.",
examples=["hmac-sha2-256", "hmac-sha1", "hmac-sha1-96"]
)
MACAlgorithms = ListOf.with_args(MACAlgorithm())
# From lib/ssh/common.go -- supportedCompressions
CompressionAlgorithm = String.with_args(
doc="An ssh compression algorithm identifier, named according to section 6 of https://www.ietf.org/rfc/rfc4251.txt; see https://www.iana.org/assignments/ssh-parameters/ssh-parameters.xhtml#ssh-parameters-20 for standard values.",
examples=["none", "zlib"]
)
CompressionAlgorithms = ListOf.with_args(CompressionAlgorithm())
LanguageTag = String.with_args(doc="A language tag, as defined in https://www.ietf.org/rfc/rfc3066.txt.")
LanguageTags = ListOf.with_args(LanguageTag(), doc="A name-list of language tags in order of preference.")
# zgrab2/lib/ssh/messages.go: (Json)kexInitMsg
KexInitMessage = SubRecordType({
"cookie": Binary(),
"kex_algorithms": KexAlgorithms(doc="Key exchange algorithms used in the handshake."),
"host_key_algorithms": KeyAlgorithms(doc="Asymmetric key algorithms for the host key supported by the client."),
"client_to_server_ciphers": CipherAlgorithms(),
"server_to_client_ciphers": CipherAlgorithms(),
"client_to_server_macs": MACAlgorithms(),
"server_to_client_macs": MACAlgorithms(),
"client_to_server_compression": CompressionAlgorithms(),
"server_to_client_compression": CompressionAlgorithms(),
"client_to_server_languages": LanguageTags(),
"server_to_client_languages": LanguageTags(),
"first_kex_follows": Boolean(),
"reserved": Unsigned32BitInteger(),
})
# zgrab2/lib/ssh/log.go: EndpointId
EndpointID = SubRecordType({
"raw": String(),
"version": String(),
"software": String(),
"comment": String(),
})
# This could be merged into a single class with e.g. an analyzed param,
# but it's probably clearer to just duplicate it.
AnalyzedEndpointID = SubRecordType({
"raw": AnalyzedString(),
"version": String(),
"software": AnalyzedString(),
"comment": AnalyzedString(),
})
# zgrab2/lib/ssh/kex.go: kexResult
KexResult = SubRecordType({
"H": Binary(),
"K": Binary(),
"session_id": Binary()
})
# zgrab2/lib/ssh/keys.go: ed25519PublicKey
ED25519PublicKey = SubRecordType({
"public_bytes": Binary(),
})
# zgrab2/lib/ssh/kex.go: curve25519sha256JsonLogParameters (via curve25519sha256)
Curve25519SHA256Params = SubRecordType({
"client_public": Binary(required=False),
"client_private": Binary(required=False),
"server_public": Binary(required=False),
})
# zgrab2/lib/ssh/certs.go: JsonSignature
Signature = SubRecordType({
"parsed": SubRecord({
"algorithm": KeyAlgorithm(),
"value": Binary(),
}),
"raw": Binary(),
"h": Binary(),
})
# lib/ssh/kex.go: PublicKeyJsonLog, sans the certkey_public_key (since that would create a loop)
SSHPublicKey = SubRecordType({
"raw": Binary(),
"fingerprint_sha256": String(),
# TODO: Enum? Obviously must serialize to one of rsa/dsa/ecdsa/ed25519_public_key...
"algorithm": String(),
    # For compatibility with ztag
    "key_algorithm": String(),
"rsa_public_key": zcrypto.RSAPublicKey(),
"dsa_public_key": zcrypto.DSAPublicKey(),
"ecdsa_public_key": zcrypto.ECDSAPublicKey(),
"ed25519_public_key": ED25519PublicKey(),
})
# lib/ssh/certs.go: JsonCertType
CertType = SubRecordType({
"id": Unsigned32BitInteger(doc="The numerical certificate type value. 1 identifies user certificates, 2 identifies host certificates."),
"name": Enum(values=["USER", "HOST", "unknown"], doc="The human-readable name for the certificate type."),
})
# lib/ssh/certs.go: JsonCertificate
SSHPublicKeyCert = SubRecord.with_args({
# TODO: Use / include our cert type here, or maybe somewhere else in the response?
"certkey_public_key": SubRecord({
"nonce": Binary(),
# Note that this is not recursive, since SSHPublicKey() does not include certkey_public_key.
"key": SSHPublicKey(),
"serial": String(doc="The certificate serial number, encoded as a base-10 string."),
"cert_type": CertType(),
"key_id": String(doc="A free-form text field filled in by the CA at the time of signing, intended to identify the principal in log messages."),
"valid_principals": ListOf(String(), doc="Names for which this certificate is valid; hostnames for cert_type=HOST certificates and usernames for cert_type=USER certificates."),
"validity": SubRecord({
"valid_after": DateTime(doc="Timestamp of when certificate is first valid. Timezone is UTC."),
"valid_before": DateTime(doc="Timestamp of when certificate expires. Timezone is UTC."),
"length": Signed64BitInteger(),
}),
"reserved": Binary(),
"signature_key": SSHPublicKey(),
"signature": Signature(),
"parse_error": String(),
"extensions": SubRecord({
"known": SubRecord({
"permit_X11_forwarding": String(),
"permit_agent_forwarding": String(),
"permit_port_forwarding": String(),
"permit_pty": String(),
"permit_user_rc": String(),
}),
"unknown": ListOf(String()),
}),
"critical_options": SubRecord({
"known": SubRecord({
"force_command": String(),
"source_address": String(),
}),
"unknown": ListOf(String()),
})
})
}, extends=SSHPublicKey())
# zgrab2/lib/ssh/common.go: directionAlgorithms
DirectionAlgorithms = SubRecordType({
"cipher": CipherAlgorithm(),
"mac": MACAlgorithm(),
"compression": CompressionAlgorithm(),
})
# zgrab2/lib/ssh/kex.go: interface kexAlgorithm
# Searching usages of kexAlgorithm turns up:
# - dhGroup: dh_params, server_signature, server_host_key
# - ecdh: ecdh_params, server_signature, server_host_key
# - curve25519sha256: curve25519_sha256_params, server_signature, server_host_key
# - dhGEXSHA: dh_params, server_signature, server_host_key
KeyExchange = SubRecordType({
"curve25519_sha256_params": Curve25519SHA256Params(),
"ecdh_params": zcrypto.ECDHParams(),
"dh_params": zcrypto.DHParams(),
"server_signature": Signature(),
"server_host_key": SSHPublicKeyCert(),
})
# zgrab2/lib/ssh/common.go: algorithms (aux in MarshalJSON)
AlgorithmSelection = SubRecordType({
"dh_kex_algorithm": KexAlgorithm(),
"host_key_algorithm": KeyAlgorithm(),
"client_to_server_alg_group": DirectionAlgorithms(),
"server_to_client_alg_group": DirectionAlgorithms(),
})
# zgrab2/lib/ssh/log.go: HandshakeLog
# TODO: Can ssh re-use any of the generic TLS model?
ssh_scan_response = SubRecord({
"result": SubRecord({
"banner": WhitespaceAnalyzedString(),
"server_id": AnalyzedEndpointID(),
"client_id": EndpointID(),
"server_key_exchange": KexInitMessage(),
"client_key_exchange": KexInitMessage(),
"algorithm_selection": AlgorithmSelection(),
"key_exchange": KeyExchange(),
"userauth": ListOf(String()),
"crypto": KexResult(),
})
}, extends=zgrab2.base_scan_response)
zschema.registry.register_schema("zgrab2-ssh", ssh_scan_response)
zgrab2.register_scan_response_type("ssh", ssh_scan_response)
| 40.448819
| 234
| 0.694569
| 1,178
| 10,274
| 5.936333
| 0.304754
| 0.015444
| 0.01716
| 0.014014
| 0.198627
| 0.161876
| 0.114686
| 0.10439
| 0.10439
| 0.10439
| 0
| 0.030186
| 0.171306
| 10,274
| 253
| 235
| 40.608696
| 0.791167
| 0.199046
| 0
| 0.164894
| 0
| 0.042553
| 0.449152
| 0.088023
| 0
| 0
| 0
| 0.003953
| 0
| 1
| 0
| false
| 0
| 0.026596
| 0
| 0.026596
| 0.005319
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9a21f50645fbd8f00212379587d12cc0568bcd5
| 5,123
|
py
|
Python
|
app/main/views.py
|
Juru-10/blog
|
ac554952e884c35ce7dd50cf0ef9748a8da96a3f
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
Juru-10/blog
|
ac554952e884c35ce7dd50cf0ef9748a8da96a3f
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
Juru-10/blog
|
ac554952e884c35ce7dd50cf0ef9748a8da96a3f
|
[
"MIT"
] | null | null | null |
from flask import render_template,request,redirect,url_for,abort
from ..models import User,Post,Comment,Subscriber
from ..requests import get_quotes
from . import main
from .forms import PostForm,CommentForm,DelForm,UpdateProfile
from app.auth.forms import SubscriptionForm
from .. import db,photos
from flask_login import login_required,current_user
import markdown2
from ..email import mail_message
from app.auth import views,forms
# from sqlalchemy import desc
@main.route('/',methods = ['GET','POST'])
def index():
'''
View root page function that returns the index page and its data
'''
quotes=get_quotes()
title = 'Home - Welcome to The best Blogging Website Online'
posts=Post.query.all()
# desc(posts)
    users = None
    comments = []  # default so render_template below cannot hit a NameError when posts is empty
    for post in posts:
        comments = Comment.query.filter_by(post_id=post.id).all()
return render_template('index.html', title = title,posts=posts, users=users,quotes=quotes,comments=comments)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update/bio',methods = ['GET','POST'])
@login_required
def update_bio(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update_bio.html',form =form)
# @main.route('/user/update/pitch/<id>',methods = ['GET','POST'])
# def single_review(id):
# pitch=Pitch.query.get(id)
# if pitch is None:
# abort(404)
# form = PitchForm()
#
# if form.validate_on_submit():
# user.pitches = form.pitches.data
#
# db.session.add(user)
# db.session.commit()
#
# return redirect(url_for('.profile',pitch=user.pitches))
#
# format_pitch = markdown2.markdown(pitch.movie_pitch,extras=["code-friendly", "fenced-code-blocks"])
# return render_template('new_pitch.html',pitch = pitch,format_pitch=format_pitch)
@main.route('/new_post/',methods = ['GET','POST'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
post = Post(name = form.name.data, user_id = current_user.id)
db.session.add(post)
db.session.commit()
subscribers=Subscriber.query.filter_by(email=Subscriber.email).all()
form = SubscriptionForm()
for subscriber in subscribers:
mail_message("A New Blog Post is added","email/welcome_user",subscriber.email,subscribers=subscribers)
return redirect(url_for('.index'))
return render_template('profile/new_post.html',post_form=form)
# @main.route('/delete_post/<int:id>',methods = ['GET','POST'])
# def del_post(id):
@main.route('/new_comment/<int:id>',methods = ['GET','POST'])
def new_comment(id):
form = CommentForm()
form2=DelForm()
posts=Post.query.filter_by(id=id).all()
comments=Comment.query.filter_by(post_id=id).all()
if form.validate_on_submit():
comment = Comment(name = form.name.data, post_id = id)
db.session.add(comment)
db.session.commit()
if form2.validate_on_submit():
comment=Comment.query.filter_by(id=id).delete()
# db.session.delete(comment)
db.session.commit()
# if button.click()
return redirect(url_for('.index'))
return render_template('profile/new_comment.html',comment_form=form,del_form=form2,comments=comments,posts=posts)
# @main.route('/new_vote/',methods = ['GET','POST'])
# @login_required
# def new_vote():
# form = VoteForm()
# # votes = get_vote(id)
#
# if form.validate_on_submit():
# pitch = Pitch(name = form.name.data, user_id = current_user.id)
# upvote = Vote(upvote = form.validate_on_submit(),pitch_id = pitch.id)
# downvote = Vote(downvote = form.validate_on_submit(),pitch_id = pitch.id)
# up=0
# down=0
# for upvote in vote:
# up+=1
# db.session.add(upvote=up)
# db.session.commit()
# for downvote in vote:
# down+=1
# db.session.add(downvote=down)
# db.session.commit()
# user=User.query.filter_by(id = pitch.id).first()
# return redirect(url_for('.index'))
#
# return render_template('profile/new_comment.html',comment_form=form)
# return render_template('new_vote.html',upvote = upvote, downvote = downvote, vote_form=form, votes=votes)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
| 33.927152
| 117
| 0.662112
| 680
| 5,123
| 4.854412
| 0.191176
| 0.040897
| 0.035444
| 0.042411
| 0.389882
| 0.287186
| 0.249015
| 0.197819
| 0.177219
| 0.156013
| 0
| 0.004351
| 0.192465
| 5,123
| 150
| 118
| 34.153333
| 0.79357
| 0.340426
| 0
| 0.25641
| 0
| 0
| 0.112821
| 0.041327
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.141026
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9a3781192095c8ae404cd60bb006a2f14049443
| 4,370
|
py
|
Python
|
genoml/steps/model_validate.py
|
GenoML/genoml
|
bfe0164e99a27d5ec2b720b5a24e059294603e3f
|
[
"Apache-2.0"
] | 13
|
2019-03-22T12:12:12.000Z
|
2021-10-04T15:06:18.000Z
|
genoml/steps/model_validate.py
|
GenoML/genoml
|
bfe0164e99a27d5ec2b720b5a24e059294603e3f
|
[
"Apache-2.0"
] | 21
|
2019-03-15T15:40:59.000Z
|
2020-08-03T21:44:26.000Z
|
genoml/steps/model_validate.py
|
GenoML/genoml
|
bfe0164e99a27d5ec2b720b5a24e059294603e3f
|
[
"Apache-2.0"
] | 4
|
2019-06-28T18:25:37.000Z
|
2020-01-21T01:22:07.000Z
|
#! /usr/bin/env python -u
# coding=utf-8
from shutil import copyfile
from genoml.steps import PhenoScale, StepBase
from genoml.utils import DescriptionLoader
__author__ = 'Sayed Hadi Hashemi'
class ModelValidateStep(StepBase):
"""performs validation with existing data"""
_valid_prefix = None
def _reduce_validate(self):
self.execute_command([
self._dependecies["Plink"],
"--bfile", self._opt.valid_geno_dir,
"--extract", "{}.reduced_genos_snpList".format(self._opt.prune_prefix),
"--recode", "A",
"--out", "{}.reduced_genos".format(self._valid_prefix)
], name="Plink")
def _merge(self):
self.merge_reduced()
def _main(self):
if self._opt.pheno_scale == PhenoScale.DISCRETE:
script_name = self._opt.VALIDATE_DISC if self._opt.pheno_scale == PhenoScale.DISCRETE \
else self._opt.VALIDATE_CONT
self.execute_command([
self._dependecies["R"],
script_name,
self._valid_prefix,
self._opt.n_cores,
self._opt.impute_data,
self._opt.prune_prefix #todo: new best_model
], name="VALIDATE_CONT, please make sure you have included .cov and .addit validation files, if used for "
"training.")
@DescriptionLoader.function_description("validation_step")
def process(self):
self._valid_prefix = "{}_validation".format(self._opt.prune_prefix)
self.model_validate()
def merge_reduced(self):
self.execute_command([
self._dependecies["R"],
self._opt.MERGE,
self._opt.valid_geno_dir,
self._opt.valid_pheno_file,
self.xna(self._opt.valid_cov_file),
self.xna(self._opt.valid_addit_file),
self._valid_prefix
], name="R")
@staticmethod
def xna(s):
return s if s is not None else "NA"
# todo: new
def model_validate(self):
"""this function performs validation with existing data"""
# check if GWAS is present (meaning it was also present in training), otherwise the Prune option has been used
# TODO: find a way to ensure user is providing GWAS for validation, in case it was used during training
if self._opt.gwas_file is None:
# we need to specify the forced allele here from the training set genotype file, this pulls the allele to
# force
# TODO: refactor to a Python code
self.cut_column(self._opt.geno_prefix + ".bim",
"2,5",
self._opt.prune_prefix + ".allelesToForce")
# plink
self.execute_command([
self._dependecies["Plink"],
"--bfile", self._opt.valid_geno_dir,
"--extract", self._opt.prune_prefix + '.reduced_genos_snpList',
"--recode", "A",
"--recode-allele", self._opt.prune_prefix + '.allelesToForce',
"--out", self._valid_prefix + '.reduced_genos'
], name="model_validate")
else: # gwas_file is not None
# plink
self.execute_command([
self._dependecies["Plink"],
"--bfile", self._opt.valid_geno_dir,
"--extract", self._opt.prune_prefix + '.reduced_genos_snpList',
"--recode", "A",
"--recode-allele", self._opt.prune_prefix + '.variantWeightings',
"--out", self._valid_prefix + '.reduced_genos'
], name="model_validate")
# copy
copyfile(self._opt.prune_prefix + ".temp.snpsToPull2", self._valid_prefix + ".temp.snpsToPull2")
self.execute_command([
self._dependecies["R"],
self._opt.SCALE_VAR_DOSES_VALID,
self._opt.prune_prefix,
self._opt.gwas_file,
self._opt.valid_geno_dir,
self._opt.geno_prefix
], name="validate")
self.merge_reduced()
self._main()
self.execute_command([
self._dependecies["R"],
self._opt.CHECK_VALIDATION,
self._valid_prefix,
self._opt.valid_pheno_file
], name="CHECK_VALIDATION")
| 38
| 118
| 0.576659
| 481
| 4,370
| 4.945946
| 0.297297
| 0.094157
| 0.050441
| 0.075662
| 0.442203
| 0.334594
| 0.297604
| 0.244641
| 0.192938
| 0.153426
| 0
| 0.001669
| 0.314645
| 4,370
| 114
| 119
| 38.333333
| 0.792654
| 0.126087
| 0
| 0.376471
| 0
| 0
| 0.145153
| 0.017914
| 0
| 0
| 0
| 0.008772
| 0
| 1
| 0.082353
| false
| 0
| 0.035294
| 0.011765
| 0.152941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9a5ad42dfd6f80195b93f6de20b3058e7e2213b
| 7,374
|
py
|
Python
|
pyquil/api/_benchmark.py
|
stjordanis/pyquil
|
36987ecb78d5dc85d299dd62395b7669a1cedd5a
|
[
"Apache-2.0"
] | 677
|
2017-01-09T23:20:22.000Z
|
2018-11-26T10:57:49.000Z
|
pyquil/api/_benchmark.py
|
stjordanis/pyquil
|
36987ecb78d5dc85d299dd62395b7669a1cedd5a
|
[
"Apache-2.0"
] | 574
|
2018-11-28T05:38:40.000Z
|
2022-03-23T20:38:28.000Z
|
pyquil/api/_benchmark.py
|
stjordanis/pyquil
|
36987ecb78d5dc85d299dd62395b7669a1cedd5a
|
[
"Apache-2.0"
] | 202
|
2018-11-30T06:36:28.000Z
|
2022-03-29T15:38:18.000Z
|
##############################################################################
# Copyright 2018 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from typing import List, Optional, Sequence, cast
from qcs_api_client.client import QCSClientConfiguration
from pyquil.api._abstract_compiler import AbstractBenchmarker
from pyquil.api._compiler_client import (
GenerateRandomizedBenchmarkingSequenceRequest,
ConjugatePauliByCliffordRequest,
CompilerClient,
)
from pyquil.paulis import PauliTerm, is_identity
from pyquil.quil import address_qubits, Program
from pyquil.quilbase import Gate
class BenchmarkConnection(AbstractBenchmarker):
"""
Represents a connection to a server that generates benchmarking data.
"""
def __init__(self, *, timeout: float = 10.0, client_configuration: Optional[QCSClientConfiguration] = None):
"""
Client to communicate with the benchmarking data endpoint.
:param timeout: Time limit for requests, in seconds.
:param client_configuration: Optional client configuration. If none is provided, a default one will be loaded.
"""
self._compiler_client = CompilerClient(
client_configuration=client_configuration or QCSClientConfiguration.load(),
request_timeout=timeout,
)
def apply_clifford_to_pauli(self, clifford: Program, pauli_in: PauliTerm) -> PauliTerm:
r"""
Given a circuit that consists only of elements of the Clifford group,
return its action on a PauliTerm.
In particular, for Clifford C, and Pauli P, this returns the PauliTerm
representing CPC^{\dagger}.
:param clifford: A Program that consists only of Clifford operations.
:param pauli_in: A PauliTerm to be acted on by clifford via conjugation.
:return: A PauliTerm corresponding to clifford * pauli_in * clifford^{\dagger}
"""
# do nothing if `pauli_in` is the identity
if is_identity(pauli_in):
return pauli_in
indices_and_terms = list(zip(*list(pauli_in.operations_as_set())))
request = ConjugatePauliByCliffordRequest(
pauli_indices=list(indices_and_terms[0]),
pauli_symbols=list(indices_and_terms[1]),
clifford=clifford.out(calibrations=False),
)
response = self._compiler_client.conjugate_pauli_by_clifford(request)
phase_factor, paulis = response.phase_factor, response.pauli
pauli_out = PauliTerm("I", 0, 1.0j ** phase_factor)
clifford_qubits = clifford.get_qubits()
pauli_qubits = pauli_in.get_qubits()
all_qubits = sorted(set(cast(List[int], pauli_qubits)).union(set(cast(List[int], clifford_qubits))))
# The returned pauli will have specified its value on all_qubits, sorted by index.
# This is maximal set of qubits that can be affected by this conjugation.
for i, pauli in enumerate(paulis):
pauli_out = cast(PauliTerm, pauli_out * PauliTerm(pauli, all_qubits[i]))
return cast(PauliTerm, pauli_out * pauli_in.coefficient)
def generate_rb_sequence(
self,
depth: int,
gateset: Sequence[Gate],
seed: Optional[int] = None,
interleaver: Optional[Program] = None,
) -> List[Program]:
"""
Construct a randomized benchmarking experiment on the given qubits, decomposing into
gateset. If interleaver is not provided, the returned sequence will have the form
C_1 C_2 ... C_(depth-1) C_inv ,
where each C is a Clifford element drawn from gateset, C_{< depth} are randomly selected,
and C_inv is selected so that the entire sequence composes to the identity. If an
interleaver G (which must be a Clifford, and which will be decomposed into the native
gateset) is provided, then the sequence instead takes the form
C_1 G C_2 G ... C_(depth-1) G C_inv .
The JSON response is a list of lists of indices, or Nones. In the former case, they are the
index of the gate in the gateset.
:param depth: The number of Clifford gates to include in the randomized benchmarking
experiment. This is different than the number of gates in the resulting experiment.
:param gateset: A list of pyquil gates to decompose the Clifford elements into. These
must generate the clifford group on the qubits of interest. e.g. for one qubit
[RZ(np.pi/2), RX(np.pi/2)].
:param seed: A positive integer used to seed the PRNG.
:param interleaver: A Program object that encodes a Clifford element.
:return: A list of pyquil programs. Each pyquil program is a circuit that represents an
element of the Clifford group. When these programs are composed, the resulting Program
will be the randomized benchmarking experiment of the desired depth. e.g. if the return
programs are called cliffords then `sum(cliffords, Program())` will give the randomized
benchmarking experiment, which will compose to the identity program.
"""
# Support QubitPlaceholders: we temporarily index to arbitrary integers.
# `generate_rb_sequence` handles mapping back to the original gateset gates.
gateset_as_program = address_qubits(sum(gateset, Program()))
qubits = len(gateset_as_program.get_qubits())
gateset_for_api = gateset_as_program.out().splitlines()
interleaver_out: Optional[str] = None
if interleaver:
assert isinstance(interleaver, Program)
interleaver_out = interleaver.out(calibrations=False)
depth = int(depth) # needs to be jsonable, no np.int64 please!
request = GenerateRandomizedBenchmarkingSequenceRequest(
depth=depth,
num_qubits=qubits,
gateset=gateset_for_api,
seed=seed,
interleaver=interleaver_out,
)
response = self._compiler_client.generate_randomized_benchmarking_sequence(request)
programs = []
for clifford in response.sequence:
clifford_program = Program()
if interleaver:
clifford_program._calibrations = interleaver.calibrations
# Like below, we reversed the order because the API currently hands back the Clifford
# decomposition right-to-left.
for index in reversed(clifford):
clifford_program.inst(gateset[index])
programs.append(clifford_program)
# The programs are returned in "textbook style" right-to-left order. To compose them into
# the correct pyquil program, we reverse the order.
return list(reversed(programs))
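# Hedged usage sketch (assumes a reachable quilc/compiler endpoint, which this
# file does not provide): the docstring's one-qubit gateset example would be
# exercised roughly like this.
#
# import numpy as np
# from pyquil.gates import RX, RZ
#
# bm = BenchmarkConnection()
# cliffords = bm.generate_rb_sequence(depth=5, gateset=[RZ(np.pi / 2, 0), RX(np.pi / 2, 0)])
# rb_program = sum(cliffords, Program())  # composes to the identity, per the docstring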
| 46.670886
| 118
| 0.675481
| 906
| 7,374
| 5.384106
| 0.31457
| 0.01435
| 0.02624
| 0.021525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004638
| 0.239761
| 7,374
| 157
| 119
| 46.968153
| 0.865501
| 0.481421
| 0
| 0.029412
| 0
| 0
| 0.0003
| 0
| 0
| 0
| 0
| 0
| 0.014706
| 1
| 0.044118
| false
| 0
| 0.102941
| 0
| 0.205882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9a78f0606518ebedfb5bc19389f7930753e4683
| 554
|
py
|
Python
|
questions/q292_pair_with_given_difference/simple_storage.py
|
aadhityasw/Competitive-Programs
|
901a48d35f024a3a87c32a45b7f4531e8004a203
|
[
"MIT"
] | null | null | null |
questions/q292_pair_with_given_difference/simple_storage.py
|
aadhityasw/Competitive-Programs
|
901a48d35f024a3a87c32a45b7f4531e8004a203
|
[
"MIT"
] | 1
|
2021-05-15T07:56:51.000Z
|
2021-05-15T07:56:51.000Z
|
questions/q292_pair_with_given_difference/simple_storage.py
|
aadhityasw/Competitive-Programs
|
901a48d35f024a3a87c32a45b7f4531e8004a203
|
[
"MIT"
] | null | null | null |
class Solution:
def findPair(self, arr, L,N):
store = set()
for num in arr :
if num in store :
return True
store.add(num - N)
store.add(num + N)
return False
if __name__ == '__main__':
t = int(input())
for _ in range(t):
L,N = [int(x) for x in input().split()]
arr = [int(x) for x in input().split()]
solObj = Solution()
if(solObj.findPair(arr,L, N)):
print(1)
else:
print(-1)
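# Hedged worked example (not in the original): for arr = [5, 20, 3, 2, 50, 80]
# and N = 78, processing 2 adds {2 - 78, 2 + 78} = {-76, 80} to the store, so
# when 80 is reached the lookup succeeds and findPair returns True.
# assert Solution().findPair([5, 20, 3, 2, 50, 80], 6, 78) is True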
| 19.103448
| 47
| 0.440433
| 70
| 554
| 3.357143
| 0.428571
| 0.025532
| 0.042553
| 0.102128
| 0.170213
| 0.170213
| 0.170213
| 0
| 0
| 0
| 0
| 0.006349
| 0.431408
| 554
| 28
| 48
| 19.785714
| 0.739683
| 0
| 0
| 0
| 0
| 0
| 0.01444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0
| 0
| 0.210526
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9ae75ada823a3568610724a901cd66400be071c
| 331
|
py
|
Python
|
Algo and DSA/LeetCode-Solutions-master/Python/bulb-switcher-iii.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 3,269
|
2018-10-12T01:29:40.000Z
|
2022-03-31T17:58:41.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/bulb-switcher-iii.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 53
|
2018-12-16T22:54:20.000Z
|
2022-02-25T08:31:20.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/bulb-switcher-iii.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 1,236
|
2018-10-12T02:51:40.000Z
|
2022-03-30T13:30:37.000Z
|
# Time: O(n)
# Space: O(1)
class Solution(object):
def numTimesAllBlue(self, light):
"""
:type light: List[int]
:rtype: int
"""
result, right = 0, 0
for i, num in enumerate(light, 1):
right = max(right, num)
result += (right == i)
return result
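# Hedged worked example (standard LeetCode 1375 case, not in the original):
# for light = [2, 1, 3, 5, 4], the running maximum equals the moment index
# after moments 2, 3 and 5, so numTimesAllBlue returns 3.
# assert Solution().numTimesAllBlue([2, 1, 3, 5, 4]) == 3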
| 22.066667
| 42
| 0.483384
| 39
| 331
| 4.102564
| 0.666667
| 0.1375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019512
| 0.380665
| 331
| 14
| 43
| 23.642857
| 0.760976
| 0.178248
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9b08e37d9b636d67355ff414f22cda84aa9f53b
| 3,472
|
py
|
Python
|
tilapia/lib/provider/chains/bch/provider.py
|
huazhouwang/python_multichain_wallet
|
52e0acdc2984c08990cb36433ef17a414fbe8312
|
[
"MIT"
] | 2
|
2021-09-23T13:47:08.000Z
|
2021-09-24T02:39:14.000Z
|
tilapia/lib/provider/chains/bch/provider.py
|
huazhouwang/tilapia
|
52e0acdc2984c08990cb36433ef17a414fbe8312
|
[
"MIT"
] | null | null | null |
tilapia/lib/provider/chains/bch/provider.py
|
huazhouwang/tilapia
|
52e0acdc2984c08990cb36433ef17a414fbe8312
|
[
"MIT"
] | null | null | null |
import itertools
import logging
from typing import Dict, Tuple
from tilapia.lib.basic import bip44
from tilapia.lib.basic.functional.require import require
from tilapia.lib.hardware import interfaces as hardware_interfaces
from tilapia.lib.provider import data
from tilapia.lib.provider.chains import btc
from tilapia.lib.provider.chains.bch.sdk import cash_address
from tilapia.lib.secret import interfaces as secret_interfaces
logger = logging.getLogger("app.chain")
class BCHProvider(btc.BTCProvider):
ADDRESS_PREFIX = "bitcoincash"
def pubkey_to_address(self, verifier: secret_interfaces.VerifierInterface, encoding: str = None) -> str:
require(encoding == "P2PKH", f"Invalid address encoding: {encoding}")
pubkey = verifier.get_pubkey(compressed=True)
pubkey_hash = self.network.keys.public(pubkey).hash160(is_compressed=True)
if encoding == "P2PKH": # Pay To Public Key Hash
address = cash_address.to_cash_address(self.ADDRESS_PREFIX, pubkey_hash)
else:
raise Exception("Should not be here")
return address
def verify_address(self, address: str) -> data.AddressValidation:
is_valid, encoding = False, None
try:
if ":" not in address:
address = f"{self.ADDRESS_PREFIX}:{address}"
prefix, _ = address.split(":")
if prefix == self.ADDRESS_PREFIX:
is_valid = cash_address.is_valid_cash_address(address)
encoding = "P2PKH" if is_valid else None
except Exception as e:
logger.exception(f"Illegal address: {address}, error: {e}")
address = address if is_valid else ""
return data.AddressValidation(
normalized_address=address,
display_address=address,
is_valid=is_valid,
encoding=encoding,
)
def _cash_address_to_legacy_address(self, address: str) -> str:
if ":" not in address:
return address
pubkey_hash = cash_address.export_pubkey_hash(address)
return self.network.address.for_p2pkh(pubkey_hash)
def _pre_process_unsigned_tx(self, unsigned_tx: data.UnsignedTx, signers: dict) -> Tuple[data.UnsignedTx, dict]:
for i in itertools.chain(unsigned_tx.inputs, unsigned_tx.outputs):
i.address = self._cash_address_to_legacy_address(i.address) # pycoin supports legacy bch address only
signers = {self._cash_address_to_legacy_address(k): v for k, v in signers.items()}
return unsigned_tx, signers
def sign_transaction(
self, unsigned_tx: data.UnsignedTx, signers: Dict[str, secret_interfaces.SignerInterface]
) -> data.SignedTx:
unsigned_tx, signers = self._pre_process_unsigned_tx(unsigned_tx, signers)
return super(BCHProvider, self).sign_transaction(unsigned_tx, signers)
def hardware_sign_transaction(
self,
hardware_client: hardware_interfaces.HardwareClientInterface,
unsigned_tx: data.UnsignedTx,
bip44_path_of_signers: Dict[str, bip44.BIP44Path],
) -> data.SignedTx:
unsigned_tx, bip44_path_of_signers = self._pre_process_unsigned_tx(unsigned_tx, bip44_path_of_signers)
return super(BCHProvider, self).hardware_sign_transaction(hardware_client, unsigned_tx, bip44_path_of_signers)
def get_token_info_by_address(self, token_address: str) -> Tuple[str, str, int]:
raise NotImplementedError()
| 39.908046
| 118
| 0.701613
| 425
| 3,472
| 5.489412
| 0.263529
| 0.064295
| 0.042006
| 0.030862
| 0.183455
| 0.123018
| 0.068581
| 0.035148
| 0
| 0
| 0
| 0.007689
| 0.213422
| 3,472
| 86
| 119
| 40.372093
| 0.846576
| 0.017857
| 0
| 0.092308
| 0
| 0
| 0.047256
| 0.009099
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107692
| false
| 0
| 0.153846
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9b1801867198f74dece84cf86c7c8fce031dea8
| 1,747
|
py
|
Python
|
Excercises/Automata.py
|
RyanClinton777/graph-theory-project
|
a3ff9512da3ac2d48138b59d2e8eb8899d8e552d
|
[
"Unlicense"
] | null | null | null |
Excercises/Automata.py
|
RyanClinton777/graph-theory-project
|
a3ff9512da3ac2d48138b59d2e8eb8899d8e552d
|
[
"Unlicense"
] | null | null | null |
Excercises/Automata.py
|
RyanClinton777/graph-theory-project
|
a3ff9512da3ac2d48138b59d2e8eb8899d8e552d
|
[
"Unlicense"
] | null | null | null |
""" DFA Automata Implementation """
class State:
""" Nodes/States in an automaton """
def __init__(self, isAccept, arrows):
        # Boolean: whether or not this state is an accept state
self.isAccept = isAccept
# dictionary of keys/labels:Other states
self.arrows = arrows
class DFA:
""" A DFA """
def __init__(self, start):
# Starting state
self.start = start
    def match(self, s):
        """ Check and return whether or not string s is accepted by our automaton """
# Current state we are in
currentState = self.start
        # Loop through the characters in the string
for c in s:
# Set current state as one pointed to by key of c
currentState = currentState.arrows[c]
        # Return whether or not the current state is an accept state
return currentState.isAccept
def compile():
""" Create our automaton """
    # Creating a DFA with two states; on '0' each state points to itself, and on '1' to the other (checking for even parity)
    # "Compile" is standard terminology for creating something like this
# Create start state
start = State(True, {})
# Other state
other = State(False, {})
# The states point to themselves for 0
start.arrows['0'] = start
other.arrows['0'] = other
    # They point to each other for 1
start.arrows['1'] = other
other.arrows['1'] = start
a = DFA(start)
return a
# Create automaton instance
myAuto = compile()
# tests
for s in ['1100', '11111', '', '1', '0']:
result = myAuto.match(s)
print(f"{s} accepted? {result}")
for s in ['000', '001', '010', '011', '100', '101', '110', '111']:
result = myAuto.match(s)
print(f"{s} accepted? {result}")
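# Hedged cross-check (not in the original): this DFA accepts exactly the strings
# with an even number of '1's, so its output can be verified against a direct
# parity count.
for s in ['', '0', '1', '10', '1101']:
    assert myAuto.match(s) == (s.count('1') % 2 == 0)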
| 28.639344
| 117
| 0.611906
| 233
| 1,747
| 4.553648
| 0.39485
| 0.02262
| 0.031103
| 0.028275
| 0.111216
| 0.073516
| 0.073516
| 0.073516
| 0.073516
| 0
| 0
| 0.033965
| 0.275329
| 1,747
| 61
| 118
| 28.639344
| 0.804107
| 0.418432
| 0
| 0.142857
| 0
| 0
| 0.085391
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.285714
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9b86bf8f49332b5a6eedda3d7d77088bf890eb2
| 19,801
|
py
|
Python
|
pangtreebuild/pangenome/builders/dagmaf2poagraph.py
|
meoke/PangTreeBuild
|
7cafb76df32c559a76ed1d269699dc0e52313312
|
[
"MIT"
] | 2
|
2019-09-04T20:01:28.000Z
|
2019-12-23T22:41:57.000Z
|
pangtreebuild/pangenome/builders/dagmaf2poagraph.py
|
meoke/PangTreeBuild
|
7cafb76df32c559a76ed1d269699dc0e52313312
|
[
"MIT"
] | 2
|
2019-08-10T16:18:01.000Z
|
2019-10-28T21:40:23.000Z
|
pangtreebuild/pangenome/builders/dagmaf2poagraph.py
|
meoke/PangTreeBuild
|
7cafb76df32c559a76ed1d269699dc0e52313312
|
[
"MIT"
] | 2
|
2020-04-23T23:57:52.000Z
|
2020-07-12T17:09:02.000Z
|
from collections import namedtuple
from typing import Tuple, List, NewType, Optional, Dict
from pangtreebuild.mafgraph.graph import Block
from pangtreebuild.mafgraph.graph.Arc import Arc
from pangtreebuild.mafgraph.mafreader import start_position
from pangtreebuild.pangenome import graph
from pangtreebuild.pangenome import DAGMaf
from pangtreebuild.pangenome.parameters import missings
from pangtreebuild.pangenome.parameters import msa
from pangtreebuild.tools import logprocess
global_logger = logprocess.get_global_logger()
detailed_logger = logprocess.get_logger("details")
class PoagraphBuildException(Exception):
"""Any exception connected with building poagraph."""
pass
MafSequenceID = NewType('MafSequenceID', str)
SequenceInfo = namedtuple('SequenceInfo', ['block_id',
'start',
'strand',
'size',
'srcSize',
'orient'])
Edge = namedtuple('Edge', ['seq_id',
'from_block_id',
'to_block_id',
'last_node_id'])
class _BuildState:
def __init__(self,
initial_nodes: List[graph.Node],
initial_sequences: Dict[msa.SequenceID, graph.Sequence],
initial_edges: Dict[msa.SequenceID, List[Edge]],
seqs_info: Dict[msa.SequenceID, List[SequenceInfo]],
initial_column_id: graph.ColumnID,
fasta_provider: missings.FastaProvider):
self.nodes: List[graph.Node] = initial_nodes
self.sequences: Dict[msa.SequenceID, graph.Sequence] = initial_sequences
self.free_edges: Dict[msa.SequenceID, List[Edge]] = initial_edges
self.seqs_info: Dict[msa.SequenceID, List[SequenceInfo]] = seqs_info
self.column_id: graph.ColumnID = initial_column_id
self.fasta_provider: missings.FastaProvider = fasta_provider
def get_poagraph(dagmaf: DAGMaf.DAGMaf,
fasta_provider: missings.FastaProvider,
metadata: Optional[msa.MetadataCSV]) -> \
Tuple[List[graph.Node], Dict[msa.SequenceID, graph.Sequence]]:
"""Gets poagraph from given dagmaf using fasta_provider and metadata.
Args:
dagmaf: DagMaf that will be converted to Poagraph.
fasta_provider: Provider of symbols missing in DagMaf.
metadata: MetadataCSV.
Returns:
Tuple of poagraph elements.
"""
sequences_in_dagmaf = _get_sequences_ids(dagmaf)
build_state = _BuildState(initial_nodes=[],
initial_sequences=_init_sequences(sequences_in_dagmaf, metadata),
initial_edges=_init_free_edges(sequences_in_dagmaf),
seqs_info=_get_seqs_info(dagmaf, sequences_in_dagmaf),
initial_column_id=graph.ColumnID(-1),
fasta_provider=fasta_provider)
_complement_starting_nodes(build_state)
for mafnode in dagmaf.dagmaf_nodes:
_process_block(build_state, mafnode)
return build_state.nodes, build_state.sequences
def _get_sequences_ids(dagmaf: DAGMaf.DAGMaf) -> List[msa.SequenceID]:
return list({msa.SequenceID(seq.id)
for block in dagmaf.dagmaf_nodes
for seq in block.alignment})
def _init_sequences(sequences_in_dagmaf: List[msa.SequenceID],
metadata: Optional[msa.MetadataCSV]) -> \
Dict[msa.SequenceID, graph.Sequence]:
metadata_sequences_ids = metadata.get_all_sequences_ids() if metadata else []
initial_sequences = {seq_id: graph.Sequence(seqid=seq_id,
paths=[],
seqmetadata=metadata.get_sequence_metadata(seq_id)
if metadata else {})
for seq_id in set(sequences_in_dagmaf + metadata_sequences_ids)}
return initial_sequences
def _init_free_edges(maf_sequences_ids: List[msa.SequenceID]) -> \
Dict[msa.SequenceID, List[Edge]]:
return {seq_id: [] for seq_id in maf_sequences_ids}
def _get_seqs_info(dagmaf: DAGMaf.DAGMaf,
sequences_in_dagmaf: List[msa.SequenceID]) -> \
Dict[msa.SequenceID, List[SequenceInfo]]:
seqs_info = {seq_id: [] for seq_id in sequences_in_dagmaf}
for n in dagmaf.dagmaf_nodes:
for seq in n.alignment:
seqs_info[msa.SequenceID(seq.id)].append(SequenceInfo(block_id=graph.BlockID(n.id),
start=start_position(seq),
strand=seq.annotations["strand"],
size=seq.annotations["size"],
srcSize=seq.annotations["srcSize"],
orient=n.orient))
absents_sequences: List[msa.SequenceID] = []
for seq_id, seq_info_list in seqs_info.items():
if seq_info_list:
seqs_info[seq_id] = sorted(seq_info_list, key=lambda si: si.start)
else:
absents_sequences.append(seq_id)
for seq_id in absents_sequences:
del seqs_info[seq_id]
return seqs_info
def _complement_starting_nodes(build_state: _BuildState) -> None:
for seq_id, seq_info_list in build_state.seqs_info.items():
first_block_sinfo = seq_info_list[0]
if first_block_sinfo.start != 0:
_complement_sequence_starting_nodes(build_state,
seq_id,
first_block_sinfo)
def _complement_sequence_starting_nodes(build_state: _BuildState,
seq_id: msa.SequenceID,
first_block_sinfo: SequenceInfo) -> \
None:
current_node_id: graph.NodeID = _get_max_node_id(build_state.nodes)
column_id = -first_block_sinfo.start
join_with = None
for i in range(first_block_sinfo.start):
current_node_id += 1
missing_nucleotide = _get_missing_nucleotide(build_state.fasta_provider, seq_id, i)
build_state.nodes += [graph.Node(node_id=current_node_id,
base=missing_nucleotide,
column_id=column_id)]
_add_node_to_sequence(build_state,
seq_id=seq_id,
join_with=join_with,
node_id=current_node_id)
join_with = current_node_id
column_id += 1
build_state.free_edges[seq_id] += [Edge(seq_id=seq_id,
from_block_id=None,
to_block_id=first_block_sinfo.block_id,
last_node_id=current_node_id)]
def _get_max_node_id(nodes: List[graph.Node]) -> graph.NodeID:
return graph.NodeID(len(nodes) - 1)
def _get_missing_nucleotide(fasta_provider, seq_id: msa.SequenceID, i: int) -> graph.Base:
return fasta_provider.get_base(seq_id, i)
def _add_node_to_sequence(build_state: _BuildState,
seq_id: msa.SequenceID,
join_with: graph.NodeID,
node_id: graph.NodeID) -> None:
if len(build_state.sequences[seq_id].paths) == 0 or join_with is None:
build_state.sequences[seq_id].paths.append(graph.SeqPath([node_id]))
else:
for path in build_state.sequences[seq_id].paths:
if path[-1] == join_with:
path.append(node_id)
return
raise PoagraphBuildException("No path with specified last node id.")
def _process_block(build_state: _BuildState, block: DAGMaf.DAGMafNode):
current_node_id = _get_max_node_id(build_state.nodes)
block_width = len(block.alignment[0].seq)
paths_join_info = _get_paths_join_info(block, build_state.free_edges)
build_state.column_id = _get_max_column_id(build_state.nodes)
for col in range(block_width):
build_state.column_id += 1
sequence_name_to_nucleotide = {MafSequenceID(seq.id): seq[col]
for seq in block.alignment}
nodes_codes = _get_column_nucleotides_sorted_codes(sequence_name_to_nucleotide)
column_nodes_ids = [current_node_id + i + 1 for i, _ in enumerate(nodes_codes)]
for i, nucl in enumerate(nodes_codes):
current_node_id += 1
maf_seqs_id = [seq_id for seq_id, n in sequence_name_to_nucleotide.items() if n == nucl]
build_state.nodes += [graph.Node(node_id=current_node_id,
base=graph.Base(nucl),
aligned_to=_get_next_aligned_node_id(i, column_nodes_ids),
column_id=build_state.column_id,
block_id=block.id)]
for maf_seq_id in maf_seqs_id:
seq_id = msa.SequenceID(maf_seq_id)
_add_node_to_sequence(build_state, seq_id, paths_join_info[seq_id], current_node_id)
paths_join_info[seq_id] = current_node_id
_add_block_out_edges_to_free_edges(build_state, block, paths_join_info)
_manage_endings(build_state, block, paths_join_info)
def _get_paths_join_info(block: Block,
free_edges: Dict[msa.SequenceID, List[Edge]]) -> \
Dict[msa.SequenceID, Optional[graph.NodeID]]:
paths_join_info: Dict[msa.SequenceID, Optional[graph.NodeID]] = dict()
for seq in block.alignment:
seq_id = msa.SequenceID(seq.id)
paths_join_info[seq_id] = None
for i, edge in enumerate(free_edges[seq_id]):
if edge.to_block_id == block.id:
paths_join_info[seq_id] = edge.last_node_id
return paths_join_info
def _get_max_column_id(nodes: List[graph.Node]) -> graph.ColumnID:
current_columns_ids = [node.column_id for node in nodes]
return max(current_columns_ids) if current_columns_ids \
else graph.ColumnID(-1)
def _get_column_nucleotides_sorted_codes(seq_to_nucl: Dict[msa.SequenceID, str]) -> \
List[str]:
return sorted(
set(
[nucleotide
for nucleotide
in seq_to_nucl.values()
if nucleotide != '-']))
def _get_next_aligned_node_id(current_column_i, column_nodes_ids) -> \
Optional[graph.NodeID]:
if len(column_nodes_ids) > 1:
return column_nodes_ids[(current_column_i + 1) % len(column_nodes_ids)]
return None
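# Illustrative note (assumption): aligned_to links the nodes of one column
# into a ring over the column's variants. With column_nodes_ids = [5, 6, 7]:
#   _get_next_aligned_node_id(0, [5, 6, 7]) -> 6
#   _get_next_aligned_node_id(1, [5, 6, 7]) -> 7
#   _get_next_aligned_node_id(2, [5, 6, 7]) -> 5
# A single-node column yields None, as there is nothing to align with.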
def _add_block_out_edges_to_free_edges(build_state: _BuildState,
block: Block,
join_info: Dict[msa.SequenceID, graph.NodeID]):
for edge in block.out_edges:
_ = _get_correct_edge_type(edge)
for seq in edge.sequences:
seq_id = msa.SequenceID(seq[0].seq_id)
last_node_id = _complement_sequence_middles_if_needed(build_state=build_state,
block=block,
edge=edge,
seq=seq,
last_node_id=join_info[seq_id])
if last_node_id is not None:
build_state.free_edges[seq_id].append(Edge(seq_id=seq_id,
from_block_id=block.id,
to_block_id=edge.to,
last_node_id=last_node_id))
def _get_correct_edge_type(edge: Arc) -> Tuple[int, int]:
return edge.edge_type
def _complement_sequence_middles_if_needed(build_state: _BuildState,
block: Block,
edge: Arc,
seq,
last_node_id: graph.NodeID):
seq_id = msa.SequenceID(seq[0].seq_id)
left_block_sinfo, right_block_sinfo = _get_edge_sinfos(seqs_info=build_state.seqs_info,
from_block_id=block.id,
edge=edge,
seq_id=seq_id)
if _complementation_not_needed(left_block_sinfo, right_block_sinfo):
if edge.edge_type == (1, -1):
return last_node_id
else:
return None
else:
current_node_id = _get_max_node_id(build_state.nodes)
column_id = build_state.column_id
if left_block_sinfo.start < right_block_sinfo.start:
last_pos = left_block_sinfo.start + left_block_sinfo.size - 1
next_pos = right_block_sinfo.start
else:
last_pos = right_block_sinfo.start + right_block_sinfo.size - 1
next_pos = left_block_sinfo.start
join_with = last_node_id if _should_join_with_last_node(edge.edge_type) else None
for i in range(last_pos + 1, next_pos):
column_id += 1
current_node_id += 1
missing_nucleotide = _get_missing_nucleotide(build_state.fasta_provider, seq_id, i)
build_state.nodes += [graph.Node(node_id=current_node_id,
base=missing_nucleotide,
aligned_to=None,
column_id=column_id,
block_id=None)]
_add_node_to_sequence(build_state,
seq_id=seq_id,
join_with=join_with,
node_id=current_node_id)
join_with = current_node_id
if _should_join_with_next_node(edge.edge_type):
return current_node_id
else:
return None
def _get_edge_sinfos(seqs_info: Dict[msa.SequenceID, List[SequenceInfo]],
from_block_id: graph.BlockID,
edge: Arc,
seq_id: msa.SequenceID) -> \
Tuple[SequenceInfo, SequenceInfo]:
left_seq_info, right_seq_info = None, None
for sinfo in seqs_info[seq_id]:
if sinfo.block_id == from_block_id:
left_seq_info = sinfo
if sinfo.block_id == edge.to:
right_seq_info = sinfo
if left_seq_info is None or right_seq_info is None:
raise PoagraphBuildException(f"""SequenceInfos for edge cannot be None.
Left block is {left_seq_info},
right block is {right_seq_info}.""")
return left_seq_info, right_seq_info
def _complementation_not_needed(left: SequenceInfo, right: SequenceInfo) -> \
bool:
return left.start + left.size == right.start or \
right.start + right.size == left.start
def _should_join_with_last_node(edge_type: Tuple[int, int]) -> bool:
if edge_type == (1, 1) or edge_type == (1, -1):
return True
elif edge_type == (-1, 1) or edge_type == (-1, -1):
return False
else:
raise PoagraphBuildException("""Incorrect edge type.
Cannot decide if sequence should be joined
with complemented nucleotides.""")
def _should_join_with_next_node(edge_type: Tuple[int, int]) -> bool:
if edge_type == (-1, 1) or edge_type == (1, -1) or edge_type == (-1, -1):
return True
elif edge_type == (1, 1):
return False
else:
raise PoagraphBuildException("""Incorrect edge type. Cannot decide if
complemented nucleotides must be joined
with next block.""")
def _manage_endings(build_state: _BuildState,
block: Block,
join_info: Dict[msa.SequenceID, graph.NodeID]):
sequences_ending_in_this_block = _get_ending_sequences(build_state.seqs_info, block)
for seq_id in sequences_ending_in_this_block:
block_sinfo: SequenceInfo = _get_sinfo(build_state.seqs_info[seq_id], block.id)
if _sequence_not_complete(block_sinfo):
last_node_id = _complement_sequence_middle_nodes(build_state,
seq_id=seq_id,
last_pos=block_sinfo.start + block_sinfo.size-1,
next_pos=block_sinfo.srcSize,
last_node_id=join_info[seq_id])
else:
last_node_id = join_info[seq_id]
build_state.free_edges[seq_id].append(Edge(seq_id=seq_id,
from_block_id=block.id,
to_block_id=None,
last_node_id=last_node_id))
def _get_ending_sequences(seqs_info: Dict[msa.SequenceID, List[SequenceInfo]], block: Block) -> List[msa.SequenceID]:
sequences_ending_in_this_block = []
for seq_id, sinfo_list in seqs_info.items():
last_block_sinfo = sinfo_list[-1]
if last_block_sinfo.block_id == block.id:
sequences_ending_in_this_block.append(seq_id)
return sequences_ending_in_this_block
def _get_sinfo(seq_info: List[SequenceInfo], block_id: int) -> SequenceInfo:
for sinfo in seq_info:
if sinfo.block_id == block_id:
return sinfo
raise PoagraphBuildException(f"No sequences info for given block")
def _sequence_not_complete(last_block_sinfo: SequenceInfo) -> bool:
if last_block_sinfo.strand == 1:
return last_block_sinfo.start + last_block_sinfo.size != last_block_sinfo.srcSize
elif last_block_sinfo.strand == -1:
return last_block_sinfo.start != 0
else:
raise Exception("Unexpected strand value")
def _complement_sequence_middle_nodes(build_state: _BuildState,
seq_id: msa.SequenceID,
last_pos,
next_pos,
last_node_id: graph.NodeID) -> \
graph.NodeID:
current_node_id = _get_max_node_id(build_state.nodes)
column_id = build_state.column_id
join_with = last_node_id
for i in range(last_pos+1, next_pos):
column_id += 1
current_node_id += 1
missing_nucleotide = _get_missing_nucleotide(build_state.fasta_provider, seq_id, i)
build_state.nodes += [graph.Node(node_id=current_node_id,
base=missing_nucleotide,
aligned_to=None,
column_id=column_id,
block_id=None)
]
_add_node_to_sequence(build_state,
seq_id=seq_id,
join_with=join_with,
node_id=current_node_id)
join_with = current_node_id
return current_node_id
| 44.596847
| 117
| 0.56982
| 2,234
| 19,801
| 4.673232
| 0.081916
| 0.03592
| 0.029885
| 0.014368
| 0.496169
| 0.368391
| 0.279693
| 0.208812
| 0.188027
| 0.169157
| 0
| 0.003768
| 0.356699
| 19,801
| 443
| 118
| 44.697517
| 0.815827
| 0.015353
| 0
| 0.249292
| 0
| 0
| 0.038135
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07932
| false
| 0.002833
| 0.028329
| 0.01983
| 0.195467
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9bff3fda8ac0169c8ed57237e81f02a4608ca3c
| 2,451
|
py
|
Python
|
conductor/conductor/api/controllers/validator.py
|
aalsudais/optf-has
|
c3e070b6ebc713a571c10d7a5cd87e5053047136
|
[
"Apache-2.0"
] | 4
|
2019-02-14T19:18:09.000Z
|
2019-10-21T17:17:59.000Z
|
conductor/conductor/api/controllers/validator.py
|
aalsudais/optf-has
|
c3e070b6ebc713a571c10d7a5cd87e5053047136
|
[
"Apache-2.0"
] | null | null | null |
conductor/conductor/api/controllers/validator.py
|
aalsudais/optf-has
|
c3e070b6ebc713a571c10d7a5cd87e5053047136
|
[
"Apache-2.0"
] | 4
|
2019-05-09T07:05:54.000Z
|
2020-11-20T05:56:47.000Z
|
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
from yaml.constructor import ConstructorError
from yaml.nodes import MappingNode
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
class UniqueKeyLoader(Loader):
"""Unique Key Loader for PyYAML
Ensures no duplicate keys on any given level.
https://gist.github.com/pypt/94d747fe5180851196eb#gistcomment-2084028
"""
DUPLICATE_KEY_PROBLEM_MARK = "found duplicate key"
def construct_mapping(self, node, deep=False):
"""Check for duplicate keys while constructing a mapping."""
if not isinstance(node, MappingNode):
raise ConstructorError(
None, None, "expected a mapping node, but found %s" % node.id,
node.start_mark)
mapping = {}
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise ConstructorError("while constructing a mapping",
node.start_mark,
"found unacceptable key (%s)" % exc,
key_node.start_mark)
# check for duplicate keys
if key in mapping:
raise ConstructorError("while constructing a mapping",
node.start_mark,
self.DUPLICATE_KEY_PROBLEM_MARK,
key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
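# --- Added usage sketch (an assumption, not part of the original module) ---
# Passing UniqueKeyLoader to yaml.load makes duplicate keys fatal instead of
# silently keeping the last value:
#
#   import yaml
#   try:
#       yaml.load("a: 1\na: 2", Loader=UniqueKeyLoader)
#   except ConstructorError as exc:
#       print(exc)  # problem text contains DUPLICATE_KEY_PROBLEM_MARK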
| 38.296875
| 78
| 0.572011
| 265
| 2,451
| 5.215094
| 0.471698
| 0.043415
| 0.047033
| 0.054269
| 0.085384
| 0.085384
| 0.085384
| 0.085384
| 0.085384
| 0
| 0
| 0.019965
| 0.305182
| 2,451
| 63
| 79
| 38.904762
| 0.791544
| 0.394941
| 0
| 0.258065
| 0
| 0
| 0.096461
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.16129
| 0
| 0.290323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9c217fdeaa845a8b5a88bdf4815b175a49ccae7
| 32,310
|
py
|
Python
|
gamejoltapi.py
|
bgempire/gamejoltapi
|
03a77527e00a67e5990dbc1289c54e280954b712
|
[
"MIT"
] | 1
|
2022-01-18T12:08:58.000Z
|
2022-01-18T12:08:58.000Z
|
gamejoltapi.py
|
bgempire/gamejoltapi
|
03a77527e00a67e5990dbc1289c54e280954b712
|
[
"MIT"
] | null | null | null |
gamejoltapi.py
|
bgempire/gamejoltapi
|
03a77527e00a67e5990dbc1289c54e280954b712
|
[
"MIT"
] | 1
|
2021-04-30T11:27:39.000Z
|
2021-04-30T11:27:39.000Z
|
from urllib.parse import urlencode as _urlencode, quote as _quote
from urllib.request import urlopen as _urlopen
from hashlib import md5 as _md5
from ast import literal_eval as _literal_eval
from collections import OrderedDict as _OrderedDict
_DEBUG = False
class GameJoltDataRequired(Exception):
""" Exception raised when not all required data is provided in the request call.
:param key: The data field name which is required.
:type key: str
"""
def __init__(self, key):
self.key = key
self.message = "Value is required, cannot be None: " + repr(key)
super().__init__(self.message)
class GameJoltDataCollision(Exception):
""" Exception raised when a value cannot be provided along with another.
:param keys: The data field names which collided.
:type keys: list
"""
def __init__(self, keys):
self.keys = keys
self.message = "Values cannot be used together: " + ", ".join([repr(k) for k in self.keys])
super().__init__(self.message)
class GameJoltAPI:
""" The main Game Jolt API class. Aside from the required arguments, most of the
optional arguments are provided to avoid asking for them in every single method.
:param gameId: The game ID. Required in all requests.
:type gameId: int
:param privateKey: The API private key. Required in all requests.
:type privateKey: str
:param username: Username used in some requests. Optional.
:type username: str
:param userToken: User access token used in some requests. Optional.
:type userToken: str
:param responseFormat: The response format of the requests. Can be ``"json"``, ``"xml"``, ``"keypair"`` or ``"dump"``. Optional, defaults to ``"json"``.
:type responseFormat: str
:param submitRequests: Whether to submit the requests or just return the generated URLs from the method calls. Useful for generating URLs for batch requests. Optional, defaults to ``True``.
:type submitRequests: bool
.. py:attribute:: gameId
:type: int
The game ID. Required in all requests.
.. py:attribute:: privateKey
:type: str
The API private key. Required in all requests.
.. py:attribute:: username
:type: str
Username used in some requests. Optional.
.. py:attribute:: userToken
:type: str
User access token used in some requests. Optional.
.. py:attribute:: responseFormat
:type: str
The response format of the requests. Can be ``"json"``, ``"xml"``, ``"keypair"`` or ``"dump"``. Optional, defaults to ``"json"``.
.. py:attribute:: submitRequests
:type: bool
Whether to submit the requests or just return the generated URLs from the method calls. Useful for generating URLs for batch requests. Optional, defaults to ``True``."""
def __init__(self, gameId, privateKey, username=None, userToken=None, responseFormat="json", submitRequests=True):
self.__API_URL = "https://api.gamejolt.com/api/game/v1_2"
self.__RETURN_FORMATS = ["json", "keypair", "dump", "xml"]
self.gameId = str(gameId)
self.privateKey = privateKey
self.username = username
self.userToken = userToken
self.responseFormat = responseFormat if responseFormat in self.__RETURN_FORMATS else "json"
self.submitRequests = submitRequests
self.operations = {
"users/fetch" : self.__API_URL + "/users/" + "?",
"users/auth" : self.__API_URL + "/users/auth/" + "?",
"sessions/open" : self.__API_URL + "/sessions/open/" + "?",
"sessions/ping" : self.__API_URL + "/sessions/ping/" + "?",
"sessions/check" : self.__API_URL + "/sessions/check/" + "?",
"sessions/close" : self.__API_URL + "/sessions/close/" + "?",
"scores/fetch" : self.__API_URL + "/scores/" + "?",
"scores/tables" : self.__API_URL + "/scores/tables/" + "?",
"scores/add" : self.__API_URL + "/scores/add/" + "?",
"scores/get-rank" : self.__API_URL + "/scores/get-rank/" + "?",
"trophies/fetch" : self.__API_URL + "/trophies/" + "?",
"trophies/add-achieved" : self.__API_URL + "/trophies/add-achieved/" + "?",
"trophies/remove-achieved" : self.__API_URL + "/trophies/remove-achieved/" + "?",
"data-store/set" : self.__API_URL + "/data-store/set/" + "?",
"data-store/update" : self.__API_URL + "/data-store/update/" + "?",
"data-store/remove" : self.__API_URL + "/data-store/remove/" + "?",
"data-store/fetch" : self.__API_URL + "/data-store/" + "?",
"data-store/get-keys" : self.__API_URL + "/data-store/get-keys/" + "?",
"friends" : self.__API_URL + "/friends/" + "?",
"time" : self.__API_URL + "/time/" + "?",
"batch" : self.__API_URL + "/batch/" + "?",
}
def _submit(self, operationUrl, data):
orderedData = _OrderedDict()
isBatch = "batch" in operationUrl
if not self.submitRequests and "format" in data.keys():
data.pop("format")
for key in sorted(data.keys()):
orderedData[key] = data[key]
data = orderedData
requestUrls = data.pop("requests") if isBatch else []
requestAsParams = "&".join(["requests[]=" + url for url in requestUrls]) if isBatch else ""
urlParams = _urlencode(data)
urlParams += "&" + requestAsParams if isBatch else ""
urlToSignature = operationUrl + urlParams + self.privateKey
signature = _md5(urlToSignature.encode()).hexdigest()
finalUrl = operationUrl + urlParams + "&signature=" + signature
if self.submitRequests:
if _DEBUG: print("Requesting URL:", finalUrl)
response = _urlopen(finalUrl).read().decode()
if self.responseFormat == "json":
return _literal_eval(response)["response"]
else:
return response
else:
if _DEBUG: print("Generated URL:", finalUrl)
return finalUrl
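# Illustrative note (mirrors the logic above, values are hypothetical): the
# signature is the MD5 hex digest of the full URL (endpoint + sorted,
# urlencoded parameters) concatenated with the private key, and is appended
# to the request as "&signature=...". Roughly:
#   url = api.operations["time"] + "game_id=1"
#   sig = _md5((url + api.privateKey).encode()).hexdigest()
#   final_url = url + "&signature=" + sig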
def _validateRequiredData(self, data):
for key in data.keys():
if data[key] is None:
raise GameJoltDataRequired(key)
return True
def _getValidData(self, data):
validatedData = {}
if self.responseFormat != "json":
validatedData["format"] = self.responseFormat
for key in data.keys():
if data[key] is not None:
validatedData[key] = data[key]
return validatedData
def _processBoolean(self, value):
if value is not None:
return str(value).lower()
# Users
def usersFetch(self, username=None, userId=None):
"""Returns a user's data.
:param username: The username of the user whose data you'd like to fetch.
:type username: str
:param userId: The ID of the user whose data you'd like to fetch.
:type userId: str, int or list
.. note::
- Only one parameter, ``username`` or ``userId``, is required.
- You can pass in multiple user ids by providing a list or separating them with commas in a string (example: ``"13,89,35"``)."""
if type(userId) in (list, tuple, set):
userId = ",".join(userId)
# Required data
data = {
"game_id" : self.gameId
}
if username is not None:
data["username"] = username
elif userId is not None:
data["user_id"] = userId
else:
data["username"] = self.username
self._validateRequiredData(data)
return self._submit(self.operations["users/fetch"], data)
def usersAuth(self):
"""Authenticates the user's information. This should be done before you make
any calls for the user, to make sure the user's credentials (username and
token) are valid."""
# Required data
data = {
"game_id" : self.gameId,
"username" : self.username,
"user_token" : self.userToken
}
self._validateRequiredData(data)
return self._submit(self.operations["users/auth"], data)
# Sessions
def sessionsOpen(self):
"""Opens a game session for a particular user and allows you to tell Game Jolt
that a user is playing your game. You must ping the session to keep it active
and you must close it when you're done with it.
.. note::
You can only have one open session for a user at a time. If you try to open a new session while one is running, the system will close out the current one before opening the new one.
"""
# Required data
data = {
"game_id" : self.gameId,
"username" : self.username,
"user_token" : self.userToken
}
self._validateRequiredData(data)
return self._submit(self.operations["sessions/open"], data)
def sessionsPing(self, status=None):
"""Pings an open session to tell the system that it's still active. If the session
hasn't been pinged within 120 seconds, the system will close the session and you
will have to open another one. It's recommended that you ping about every 30
seconds or so to keep the system from clearing out your session.
You can also let the system know whether the player is in an active or idle state
within your game.
:param status: Sets the status of the session.
:type status: str
.. note::
Valid Values for ``status``:
- ``"active"``: Sets the session to the ``"active"`` state.
- ``"idle"``: Sets the session to the ``"idle"`` state.
"""
# Required data
data = {
"game_id" : self.gameId,
"username" : self.username,
"user_token" : self.userToken
}
# Optional data
optionalData = {
"status" : status # active or idle
}
self._validateRequiredData(data)
data.update(self._getValidData(optionalData))
return self._submit(self.operations["sessions/ping"], data)
def sessionsCheck(self):
"""Checks to see if there is an open session for the user. Can be used to see
if a particular user account is active in the game.
.. note::
This endpoint returns ``"false"`` for the ``"success"`` field when no open session exists. That behaviour is different from other endpoints which use this field to indicate an error state.
"""
# Required data
data = {
"game_id" : self.gameId,
"username" : self.username,
"user_token" : self.userToken
}
self._validateRequiredData(data)
return self._submit(self.operations["sessions/check"], data)
def sessionsClose(self):
"""Closes the active session."""
# Required data
data = {
"game_id" : self.gameId,
"username" : self.username,
"user_token" : self.userToken
}
self._validateRequiredData(data)
return self._submit(self.operations["sessions/close"], data)
# Scores
def scoresFetch(self, limit=None, tableId=None, guest=None, betterThan=None, worseThan=None, thisUser=False):
"""Returns a list of scores either for a user or globally for a game.
:param limit: The number of scores you'd like to return.
:type limit: int
:param tableId: The ID of the score table.
:type tableId: int
:param guest: A guest's name.
:type guest: str
:param betterThan: Fetch only scores better than this score sort value.
:type betterThan: int
:param worseThan: Fetch only scores worse than this score sort value.
:type worseThan: int
:param thisUser: If ``True``, fetch only scores of current user. Else, fetch scores of all users.
:type thisUser: bool
.. note::
- The default value for ``limit`` is ``10`` scores. The maximum amount of scores you can retrieve is ``100``.
- If ``tableId`` is left blank, the scores from the primary score table will be returned.
- Only pass in ``thisUser=True`` if you would like to retrieve scores for just the user set in the class constructor. Leave ``thisUser=False`` and ``guest=None`` to retrieve all scores.
- ``guest`` allows you to fetch scores by a specific guest name. Only pass either the ``thisUser=True`` or the ``guest`` (or none), never both.
- Scores are returned in the order of the score table's sorting direction. e.g. for descending tables the bigger scores are returned first.
"""
# Required data
data = {
"game_id" : self.gameId
}
# Optional data
optionalData = {
"username" : self.username if guest is None and thisUser else None,
"user_token" : self.userToken if guest is None and thisUser else None,
"limit" : limit,
"table_id" : tableId,
"guest" : guest if guest is not None and not thisUser else None,
"better_than" : betterThan,
"worse_than" : worseThan,
}
self._validateRequiredData(data)
data.update(self._getValidData(optionalData))
return self._submit(self.operations["scores/fetch"], data)
def scoresTables(self):
"""Returns a list of high score tables for a game."""
# Required data
data = {
"game_id" : self.gameId
}
self._validateRequiredData(data)
return self._submit(self.operations["scores/tables"], data)
def scoresAdd(self, score, sort, tableId=None, guest=None, extraData=None):
"""Adds a score for a user or guest.
:param score: This is a string value associated with the score. Example: ``"500 Points"``
:type score: str
:param sort: This is a numerical sorting value associated with the score. All sorting will be based on this number. Example: ``500``
:type sort: int
:param tableId: The ID of the score table to submit to.
:type tableId: int
:param guest: The guest's name. Overrides the ``username`` set in the constructor.
:type guest: str
:param extraData: If there's any extra data you would like to store as a string, you can use this variable.
:type extraData: str
.. note::
- You can either store a score for a user or a guest. If you're storing for a user, you must pass in the ``username`` and ``userToken`` parameters in the class constructor and leave ``guest`` as ``None``. If you're storing for a guest, you must pass in the ``guest`` parameter.
- The ``extraData`` value is only retrievable through the API and your game's dashboard. It's never displayed publicly to users on the site. If there is other data associated with the score such as time played, coins collected, etc., you should definitely include it. It will be helpful in cases where you believe a gamer has illegitimately achieved a high score.
- If ``tableId`` is left blank, the score will be submitted to the primary high score table.
"""
# Required data
data = {
"game_id" : self.gameId,
"score" : score,
"sort" : sort
}
# Optional data
optionalData = {
"username" : self.username if guest is None else None,
"user_token" : self.userToken if guest is None else None,
"table_id" : tableId,
"guest" : guest if guest is not None else None,
"extra_data" : extraData,
}
self._validateRequiredData(data)
data.update(self._getValidData(optionalData))
return self._submit(self.operations["scores/add"], data)
def scoresGetRank(self, sort, tableId=None):
"""Returns the rank of a particular score on a score table.
:param sort: This is a numerical sorting value that is represented by a rank on the score table.
:type sort: int
:param tableId: The ID of the score table from which you want to get the rank.
:type tableId: int
.. note::
- If ``tableId`` is left blank, the ranks from the primary high score table will be returned.
- If the score is not represented by any rank on the score table, the request will return the rank that is closest to the requested score.
"""
# Required data
data = {
"game_id" : self.gameId,
"sort" : sort
}
# Optional data
optionalData = {
"table_id" : tableId,
}
self._validateRequiredData(data)
data.update(self._getValidData(optionalData))
return self._submit(self.operations["scores/get-rank"], data)
# Trophies
def trophiesFetch(self, achieved=None, trophyId=None):
"""Returns one trophy or multiple trophies, depending on the parameters passed in.
:param achieved: Pass in ``True`` to return only the achieved trophies for a user. Pass in ``False`` to return only trophies the user hasn't achieved. Leave blank to retrieve all trophies.
:type achieved: bool
:param trophyId: If you would like to return just one trophy, you may pass the trophy ID with this parameter. If you do, only that trophy will be returned in the response. You may also pass multiple trophy IDs here if you want to return a subset of all the trophies. You do this as a list or a string with comma-separated values in the same way you would for retrieving multiple users (example: ``"13,89,35"``). Passing a ``trophyId`` will ignore the ``achieved`` parameter if it is passed.
:type trophyId: str, int or list
"""
if type(trophyId) in (list, tuple, set):
trophyId = ",".join(trophyId)
# Required data
data = {
"game_id" : self.gameId,
"username" : self.username,
"user_token" : self.userToken
}
# Optional data
optionalData = {
"achieved" : self._processBoolean(achieved) if trophyId is None else None,
"trophy_id" : trophyId
}
self._validateRequiredData(data)
data.update(self._getValidData(optionalData))
return self._submit(self.operations["trophies/fetch"], data)
def trophiesAddAchieved(self, trophyId):
"""Sets a trophy as achieved for a particular user.
:param trophyId: The ID of the trophy to add for the user.
:type trophyId: int
"""
# Required data
data = {
"game_id" : self.gameId,
"username" : self.username,
"user_token" : self.userToken,
"trophy_id" : trophyId
}
self._validateRequiredData(data)
return self._submit(self.operations["trophies/add-achieved"], data)
def trophiesRemoveAchieved(self, trophyId):
"""Remove a previously achieved trophy for a particular user.
:param trophyId: The ID of the trophy to remove from the user.
:type trophyId: int
"""
# Required data
data = {
"game_id" : self.gameId,
"username" : self.username,
"user_token" : self.userToken,
"trophy_id" : trophyId
}
self._validateRequiredData(data)
return self._submit(self.operations["trophies/remove-achieved"], data)
# Data Storage
def dataStoreSet(self, key, data, globalData=False):
"""Sets data in the data store.
:param key: The key of the data item you'd like to set.
:type key: str
:param data: The data you'd like to set.
:type data: str
:param globalData: If set to `True`, ignores ``username`` and ``userToken`` set in constructor and processes global data instead of user data.
:type globalData: bool
.. note::
You can create new data store items by passing in a key that doesn't yet exist in the data store.
.. code-block:: python
# Store on the key "some_global_value" the data "500" in the global data store
result = api.dataStoreSet("some_global_value", "500", globalData=True)
"""
# Required data
data = {
"game_id" : self.gameId,
"key" : key,
"data" : data
}
# Optional data
optionalData = {
"username" : self.username,
"user_token" : self.userToken
}
# Process global data instead of user data
if globalData:
optionalData["username"] = None
optionalData["user_token"] = None
self._validateRequiredData(data)
data.update(self._getValidData(optionalData))
return self._submit(self.operations["data-store/set"], data)
def dataStoreUpdate(self, key, operation, value, globalData=False):
"""Updates data in the data store.
:param key: The key of the data item you'd like to update.
:type key: str
:param operation: The operation you'd like to perform.
:type operation: str
:param value: The value you'd like to apply to the data store item. (See values below.)
:type value: str
:param globalData: If set to `True`, ignores ``username`` and ``userToken`` set in constructor and processes global data instead of user data.
:type globalData: bool
.. note::
Valid Values for ``operation``:
- ``"add"``: Adds the ``value`` to the current data store item.
- ``"subtract"``: Substracts the ``value`` from the current data store item.
- ``"multiply"``: Multiplies the ``value`` by the current data store item.
- ``"divide"``: Divides the current data store item by the ``value``.
- ``"append"``: Appends the ``value`` to the current data store item.
- ``"prepend"``: Prepends the ``value`` to the current data store item.
.. note::
You can only perform mathematical operations on numerical data.
.. code-block:: python
# Adds "100" to "some_global_value" in the global data store
result = api.dataStoreUpdate("some_global_value", "add", "100", globalData=True)
"""
# Required data
data = {
"game_id" : self.gameId,
"key" : key,
"operation" : operation,
"value" : value
}
# Optional data
optionalData = {
"username" : self.username,
"user_token" : self.userToken
}
# Process global data instead of user data
if globalData:
optionalData["username"] = None
optionalData["user_token"] = None
self._validateRequiredData(data)
data.update(self._getValidData(optionalData))
return self._submit(self.operations["data-store/update"], data)
def dataStoreRemove(self, key, globalData=False):
"""Removes data from the data store.
:param key: The key of the data item you'd like to remove.
:type key: str
:param globalData: If set to `True`, ignores ``username`` and ``userToken`` set in constructor and processes global data instead of user data.
:type globalData: bool
.. code-block:: python
# Remove "some_global_value" from global data store
result = api.dataStoreRemove("some_global_value", globalData=True)
"""
# Required data
data = {
"game_id" : self.gameId,
"key" : key
}
# Optional data
optionalData = {
"username" : self.username,
"user_token" : self.userToken
}
# Process global data instead of user data
if globalData:
optionalData["username"] = None
optionalData["user_token"] = None
self._validateRequiredData(data)
data.update(self._getValidData(optionalData))
return self._submit(self.operations["data-store/remove"], data)
def dataStoreFetch(self, key, globalData=False):
"""Returns data from the data store.
:param key: The key of the data item you'd like to fetch.
:type key: str
:param globalData: If set to `True`, ignores ``username`` and ``userToken`` set in constructor and processes global data instead of user data.
:type globalData: bool
.. code-block:: python
# Get "some_global_value" from global data store
result = api.dataStoreFetch("some_global_value", globalData=True)
"""
# Required data
data = {
"game_id" : self.gameId,
"key" : key
}
# Optional data
optionalData = {
"username" : self.username,
"user_token" : self.userToken
}
# Process global data instead of user data
if globalData:
optionalData["username"] = None
optionalData["user_token"] = None
self._validateRequiredData(data)
data.update(self._getValidData(optionalData))
return self._submit(self.operations["data-store/fetch"], data)
def dataStoreGetKeys(self, pattern=None, globalData=False):
"""Returns either all the keys in the game's global data store, or all the keys in a user's data store.
:param pattern: The pattern to apply to the key names in the data store.
:type pattern: str
:param globalData: If set to `True`, ignores ``username`` and ``userToken`` set in constructor and processes global data instead of user data.
:type globalData: bool
.. note::
- If you apply a pattern to the request, only keys with applicable key names will be returned. The placeholder character for patterns is ``*``.
- This request will return a list of the ``key`` values. The ``key`` return value can appear more than once.
.. code-block:: python
# Get keys from global data store starting with "some_global"
result = api.dataStoreGetKeys("some_global_*", globalData=True)
"""
# Required data
data = {
"game_id" : self.gameId
}
# Optional data
optionalData = {
"username" : self.username,
"user_token" : self.userToken,
"pattern" : pattern
}
# Process global data instead of user data
if globalData:
optionalData["username"] = None
optionalData["user_token"] = None
self._validateRequiredData(data)
data.update(self._getValidData(optionalData))
return self._submit(self.operations["data-store/get-keys"], data)
# Friends
def friends(self):
"""Returns the list of a user's friends."""
# Required data
data = {
"game_id" : self.gameId,
"username" : self.username,
"user_token" : self.userToken
}
self._validateRequiredData(data)
return self._submit(self.operations["friends"], data)
# Time
def time(self):
"""Returns the time of the Game Jolt server."""
# Required data
data = {
"game_id" : self.gameId
}
self._validateRequiredData(data)
return self._submit(self.operations["time"], data)
# Batch Calls
def batch(self, requests=None, parallel=None, breakOnError=None):
"""A batch request is a collection of sub-requests that enables developers to send multiple API calls with one HTTP request.
:param requests: A list of sub-request URLs. Each request will be executed and the responses of each one will be returned in the payload.
:type requests: list of str
:param parallel: By default, each sub-request is processed on the servers sequentially. If this is set to ``True``, then all sub-requests are processed at the same time, without waiting for the previous sub-request to finish before the next one is started.
:type parallel: bool
:param breakOnError: If this is set to ``True``, one sub-request failure will cause the entire batch to stop processing subsequent sub-requests and return a value of ``"false"`` for success.
:type breakOnError: bool
.. note::
- The maximum number of sub-requests in one batch request is 50.
- Dump format is not supported in batch calls.
- The ``parallel`` and ``breakOnError`` parameters cannot be used in the same request.
.. code-block:: python
# Disable request submitting to get URLs from methods
api.submitRequests = False
# Generate list of request URLs
requests = [
api.usersFetch(),
api.sessionsCheck(),
api.scoresTables(),
api.trophiesFetch(),
api.dataStoreGetKeys("*", globalData=True),
api.friends(),
api.time()
]
# Enable request submitting again
api.submitRequests = True
# Submit batch request and get all results
result = api.batch(requests=requests)
"""
requests = [] if requests is None else list(requests)
if parallel is not None and breakOnError is not None:
raise GameJoltDataCollision(["parallel", "break_on_error"])
for i in range(len(requests)):
requests[i] = requests[i].replace(self.__API_URL, "")
requests[i] = requests[i].split("&signature=")[0]
requests[i] += "&signature=" + _md5((requests[i] + self.privateKey).encode()).hexdigest()
requests[i] = _quote(requests[i].replace(self.__API_URL, ""), safe="")
# Required data
data = {
"game_id" : self.gameId,
"requests" : requests if len(requests) > 0 else None
}
# Optional data
optionalData = {
"parallel" : self._processBoolean(parallel),
"break_on_error" : self._processBoolean(breakOnError)
}
self._validateRequiredData(data)
data.update(self._getValidData(optionalData))
return self._submit(self.operations["batch"], data)
| 38.327402
| 498
| 0.572857
| 3,656
| 32,310
| 4.992888
| 0.129923
| 0.01775
| 0.013148
| 0.023009
| 0.409335
| 0.374767
| 0.351649
| 0.335543
| 0.315876
| 0.301797
| 0
| 0.00232
| 0.333024
| 32,310
| 842
| 499
| 38.372922
| 0.844733
| 0.4355
| 0
| 0.431085
| 0
| 0
| 0.120855
| 0.010145
| 0.002933
| 0
| 0
| 0
| 0
| 1
| 0.082111
| false
| 0
| 0.014663
| 0
| 0.184751
| 0.005865
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9c3b4eb59658fb2124d809e4025b7e6912a6d8f
| 3,243
|
py
|
Python
|
poplar/util.py
|
mortonjt/poplar
|
854d1ef819392f54536df386ef034091831802ed
|
[
"BSD-3-Clause"
] | null | null | null |
poplar/util.py
|
mortonjt/poplar
|
854d1ef819392f54536df386ef034091831802ed
|
[
"BSD-3-Clause"
] | null | null | null |
poplar/util.py
|
mortonjt/poplar
|
854d1ef819392f54536df386ef034091831802ed
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import inspect
import torch
import numpy as np
import numbers
def get_data_path(fn, subfolder='data'):
"""Return path to filename ``fn`` in the data folder.
During testing it is often necessary to load data files. This
function returns the full path to files in the ``data`` subfolder
by default.
Parameters
----------
fn : str
File name.
subfolder : str, defaults to ``data``
Name of the subfolder that contains the data.
Returns
-------
str
Inferred absolute path to the test data for the module where
``get_data_path(fn)`` is called.
Notes
-----
The requested path may not point to an existing file, as its
existence is not checked.
This is the same method as borrowed from scikit-bio
"""
# getouterframes returns a list of tuples: the second tuple
# contains info about the caller, and the second element is its
# filename
callers_filename = inspect.getouterframes(inspect.currentframe())[1][1]
path = os.path.dirname(os.path.abspath(callers_filename))
data_path = os.path.join(path, subfolder, fn)
return data_path
def check_random_state(seed):
""" Turn seed into a np.random.RandomState instance.
Parameters
----------
seed : None | int | instance of RandomState
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
Note
----
This is from sklearn
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, numbers.Integral):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
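# Usage sketch (assumption): all three accepted forms yield a usable RNG.
#   check_random_state(None)        # the global np.random singleton state
#   check_random_state(42)          # a fresh RandomState seeded with 42
#   rng = np.random.RandomState(0)
#   check_random_state(rng) is rng  # True: instances pass through unchanged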
dictionary = {
"A": 1,
"B": 2,
"C": 3,
"D": 4,
"E": 5,
"F": 6,
"G": 7,
"H": 8,
"I": 9,
"J": 10,
"K": 11,
"L": 12,
"M": 13,
"N": 14,
"O": 15,
"P": 16,
"Q": 17,
"R": 18,
"S": 19,
"T": 20,
"U": 21,
"V": 22,
"W": 23,
"X": 24,
"Y": 25,
"Z": 26,
".": 27
}
def encode(x):
""" Convert string to tokens. """
tokens = list(map(lambda i: dictionary[i], list(x)))
tokens = torch.Tensor(tokens)
tokens = tokens.long()
return tokens
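# Illustrative note (assumption): encode maps each character through the
# dictionary above and returns a long tensor, e.g.
#   encode("ACE")  ->  tensor([1, 3, 5])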
def tokenize(gene, pos, neg, model, device, pad=1024):
if len(gene) == len(pos) and len(gene) == len(neg):
# extract features, and take <CLS> token
g = list(map(lambda x: model.extract_features(encode(x))[:, 0, :], gene))
p = list(map(lambda x: model.extract_features(encode(x))[:, 0, :], pos))
n = list(map(lambda x: model.extract_features(encode(x))[:, 0, :], neg))
g_ = torch.cat(g, 0)
p_ = torch.cat(p, 0)
n_ = torch.cat(n, 0)
else:
g_ = model.extract_features(encode(gene))[:, 0, :]
p_ = model.extract_features(encode(pos))[:, 0, :]
n_ = model.extract_features(encode(neg))[:, 0, :]
return g_, p_, n_
| 26.153226
| 81
| 0.588344
| 454
| 3,243
| 4.145374
| 0.414097
| 0.055792
| 0.063762
| 0.082891
| 0.06695
| 0.06695
| 0.06695
| 0.06695
| 0.06695
| 0.06695
| 0
| 0.025575
| 0.276596
| 3,243
| 123
| 82
| 26.365854
| 0.776641
| 0.365094
| 0
| 0
| 0
| 0
| 0.048117
| 0.012552
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.075758
| 0
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9c72fdee75782efa656735bc0a7c52d729983de
| 5,109
|
py
|
Python
|
search-github-ip.py
|
brilliant116/learning-code
|
5378b1f0c53b4ceac56942044bbb666278a138f2
|
[
"MIT"
] | null | null | null |
search-github-ip.py
|
brilliant116/learning-code
|
5378b1f0c53b4ceac56942044bbb666278a138f2
|
[
"MIT"
] | null | null | null |
search-github-ip.py
|
brilliant116/learning-code
|
5378b1f0c53b4ceac56942044bbb666278a138f2
|
[
"MIT"
] | null | null | null |
import asyncio
import time
import socket
import argparse
import aiohttp
class MyConnector(aiohttp.TCPConnector):
def __init__(self, ip):
self.__ip = ip
super().__init__()
async def _resolve_host(
self, host: str, port: int,
traces: None = None,
):
return [{
'hostname': host, 'host': self.__ip, 'port': port,
'family': self._family, 'proto': 0, 'flags': 0,
}]
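# Usage sketch (mirrors test_domain_http below): the connector pins every
# request in a session to one IP while the request still carries the
# original hostname, e.g.
#   async with aiohttp.ClientSession(connector=MyConnector(ip)) as s:
#       r = await s.get('https://github.com/')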
async def test_domain(domain, ip, proto):
if proto == 'http':
return await test_domain_http(domain, ip)
elif proto == 'ssh':
return await test_domain_ssh(domain, ip)
else:
raise ValueError('unknown proto', proto)
async def test_domain_ssh(domain, ip):
st = time.time()
r, _w = await asyncio.open_connection(ip, 22)
await r.read(1)
return time.time() - st
async def test_domain_http(domain, ip):
url = 'https://github.com/'
st = time.time()
async with aiohttp.ClientSession(
connector = MyConnector(ip),
timeout = aiohttp.ClientTimeout(total=10),
) as s:
r = await s.get(url)
_ = await r.text()
return time.time() - st
async def producer(q, proto):
items = await get_items(proto)
for item in items:
await q.put(item)
await q.put(None)
async def printer(q):
while True:
try:
item = await q.get()
except asyncio.CancelledError:
break
if isinstance(item[1], Exception):
(domain, ip, proto), e = item
print(f'{domain:21} {ip:15} {proto:4} {e!r}')
else:
(domain, ip, proto), t = item
print(f'{domain:21} {ip:15} {proto:4} {t:6.2f}')
async def fastest_finder(q):
fastest_ip, latency = None, 1000
while True:
try:
item = await q.get()
except asyncio.CancelledError:
return fastest_ip
if not isinstance(item[1], Exception):
(_, ip, _), t = item
if t < latency:
latency = t
fastest_ip = ip
async def worker(q, ret_q):
while True:
item = await q.get()
if item is None:
await q.put(None)
break
try:
t = await test_domain(*item)
except Exception as e:
await ret_q.put((item, e))
else:
await ret_q.put((item, t))
async def main(proto):
q = asyncio.Queue()
ret_q = asyncio.Queue()
futures = [worker(q, ret_q) for _ in range(40)]
producer_fu = asyncio.ensure_future(producer(q, proto))
printer_fu = asyncio.ensure_future(printer(ret_q))
await asyncio.wait(futures)
printer_fu.cancel()
await producer_fu
await printer_fu
async def update_hosts():
import os, sys, subprocess
if os.geteuid() != 0:
sys.exit('not root?')
q = asyncio.Queue()
ret_q = asyncio.Queue()
futures = [worker(q, ret_q) for _ in range(40)]
producer_fu = asyncio.ensure_future(
producer(q, ['http']))
finder_fu = asyncio.ensure_future(
fastest_finder(ret_q))
await asyncio.wait(futures)
finder_fu.cancel()
await producer_fu
ip = await finder_fu
if ip is not None:
cmd = ['sed', '-Ei', rf'/^[0-9.]+[[:space:]]+(gist\.)?github\.com\>/s/[^[:space:]]+/{ip}/', '/etc/hosts']
subprocess.check_call(cmd)
async def resolve(domain):
loop = asyncio.get_event_loop()
addrinfo = await loop.getaddrinfo(
domain, None,
family=socket.AF_INET,
proto=socket.IPPROTO_TCP,
)
ips = [x[-1][0] for x in addrinfo]
return domain, ips
async def get_items(proto):
items = [
('13.234.210.38', 'Bombay'),
('13.234.176.102', 'Bombay'),
('52.192.72.89', 'Tokyo'),
('13.114.40.48', 'Tokyo'),
('52.69.186.44', 'Tokyo'),
('15.164.81.167', 'Seoul'),
('52.78.231.108', 'Seoul'),
('13.229.188.59', 'Singapore'),
('13.250.177.223', 'Singapore'),
('52.74.223.119', 'Singapore'),
('192.30.255.112', 'Seattle'),
('192.30.255.113', 'Seattle'),
('140.82.112.3', 'Seattle'),
('140.82.112.4', 'Seattle'),
('192.30.253.112', 'Ashburn'),
('192.30.253.113', 'Ashburn'),
('140.82.113.3', 'Ashburn'),
('140.82.113.4', 'Ashburn'),
('140.82.114.3', 'Ashburn'),
('140.82.114.4', 'Ashburn'),
('140.82.118.3', 'Amsterdam'),
('140.82.118.4', 'Amsterdam'),
('140.82.121.3', 'Frankfurt'),
('140.82.121.4', 'Frankfurt'),
('13.237.44.5', 'Sydney'),
('52.64.108.95', 'Sydney'),
('13.236.229.21', 'Sydney'),
('18.231.5.6', 'Sao Paulo'),
('18.228.52.138', 'Sao Paulo'),
('18.228.67.229', 'Sao Paulo'),
]
return [(x[1], x[0], y) for x in items for y in proto]
if __name__ == '__main__':
import logging
logging.getLogger().addHandler(logging.NullHandler())
parser = argparse.ArgumentParser(
description='GitHub IP access speed test')
parser.add_argument('proto', nargs='*',
default=['http', 'ssh'],
help='test the specified protocol(s)')
parser.add_argument('--hosts',
action='store_true',
help='update /etc/hosts')
args = parser.parse_args()
if args.hosts:
main_fu = update_hosts()
else:
main_fu = main(args.proto)
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main_fu)
except KeyboardInterrupt:
pass
| 24.921951
| 109
| 0.595811
| 720
| 5,109
| 4.105556
| 0.302778
| 0.032476
| 0.020298
| 0.028417
| 0.225304
| 0.154263
| 0.119756
| 0.119756
| 0.119756
| 0.100812
| 0
| 0.08198
| 0.228812
| 5,109
| 204
| 110
| 25.044118
| 0.668274
| 0
| 0
| 0.209302
| 0
| 0.005814
| 0.177138
| 0.012723
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005814
| false
| 0.005814
| 0.040698
| 0
| 0.098837
| 0.034884
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9c8ce2fe1e056d6cf0dbb9895f3232eec31c3ce
| 23,732
|
py
|
Python
|
AdventOfCode/2020/day20.py
|
benhunter/coding-challenges
|
0cca059da7c8ae6cdc62dbeb3db8250ab42ac3b6
|
[
"MIT"
] | null | null | null |
AdventOfCode/2020/day20.py
|
benhunter/coding-challenges
|
0cca059da7c8ae6cdc62dbeb3db8250ab42ac3b6
|
[
"MIT"
] | null | null | null |
AdventOfCode/2020/day20.py
|
benhunter/coding-challenges
|
0cca059da7c8ae6cdc62dbeb3db8250ab42ac3b6
|
[
"MIT"
] | null | null | null |
# Advent of Code 2020 Day
# https://adventofcode.com/2020/
import cProfile
import itertools
import math
import numpy as np
from collections import namedtuple
from pprint import pformat, pprint
from typing import List, Optional
from numpy.typing import ArrayLike
USE_EXAMPLE1 = False # example input or full input
DEBUG = False # debug prints to console
PROFILE = False # profiling flag
Tile = namedtuple("Tile", "number, data")
Tile.__repr__ = ( # type: ignore
lambda self: f"Tile: {self.number}\n" + f"{pformat(self.data)}" # type: ignore
) # type: ignore
# class Tile(namedtuple("Tile", "number, data")):
# # Tile with a number ID and data fields.
# # Inheriting from namedtuple to override the repr dunder.
# # Equivalent code:
# # Tile = namedtuple("Tile", "number, data")
# # Tile.__repr__ = lambda self: f"Tile: {self.number}\n{pformat(self.data)}"
# def __new__(cls, number, data: ArrayLike):
# self = super(Tile, cls).__new__(cls, number, data)
# return self
# def __repr__(self) -> str:
# return f"Tile: {self.number}\n{pformat(self.data)}"
def test_Tile():
assert Tile("1", 0)
assert Tile("1", 0).number == "1"
assert Tile("1", 0).data == 0
def count_edge_matches(first_tile: Tile, second_tile: Tile):
assert type(first_tile) is Tile
assert type(second_tile) is Tile
first_tile_faces: List[np.ndarray] = list(generate_faces(first_tile))
second_tile_faces: List[np.ndarray] = list(generate_faces(second_tile))
matches: List[np.ndarray] = [
face_one
for face_one, face_two in itertools.product(first_tile_faces, second_tile_faces)
if np.array_equal(face_one, face_two)
]
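# Each geometric edge match is counted twice above -- once between the
# original faces and once between their reversed copies from
# generate_faces -- hence the division by 2.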
return len(matches) // 2
def test_count_edge_match():
tile_one = Tile(0, np.array([[0, 1], [2, 3]], dtype=object))
tile_two = Tile(1, np.array([[0, 1], [4, 5]], dtype=object))
assert count_edge_matches(tile_one, tile_two) == 1
def generate_faces(tile: Tile):
# Generator for Tile faces
# use:
# for face in generate_faces(tile):
# print(face)
assert type(tile) is Tile
tile_faces = [
tile.data[0],
tile.data[-1],
tile.data[:, 0],
tile.data[:, -1],
]
nparray_tile_flipped = np.flip(tile.data) # flip on both axes
tile_faces += [
nparray_tile_flipped[0],
nparray_tile_flipped[-1],
nparray_tile_flipped[:, 0],
nparray_tile_flipped[:, -1],
]
yield from tile_faces
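# Illustrative note (assumption): generate_faces yields 8 edge vectors --
# the tile's four edges (top, bottom, left, right) plus the same four
# edges of the tile flipped on both axes, i.e. each edge reversed.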
def is_face_matches_tile(face: np.ndarray, tile: Tile):
# determine whether face matches anywhere on tile,
# including after rotating and flipping tile
assert type(face) is np.ndarray
assert type(tile) is Tile
matches = [
face_one
for face_one, face_two in itertools.product([face], generate_faces(tile))
if np.array_equal(face_one, face_two)
]
return bool(len(matches))
def is_face_matches_face(face_one: np.ndarray, face_two: np.ndarray):
# Compare faces without flipping
assert type(face_one) is np.ndarray
assert type(face_two) is np.ndarray
result = np.array_equal(face_one, face_two)
return result
def count_all_edge_matches(tile: Tile, tiles: List[Tile]):
count = 0
for candidate_tile in tiles:
if tile.number == candidate_tile.number:
# don't check a tile against itself
continue
# count the matching edges
count += count_edge_matches(tile, candidate_tile)
if DEBUG and count > 0:
print(count)
return count
def find_corner_pieces(tiles: List[Tile]):
# count matching faces for every tile
# corner tiles have only 2 matching faces
# all other tiles have more
corner_pieces = []
for tile in tiles:
match_count = count_all_edge_matches(tile, tiles)
if match_count == 2:
corner_pieces.append(tile)
return corner_pieces
def next_match(known_tile: Tile, candidate_tiles: List[Tile]) -> Tile:
assert type(known_tile) is Tile
# from candidate_tiles, find a tile that has a matching edge with known_tile
return next(generate_next_match(known_tile, candidate_tiles))
def generate_next_match(known_tile: Tile, candidate_tiles: List[Tile]):
assert type(known_tile) is Tile
# from candidate_tiles, find a tile that has a matching edge with known_tile
for candidate_tile in candidate_tiles:
if count_edge_matches(known_tile, candidate_tile) > 0:
yield candidate_tile
raise RuntimeError("Did not find a next match.")
def product(values):
    # see also: math.prod (Python 3.8+)
    ret = 1
    for v in values:
        ret *= v
    return ret
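def test_product():
    # product() should agree with math.prod for non-empty inputs.
    assert product([2, 3, 4]) == 24
    assert product([2, 3, 4]) == math.prod([2, 3, 4])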
def generate_nparray_orientation(npa: ArrayLike):
    # generator yielding all orientations (rotations and flips) of a 2-dimensional np.array
# Usage:
# for orientation in generate_nparray_orientation(candidate_nparray):
# print(orientation)
# np array rotations
# https://numpy.org/doc/stable/reference/generated/numpy.rot90.html#numpy.rot90
# print(tiles[0].data)
# print(np.rot90(tiles[0].data)) # rotate counter clockwise
# print(np.rot90(tiles[0].data, axes=(1,0))) # rotate clockwise
# print(np.rot90(tiles[0].data, k=0)) # rotate counter clockwise 0 times
# note that rotations return views, not new arrays
# flip too, not just rotate
orientations = [
npa, # original
np.rot90(npa, k=1), # counter-clockwise once
np.rot90(npa, k=2), # counter-clockwise twice
np.rot90(npa, k=3), # counter-clockwise thrice
]
npa_flipped = np.flip(npa, axis=0) # flip on x axis
orientations += [
npa_flipped,
np.rot90(npa_flipped, k=1),
np.rot90(npa_flipped, k=2),
np.rot90(npa_flipped, k=3),
]
yield from orientations
def generate_tile_orientation(tile: Tile):
yield from generate_nparray_orientation(tile.data)
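def test_generate_nparray_orientation():
    # A generic (asymmetric) 2x2 array has exactly 8 distinct orientations:
    # 4 rotations of the original plus 4 rotations of the flipped array.
    npa = np.array([[1, 2], [3, 4]])
    orientations = list(generate_nparray_orientation(npa))
    assert len(orientations) == 8
    assert len({o.tobytes() for o in orientations}) == 8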
def is_tile_matches_neighbors(
y_index: int, x_index: int, solution: List[List[Optional[Tile]]]
):
"""Neighbors can be Tile or None"""
optional_tile: Optional[Tile] = solution[y_index][x_index]
if optional_tile is None:
return True
elif isinstance(optional_tile, Tile):
tile: Tile = optional_tile
else:
raise RuntimeError
assert isinstance(solution[y_index][x_index], Tile)
if DEBUG:
print(tile.data)
# Up
temp_tile: Optional[Tile]
if y_index > 0:
temp_tile = solution[y_index - 1][x_index]
if isinstance(temp_tile, Tile):
# if solution[y_index - 1][x_index]:
neighbor_up: Tile = temp_tile
neighbor_face_down: np.ndarray = neighbor_up.data[-1]
tile_face_up: np.ndarray = tile.data[0]
if not is_face_matches_face(tile_face_up, neighbor_face_down):
return False
# Down
if y_index < (len(solution) - 1):
temp_tile = solution[y_index + 1][x_index]
if isinstance(temp_tile, Tile):
neighbor_down: Tile = temp_tile
neighbor_face_up: np.ndarray = neighbor_down.data[0]
tile_face_down: np.ndarray = tile.data[-1]
if not is_face_matches_face(tile_face_down, neighbor_face_up):
return False
# Left
if x_index > 0:
temp_tile = solution[y_index][x_index - 1]
if isinstance(temp_tile, Tile):
neighbor_left: Tile = temp_tile
neighbor_face_right = neighbor_left.data[:, -1]
tile_face_left = tile.data[:, 0]
if not is_face_matches_face(tile_face_left, neighbor_face_right):
return False
# Right
if x_index < (len(solution[0]) - 1):
temp_tile = solution[y_index][x_index + 1]
if isinstance(temp_tile, Tile):
neighbor_right: Tile = temp_tile
neighbor_face_left = neighbor_right.data[:, 0]
tile_face_right = tile.data[:, -1]
if not is_face_matches_face(tile_face_right, neighbor_face_left):
return False
return True
def is_partial_solution_valid(solution: List[List[Optional[Tile]]]):
# Check a partial solution. None is allowed where a Tile has not been placed yet.
for y_index in range(len(solution)):
for x_index in range(len(solution[0])):
if solution[y_index][x_index] is None:
continue
if not is_tile_matches_neighbors(y_index, x_index, solution):
return False
return True
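def test_is_partial_solution_valid():
    # Two tiles whose shared edge agrees form a valid 1x2 partial solution;
    # a None placeholder is always acceptable; a mismatched edge is not.
    left = Tile(0, np.array([[0, 1], [2, 3]], dtype=object))
    right = Tile(1, np.array([[1, 5], [3, 7]], dtype=object))
    assert is_partial_solution_valid([[left, right]])
    assert is_partial_solution_valid([[left, None]])
    bad = Tile(2, np.array([[9, 5], [8, 7]], dtype=object))
    assert not is_partial_solution_valid([[left, bad]])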
def repr_solution_tiles(solution: List[List[Tile]]) -> str:
s = ""
for y_index, solution_row in enumerate(solution):
for y_tile_index in range(len(solution[0][0].data)):
for x_index, tile in enumerate(solution_row):
if solution[y_index][x_index]:
s += "".join(solution[y_index][x_index].data[y_tile_index])
s += " "
else:
s += "-" * len(solution[0][0].data[0])
s += " "
s += "\n"
s += "\n"
return s
def list_str_solution(solution: List[List[Tile]]) -> List[str]:
lines = []
for y_index, solution_row in enumerate(solution):
for y_tile_index in range(1, len(solution[0][0].data) - 1):
line = ""
for x_index, tile in enumerate(solution_row):
if solution[y_index][x_index]:
line += "".join(solution[y_index][x_index].data[y_tile_index][1:-1])
else:
line += "-" * len(solution[0][0].data[0][1:-1])
lines.append(line)
return lines
def repr_solution(solution: List[List[Tile]]) -> str:
s = ""
for row in list_str_solution(solution):
s += row + "\n"
return s
def match_2d(pattern_2d: np.ndarray, string_2d: np.ndarray):
matches = []
for y_index in range(len(string_2d) - len(pattern_2d) + 1):
for x_index in range(len(string_2d[0]) - len(pattern_2d[0]) + 1):
next_candidate = False
candidate_str = string_2d[
y_index : y_index + len(pattern_2d),
x_index : x_index + len(pattern_2d[0]),
]
for y_candidate in range(len(pattern_2d)):
for x_candidate in range(len(pattern_2d[0])):
# only looking for "#" in pattern_2d
if pattern_2d[y_candidate][x_candidate] != "#":
continue
if (
pattern_2d[y_candidate][x_candidate]
!= candidate_str[y_candidate][x_candidate]
):
next_candidate = True
break
else:
continue
if next_candidate:
break
if not next_candidate:
matches.append((y_index, x_index))
return matches
def test_match_2d():
    monster = [
        "                  # ",
        "#    ##    ##    ###",
        " #  #  #  #  #  #   ",
    ]
    monster_nparray = list_str_to_nparray(monster)
    # only the "#" cells of the pattern are checked, so the other
    # characters in the sea are free to differ
    sea = [
        "#       .         # ",
        "#    ##    ##    ###",
        " #  #  #  #  #  #   ",
    ]
    sea_nparray = list_str_to_nparray(sea)
    matches = match_2d(monster_nparray, monster_nparray)
    assert matches == [(0, 0)]
    matches = match_2d(monster_nparray, sea_nparray)
    assert matches == [(0, 0)]
def list_str_to_nparray(list_str: List[str]) -> np.ndarray:
    # separate each character so the nparray can be rotated and flipped
return np.array([[c for c in s] for s in list_str])
def solve_part1(tiles: List[Tile]) -> int:
# find the corners by counting the matching edges of each tile.
# corners have only two matching edges
corners: List[Tile] = find_corner_pieces(tiles)
corner_ids = [corner.number for corner in corners]
return product(corner_ids)
def solve_part2(tiles: List[Tile]) -> int:
dimension = math.isqrt(len(tiles))
solution: List[List[Optional[Tile]]] = [
[None for _ in range(dimension)] for _ in range(dimension)
]
# solution: List[List[Tile]] = [
# [None for _ in range(dimension)] for _ in range(dimension)
# ]
# print(solution)
assert is_partial_solution_valid(solution)
# start the solution with one of the corners found previously
solution[0][0] = find_corner_pieces(tiles)[0] # can be flipped/rotated
# tiles will only hold tiles that are not in solution yet
tiles.remove(solution[0][0])
# print(solution)
assert is_partial_solution_valid(solution)
# place solution[0][1]
# find a matching tile
assert isinstance(solution[0][0], Tile)
candidate_tile = next_match(solution[0][0], tiles)
# print(f"candidate_tile: {candidate_tile}")
# orient the corner. Which face matches?
# Options
# 1. could make this a tuple that also carries the "index" for how to rotate
# 2. or carries the rotated tile with each face
# 3. or just send the rotations and check the desired face below
# tile_faces = [
# tile.data[0], # top
# tile.data[-1], # bottom
# tile.data[:, 0], # left
# tile.data[:, -1], # right
# ]
# tile_rotations = [
# tile.data,
# np.rot90(tile.data, k=1),
# np.rot90(tile.data, k=2),
# np.rot90(tile.data, k=3),
# ]
# for face in tile_faces:
# if is_edge_match(face, candidate_tile):
# print(f"Face {face} matched candidate {candidate_tile}")
# in tile_rotations we are looking for the right face to match
# for orientation in tile_rotations:
y_index: int = 0
x_index: int = 0
tile = solution[y_index][x_index]
assert isinstance(tile, Tile)
for orientation in generate_tile_orientation(tile):
if is_face_matches_tile(orientation[:, -1], candidate_tile):
solution[y_index][x_index] = Tile(tile.number, orientation)
# print("matched orientation")
break
assert is_partial_solution_valid(solution)
# orient the candidate match and place it
for orientation in generate_tile_orientation(candidate_tile):
# compare left face of solved tile to right face of candidate_tile in all possible orientations
tile = solution[y_index][x_index]
assert isinstance(tile, Tile)
if is_face_matches_face(tile.data[:, -1], orientation[:, 0]):
# print(f"Placing candidate tile {candidate_tile.number}")
solution[y_index][x_index + 1] = Tile(candidate_tile.number, orientation)
# remove the matching candidate from tiles
tiles.remove(candidate_tile)
break
assert is_partial_solution_valid(solution)
y_index = 1
x_index = 0
tile = solution[y_index - 1][x_index]
assert isinstance(tile, Tile)
candidate_tile = next_match(tile, tiles)
# does row 0 need to flip?
# does candidate match to top or bottom of solution[0][0]?
needs_flip: bool = False
# compare top face of solution[0][0] to candidate_tile
up_neighbor: Tile = solution[0][0]
if is_face_matches_tile(up_neighbor.data[0], candidate_tile):
needs_flip = True
    if needs_flip:
        # flip row 0 in place; use fresh loop variables so the outer
        # x_index/tile bindings are not clobbered
        for flip_index, row_tile in enumerate(solution[0]):
            if isinstance(row_tile, Tile):
                flipped_data = np.flipud(row_tile.data)  # flip up-down
                solution[0][flip_index] = Tile(row_tile.number, flipped_data)
        # re-read the corner, which is now a new (flipped) Tile object
        temp_corner = solution[0][0]
        assert isinstance(temp_corner, Tile)
        up_neighbor = temp_corner
# orient candidate_tile to tile above
# for orientation in orientation_generator(candidate_tile):
# if is_face_matches_tile(orientation[0], solution[0][0]):
# print(orientation[0])
# if is_face_matches_face(orientation[0], solution[0][0].data[-1]):
# print(orientation[0])
for orientation in generate_tile_orientation(candidate_tile):
if is_face_matches_face(up_neighbor.data[-1], orientation[0]):
if DEBUG:
print(f"Placing candidate tile {candidate_tile.number}")
solution[y_index][x_index] = Tile(candidate_tile.number, orientation)
# remove candidate match from tiles
tiles.remove(candidate_tile)
break
assert is_partial_solution_valid(solution)
    # After the first corner and its neighbors have been placed,
    # the solution's orientation is fixed and cannot be flipped again.
# solve first row
y_index = 0
for x_index, tile in enumerate(solution[y_index]):
if tile:
continue
# print(f"{x_index} {tile}")
left_neighbor: Optional[Tile] = solution[y_index][x_index - 1]
assert isinstance(left_neighbor, Tile)
for candidate_tile in generate_next_match(left_neighbor, tiles):
# find the right orientation for candidate_tile to left_neighbor
for orientation in generate_tile_orientation(candidate_tile):
if is_face_matches_face(left_neighbor.data[:, -1], orientation[:, 0]):
# print(f"Placing candidate tile {candidate_tile.number}")
solution[y_index][x_index] = Tile(
candidate_tile.number, orientation
)
# remove candidate match from tiles
tiles.remove(candidate_tile)
break
if solution[y_index][x_index] is not None:
break
assert isinstance(solution[y_index][x_index], Tile)
assert is_partial_solution_valid(solution)
# print(f"Solution:\n{solution}")
# print(repr_solution(solution))
assert is_partial_solution_valid(solution)
# print()
# solve other rows. if the left neighbor is empty or we are on the left edge of solution,
# look up to place tile
for y_index, solution_row in enumerate(solution):
for x_index, tile in enumerate(solution[y_index]):
if tile:
continue
if x_index > 0:
# we are not on left edge of solution
assert isinstance(solution[y_index][x_index - 1], Tile)
left_neighbor = solution[y_index][x_index - 1]
assert isinstance(left_neighbor, Tile)
for candidate_tile in generate_next_match(left_neighbor, tiles):
# find the right orientation for candidate_tile to left_neighbor
# and to up_neighbor
for orientation in generate_tile_orientation(candidate_tile):
if is_face_matches_face(
left_neighbor.data[:, -1], orientation[:, 0]
):
# print(f"Placing candidate tile {candidate_tile.number}")
solution[y_index][x_index] = Tile(
candidate_tile.number, orientation
)
if not is_partial_solution_valid(solution):
# keep trying orientations
continue
# this is the right orientation with all neighbors
# remove candidate match from tiles
tiles.remove(candidate_tile)
break
if solution[y_index][x_index] is not None:
break
assert solution[y_index][x_index] is not None
assert is_partial_solution_valid(solution)
elif x_index == 0:
# on left edge of solution, look at up neighbor
temp_tile: Optional[Tile] = solution[y_index - 1][x_index]
assert isinstance(temp_tile, Tile)
up_neighbor = temp_tile
for candidate_tile in generate_next_match(up_neighbor, tiles):
for orientation in generate_tile_orientation(candidate_tile):
if is_face_matches_face(up_neighbor.data[-1], orientation[0]):
# print(f"Placing candidate tile {candidate_tile.number}")
solution[y_index][x_index] = Tile(
candidate_tile.number, orientation
)
if not is_partial_solution_valid(solution):
# keep trying orientations
continue
# remove candidate match from tiles
tiles.remove(candidate_tile)
break
if solution[y_index][x_index] is not None:
break
assert solution[y_index][x_index] is not None
assert is_partial_solution_valid(solution)
for row in solution:
for tile in row:
assert isinstance(tile, Tile)
solution_complete: List[List[Tile]] = solution.copy() # type: ignore # assert above verified correctness
if DEBUG:
print(repr_solution_tiles(solution_complete))
str_solution = repr_solution(solution_complete)
print(str_solution)
    monster = [
        "                  # ",
        "#    ##    ##    ###",
        " #  #  #  #  #  #   ",
    ]
    nparray_monster = list_str_to_nparray(monster)
# need to rotate and flip str_solution to get matches
nparray_solution = list_str_to_nparray(list_str_solution(solution_complete))
if DEBUG:
print(nparray_solution)
# matches = match_2d(monster, list_str_solution(solution))
# print(matches)
for orientation in generate_nparray_orientation(nparray_solution):
matches = match_2d(nparray_monster, orientation)
if len(matches) > 0:
break
if DEBUG:
print(orientation)
print(matches)
# count "#" minus (count "#" in monster * len(matches))
pound_in_orientation = len(
[char for row in orientation for char in row if char == "#"]
)
pound_in_monster = len(
[char for row in nparray_monster for char in row if char == "#"]
)
part2 = pound_in_orientation - (len(matches) * pound_in_monster)
return part2
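# Input format expected by load_tiles: blocks separated by blank lines, each
# starting with a header such as "Tile 2311:" followed by the tile's rows of
# "#" and "." characters.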
def load_tiles(filename: str) -> List[Tile]:
with open(filename) as f:
tiles_str: List[str] = f.read().split("\n\n")
tiles: List[Tile] = []
t_index: int
tile_str: str
for t_index, tile_str in enumerate(tiles_str):
tile_temp: List[str] = tile_str.split("\n")
number: int = int(tile_temp[0].split()[1][:-1])
data: np.ndarray = np.array([[char for char in row] for row in tile_temp[1:]])
tiles.append(Tile(number, data))
return tiles
def main():
if USE_EXAMPLE1:
filename = "./AdventOfCode/2020/day20-example1-input.txt"
else:
filename = "./AdventOfCode/2020/day20-input.txt"
tiles: List[Tile] = load_tiles(filename)
if DEBUG:
pprint(tiles)
print(f"Loaded {len(tiles)} tiles")
print(
f"Each tile is {len(tiles[0].data)} rows, {len(tiles[0].data[0])} columns"
)
# Part 1
part1 = solve_part1(tiles)
print(f"Part 1: {part1}") # 68781323018729
if USE_EXAMPLE1:
assert part1 == 20899048083289
else:
assert part1 == 68781323018729
if PROFILE:
with cProfile.Profile() as pr:
solve_part1(tiles)
pr.print_stats()
# Part 2
part2 = solve_part2(tiles.copy())
print(f"Part 2: {part2}")
if USE_EXAMPLE1:
assert part2 == 273
else:
assert part2 == 1629
if PROFILE:
with cProfile.Profile() as pr:
solve_part2(tiles.copy())
pr.print_stats()
if __name__ == "__main__":
main()
| 36.567026
| 110
| 0.605469
| 3,034
| 23,732
| 4.530653
| 0.098879
| 0.022261
| 0.034628
| 0.024443
| 0.470391
| 0.39226
| 0.340172
| 0.293613
| 0.255565
| 0.230758
| 0
| 0.018554
| 0.295972
| 23,732
| 649
| 111
| 36.567026
| 0.804166
| 0.220125
| 0
| 0.322275
| 0
| 0.00237
| 0.029708
| 0.006814
| 0
| 0
| 0
| 0
| 0.097156
| 1
| 0.059242
| false
| 0
| 0.018957
| 0.00237
| 0.132701
| 0.037915
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9caa403c7cda77ed58ce080740499d1a738c3e3
| 1,870
|
py
|
Python
|
src/server_main_loop.py
|
the40san/unity_survival_shooter_multi_server_python
|
d20d9aa2204bca70d0787acbfe395277b776e92d
|
[
"MIT"
] | 3
|
2017-04-11T05:36:08.000Z
|
2021-03-16T16:22:07.000Z
|
src/server_main_loop.py
|
the40san/unity_survival_shooter_multi_server_python
|
d20d9aa2204bca70d0787acbfe395277b776e92d
|
[
"MIT"
] | null | null | null |
src/server_main_loop.py
|
the40san/unity_survival_shooter_multi_server_python
|
d20d9aa2204bca70d0787acbfe395277b776e92d
|
[
"MIT"
] | 1
|
2017-04-11T05:35:26.000Z
|
2017-04-11T05:35:26.000Z
|
import socket
import select
from server_info import ServerInfo
from client_handler.client_thread import ClientThread
from server_handler.server_thread import ServerThread
from server_handler.server_thread_proxy import ServerThreadProxy
from logger import Logger
class ServerMainLoop:
def __init__(self):
self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.read_fds = set([self.listener])
self.server_thread = ServerThread()
self.server_thread.start()
def exec(self):
try:
self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listener.bind((ServerInfo.hostname, ServerInfo.port))
self.listener.listen(ServerInfo.backlog)
        except OSError as error:
            Logger.log("Server setup error: " + error.strerror)
            Logger.log("exiting...")
else:
while True:
self.main_loop()
finally:
self.shutdown()
def main_loop(self):
read_ready, write_ready, err_ready = select.select(self.read_fds, [], [])
for sock in read_ready:
if sock is self.listener:
self.accept_new_client(sock)
else:
self.accept_new_message(sock)
def accept_new_client(self, sock):
conn, address = self.listener.accept()
self.read_fds.add(conn)
Logger.log("new client connected")
def accept_new_message(self, sock):
thread = ClientThread(sock, ServerThreadProxy(self.server_thread))
self.read_fds.remove(sock)
self.server_thread.add_client(thread)
thread.start()
def shutdown(self):
Logger.log("shutting down")
for sock in self.read_fds:
sock.close()
self.server_thread.shutdown()
self.server_thread.join()
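# Hypothetical entry point, not part of the original module; assumes
# ServerInfo supplies hostname, port and backlog as used in exec() above.
if __name__ == '__main__':
    ServerMainLoop().exec()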
| 30.16129
| 81
| 0.647059
| 219
| 1,870
| 5.333333
| 0.342466
| 0.082192
| 0.082192
| 0.039384
| 0.049658
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000725
| 0.262032
| 1,870
| 61
| 82
| 30.655738
| 0.845652
| 0
| 0
| 0.041667
| 0
| 0
| 0.03369
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.145833
| 0
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9caf1a992a5eacb6d048931ae39cf07dfd472c4
| 8,838
|
py
|
Python
|
Aula_01/Aula01_Resolucao_Exercicios_Lista_Estruturas_Condicionais.py
|
elcbasilio/letscode
|
ea2ed5ee80485d98fad2c77a7a50927a7d524793
|
[
"MIT"
] | null | null | null |
Aula_01/Aula01_Resolucao_Exercicios_Lista_Estruturas_Condicionais.py
|
elcbasilio/letscode
|
ea2ed5ee80485d98fad2c77a7a50927a7d524793
|
[
"MIT"
] | null | null | null |
Aula_01/Aula01_Resolucao_Exercicios_Lista_Estruturas_Condicionais.py
|
elcbasilio/letscode
|
ea2ed5ee80485d98fad2c77a7a50927a7d524793
|
[
"MIT"
] | null | null | null |
# 1. Ask for the user's age and print whether they are over or under 18;
idade = int(input('Enter your age: '))
if idade < 18:
    print('You are under 18 years old')
else:
    print('You are 18 years old or older')
# 2. Ask for a number and show whether it is positive or negative;
numero = float(input('Enter any number: '))
if numero < 0:
    print('This number is negative')
else:
    print('This number is positive')
# 3. Given a typed number, show whether it is even or odd
numero = int(input('Enter an integer: '))
if numero % 2 == 0:
    print('This number is even')
else:
    print('This number is odd')
# 4. Ask for two numbers and show the larger one;
n1 = float(input('Enter any number: '))
n2 = float(input('Enter one more number: '))
if n1 < n2:
    print('The number', n1, 'is smaller than the number', n2)
elif n1 > n2:
    print('The number', n1, 'is larger than the number', n2)
else:
    print('The numbers entered are identical')
# 5. Write a program that validates the following information:
#    a. Age: between 0 and 150;
#    b. Salary: greater than 0;
#    c. Sex: M, F or Other;
#    The program must print an error message for each invalid input.
idade = int(input('Enter your age: '))
salario = float(input('Enter your salary R$: '))
sexo = input('Enter your sex (M / F / O): ')
if idade < 0 or idade > 150:
    print('The age entered is invalid, enter an age between 0 and 150 years')
if salario <= 0:
    print('The salary entered is invalid, enter a value greater than 0')
if sexo not in ('M', 'F', 'O', 'm', 'f', 'o'):
    print('The sex entered is invalid, enter M, F or O')
# 6. Write a program that asks for a student's 3 test grades and checks whether they passed the year:
#    Note: the student passes the year if their average is greater than 6.
n1 = float(input('Enter your 1st grade: '))
n2 = float(input('Enter your 2nd grade: '))
n3 = float(input('Enter your 3rd grade: '))
nf = (n1 + n2 + n3) / 3
if nf <= 6:
    print('Your average grade was', nf, 'and you FAILED!')
else:
    print('Your average grade was', nf, 'and you PASSED!')
# 7. Write a program that shows a multiple-choice question with 5 options (letters a, b, c, d and e).
#    Knowing the right answer, read the user's option and report the letter the user chose and
#    whether the answer is right or wrong.
print('Choose the correct alternative')
print('')
print('Question: Who discovered Brazil?')
print('')
print('a) Vasco da Gama')
print('b) Jair Bolsonaro')
print('c) Silvio Santos')
print('d) Pedro Álvares Cabral')
print('e) Craque Neto 10')
print('')
pergunta = input('Which is the correct answer: ')
if pergunta == 'd' or pergunta == 'D':
    print('You selected option d) Pedro Álvares Cabral. The answer is correct')
elif pergunta == 'a' or pergunta == 'A':
    print('You selected option a) Vasco da Gama. The answer is wrong')
elif pergunta == 'b' or pergunta == 'B':
    print('You selected option b) Jair Bolsonaro. The answer is wrong')
elif pergunta == 'c' or pergunta == 'C':
    print('You selected option c) Silvio Santos. The answer is wrong')
elif pergunta == 'e' or pergunta == 'E':
    print('You selected option e) Craque Neto 10. The answer is wrong')
else:
    print('You selected an invalid option')
# 8. Let's write a program to determine the murderer in a crime.
#    To find out, the police gathered one of the suspects and asked a short
#    questionnaire of 5 yes-or-no questions:
#    a. Did you phone the victim?
#    b. Were you at the crime scene?
#    c. Do you live near the victim?
#    d. Did you owe the victim money?
#    e. Have you ever worked with the victim?
#    Each "yes" answer gives the suspect one point. The police consider
#    suspects with 5 points to be murderers, 4 to 3 points accomplices,
#    and 2 points merely suspects requiring further investigation;
#    anyone with 1 point or less is released.
print('Answer S for Yes and N for No to each of the questions below')
print('')
p1 = input('Did you phone the victim? ')
p2 = input('Were you at the crime scene? ')
p3 = input('Do you live near the victim? ')
p4 = input('Did you owe the victim money? ')
p5 = input('Have you ever worked with the victim? ')
if p1 == 's' or p1 == 'S':
    p11 = 1
else:
    p11 = 0
if p2 == 's' or p2 == 'S':
    p22 = 1
else:
    p22 = 0
if p3 == 's' or p3 == 'S':
    p33 = 1
else:
    p33 = 0
if p4 == 's' or p4 == 'S':
    p44 = 1
else:
    p44 = 0
if p5 == 's' or p5 == 'S':
    p55 = 1
else:
    p55 = 0
soma = p11 + p22 + p33 + p44 + p55
print('')
if soma == 5:
    print('You are the Murderer')
elif soma >= 3:
    print('You are an Accomplice')
elif soma >= 1:
    print('You are a Suspect')
else:
    print('You are Released')
# 9. A product's price will increase according to table 1 below. Ask the
#    user for the product's old price and print one of the messages from
#    table 2, according to the adjusted price:
#    Old price                 Percentage increase
#    Up to R$50                5%
#    Between R$50 and R$100    10%
#    Between R$100 and R$150   13%
#    Above R$150               15%
#    New price                 Message
#    Up to R$80                Cheap
#    Between R$80 and R$115    Reasonable
#    Between R$115 and R$150   Normal
#    Between R$150 and R$170   Expensive
#    Above R$170               Very expensive
print('Price Adjustment')
pa = float(input('Enter the price of the product to be adjusted: R$ '))
if pa <= 0:
    print('Enter a value greater than ZERO')
    pn = 0
elif pa <= 50:
    pn = pa * 1.05
elif pa <= 100:
    pn = pa * 1.1
elif pa <= 150:
    pn = pa * 1.13
else:
    pn = pa * 1.15
if pn <= 0:
    print('')
elif pn < 80:
    print('The new price of the product is R$', pn, '- Cheap')
elif pn < 115:
    print('The new price of the product is R$', pn, '- Reasonable')
elif pn < 150:
    print('The new price of the product is R$', pn, '- Normal')
elif pn < 170:
    print('The new price of the product is R$', pn, '- Expensive')
else:
    print('The new price of the product is R$', pn, '- Very Expensive')
# Challenge
# 1. Write a program that reads 3 numbers and reports the largest one;
n1 = float(input('Enter the 1st number: '))
n2 = float(input('Enter the 2nd number: '))
n3 = float(input('Enter the 3rd number: '))
if n1 >= n2 and n1 >= n3:
    print('The largest number is', n1)
elif n2 >= n1 and n2 >= n3:
    print('The largest number is', n2)
elif n3 >= n1 and n3 >= n2:
    print('The largest number is', n3)
# 2. Now do it with 4 numbers;
n1 = float(input('Enter the 1st number: '))
n2 = float(input('Enter the 2nd number: '))
n3 = float(input('Enter the 3rd number: '))
n4 = float(input('Enter the 4th number: '))
if n1 >= n2 and n1 >= n3 and n1 >= n4:
    print('The largest number is', n1)
elif n2 >= n1 and n2 >= n3 and n2 >= n4:
    print('The largest number is', n2)
elif n3 >= n1 and n3 >= n2 and n3 >= n4:
    print('The largest number is', n3)
elif n4 >= n1 and n4 >= n2 and n4 >= n3:
    print('The largest number is', n4)
'''
3. A hospital wants to diagnose flu or dengue from a symptom questionnaire.
Given the questions below, write a program that performs this hospital's
diagnosis:
a. Do you feel body aches?
b. Do you have a fever?
c. Do you have a cough?
d. Do you have nasal congestion?
e. Do you have spots on your body?
For the diagnosis, it uses the following table:
A    B    C    D    E    Result
Yes  Yes  No   No   Yes  Dengue
Yes  Yes  Yes  Yes  No   Flu
No   Yes  Yes  Yes  No   Flu
Yes  No   Yes  Yes  No   Flu
Yes  No   No   No   No   No illness
No   No   No   No   No   No illness
'''
print('Flu or dengue diagnosis')
print('')
print('Type S for "Yes" or N for "No"')
print('')
p1 = input('Do you feel body aches? ')
p2 = input('Do you have a fever? ')
p3 = input('Do you have a cough? ')
p4 = input('Do you have nasal congestion? ')
p5 = input('Do you have spots on your body? ')
if (p1.upper() != 'S' and p1.upper() != 'N') or (p2.upper() != 'S' and p2.upper() != 'N') or (p3.upper() != 'S' and p3.upper() != 'N') or (p4.upper() != 'S' and p4.upper() != 'N') or (p5.upper() != 'S' and p5.upper() != 'N'):
    print('You typed one or more invalid options')
elif p1.upper() == 'S' and p2.upper() == 'S' and p3.upper() == 'N' and p4.upper() == 'N' and p5.upper() == 'S':
    print('Diagnosis - Dengue')
elif p1.upper() == 'S' and p2.upper() == 'S' and p3.upper() == 'S' and p4.upper() == 'S' and p5.upper() == 'N':
    print('Diagnosis - Flu')
elif p1.upper() == 'N' and p2.upper() == 'S' and p3.upper() == 'S' and p4.upper() == 'S' and p5.upper() == 'N':
    print('Diagnosis - Flu')
elif p1.upper() == 'S' and p2.upper() == 'N' and p3.upper() == 'S' and p4.upper() == 'S' and p5.upper() == 'N':
    print('Diagnosis - Flu')
elif p1.upper() == 'S' and p2.upper() == 'N' and p3.upper() == 'N' and p4.upper() == 'N' and p5.upper() == 'N':
    print('Diagnosis - No illness')
elif p1.upper() == 'N' and p2.upper() == 'N' and p3.upper() == 'N' and p4.upper() == 'N' and p5.upper() == 'N':
    print('Diagnosis - No illness')
else:
    print('Diagnosis - not specified')
| 34.795276
| 209
| 0.642114
| 1,544
| 8,838
| 3.675518
| 0.182642
| 0.022203
| 0.028546
| 0.020969
| 0.362819
| 0.272599
| 0.212335
| 0.188018
| 0.174097
| 0.135683
| 0
| 0.045154
| 0.21317
| 8,838
| 253
| 210
| 34.932806
| 0.770923
| 0.225051
| 0
| 0.245614
| 0
| 0
| 0.396871
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.374269
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9cd5d6aedbff1e1481a77881774667ef1fefe16
| 2,023
|
py
|
Python
|
ros_radar_mine/neuro_learning/controller/testing/load_and_test.py
|
tudelft/blimp_snn
|
23acbef8822337387aee196a3a10854e82bb4f80
|
[
"Apache-2.0"
] | 3
|
2021-11-08T20:20:21.000Z
|
2021-12-29T09:05:37.000Z
|
ros_radar_mine/neuro_learning/controller/testing/load_and_test.py
|
tudelft/blimp_snn
|
23acbef8822337387aee196a3a10854e82bb4f80
|
[
"Apache-2.0"
] | null | null | null |
ros_radar_mine/neuro_learning/controller/testing/load_and_test.py
|
tudelft/blimp_snn
|
23acbef8822337387aee196a3a10854e82bb4f80
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 2 20:41:01 2021
@author: marina
Interesting:
- T2000_NPOP100_NGEN500_NEU2-10-5-2-1_05-04-2021_19-08-51: Really smooth and contained between (-3,3)
- T2000_NPOP100_NGEN500_NEU2-10-5-2-1_05-04-2021_19-25-20
"""
# Set absolute package path
import sys, os
sys.path.append(os.path.abspath(".."))
import extra.aux_funcs as af # :)
import numpy as np
from evol_funcs.evol_mut_eval import evaluate, evaluate_SNNyPID
#from evol_funcs.evol_funcs_ANN import evaluate, evaluate_ANNyPID
import pid.myPID as PID
# Some constants
custom_config = True
#config_file = "config_night/config1.yaml"
config_file = "config.yaml"
# T2500_NPOP50_NGEN200_24-03-2021_23-47-25/
# T1500_NPOP60_NGEN400_24-03-2021_22-21-03/
# T3000_NPOP50_NGEN200_24-03-2021_22-06-17/
# T1800_NPOP40_NGEN500_NEU10-5-1-1_17-06-2021_11-40-05
# "T1800_NPOP40_NGEN500_NEU1-3-2-1_17-06-2021_15-10-09"
net_name = "T1200_NPOP40_NGEN500_NEU10-5-5-1_26-05-2021_13-34-44"
gen = 30
hof = True
num = 0
# Load configuration "cf" dir
cf = af.set_config("../config/" + config_file)
# Load population (IND or HOF)
pop = af.readPopulation(cf, net_name, gen, hof)
#pop = af.readPopulationSD(cf, net_name, gen, hof)
network = pop[num]
# Load network configuration
if not custom_config:
cf = network.cf
# Activate plotting
cf["evol"]["plot"] = True
# Evaluate network + plot
'''
network.pid[0] = -1
network.pid[1] = 0
network.pid[2] = 0
'''
#network.pid = [2,0,0]
individual = [network]
mse = evaluate(individual, cf, h_refList = cf["evol"]["h_ref"], h_init = cf["evol"]["h_init"])
#pid = PID.PID(*cf["pid"]["PID"], cf["pid"]["dt"], cf["pid"]["simple"])
#inputList = np.linspace(*cf["pid"]["input_lim"], cf["pid"]["n_points"])
#inputList = np.random.uniform(low=cf["pid"]["input_lim"][0], high=cf["pid"]["input_lim"][1], size = (cf["pid"]["n_points"],))
#mse = evaluate_PID(individual, cf, pid=pid, inputList=inputList)
print("MSE = ", mse)
#torch.save(model.state_dict(), PATH)
| 26.973333
| 126
| 0.707365
| 344
| 2,023
| 3.965116
| 0.43314
| 0.032991
| 0.017595
| 0.028592
| 0.126833
| 0.055718
| 0.055718
| 0.055718
| 0.055718
| 0.055718
| 0
| 0.135347
| 0.116164
| 2,023
| 75
| 127
| 26.973333
| 0.627517
| 0.604053
| 0
| 0
| 0
| 0
| 0.153191
| 0.073759
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.272727
| 0
| 0.272727
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9ce968f21d6e6f401b8601663fad589082a13f8
| 293
|
py
|
Python
|
HackerRank/Python/Sets/py-the-captains-room.py
|
neiesc/Problem-solving
|
d3bce7a3b9801e6049e2c135418b31cba47b0964
|
[
"MIT"
] | 1
|
2019-07-20T16:59:21.000Z
|
2019-07-20T16:59:21.000Z
|
HackerRank/Python/Sets/py-the-captains-room.py
|
neiesc/Problem-solving
|
d3bce7a3b9801e6049e2c135418b31cba47b0964
|
[
"MIT"
] | 5
|
2019-03-10T19:46:42.000Z
|
2020-04-24T22:42:30.000Z
|
HackerRank/Python/Sets/py-the-captains-room.py
|
neiesc/Problem-solving
|
d3bce7a3b9801e6049e2c135418b31cba47b0964
|
[
"MIT"
] | null | null | null |
#!/bin/python3
# The Captain's Room
# https://www.hackerrank.com/challenges/py-the-captains-room/problem
from collections import Counter
if __name__ == '__main__':
k = int(input())
    # most_common() sorts descending by count; the [:-2:-1] slice keeps just
    # the last entry, i.e. the room number that appears only once (the captain's)
    room_captain = Counter(map(int, input().split())).most_common()[:-2:-1]
print(room_captain[0][0])
| 29.3
| 76
| 0.68942
| 42
| 293
| 4.547619
| 0.761905
| 0.08377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019531
| 0.12628
| 293
| 10
| 77
| 29.3
| 0.726563
| 0.337884
| 0
| 0
| 0
| 0
| 0.041667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9d666c57cbfbdaa2610dd40857b2cf6d3dbfa06
| 8,973
|
py
|
Python
|
api/GIS/functionalFun/thread_GIS.py
|
AutoCoinDCF/NEW_API
|
f4abc48fff907a0785372b941afcd67e62eec825
|
[
"Apache-2.0"
] | null | null | null |
api/GIS/functionalFun/thread_GIS.py
|
AutoCoinDCF/NEW_API
|
f4abc48fff907a0785372b941afcd67e62eec825
|
[
"Apache-2.0"
] | null | null | null |
api/GIS/functionalFun/thread_GIS.py
|
AutoCoinDCF/NEW_API
|
f4abc48fff907a0785372b941afcd67e62eec825
|
[
"Apache-2.0"
] | null | null | null |
from threading import Thread,Lock
from api.GIS.config import GIS_mgdb_config
from api.GIS.database.mongoDB import MGO
import json
from api.GIS.GISStaticsFun import GisStaticsFun
class TheadFun():
def __init__(self):
pass
# self.param = param
def queryQBByIds(self,ids):
DBConfig = []
for cf in GIS_mgdb_config.dataBaseConfig:
ctype = cf['type']
if ctype == 'event' or ctype == 'org':
DBConfig.append(cf)
LocationInfo = {}
features = []
event_ids = []
org_ids = []
for id in ids:
if len(id) < 20:
org_ids.append(id)
else:
event_ids.append(id)
threads = [Thread(target=TheadFun.queryDBById,args=(cf,ids,LocationInfo)) for cf in DBConfig]
# threads_org = [Thread(target=TheadFun.queryOrgById,args=(cf,org_ids,LocationInfo)) for cf in orgDBConfig]
for t in threads:
t.start()
for t in threads:
t.join()
features = TheadFun.getFeaturesByLocationInfo(LocationInfo)
return features
@staticmethod
def queryDBById(cf, ids, LocationInfo):
cType = cf['type']
if cType == "event":
TheadFun.queryEventById(cf, ids, LocationInfo)
else:
TheadFun.queryOrgAndTarById(cf, ids, LocationInfo)
@staticmethod
def queryOrgAndTarById(cf, ids, LocationInfo):
mg = MGO(host=cf['host'], port=cf['port'], user=cf['user'], pwd=cf['pwd'],dbname=cf['dbname'])
dbName = cf['dbname']
collName = cf['collName']
fieldConfig = GIS_mgdb_config.fieldConfig
fieldconfig = fieldConfig[dbName][collName]
locationListKey = fieldconfig["locationList"]
QBIdKey = fieldconfig["QBId"]
TypeKey = fieldconfig["type"]
        locationNameKey = fieldconfig["locationName"]
        # Assumption: org/target collections carry the same heat-attribute
        # config keys as the event collection (mirrors queryEventById).
        EventAttrKey = fieldconfig["EventAttr"]
        showHeatAttr = fieldconfig["showHeatAttr"]
        findObj = {QBIdKey: {'$in': ids}}
        rows = mg.find(collName, findObj)
        for row in rows:
            try:
                EventId = str(row[QBIdKey])
                localName = row[locationNameKey]
                locationlist = row[locationListKey]
                Type = row[TypeKey]
                for index, locationItem in enumerate(locationlist):
                    geometry = locationItem
                    X = str(geometry['coordinates'][0])
                    Y = str(geometry['coordinates'][1])
                    ident = "event&" + X + Y
                    heatAttr = GisStaticsFun.getHeatAttr(row, showHeatAttr, EventAttrKey)  # get the heat-map attribute
                    Param = TheadFun.getParam(EventId, index, Type, heatAttr)  # build the param dict
                    location = geometry
                    TheadFun.getEventLocationInfo(Param, ident, location, localName, LocationInfo)  # build the location info
            except Exception:
                print(str(row["_id"]) + " failed!")
@staticmethod
def queryEventById(cf, ids, LocationInfo):
mg = MGO(host=cf['host'], port=cf['port'], user=cf['user'], pwd=cf['pwd'],dbname=cf['dbname'])
dbName = cf['dbname']
collName = cf['collName']
fieldConfig = GIS_mgdb_config.fieldConfig
fieldconfig = fieldConfig[dbName][collName]
locationListKey = fieldconfig["locationList"]
QBIdKey = fieldconfig["QBId"]
SubtypeKey = fieldconfig["Subtype"]
EventAttrKey = fieldconfig["EventAttr"]
showHeatAttr = fieldconfig["showHeatAttr"]
findObj = {QBIdKey:{'$in':ids}}
rows = mg.find(collName,findObj)
for row in rows:
try:
EventId = str(row[QBIdKey])
localName = row[locationListKey][0]['name']
locationlist = row[locationListKey]
eventType = row[SubtypeKey]
for index,locationItem in enumerate(locationlist):
geometry = locationItem['geometry']
X = str(geometry['coordinates'][0])
Y = str(geometry['coordinates'][1])
ident = "event&" + X + Y
                    heatAttr = GisStaticsFun.getHeatAttr(row, showHeatAttr, EventAttrKey)  # get the heat-map attribute
                    Param = TheadFun.getParam(EventId, index, eventType, heatAttr)  # build the param dict
                    location = geometry
                    TheadFun.getEventLocationInfo(Param, ident, location, localName, LocationInfo)  # build the location info
            except Exception:
                print(str(row["_id"]) + " failed!")
def exploreEvents(self,geometryStrArr):
        eventsDBConfig = [
            cf for cf in GIS_mgdb_config.dataBaseConfig if cf['type'] == 'event'
        ]
LocationInfo = {}
features = []
threads = [Thread(target=TheadFun.spatialQueryDB,args=(cf,geometryStrArr,LocationInfo)) for cf in eventsDBConfig]
for t in threads:
t.start()
for t in threads:
t.join()
features = TheadFun.getFeaturesByLocationInfo(LocationInfo)
return features
@staticmethod
def spatialQueryDB(cf,geometryStrArr,LocationInfo):
mg = MGO(host=cf['host'], port=cf['port'], user=cf['user'], pwd=cf['pwd'],dbname=cf['dbname'])
dbName = cf['dbname']
collName = cf['collName']
fieldConfig = GIS_mgdb_config.fieldConfig
fieldconfig = fieldConfig[dbName][collName]
locationListKey = fieldconfig["locationList"]
geometryKey = fieldconfig["geometry"]
QBIdKey = fieldconfig["QBId"]
SubtypeKey = fieldconfig["Subtype"]
EventAttrKey = fieldconfig["EventAttr"]
showHeatAttr = fieldconfig["showHeatAttr"]
findOrArr = []
for geometryStr in geometryStrArr:
geometryObj = json.loads(geometryStr)
findO_point = {locationListKey:{'$elemMatch':{geometryKey:{"$within":{"$geometry":geometryObj}}}}}
findOrArr.append(findO_point)
findObj = {'$or':findOrArr}
rows = mg.find(collName,findObj)
for row in rows:
try:
EventId = str(row[QBIdKey])
localName = row[locationListKey][0]['name']
locationlist = row[locationListKey]
eventType = row[SubtypeKey]
for index,locationItem in enumerate(locationlist):
geometry = locationItem['geometry']
isIntersect = True
                    if len(geometry['coordinates']) == 0 or geometry['coordinates'][0] == '' or geometry['coordinates'][1] == '':  # skip malformed coordinates
                        continue
                    # drop locationList entries whose coordinates do not fall
                    # inside any of the queried geometries
                    if len(locationlist) > 1:
                        isIntersect = False
                        for geometryStr in geometryStrArr:
                            geometryObj = json.loads(geometryStr)
                            isIntersect = GisStaticsFun.isIntersert(geometry, geometryObj)
                            if isIntersect:
                                break
                        if not isIntersect:  # this location lies outside the queried area
                            continue
X = str(geometry['coordinates'][0])
Y = str(geometry['coordinates'][1])
ident = "event&" + X + Y
heatAttr = GisStaticsFun.getHeatAttr(row,showHeatAttr,EventAttrKey)
Param = TheadFun.getParam(EventId,index,eventType,heatAttr)
location = geometry
TheadFun.getEventLocationInfo(Param,ident,location,localName,LocationInfo)
            except Exception:
                print(str(row["_id"]) + " failed!")
@staticmethod
def getParam(EventId,index,eventType,heatAttr):
Param = {
"ParamId":EventId+"#"+str(index),
"QBId":EventId,
'QBType':eventType,
"heatAttr":heatAttr
}
return Param
@staticmethod
def getEventLocationInfo(Param,ident,location,localName,LocationInfo):
if(ident in LocationInfo):
EventArr = LocationInfo[ident]['Params']
EventArr.append(Param)
else:
LocationInfo[ident] = {
"Params":[Param],
"location":location,
"localName":localName
}
@staticmethod
def getFeaturesByLocationInfo(LocationInfo):
features = []
for k,v in LocationInfo.items():
location = v['location']
featureId = k
params = v['Params']
localname = v['localName']
feature = {
"type": "Feature",
"id": featureId,
"geometry": location,
"properties": {
'Params':params,
'locationName':localname,
'selectedNum':len(params)
}
}
features.append(feature)
return features
####======================================####
| 41.35023
| 140
| 0.5482
| 750
| 8,973
| 6.522667
| 0.186667
| 0.034955
| 0.015944
| 0.01063
| 0.613655
| 0.597097
| 0.562756
| 0.542723
| 0.504906
| 0.487939
| 0
| 0.00237
| 0.34158
| 8,973
| 216
| 141
| 41.541667
| 0.82566
| 0.032654
| 0
| 0.575758
| 0
| 0
| 0.068292
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050505
| false
| 0.005051
| 0.025253
| 0
| 0.10101
| 0.015152
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9d780addff74609d2c11318421ccdacd8b15d8d
| 1,845
|
py
|
Python
|
Creating a variable at template using filter.py/filter.py
|
YooInKeun/Facebook-Page-Insights-Web-Crawler
|
dbe8477b1e0671aca137cd94eff090d691e99ee2
|
[
"MIT"
] | 1
|
2021-07-12T00:04:04.000Z
|
2021-07-12T00:04:04.000Z
|
Creating a variable at template using filter.py/filter.py
|
YooInKeun/Facebook-Page-Insights-Web-Crawler
|
dbe8477b1e0671aca137cd94eff090d691e99ee2
|
[
"MIT"
] | null | null | null |
Creating a variable at template using filter.py/filter.py
|
YooInKeun/Facebook-Page-Insights-Web-Crawler
|
dbe8477b1e0671aca137cd94eff090d691e99ee2
|
[
"MIT"
] | null | null | null |
import re
from django import template
register = template.Library()
# Can create string variables
class SetVarNode(template.Node):
def __init__(self, new_val, var_name):
self.new_val = new_val
self.var_name = var_name
def render(self, context):
context[self.var_name] = self.new_val
return ''
@register.tag
def setvar(parser,token):
# This version uses a regular expression to parse tag contents.
try:
# Splitting by None == splitting by spaces.
tag_name, arg = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError("%r tag requires arguments" % token.contents.split()[0])
m = re.search(r'(.*?) as (\w+)', arg)
if not m:
raise template.TemplateSyntaxError("%r tag had invalid arguments" % tag_name)
new_val, var_name = m.groups()
if not (new_val[0] == new_val[-1] and new_val[0] in ('"', "'")):
raise template.TemplateSyntaxError("%r tag's argument should be in quotes" % tag_name)
return SetVarNode(new_val[1:-1], var_name)
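# Template usage (the argument must be quoted):
#   {% setvar "hello" as greeting %}
#   {{ greeting }}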
# Can create variables of any type
class SetVarNode(template.Node):
def __init__(self, var_name, var_value):
self.var_name = var_name
self.var_value = var_value
def render(self, context):
try:
value = template.Variable(self.var_value).resolve(context)
except template.VariableDoesNotExist:
value = ""
context[self.var_name] = value
return u""
@register.tag(name='set')
def set_var(parser, token):
"""
{% set some_var = '123' %}
"""
parts = token.split_contents()
if len(parts) < 4:
raise template.TemplateSyntaxError("'set' tag must be of the form: {% set <var_name> = <var_value> %}")
return SetVarNode(parts[1], parts[3])
| 29.758065
| 112
| 0.616802
| 242
| 1,845
| 4.541322
| 0.359504
| 0.070064
| 0.050046
| 0.038217
| 0.242038
| 0.080073
| 0.080073
| 0.080073
| 0.080073
| 0.080073
| 0
| 0.009559
| 0.262873
| 1,845
| 62
| 113
| 29.758065
| 0.798529
| 0.086179
| 0
| 0.2
| 0
| 0
| 0.108276
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.05
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9d8c56ae37748311a4ccb37002818c53ff1fd16
| 420
|
py
|
Python
|
fimath/constants.py
|
kalinkinisaac/modular
|
301d26ad222a5ef3278aaf251908e0a8537bb58f
|
[
"MIT"
] | null | null | null |
fimath/constants.py
|
kalinkinisaac/modular
|
301d26ad222a5ef3278aaf251908e0a8537bb58f
|
[
"MIT"
] | null | null | null |
fimath/constants.py
|
kalinkinisaac/modular
|
301d26ad222a5ef3278aaf251908e0a8537bb58f
|
[
"MIT"
] | null | null | null |
from .field import Field
from .re_field import ReField
from .matrix import Matrix
inf = Field(is_inf=True)
IDM = Matrix(1, 0, 0, 1)
G0 = Matrix(0, -1, 1, 0)
G1 = Matrix(0, 1, -1, 1)
G1_2 = G1 ** 2
G0_ = Matrix(0, 1, -1, 0)
G1_ = Matrix(1, 1, -1, 0)
G1_2_ = G1_ ** 2
G_ = Matrix(1, -1, 0, 1)
G__ = Matrix(1, 0, -1, 1)
ZERO = Field(0)
ONE = Field(1)
INF = inf
V0 = Field(1j)
V1 = Field(ReField(1/2), ReField(b=1/2))
| 16.8
| 40
| 0.592857
| 88
| 420
| 2.704545
| 0.25
| 0.067227
| 0.05042
| 0.113445
| 0.168067
| 0.168067
| 0.168067
| 0.168067
| 0
| 0
| 0
| 0.148036
| 0.211905
| 420
| 24
| 41
| 17.5
| 0.570997
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9de9f006fc9afa79a63265eef2873fd5e7b5f5b
| 1,991
|
py
|
Python
|
2021/python/day3.py
|
majormunky/advent_of_code
|
4cccd7f3879e28e465bbc39176659bdd52bd70d6
|
[
"MIT"
] | null | null | null |
2021/python/day3.py
|
majormunky/advent_of_code
|
4cccd7f3879e28e465bbc39176659bdd52bd70d6
|
[
"MIT"
] | null | null | null |
2021/python/day3.py
|
majormunky/advent_of_code
|
4cccd7f3879e28e465bbc39176659bdd52bd70d6
|
[
"MIT"
] | 1
|
2020-12-04T06:12:01.000Z
|
2020-12-04T06:12:01.000Z
|
from common import get_file_contents
def most_common(index, items, total):
if int(items[index]) >= total / 2:
return "1"
else:
return "0"
def least_common(index, items, total):
    # If the count of ones is at least half the total number of lines,
    # 1 is the more (or equally) common value, so the least common
    # digit is 0; otherwise it is 1.
    if int(items[index]) >= total / 2:
        return "0"
    else:
        return "1"
def build_frequency(lines):
freq = [0 for i in range(len(lines[0]))]
for line in lines:
parts = list(line)
for index, item in enumerate(parts):
if item == "1":
freq[index] += 1
return freq
def p1():
lines = get_file_contents("data/day3_input.txt")
freq = build_frequency(lines)
gamma = [most_common(i, freq, len(lines)) for i in range(len(freq))]
epsilon = [least_common(i, freq, len(lines)) for i in range(len(freq))]
gamma = int("0b" + "".join(gamma), 2)
epsilon = int("0b" + "".join(epsilon), 2)
return gamma * epsilon
def digit_check(index, number, target_value):
return number[index] == target_value
def get_item(lines, func):
data = lines.copy()
freq = build_frequency(data)
items_to_remove = []
for index in range(len(data[0])):
# num is either the most or least common number
num = func(index, freq, len(data))
for line in data:
if not digit_check(index, line, num):
if line in data:
items_to_remove.append(line)
for remove_item in items_to_remove:
if remove_item in data:
data.remove(remove_item)
if len(data) == 1:
break
freq = build_frequency(data)
return data[0]
test_data = [
"00100",
"11110",
"10110",
"10111",
"10101",
"01111",
"00111",
"11100",
"10000",
"11001",
"00010",
"01010",
]
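def test_example_data():
    # Expected values come from the Advent of Code 2021 day 3 example:
    # gamma=22, epsilon=9 (power 198); oxygen=23, scrubber=10.
    freq = build_frequency(test_data)
    gamma = [most_common(i, freq, len(test_data)) for i in range(len(freq))]
    epsilon = [least_common(i, freq, len(test_data)) for i in range(len(freq))]
    assert int("0b" + "".join(gamma), 2) * int("0b" + "".join(epsilon), 2) == 198
    assert int("0b" + get_item(test_data, most_common), 2) == 23
    assert int("0b" + get_item(test_data, least_common), 2) == 10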
def p2():
lines = get_file_contents("data/day3_input.txt")
oxygen = get_item(lines, most_common)
scrubber = get_item(lines, least_common)
return int("0b" + oxygen, 2) * int("0b" + scrubber, 2)
if __name__ == '__main__':
print("Part 1: ", p1())
print("Part 2: ", p2())
| 20.316327
| 72
| 0.658463
| 314
| 1,991
| 4.038217
| 0.286624
| 0.022082
| 0.031546
| 0.042587
| 0.224763
| 0.179022
| 0.179022
| 0.115142
| 0.05836
| 0.05836
| 0
| 0.057908
| 0.19337
| 1,991
| 97
| 73
| 20.525773
| 0.731631
| 0.07785
| 0
| 0.157143
| 0
| 0
| 0.074276
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.014286
| 0.014286
| 0.257143
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9e4232dbd5470195e751b3cfb7348b26305a4d1
| 12,684
|
py
|
Python
|
src/utils/datasets.py
|
gorjanradevski/siamese_multi_head_attention
|
fcbfe21f284bf98a1d0e725a9e6f2df19363b4a5
|
[
"MIT"
] | 2
|
2020-06-11T03:03:35.000Z
|
2022-01-08T07:15:46.000Z
|
src/utils/datasets.py
|
gorjanradevski/multimodal_representations_deep_learning
|
fcbfe21f284bf98a1d0e725a9e6f2df19363b4a5
|
[
"MIT"
] | null | null | null |
src/utils/datasets.py
|
gorjanradevski/multimodal_representations_deep_learning
|
fcbfe21f284bf98a1d0e725a9e6f2df19363b4a5
|
[
"MIT"
] | null | null | null |
import json
import re
import os
import logging
from abc import ABC
from typing import Dict, Any, List, Tuple
from utils.constants import pascal_train_size, pascal_val_size
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def preprocess_caption(caption: str) -> str:
"""Basic method used around all classes
Performs pre-processing of the caption in the following way:
1. Converts the whole caption to lower case.
2. Removes all characters which are not letters.
Args:
caption: A list of words contained in the caption.
Returns:
"""
caption = caption.lower()
caption = re.sub("[^a-z' ]+", "", caption)
caption = re.sub("\s+", " ", caption).strip() # NOQA
caption = caption.strip()
return caption
class BaseCocoDataset(ABC):
# Adapted for working with the Microsoft COCO dataset.
def __init__(self, images_path: str, json_path: str):
"""Creates a dataset object.
Args:
images_path: Path where the images are located.
json_path: Path to the json file where the mappings are indicated as well
as the captions.
"""
json_file = self.read_json(json_path)
self.id_to_filename = self.parse_image_paths(json_file, images_path)
self.id_to_captions = self.parse_captions(json_file)
logger.info("Object variables set...")
@staticmethod
def parse_image_paths(
json_file: Dict[str, Any], images_path: str
) -> Dict[int, str]:
"""Parses the images metadata from the json file.
Args:
json_file: A dict representing the loaded json file.
images_path: A path where the images are.
Returns:
A dict that contains the image id and the image filename.
"""
id_to_filename = {}
for image_data in json_file["images"]:
id_to_filename[image_data["id"]] = os.path.join(
images_path, image_data["file_name"]
)
return id_to_filename
@staticmethod
def parse_captions(json_file: Dict[str, Any]) -> Dict[int, List[str]]:
"""Parses the captions metadata from the json file.
Args:
json_file: A dict representing the loaded json file.
Returns:
A dict that contains the image id and a list with the image captions.
"""
id_to_captions: Dict[int, List[str]] = {}
for captions_data in json_file["annotations"]:
if captions_data["image_id"] not in id_to_captions.keys():
id_to_captions[captions_data["image_id"]] = []
id_to_captions[captions_data["image_id"]].append(
preprocess_caption(captions_data["caption"])
)
return id_to_captions
@staticmethod
def read_json(json_path: str) -> Dict[str, Any]:
"""Reads json file given a path.
Args:
json_path: Path where the json file is.
Returns:
A dictionary representing the json file.
"""
with open(json_path) as file:
json_file = json.load(file)
return json_file
@staticmethod
def get_data_wrapper(
id_to_filename: Dict[int, str], id_to_captions: Dict[int, List[str]]
) -> Tuple[List[str], List[str]]:
"""Returns the image paths and captions.
Because in the dataset there are 5 captions for each image, what the method does
is create:
- A list of image paths where each image path is repeated 5 times.
- A list of lists of word tokens where the number of inner lists is equal to the
number of image paths.
Args:
id_to_filename: Pair id to image filename dict.
id_to_captions: Pair id to captions dict.
Returns:
The image paths, the captions and the lengths of the captions.
"""
assert len(id_to_filename.keys()) == len(id_to_captions.keys())
image_paths = []
captions = []
for pair_id in id_to_filename.keys():
for i in range(5):
image_paths.append(id_to_filename[pair_id])
captions.append(id_to_captions[pair_id][i])
assert len(image_paths) == len(captions)
return image_paths, captions
def get_data(self):
image_paths, captions = self.get_data_wrapper(
self.id_to_filename, self.id_to_captions
)
return image_paths, captions
class TrainCocoDataset(BaseCocoDataset):
# Adapted for working with the Microsoft COCO dataset.
def __init__(self, images_path: str, json_path: str):
"""Creates a dataset object.
Args:
images_path: Path where the images are located.
json_path: Path to the json file where the mappings are indicated as well
as the captions.
"""
super().__init__(images_path, json_path)
logger.info("Class variables set...")
class ValCocoDataset(BaseCocoDataset):
# Adapted for working with the Microsoft COCO dataset.
def __init__(self, images_path: str, json_path: str, val_size: int = None):
"""Creates a dataset object.
Args:
images_path: Path where the images are located.
json_path: Path to the json file where the mappings are indicated as well
as the captions.
val_size: The size of the validation set.
"""
super().__init__(images_path, json_path)
self.val_size = val_size
class FlickrDataset:
# Adapted for working with the Flickr8k and Flickr30k dataset.
def __init__(self, images_path: str, texts_path: str):
self.img_path_caption = self.parse_captions_filenames(texts_path)
self.images_path = images_path
logger.info("Object variables set...")
@staticmethod
def parse_captions_filenames(texts_path: str) -> Dict[str, List[str]]:
"""Creates a dictionary that holds:
Key: The full path to the image.
Value: A list of lists where each token in the inner list is a word. The number
of sublists is 5.
Args:
texts_path: Path where the text doc with the descriptions is.
Returns:
A dictionary that represents what is explained above.
"""
img_path_caption: Dict[str, List[str]] = {}
with open(texts_path, "r") as file:
for line in file:
line_parts = line.split("\t")
image_tag = line_parts[0].partition("#")[0]
caption = line_parts[1]
if image_tag not in img_path_caption:
img_path_caption[image_tag] = []
img_path_caption[image_tag].append(preprocess_caption(caption))
return img_path_caption
@staticmethod
def get_data_wrapper(
imgs_file_path: str,
img_path_caption: Dict[str, List[str]],
images_dir_path: str,
):
"""Returns the image paths, the captions and the lengths of the captions.
Args:
imgs_file_path: A path to a file where all the images belonging to the
validation part of the dataset are listed.
img_path_caption: Image name to list of captions dict.
images_dir_path: A path where all the images are located.
Returns:
Image paths, captions and lengths.
"""
image_paths = []
captions = []
with open(imgs_file_path, "r") as file:
for image_name in file:
# Remove the newline character at the end
image_name = image_name[:-1]
# If there is no specified codec in the name of the image append jpg
if not image_name.endswith(".jpg"):
image_name += ".jpg"
for i in range(5):
image_paths.append(os.path.join(images_dir_path, image_name))
captions.append(img_path_caption[image_name][i])
assert len(image_paths) == len(captions)
return image_paths, captions
def get_data(self, images_file_path: str):
image_paths, captions = self.get_data_wrapper(
images_file_path, self.img_path_caption, self.images_path
)
return image_paths, captions
class PascalSentencesDataset:
# Adapted for working with the Pascal sentences dataset.
def __init__(self, images_path, texts_path):
self.category_image_path_captions = self.parse_captions_filenames(
texts_path, images_path
)
@staticmethod
def parse_captions_filenames(
texts_path: str, images_path: str
) -> Dict[str, Dict[str, List[str]]]:
"""Creates a dictionary of dictionaries where:
1. The keys of the first dict are the different categories of data.
2. The keys of the second dict are the image paths for the corresponding
category.
3. The values of the of second dict are a list of list where each list holds the
5 different captions for the image path, and each sublist holds the indexed
words of the caption.
Args:
texts_path: Path where the image captions are.
images_path: Path where the images are.
Returns:
A dictionary as explained above.
"""
category_image_path_captions: Dict[str, Dict[str, List[str]]] = dict(dict())
for category in os.listdir(texts_path):
file_path = os.path.join(texts_path, category)
if os.path.isdir(file_path):
if category not in category_image_path_captions:
category_image_path_captions[category] = {}
for txt_file in os.listdir(file_path):
if txt_file.endswith(".txt"):
image_path = os.path.join(
images_path, category, txt_file[:-3] + "jpg"
)
if image_path not in category_image_path_captions[category]:
category_image_path_captions[category][image_path] = []
txt_file_path = os.path.join(file_path, txt_file)
with open(txt_file_path, "r") as f:
for caption in f:
category_image_path_captions[category][
image_path
].append(preprocess_caption(caption))
return category_image_path_captions
@staticmethod
def get_data_wrapper(category_image_path_captions, data_type: str):
"""Returns the image paths, the captions and the captions lengths.
Args:
category_image_path_captions: A really compex dict :(
data_type: The type of the data that is returned (Train, val or test).
Returns:
The image paths, the captions and the captions lengths.
"""
image_paths = []
captions = []
train_size = pascal_train_size * 50
val_size = pascal_val_size * 50
for category in category_image_path_captions.keys():
for v, image_path in enumerate(
category_image_path_captions[category].keys()
):
for caption in category_image_path_captions[category][image_path]:
if data_type == "train":
if v < train_size:
image_paths.append(image_path)
captions.append(caption)
elif data_type == "val":
if train_size + val_size > v >= train_size:
image_paths.append(image_path)
captions.append(caption)
elif data_type == "test":
if v >= train_size + val_size:
image_paths.append(image_path)
captions.append(caption)
                else:
                    raise ValueError("Unknown data type: {}".format(data_type))
return image_paths, captions
def get_train_data(self):
img_paths, cap = self.get_data_wrapper(
self.category_image_path_captions, "train"
)
return img_paths, cap
def get_val_data(self):
img_paths, cap = self.get_data_wrapper(self.category_image_path_captions, "val")
return img_paths, cap
def get_test_data(self):
img_paths, cap = self.get_data_wrapper(
self.category_image_path_captions, "test"
)
return img_paths, cap
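# A minimal usage sketch (illustrative only; the directory names below are
# hypothetical and not part of the dataset code):
#
#   dataset = PascalSentencesDataset("data/pascal/images", "data/pascal/sentences")
#   train_paths, train_captions = dataset.get_train_data()
#   val_paths, val_captions = dataset.get_val_data()
#   test_paths, test_captions = dataset.get_test_data()
#   assert len(train_paths) == len(train_captions)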
| 34.467391
| 88
| 0.601861
| 1,591
| 12,684
| 4.57511
| 0.137649
| 0.038467
| 0.044374
| 0.054953
| 0.493474
| 0.381371
| 0.339744
| 0.276824
| 0.223932
| 0.206072
| 0
| 0.002679
| 0.323163
| 12,684
| 367
| 89
| 34.561308
| 0.845195
| 0.297383
| 0
| 0.255556
| 0
| 0
| 0.024575
| 0
| 0
| 0
| 0
| 0
| 0.016667
| 1
| 0.105556
| false
| 0
| 0.038889
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dda4affe6b2847c17389112e2763a725bc4f7b5b
| 5,545
|
py
|
Python
|
jaxtorch/image.py
|
GallagherCommaJack/jaxtorch
|
3bc6785d781f12fabf3a436d9cfc0b839ebf5aec
|
[
"MIT"
] | null | null | null |
jaxtorch/image.py
|
GallagherCommaJack/jaxtorch
|
3bc6785d781f12fabf3a436d9cfc0b839ebf5aec
|
[
"MIT"
] | null | null | null |
jaxtorch/image.py
|
GallagherCommaJack/jaxtorch
|
3bc6785d781f12fabf3a436d9cfc0b839ebf5aec
|
[
"MIT"
] | null | null | null |
import math
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from einops import repeat
def factor_int(n: int) -> Tuple[int, int]:
    """Factor n into the pair of integer divisors closest to sqrt(n)."""
    f1 = int(math.ceil(math.sqrt(n)))
    while n % f1:
        f1 -= 1
    f2 = n // f1
    return min(f1, f2), max(f1, f2)
def compute_channel_change_mat(c_in: int, c_out: int) -> np.ndarray:
    """Build a (c_out, c_in) matrix that maps between channel counts."""
    assert max(c_in, c_out) % min(c_in, c_out) == 0
    io_ratio = max(c_in, c_out) // min(c_in, c_out)
    base = np.eye(min(c_in, c_out))
    if c_in < c_out:
        # increasing channel count: duplicate each input channel
        return repeat(base, "d1 d2 -> (d1 r) d2", r=io_ratio)
    elif c_out < c_in:
        # decreasing channel count, average nearby channels
        return repeat(base, "d1 d2 -> d1 (d2 r)", r=io_ratio) / io_ratio
    else:
        return base
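# A quick illustration of what compute_channel_change_mat produces (my own
# worked example, not part of the original module): going from 1 channel to 2
# duplicates the channel, going from 2 channels to 1 averages them, and each
# output channel's weights sum to 1, so overall image intensity is preserved.
#
#   compute_channel_change_mat(1, 2)  # -> [[1.], [1.]]
#   compute_channel_change_mat(2, 1)  # -> [[0.5, 0.5]]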
upsample_arrays = dict(
lanczos3=np.array(
[
0.0073782638646662235,
0.030112292617559433,
-0.06799723953008652,
-0.13327467441558838,
0.2710106074810028,
0.8927707076072693,
0.8927707672119141,
0.2710106074810028,
-0.13327467441558838,
-0.06799724698066711,
0.03011229634284973,
0.007378263399004936,
],
),
cubic=np.array(
[
-0.0234375,
-0.0703125,
0.2265625,
0.8671875,
0.8671875,
0.2265625,
-0.0703125,
-0.0234375,
],
),
linear=np.array([0.25, 0.75, 0.75, 0.25]),
)
downsample_arrays = dict(
lanczos3=np.array(
[
0.003689131001010537,
0.015056144446134567,
-0.03399861603975296,
-0.066637322306633,
0.13550527393817902,
0.44638532400131226,
0.44638532400131226,
0.13550527393817902,
-0.066637322306633,
-0.03399861603975296,
0.015056144446134567,
0.003689131001010537,
]
),
cubic=np.array(
[
-0.01171875,
-0.03515625,
0.11328125,
0.43359375,
0.43359375,
0.11328125,
-0.03515625,
-0.01171875,
]
),
linear=np.array([0.125, 0.375, 0.375, 0.125]),
)
def upsample_kernel(
c_in: int,
c_out: int,
method: str = "linear",
) -> np.ndarray:
cmat = compute_channel_change_mat(c_in, c_out)
kernel = upsample_arrays[method]
weight = np.einsum("oi,h,w->oihw", cmat, kernel, kernel)
return weight
def downsample_kernel(
c_in: int,
c_out: int,
method="linear",
) -> np.ndarray:
cmat = compute_channel_change_mat(c_in, c_out)
kernel = downsample_arrays[method]
weight = np.einsum("oi,h,w->oihw", cmat, kernel, kernel)
return weight
def upsample2x_base(
img: jnp.ndarray,
kern: jnp.ndarray,
format: str = "NCHW",
    norm: bool = True,
):
ksize = kern.shape[-1]
kern = jax.lax.convert_element_type(kern, img.dtype)
out = jax.lax.conv_general_dilated(
img,
kern,
window_strides=[1, 1],
padding=[(ksize // 2, ksize // 2), (ksize // 2, ksize // 2)],
lhs_dilation=[2, 2],
rhs_dilation=None,
dimension_numbers=(format, "OIHW", format),
)
if norm:
# normalization for parts that touch the zero-padding
norm = jax.lax.conv_general_dilated(
jnp.ones([1, *img.shape[-3:]], dtype=img.dtype),
kern,
window_strides=[1, 1],
padding=[(ksize // 2, ksize // 2), (ksize // 2, ksize // 2)],
lhs_dilation=[2, 2],
rhs_dilation=None,
dimension_numbers=(format, "OIHW", format),
)
out = out / norm
return out
def downsample2x_base(
x: jnp.ndarray,
kern: jnp.ndarray,
format: str = "NCHW",
    norm: bool = True,
):
ksize = kern.shape[-1]
kern = jax.lax.convert_element_type(kern, x.dtype)
out = jax.lax.conv_general_dilated(
x,
kern,
window_strides=[2, 2],
padding=[(ksize // 2 - 1, ksize // 2 - 1), (ksize // 2 - 1, ksize // 2 - 1)],
lhs_dilation=[1, 1],
rhs_dilation=None,
dimension_numbers=(format, "OIHW", format),
)
if norm:
# normalization for parts that touch the zero-padding
norm = jax.lax.conv_general_dilated(
jnp.ones([1, *x.shape[-3:]], dtype=x.dtype),
kern,
window_strides=[2, 2],
padding=[
(ksize // 2 - 1, ksize // 2 - 1),
(ksize // 2 - 1, ksize // 2 - 1),
],
lhs_dilation=[1, 1],
rhs_dilation=None,
dimension_numbers=(format, "OIHW", format),
)
out = out / norm
return out
def upsample2x(
img: jnp.ndarray,
    c_out: Optional[int] = None,
method: str = "linear",
format: str = "NCHW",
) -> jnp.ndarray:
c_in = img.shape[-3]
if c_out is None:
c_out = c_in
kern = upsample_kernel(c_in, c_out, method=method)
kern = jnp.array(kern, dtype=img.dtype)
return upsample2x_base(img, kern, format)
def downsample2x(
img: jnp.ndarray,
    c_out: Optional[int] = None,
method: str = "linear",
format: str = "NCHW",
) -> jnp.ndarray:
c_in = img.shape[-3]
if c_out is None:
c_out = c_in
kern = downsample_kernel(c_in, c_out, method=method)
kern = jax.lax.convert_element_type(kern, img.dtype)
return downsample2x_base(img, kern, format)
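# A minimal usage sketch (my own addition; shapes assume the default NCHW
# layout and the default "linear" kernel):
#
#   x = jnp.zeros((1, 3, 32, 32))
#   up = upsample2x(x)               # (1, 3, 64, 64)
#   down = downsample2x(x)           # (1, 3, 16, 16)
#   wide = upsample2x(x, c_out=6)    # (1, 6, 64, 64), channels duplicated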
| 25.671296
| 85
| 0.544274
| 686
| 5,545
| 4.262391
| 0.198251
| 0.02736
| 0.01368
| 0.02394
| 0.575581
| 0.572161
| 0.527018
| 0.50684
| 0.471272
| 0.454856
| 0
| 0.180038
| 0.327863
| 5,545
| 215
| 86
| 25.790698
| 0.604508
| 0.027592
| 0
| 0.636364
| 0
| 0
| 0.021529
| 0
| 0
| 0
| 0
| 0
| 0.005348
| 1
| 0.042781
| false
| 0
| 0.032086
| 0
| 0.128342
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dda604bdbe931306a411dfabae424401c18dc54e
| 1,210
|
py
|
Python
|
1-image2ascii/image2ascii.py
|
dourgey/Python_Exercise
|
f41d69033b76d2fea3671f751e936cb804742b57
|
[
"MIT"
] | null | null | null |
1-image2ascii/image2ascii.py
|
dourgey/Python_Exercise
|
f41d69033b76d2fea3671f751e936cb804742b57
|
[
"MIT"
] | null | null | null |
1-image2ascii/image2ascii.py
|
dourgey/Python_Exercise
|
f41d69033b76d2fea3671f751e936cb804742b57
|
[
"MIT"
] | null | null | null |
# Author: @dourgey
# Create Time: 2019/12/27: 18:06
# Key concepts covered:
# using argparse
# checking whether a file path exists
# reading and processing an image with Pillow
# writing to a file
import argparse
import os
import sys
from PIL import Image
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--image", help="主人要转换的图片路径喵,默认在当前路径下读取喵~")
parser.add_argument("-f", "--file", help="主人要保存的字符画文件路径喵,默认保存在当前路径下喵~")
args = parser.parse_args()
if not os.path.exists(args.image):  # if the image path does not exist
    print("The image path does not exist, please check it and try again.")
sys.exit(0)
img_path = args.image
im = Image.open(img_path).convert("RGB")  # ensure getpixel returns (r, g, b)
width, height = im.size
# Scale to 100 characters wide; the extra 2.5 factor compensates for terminal
# characters being taller than they are wide
t_height = int(height / width * 100 / 2.5)
im = im.resize((100, t_height), Image.ANTIALIAS)
def get_char(r, g, b, alpha=256):
    # character ramp ordered from dense (dark) to sparse (bright)
    ascii_char = "$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\|()1{}[]?-_+~<>i!lI;:,\"^`'. "
    if alpha == 0:
        return " "
    # RGB to grayscale; see https://www.cnblogs.com/carekee/articles/3629964.html
    gray = (r * 38 + g * 75 + b * 15) >> 7
    # map gray (0-255) onto the ramp so dark pixels get dense characters
    return ascii_char[gray * len(ascii_char) // 256]
f = open(args.file, "w")  # open the output file for writing
# convert the image pixel by pixel, row by row, writing to the file
for i in range(t_height):
for j in range(100):
r, g, b = im.getpixel((j, i))
f.write(
get_char(r, g, b)
)
f.write("\n")
f.close()
print("已经为主人处理好了喵~")
| 22
| 107
| 0.629752
| 173
| 1,210
| 4.32948
| 0.583815
| 0.028037
| 0.012016
| 0.024032
| 0.026702
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048156
| 0.193388
| 1,210
| 54
| 108
| 22.407407
| 0.719262
| 0.161157
| 0
| 0
| 0
| 0
| 0.169169
| 0.137137
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.125
| 0
| 0.21875
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dda62a60e83b2ac0fa35757329d616e26ea6b265
| 6,536
|
py
|
Python
|
python/ray/serialization.py
|
delding/ray
|
8532ba4272556aa24b5e0c7d275c7b383815c022
|
[
"Apache-2.0"
] | null | null | null |
python/ray/serialization.py
|
delding/ray
|
8532ba4272556aa24b5e0c7d275c7b383815c022
|
[
"Apache-2.0"
] | null | null | null |
python/ray/serialization.py
|
delding/ray
|
8532ba4272556aa24b5e0c7d275c7b383815c022
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import ray.numbuf
import ray.pickling as pickling
def check_serializable(cls):
"""Throws an exception if Ray cannot serialize this class efficiently.
Args:
cls (type): The class to be serialized.
Raises:
Exception: An exception is raised if Ray cannot serialize this class
efficiently.
"""
if is_named_tuple(cls):
# This case works.
return
if not hasattr(cls, "__new__"):
raise Exception("The class {} does not have a '__new__' attribute, and is "
"probably an old-style class. We do not support this. "
"Please either make it a new-style class by inheriting "
"from 'object', or use "
"'ray.register_class(cls, pickle=True)'. However, note "
"that pickle is inefficient.".format(cls))
try:
obj = cls.__new__(cls)
  except Exception:
raise Exception("The class {} has overridden '__new__', so Ray may not be "
"able to serialize it efficiently. Try using "
"'ray.register_class(cls, pickle=True)'. However, note "
"that pickle is inefficient.".format(cls))
if not hasattr(obj, "__dict__"):
raise Exception("Objects of the class {} do not have a `__dict__` "
"attribute, so Ray cannot serialize it efficiently. Try "
"using 'ray.register_class(cls, pickle=True)'. However, "
"note that pickle is inefficient.".format(cls))
if hasattr(obj, "__slots__"):
raise Exception("The class {} uses '__slots__', so Ray may not be able to "
"serialize it efficiently. Try using "
"'ray.register_class(cls, pickle=True)'. However, note "
"that pickle is inefficient.".format(cls))
# This field keeps track of a whitelisted set of classes that Ray will
# serialize.
whitelisted_classes = {}
classes_to_pickle = set()
custom_serializers = {}
custom_deserializers = {}
def class_identifier(typ):
"""Return a string that identifies this type."""
return "{}.{}".format(typ.__module__, typ.__name__)
def is_named_tuple(cls):
"""Return True if cls is a namedtuple and False otherwise."""
b = cls.__bases__
if len(b) != 1 or b[0] != tuple:
return False
f = getattr(cls, "_fields", None)
if not isinstance(f, tuple):
return False
return all(type(n) == str for n in f)
def add_class_to_whitelist(cls, pickle=False, custom_serializer=None,
custom_deserializer=None):
"""Add cls to the list of classes that we can serialize.
Args:
cls (type): The class that we can serialize.
pickle (bool): True if the serialization should be done with pickle. False
if it should be done efficiently with Ray.
custom_serializer: This argument is optional, but can be provided to
serialize objects of the class in a particular way.
custom_deserializer: This argument is optional, but can be provided to
deserialize objects of the class in a particular way.
"""
class_id = class_identifier(cls)
whitelisted_classes[class_id] = cls
if pickle:
classes_to_pickle.add(class_id)
if custom_serializer is not None:
custom_serializers[class_id] = custom_serializer
custom_deserializers[class_id] = custom_deserializer
# Here we define a custom serializer and deserializer for handling numpy
# arrays that contain objects.
def array_custom_serializer(obj):
return obj.tolist(), obj.dtype.str
def array_custom_deserializer(serialized_obj):
return np.array(serialized_obj[0], dtype=np.dtype(serialized_obj[1]))
add_class_to_whitelist(np.ndarray, pickle=False,
custom_serializer=array_custom_serializer,
custom_deserializer=array_custom_deserializer)
def serialize(obj):
"""This is the callback that will be used by numbuf.
If numbuf does not know how to serialize an object, it will call this method.
Args:
obj (object): A Python object.
Returns:
    A dictionary that has the key "_pytype_" to identify the class, and
contains all information needed to reconstruct the object.
"""
class_id = class_identifier(type(obj))
if class_id not in whitelisted_classes:
raise Exception("Ray does not know how to serialize objects of type {}. "
"To fix this, call 'ray.register_class' with this class."
.format(type(obj)))
if class_id in classes_to_pickle:
serialized_obj = {"data": pickling.dumps(obj)}
elif class_id in custom_serializers.keys():
serialized_obj = {"data": custom_serializers[class_id](obj)}
else:
# Handle the namedtuple case.
if is_named_tuple(type(obj)):
serialized_obj = {}
serialized_obj["_ray_getnewargs_"] = obj.__getnewargs__()
elif hasattr(obj, "__dict__"):
serialized_obj = obj.__dict__
else:
raise Exception("We do not know how to serialize the object '{}'"
.format(obj))
result = dict(serialized_obj, **{"_pytype_": class_id})
return result
def deserialize(serialized_obj):
"""This is the callback that will be used by numbuf.
If numbuf encounters a dictionary that contains the key "_pytype_" during
deserialization, it will ask this callback to deserialize the object.
Args:
serialized_obj (object): A dictionary that contains the key "_pytype_".
Returns:
A Python object.
"""
class_id = serialized_obj["_pytype_"]
cls = whitelisted_classes[class_id]
if class_id in classes_to_pickle:
obj = pickling.loads(serialized_obj["data"])
elif class_id in custom_deserializers.keys():
obj = custom_deserializers[class_id](serialized_obj["data"])
else:
# In this case, serialized_obj should just be the __dict__ field.
if "_ray_getnewargs_" in serialized_obj:
obj = cls.__new__(cls, *serialized_obj["_ray_getnewargs_"])
else:
obj = cls.__new__(cls)
serialized_obj.pop("_pytype_")
obj.__dict__.update(serialized_obj)
return obj
def set_callbacks():
"""Register the custom callbacks with numbuf.
  The serialize callback is used to serialize objects that numbuf does not know
  how to serialize (for example custom Python classes). The deserialize
  callback is used to deserialize objects that were serialized by the serialize
  callback.
"""
ray.numbuf.register_callbacks(serialize, deserialize)
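# A minimal round-trip sketch (my own illustration; `Point` is a made-up
# class, not part of Ray):
#
#   class Point(object):
#       def __init__(self, x, y):
#           self.x, self.y = x, y
#
#   add_class_to_whitelist(Point)        # efficient __dict__-based path
#   blob = serialize(Point(1, 2))        # {'x': 1, 'y': 2, '_pytype_': '...Point'}
#   restored = deserialize(dict(blob))   # Point instance with x=1, y=2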
| 35.521739
| 79
| 0.684823
| 873
| 6,536
| 4.894616
| 0.211913
| 0.057805
| 0.018722
| 0.017786
| 0.291832
| 0.250644
| 0.233092
| 0.155862
| 0.140417
| 0.121694
| 0
| 0.000794
| 0.229498
| 6,536
| 183
| 80
| 35.715847
| 0.847697
| 0.304468
| 0
| 0.158416
| 0
| 0
| 0.248928
| 0.021666
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089109
| false
| 0
| 0.059406
| 0.019802
| 0.237624
| 0.009901
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dda91173fa6aa6ba29a55f8ecc21898b460a57e2
| 3,729
|
py
|
Python
|
wlcsim/FrankElastic/stonefence.py
|
SpakowitzLab/BasicWLC
|
13edbbc8e8cd36a3586571ff4d80880fc89d30e6
|
[
"MIT"
] | 1
|
2021-03-16T01:39:18.000Z
|
2021-03-16T01:39:18.000Z
|
wlcsim/FrankElastic/stonefence.py
|
riscalab/wlcsim
|
e34877ef6c5dc83c6444380dbe624b371d70faf2
|
[
"MIT"
] | 17
|
2016-07-08T21:17:40.000Z
|
2017-01-24T09:05:25.000Z
|
wlcsim/FrankElastic/stonefence.py
|
riscalab/wlcsim
|
e34877ef6c5dc83c6444380dbe624b371d70faf2
|
[
"MIT"
] | 9
|
2016-06-21T22:03:53.000Z
|
2016-11-10T00:55:01.000Z
|
from numpy import sqrt
import numpy as np
#from util import sphinx_compat_jit as jit
from numba import jit
ORDER_L = 50  # number of l values retained in the expansion
@jit
def alpha(l,m):
return sqrt((3*(l-m)*(l+m))/(4*np.pi*(2*l-1)*(2*l+1)))
@jit
def alpha_plus(l,m):
return sqrt((3*(l+m)*(l+m+1))/(8*np.pi*(2*l-1)*(2*l+1)))
@jit
def Alm(l,m):
return alpha(l,m)*alpha(l-1,m)/alpha(2,0)
@jit
def Blm(l,m):
return (alpha(l+1,m)*alpha(l+1,m)
- alpha(1,0)*sqrt(0.25/np.pi) +
alpha(l,m)*alpha(l,m))/alpha(2,0)
@jit
def PgammaB_vec(m, p, gamma):
    """Vector of P - gamma*B values, where P = p + ell*(ell + 1).
    Returns:
        vector with index ell
    """
PgammaB = np.zeros(ORDER_L, np.complex128)
for ell in range(abs(m),ORDER_L):
PgammaB[ell] = p+ell*(ell+1) - gamma*Blm(ell,m)
return PgammaB
@jit
def Alm_vec(m):
Am_vec = np.zeros(ORDER_L, np.complex128)
for ell in range(abs(m)+2,ORDER_L):
Am_vec[ell] = Alm(ell,m)
return Am_vec
@jit
def Wplus_vec(m, gamma, p, Am, PgammaB):
Wplus = np.zeros(ORDER_L, np.complex128)
for ell in (ORDER_L-1,ORDER_L-2):
Wplus[ell] = 1.0/PgammaB[ell]
for ell in range(ORDER_L-3,abs(m)-1,-1):
Wplus[ell] = 1.0/(PgammaB[ell] - (gamma*Am[ell+2])**2*Wplus[ell+2])
return Wplus
@jit
def Wminus_vec(m, gamma, p, Am, PgammaB):
Wminus = np.zeros(ORDER_L, np.complex128)
for ell in (abs(m),abs(m)+1):
Wminus[ell] = 1.0/PgammaB[ell]
for ell in range(abs(m)+2,ORDER_L):
Wminus[ell] = 1.0/(PgammaB[ell] - (gamma*Am[ell])**2*Wminus[ell-2])
return Wminus
@jit
def Gmll_matrix(Wplus, Wminus, Am, PgammaB, gamma, m):
""""Matrox of propagators between starting and ending l value.
Args:
Wplus (numpy array): Result of Wplus_vec for same m, p
Wminus (numpy array): Reult of Wminus_vec for same m, p
Am (numpy array): Result of Am_vec for same m
PgammaB (numpy array): Result of PgammaB_vec for same m, p, gamma
gamma (float): alignment strength, in kT's per Kuhn length
m (int): z component of agular momentum quantum number
Returns:
An ORDER_L x ORDER_L numpy matrix with propagators that use Maier-Saupe
steps to get from l0 to lf.
"""
Wpm = np.zeros(ORDER_L,np.complex128)
absm = abs(m)
Wpm[absm:] = (Wplus[absm:]*Wminus[absm:])\
/(Wminus[absm:]
- PgammaB[absm:]*Wplus[absm:]*Wminus[absm:] + Wplus[absm:])
Gmll = np.zeros((ORDER_L,ORDER_L), np.complex128)
for ell in range(abs(m),ORDER_L):
Gmll[ell,ell] = Wpm[ell]
for lf in range(ell+2,ORDER_L,2):
Gmll[ell, lf] = Gmll[ell, lf-2]*Wplus[lf]*Am[lf]*gamma
Gmll[lf, ell] = Gmll[ell, lf] # Must be symmetric
        # the following loop is an alternative to using the symmetry property
        #for lf in range(ell-2,-1,-2):
        #    Gmll[ell, lf] = Gmll[ell, lf+2]*Wminus[lf]*Am[lf+2]*gamma
return Gmll
def precalculate_data(p, gamma, m_values=[0]):
"""Precalculate W_plus, W_minus, W_pm, and G_m_ll
Args:
p (complex): laplace conjugate of path length
gamma (real): aligning l=2 (Maier-Saupe) field strength
m_values (list): list of integer m values to precalculate for
"""
Wps = {}
Wms = {}
Gmlls = {}
for m in m_values:
Am = Alm_vec(m)
PgammaB = PgammaB_vec(m, p, gamma)
Wplus = Wplus_vec(m, gamma, p, Am, PgammaB)
Wminus = Wminus_vec(m, gamma, p, Am, PgammaB)
Gmll = Gmll_matrix(Wplus, Wminus, Am, PgammaB, gamma, m)
Wps[m]=Wplus
Wms[m]=Wminus
Gmlls[m] = Gmll
return {"Wplus":Wps, "Wminus":Wms, "Gmll":Gmlls, "ms":m_values, "p":p,
"gamma":gamma}
| 30.565574
| 79
| 0.592116
| 629
| 3,729
| 3.434022
| 0.193959
| 0.05
| 0.025926
| 0.036111
| 0.415741
| 0.342593
| 0.28287
| 0.231944
| 0.180093
| 0.075463
| 0
| 0.028017
| 0.253419
| 3,729
| 121
| 80
| 30.818182
| 0.747845
| 0.281845
| 0
| 0.173333
| 0
| 0
| 0.008942
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.04
| 0.053333
| 0.306667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddaa279788a0d07cc55b56c6e5a215a8e2e118cc
| 9,283
|
py
|
Python
|
controllers/default.py
|
npfe/pursuit
|
edd2d66ec0770251041b748c4b9f967a15c138b5
|
[
"Unlicense"
] | null | null | null |
controllers/default.py
|
npfe/pursuit
|
edd2d66ec0770251041b748c4b9f967a15c138b5
|
[
"Unlicense"
] | 16
|
2020-03-30T13:00:10.000Z
|
2020-05-16T16:42:52.000Z
|
controllers/default.py
|
npfe/pursuit
|
edd2d66ec0770251041b748c4b9f967a15c138b5
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------
# This is a sample controller
# this file is released under public domain and you can use without limitations
# -------------------------------------------------------------------------
import json
from datetime import datetime
from pprint import pprint
status = {1:'not_started', 2:'hold', 3:'track', 4:'done'}
# ---- index page ----
def index():
level = 1
archive = list()
# top entries selection
entries_list = db(db.entry.parent == None).select().as_list()
for entry in entries_list:
# sets the level of the top entry
entry['level'] = 0
# adds the qty of children
entry['children'] = db(db.entry.parent == entry['id']).count()
        # adds the count of each subitem status for the progress bar
entry['not_started'], entry['hold'], entry['track'], entry['done'], entry['sum_w'] = get_w_progress(entry['name'], entry)
# creates a list containing ids of each entry
entries_id = [entry['id'] for entry in entries_list]
next_id = list() # placeholder for ids to be pushed into the loop
# recursively populates the list
for i, id in enumerate(entries_id):
children = db(db.entry.parent == id).select().as_list()
for child in children:
temp_level = level
# finds the position in entries_list where to insert child
index = next((index for (index, d) in enumerate(entries_list) if d['id']==child['parent']), None)
            # if the parent is not displayed (e.g. parent status is done)
if index == None:
level = 1 # item level is set to 1
index = 1 # item to be shown on top
else:
level = entries_list[index]['level']+1
# position index to 1 after position of the parent
index+=1
# append id of current child in the loop to the next list
next_id.append(child['id'])
# sets the entry
child['level'] = level
# sets level of the entry back
level = temp_level
# counts the children of the current entry
if child['status'] != 4:
child['children'] = db((db.entry.parent == child['id']) & (db.entry.status != 4)).count()
else:
child['children'] = 0
# fetches the last log of the current entry
log = db(db.journal.parent == child['id']).select().last()
# adds last log and last edit date
if log != None :
child['log'] = log.body[:75]+'...' if len(log.body) > 75 else log.body
child['last'] = get_status(log.created_on)
else:
child['log'] = ''
# skips items that are done or insert them in the final structure
if child['status'] != 4:
entries_list.insert(index,child)
else:
archive.append(child)
if i == len(entries_id)-1:
for item in next_id:
entries_id.append(item)
next_id = []
level+=1
return dict(data=entries_list, status=status, archive=archive)
def get_w_progress(name, id):
    # entry status count dictionary
entry_status = {1:0, 2:0, 3:0, 4:0}
entries_list = [id]
entries_id = [id['id']]
next_id = list()
level=1
# recursively populates the list
for i, id in enumerate(entries_id):
children = db(db.entry.parent == id).select().as_list()
for child in children:
# finds index in entries_list where to insert the children
index = next((index for (index, d) in enumerate(entries_list) if d['id']==child['parent']), None)
# position index to 1 after position of the parent
index = index+1 if index!=None else index
next_id.append(child['id'])
# counts tasks status
entry_status[child['status']]+=1
if i == len(entries_id)-1:
for item in next_id:
entries_id.append(item)
next_id = []
level+=1
total = sum(entry_status.values())
return entry_status[1], entry_status[2], entry_status[3], entry_status[4], total
def get_status(status):
wrapper_class = 'badge badge-pill '
class_type = {2:'badge-success', 3:'badge-warning', 5:'badge-danger'}
# creates a list of the class_type dict keys
list_type = list(class_type.keys())
# finds out the duration since last log was entered
now = datetime.now()
delta = (now-status).days
if delta < 1:
delta = int((now-status).seconds/3600)
value = '%s hour%s' % (str(delta), 's' if delta>1 else '')
wrapper_class = wrapper_class+class_type[list_type[0]]
else:
value = '%s day%s' % (str(delta), 's' if delta>1 else '')
if delta > list_type[2]:
wrapper_class = wrapper_class+class_type[list_type[2]]
elif delta >= list_type[1]:
wrapper_class = wrapper_class+class_type[list_type[1]]
else:
wrapper_class = wrapper_class+class_type[list_type[0]]
# creates the span html to be displayed
delta = SPAN(value, _class=wrapper_class)
return delta
def children(parent):
children_list = list()
db_children = db(db.entry.parent == parent['id']).select()
if len(db_children) > 0:
for db_child in db_children:
child_data = {'name': db_child.name, 'id': db_child.id, 'children':{}}
child_data['children'] = children(db_child)
children_list.append(child_data)
return children_list
def children_list(parent):
c_list = list()
children = db(db.entry.parent == parent).select()
for child in children:
c_count = db(db.entry.parent == child.id).count()
c_list.append({'name': child.name, 'id': child.id, 'children': c_count})
return c_list
def item():
item = request.args(0)
status = {1:'not_started', 2:'hold', 3:'track', 4:'done'}
record = db.entry[item]
parent = db.entry[record.parent]
children = db(db.entry.parent == record.id).select()
notes = db(db.notes.parent == item).select(db.notes.id, db.notes.title, db.notes.modified_on)
return locals()
def new_item():
parent = request.args(0)
if parent == 0:
parent = None
db.entry.parent.default = parent
form = SQLFORM(db.entry)
form.vars.parent = parent
if form.process(session=None, formname='_newitem').accepted:
response.js = "location.reload();"
response.flash=('Log inserted')
else:
print(form.vars)
return locals()
def edit_item():
entry = request.args(0)
redirection = request.args(1).replace('_', '/')
db.entry.id.readable = db.entry.id.writable = False
db.entry.parent.readable = db.entry.parent.writable = False
db.entry.status.readable = db.entry.status.writable = False
form = SQLFORM(db.entry, entry)
if form.process().accepted:
redirect(URL( redirection))
return locals()
def delete_item():
entry = request.args(0)
form = FORM.confirm('Yes', {'Back':URL('index')})
if form.accepted:
db(db.entry.id==entry).delete()
session.flash = "entry deleted"
redirect(URL('default', 'index'))
return locals()
def set_status():
record = request.args(0)
status = request.args(1)
db(db.entry.id == record).update(status=status)
session.flash = '%s status updated' % (db.entry[record].name)
redirect(URL('default', 'item', args=record))
def log_form():
record = request.args(0)
db.journal.parent.readable = db.journal.parent.writable = False
db.journal.parent.default = record
db.journal.created_on.default = request.now
form = SQLFORM(db.journal)
form.vars.created_on = request.now
if form.process(session=None, formname='_newlog').accepted:
response.js = "jQuery('#%s').get(0).reload()" % request.vars.reload_div
response.flash=('Log inserted')
return locals()
def log_journal():
record = request.args(0)
logs = db(db.journal.parent == record).select(orderby=~db.journal.id)
return dict(logs=logs)
def log_delete():
record = request.args(0)
db(db.journal.id == record).delete()
response.js = "jQuery('#%s').get(0).reload()" % request.args(1)
response.flash=('Log deleted')
def log_edit():
record = db.journal(request.args(0))
db.journal.id.readable = db.journal.id.writable = False
db.journal.parent.readable = db.journal.parent.writable = False
db.journal.created_on.readable = db.journal.created_on.writable = False
form = SQLFORM(db.journal, record)
if form.process().accepted:
response.js = "jQuery('#log_journal').get(0).reload(); "
response.js += "$('body').removeClass('modal-open'); "
response.js += "$('.modal-backdrop').remove(); "
return dict(form=form)
# ---- action to serve uploaded static content (required) ----
@cache.action()
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
return response.download(request, db)
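# The traversal in index() and get_w_progress() above is a breadth-first walk
# over the entry tree: process one level's ids, collect the next level's ids,
# and extend the work list once the current level is exhausted. A minimal
# standalone sketch of the same pattern (my own illustration, using plain
# dicts instead of DAL rows; the names are hypothetical):
#
#   def walk(tree, roots):
#       ids, next_id = list(roots), []
#       for i, node in enumerate(ids):
#           next_id.extend(tree.get(node, []))
#           if i == len(ids) - 1 and next_id:   # current level exhausted
#               ids.extend(next_id)
#               next_id = []
#       return ids
#
#   walk({1: [2, 3], 2: [4]}, [1])   # -> [1, 2, 3, 4]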
| 39.004202
| 129
| 0.596251
| 1,229
| 9,283
| 4.412531
| 0.179821
| 0.030979
| 0.028766
| 0.024894
| 0.286373
| 0.229947
| 0.189747
| 0.189747
| 0.154896
| 0.154896
| 0
| 0.011268
| 0.254336
| 9,283
| 237
| 130
| 39.168776
| 0.772176
| 0.163632
| 0
| 0.306011
| 0
| 0
| 0.077033
| 0.021139
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081967
| false
| 0
| 0.016393
| 0
| 0.169399
| 0.010929
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|