seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
37785219536 | from pylab import *
import matplotlib
def n(j, xc, x):
    """Evaluate the j-th Newton basis polynomial at xc.

    That is, the product of (xc - x[i]) for i = 0..j-1; the empty
    product (j == 0) is 1.  Works element-wise when xc is an array.
    """
    prod = 1
    for i in range(j):
        prod *= xc - x[i]
    return prod
def a(j, l, x, y):
    """Divided difference f[x_l, ..., x_j] for the points (x, y), computed
    recursively.  a(j, 0, x, y) is the j-th Newton polynomial coefficient."""
    if j == 0:
        return y[0]
    if j - l == 1:
        # Base case: first-order difference.
        return (y[j] - y[l]) / (x[j] - x[l])
    # Recurrence: (f[x_{l+1}..x_j] - f[x_l..x_{j-1}]) / (x_j - x_l)
    return (a(j, l + 1, x, y) - a(j - 1, l, x, y)) / (x[j] - x[l])
def N(xc, x, y):
    """Evaluate the Newton interpolating polynomial through the points
    (x, y) at xc, using the divided differences a() and basis products n()."""
    return sum(a(j, 0, x, y) * n(j, xc, x) for j in range(len(x)))
# Sample points to interpolate.
x = [0, 1, 2]
y = [-1, 1, 5]

# for testing
xc = 3
yc = N(xc, x, y)
"""
print ''
print xc, yc
#plot
print ("u", u)
u = N(t,x,y)
plot(t,u)
grid(True)
show()
"""
# Evaluate the interpolant on a dense grid and plot it with the data points.
t = linspace(-7, 7, 100)
u = N(t, x, y)
# BUG FIX: the original plotted plt.plot(I, u) with I = np.arange(0, 5, 0.1)
# (50 points) against u (100 points), which raises a shape-mismatch error.
# u must be plotted against the grid t it was evaluated on; the unused I
# array has been removed.
plt.plot(t, u)
plt.plot(x, y, "ro")
plt.show()
| pdelfino/numerical-analysis | lista-3/example-2.py | example-2.py | py | 671 | python | en | code | 0 | github-code | 13 |
39090698159 | from selection_sort import selection_sort
from insertion_sort import insertion_sort
from merge_sort import merge_sort
from quick_sort import quick_sort
# Exercise each sorting implementation on its own copy of the same data so
# that in-place sorts cannot affect the others' input.
# (Also removes the unused `test` list from the original.)
_DATA = [99, 77, 44, 16, 1000, 7, 4, 8, 22, 3, 16, 99, 205, 33, 1, 100, 19, 12, 55]
selection_unsorted = list(_DATA)
insertion_unsorted = list(_DATA)
merge_unsorted = list(_DATA)
quick_unsorted = list(_DATA)

print('selection sort')
selection_sort_result = selection_sort(selection_unsorted)
print(selection_sort_result, '\n')

print('insertion sort')
insertion_sort_result = insertion_sort(insertion_unsorted)
print(insertion_sort_result, '\n')

print('merge sort')
# merge_sort and quick_sort take inclusive (low, high) index bounds.
merge_sort_result = merge_sort(merge_unsorted, 0, len(merge_unsorted) - 1)
print(merge_sort_result, '\n')

print('quick sort')
quick_sort_result = quick_sort(quick_unsorted, 0, len(quick_unsorted) - 1)
print(quick_sort_result, '\n')
| aconstantinou123/sorting_algorithms | main.py | main.py | py | 1,059 | python | en | code | 0 | github-code | 13 |
22678679872 | from __future__ import print_function
import numpy as np
from pysnptools.snpreader import Bed
# Input/output locations: 1000 Genomes EUR chromosome 22 PLINK data.
data_dir = '/groups/price/hilary/ibd/data'
bedfile = data_dir+'/1000G.EUR.QC.22'
outfile = bedfile+'.f2snps'

# Read the full genotype matrix (individuals x SNPs) into memory.
bed = Bed(bedfile)
x = bed.read()

# Mark SNPs whose genotype column sums to 2 or 976 and contains at least one
# heterozygote (value 1). NOTE(review): this looks like a selection of "f2"
# variants (minor allele carried on exactly two chromosomes, in two distinct
# heterozygous individuals) under either allele coding, with 976 presumably
# equal to 2*N - 2 for N = 489 samples -- confirm the sample count before
# relying on it.
b = np.array([sum(x.val[:,i]) in [2,976] and 1 in x.val[:,i] for i in range(len(x.sid))])
f2snps = x.sid[b]

# Write the selected SNP ids, one per line.
print('\n'.join(f2snps), file = open(outfile,'w'))
| hilaryfinucane/ibd | find_f2.py | find_f2.py | py | 398 | python | en | code | 0 | github-code | 13 |
31514826840 | import datetime
from django.db import models
from main.globals import UNSW_LATITUDE, UNSW_LONGITUDE
class Message(models.Model):
    """A message or notification sent between two FingrUser accounts."""
    # NOTE(review): EmailField is CharField plus email-format validation; for
    # free-form message text a plain CharField was probably intended --
    # confirm before changing (requires a schema migration).
    text = models.EmailField(max_length=100)
    # Set once, automatically, when the row is first saved.
    time = models.DateTimeField(auto_now_add=True)
    # Sender and recipient. (ForeignKey without on_delete: pre-Django-2.0
    # style, where CASCADE was the implicit default.)
    sentFrom = models.ForeignKey('registration.FingrUser', related_name='sent_from_fingruser')
    sentTo = models.ForeignKey('registration.FingrUser', related_name='sent_to_fingruser')
    # Whether the recipient has seen the message yet.
    read = models.BooleanField(default=False)
    MESSAGE = 'M'
    NOTIFICATION = 'N'
    TYPE_CHOICES = ((NOTIFICATION, 'Notification'),
                    (MESSAGE, 'Message'),
                    )
    # Single-character discriminator; new rows default to notifications.
    type = models.CharField(max_length=1, choices=TYPE_CHOICES, default=NOTIFICATION)
class Event(models.Model):
    """A user-created event at a location (defaults to the UNSW campus)."""
    title = models.CharField(max_length=30)
    owner = models.ForeignKey('registration.FingrUser', related_name='event_owner')
    # The callable itself (not today's value) is passed as the default, so
    # Django evaluates it when each row is created rather than at import time.
    day = datetime.date.today
    date = models.DateField(default=day)
    # NOTE(review): timeStart/timeEnd are full DateTimeFields alongside the
    # separate `date` field -- presumably only their time-of-day component is
    # meaningful; confirm against the views that populate them.
    timeStart = models.DateTimeField()
    timeEnd = models.DateTimeField()
    description = models.CharField(max_length=5000)
    latitude = models.FloatField(default=UNSW_LATITUDE)
    longitude = models.FloatField(default=UNSW_LONGITUDE)

    def __unicode__(self):
        # Python 2 string representation, e.g. "Party @ 2014-03-01".
        return self.title + " @ " + str(self.date)
24610691866 |
class MyList():
    """Holds two lists of decimal digits, least-significant digit first, and
    adds them together ("add two numbers" style problem).

    An empty or None operand is treated as the number 0.
    """

    def __init__(self, list1, list2):
        # Falsy input (None, []) degrades to the single digit 0.
        self.list1 = list1 or [0]
        self.list2 = list2 or [0]

    def chack_digits(self) -> bool:
        """
        Return True if every element of both lists is a digit in [0, 9].

        (Method name kept as-is for backward compatibility; "check_digits"
        was clearly intended.)
        """
        return all(0 <= number <= 9 for number in self.list1 + self.list2)

    def revers(self) -> str:
        """
        Add the two numbers whose digits are stored in reverse order and
        return a human-readable report of the sum.

        :raises Exception: if any element is outside [0, 9].
        :raises AttributeError: if both operands convert to falsy values.
        """
        if not self.chack_digits():
            raise Exception("Введенное число должно быть больше 0 и меньше 9.")
        # Convert each reversed digit list to the integer it represents.
        int_l1 = self.convert_to_integer(self.list1)
        int_l2 = self.convert_to_integer(self.list2)
        # Guard against both conversions failing (or both being zero).
        if not int_l1 and not int_l2:
            raise AttributeError("Invalid attribute")
        total_int = int_l1 + int_l2
        # Store the sum back in reversed-digit (least-significant-first) form.
        total_list = list(reversed(str(total_int)))
        output = f"Input: l1 = {self.list1}, l2 = {self.list2}\n"
        output += f"Output: {total_list}\n"
        output += f"Explanation: {int_l1} + {int_l2} = {total_int}"
        return output

    def convert_to_integer(self, lst=None):
        """
        Convert a reversed digit list to the integer it represents.

        Returns False when lst is not a list (kept for backward
        compatibility; raising would be more idiomatic).
        """
        # isinstance() replaces the original `lst != None and type(lst) == list`.
        if isinstance(lst, list):
            digits = [str(number) for number in reversed(lst)]
            return int("".join(digits))
        return False
#
# list_1 = [2,4,3]
# list_2 = [5,6,4]
#
# cls = MyList(list_1, list_2)
# r = cls.revers()
# print(r)
| aliabdullahsadikov/E24-test-task | main.py | main.py | py | 1,942 | python | en | code | 0 | github-code | 13 |
7147413529 | import csv
import hashlib
import inspect
import logging
from numbers import Number
from namespace import *
import codecs
import os
import petl as etl
import re
from loader.prefixes import PREFIX_LANGUAGE, PREFIX_MULTIMEDIA
from lxml import etree
from petl.util.base import Table
from rdflib import Literal, RDF, RDFS, XSD
# A logger to be used for logging warnings or errors detected during loading.
# (Handlers/formatting are expected to be configured by the application --
# only the level is set here.)
warning_log = logging.getLogger("load_warnings")
warning_log.setLevel(logging.WARNING)
def num_to_str(num):
    """
    Converts a number to a string, truncating it to its integer part.
    Anything that is not a Number (e.g. already a string) is returned
    unchanged.
    """
    return str(int(num)) if isinstance(num, Number) else num
def join_if_not_empty(items, sep=" "):
    """
    Joins items with the provided separator, skipping any item that is
    None or empty.
    """
    kept = (item for item in items if item and len(item) > 0)
    return sep.join(kept)
def to_hash_identifier(prefix, parts):
    """
    Return an identifier composed of the prefix and hash of the parts.

    Falsy parts are skipped. MD5 is used only as a stable identifier, not
    for security. (`unicode`: this module targets Python 2.)
    """
    hash_parts = hashlib.md5("".join([unicode(part) for part in parts if part]).encode("utf-8"))
    return "%s-%s" % (prefix, hash_parts.hexdigest())
def season_to_month(season):
    """
    Converts a season name to the month number it starts in.

    Raises KeyError for anything other than Spring, Summer or Fall.
    """
    season_start_months = {
        "Spring": 1,
        "Summer": 5,
        "Fall": 8,
    }
    return season_start_months[season]
# Month names, indexed by (month number - 1).
months = ("January", "February", "March", "April", "May", "June",
          "July", "August", "September", "October", "November", "December")


def month_str_to_month_int(month_str):
    """
    Convert a month name to the corresponding month number (1-12).

    Numbers pass through unchanged and numeric strings are converted to
    int.  Raises ValueError for an unrecognized month name.
    """
    if isinstance(month_str, Number):
        return month_str
    try:
        return int(month_str)
    except ValueError:
        # Not a numeric string; fall through to the name lookup below.
        pass
    return months.index(month_str) + 1
def month_int_to_month_str(month_int):
    """Convert a month number (1-12) to its name; strings pass through
    unchanged. (`basestring`: this module targets Python 2.)"""
    if isinstance(month_int, basestring):
        return month_int
    return months[month_int-1]
def add_date(date_uri, year, g, month=None, day=None, label=None):
    """
    Adds triples for a date to graph g.

    Precision is chosen from the arguments supplied: year+month+day,
    year+month, or year only (a day without a month is ignored). An
    explicit label overrides the generated human-readable label.

    Return True if date was added.
    """
    # Date
    # Filtering out dates that are set to 1900.
    # NOTE(review): 1900 is presumably a placeholder year in the source
    # data; confirm against the upstream feed.
    if year and str(year) != "1900":
        g.add((date_uri, RDF.type, VIVO.DateTimeValue))
        # Day, month, and year
        if day and month:
            g.add((date_uri, VIVO.dateTimePrecision, VIVO.yearMonthDayPrecision))
            g.add((date_uri, VIVO.dateTime,
                   Literal("%s-%02d-%02dT00:00:00" % (
                       year, month_str_to_month_int(month), day),
                       datatype=XSD.dateTime)))
            # Label like "March 5, 2012".
            g.add((date_uri,
                   RDFS.label,
                   Literal(label or "%s %s, %s" % (month_int_to_month_str(month), num_to_str(day), num_to_str(year)))))
        # Month and year
        elif month:
            g.add((date_uri, VIVO.dateTimePrecision, VIVO.yearMonthPrecision))
            g.add((date_uri, VIVO.dateTime,
                   Literal("%s-%02d-01T00:00:00" % (
                       year, month_str_to_month_int(month)),
                       datatype=XSD.dateTime)))
            # Label like "March 2012". Note the raw month value is used here,
            # not month_int_to_month_str(month).
            g.add((date_uri,
                   RDFS.label,
                   Literal(label or "%s %s" % (month, num_to_str(year)))))
        else:
            # Just year
            g.add((date_uri, VIVO.dateTimePrecision, VIVO.yearPrecision))
            g.add((date_uri, VIVO.dateTime,
                   Literal("%s-01-01T00:00:00" % (
                       year),
                       datatype=XSD.dateTime)))
            g.add((date_uri, RDFS.label, Literal(label or num_to_str(year))))
        return True
    return False
# Matches e.g. "Spring 2012"; group 1 is the season, group 2 the year.
# FIX: made this a raw string -- "\d" in a plain string only works by
# accident and is a deprecated invalid escape sequence on Python 3.6+.
term_re = re.compile(r"(Spring|Summer|Fall) (\d\d\d\d)")


def add_season_date(date_uri, date_str, g):
    """
    Parses a season date (e.g., Spring 2012) and adds triples for it to g,
    mapping the season to the month it starts in.

    Returns True if the parse was successful and the date was added.
    """
    if date_str:
        m = term_re.match(date_str)
        if m:
            season = m.group(1)
            year = m.group(2)
            # The original string (e.g. "Spring 2012") becomes the label.
            return add_date(date_uri, year, g, season_to_month(season), label=date_str)
    return False
def add_date_interval(interval_uri, subject_uri, g, start_uri=None, end_uri=None):
    """
    Adds triples describing a date interval and links it to subject_uri.

    Nothing is added unless at least one of start_uri/end_uri is given.
    """
    if not (start_uri or end_uri):
        return
    g.add((interval_uri, RDF.type, VIVO.DateTimeInterval))
    g.add((subject_uri, VIVO.dateTimeInterval, interval_uri))
    if start_uri:
        g.add((interval_uri, VIVO.start, start_uri))
    if end_uri:
        g.add((interval_uri, VIVO.end, end_uri))
# Banner language course codes mapped to human-readable language names.
language_map = {
    "ARAB": "Arabic",
    "BENG": "Bengali",
    "CHIN": "Chinese",
    "FREN": "French",
    "GERM": "German",
    "HIND": "Hindi/Urdu",
    "ITAL": "Italian",
    "JAPN": "Japanese",
    "KREN": "Korean",
    "MAND": "Mandarin",
    "PORT": "Portuguese",
    "PUNJ": "Punjabi",
    "RUSS": "Russian",
    "SPAN": "Spanish"
}
def add_language(language, person_uri, g):
    """Add a language entity (hash-identified, so shared across people) and
    record person_uri's expert understanding of it."""
    language_uri = D[to_hash_identifier(PREFIX_LANGUAGE, (language,))]
    triples = (
        (language_uri, RDF.type, LINKVOJ.Lingvo),
        (language_uri, RDFS.label, Literal(language)),
        (person_uri, LINKVOJ.expertUnderstanding, language_uri),
    )
    for triple in triples:
        g.add(triple)
def add_multimedia(multimedia, person_uri, multimedia_predicate, g):
    """
    Parse a packed multimedia string and add triples for each item.

    The input is a comma-separated list of "type|label|url" entries, where
    type is A (audio), O (web page), or anything else (treated as video).
    """
    # Ensure a trailing comma so the regex below can match the final entry.
    if not multimedia.endswith(","):
        multimedia += ","
    # Each match is "<1-char type>|<label>|<url>," -- non-greedy so a match
    # stops at the first comma after the URL. NOTE(review): this breaks if a
    # label or URL itself contains a comma; confirm the upstream data cannot.
    for multimedia_string in re.findall(r".\|.+?\|.+?,", multimedia):
        # Drop the trailing comma, then split into the three fields.
        (multimedia_type, multimedia_label, multimedia_url) = multimedia_string[:-1].split("|")
        # URL-derived hash id, so the same item is shared across people.
        multimedia_uri = D[to_hash_identifier(PREFIX_MULTIMEDIA, multimedia_url)]
        if multimedia_type == "A":
            multimedia_class = BIBO.AudioDocument
        elif multimedia_type == "O":
            multimedia_class = BIBO.Webpage
        else:
            multimedia_class = VIVO.Video
        g.add((multimedia_uri, RDF.type, multimedia_class))
        g.add((person_uri, multimedia_predicate, multimedia_uri))
        g.add((multimedia_uri, RDFS.label, Literal(multimedia_label)))
        g.add((multimedia_uri, VCARD.url, Literal(multimedia_url, datatype=XSD.anyURI)))
def strip_gw_prefix(string):
    """Remove a leading "GW_" from string, if present; non-strings and
    unprefixed strings pass through unchanged. (`basestring`: Python 2.)"""
    if isinstance(string, basestring) and string.startswith("GW_"):
        return string[3:]
    return string
def xml_result_generator(filepath):
    """
    Returns a generator that provides maps of field names to values read from
    xml produced by mysql --xml.
    """
    # Using lxml because recover=True makes it tolerant of unicode encoding problems.
    for event, row_elem in etree.iterparse(filepath, tag="row", recover=True):
        result = {}
        for field_elem in row_elem.iter("field"):
            # mysql --xml marks NULL with xsi:nil; empty elements are treated
            # the same way.
            if "xsi:nil" in field_elem.attrib or not field_elem.text:
                value = None
            else:
                # Strip whitespace
                value = field_elem.text.strip()
            result[field_elem.get("name")] = value
        # Free the parsed element so memory stays flat on large files.
        row_elem.clear()
        yield result
def remove_extra_args(func_args, func):
    """
    Removes values from map of function arguments that are not necessary to invoke the function.

    Mutates func_args in place, keeping only keys that match func's named
    positional parameters.
    """
    # NOTE(review): inspect.getargspec is deprecated (removed in Python
    # 3.11); acceptable for the Python 2 runtime this module targets.
    (arg_names, varargs, keywords, defaults) = inspect.getargspec(func)
    # list() the keys so deletion during iteration is safe.
    for key in list(func_args.keys()):
        if key not in arg_names:
            del func_args[key]
def valid_department_name(name):
    """Return True for a non-empty department name that is not one of the
    known placeholder values."""
    placeholders = ("No Department", "University-level Dept")
    return bool(name) and name not in placeholders
def valid_college_name(name):
    """Return True for a non-empty college name that is not one of the
    known placeholder values."""
    placeholders = ("University", "No College Designated")
    return bool(name) and name not in placeholders
# Register banner dialect
# Banner exports are pipe-delimited; the dialect is referenced by name in
# the csv.DictReader calls below.
csv.register_dialect("banner", delimiter="|")

# Map of banner position codes to VIVO classes
pos_code_to_classes = {
    # Research scientist or related
    "28101": "NonFacultyAcademic",
    "28301": "NonFacultyAcademic",
    "28302": "NonFacultyAcademic",
    "28502": "NonFacultyAcademic",
    "283R2": "NonFacultyAcademic",
    "283R1": "NonFacultyAcademic",
    "28102": "NonFacultyAcademic",
    "19S01": "NonFacultyAcademic",
    "28501": "NonFacultyAcademic",
    "27401": "NonFacultyAcademic",
    # Postdoc
    "289A1": "Postdoc",
    "289A2": "Postdoc",
    # Librarian
    "OC221": "Librarian",
    "OC231": "Librarian",
    "OD311": "Librarian",
    "OC241": "Librarian",
    "OC211": "Librarian",
    "30401": "Librarian",
    "OC341": "Librarian",
    "OA411": "Librarian",
    "OC321": "Librarian"
}
def get_netid_lookup(data_dir):
    """
    Returns a map of gwids to netids.

    Reads the pipe-delimited banner demographic export
    (vivo_demographic.txt) from data_dir; later rows for the same
    EMPLOYEEID overwrite earlier ones.
    """
    netid_map = {}
    with codecs.open(os.path.join(data_dir, "vivo_demographic.txt"), 'r', encoding="utf-8") as csv_file:
        reader = csv.DictReader(csv_file, dialect="banner")
        for row in reader:
            netid_map[row["EMPLOYEEID"]] = row["NETID"]
    return netid_map
def demographic_intersection(gwids, data_dir):
    """
    Returns the intersection of a provided list of gwids and the gwids in banner
    demographic data.

    The result is a list (in arbitrary set order).
    """
    demo_gwids = set()
    with codecs.open(os.path.join(data_dir, "vivo_demographic.txt"), 'r', encoding="utf-8") as csv_file:
        reader = csv.DictReader(csv_file, dialect="banner")
        for row in reader:
            demo_gwids.add(row["EMPLOYEEID"])
    return list(demo_gwids.intersection(gwids))
def get_non_faculty_gwids(data_dir, non_fac_limit=None):
    """
    Returns the list of non-faculty gwids.

    This is determined by taking the intersection of gwids in banner
    demographic data and gwids in mygw data and
    removing all faculty gwids.

    non_fac_limit, when given, caps the number of gwids returned (the
    order is arbitrary, so the cap is an arbitrary sample).
    """
    mygw_gwids = []
    for result in xml_result_generator(os.path.join(data_dir, "mygw_users.xml")):
        mygw_gwids.append(result["gw_id"])
    # Only gwids with demographic data
    demo_gwids = demographic_intersection(mygw_gwids, data_dir)
    # Not faculty gwids
    fac_gwids = get_faculty_gwids(data_dir)
    gwids = [gw_id for gw_id in demo_gwids if gw_id not in fac_gwids]
    if non_fac_limit is not None and len(gwids) > non_fac_limit:
        return gwids[:non_fac_limit]
    else:
        return gwids
def get_faculty_gwids(data_dir, fac_limit=None):
    """
    Returns the list of faculty gwids.

    This is determined by taking the intersection of gwids in banner
    demographic data and fis_faculty in certain roles.

    fac_limit, when given, caps the number of gwids returned (the order
    is arbitrary, so the cap is an arbitrary sample).
    """
    gwids = set()
    # fis faculty
    # Only these roles count as faculty for loading purposes.
    for result in xml_result_generator(os.path.join(data_dir, "fis_faculty.xml")):
        if result["role"] in ("Dean", "Dep Head", "Provost", "Faculty", "Faculty-COI", "CLAD"):
            gwids.add(result["gw_id"])
    demo_gwids = demographic_intersection(gwids, data_dir)
    if fac_limit is not None and len(demo_gwids) > fac_limit:
        return demo_gwids[:fac_limit]
    else:
        return demo_gwids
def mediaexpert_intersection(gwids, data_dir):
    """
    Returns the intersection of a provided list of gwids and the gwids in
    mediaexpert data (mygw_mediaexperts.xml), as a list.
    """
    mediaexpert_gwids = set()
    for result in xml_result_generator(os.path.join(data_dir, "mygw_mediaexperts.xml")):
        mediaexpert_gwids.add(result["gw_id"])
    return list(mediaexpert_gwids.intersection(gwids))
def get_skip_name_gwids(data_dir):
    """
    Returns the list of gwids for mediaexperts that have names
    (i.e. a non-empty last_name in mygw_mediaexperts.xml).
    """
    skip_name_gwids = set()
    for result in xml_result_generator(os.path.join(data_dir, "mygw_mediaexperts.xml")):
        if result["last_name"]:
            skip_name_gwids.add(result["gw_id"])
    return list(skip_name_gwids)
def format_phone_number(phone_number):
    """Normalize a 10-digit phone number to the form XXX-XXX-XXXX.

    Returns None when phone_number is falsy or does not contain exactly
    ten characters after removing dashes and spaces.
    """
    if not phone_number:
        return None
    digits = phone_number.replace("-", "").replace(" ", "")
    if len(digits) != 10:
        return None
    return "-".join((digits[:3], digits[3:6], digits[6:]))
def frommysqlxml(filename):
    """petl source view over `mysql --xml` output (see MySqlXmlView)."""
    return MySqlXmlView(filename)


# Expose the view as etl.frommysqlxml so it composes like other petl sources.
etl.frommysqlxml = frommysqlxml
class MySqlXmlView(Table):
    """petl Table view over `mysql --xml` output.

    Rows are streamed lazily; per petl convention, the first yielded row
    is the header (field names taken from the first <row> element --
    assumes all rows share the same fields).
    """

    def __init__(self, filename):
        self.filename = filename

    def __iter__(self):
        yielded_field_names = False
        for event, row_elem in etree.iterparse(self.filename, tag="row", recover=True):
            field_names = []
            values = []
            for field_elem in row_elem.iter("field"):
                # mysql --xml marks NULL with xsi:nil; empty text is treated
                # the same way.
                if "xsi:nil" in field_elem.attrib or not field_elem.text:
                    value = None
                else:
                    # Strip whitespace
                    value = unicode(field_elem.text).strip()
                field_names.append(field_elem.get("name"))
                values.append(value)
            # Free the parsed element so memory stays flat on large files.
            row_elem.clear()
            if not yielded_field_names:
                yield field_names
                yielded_field_names = True
            yield values
| gwu-libraries/vivo-load | loader/utility.py | utility.py | py | 12,977 | python | en | code | 1 | github-code | 13 |
19482061515 | import logging
from hashlib import shake_128
from typing import Optional, List, Dict, Union
import os
from flask import current_app
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.elements import TextClause
from sqlalchemy.sql.selectable import Select
# debdeps: python3-clickhouse-driver
from clickhouse_driver import Client as Clickhouse
import clickhouse_driver.errors
# query_time = Summary("query", "query", ["hash", ], registry=metrics.registry)

# SQLAlchemy declarative base used when building query constructs.
Base = declarative_base()
# Root logger; handlers/formatting are expected to be configured elsewhere.
log = logging.getLogger()
def _gen_application_name(): # pragma: no cover
try:
machine_id = "/etc/machine-id"
with open(machine_id) as fd:
mid = fd.read(8)
except FileNotFoundError:
mid = "macos"
pid = os.getpid()
return f"api-{mid}-{pid}"
def query_hash(q: str) -> str:
    """Short (8 hex character) stable hash identifying a query statement.

    Allows correlating query statements between API logs and metrics.
    """
    digest = shake_128(q.encode())
    return digest.hexdigest(4)
# # Clickhouse
def init_clickhouse_db(app) -> None:
    """Initializes Clickhouse session.

    Reads CLICKHOUSE_URL from the Flask config and attaches the driver
    client to the app as `app.click` (used by the query helpers below).
    """
    url = app.config["CLICKHOUSE_URL"]
    app.logger.info("Connecting to Clickhouse")
    app.click = Clickhouse.from_url(url)
# A query may be a plain SQL string or a SQLAlchemy construct.
Query = Union[str, TextClause, Select]


def _run_query(query: Query, query_params: dict, query_prio=3):
    """Execute a query and return (column_names, rows).

    SQLAlchemy constructs are compiled to SQL first, using the postgresql
    dialect (presumably close enough to ClickHouse SQL for the constructs
    used in this API -- confirm for new query shapes). Server-side errors
    are logged and re-raised as a generic Exception so driver details do
    not reach callers.
    """
    # 28 s limit keeps interactive API queries bounded.
    settings = {"priority": query_prio, "max_execution_time": 28}
    if isinstance(query, (Select, TextClause)):
        query = str(query.compile(dialect=postgresql.dialect()))
    try:
        q = current_app.click.execute(
            query, query_params, with_column_types=True, settings=settings
        )
    except clickhouse_driver.errors.ServerException as e:
        log.info(e.message)
        raise Exception("Database query error")
    rows, coldata = q
    colnames, coltypes = tuple(zip(*coldata))
    return colnames, rows
def query_click(query: Query, query_params: dict, query_prio=3) -> List[Dict]:
    """Run a query and return its rows as column-name -> value dicts."""
    colnames, rows = _run_query(query, query_params, query_prio=query_prio)
    return [dict(zip(colnames, record)) for record in rows]
def query_click_one_row(
    query: Query, query_params: dict, query_prio=3
) -> Optional[dict]:
    """Run a query and return its first row as a dict, or None if empty."""
    colnames, rows = _run_query(query, query_params, query_prio=query_prio)
    if not rows:
        return None
    return dict(zip(colnames, rows[0]))
def insert_click(query, rows: list) -> int:
    """Insert rows with driver-side type checking; returns the driver's
    result (row count)."""
    assert isinstance(rows, list)
    # High priority, long (300 s) limit: inserts may be bulky.
    settings = {"priority": 1, "max_execution_time": 300}
    client = current_app.click
    return client.execute(query, rows, types_check=True, settings=settings)
def optimize_table(tblname: str) -> None:
    """Force a merge of the given table's parts (OPTIMIZE TABLE ... FINAL).

    NOTE: tblname is interpolated directly into the SQL; callers must only
    pass trusted, internally-defined table names.
    """
    settings = {"priority": 1, "max_execution_time": 300}  # query_prio
    sql = f"OPTIMIZE TABLE {tblname} FINAL"
    current_app.click.execute(sql, {}, settings=settings)
def raw_query(query: Query, query_params: dict, query_prio=1):
    """Execute a query and return the raw driver response (rows, column
    metadata).

    Unlike _run_query, SQLAlchemy constructs are NOT compiled here and
    server exceptions propagate to the caller; uses a long (300 s)
    execution limit.
    """
    settings = {"priority": query_prio, "max_execution_time": 300}
    q = current_app.click.execute(
        query, query_params, with_column_types=True, settings=settings
    )
    return q
| ooni/backend | api/ooniapi/database.py | database.py | py | 3,111 | python | en | code | 43 | github-code | 13 |
#!/usr/bin/env python3
__version__ = "0.1.0"
from nlp_00 import nlp_005
# Problem statement for NLP-100 exercise 05 (n-gram), kept verbatim. Note
# this string sits after the import, so it is a bare expression rather than
# the module docstring.
"""
05. n-gram
与えられたシーケンス(文字列やリストなど)からn-gramを作る関数を作成せよ.
この関数を用い,”I am an NLPer”という文から単語bi-gram,文字bi-gramを得よ.
"""
def test_char_bi_gram():
    """Character bi-grams of "I am an NLPer" (spaces are characters too)."""
    text = "I am an NLPer"
    expected = ["I ", " a", "am", "m ", " a", "an", "n ", " N", "NL", "LP", "Pe", "er"]
    actual = nlp_005.char_bi_gram(text)
    print(actual)
    assert expected == actual
def test_word_bi_gram():
    """Word bi-grams of "I am an NLPer" as adjacent word pairs."""
    text = "I am an NLPer"
    expected = [["I", "am"], ["am", "an"], ["an", "NLPer"]]
    actual = nlp_005.word_bi_gram(text)
    print(actual)
    assert expected == actual
| bulldra/nlp100 | tests/nlp_00/test_nlp_005.py | test_nlp_005.py | py | 734 | python | en | code | 0 | github-code | 13 |
class Solution:
    def search(self, nums: List[int], target: int) -> int:
        """Binary search in a rotated sorted array of distinct values.

        Returns the index of target, or -1 if absent. The XOR of the three
        comparisons collapses the rotated/unrotated cases into a single
        condition that is truthy exactly when the answer lies in [lo, mid].
        """
        lo, hi = 0, len(nums) - 1
        while lo < hi:
            mid = (lo + hi) // 2
            target_in_first_run = nums[0] <= target
            mid_in_first_run = nums[0] <= nums[mid]
            if target_in_first_run ^ mid_in_first_run ^ (target <= nums[mid]):
                hi = mid
            else:
                lo = mid + 1
        return lo if lo < len(nums) and nums[lo] == target else -1
7517027682 | from contextlib import contextmanager
from functools import wraps
import threading
# Thread-local holder for the per-thread deferred-call queue (T.QUEUE),
# created by defer() and consumed by wrap().
T = threading.local()
def check(ret):
    """Assert that a deferred callable returned None; deferred results are
    discarded, so a non-None return indicates a misuse."""
    if ret is None:
        return
    raise ValueError('A deferred call returned a value; this should never happen')
@contextmanager
def defer():
    """Run wrap()-ed calls queued inside the with-block when it exits, in
    FIFO order.

    FIX: the queue is now removed from the thread-local before the queued
    calls run, so T.QUEUE no longer leaks when a queued call raises (or
    returns a value) -- previously that left T.QUEUE set forever, causing
    every later wrapped call on the thread to be deferred indefinitely.
    """
    T.QUEUE = []
    try:
        yield
    finally:
        queue = T.QUEUE
        del T.QUEUE
        for f, args, kwargs in queue:
            check(f(*args, **kwargs))
def wrap(f):
    """Decorate f so that inside a defer() block its calls are queued until
    the block exits; outside one they run immediately.

    Either way f must return None (enforced by check()).
    """
    @wraps(f)
    def deferred(*args, **kwargs):
        queue = getattr(T, 'QUEUE', None)
        if queue is None:
            check(f(*args, **kwargs))
        else:
            queue.append((f, args, kwargs))
    return deferred
| andyljones/boardlaw | pavlov/stats/deferral.py | deferral.py | py | 644 | python | en | code | 29 | github-code | 13 |
class Solution:
    def pivotIndex(self, nums: List[int]) -> int:
        """Return the leftmost index where the sum of the elements strictly
        to its left equals the sum strictly to its right, or -1 if no such
        index exists."""
        total = sum(nums)
        left_sum = 0
        for i, value in enumerate(nums):
            # Right-side sum is everything except the left part and nums[i].
            if left_sum == total - left_sum - value:
                return i
            left_sum += value
        return -1
3083151605 | import os
import io
import json
import base64
import shutil
import tempfile
import logging as logger
import urllib.request
import urllib.error
from urllib.parse import quote
from PIL import Image
from datetime import date, datetime
from wsgiref.handlers import format_date_time
import docker
# Timestamp format used by Wayback-style target dates, e.g. 20200101120000.
WAYBACK_TS_FORMAT = '%Y%m%d%H%M%S'

# Location of WARCPROX proxy used to store WARC records:
WARCPROX = os.getenv("WARCPROX", None)

# Get the Docker Network to create the browser container on:
DOCKER_NETWORK = os.getenv("DOCKER_NETWORK", None)
DOCKER_RENDERER_IMAGE = os.getenv("DOCKER_RENDERER_IMAGE", 'ukwa/webrender-puppeteer:1.0.10')
DOCKER_TIMEOUT = int(os.getenv('DOCKER_TIMEOUT', 15*60)) # long (default) timeout of 15 minutes

# Set up the Docker client:
client = docker.from_env(timeout=DOCKER_TIMEOUT)

# Make sure we get the container image:
# Best-effort pre-pull at import time; failure is logged and ignored here.
try:
    client.images.pull(DOCKER_RENDERER_IMAGE)
except Exception as e:
    logger.warning("Exception when pulling renderer image: %s", e)
    pass
def get_har_with_image(url, selectors=None, proxy=WARCPROX, warc_prefix=date.today().isoformat(),
                       include_rendered=False, return_screenshot=False, target_date=None, scale=None):
    """Gets the raw HAR output from PhantomJs with rendered image(s).

    Runs the headless-browser renderer in a Docker container against url,
    then hands the resulting HAR to _warcprox_write_har_content for WARC
    archival. Returns the parsed HAR dict (or, via that helper, the raw
    screenshot when return_screenshot is True), or {"status": "FAILED"}
    when the renderer produced no output.

    NOTE(review): `selectors` is never used in this body, and the archival
    call passes warcprox=WARCPROX rather than the `proxy` argument --
    confirm both are intentional. Also note warc_prefix's default is
    evaluated once at import time.
    """
    # Set up Docker container environment:
    if not proxy and 'HTTP_PROXY' in os.environ:
        proxy = os.environ['HTTP_PROXY']
    d_env = {
        'HTTP_PROXY': proxy,
        'HTTPS_PROXY': proxy,
        'LC_ALL': 'en_US.utf8',
        'USER_AGENT_ADDITIONAL': "bl.uk_ldfc_renderbot/3.0.0 (+ http://www.bl.uk/aboutus/legaldeposit/websites/websites/faqswebmaster/index.html)",
        'WARCPROX_WARC_PREFIX': warc_prefix
    }
    # Add the datetime if needed:
    # (target_date is a Wayback-style timestamp, converted to an HTTP date
    # for Memento datetime negotiation.)
    if target_date:
        td = datetime.strptime(target_date, WAYBACK_TS_FORMAT)
        d_env['MEMENTO_ACCEPT_DATETIME'] = format_date_time(td.timestamp())
    # Add device scale factor if set:
    if scale:
        d_env['DEVICE_SCALE_FACTOR'] = scale
    # Set up volume mount:
    tmp_dir = tempfile.mkdtemp(dir=os.environ.get('WEB_RENDER_TMP', '/tmp/'))
    os.chmod(tmp_dir, 0o777) # Make sure the browser execution user can write to it
    d_vol = {
        tmp_dir: {'bind': '/output', 'mode': 'rw'}
    }
    # Set up the container and run it:
    d_c = client.containers.create(DOCKER_RENDERER_IMAGE, command="node renderer.js %s" % url, init=True,
                                   environment=d_env, volumes=d_vol, cap_add=['SYS_ADMIN'], network=DOCKER_NETWORK,
                                   detach=True, restart_policy={"Name": "on-failure", "MaximumRetryCount": 2})
    d_c.start()
    d_c.wait(timeout=60*7) # Kill renders that take far too long (7 mins)
    #d_c.wait(timeout=10) # Short-time out for debugging.
    d_logs = d_c.logs()
    d_c.stop()
    d_c.remove(force=True)
    ## Attempt to ensure clean-up: NB I think the timeout is likely the problem, rather than the previous dc_remove not working
    #client.remove_container(d_c, force=True)
    # If this fails completely, assume this was a temporary problem and suggest retrying the request:
    tmp = os.path.join(tmp_dir,'./rendered.har')
    if not os.path.exists(tmp):
        logger.error("Rendering to JSON failed for %s" % url)
        # NOTE(review): "\logs" looks like a typo for "\nlogs" (compare the
        # debug branch below); it is a literal backslash-l in the output.
        logger.warning("FAILED:\logs=%s" % d_logs )
        return {"status": "FAILED"}
    else:
        logger.debug("GOT:\nlogs=%s" % d_logs)
        with open(tmp, "r") as i:
            har = i.read()
    # Only reached on success; the temp dir is leaked on the FAILED path.
    shutil.rmtree(tmp_dir)
    output = _warcprox_write_har_content(har, url, warc_prefix, warcprox=WARCPROX,
                                         include_rendered_in_har=include_rendered, return_screenshot=return_screenshot)
    return output
def full_and_thumb_jpegs(large_png):
    """Convert PNG bytes to JPEG bytes at full size plus a 300px-wide
    thumbnail (aspect ratio preserved). Returns (full_jpeg, thumb_jpeg)."""
    # Load the image and drop the alpha channel:
    img = Image.open(io.BytesIO(large_png))
    img = remove_transparency(img)
    img = img.convert("RGB")
    # Save it as a JPEG:
    out = io.BytesIO()
    img.save(out, "jpeg", quality=95)
    full_jpeg = out.getvalue()
    w, h = img.size
    logger.debug("Types are %s, %s" % ( type(w), type(h) ))
    h = int(h)
    logger.debug("IMAGE %i x %x" % (w,h))
    # Scale the height to keep the aspect ratio at 300px width.
    thumb_width = 300
    thumb_height = int((float(thumb_width) / w) * h)
    logger.debug("Got %i x %x" % (thumb_width,thumb_height))
    # thumbnail() only ever shrinks, modifying img in place.
    img.thumbnail((thumb_width, thumb_height))
    out = io.BytesIO()
    img.save(out, "jpeg", quality=95)
    thumb_jpeg = out.getvalue()
    return full_jpeg, thumb_jpeg
def remove_transparency(im, bg_colour=(255, 255, 255)):
    """Flatten any transparency in a PIL image onto a solid background
    colour (default white); images without transparency pass through."""
    # Only process if image has transparency (http://stackoverflow.com/a/1963146)
    if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
        # Need to convert to RGBA if LA format due to a bug in PIL (http://stackoverflow.com/a/1963146)
        alpha = im.convert('RGBA').split()[-1]
        # Create a new background image of our matt color.
        # Must be RGBA because paste requires both images have the same format
        # (http://stackoverflow.com/a/8720632 and http://stackoverflow.com/a/9459208)
        bg = Image.new("RGBA", im.size, bg_colour + (255,))
        bg.paste(im, mask=alpha)
        return bg
    else:
        return im
# HTML5: https://dev.w3.org/html5/spec-preview/image-maps.html
# <img src="shapes.png" usemap="#shapes"
#  alt="Four shapes are available: a red hollow box, a green circle, a blue triangle, and a yellow four-pointed star.">
# <map name="shapes">
#  <area shape=rect coords="50,50,100,100"> <!-- the hole in the red box -->
#  <area shape=rect coords="25,25,125,125" href="red.html" alt="Red box.">
#  <area shape=circle coords="200,75,50" href="green.html" alt="Green circle.">
#  <area shape=poly coords="325,25,262,125,388,125" href="blue.html" alt="Blue triangle.">
#  <area shape=poly coords="450,25,435,60,400,75,435,90,450,125,465,90,500,75,465,60"
#        href="yellow.html" alt="Yellow star.">
# </map>
# <img alt="Embedded Image" src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIA..." />
def build_imagemap(page_jpeg, page):
    """Build a static HTML page: the screenshot embedded as an inline
    base64 JPEG, overlaid with an HTML image map whose clickable rectangles
    come from the boxes in page['map'] (boxes without an 'href' are
    skipped)."""
    b64_jpeg = base64.b64encode(page_jpeg).decode('utf-8')
    parts = [
        "<html><head><title>%s [Static version of %s]</title>\n</head>\n<body style=\"margin: 0;\">\n"
        % (page['title'], page['url']),
        '<img src="data:image/jpeg;base64,%s" usemap="#shapes" alt="%s">\n' % (b64_jpeg, page['title']),
        '<map name="shapes">\n',
    ]
    for box in page['map']:
        if 'href' not in box:
            logger.debug("Skipping box with no 'href': %s" % box)
            continue
        loc = box['location']
        x1, y1 = loc['left'], loc['top']
        x2, y2 = x1 + loc['width'], y1 + loc['height']
        parts.append('<area shape=rect coords="%i,%i,%i,%i" href="%s">\n' % (x1, y1, x2, y2, box['href']))
    parts.append('</map>\n')
    parts.append("</body>\n</html>\n")
    return "".join(parts)
# We use the page ID (i.e. the original URL) to identify records, but note that the final URL can be different.
def _warcprox_write_har_content(har_js, url, warc_prefix, warcprox=WARCPROX, include_rendered_in_har=False,
                                return_screenshot=False):
    """Unpack a rendered HAR and write its artefacts to warcprox as WARC
    resource records.

    For each page this writes: the on-ready DOM ("onreadydom:<url>"), every
    rendered element ("screenshot:"/"pdf:" URLs, with non-root selectors
    encoded as XPointer fragments), and -- when a full-page PNG exists -- a
    JPEG thumbnail ("thumbnail:<url>") and an image-map HTML page
    ("imagemap:<url>"). Finally the (optionally stripped) HAR itself is
    written as "har:<url>".

    Returns the parsed HAR dict, or the full-page PNG bytes when
    return_screenshot is True and a root PNG was rendered. Raises Exception
    when the HAR contains no request/response entries.
    """
    warcprox_headers = { "Warcprox-Meta" : json.dumps( { 'warc-prefix' : warc_prefix}) }
    har = json.loads(har_js)
    # If there are no entries, something went very wrong:
    if len(har['log']['entries']) == 0:
        logger.error("No entries in log: " + har_js)
        raise Exception("No requests/responses logged! Rendering failed!")
    # Look at page contents:
    for page in har['log']['pages']:
        # The renderer stores the DOM base64-encoded in the HAR.
        dom = page['renderedContent']['text']
        dom = base64.b64decode(dom)
        # Store the page URL, which can be different (redirects etc.)
        final_location = page.get('url', None)
        # Store the on-ready DOM:
        _warcprox_write_record(warcprox_address=warcprox,
                               url="onreadydom:{}".format(url),
                               warc_type="resource", content_type="text/html",
                               payload=dom, location=final_location,
                               extra_headers= warcprox_headers )
        # Store the rendered elements:
        full_png = None
        for rende in page['renderedElements']:
            selector = rende['selector']
            im_fmt = rende['format']
            url_prefix = 'screenshot'
            # Map the renderer's format tag to a MIME type (and, for PDFs,
            # a different record URL prefix).
            if im_fmt == 'PNG':
                im_fmt = 'image/png'
            elif im_fmt == 'JPEG' or im_fmt == 'JPG':
                im_fmt = 'image/jpeg'
            elif im_fmt == 'PDF':
                im_fmt = 'application/pdf'
                url_prefix = 'pdf'
            else:
                im_fmt = 'application/octet-stream; ext=%s' % im_fmt
            content = rende['content']
            image = base64.b64decode(content)
            # Keep the :root image
            if selector == ':root':
                if im_fmt == 'image/png':
                    full_png = image
                xpointurl = url
            else:
                # https://www.w3.org/TR/2003/REC-xptr-framework-20030325/
                xpointurl = "%s#xpointer(%s)" % (url, selector)
            # And write the WARC:
            _warcprox_write_record(warcprox_address=warcprox,
                                   url="{}:{}".format(url_prefix,xpointurl),
                                   warc_type="resource", content_type=im_fmt,
                                   payload=image, location=final_location,
                                   extra_headers=warcprox_headers)
        # If we have a full-page PNG:
        if full_png:
            # Store a thumbnail:
            (full_jpeg, thumb_jpeg) = full_and_thumb_jpegs(full_png)
            _warcprox_write_record(warcprox_address=warcprox,
                                   url="thumbnail:{}".format(url),
                                   warc_type="resource", content_type='image/jpeg',
                                   payload=thumb_jpeg, location=final_location, extra_headers=warcprox_headers)
            # Store an image map HTML file:
            imagemap = build_imagemap(full_jpeg, page)
            _warcprox_write_record(warcprox_address=warcprox,
                                   url="imagemap:{}".format(url),
                                   warc_type="resource", content_type='text/html; charset="utf-8"',
                                   payload=bytearray(imagemap,'UTF-8'), location=final_location,
                                   extra_headers=warcprox_headers)
            # NOTE(review): returning here skips archiving the HAR record
            # (and any remaining pages) -- confirm that is intended.
            if return_screenshot:
                return full_png
        # And remove rendered forms from HAR:
        if not include_rendered_in_har:
            del page['renderedElements']
            del page['renderedContent']
    # Store the HAR
    _warcprox_write_record(warcprox_address=warcprox,
                           url="har:{}".format(url),
                           warc_type="resource", content_type="application/json",
                           payload=bytearray(json.dumps(har), "UTF-8"),
                           extra_headers=warcprox_headers)
    return har
def _warcprox_write_record(
        warcprox_address, url, warc_type, content_type,
        payload, location=None, extra_headers=None):
    """Send one WARCPROX_WRITE_RECORD request to warcprox.

    payload must be bytes. When warcprox_address is unset this is a no-op
    (logged); response errors are logged, never raised, so archival is
    best-effort.
    """
    headers = {"Content-Type": content_type, "WARC-Type": warc_type, "Host": "N/A"}
    if location:
        headers['Location'] = location
    if extra_headers:
        headers.update(extra_headers)
    # Cope with Unicode URLs (based on https://stackoverflow.com/a/4494314/6689)
    url = quote(url, safe="/#%[]=:;$&()+,!?*@'~")
    request = urllib.request.Request(url, method="WARCPROX_WRITE_RECORD",
                                     headers=headers, data=payload)
    # XXX setting request.type="http" is a hack to stop urllib from trying
    # to tunnel if url is https
    request.type = "http"
    if warcprox_address:
        request.set_proxy(warcprox_address, "http")
        logger.debug("Connecting via "+warcprox_address)
    else:
        logger.info("Cannot write WARC records without warcprox!")
        return
    try:
        with urllib.request.urlopen(request) as response:
            # warcprox replies 204 No Content on a successful write.
            if response.status != 204:
                logger.warning(
                    'got "%s %s" response on warcprox '
                    'WARCPROX_WRITE_RECORD request (expected 204)',
                    response.status, response.reason)
    except urllib.error.HTTPError as e:
        logger.warning(
            'got "%s %s" response on warcprox '
            'WARCPROX_WRITE_RECORD request (expected 204)',
            e.getcode(), e.info())
| ukwa/webrender-api | webrender/puppeteer/docker.py | docker.py | py | 12,353 | python | en | code | 0 | github-code | 13 |
7189285960 | from pathlib import Path
from auto_argparse import parse_args_and_run_dec
from .planner import convert_to_pddl
@parse_args_and_run_dec
def convert_pdkbddl(pdkbddl_path: str):
    """
    Convert a PDKBDDL file to PDDL domain and problem files.

    The output files will be named {name}_domain.pddl and {name}_problem.pddl
    and saved in the current directory, where {name} is the name of the
    PDKBDDL file without its .pdkbddl extension.
    (Fixed: the old docstring said "{name}_prob.pddl", which did not match
    the file actually written below.)

    :param pdkbddl_path: Path to PDKBDDL file.
    """
    pdkbddl_file = Path(pdkbddl_path)
    # Strip the extension to get the base name used for both output files.
    name = pdkbddl_file.name.replace(".pdkbddl", "")
    pddl = convert_to_pddl(pdkbddl_file.read_text())
    Path(f"{name}_domain.pddl").write_text(pddl["domain"])
    Path(f"{name}_problem.pddl").write_text(pddl["problem"])
| neighthan/pdkb-planning | pdkb/scripts.py | scripts.py | py | 728 | python | en | code | 0 | github-code | 13 |
3617463718 | # Euclid's Extended Algorithm computes GCD
# and the coefficents of Bezout's identity
def eucalg(a, b):
    """Extended Euclidean algorithm.

    Return a pair (x, y) of Bezout coefficients such that
    x*a + y*b == gcd(a, b).
    """
    # Work on (larger, smaller); remember whether the arguments were
    # swapped so the coefficients can be returned in the caller's order.
    swapped = a < b
    if swapped:
        a, b = b, a
    # Invariant: a == ca[0]*a0 + ca[1]*b0 and b == cb[0]*a0 + cb[1]*b0,
    # where (a0, b0) are the (possibly swapped) starting values.
    ca, cb = (1, 0), (0, 1)
    while b:
        q = a // b
        # Classic Euclid step, applied to both the values and the
        # coefficient pairs in lockstep.
        a, b = b, a - q * b
        ca, cb = cb, (ca[0] - q * cb[0], ca[1] - q * cb[1])
    return (ca[1], ca[0]) if swapped else ca
| ThomasNJordan/GCI-Cryptography | EEuclid.py | EEuclid.py | py | 686 | python | en | code | 0 | github-code | 13 |
7900261139 | import numpy as np
import tensorflow as tf
from network import neural_network
from load_2d_dataset import load_2d_dataset
from matplotlib import pyplot as plt
# Evaluate the trained "Model_G" network on the 2-D dataset and plot the
# prediction, the absolute error against the true field, and the true field.
dataset = load_2d_dataset()
# Coordinate grid covering [-1, 1] in steps of 0.01, rounded so that exact
# equality lookups against the dataset coordinates work below.
x_arr = np.around(np.arange(-1, 1.001, 0.01), decimals=2)
res_img = np.zeros([x_arr.shape[0], x_arr.shape[0]])
t_d = np.zeros([x_arr.shape[0], x_arr.shape[0]])
u_vals = np.zeros([x_arr.shape[0], x_arr.shape[0]])

# NOTE(review): TF1-style graph API (placeholders/Session); assumes the
# dataset columns are [x, y, ?, u] -- confirm against load_2d_dataset.
x = tf.placeholder(tf.float64, [None, 2])
y = tf.placeholder(tf.float64, [None, 1])

model = neural_network(2,1,[10], name='Model_G_')
network_out = model.value(x)
init = tf.initialize_all_variables()
saver = tf.train.Saver(save_relative_paths=True)

with tf.Session() as sess:
    sess.run(init)
    # Restore trained weights before evaluating.
    path = saver.restore(sess, "./Models_sq/Model_G/model_G.ckpt")
    y_pred = sess.run(network_out, feed_dict={x: dataset[:, 0:2].reshape((dataset.shape[0], 2))})
    # Scatter predictions / true values onto the image grid by matching
    # each sample's (x, y) coordinate to its grid index.
    for it in range(dataset.shape[0]):
        idx_x = np.where(x_arr == dataset[it, 0])
        idx_y = np.where(x_arr == dataset[it, 1])
        res_img[idx_x, idx_y] = y_pred[it]
        u_vals[idx_x, idx_y] = dataset[it, 3]

# Prediction heat map.
plt.figure(1)
plt.imshow(res_img)
plt.colorbar()
plt.savefig('./Plots_Test/G_res.png')

# Absolute error between prediction and true field.
plt.figure()
plt.imshow(abs(res_img-u_vals))
plt.colorbar()
plt.savefig('./Plots_Test/diff_u_g.png')

# True field for reference.
plt.figure()
plt.imshow(u_vals)
plt.colorbar()
plt.savefig('./Plots_Test/True_Data.png')
print("All Done")
21469519685 | global db_autor, db_user, db_prest, db_libro, db_categoria
#funzioni per creare i vari dizionari
def autor():
    """Return a fresh, empty author table (one empty list per column)."""
    columns = ("nome", "cognome", "anno", "note", "id")
    db_autor = {column: [] for column in columns}
    return db_autor
def user():
    """Return a fresh, empty library-user table (one empty list per column)."""
    columns = ("nome", "cognome", "anno_reg", "telefono", "tessera", "nprest")
    db_user = {column: [] for column in columns}
    return db_user
def libro():
    """Return a fresh, empty book table (one empty list per column)."""
    columns = (
        "isbn", "titolo", "lingua", "anno", "editore",
        "pagine", "categoria", "copie", "autore",
    )
    db_libro = {column: [] for column in columns}
    return db_libro
def categoria():
    """Return a fresh, empty category table (one empty list per column)."""
    db_categoria = {column: [] for column in ("id", "nome")}
    return db_categoria
def prest():
    """Return a fresh, empty loan table (one empty list per column)."""
    columns = (
        "isbn", "id", "tessera", "data_inizio",
        "data_fine", "stato", "data_consegna",
    )
    db_prest = {column: [] for column in columns}
    return db_prest
#if __name__ == '__main__':
| Sbeir/Python_SQLite_Exam | esame_ufs01/Biblio/create_db.py | create_db.py | py | 994 | python | uz | code | 0 | github-code | 13 |
41985444811 |
def tester(ls, targ1, targ2):
for i in range(len(ls)-1):
if ls[i] == targ1 and ls[i+1] == targ2 or ls[i] == targ2 and ls[i+1] == targ1:
return True
return False
print("should be False: ", tester([3,1,0,19,4], 19, 5))
print("should be True: ", tester([3,1,0,19], 19, 0))
def tester2(lis, tar1, tar2):
if tar1 in lis and tar2 in lis:
if lis.index(tar1) - lis.index(tar2) == 1 or lis.index(tar1) - lis.index(tar2) == -1:
return True
return False
print("should be False: ", tester2([3,1,0,19,4], 19, 5))
print("should be True: ", tester2([3,1,0,19], 19, 0)) | ToddGallegos/CodingTemple | week_3_day_4/whiteboard.py | whiteboard.py | py | 621 | python | en | code | 2 | github-code | 13 |
72340130899 | import importlib, webbrowser, datetime, logging, script
from time import sleep
from keyboard import read_key
try: import save
except ModuleNotFoundError:
with open('save.py', 'w') as save:
save.write(script.data)
exit()
class LogHandler:
    """Formats log and error lines with a 12-hour timestamp prefix.

    Bug fix: the old ``print_errors``/``print_logs`` did ``self.hour -= 12``
    for afternoon hours, permanently mutating the instance, so a second call
    on the same handler saw an already-shifted hour and logged "3:..AM"
    instead of "3:..PM".  The shift is now done in a local variable.

    NOTE(review): the timestamp fields are captured once in ``__init__``,
    not when a line is written -- a long-lived handler logs a stale time.
    Kept as-is to avoid changing observable behaviour.
    """

    def __init__(self):
        self.day = datetime.date.today().day
        self.month = datetime.date.today().month
        self.year = datetime.date.today().year
        self.minute = datetime.datetime.today().minute
        self.hour = datetime.datetime.today().hour

    def _stamp(self):
        """Return the "[d-m-y]h:mAM/PM" prefix without mutating self.hour."""
        hour = self.hour
        if hour > 12:
            hour -= 12
            suffix = "PM"
        elif hour == 12:
            suffix = "PM"
        else:
            # Covers midnight (hour == 0) and all morning hours, exactly
            # like the original's duplicated AM branches.
            suffix = "AM"
        return "[{}-{}-{}]{}:{}{}".format(
            self.day, self.month, self.year, hour, self.minute, suffix)

    def print_errors(self, filename, error):
        # `filename` is an already-open, writable file object.
        filename.write("{}: {}\n".format(self._stamp(), error))

    def print_logs(self, key, url):
        logging.info("{}: [{}] => [{}]\n".format(self._stamp(), key, url))
# All key->URL events go to a plain-text log next to the code directory.
logging.basicConfig(
    filename = r'..\key_logs.log',
    level = logging.INFO,
    format = '%(message)s'
)

if __name__ == "__main__":
    log_handler = LogHandler()
    # Main loop: reload the user's key->URL mapping each iteration so edits
    # to save.py take effect without restarting the program.
    while True:
        importlib.reload(save)
        try:
            # Blocks until a key is pressed.
            key = read_key()
            if key in save.mapping:
                if save.mapping[key] != "":
                    webbrowser.open(url=save.mapping[key])
                    log_handler.print_logs(
                        key = key,
                        url = save.mapping[key]
                    )
                    # Debounce: avoid opening the URL many times while the
                    # key is held down.
                    sleep(1)
                else:
                    continue
            else:
                continue
        except Exception as error:
            # Any failure is appended to a traceback file and ends the loop.
            with open(r'..\traceback.txt', 'a+') as traceback:
                log_handler.print_errors(
                    filename = traceback,
                    error = error
                ); break
73006058259 | #!/usr/bin/python
import sys
import math
import copy
import random
sys.setrecursionlimit(10000)
class player:
    """Holds the single-character symbol ('x' or '0') a participant plays."""

    def __init__(self, PlayerSymbol):
        # Public attribute name kept -- other code in this module reads it.
        self.PlayerSymbol = PlayerSymbol

    def GetPlayerSymbol(self):
        """Return the symbol this player places on the board."""
        return self.PlayerSymbol
class board:
    """3x3 tic-tac-toe grid; '-' marks an empty intersection."""

    def __init__(self):
        self.grid = [['-'] * 3 for _ in range(3)]
        self.positionleft = 9
        # Coordinates not yet played, in row-major order like the original
        # hand-written list.
        self.available = [(row, col) for row in range(3) for col in range(3)]

    def CheckIfPlayerWins(self, symbol):
        """Return True when `symbol` fills a whole row, column or diagonal."""
        g = self.grid
        rows = [g[r] for r in range(3)]
        cols = [[g[r][c] for r in range(3)] for c in range(3)]
        diagonals = [
            [g[0][0], g[1][1], g[2][2]],
            [g[0][2], g[1][1], g[2][0]],
        ]
        return any(
            all(cell == symbol for cell in line)
            for line in rows + cols + diagonals
        )

    def DisplayBoard(self):
        """Print the grid, one row (as a list) per line."""
        for row in self.grid:
            print(row[:])

    def MarkOnPosition(self, x, y, symbol):
        """Place `symbol` at (x, y) and retire that intersection."""
        self.grid[x][y] = symbol
        self.positionleft -= 1
        self.available.remove((x, y))
class game:
    """Interactive tic-tac-toe match: human ('0') vs MCTS computer ('x').

    NOTE(review): this file uses Python 2 idioms elsewhere (sys.maxint);
    under Python 3, input() returns str, so the raw x/y values passed to
    MarkOnPosition would be strings -- confirm the intended interpreter.
    """
    def __init__(self):
        self.C_Board=board()
        self.computer=player('x')
        self.user=player('0')
    def Plays(self):
        """Run the full game loop: 4 user/computer turn pairs + final input."""
        # 1000 MCTS iterations per computer move.
        computermove=MCTS(1000)
        self.C_Board.DisplayBoard()
        for i in range(4):
            print('Enter position (\'0\' base indexing)')
            x=input('Enter x:')
            y=input('Enter y:')
            self.C_Board.MarkOnPosition(x,y,self.user.GetPlayerSymbol())
            self.C_Board.DisplayBoard()
            print('Now its Computer turn\n')
            # here use monte carlo tree search
            x,y=computermove.getMove(self)
            self.C_Board.MarkOnPosition(x,y,self.computer.GetPlayerSymbol())
            self.C_Board.DisplayBoard()
        # NOTE(review): this final user move is read but never placed on the
        # board before the winner is decided -- looks like a bug; confirm.
        print('Enter position (\'0\' base indexing)')
        x=input('Enter x:')
        y=input('Enter y:')
        self.C_Board.DisplayBoard()
        if self.C_Board.CheckIfPlayerWins(self.computer.GetPlayerSymbol()):
            print('Computer Wins!!')
        else:
            if self.C_Board.CheckIfPlayerWins(self.user.GetPlayerSymbol()):
                print('User Wins!!')
            else:
                print('Draw !!')
class Node:
    """One node of the Monte Carlo search tree.

    Tracks visit/win statistics plus the move that produced this position,
    so the best move can be read back once the search budget is spent.
    """

    def __init__(self, parent, children, unexploremove, moveusedforthisnode):
        self.parent = parent
        self.children = children            # None until first expansion
        self.unexploremove = unexploremove  # moves still available here
        self.numSimulation = 0              # rollouts through this node
        self.winSimulation = 0              # accumulated reward ('x' wins)
        self.moveusedforthisnode = moveusedforthisnode

    def select(self):
        """Return the child with the highest UCT value (or self if none wins)."""
        selectedNode = self
        # Bug fix: the original seeded the maximum with `-sys.maxint - 1`,
        # which is Python 2 only and raises AttributeError on Python 3.
        maximum = float('-inf')
        for child in self.children:
            uctvalue = self.getUCTValue(child)
            if uctvalue > maximum:
                maximum = uctvalue
                selectedNode = child
        return selectedNode

    def expand(self, C_game):
        """Attach one child node for a random available move and return it."""
        if C_game.C_Board.positionleft == 0:
            # Terminal position: nothing to expand.
            return self
        x, y = random.choice(C_game.C_Board.available)
        if self.children is None:
            self.children = []
        # Expansion always plays the computer's symbol on the simulated board.
        C_game.C_Board.MarkOnPosition(x, y, 'x')
        child = Node(self, None, C_game.C_Board.available, [x, y])
        self.children.append(child)
        return child

    def backpropagate(self, reward):
        """Fold a rollout result into this node and every ancestor."""
        self.numSimulation = self.numSimulation + 1
        self.winSimulation = self.winSimulation + reward
        if self.parent is not None:
            self.parent.backpropagate(reward)

    def getUCTValue(self, child):
        """Upper-confidence bound for `child`; unvisited children score 1."""
        if child.numSimulation == 0:
            uctValue = 1
        else:
            # Exploitation term (win ratio) + exploration term.
            uctValue = (child.winSimulation / child.numSimulation) \
                + (2 * (math.log(self.numSimulation) / child.numSimulation)) ** 0.5
        return uctValue

    def getMostVisitedNode(self):
        """Return the most-explored child -- the move to actually play."""
        mostvisitcount = 0
        bestChild = None
        for child in self.children:
            if child.numSimulation > mostvisitcount:
                bestChild = child
                mostvisitcount = child.numSimulation
        return bestChild
class MCTS:
    """Monte Carlo Tree Search driver: select -> expand -> rollout -> backprop."""
    def __init__(self,itr):
        # Number of simulated games per call to getMove.
        self.maxiteration=itr
    #parent,children,unexploremoveX,unexploremoveY,moveusedforthisnode
    def getMove(self,M_game):
        """Run the search budget on a copy of `M_game`; return [x, y] of the
        most-visited root child."""
        rootnode=Node(None,None,M_game.C_Board.available,None)
        for i in range(0,self.maxiteration):
            # Simulate on a deep copy so the real game state is untouched.
            game_copy= copy.deepcopy(M_game)
            new_node=self.select(rootnode,game_copy)
            new_node=new_node.expand(game_copy)
            reward=self.rollout(game_copy)
            new_node.backpropagate(reward)
        mostVisitedChild=rootnode.getMostVisitedNode()
        return mostVisitedChild.moveusedforthisnode
    def select(self,t_node,t_game):
        """Descend via UCT until reaching a node that has never been expanded
        (children is None; an empty list still counts as expanded)."""
        while t_node.children is not None:
            t_node=t_node.select()
        return t_node
    def rollout(self,game_copy):
        """Play random moves (user '0' first, alternating) until the board is
        full; reward 1 if the computer's 'x' ends up winning, else 0."""
        symbol='0'
        while game_copy.C_Board.positionleft != 0:
            x,y=random.choice(game_copy.C_Board.available)
            game_copy.C_Board.MarkOnPosition(x,y,symbol)
            if symbol == '0':
                symbol='x'
            else:
                symbol='0'
        if game_copy.C_Board.CheckIfPlayerWins('x'):
            return 1
        else:
            return 0
# Script entry point: build a game and run one interactive match.
obj= game()
obj.Plays()
# Credits: http://codegatherer.com/mcts_tic_tac_toe.php
| hammadwaseem3/Tic-tac-tow-Monte-Carlo-Tree-Search- | tic-tac-tow.py | tic-tac-tow.py | py | 5,216 | python | en | code | 0 | github-code | 13 |
21989017677 | import os
from kedro.pipeline import Pipeline, node, pipeline
#import kedro
#import numpy as np
import re
#import h5py
from tqdm import tqdm
import torch
import torch.nn as nn
import yaml
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, EarlyStopping, ModelCheckpoint
from pytorch_lightning import loggers as pl_loggers
from pytorch_pfn_extras.config import Config
from .CONFIG_TYPES import CONFIG_TYPES
from pydiver.models import lstm
class DiverModule(pl.LightningModule):
    """LightningModule wrapping the configured model, loss and optimizer.

    NOTE(review): reads the module-level `cfg` object, which is populated by
    `train()` before this class is instantiated -- confirm ordering holds
    for all entry points.
    """
    def __init__(self):
        super(DiverModule, self).__init__()
        # DataParallel wrapper around the model built from the config.
        self.model = nn.DataParallel(cfg['/model'])
        self.loss = cfg['/loss']
        self.val_loss = cfg['/loss']
        # Number of configured depths; passed to the model as max_depth.
        self.output_length = len(cfg['/dataset']['depths'])
    def forward(self, input):
        return self.model(input, max_depth=self.output_length)
    def configure_optimizers(self):
        # Scheduler steps are driven by the monitored validation loss.
        return {"optimizer": cfg['/optimizer'], "lr_scheduler": cfg["/scheduler"], "monitor": "val_loss"}
    def training_step(self, train_batch, batch_idx):
        # Batches are dicts with 'X' (input) and 'y' (target) entries.
        X = train_batch['X']
        y = train_batch['y']
        y_pred = self.forward(X)
        loss = self.loss(y_pred, y)
        self.log('train_loss', loss, on_step=False, on_epoch=True, prog_bar=True)
        return loss
    def validation_step(self, valid_batch, batch_idx):
        X = valid_batch['X']
        y = valid_batch['y']
        y_pred = self.forward(X)
        loss = self.loss(y_pred, y)
        # 'val_loss' is what the checkpoint callback and scheduler monitor.
        self.log('val_loss', loss, on_step=False, on_epoch=True, prog_bar=True)
class DataModule(pl.LightningDataModule):
    """Thin LightningDataModule that serves the dataloaders built in `cfg`."""
    def __init__(self):
        super(DataModule, self).__init__()
    def train_dataloader(self):
        return cfg['/dataloader']['train']
    def val_dataloader(self):
        return cfg['/dataloader']['val']
def config(partition_fnc_X, partition_fnc_Y, params):
    """Build the pfn-extras Config from the YAML file named in `params`.

    The two partition callables are registered into CONFIG_TYPES so the YAML
    can reference them as 'input_data' / 'true_output_data' factories.
    """
    #import IPython ; IPython.embed() ; exit(1)
    CONFIG_TYPES['input_data'] = partition_fnc_X
    CONFIG_TYPES['true_output_data'] = partition_fnc_Y
    with open(params['config_file'], 'r') as file:
        pre_eval_cfg = yaml.safe_load(file)
    return Config(pre_eval_cfg, types=CONFIG_TYPES)
def train(dataset_X, dataset_Y, params):
    """Kedro node: train the diver model and return its state dict.

    :param dataset_X: partitioned dataset mapping names -> input partitions.
    :param dataset_Y: partitioned dataset mapping names -> target partitions.
    :param params: pipeline parameters; must contain 'config_file'.
    :return: dict mapping the configured model name to its state_dict.
    """
    global cfg
    files_X, files_Y = list(dataset_X.keys()), list(dataset_Y.keys())
    # Bug fix: the original removed items from the list it was iterating
    # over (`for name in files_X: ... files_X.remove(name)`), which skips
    # the element following each removal.  Rebuild the lists instead,
    # keeping only names that end in 'train_<digits>'.
    pattern = re.compile(r'train_\d+$')
    files_X = [name for name in files_X if pattern.search(name)]
    files_Y = [name for name in files_Y if pattern.search(name)]
    files_X.sort()
    files_Y.sort()
    # Build the global config from the first partition of each dataset.
    cfg = config(dataset_X['X_train_00'], dataset_Y['Y_train_00'], params)
    model = DiverModule()
    datamodule = DataModule()
    logger = pl_loggers.WandbLogger(name=cfg["/name"], project=cfg["/project"], save_dir="logs/")
    trainer = pl.Trainer(gpus=1,
                         max_epochs=cfg['/globals']['max_epochs'],
                         progress_bar_refresh_rate=1,
                         callbacks=[
                             # Keep the best checkpoint by validation loss.
                             ModelCheckpoint(monitor='val_loss',
                                             dirpath="data/06_models/regimeB",
                                             filename=cfg["/name"] + '{epoch}',
                                             verbose=True)
                         ],
                         logger=logger,
                         )
    trainer.fit(model, datamodule)
    # `model.model` is the DataParallel wrapper; its state_dict is what the
    # downstream catalog entry expects.
    return {cfg["/name"]: model.model.state_dict()}
def create_pipeline(**kwargs):
    """Build the single-node Kedro pipeline that trains the model."""
    training_node = node(
        func=train,
        inputs=["X_train", "Y_train", "params:data_science"],
        outputs="models",
        name="training_node",
    )
    return Pipeline([training_node])
| RolandGit95/FromSurface2DepthKedro | src/pydiver/pipelines/training/pipeline.py | pipeline.py | py | 4,298 | python | en | code | 1 | github-code | 13 |
42221756020 | """
Author:
Corey R. Randall (08 June 2018)
Description:
This is an external function that calculates fluxes with the Dusty Gas
Model approach. It was written to be used in 2D_NRSupport_FluxModel.
"""
def Flux_Calc(SV,Nx,dX,Ny,dY,Nspecies,BC_in,inlet_BC,gas,phi_g,tau_g,d_p):
    """Compute Dusty-Gas-Model mass fluxes on the Nx-by-Ny cell grid.

    :param SV: flattened state vector of species mass densities, ordered
        Nspecies values per cell, cells in row-major (y-row by x) order.
    :param Nx, Ny: grid cell counts in x and y.
    :param dX, dY: cell spacings in x and y.
    :param Nspecies: number of gas-phase species per cell.
    :param BC_in: number of inlet cells along the top boundary.
    :param inlet_BC: species mass densities imposed at the inlet.
    :param gas: transport object providing molecular_weights, T and
        molar_fluxes(T1, T2, D1, D2, Y1, Y2, dist).
    :param phi_g, tau_g, d_p: porosity/tortuosity/particle-diameter inputs
        (currently unused here -- presumably consumed inside `gas`; confirm).
    :return: (Fluxes_X, Fluxes_Y) flattened mass-flux arrays, including the
        zero-flux outer boundaries in x and the inlet fluxes in y.
    """
    import numpy as np
    Fluxes_X = np.zeros((Nx+1)*Ny*Nspecies) # Fluxes in x-direction w/ 0's BC
    Fluxes_X_int = np.zeros((Nx-1)*Ny*Nspecies) # Interior x-direction fluxes
    Fluxes_Y = np.zeros(Nx*(Ny+1)*Nspecies) # Fluxes in y-direction
    # Get molecular weights for mol -> mass conversions:
    MWs = gas.molecular_weights
    # Set constant temperature from main function:
    T = gas.T
    # Initialize counters for flux loops:
    cnt_x = 0
    cnt_y = Nx*Nspecies
    # Calculate each x-direction flux:
    for j in range(Ny):
        ind1 = j*Nx*Nspecies # First -> last index of cell on left
        ind2 = ind1 + Nspecies
        for i in range(Nx-1):
            ind3 = ind2 # First -> last index of cell on right
            ind4 = ind3 + Nspecies
            # D = total mass density of a cell; Y = species mass fractions.
            D1 = sum(SV[ind1:ind2])
            D2 = sum(SV[ind3:ind4])
            Y1 = SV[ind1:ind2] / D1
            Y2 = SV[ind3:ind4] / D2
            molar_fluxes = gas.molar_fluxes(T,T,D1,D2,Y1,Y2,dX)
            # Convert molar to mass fluxes via molecular weights.
            Fluxes_X_int[cnt_x:cnt_x+Nspecies] = molar_fluxes*MWs
            ind1 = ind3 # Index of right cell becomes index of left
            ind2 = ind1 + Nspecies
            cnt_x = cnt_x + Nspecies
        x1 = j*(Nx+1)*Nspecies + Nspecies # First non-zero x-flux of each row
        x2 = x1 + (Nx-1)*Nspecies # Last non-zero x-flux of each row
        # Interior fluxes slot between the zero-flux side boundaries.
        Fluxes_X[x1:x2] = Fluxes_X_int[j*(Nx-1)*Nspecies:(j+1)*(Nx-1)*Nspecies]
    # Calculate each y-direction flux:
    # First the inlet boundary fluxes for the BC_in inlet cells:
    for i in range(BC_in):
        D1 = sum(inlet_BC)
        D2 = sum(SV[i*Nspecies:(i+1)*Nspecies])
        Y1 = inlet_BC / D1
        Y2 = SV[i*Nspecies:(i+1)*Nspecies] / D2
        molar_fluxes = gas.molar_fluxes(T,T,D1,D2,Y1,Y2,dY)
        Fluxes_Y[i*Nspecies:(i+1)*Nspecies] = molar_fluxes*MWs
    # Then the interior y-direction fluxes between vertically adjacent rows:
    for j in range(Ny-1):
        ind1 = j*Nx*Nspecies # First -> last index of cell on top
        ind2 = ind1 + Nspecies
        for i in range(Nx):
            D1 = sum(SV[ind1:ind2])
            D2 = sum(SV[ind1+Nx*Nspecies:ind2+Nx*Nspecies])
            Y1 = SV[ind1:ind2] / D1
            Y2 = SV[ind1+Nx*Nspecies:ind2+Nx*Nspecies] / D2
            molar_fluxes = gas.molar_fluxes(T,T,D1,D2,Y1,Y2,dY)
            Fluxes_Y[cnt_y:cnt_y+Nspecies] = molar_fluxes*MWs
            ind1 = ind2
            ind2 = ind1 + Nspecies
            cnt_y = cnt_y + Nspecies
    return Fluxes_X, Fluxes_Y
| decaluwe/2D-porous-flux-model | DGM_func.py | DGM_func.py | py | 2,991 | python | en | code | 7 | github-code | 13 |
21293391739 | import math
# Read the group size, then the three per-unit prices, one value per line.
visitors = int(input())
entry_fee = float(input())
chair_cost = float(input())
umbrella_cost = float(input())

# 75% of visitors rent a deck chair; every second visitor shares an umbrella
# (both counts rounded up).
entry_total = visitors * entry_fee
chairs_total = math.ceil(visitors * 0.75) * chair_cost
umbrellas_total = math.ceil(visitors / 2) * umbrella_cost
print(f"{entry_total + chairs_total + umbrellas_total:.2f} lv.")
22984465728 | import setuptools
# Long description for PyPI comes straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Runtime dependencies are kept in requirements.txt, one per line.
with open("requirements.txt", "r") as f:
    requirements = f.read().splitlines()

setuptools.setup(
    name="apluslms_file_transfer",
    version="0.1",
    author="Qianqian Qin",
    author_email="qianqian.qin@outlook.com",
    description="A package for file transfer in apluslms",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/apluslms/apluslms-file-transfer",
    packages=setuptools.find_packages(include=['apluslms_file_transfer',
                                               'apluslms_file_transfer.*']),
    install_requires=requirements,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.5',
)
| apluslms/apluslms-file-transfer | setup.py | setup.py | py | 952 | python | en | code | 0 | github-code | 13 |
1946591821 | # Write a program that receives a number and creates the following pattern. The number represents the largest
# count of stars on one row.
# *
# **
# ***
# **
# *
peak = int(input())

# Row widths rise 0..peak and fall back down to 1.  The width-0 row prints a
# blank line, matching the original range(peak + 1) loop.
widths = list(range(peak + 1)) + list(range(peak - 1, 0, -1))
for width in widths:
    print("*" * width)
| dnkirkov/SoftUni_Python_Fundamentals | Patterns.py | Patterns.py | py | 311 | python | en | code | 0 | github-code | 13 |
42000005782 | #
# @lc app=leetcode.cn id=13 lang=python3
#
# [13] 罗马数字转整数
# 先计算其他位的结果, 最后单独处理最后一位
# Time: O(n) Space: O(1)
# @lc code=start
class Solution:
    def romanToInt(self, s: str) -> int:
        """Convert a (valid, non-empty) Roman numeral to an integer.

        A symbol smaller than its right neighbour is subtracted (the I in
        IV); every other symbol -- including the last one -- is added.
        O(n) time, O(1) extra space.
        """
        value = {
            "I": 1, "V": 5, "X": 10, "L": 50,
            "C": 100, "D": 500, "M": 1000,
        }
        # The final symbol is always added.
        total = value[s[-1]]
        for left, right in zip(s, s[1:]):
            if value[left] < value[right]:
                total -= value[left]
            else:
                total += value[left]
        return total
# @lc code=end
| WeiS49/leetcode | Solution/其他/其他/13. 罗马数字转整数/哈希表.py | 哈希表.py | py | 786 | python | zh | code | 0 | github-code | 13 |
23111482040 | # -*- coding: utf-8 -*-
"""
Created on Fri May 31 11:50:54 2019
@author:Pablo La Grutta
pablo.lg@hotmail.com.ar
"""
import tkinter as tk
from tkinter import ttk, StringVar,scrolledtext as st
from tkinter.ttk import Style
from tkinter.filedialog import askopenfilename, askdirectory
from tkinter.messagebox import showinfo
import datetime
import pandas as pd
import os
import sys
# Resolve the application directory both for a PyInstaller bundle and for a
# plain source checkout.
if getattr(sys, 'frozen', False):
    # When run as a PyInstaller bundle, the bootloader sets sys.frozen and
    # unpacks resources into the directory named by sys._MEIPASS.
    application_path = sys._MEIPASS
    print('application_path de Py', application_path)
else:
    # Running from source: use the directory containing this file.
    application_path = os.path.dirname(os.path.abspath(__file__))
class GUI(tk.Tk):
    """Tk window that splits a .dat file into multi-sheet Excel results.

    Workflow: pick the input .dat, choose a lines-per-sheet count, pick an
    output directory, then run Proceso1 to write the .xlsx.
    """
    def __init__(self, window):
        # NOTE(review): `a` and `b` are unused leftovers; kept untouched.
        a = "soy un cambio"
        b = "soy un branch"
        self.path_resultados = ''
        self.input_text_resultados= ''
        # Button captions (user-facing Spanish strings; not translated).
        etiq_bt1 = "Archivo BD"
        etiq_bt2 = "Resultados"
        etiq_bt3 = "Descargar en..."
        window.title("DAT Mastikator 1.0")
        window.config(bg='#321899')
        window.resizable(0, 0)
        window.geometry("800x300") # width x height
        style = Style()
        style.configure('W.TButton', font =('calibri', 10, 'bold', 'underline'), background = "orange", foreground = 'blue')
        # Row 0: input-file picker + path display.
        ttk.Button(window, text = etiq_bt1,style = 'W.TButton', command = lambda: self.set_path_entrada()).grid(row = 0)
        self.scrolledtext0=st.ScrolledText(window, width=80, height=2)
        self.scrolledtext0.grid(column=1,row=0, padx=0, pady=10)
        # Row 1: lines-per-sheet selector.
        ttk.Label(text="Numero de líneas a separar",foreground="red").grid(row=1,column=0, ipadx=20, ipady=10)
        self.combo = ttk.Combobox(window)
        self.combo.grid(row=1,column=1, ipadx=20, ipady=10)
        self.combo["values"] = [100000,200000, 300000,400000, 500000]
        # NOTE(review): bind() returns a binding id, not the selection; the
        # real value is stored by selection_changed when the event fires.
        self.num_lineas = self.combo.bind("<<ComboboxSelected>>", self.selection_changed)
        # Row 2: output-directory picker + path display.
        ttk.Button(window, text = etiq_bt3,style = 'W.TButton', command = lambda: self.set_path_resultados()).grid(row = 2)
        self.scrolledtext1=st.ScrolledText(window, width=80, height=2)
        self.scrolledtext1.grid(column=1,row=2, padx=0, pady=10)
        # Row 3: run the conversion.
        ttk.Button(window, text = etiq_bt2, style = 'W.TButton',command = lambda: self.Proceso1()).grid(row = 3)
    def selection_changed(self, event):
        """Store the chosen lines-per-sheet count as an int."""
        num_lineas = self.combo.get()
        self.num_lineas = int(num_lineas)
        return self.num_lineas
    def Proceso1(self):
        """Read the .dat as CSV, adjust RSRP, and write one .xlsx with the
        rows split into sheets of `num_lineas` lines each."""
        data = pd.read_csv(self.path_entrada, names=['MRBTS','RSRP','LAT','LONG','LNCEL'])
        # RSRP values arrive offset by 65536; shift them back.
        data['RSRP'] = data['RSRP'].apply(lambda x: x - 65536)
        # NOTE(review): this replace() result is discarded (not assigned),
        # so LAT/LONG are not actually modified -- confirm intent.
        data[['LAT','LONG']].replace('.',',',inplace=True)
        # Timestamp suffix for the output filename.
        ct = str(datetime.datetime.now() ).replace(' ','_').replace(':','').replace('.','')[0:17]
        path_origen = str(self.path_entrada).split('/')
        # The ExcelWriter argument is the output file path: results dir +
        # derived name + timestamp.
        with pd.ExcelWriter(self.path_res + '/Resultados_'+ path_origen[-1][:-4] +'_' + ct + '.xlsx') as writer: # el argumento de ExcelWriter es el path al archivo, ahi tengo que cargar el path deseado
            for key, grp in data.groupby(data.index // int(self.num_lineas)):
                grp.to_excel(writer, f'sheet_{key}', header=True)
            writer.save()
        print(showinfo("Resultados", "Resultados listos!\nCargue nuevos CSV o cierre el programa."))
        # Re-initialise the UI for the next run (uses the module-level
        # `gui`/`window` globals set in __main__).
        gui.__init__(window)
    def set_path_entrada(self):
        """Ask for the input .dat file and echo the path in the text box."""
        self.path_entrada = askopenfilename( filetypes = [("Archivo DAT","*.dat"),("Todos los archivos","*.*")], title = "Seleccionar archivo DAT o TXT")
        self.scrolledtext0.insert("1.0", self.path_entrada)
    def set_path_resultados(self):
        """Ask for the output directory and echo the path in the text box."""
        self.path_res = askdirectory( )
        self.scrolledtext1.insert("1.0",self.path_res)
if __name__ == '__main__':
    # Build the root window, attach the GUI, and enter the Tk event loop.
    window = tk.Tk()
    gui = GUI(window)
    window.mainloop()
| pablolagrutta127/DAT_Mastikator | DAT_Cnv_1.0.py | DAT_Cnv_1.0.py | py | 3,914 | python | en | code | 0 | github-code | 13 |
72859193298 |
import struct
import time
import numpy as np
import matplotlib.pyplot as plt
#fig,ax = plt.subplots(2,5)
#ax=ax.flatten()
def Normalize(data):
m = np.mean(data)
mx = max(data)
mn = min(data)
return np.array([(float(i) - m) / (mx-mn) for i in data])
def loadmnist():
with open("/home/vr/mnist/train-labels-idx1-ubyte","rb") as fs:
magic, n = struct.unpack('>II',fs.read(8))
labels = np.fromfile(fs,dtype=np.uint8)
with open("/home/vr/mnist/train-images-idx3-ubyte",'rb') as fd:
magic, n, rows, cols= struct.unpack('>IIII', fd.read(16))
images = np.fromfile(fd, dtype=np.uint8).reshape(len(labels), 784)
#images = np.insert(images,784,values=1,axis=1)
norimages=np.ones((len(labels),784))
for i in range(len(labels)):
norimages[i]=Normalize(images[i])
norimages = np.insert(norimages, 784, values=1., axis=1)
print(images[0])
print(norimages[0])
#for i in range(10):
# print(f"{i:4} ==> {labels[i]:4}")
# print(f"{images[i]}")
# img=images[i].reshape(28,28)
# ax[i].imshow(img,cmap='Greys',interpolation='nearest')
# ax[i].set_xticks([])
# ax[i].set_yticks([])
#plt.tight_layout()
#plt.show()
return labels , norimages
def loadmnisttest():
with open("/home/vr/mnist/t10k-labels-idx1-ubyte","rb") as fs:
magic, n = struct.unpack('>II',fs.read(8))
labels = np.fromfile(fs,dtype=np.uint8)
with open("/home/vr/mnist/t10k-images-idx3-ubyte",'rb') as fd:
magic, n, rows, cols= struct.unpack('>IIII', fd.read(16))
images = np.fromfile(fd, dtype=np.uint8).reshape(len(labels), 784)
norimages = np.ones((len(labels), 784))
for i in range(len(labels)):
norimages[i] = Normalize(images[i])
norimages = np.insert(norimages, 784, values=1., axis=1)
return labels,norimages
start=time.time();
(x_labels,x_train)=loadmnist()
print(f"loadmnist finish {len(x_labels)} {len(x_train)} ==> {len(x_train[0])}")
rx_train=np.transpose(x_train)
print(f"rx_train transpose {len(rx_train)} ==> {len(rx_train[0])}")
m_xtrain=np.dot(rx_train,x_train)
print(f"m_xtrain generator {len(m_xtrain)} ==> {len(m_xtrain[0])}")
midxos=np.dot(np.linalg.pinv(m_xtrain),rx_train)
print(f"midxos {len(midxos)} ==> {len(midxos[0])}")
omiga = np.dot(midxos,np.transpose(x_labels))
print(f"result over! {len(omiga)} ==> {omiga[0]})")
(y_labels,y_train)=loadmnisttest()
lastre=np.dot(omiga,np.transpose(y_train))
print(f"result over! {len(lastre)} ==> {lastre[0]})")
end=time.time();
print(end-start)
cc=0
ttl={}
ttl2={}
for i in range(10):
ttl[i]=0;
ttl2[i] = 0;
for i in range(len(lastre)):
if round(lastre[i]) == y_labels[i]:
ttl[y_labels[i]] +=1;
cc+=1;
else :
print(lastre[i]," ===> ",y_labels[i])
ttl2[y_labels[i]]+=1
print(f"last result cc={cc} total={len(lastre)}")
print(ttl)
print(ttl2)
| fishfreetaken/orange | pyspider/learnmnist.py | learnmnist.py | py | 3,021 | python | en | code | 1 | github-code | 13 |
17127060565 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('university_dashboard', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='university',
name='university_language',
field=models.CharField(max_length=2),
preserve_default=True,
),
]
| LuisBosquez/student-net-2015 | src/university_dashboard/migrations/0002_auto_20150221_1631.py | 0002_auto_20150221_1631.py | py | 455 | python | en | code | 0 | github-code | 13 |
15076793671 | from morse import *
message1 = "abcdefghijklmnopqrstuvwxyz"
message2 = "Live long and prosper!"
message3 = "May the force be with you!"
message4 = "Never give up, never surrender!"
texts = [message1,message2,message3,message4]
Done = False
count = 0
compthink() #computer thinking
docode("hello") #say hello
blinknum(1,blue)
while not Done:
val = 0
if touch1.value:
val = val + 1
if touch2.value:
val = val + 2
if val == 1: #advance through messages touching "1"
count = count+1
if count == 4:
count = 0
blinknum (count+1,blue) #display index+1
if val == 2: #display message in Morse when touching "2"
docode(texts[count])
compthink() #indicates end of message
time.sleep(.2)
| mrklingon/neotrinkey | DoMorse.py | DoMorse.py | py | 822 | python | en | code | 0 | github-code | 13 |
4682781622 | '''
Print largest word from the sentence.
Input:
Sentence(string)
Output:
Largest word from the string.
'''
# output = max(input("Enter your sentence: ").split(sep=" "))
# print(output)
#using function
def largestword(sentence):
words = sentence.split(sep=" ")
stringlength = -1
for word in words:
if len(word) >stringlength:
stringlength = len(word)
output = word
return output
print(largestword("Welcome world to the new world"))
| SatyasaiNandigam/competitive-coding-solutions | Wipro CodingQuestions/5.py | 5.py | py | 529 | python | en | code | 0 | github-code | 13 |
28127519852 | from .Player import Player
import sys
import random
sys.path.append('../')
from streamy import stream
from const import *
from rule_checker import rule_checker, get_opponent_stone, get_legal_moves
from board import make_point, board, get_board_length, make_empty_board, parse_point
from utils import *
def generate_random_point():
return make_point(random.randint(0, maxIntersection - 1), random.randint(0, maxIntersection - 1))
# read 'capture in n moves' depth from config file
def set_depth():
config_file = open("go-player.config", "r")
depth = config_file.readlines()
depth_info = list(stream(depth))[0]
print(depth_info)
global n
n = depth_info["depth"]
class player(Player):
def __init__(self, name=random_string()):
if name is None:
super().__init__()
else:
super().__init__(name=name)
self.register_flag = False
self.receive_flag = False
self.crazy_flag = False
def query(self, query_lst):
# don't keep playing if we've gone crazy (deviated from following rules)
if self.crazy_flag:
return
# get method and arguments from input query
try:
method = query_lst[0].replace("-", "_")
args = query_lst[1:]
if method not in self.function_names:
return self.go_crazy()
method = getattr(self, method)
if method:
return method(*args)
return self.go_crazy()
except:
return self.go_crazy()
def register(self):
if self.receive_flag or self.register_flag:
self.go_crazy()
else:
self.register_flag = True
print("generic player 58", self.name)
return self.name
def receive_stones(self, stone):
if not is_stone(stone):
self.go_crazy()
if self.receive_flag or not self.register_flag:
self.go_crazy()
self.receive_flag = True
self.stone = stone
def end_game(self):
self.receive_flag = False
return "OK"
def is_stone(self, stone):
if stone == black or stone == white:
return True
return False
def is_maybe_stone(self, maybe_stone):
if is_stone(maybe_stone) or maybe_stone == empty:
return True
return False
def check_boards_object(self, boards):
min_boards_size = 1
max_boards_size = 3
# check to make sure input is actually a list
if not isinstance(boards, list):
return False
# board history between length 1 and 3
if len(boards) < min_boards_size or len(boards) > max_boards_size:
return False
for board in boards:
if not self.check_board_object(board):
return False
return True
def check_board_object(self, board):
# check types
if not isinstance(board, list):
return False
if not isinstance(board[0], list):
return False
# check dimensions
if len(board) != maxIntersection or len(board[0]) != maxIntersection:
return False
# make sure all boards contain only maybe stones
for i in range(maxIntersection):
for j in range(maxIntersection):
if not self.is_maybe_stone(board[i][j]):
return False
return True
def go_crazy(self):
self.crazy_flag = True
return crazy
def make_a_move_random(self, boards):
# don't make a move until a player has been registered with a given stone
if self.receive_flag and self.register_flag:
if rule_checker().check_history(boards, self.stone):
generate_random_point()
point = generate_random_point()
if rule_checker().check_validity(self.stone, [point, boards]):
return point
return "pass"
return history
return self.go_crazy()
    def make_a_move_random_maybe_illegal(self, boards):
        """Return a random point WITHOUT validating it (may be illegal); passes roughly one time in three."""
        # don't make a move until a player has been registered with a given stone
        if self.receive_flag and self.register_flag:
            if rule_checker().check_history(boards, self.stone):
                point = generate_random_point()
                # Randomly decide between playing the (unchecked) point and passing.
                if random.randint(0, maxIntersection - 1) % 3 != 0:
                    return point
                return "pass"
            return history
        return self.go_crazy()
    def make_a_move_dumb(self, boards):
        """Play the first legal empty point found by scanning the board; otherwise pass."""
        # don't make a move until a player has been registered with a given stone
        if self.receive_flag and self.register_flag:
            if rule_checker().check_history(boards, self.stone):
                curr_board = boards[0]
                # go through rows and columns to find a point
                # check_validity of that move
                for i in range(maxIntersection): # row
                    for j in range(maxIntersection): # col
                        # NOTE(review): the board is indexed [j][i] (column-major) while the
                        # point is built as make_point(i, j) -- verify board orientation.
                        if curr_board[j][i] == empty:
                            point = make_point(i, j)
                            if rule_checker().check_validity(self.stone, [point, boards]):
                                return point
                return "pass"
            return history
        return self.go_crazy()
def make_a_move_end_game_quickly(self, boards):
r = random.randint(0, 10)
if r == 0:
return generate_random_point()
if r == 1:
return self.go_crazy()
if r == 2:
return history
if r >= 3:
return "pass"
    def make_a_move(self, boards):
        """Default move strategy: delegate to the maybe-illegal random mover."""
        m = self.make_a_move_random_maybe_illegal(boards)
        return m
    def make_a_move_capture(self, boards):
        """Prefer a legal move that leads to a capture within n moves; fall back to any legal move, else pass."""
        # don't make a move until a player has been registered with a given stone
        if self.receive_flag and self.register_flag:
            if self.check_boards_object(boards):
                if rule_checker().check_history(boards, self.stone):
                    curr_board = boards[0]
                    non_capture_move = None  # first legal but non-capturing point, kept as a fallback
                    # go through rows and columns to find a point
                    # check_validity of that move
                    for i in range(maxIntersection): # row
                        for j in range(maxIntersection): # col
                            point = make_point(i, j)
                            if curr_board[j][i] == empty:
                                if rule_checker().check_validity(self.stone, [point, boards]):
                                    # NOTE(review): `n` is not defined anywhere in this method --
                                    # presumably a module-level lookahead depth; confirm it is
                                    # imported/global, otherwise this raises NameError.
                                    if self.make_capture_n_moves(n, curr_board, self.stone, point, boards):
                                        return point
                                    elif non_capture_move is None:
                                        non_capture_move = point
                    if non_capture_move:
                        return non_capture_move
                    return "pass"
                return history
            return self.go_crazy()
        return self.go_crazy()
    def make_capture_n_moves(self, n, curr_board, stone, point, boards):
        """Return True when playing *point* can lead to a capture within *n* moves (depth-limited lookahead)."""
        if n == 1:
            return self.make_capture_1_move(curr_board, stone, point)
        # Simulate the next n-1 plies (including a random opponent reply), then
        # test every legal follow-up move for an immediate capture.
        new_boards = self.randomize_next_move(n, curr_board, stone, point, boards)
        updated_board = new_boards[0]
        for i in range(maxIntersection):
            for j in range(maxIntersection):
                new_point = make_point(i, j)
                if updated_board[j][i] == empty and rule_checker().check_validity(stone, [new_point, new_boards]):
                    if self.make_capture_1_move(updated_board, stone, new_point):
                        return True
        return False
    def randomize_next_move(self, n, curr_board, stone, point, boards):
        """Advance the board history by playing *point*, then a random opponent reply, recursing n-1 levels deep."""
        if n == 1:
            return boards
        curr_board = board(curr_board)
        updated_board = curr_board.place(stone, point)
        # Newest position first; keep at most the two previous positions.
        new_boards = [updated_board] + boards[:min(2, len(boards))]
        opponent_random_move = self.next_player_move(stone, new_boards)
        if opponent_random_move == "pass":
            # Opponent passed: the current position repeats in the history.
            new_boards = [new_boards[0]] + [new_boards[0]] + [new_boards[1]]
        else:
            new_boards = [board(new_boards[0]).place(get_opponent_stone(stone), opponent_random_move)] + \
                         [new_boards[0]] + [new_boards[1]]
        point = self.make_a_move_dumb(new_boards)
        return self.randomize_next_move(n - 1, new_boards[0], stone, point, new_boards)
    def next_player_move(self, stone, new_boards):
        """Create a temporary opponent player and ask it for a dumb (first-legal) move."""
        next_player = player(get_opponent_stone(stone))
        # Mark it as fully registered so make_a_move_dumb does not go crazy.
        next_player.register_flag = True
        next_player.receive_flag = True
        return next_player.make_a_move_dumb(new_boards)
def make_capture_1_move(self, curr_board, stone, point):
curr_board = board(curr_board)
updated_board = curr_board.place(stone, point)
stones_to_remove = board(updated_board).get_no_liberties(get_opponent_stone(stone))
if len(stones_to_remove) > 0:
return True
return False
| MicahThompkins/go_project | Deliverables/10/10.1/tournament/player_pkg/GenericPlayer.py | GenericPlayer.py | py | 9,178 | python | en | code | 0 | github-code | 13 |
33211985692 | import json
import xmltodict
import os
import argparse
def convert_xml_to_json(xml_file_path):
    """Parse the XML file at *xml_file_path* and write it as JSON alongside it (same name, .json extension)."""
    root, _ = os.path.splitext(xml_file_path)
    json_file_path = root + '.json'
    with open(xml_file_path, 'r') as xml_file:
        parsed = xmltodict.parse(xml_file.read())  # XML -> nested Python dict
        serialized = json.dumps(parsed)            # dict -> JSON text
    with open(json_file_path, 'w') as json_file:
        json_file.write(serialized)
    print(f"JSON file has been saved as {json_file_path}")
if __name__ == '__main__':
    # CLI entry point: a single positional argument, the XML file to convert.
    parser = argparse.ArgumentParser(description='Convert XML to JSON.')
    parser.add_argument('xml_file_path', type=str, help='The path to the XML file to convert.')
    args = parser.parse_args()
    convert_xml_to_json(args.xml_file_path)
26710901295 | import pygame,sys,random
from Tkinter import *
from pygame.locals import *
import time
# --- RGB color constants ---
black=(0,0,0)
white=(255,255,255)
blue=(0,0,255)
green=(0,255,0)
red=(255,0,0)
cyan=(0,255,255)
mixed_cyan=(60,255,255)
gray=(230,230,230)
navyBlue=( 60, 60, 100)
lightNavyBlue=(130,130,200)
yellow=(255,255,0)
purple=(255,0,255)
orange=(255,128,0)
# --- icon shape identifiers ---
donut='donut'
square='square'
diamond='diamond'
lines='lines'
oval='oval'
# --- UI colors ---
backgroundColor=navyBlue
lightBgColor=gray
boxColor=white
highlightColor=yellow
# --- window and board geometry (pixels / box counts) ---
windowWidth=640
windowHeight=480
boxSize=40
gapSize=10
boardWidth=4
boardHeight=3
groupSize=4
delay=500 # in milliseconds
assert (boardWidth*boardHeight)%2==0,"Board should have even number of boxes"
x_margin=int((windowWidth-(boardWidth*boxSize+gapSize*(boardWidth-1)))/2) # comes out to be 70 for above cases
y_margin=int((windowHeight-(boardHeight*boxSize+gapSize*(boardHeight-1)))/2) # comes out to be 65 for above cases
All_Colors=[purple,blue,green,red,cyan,yellow,orange] # it doesn't matters if u use a list or tuple here.
All_Shapes=[donut,square,oval,diamond,lines]
fpsClock=pygame.time.Clock() # initializing the clock
def main():
    """Run the memory-puzzle game: set up the window, then loop handling menu clicks and in-game events."""
    mouse_x=0
    mouse_y=0
    pygame.init()
    global basic_font,start_time,final_time  # NOTE(review): final_time is declared global but never assigned here
    basic_font=pygame.font.Font('freesansbold.ttf', 20)
    global display_surface,start_rect,start_surface,exit_rect,exit_surface
    display_surface=pygame.display.set_mode((windowWidth,windowHeight)) # will return a surface object on which everything will
    # be laid upon
    start_surface,start_rect=makeText('New Game',gray,lightNavyBlue,500,10)
    exit_surface,exit_rect=makeText('Exit Game',gray,lightNavyBlue,500,40)
    pygame.display.set_caption('Memory Puzzle')
    display_surface.fill(backgroundColor)
    mainBoard=getRandomizedBoard() # will return a 10x7 board with each place containing a tuple of
    # (color,shape)
    revealedBoxes=generateRevealedBoxesData(False) # will return a 7x10 list revealing the state( T or F )of individual boxes
    while True:
        # whether it is revealed or covered.In the beginning it is False by default
        pygame.draw.circle(display_surface,red,(30,30),10)
        display_surface.blit(start_surface,start_rect)
        display_surface.blit(exit_surface,exit_rect)
        for event in pygame.event.get():
            if event.type==QUIT:
                pygame.quit()
                sys.exit()
            elif event.type==MOUSEBUTTONUP:
                mouse_x,mouse_y=event.pos
                if start_rect.collidepoint(event.pos): # on clicking the new Game button.
                    pygame.time.delay(500)
                    startGameAnimation(mainBoard)
                    start_time=time.time() # after all the initial revealing happens ,jst start counting
                    # the time
                    pygame.draw.circle(display_surface,green,(30,30),10) # signal that game has started.
                    pygame.display.update()
                    firstSelected=None
                    # Inner loop: one full game of matching pairs.
                    while True:
                        display_surface.fill(backgroundColor)
                        isMouseClicked=False
                        drawBoard(mainBoard,revealedBoxes,green)
                        # time calculation and showing
                        cur_time=time.time()
                        time_surf=basic_font.render('Time:'+ str(int(cur_time-start_time)),True,gray,lightNavyBlue)
                        time_rect=time_surf.get_rect()
                        time_rect.topleft=(500,80)
                        display_surface.blit(time_surf,time_rect)
                        pygame.display.update()
                        for event in pygame.event.get():
                            if event.type==QUIT:
                                pygame.quit()
                                sys.exit()
                            elif event.type==MOUSEMOTION:
                                mouse_x,mouse_y=event.pos
                            elif event.type==MOUSEBUTTONUP:
                                if exit_rect.collidepoint(event.pos):
                                    pygame.quit()
                                    sys.exit()
                                elif start_rect.collidepoint(event.pos):
                                    # Restart: new shuffled board and fresh animation.
                                    mainBoard=getRandomizedBoard()
                                    revealedBoxes=generateRevealedBoxesData(False)
                                    display_surface.fill(backgroundColor)
                                    #drawBoard(mainBoard,revealedBoxes,red)
                                    pygame.time.delay(500)
                                    startGameAnimation(mainBoard)
                                    pygame.draw.circle(display_surface,green,(30,30),10)
                                    pygame.display.update()
                                mouse_x,mouse_y=event.pos
                                isMouseClicked=True
                        box_x,box_y=getBoxNumber(mouse_x,mouse_y)
                        if box_x!=None and box_y!=None:
                            #if not revealedBoxes[box_x][box_y]:
                                #drawHighlightColor(mainBoard,box_x,box_y)
                                #pygame.display.update()
                            if not revealedBoxes[box_x][box_y] and isMouseClicked==True:
                                revealBoxesAnimation(mainBoard,[(box_x,box_y)])
                                revealedBoxes[box_x][box_y]=True
                                if firstSelected==None:
                                    firstSelected=(box_x,box_y)
                                else:
                                    # Second pick of the pair: compare icons and cover both on mismatch.
                                    getShape1,getColor1=getShapeAndColor(mainBoard,firstSelected[0],firstSelected[1])
                                    getShape2,getColor2=getShapeAndColor(mainBoard,box_x,box_y)
                                    if getShape1!=getShape2 or getColor1!=getColor2:
                                        pygame.time.delay(500)
                                        coverBoxesAnimation(mainBoard,[(firstSelected[0],firstSelected[1]),(box_x,box_y)])
                                        revealedBoxes[box_x][box_y]=False
                                        revealedBoxes[firstSelected[0]][firstSelected[1]]=False
                                    elif hasGameWon(revealedBoxes):
                                        #final_time=cur_time
                                        gameWonAnimation(mainBoard,cur_time)
                                        mainBoard=getRandomizedBoard()
                                        revealedBoxes=generateRevealedBoxesData(False)
                                        display_surface.fill(backgroundColor)
                                        pygame.draw.circle(display_surface,red,(30,30),10)
                                        pygame.display.update()
                                        gameWon=True  # NOTE(review): gameWon is set but never read
                                        break
                                    firstSelected=None
                elif exit_rect.collidepoint(event.pos):
                    pygame.quit()
                    sys.exit()
                elif event.type==MOUSEMOTION:
                    mouse_x,mouse_y=event.pos
                    if insideCircle(mouse_x,mouse_y):
                        pygame.draw.circle(display_surface,white,(30,30),10)
                        pygame.display.update()
def makeText(text, color, bgcolor, top, left): # for making text like new game and exit game
    """Render *text* onto a fresh surface and return (surface, rect) positioned at (top, left)."""
    surface = basic_font.render(text, True, color, bgcolor)
    rect = surface.get_rect()
    rect.topleft = (top, left)
    return (surface, rect)
def insideCircle(x, y):
    """Return True when pixel (x, y) lies inside or on the radius-10 status circle centered at (30, 30)."""
    # Squared-distance comparison: avoids the pow() calls and the explicit
    # True/False branch of the original.
    return (x - 30) ** 2 + (y - 30) ** 2 <= 100
def hasGameWon(revealedBoxes): # checks whether game has ended or not #
    """Return True when every box in *revealedBoxes* has been revealed.

    Iterates the structure itself instead of the module-level board-size
    globals, and drops the unused ``flag`` local from the original.
    """
    return all(all(column) for column in revealedBoxes)
def get_Name():
    """Tk 'Proceed' callback: append 'name : time on date' to the leaderboard file, then close the dialog."""
    playerName=name_entry.get()
    with open('TimeRecord.txt','a') as appendFile:
        appendFile.write('%s : %s secs on %s\n'%(playerName,timeToComplete,time.ctime()))
    # Destroying the frame also destroys all of its child widgets.
    frame.destroy()
def gameWonAnimation(board,final_time): # after game completion , background changes appears.
    """Show the completion screen with the elapsed time, then open a Tk dialog asking for the player's name."""
    display_surface.fill(cyan)
    global timeToComplete
    timeToComplete=str(int(final_time-start_time))
    finalTime_surf,finalTime_rect=makeText('Time Taken : '+timeToComplete+' secs',gray,lightNavyBlue,200,200)
    display_surface.blit(finalTime_surf,finalTime_rect)
    pygame.display.update()
    pygame.time.delay(3000)
    # frame and name_entry are module globals consumed by the get_Name() callback.
    global frame
    frame=Tk()
    frame.title('LeaderBoard Name')
    frame.geometry('200x100+350+150')
    frame.config(bg='#ABADAC')
    frame.resizable(width = False,height = False)
    global name_entry
    name_entry=Entry(frame,width=40)
    name_entry.place(x=20,y=30)
    Button(frame,text='Proceed',command=get_Name).place(x=70,y=60)
    frame.mainloop()
def getBoxNumber(mouse_x,mouse_y): # given the pixel values of the starting of a box , returns the box numbers.
    """Map a pixel position to board coordinates; return (None, None) when the position hits no box."""
    for box_x in range(boardWidth):
        for box_y in range(boardHeight):
            left,top=leftTopCoordsOfBox(box_x,box_y)
            boxRect=pygame.Rect(left,top,boxSize,boxSize)
            if boxRect.collidepoint(mouse_x,mouse_y):
                return box_x,box_y
    return None,None
def drawHighlightColor(board,box_x,box_y): # highlights the boxes if cursor hovers over it
    """Draw a 5px highlight border around the box at board position (box_x, box_y)."""
    left,top=leftTopCoordsOfBox(box_x,box_y)
    pygame.draw.rect(display_surface,highlightColor,(left-5,top-5,boxSize+10,boxSize+10),5)
def getRandomizedBoard(): # creates a row x col list having random tuples of (shape,color)
    """Build a boardWidth x boardHeight grid where each (shape, color) icon appears exactly twice."""
    icons = [(shape, color) for color in All_Colors for shape in All_Shapes]
    random.shuffle(icons)
    numIconsNeeded = int((boardWidth * boardHeight) / 2)
    icons = icons[:numIconsNeeded] * 2   # two copies of each icon, so every tile has a match
    random.shuffle(icons)                # for more randomization
    board = []
    for x in range(boardWidth):
        # Each column consumes the next boardHeight icons.
        board.append(icons[:boardHeight])
        icons = icons[boardHeight:]
    return board
def generateRevealedBoxesData(boolean):
    """Return a boardWidth x boardHeight grid filled with *boolean* (the per-box revealed state)."""
    return [[boolean] * boardHeight for _ in range(boardWidth)]
def leftTopCoordsOfBox(box_x,box_y): # given box numbers , returns the pixel values of the topleft corner
    """Convert board coordinates (box_x, box_y) into the pixel position of the box's top-left corner."""
    left = x_margin + box_x * (boxSize + gapSize)
    top = y_margin + box_y * (boxSize + gapSize)
    return int(left), int(top)
def getShapeAndColor(board, box_x, box_y):
    """Return the (shape, color) pair stored for the box at (box_x, box_y)."""
    shape, color = board[box_x][box_y]
    return shape, color
def drawIcon(shape,color,box_x,box_y): # function name clarifies
    """Draw the icon (shape in the given color) inside the box at board position (box_x, box_y)."""
    left,top=leftTopCoordsOfBox(box_x,box_y)
    # NOTE(review): boxSize/2 relies on Python 2 integer division (this file uses
    # Tkinter-era imports); under Python 3 these become floats -- confirm before porting.
    x_center=left+boxSize/2
    y_center=top+boxSize/2
    if shape=='donut':
        pygame.draw.circle(display_surface,color,(x_center,y_center),boxSize/2,5)
    elif shape=='diamond':
        pygame.draw.polygon(display_surface,color,((x_center,top),(left+boxSize,y_center),(x_center,top+boxSize),(left,y_center)))
    elif shape=='square':
        pygame.draw.rect(display_surface,color,(left+boxSize/4,top+boxSize/4,boxSize/2,boxSize/2))
    elif shape=='lines':
        for i in range(0, boxSize, 4):
            pygame.draw.line(display_surface, color, (left, top + i), (left +i, top))
            pygame.draw.line(display_surface, color, (left + i, top + boxSize- 1), (left + boxSize - 1, top + i))
    elif shape=='oval':
        pygame.draw.ellipse(display_surface,color,(left,top+boxSize/4,boxSize,boxSize/2))
def drawBoxCover(board,boxesToReveal,coverage): # has the effect of opening and closing of boxes.
    """Redraw each box in *boxesToReveal* with its icon, partially hidden by a white strip *coverage* px wide."""
    for box in boxesToReveal: # here boxToReveal ~ one of the group(8) of boxes.
        left,top=leftTopCoordsOfBox(box[0],box[1])
        pygame.draw.rect(display_surface,backgroundColor,(left,top,boxSize,boxSize)) # when opening and closing the background should
        shape,color=getShapeAndColor(board,box[0],box[1]) # be backgroundColor
        drawIcon(shape,color,box[0],box[1])
        if coverage>0:
            pygame.draw.rect(display_surface,boxColor,(left,top,coverage,boxSize))
    pygame.display.update()
    # NOTE(review): delay/10 assumes Python 2 integer division.
    pygame.time.delay(delay/10) # After revealing 8 boxes partially or fully it will wait.
def coverBoxesAnimation(board,boxesToCover): # covers the box from both sides
    """Animate white covers sliding over *boxesToCover* in 8-pixel steps."""
    for coverage in range(0,boxSize+groupSize,8):
        drawBoxCover(board,boxesToCover,coverage)
def revealBoxesAnimation(board,boxesToReveal): # opens the boxes from the middle
    """Animate the covers shrinking away from *boxesToReveal* (coverage decreasing to zero)."""
    for coverage in range(boxSize,-groupSize-1,-groupSize):
        drawBoxCover(board,boxesToReveal,coverage)
def drawBoard(board,revealed,colorOfCircle): # this function draws the board in its present state.
    """Draw every box (white cover if hidden, icon if revealed) plus the status circle and the menu buttons."""
    for box_x in range(boardWidth):
        for box_y in range(boardHeight):
            if not revealed[box_x][box_y]:
                left,top=leftTopCoordsOfBox(box_x,box_y) # left ,top are starting pixel coordinates of a particular box.
                pygame.draw.rect(display_surface,boxColor,(left,top,boxSize,boxSize)) # draw a white cover of boxSize x boxSize
            else:
                shape,color=getShapeAndColor(board,box_x,box_y) # will take shape and color if it is not revealed and draw icon
                drawIcon(shape,color,box_x,box_y)
    pygame.draw.circle(display_surface,colorOfCircle,(30,30),10)
    display_surface.blit(start_surface,start_rect)
    display_surface.blit(exit_surface,exit_rect)
def splitsIntoGroupOf(groupsize,boxes): # creates group of random boxes to show during startGameAnimation
    """Split *boxes* into consecutive chunks of *groupsize* (the last chunk may be shorter)."""
    return [boxes[start:start + groupsize] for start in range(0, len(boxes), groupsize)]
def startGameAnimation(board):
    """Briefly reveal all boxes in random groups of groupSize at the start of a game."""
    coveredBoxes=generateRevealedBoxesData(False) # 7x10
    boxes=[]
    for x in range(boardWidth):
        for y in range(boardHeight):
            boxes.append((x,y)) # boxes[] appends boardWidth x boardheight number of tuples like [(5,4),(2,6)]
    random.shuffle(boxes)
    boxGroups=splitsIntoGroupOf(groupSize,boxes) # will return a list[] containing tuples of (x,y) in groups of 8 but last will
    # be a group of 6 since it is 10 x 7 matrix
    drawBoard(board,coveredBoxes,red) # covered boxes is a list of 10 x 7 matrix containing False.
    for boxGroup in boxGroups:
        revealBoxesAnimation(board,boxGroup)
        pygame.time.delay(delay*2)
        coverBoxesAnimation(board,boxGroup)
        pygame.time.delay(delay)
if __name__=='__main__':
    # Launch the game only when run as a script.
    main()
74509166097 | """
https://www.pyimagesearch.com/2015/11/16/hog-detectmultiscale-parameters-explained/
--image switch is the path to our input image that we want to detect pedestrians in.
--win-stride is the step size in the x and y direction of our sliding window.
--padding switch controls the amount of pixels the ROI is padded
with prior to HOG feature vector extraction and SVM classification.
To control the scale of the image pyramid (allowing us to detect people in images at multiple scales),
we can use the --scale argument.
--mean-shift can be specified if we want to apply mean-shift grouping to the detected bounding boxes.
-- Default USAGE
$ python main_hog.py --image images/person_010.bmp
--The smaller winStride is, the more windows need to be evaluated
(which can quickly turn into quite the computational burden):
$ python main_hog.py --image images/person_010.bmp --win-stride="(4, 4)"
-- winStride is the less windows need to be evaluated
(allowing us to dramatically speed up our detector).
However, if winStride gets too large, then we can easily miss out on detections entirely:
$ python main_hog.py --image images/person_010.bmp --win-stride="(16, 16)"
-- A smaller scale will increase the number of layers in the image pyramid
and increase the amount of time it takes to process your image:
$ python detectmultiscale.py --image images/person_010.bmp --scale 1.01
"""
from __future__ import print_function
import argparse
import datetime
from imutils import paths
import imutils
import cv2
def get_hog_features(trainingSetPath, win_stride, padding, mean_shift, scale):
# evaluate the command line arguments (using the eval function like
# this is not good form, but let's tolerate it for the example)
winStride = eval(win_stride)
padding = eval(padding)
meanShift = True if mean_shift > 0 else False
# initialize the HOG descriptor/person detector
hog = cv2.HOGDescriptor()
#hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector()) # CHECK FOR IT!
# initialize the local binary patterns descriptor along with the data and label lists
labels = []
data = []
test_labels = []
test_data = []
# loop over the training images
for imagePath in paths.list_files(trainingSetPath, validExts=(".png",".ppm")):
# open image
img = cv2.imread(imagePath, cv2.IMREAD_GRAYSCALE)
# resized_image = cv2.resize(img, (32, 32)) # RESIZING
rects, weights = hog.detectMultiScale(img, winStride=winStride,
padding=padding, scale=scale, useMeanshiftGrouping=meanShift)
print (rects)
# get hog features
"""STAYED HERE!"""
# extract the label from the image path, then update the
# label and data lists
labels.append(imagePath.split("/")[-2])
data.append(weights)
if __name__ == '__main__':
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-t", "--training", required=True,
help="path to training dataset")
ap.add_argument("-w", "--win-stride", type=str, default="(8, 8)",
help="window stride")
ap.add_argument("-p", "--padding", type=str, default="(16, 16)",
help="object padding")
ap.add_argument("-s", "--scale", type=float, default=1.05,
help="image pyramid scale")
ap.add_argument("-m", "--mean-shift", type=int, default=-1,
help="whether or not mean shift grouping should be used")
args = vars(ap.parse_args())
get_hog_features(args["training"], args["win_stride"], args["padding"],args["mean_shift"], args["scale"]) | ilkayDevran/Traffic_Sign_Recognition | HOG implementation/main_hog.py | main_hog.py | py | 3,671 | python | en | code | 0 | github-code | 13 |
19734569129 | import bpy
from bpy.types import Operator
from bpy.props import FloatVectorProperty, FloatProperty, BoolProperty
from bpy_extras.object_utils import AddObjectHelper, object_data_add
from mathutils import Vector
def add_object(self, context):
    """Create a 'Dumb Wall' mesh object in the scene.

    With use_plane the wall is a single vertical quad of the requested
    length/height; otherwise it is a base edge extruded by a Screw modifier
    (height) and thickened by a Solidify modifier (width).  The object is
    named 'IfcWall/Dumb Wall' and tagged with a STANDARD PredefinedType.
    """
    if self.use_plane:
        verts = [
            Vector((0, 0, 0)),
            Vector((0, 0, self.height)),
            Vector((self.length, 0, self.height)),
            Vector((self.length, 0, 0)),
        ]
        edges = []
        faces = [[0, 1, 2, 3]]
    else:
        verts = [
            Vector((0, 0, 0)),
            Vector((self.length, 0, 0)),
        ]
        edges = [[0, 1]]
        faces = []
    mesh = bpy.data.meshes.new(name="Dumb Wall")
    mesh.from_pydata(verts, edges, faces)
    obj = object_data_add(context, mesh, operator=self)
    if not self.use_plane:
        # Screw with angle 0 acts as a straight vertical extrusion of the base edge.
        modifier = obj.modifiers.new("Wall Height", "SCREW")
        modifier.angle = 0
        modifier.screw_offset = self.height
        modifier.use_smooth_shade = False
        modifier.use_normal_calculate = True
        modifier.use_normal_flip = True
        modifier.steps = 1
        modifier.render_steps = 1
        modifier = obj.modifiers.new("Wall Width", "SOLIDIFY")
        modifier.use_even_offset = True
        modifier.thickness = self.width
    obj.name = "IfcWall/Dumb Wall"
    attribute = obj.BIMObjectProperties.attributes.add()
    attribute.name = "PredefinedType"
    attribute.string_value = "STANDARD"
class BIM_OT_add_object(Operator, AddObjectHelper):
    # Blender operator that adds a parametric "Dumb Wall" object
    # (comments rather than a docstring so the operator tooltip is unchanged).
    bl_idname = "mesh.add_wall"
    bl_label = "Dumb Wall"
    bl_options = {"REGISTER", "UNDO"}
    # Operator properties shown in the redo panel (units follow the scene).
    height: FloatProperty(name="Height", default=3)
    length: FloatProperty(name="Length", default=1)
    width: FloatProperty(name="Width", default=0.2)
    use_plane: BoolProperty(name="Use Plane", default=False)

    def execute(self, context):
        # Delegate mesh creation to the module-level helper.
        add_object(self, context)
        return {"FINISHED"}
def add_object_button(self, context):
    # Menu entry that invokes the Dumb Wall operator.
    self.layout.operator(BIM_OT_add_object.bl_idname, icon="PLUGIN")
| AnastasiiSh/AnastasiiSh.github.io | IfcOpenShell/src/ifcblenderexport/blenderbim/bim/module/model/wall.py | wall.py | py | 2,032 | python | en | code | 0 | github-code | 13 |
11286045860 | """
N x N maze game
find a path between upper left and bottom right
"""
from pprint import pprint
class MazeGame(object):
    """Rat-in-a-maze solver: find a down/right path of 1-cells from (0, 0) to (N-1, N-1)."""

    def __init__(self, matrix):
        self.matrix = matrix
        self.N = len(matrix)
        self.sol_matrix = self._get_original_maze()

    def sol_maze(self, i, j):
        """Recursively try to extend the path through cell (i, j).

        Returns True when a path from (i, j) to the goal exists; on success
        ``self.sol_matrix`` marks exactly the path cells with 1.

        Args:
            i (int): the row of the next move
            j (int): the col of the next move
        """
        # Goal cell: reachable only if it is itself open.
        # Bug fix: the original tested the module-level `matrix` global here
        # instead of self.matrix.
        if i == self.N - 1 and j == self.N - 1 \
                and self.matrix[i][j] == 1:
            self.sol_matrix[i][j] = 1
            return True
        if self._is_safe(i, j):
            self.sol_matrix[i][j] = 1
            # try the bottom
            if self.sol_maze(i + 1, j):
                return True
            # try the right
            if self.sol_maze(i, j + 1):
                return True
            # Bug fix: un-mark dead-end cells while backtracking so the
            # solution matrix only ever contains the successful path.
            self.sol_matrix[i][j] = 0
        return False

    def _is_safe(self, i, j):
        """Return True when (i, j) is inside the maze and the cell is open (== 1).

        Args:
            i (int): the row of the next move
            j (int): the col of the next move
        """
        return 0 <= i <= self.N - 1 and 0 <= j <= self.N - 1 and self.matrix[i][j] == 1

    def _get_original_maze(self):
        """Return an N x N solution grid initialised to all zeros."""
        return [[0] * self.N for _ in range(self.N)]
if __name__ == '__main__':
    # Demo: solve a fixed 5x5 maze and print the resulting path matrix.
    matrix = [
        [1,0,0,1,0],
        [1,1,0,0,0],
        [0,1,1,0,0],
        [0,0,1,0,0],
        [0,0,1,1,1]
    ]
    m = MazeGame(matrix)
    m.sol_maze(0,0)
    pprint(m.sol_matrix)
13020928892 | """
## Part 3: Data Preparation
In this section the raw data is prepared and reshaped to be fed into the different models. Furhtermore, the distribution of the input data is visualized to check if the data set is balanced. The data is converted into two main variables X (patiens and the coresponding protein quantities) and y (patients and the coresponding healt condition).
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#create pandas dataframe
path = "./RawData/tidy.csv"
pathMet = "./RawData/metadata.csv"
tidy = pd.read_csv(path, sep=",")
tidyMet = pd.read_csv(pathMet, sep=";", index_col=0)
#remove samples which are not in the metadata index column (quality controle etc)
tidy = tidy[ (tidy["R.FileName"].isin(tidyMet.index)) ]
tidyMer = pd.merge(tidy, tidyMet, how="left", on="R.FileName")
tidySub = tidyMer[["R.FileName", "uniprot", "meanAbu", "Cancer"]]
tidySub.Cancer.value_counts()
#reshape data for model
#X data
tidyReshaped = tidySub.pivot(index = "R.FileName", columns = "uniprot", values = "meanAbu")
tidyReshaped.head()
#y condition
Group = tidySub.drop(["uniprot", "meanAbu"], axis=1)
Group = Group.drop_duplicates().reset_index(drop=True)
Group.head()
#merge X and y and set dataframe to numerical values
data = pd.merge(tidyReshaped, Group, how="left", on="R.FileName")
data = data.set_index("R.FileName")
X_ = data.iloc[:, :-1].apply(np.log2)
y_ = data.iloc[:,-1]
#check first 10 entries of the dataframe
data[:10]
fig, ax = plt.subplots(1,1, figsize=(30, 5))
ax.boxplot(X_) | cewinharhar/BECS2_dataChallenge | part3.py | part3.py | py | 1,540 | python | en | code | 0 | github-code | 13 |
71808976338 | from typing import Union
from django.forms import TextInput, NumberInput
from django.db.models.aggregates import Max
from django.test import TestCase
from online_store.forms import AddDeliveryForm, AddOrderInBasket, OrderCreationMultiForm, OrderForm
from online_store.models import Product, Collection, TypeClothing, OrderForm as ModelOrderForm, Orders
def get_order_and_order_form():
    """Create the shared fixtures (collection, type, product, order form) and return (basket form, order form).

    The returned AddOrderInBasket is bound to the created product in a color
    it is actually available in ('white'), so it validates successfully.
    """
    collection = Collection.objects.create(name='test collection', description='test_collection_description',
                                           views=0)
    type_ = TypeClothing.objects.create(type='test_type', description='test_type_description',
                                        characteristics='test_characteristics', colors='black, white',
                                        delivery='7 - 14 day', size='S, M, XL', slug='test-type-slug',
                                        views=0)
    product = Product.objects.create(collection=collection, name='product 1',
                                     type=type_, price='55.33', color='white',
                                     description_print='description_print_test',
                                     views=100)
    order_form = ModelOrderForm.objects.create(full_name='ALex', first_address='first address',
                                               second_address='second address', country='UA',
                                               postal_or_zip_code='50050', city='KR', phone='phone',
                                               email='lapu2@gmail.com',
                                               company='fdfsfsdf')
    return AddOrderInBasket({'product': product.pk, 'color': 'white', "size": 'S', "quantity": 3}), order_form
class AddDeliveryFormTest(TestCase):
    """Tests for the AddDeliveryForm delivery-details form."""

    def test_all_show_fields(self):
        """Meta.fields lists exactly the expected visible fields, in order."""
        show_field_list_for_TextInput = ['id', 'full_name', 'first_address', 'second_address', 'country',
                                         'postal_or_zip_code', 'city', 'company', 'email', 'phone']
        self.assertEquals(AddDeliveryForm.Meta.fields, show_field_list_for_TextInput)

    def test_field_list_with_TextInput(self):
        """Every overridden field that is actually shown uses a TextInput widget."""
        show_field_list_for_TextInput = ['id', 'full_name', 'first_address', 'second_address', 'country',
                                         'postal_or_zip_code', 'city', 'company', 'email', 'phone']
        field_with_new_TextInput_widgets = ['first_address', 'second_address', 'postal_or_zip_code',
                                            'order_number', 'company', 'state']
        for field in field_with_new_TextInput_widgets:
            if field in show_field_list_for_TextInput:
                self.assertTrue(isinstance(AddDeliveryForm().fields[field].widget, TextInput))
class AddOrderInBasketTest(TestCase):
    """Tests for the AddOrderInBasket model form."""

    def test_show_fields(self):
        """The form exposes exactly the product/color/size/quantity fields."""
        show_fields = ['product', 'color', 'size', 'quantity']
        self.assertEqual(AddOrderInBasket.Meta.fields, show_fields)

    def test_widgets_form(self):
        """product uses a NumberInput widget and color a TextInput widget."""
        self.assertIsInstance(AddOrderInBasket().fields['product'].widget, NumberInput)
        self.assertIsInstance(AddOrderInBasket().fields['color'].widget, TextInput)

    def test_model_form_data_save(self):
        """A valid basket form saves without raising.

        Bug fix: the original wrapped save() in ``if order.is_valid():``, so an
        invalid form made this test pass silently; validity is now asserted.
        """
        order, order_number = get_order_and_order_form()
        self.assertTrue(order.is_valid())
        order.save(order_form_number=order_number)

    def test_method_color_clean(self):
        """clean() rejects a color the product is not available in."""
        order, _ = get_order_and_order_form()
        self.assertTrue(order.is_valid())
        order = AddOrderInBasket({'product': order.data['product'], 'color': 'yellow', "size": 'S', "quantity": 3})
        self.assertFalse(order.is_valid())
        self.assertEqual(order._errors['color'][0], 'There is no product( test collection product 1 ) in this color')
class OrderCreationMultiFormTest(TestCase):
    """Tests for the multi-form that combines an order form with a formset of order items."""

    @classmethod
    def setUpTestData(cls):
        """Runs once before the tests; prepares the POST-style data shared by the whole test case."""
        order, order_form = get_order_and_order_form()
        cls.data = {'order_form-full_name': ['ALex'],
                    'order_form-first_address': ['first address'],
                    'order_form-second_address': ['second address'],
                    'order_form-country': ['UA'],
                    'order_form-postal_or_zip_code': ['50050'],
                    'order_form-city': ['KR'],
                    'order_form-company': ['fdfsfsdf'],
                    'order_form-order_number': ["company"],
                    'order_form-phone': ['phone'],
                    'order_form-email': 'lapdsfu@gmail.com',
                    }
        cls.add_new_order_in_orders(cls.data, order, 0)
        cls.data['orders-TOTAL_FORMS'] = 1
        cls.data['orders-INITIAL_FORMS'] = 0
        cls.form = OrderCreationMultiForm(data=cls.data, quantity_orders=1)
        cls.order = order
        cls.order_form = order_form

    @staticmethod
    def get_template_product(product_id: int, name='product', price='50.00', color='white,black', views=150):
        """Create a throwaway Product named '<name> <product_id>' on the last collection/type fixtures."""
        name += ' ' + str(product_id)
        return Product.objects.create(collection=Collection.objects.last(), name=name, type=TypeClothing.objects.last(),
                                      price=price, color=color, description_print='description_print_test',
                                      views=views)

    @staticmethod
    def add_new_order_in_orders(data, order: AddOrderInBasket, id_order_in_orders: Union[int, str, float]):
        """Copy *order*'s fields into *data* under the 'orders-<index>-' prefix and bump TOTAL_FORMS."""
        for key in order.data:
            data[f'orders-{id_order_in_orders}-' + key] = order.data[key]
        if 'orders-TOTAL_FORMS' in data:
            data['orders-TOTAL_FORMS'] += 1

    def test_is_valid_method_on_base_form(self):
        """The single-order form built in setUpTestData validates."""
        self.assertTrue(self.form.is_valid())

    def test_is_valid_and_save_method_in_form_with_several_orders_form(self):
        """Three item forms validate and save() persists all three Orders rows."""
        get_order_and_order_form()
        product_2 = self.get_template_product(2)
        order_2 = AddOrderInBasket({'product': product_2.pk, 'color': 'black', "size": 'M', "quantity": 5})
        self.add_new_order_in_orders(self.data, order_2, 1)
        product_3 = self.get_template_product(3)
        order_3 = AddOrderInBasket({'product': product_3.pk, 'color': 'white', "size": 'S', "quantity": 2})
        self.add_new_order_in_orders(self.data, order_3, 2)
        for key in {'product': 1, 'color': 'black', "size": 'L', "quantity": 10}:
            self.data[f'orders-{3}-' + key] = {'product': 1, 'color': 'black', "size": 'L', "quantity": 10}[key]
        self.assertTrue(self.form.is_valid())
        self.assertEqual(len(self.form.cleaned_data['orders']), 3)
        client_order_number = self.form.save().pk
        self.assertEqual(len(Orders.objects.filter(order_number=client_order_number)), 3)

    def test_save_method_on_base_form(self):
        """save() creates the newest OrderForm row and returns it."""
        self.form.is_valid()
        client_order_form_number = self.form.save().pk
        last_order_form = OrderForm.objects.aggregate(Max('pk'))['pk__max']
        self.assertEqual(client_order_form_number, last_order_form)

    def test_is_valid_method_and_save_method_in_form_with_deleted_element(self):
        """Save the order declared in setUpTestData."""
        self.form.is_valid()
        self.form.save()
        """Build a second and third order item on top of the first (extending the class-level data)."""
        product_2 = self.get_template_product(2)
        order_2 = AddOrderInBasket({'product': product_2.pk, 'color': 'black', "size": 'M', "quantity": 5})
        self.add_new_order_in_orders(self.data, order_2, 1)
        product_3 = self.get_template_product(3)
        order_3 = AddOrderInBasket({'product': product_3.pk, 'color': 'black', "size": 'M', "quantity": 5})
        self.add_new_order_in_orders(self.data, order_3, 2)
        """Delete the first product of the order, as when checking out on the site."""
        self.data['orders-0-DELETE'] = ['on']
        self.form = OrderCreationMultiForm(data=self.data, quantity_orders=2)
        self.assertTrue(self.form.is_valid())
        client_order_form_number = self.form.save().pk
        last_order_form = OrderForm.objects.aggregate(Max('pk'))['pk__max']
        self.assertEqual(client_order_form_number, last_order_form)
        self.assertEqual(len(Orders.objects.filter(order_number=client_order_form_number)), 2)
        self.assertEqual(
            list(Orders.objects.filter(order_number=client_order_form_number).values_list('product__name', flat=True)),
            ['product 2', 'product 3']
        )
        """Repeat the check, this time deleting the second product of the order."""
        self.data['orders-1-DELETE'] = ['on']
        del self.data['orders-0-DELETE']
        self.data['orders-TOTAL_FORMS'] = '3'
        self.form = OrderCreationMultiForm(data=self.data, quantity_orders=2)
        self.assertTrue(self.form.is_valid())
        client_order_form_number = self.form.save().pk
        last_order_form = OrderForm.objects.aggregate(Max('pk'))['pk__max']
        self.assertEqual(client_order_form_number, last_order_form)
        self.assertEqual(len(Orders.objects.filter(order_number=client_order_form_number)), 2)
        self.assertCountEqual(
            list(Orders.objects.filter(order_number=client_order_form_number).values_list('product__name', flat=True)),
            ['product 1', 'product 3']
        )
| ALEXsawb/CHEAP.P | CheapSh0p/tests/test_online_store/test_forms.py | test_forms.py | py | 9,762 | python | en | code | 0 | github-code | 13 |
calculation_to_units = 24  # hours per day
name_of_unit = "hours"
def calculation(a):
if a > 0:
return (f"{a} days are {calculation_to_units * a} {name_of_unit}")
if a == 0:
return "You entered 0. Please enter a valid number."
else:
return ("You entered a negative value")
user_input = input("Enter something: \n")
calculated_value = calculation(int(user_input))
print(calculated_value)
| Zeeshan1920/python_practice | YouTube Course/conditions.py | conditions.py | py | 412 | python | en | code | 0 | github-code | 13 |
import re

# Pre-compile each pattern once so repeated searches reuse the compiled object.
regexes = [re.compile(p) for p in ['this','that']]
# Sample text to test against (spelling of "partten" left as in the sample).
text = "Does this text match the partten"

# Report, for each pattern, whether it occurs anywhere in the text.
for regex in regexes:
    print('Seeking "{}" ->'.format(regex))
    if regex.search(text):
        print('match!')
    else:
        print('no match')
from django.contrib import admin
from django.urls import path, include
from .views import PostList, PostDetail, PostListDetailfilter, CreatePost, AdminPostDetail, EditPost, DetelePost, CommentDetail

# URL namespace for reverse() lookups, e.g. reverse('blog_api:listcreate').
app_name = 'blog_api'

urlpatterns = [
    # Public read endpoints.
    path('posts/', PostDetail.as_view(), name='detailcreate'),
    path('', PostList.as_view(), name='listcreate'),
    path('comment/<int:pk>', CommentDetail.as_view(), name='comment'),
    path('search/', PostListDetailfilter.as_view(), name='postsearch'),
    # Admin CRUD endpoints (note: "Detele" spelling comes from the view class name).
    path('admin/create/', CreatePost.as_view(), name='createpost'),
    path('admin/edit/postdetail/<int:pk>', AdminPostDetail.as_view(), name='admindetailpost'),
    path('admin/edit/<int:pk>', EditPost.as_view(), name='editpost'),
    path('admin/detele/<int:pk>', DetelePost.as_view(), name='deletepost'),
]
| hungthe-opn/thehung.github.io | blog_api/urls.py | urls.py | py | 794 | python | en | code | 0 | github-code | 13 |
def solution(str1, str2):
    """Return 1 if *str2* occurs as a contiguous substring of *str1*, else 2.

    Replaces the hand-rolled O(n*m) window scan with Python's built-in
    substring test, which is clearer and implemented in C.  Semantics are
    identical, including the edge cases: an empty *str2* counts as found
    (returns 1), and a *str2* longer than *str1* is never found (returns 2).
    """
    return 1 if str2 in str1 else 2
29524767639 | #In enumerate function show the index value with the item in listor topple
items=['alu','potol','shak']
for index,item in enumerate(items):
print(f"{index}->{item}")
#you have to sent a one list and one string then if you find the that string in this list then return the list position .
#if you not find then return -1
def find_string(l, target):
    """Return the index of the first occurrence of *target* in *l*, or -1."""
    try:
        return l.index(target)
    except ValueError:
        return -1
list=["alu","potol","tomato","morich"]
print(find_string(list,"potol")) | milton9220/Python-basic-to-advance-tutorial-source-code | enumerate_function.py | enumerate_function.py | py | 526 | python | en | code | 0 | github-code | 13 |
5718603886 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sublime
import sublime_plugin
from io import StringIO
import tokenize
import argparse
import json
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
import local_keywords
def __collect_name(name_stack, names):
    """Flush *name_stack* into the *names* frequency map, then empty it.

    Every individual field on the stack is counted, and when the stack holds
    a dotted chain (more than one field) the joined "a.b" form is counted as
    well.  The stack is cleared in place so the caller can keep reusing it.
    """
    if not name_stack:
        return
    keys = list(name_stack)
    if len(name_stack) > 1:
        keys.append('.'.join(name_stack))
    for key in keys:
        names[key] = names.get(key, 0) + 1
    name_stack.clear()
def scan_keywords(code_string, keyword_set):
    """Tokenize *code_string* and return the identifiers found in *keyword_set*.

    *keyword_set* must be a SORTED list (the lookup below is a binary search).
    Returns a dict with a single key, 'keywords', whose value is a
    space-separated (and trailing-space-terminated) string of the matched
    identifiers.  Occurrence counts are accumulated in *names* but are not
    included in the returned value.
    """
    io_obj = StringIO(code_string)
    names = {}                      # identifier (or dotted chain) -> count
    name_token_stack = []           # consecutive NAME tokens joined by '.'
    pre_token_type = tokenize.INDENT
    pre_token_str = None
    # State machine over the token stream: a NAME directly preceded by '.'
    # extends the current dotted chain; anything else flushes the chain.
    for tok in tokenize.generate_tokens(io_obj.readline):
        token_type = tok[0]
        token_str = tok[1]
        if token_type == tokenize.OP and token_str == '.':
            pass  # keep the chain open; the next NAME continues it
        elif token_type == tokenize.NAME:
            if pre_token_type == tokenize.OP and pre_token_str == '.':
                name_token_stack.append(token_str)
            else:
                __collect_name(name_token_stack, names)
                name_token_stack.append(token_str)
        else:
            __collect_name(name_token_stack, names)
        pre_token_type = token_type
        pre_token_str = token_str
    # Flush whatever chain is still pending after the last token.
    __collect_name(name_token_stack, names)

    def bin_search(word, wordList):
        # Classic iterative binary search; requires wordList to be sorted.
        first = 0
        last = len(wordList) - 1
        found = False
        while first <= last and not found:
            middle = (first + last) // 2
            if wordList[middle] == word:
                found = True
            else:
                if word < wordList[middle]:
                    last = middle - 1
                else:
                    first = middle + 1
        return found

    kf = {}
    keyValues = ''
    # Keep only the identifiers that appear in the (sorted) keyword list.
    for key in names.keys():
        if bin_search(key, keyword_set):
            keyValues = keyValues + key + " "
    kf['keywords'] = keyValues
    return kf
def getScanKeyWords(code_string):
    """Scan *code_string* against the bundled keyword list.

    Loads the newline-separated keyword list from ``local_keywords.KEYWORDS``,
    sorts it (required by the binary search inside ``scan_keywords``) and
    returns that function's result dict ({'keywords': '<matches> '}).
    """
    print("getScanKeyWords -----")  # debug trace left in by the author
    keywords = local_keywords.KEYWORDS.splitlines()
    keywords.sort()  # scan_keywords' bin_search relies on sorted input
    results = scan_keywords(code_string, keywords)
    return results
# class LoadKeyWordsCommand(sublime_plugin.TextCommand):
# def run(self, edit):
# code_string = self.view.substr(sublime.Region(0, self.view.size()))
# # print("code_string = " + code_string)
# results = getScanKeyWords(code_string)
# print(json.dumps(results))
| qiuxfeng1985/geecode-sublime-plugin | geecode_keywords.py | geecode_keywords.py | py | 3,660 | python | en | code | 0 | github-code | 13 |
4177898318 | from datetime import datetime
import uuid
from sqlalchemy.orm.session import Session
from src.schemas.log_info import LogInfo
from ..models.log_info import LogInfoModel, model_to_entity, entity_to_model
from ..models.raw_log import RawLogModel
class LogInfoRepository:
    """Data-access layer for LogInfo rows, backed by a SQLAlchemy session."""

    session: Session

    def __init__(self, session: Session):
        self.session = session

    def get_log_infos_by_error(
        self, error_uid: uuid.UUID, offset: int, limit: int
    ) -> list[LogInfo]:
        """Return a page of LogInfo entities belonging to *error_uid*."""
        log_info_models = (
            self.session.query(LogInfoModel)
            .filter(LogInfoModel.error_uid == error_uid)
            .offset(offset)
            .limit(limit)
            .all()
        )
        return list(map(model_to_entity, log_info_models))

    def get_log_infos_count_by_error(
        self, error_uid: uuid.UUID, start_date: datetime | None = None
    ) -> int:
        """Count LogInfo rows for *error_uid*.

        When *start_date* is given, only rows whose joined RawLog was
        created on/after that date are counted.
        """
        if start_date is None:
            return (
                self.session.query(LogInfoModel)
                .filter(LogInfoModel.error_uid == error_uid)
                .count()
            )
        return (
            self.session.query(LogInfoModel)
            .filter(LogInfoModel.error_uid == error_uid)
            .join(RawLogModel)
            .filter(RawLogModel.created_date >= start_date)
            .count()
        )

    def get_min_log_date_by_error(self, error_uid: uuid.UUID) -> LogInfo | None:
        """Return the LogInfo whose RawLog has the earliest created_date.

        NOTE(review): despite the name, this returns the whole LogInfo
        entity (or None), not a date.
        """
        log_info = (
            self.session.query(LogInfoModel)
            .filter(LogInfoModel.error_uid == error_uid)
            .join(RawLogModel)
            .order_by(RawLogModel.created_date.asc())
            .first()
        )
        if not log_info:
            return None
        return model_to_entity(log_info)

    def add_log_info(self, log_info: LogInfo) -> LogInfo:
        """Persist *log_info* and return the stored entity (commits)."""
        log_info_model = entity_to_model(log_info)
        self.session.add(log_info_model)
        self.session.commit()
        return model_to_entity(log_info_model)
| KeepError/TenderHackKazan23-Backend | src/postgres/repositories/log_info.py | log_info.py | py | 1,962 | python | en | code | 0 | github-code | 13 |
13985248163 | # 8 puzzle
import copy
import time
class Node:
    """One 8-puzzle board state.

    Class-level state:
      size      -- board side length (3x3)
      empty     -- symbol marking the blank tile
      last_info -- the goal board; assigned externally (see main()) BEFORE
                   any Node is constructed, since __init__ calls
                   estimate_cost() which reads it.
    """
    size = 3
    empty = '0'
    last_info = []

    def __init__(self, info):
        self.info = info                    # the node's key: the board matrix
        self.h = self.estimate_cost()       # heuristic estimate for this node
        self.suc = []                       # list of successor nodes
        self.index = self.find_empty_index()  # (row, col) of the blank tile
        '''
        Configuratie pentru info: o matrice cu cheile tablitelor. Marchez spatiul liber cu simbolul din Node.empty
        Schematic: info = [
            [11], [12], [13],
            [21], [22], [23],
            [31], [32], [33]
        ]
        '''

    def __repr__(self):
        # Render the board row by row, tiles separated by spaces.
        node = ""
        for line in self.info:
            for element in line:
                node += f"{element} "
            node += "\n"
        return node

    def __eq__(self, other):
        # Two nodes are equal when their boards are identical.
        return self.info == other.info

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Identity hash: distinct Node objects hash differently even when
        # their boards are equal (inconsistent with __eq__ by design here).
        return id(self)

    def append(self, node):
        """Add *node* as a successor, skipping duplicates."""
        if node not in self.suc:
            self.suc.append(node)

    def estimate_cost(self):
        """Hamming-style heuristic: number of cells differing from the goal
        (the blank cell is included in the count)."""
        diff = 0
        for i in range(Node.size):
            for j in range(Node.size):
                diff += self.info[i][j] != self.last_info[i][j]
        return diff

    def find_empty_index(self):
        """Return (row, col) of the blank tile, or (None, None) if absent."""
        for i in range(Node.size):
            for j in range(Node.size):
                if self.info[i][j] == Node.empty:
                    return i, j
        return None, None

    def count_inversions(self):
        """Count tile inversions (string comparison) for the solvability test."""
        inv = 0
        for i in range(Node.size ** 2):
            for j in range(i + 1, Node.size ** 2):
                if self.info[int(i / 3)][i % 3] > self.info[int(j / 3)][j % 3]:
                    inv += 1
        return inv
class TreeNode:
    """Search-tree bookkeeping wrapped around a board Node (A* record)."""

    def __init__(self, node, parent, g, f, seen=False):
        self.node = node      # the Node (board state) this record wraps
        self.parent = parent  # parent Node on the best known path (or None)
        self.g = g            # cost from the root to this node
        self.f = f            # estimated total cost root -> goal through here
        self.seen = seen      # whether this node has already been expanded

    def expand(self):
        """Generate the up-to-four neighbours of the blank and return them."""
        directions = [(1, 0), (-1, 0), (0, -1), (0, 1)]
        for direction in directions:
            new_node = slide_slate(self.node, direction)
            if new_node:
                self.node.append(new_node)
        return self.node.suc

    def test_fin(self):
        """True when this record's board equals the goal board."""
        return self.node.info == Node.last_info

    def path(self, tree):
        """Reconstruct the root-to-here path by recursing through parents.

        *tree* maps an immutable board (tuple of row tuples) to its TreeNode.
        """
        path = []
        if self.parent is not None:
            path = tree[tuple(tuple(element) for element in self.parent.info)].path(tree)
        path.append(self.node)
        return path
def slide_slate(node, direction):
    """Return the Node produced by sliding the blank one step in *direction*.

    *direction* is a (row_delta, col_delta) pair.  Returns None when the
    move would push the blank off the board.
    """
    row, col = node.index
    dest_row, dest_col = row + direction[0], col + direction[1]
    if not (0 <= dest_row < Node.size and 0 <= dest_col < Node.size):
        return None
    board = copy.deepcopy(node.info)
    # Move the neighbouring tile into the blank's cell, blank takes its place.
    board[row][col] = board[dest_row][dest_col]
    board[dest_row][dest_col] = node.empty
    return Node(board)
def tree_sort(elem, tree):
    """Sort key helper: the estimated total cost (f) of *elem*'s tree record."""
    record = tree[elem]
    return record.f
def read_node(path):
    """Read a whitespace-separated board file into a list of row lists.

    Each line of the file becomes one row; tokens are kept as strings so
    the blank marker ('0') compares like any other tile label.

    The original opened and closed the file manually, leaking the handle
    if iteration raised; `with` guarantees closure on every path.
    """
    with open(path, "r") as file:
        return [line.split() for line in file]
def search():
    """A* search from the start board to Node.last_info.

    Reads the start board from 'stare_initiala.txt'.  Returns the list of
    Nodes on the cheapest path found, or None when the puzzle is unsolvable.
    Assumes main() has already set Node.last_info.
    """
    start_node = Node(read_node("stare_initiala.txt"))
    # Map each immutable board (tuple of row tuples) to its TreeNode record.
    tree = {tuple(tuple(element) for element in start_node.info): TreeNode(start_node, None, 0, 0)}
    open_nodes = [start_node]
    # Solvability test via inversion parity.  NOTE(review): this assumes the
    # goal board has an even inversion count — confirm against stare_finala.
    if start_node.count_inversions() % 2 == 1:
        return None
    while open_nodes:
        # Re-sort the open list by f each iteration and pop the cheapest.
        # (O(n log n) per expansion; a heap would be faster, left as-is.)
        open_nodes.sort(key=lambda x: tree_sort(tuple(tuple(element) for element in x.info), tree))
        current_node = open_nodes.pop(0)
        current_info = tuple(tuple(element) for element in current_node.info)
        tree[current_info].seen = True
        if tree[current_info].test_fin():
            return tree[current_info].path(tree)
        suc = tree[current_info].expand()
        for node in suc:
            current_g = tree[current_info].g + 1
            current_f = current_g + node.h
            node_info = tuple(tuple(element) for element in node.info)
            if node_info not in tree:
                # First time we see this board: record it and open it.
                open_nodes.append(node)
                tree[node_info] = TreeNode(node, current_node, current_g, current_f)
            else:
                # Known board reached more cheaply: update its record and
                # re-open it if it had already been expanded.
                if current_f < tree[node_info].f:
                    tree[node_info].f = current_f
                    tree[node_info].parent = current_node
                    if tree[node_info].seen:
                        open_nodes.append(node)
def main():
    """Load the goal board, run the timed search, and write 'output.txt'.

    Output strings are intentionally in Romanian (user-facing text).
    """
    # The goal board must be set before search() constructs any Node,
    # because Node.__init__ computes its heuristic against last_info.
    Node.last_info = read_node("stare_finala.txt")
    start_time = time.perf_counter()
    path = search()
    end_time = time.perf_counter()
    print(f"Timpul de rulare: {end_time - start_time} secunde.")
    file = open("output.txt", "w")
    if path:
        file.write("Drumul de cost minim: \n")
        for node in path:
            file.write(f"{node}\n")
        # Number of moves = path length minus the start state.
        file.write(f"S-au realizat {len(path) - 1} operatii.\n")
    else:
        file.write("Nu exista solutie\n")
    file.close()
if __name__ == "__main__":
main()
| Loila11/fmi | Licenta 2/AI/8-puzzle/main.py | main.py | py | 5,392 | python | en | code | 0 | github-code | 13 |
25297163640 | from django.urls import path, re_path
from species.api_views.upload_species import (
SaveCsvSpecies,
SpeciesUploader,
UploadSpeciesStatus,
)
from .views import (
TaxonFrontPageListAPIView,
TaxonListAPIView,
TaxonTrendPageAPIView
)
urlpatterns = [
path('api/species/front-page/list/', TaxonFrontPageListAPIView.as_view(),
name='species-front-page'),
re_path(r'^api/species/trend-page/?$',
TaxonTrendPageAPIView.as_view(),
name='taxon-trend-page'),
path('species/', TaxonListAPIView.as_view(),
name='species'),
path('api/upload-species/', SpeciesUploader.as_view(),
name='upload-species'),
path('api/save-csv-species/', SaveCsvSpecies.as_view(),
name='save-csv-species'),
re_path(r'api/upload-species-status/(?P<token>[\da-f-]+)/?$',
UploadSpeciesStatus.as_view(),
name='upload-species-status'
),
]
| kartoza/sawps | django_project/species/urls.py | urls.py | py | 944 | python | en | code | 0 | github-code | 13 |
31321754974 | from abc import ABC, abstractmethod
from jabberjaw import utils
from jabberjaw.utils import mkt_classes
from jabberjaw.utils.mkt_classes import MktCoord, get_coord_default_source
from jabberjaw.data_manager import mkt_data_manager as dm
import datetime
import dpath.util as dp
class Marketiser(ABC):
    """A template (base class) for the different asset marketisers.

    Subclasses are expected to override mkt_class()/mkt_type() to identify
    the asset class and type they handle.
    """
    # NOTE(review): these two attributes are immediately shadowed by the
    # classmethods of the same names defined below, so they are dead; the
    # rest of the class always calls cls.mkt_class()/cls.mkt_type().
    mkt_type = None
    mkt_class = None

    @classmethod
    def mkt_class(cls) -> str:
        """Asset class identifier; base implementation returns ''."""
        return ""

    @classmethod
    def mkt_type(cls) -> str:
        """Asset type identifier; base implementation returns ''."""
        return ""

    @classmethod
    def marketise_ticker(cls, ticker: str, source: str, start_date: datetime.date, end_date: datetime.date, overwrite: bool = False) -> None:
        """Marketise a single *ticker* by building its MktCoord and delegating."""
        mkt_coord = MktCoord(cls.mkt_class(), cls.mkt_type(), ticker, source=source)
        cls.marketise_mkt_point(mkt_coord, start_date, end_date, overwrite)

    @classmethod
    def marketise_all_mkt_type_tickers(cls, start_date: datetime.date, end_date: datetime.date, overwrite: bool = False) -> None:
        """
        Marketise every configured ticker for this class/type.

        :param start_date: the date from which we marketise
        :param end_date: the date until which we marketise (inclusive)
        :param overwrite: whether to overwrite existing data
        """
        cfg = mkt_classes.mkt_data_cfg()
        xpath = f'{cls.mkt_class()}/{cls.mkt_type()}'.upper()
        search = dp.search(cfg, xpath, yielded=True)
        # dp.search yields (path, value) pairs; take the ticker dict of the
        # single expected match.
        tickers_to_marketise = [i for i in search].pop()[1]
        for ticker, metadata in tickers_to_marketise.items():
            cls.marketise_ticker(ticker, metadata['default_source'], start_date, end_date, overwrite)
        print('finished the marketisiation process for {}'.format(xpath))

    @classmethod
    def marketise_mkt_point(cls, mkt_coord: MktCoord, start_date: datetime.date,
                            end_date: datetime.date, overwrite: bool = False) -> None:
        """Marketise a single MktCoord over [start_date, end_date] inclusive."""
        points, tickers, source = cls.get_ticker(mkt_coord)
        # With points, iterate (ticker, point) pairs; otherwise a single
        # (tickers, tickers) pair with point left as None below.
        tick_pnts = zip(tickers, points) if len(points) else [(tickers, tickers)]
        for ticker, pnt in tick_pnts:
            mkt_coord.point = pnt if len(points) else None
            df = dm.get_history_mkt_data(mkt_coord)
            if not df.empty and not overwrite:
                # NOTE(review): returning here aborts ALL remaining points,
                # not just this one — confirm that is intended.
                print(ticker + " already MARKETISED")
                return None
            print("TRYING to Marketise " + ticker)
            df_new = dm.extract_data(ticker, source, datetime.datetime.fromordinal(start_date.toordinal()),
                                     datetime.datetime.fromordinal(end_date.toordinal()))
            if df.empty:
                df = df_new
            else:
                # Merge new rows into any existing history before saving.
                df.update(df_new)
            dm.save_mkt_data(mkt_coord, df)

    @classmethod
    def get_ticker(cls, mkt_coord: MktCoord) -> tuple:
        """Return (points, tickers, source) resolved for *mkt_coord*.

        The source falls back to the configured default when the coord's
        source is None or "DEFAULT".
        """
        pts = mkt_classes.get_points(mkt_coord)
        tickers = mkt_classes.get_ticker(mkt_coord)
        source = get_coord_default_source(mkt_coord) if mkt_coord.source in [None, "DEFAULT"] else mkt_coord.source
        return pts, tickers, source
| imry-rosenbuam/jabberjaw | jabberjaw/data_manager/marketiser.py | marketiser.py | py | 3,327 | python | en | code | 0 | github-code | 13 |
8029146998 | # Will create an xml file for a whole folder of pictures (.jpg, .png, and .bmp)
import os
#===========================<Function library>==================================
def initXML(ofile, loc):
    """Write the opening <background>/<starttime> header to *ofile*.

    *loc* is accepted for call-site compatibility but is not used here.
    """
    header = (
        '<background>\n\n\t<starttime>\n\t\t<hour>0</hour>'
        '\n\t\t<minute>00</minute>\n\t\t<second>01</second>'
        '\n\t</starttime>'
    )
    ofile.write(header)
#Add picture to file
#NEED TO ADD CODE TO REMOVE THEN ADD FINISHING CODE!!!!!
def addPic(ofile, pic, dur):
    """Append a <static> slideshow entry showing *pic* for *dur* time units."""
    entry = '\n<static>\n\t<duration>{}</duration>\n\t<file>{}</file>\n</static>'.format(dur, pic)
    ofile.write(entry)
#Add trasition time between pic1 & pic2
def addTrans(ofile, pic1, pic2, dur):
    """Append a <transition> fading from *pic1* to *pic2* over *dur* units."""
    ofile.write(
        '<transition>\n\t<duration>{}</duration>'
        '\n\t<from>{}</from>'
        '\n\t<to>{}</to>\n</transition>'.format(dur, pic1, pic2)
    )
#Add ending transition for after user adds a pic
#This will be used to loop back to beginning of list
def addEnd(ofile, pic1, pic2, dur):
    """Append the final looping <transition> and close the <background> root."""
    ofile.write(
        '<transition>\n\t<duration>{}</duration>'
        '\n\t<from>{}</from>'
        '\n\t<to>{}</to>\n</transition>\n\n</background>'.format(dur, pic1, pic2)
    )
#===============================================================================
#WHAT IF FILE ALREADY EXISTS?????
#WHAT IF LOCATION DOESN'T EXIST???
loc = str(raw_input('Please enter the name & location of the file:\n'))
if loc[-4:] != '.xml':
loc += '.xml'
#Ask for folder location
picDir = str(raw_input('Please enter the picture directory:\n'))
if picDir[-1] != '/':
picDir += '/'
dur = str(raw_input('How many miliseconds shall each picture last?\n'))
bgfile = open(loc, 'w')
#Start the file contents
initXML(bgfile, loc)
#Get pics into a list
picL = os.listdir(picDir)
#Get list with only pics
picLN = []
for x in picL:
if (x[-4:] == '.jpg') or (x[-4:] == '.png') or (x[-4:] == '.bmp'):
picLN.append(x)
for i in range(len(picLN[:-2])):
addPic(bgfile, picDir+picLN[i], dur)
addTrans(bgfile, picDir+picLN[i], picDir+picLN[i+1], 4)
addPic(bgfile, picDir+picLN[-1], dur)
#End file
addEnd(bgfile, picDir+picLN[-1], picDir+picLN[0], 4)
bgfile.close()
| adutta/bgSlideshow | delbg.py | delbg.py | py | 2,240 | python | en | code | 2 | github-code | 13 |
73488229457 | # There is an ATM machine that stores banknotes of 5 denominations: 20, 50, 100, 200, and 500 dollars. Initially the ATM is empty.
# The user can use the machine to deposit or withdraw any amount of money.
# When withdrawing, the machine prioritizes using banknotes of larger values.
# For example, if you want to withdraw $300 and there are 2 $50 banknotes, 1 $100 banknote, and 1 $200 banknote, then the machine will
# use the $100 and $200 banknotes.
# However, if you try to withdraw $600 and there are 3 $200 banknotes and 1 $500 banknote, then the withdraw request will be rejected
# because the machine will first try to use the $500 banknote and then be unable to use banknotes to complete the remaining $100. Note
# that the machine is not allowed to use the $200 banknotes instead of the $500 banknote.
# Implement the ATM class:
# ATM() Initializes the ATM object.
# void deposit(int[] banknotesCount) Deposits new banknotes in the order $20, $50, $100, $200, and $500.
# int[] withdraw(int amount) Returns an array of length 5 of the number of banknotes that will be handed to the user in the order $20, $50,
# $100, $200, and $500, and update the number of banknotes in the ATM after withdrawing. Returns [-1] if it is not possible (do not withdraw
# any banknotes in this case).
# Example 1:
# Input
# ["ATM", "deposit", "withdraw", "deposit", "withdraw", "withdraw"]
# [[], [[0,0,1,2,1]], [600], [[0,1,0,1,1]], [600], [550]]
# Output
# [null, null, [0,0,1,0,1], null, [-1], [0,1,0,0,1]]
# Explanation
# ATM atm = new ATM();
# atm.deposit([0,0,1,2,1]); // Deposits 1 $100 banknote, 2 $200 banknotes,
# // and 1 $500 banknote.
# atm.withdraw(600); // Returns [0,0,1,0,1]. The machine uses 1 $100 banknote
# // and 1 $500 banknote. The banknotes left over in the
# // machine are [0,0,0,2,0].
# atm.deposit([0,1,0,1,1]); // Deposits 1 $50, $200, and $500 banknote.
# // The banknotes in the machine are now [0,1,0,3,1].
# atm.withdraw(600); // Returns [-1]. The machine will try to use a $500 banknote
# // and then be unable to complete the remaining $100,
# // so the withdraw request will be rejected.
# // Since the request is rejected, the number of banknotes
# // in the machine is not modified.
# atm.withdraw(550); // Returns [0,1,0,0,1]. The machine uses 1 $50 banknote
# // and 1 $500 banknote.
class ATM:
    """Cash machine holding $20/$50/$100/$200/$500 banknotes.

    Withdrawals greedily spend the largest denominations first; a request
    that cannot be satisfied that way returns [-1] and leaves storage
    untouched.
    """

    def __init__(self):
        # denomination value -> count currently stored
        self.NotesInStorage = {}
        self.Notes = [20, 50, 100, 200, 500]

    def deposit(self, banknotesCount: List[int]) -> None:
        """Add notes; *banknotesCount* is aligned with self.Notes order."""
        for value, count in zip(self.Notes, banknotesCount):
            if count:
                self.NotesInStorage[value] = self.NotesInStorage.get(value, 0) + count

    def withdraw(self, amount: int) -> List[int]:
        """Dispense *amount* using largest notes first, or return [-1].

        Unlike a mutate-then-restore approach, storage is only modified
        once the whole request is known to be satisfiable.
        """
        used = []
        remaining = amount
        for value in reversed(self.Notes):
            available = self.NotesInStorage.get(value, 0)
            take = min(remaining // value, available)
            used.append(take)
            remaining -= take * value
        if remaining:
            return [-1]
        used.reverse()
        for value, take in zip(self.Notes, used):
            if take:
                self.NotesInStorage[value] -= take
        return used
| aslamovamir/LeetCode | design_an_ATM_machine.py | design_an_ATM_machine.py | py | 4,845 | python | en | code | 0 | github-code | 13 |
1615180940 | from openpyxl import load_workbook
import pandas as pd
import geopandas
import matplotlib.pyplot as plt
import sys,os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import funcs
years=["schools2018.xlsx","schools2019.xlsx"]
base = geopandas.read_file('../Ireland.GeoJSON')
for year in years:
wb = load_workbook('../'+year)
ws = wb.active
year=year.split('schools')[1].split('.')[0]
if year == '2018': # if coords already
#better if statement?
latCol=list(ws['D'])
longCol=list(ws['E'])
for i in range(min(len(latCol),len(longCol))):
latCol[i]=latCol[i].value
longCol[i]=longCol[i].value
else: # if eircodes
eircodes=[row.value for row in ws['E']]
latCol, longCol = funcs.getCoordsFromPostcodes(eircodes)
lats=[]
longs=[]
for i in range(min(len(latCol),len(longCol))):
lat=latCol[i]
long=longCol[i]
if lat and long and\
lat < 90 and lat > -90 and\
long < 180 and long > -180: #weird outliers...
lats.append(lat)
longs.append(long)
ax=base.plot(color='white', edgecolor='black')
funcs.mapCoords(lats, longs, ax)
plt.suptitle(year)
plt.show()
| euanleith/schools | graphs/map.py | map.py | py | 1,238 | python | en | code | 0 | github-code | 13 |
30968477274 | import getpass
import os
import sys
import time
import pandas as pd
import tms_login as tms
from datetime import date, datetime, timedelta
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# check if on citrix ('nt') or pi
if os.name == 'nt':
#set to chrome defualt download folder - BOA CITRIX DESKTOP DEFAULT SETTINGS
DOWNLOAD_FOLDER = f'C:\\Users\\{getpass.getuser().title()}\\Downloads\\'
else:
DOWNLOAD_FOLDER = '/home/pi/Downloads/'
# list of files before downloading
before = os.listdir(DOWNLOAD_FOLDER)
today = date.today()
# set up start date and end date for filter
# if no args passed automatically use today
if len(sys.argv) < 2:
s_date = today
e_date = s_date
datestamp = today
elif len(sys.argv) == 3:
# takes args start and end dates as MMDD ex. 0420 = APR 20
s_date = date(today.year, int(sys.argv[1][:2]), int(sys.argv[1][2:]))
e_date = date(today.year, int(sys.argv[2][:2]), int(sys.argv[2][2:]))
datestamp = e_date
else:
print('Error: Need 2 arguments: start and end dates in MMDD format.')
sys.exit()
start = s_date.strftime("%m/%d/%Y 00:00:00")
end = e_date.strftime("%m/%d/%Y 23:59:59")
url = 'https://boa.3plsystemscloud.com/'
browser = tms.login(url, False)
# enter report code into report_code variabe
# "zLoadsInvoiced" report
report_code = '7BD7D97CBE8E'
report_url = f'{url}App_BW/staff/Reports/ReportViewer.aspx?code={report_code}'
browser.get(report_url)
startbox = browser.find_element_by_xpath("//td[1]/input[@class='filter between'][1]")
endbox = browser.find_element_by_xpath("//td[1]/input[@class='filter between'][2]")
startbox.clear()
startbox.send_keys(start)
endbox.clear()
endbox.send_keys(end)
# save & view report, then download
save_button = browser.find_element_by_id('ctl00_ContentBody_butSaveView')
save_button.click()
browser.implicitly_wait(3)
download = browser.find_element_by_id('ctl00_ContentBody_butExportToExcel')
download.click()
time.sleep(3)
browser.close()
print('Retrieved invoice report.')
# list of files in Downloads folder after downloading to extract filename
after = os.listdir(DOWNLOAD_FOLDER)
change = set(after) - set(before)
if len(change) == 1:
file_name = change.pop()
print(f'{file_name} downloaded.')
elif len(change) == 0:
print('No file downloaded.')
else:
print('More than one file downloaded.')
#file name contains .xls extention but is actually html format
filepath = f'{DOWNLOAD_FOLDER}{file_name}'
data = pd.read_html(filepath)
df = data[0]
df.drop(axis=1, columns=['R/ Lumper', 'C/ Lumper', 'R/ Processing Fee', 'C/ Processing Fee'], inplace=True)
formatting = {'AR Balance': '${:,.2f}'}
df.style.format(formatting)
df.fillna('', inplace=True)
# df.drop(df.tail(1).index, inplace=True)
print(df)
print('Exporting DataFrame to HTML...')
df.to_html('invoiced-table.html', index=False)
head = open('./templates/head.html', 'r').read()
body = open('invoiced-table.html', 'r').read()
tail = '</body>\n</html>'
html_list = [head, body, tail]
export = open('invoiced.html', 'wt')
full = '\n'.join(html_list)
export.write(full)
print('Export HTML file saved!')
print('Exporting TXT file with load numbers...')
load_list_int = list(df['Load #'])[:-1]
load_list_str = [str(load_num) for load_num in load_list_int]
load_nos_txt = open('load_nos.txt', 'w')
output = "['" + "', '".join(load_list_str) + "']"
load_nos_txt.write(output)
print('Export TXT file saved!')
export.close()
load_nos_txt.close()
| boalogistics/auto-report | loadsinvoiced.py | loadsinvoiced.py | py | 3,493 | python | en | code | 0 | github-code | 13 |
32136811328 | import pickle
import tensorflow
import pandas as pd
import re
from tensorflow.keras.preprocessing.sequence import pad_sequences
# read the model objects for predictions
with open("./models/recomm.pickle", "rb") as file:
recomm_matrix = pickle.load(file)
with open("./models/tokenizer.pickle", "rb") as file:
tokenizer = pickle.load(file)
sentiment_model = tensorflow.keras.models.load_model('./models/sentiment')
pd_data = pd.read_csv('./data/sample30.csv')
def combine_and_clean_text(row):
#apply text preprocessing same as used when training the DL model for sentiment
# 1. combine reviews title with reviews text
text = " ".join([str(row['reviews_text']),
str(row['reviews_title']) if row['reviews_text'] is not None else ''
])
# 2. lowercase text and remove all alphanumeric characters
text = re.sub("[^a-z ]", "", text.lower()).replace(" ", " ")
return text
def tokenize_and_predict(X):
# 1. Tokenize text
X = tokenizer.texts_to_sequences(X)
# 2. Pad text to fix length required by the model
X = pad_sequences(X, truncating='post', padding='post', value=0, maxlen=575)
# 3. score with model prediction
scores = sentiment_model.predict(X, batch_size=250)
return scores
def row_to_json(row):
return {
'id': row['id'],
'name': row['name'],
'brand': row['brand'],
'manufacturer': row['manufacturer'],
'positive_perc': row['positive_perc']
}
def get_recommendation(userid, top=20):
# select top 20 recommended items from recommendation engine
rec_items = pd.DataFrame(recomm_matrix.loc[userid].sort_values(ascending=False)[0:top]).reset_index()
# get the sentiments for these top 20 selected products
items_text = pd.merge(pd_data, rec_items, on='id', how='inner')[['id', 'brand', 'manufacturer', 'name',
'reviews_text', 'reviews_title']]
# preprocess text for model prediction
items_text['clean_text'] = items_text.apply(lambda x: combine_and_clean_text(x), axis=1)
# tokenize and pad text and apply model prediction
items_text['sentiment_score'] = tokenize_and_predict(items_text['clean_text'].tolist())
# apply cutoff for binary classification
items_text['sentiment'] = items_text['sentiment_score'].apply(lambda x: 1 if x >= 0.6 else 0)
# group by item id to get percentage of positive reviews
items_text['positive_perc'] = items_text.groupby('id')['sentiment'].transform(lambda x: int(100 * x.sum()/x.count()))
# sort by positive percentage and select top 5 recommended items
reccoms = items_text.drop_duplicates('id', keep='first').sort_values('positive_perc', ascending=False).head(5)
# convert results to json for display on UI
return reccoms.apply(lambda x: row_to_json(x), axis=1).to_list()
| abhishek-74/capstone | model.py | model.py | py | 2,911 | python | en | code | 0 | github-code | 13 |
19220660665 | """
Various fitness functions to control the specimen selection in the reproduction stage
"""
import abc
import math
from path_finder.chromosome import Chromosome
from path_finder.grid import GridWrapper
from path_finder.point import distance
class Fitness(abc.ABC):
"""
A fitness function
"""
def __init__(self, grid: GridWrapper):
"""
:param grid: The environment we use
"""
self.grid = grid
self.grid_size = grid.grid_x_size * grid.grid_y_size
@abc.abstractmethod
def __call__(self, chrom: Chromosome) -> float:
"""
:param chrom: The chromosome to calculate the fitness of
:return: The fitness of the chromosome
"""
raise NotImplementedError()
class NaiveFitness(Fitness):
"""
A Naive fitness functions that converges fast when it hits a wall
"""
def __call__(self, chrom: Chromosome) -> float:
"""
see: Fitness.__call__
"""
return (
self.grid_size
- distance(self.grid.simulate_movement(chrom), self.grid.target)
- (len(chrom) / self.grid_size)
)
class PathFinderFitnessNoLengthPenalty(Fitness):
"""
A fitness function that only looks at chromosome length once we hit the target
"""
def __call__(self, chrom: Chromosome) -> float:
"""
see: Fitness.__call__
"""
dist = distance(self.grid.simulate_movement(chrom), self.grid.target)
if dist != 0:
return self.grid_size - dist
else:
return self.grid_size - (len(chrom) / self.grid_size)
class PathFinderFitnessRewardLength(Fitness):
"""
A fitness function that rewards long chromosomes over short chromosomes which do
not hit the target
"""
def __call__(self, chrom: Chromosome) -> float:
"""
see: Fitness.__call__
"""
dist = distance(self.grid.simulate_movement(chrom), self.grid.target)
if dist != 0:
return self.grid_size - dist + min((len(chrom) / self.grid_size), 0.2)
else:
# reward extra 1 for destination to make that beat length reward
return self.grid_size + 1 - (len(chrom) / self.grid_size)
class PathFinderFitnessRewardLengthDistanceGroups(Fitness):
"""
A fitness function that rewards long chromosomes over short chromosomes which do
not hit the target.
uses distance groups.
"""
DISTANCE_GROUPS_IN_AXIS = 5
@property
def dist_group_length(self) -> int:
return self.grid.grid_x_size // self.DISTANCE_GROUPS_IN_AXIS
def __call__(self, chrom: Chromosome) -> float:
"""
see: Fitness.__call__
"""
dist = distance(self.grid.simulate_movement(chrom), self.grid.target)
if dist != 0:
return (
self.grid_size
- math.ceil(dist / self.dist_group_length)
+ min((len(chrom) / self.grid_size), 0.2)
)
else:
# reward extra 1 for destination to make that beat length reward
return (
self.grid_size + self.dist_group_length - (len(chrom) / self.grid_size)
)
class PathFinderFitnessRewardLengthDistanceGroupsWithLimit(Fitness):
    """
    Like the distance-group fitness, but stops rewarding chromosome length
    once the chromosome grows beyond half the grid size (keeps chromosomes
    from ballooning for performance reasons).
    """
    DISTANCE_GROUPS_IN_AXIS = 5
    @property
    def dist_group_length(self) -> int:
        return self.grid.grid_x_size // self.DISTANCE_GROUPS_IN_AXIS
    def __call__(self, chrom: Chromosome) -> float:
        """
        see: Fitness.__call__
        """
        length_ratio = len(chrom) / self.grid_size
        end_dist = distance(self.grid.simulate_movement(chrom), self.grid.target)
        if end_dist == 0:
            # Reaching the target always beats any length reward.
            return self.grid_size + self.dist_group_length - length_ratio
        base_score = self.grid_size - math.ceil(end_dist / self.dist_group_length)
        if length_ratio > 0.5:
            # Chromosome already long: no further length bonus.
            return base_score
        return base_score + min(length_ratio, 0.2)
| galbash/genetic-algo-grid-path | path_finder/fitness.py | fitness.py | py | 4,450 | python | en | code | 0 | github-code | 13 |
28596924770 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 28 16:11:03 2023
@author: Dartoon
"""
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
import glob, pickle
from galight.tools.cutout_tools import psf_clean
from astropy.wcs import WCS
from galight.tools.astro_tools import plt_fits
from galight.data_process import DataProcess
from galight.tools.astro_tools import read_pixel_scale
from galight.tools.astro_tools import plt_many_fits
# --- Configuration: select the NIRCam filter and locate the COSMOS-Web mosaic ---
filt_i = 3
filt = ['F115W', 'F150W','F277W', 'F444W'][filt_i]
filefolder = '/Volumes/Seagate_Expansion_Drive/data_backup/JWST_COSMOS/'
filename = 'mosaic_nircam_f{0}w_COSMOS-Web_30mas_v0_1_i2d.fits'.format(filt[1:-1])
# filename = '1727_cosmos_mosaic_miri_exptime_scale1.0.fits'
fitsFile = pyfits.open(filefolder+filename)
header = fitsFile[1].header # if target position is add in WCS, the header should have the wcs information, i.e. header['EXPTIME']
img = fitsFile[1].data  # science image from the first FITS extension
print(img.shape)
wcs = WCS(header)
#%%
# Photometric zeropoint derived from the pixel scale (AB system).
flux_mjsr = header['PHOTMJSR']
pixscale = read_pixel_scale(header)
zp = -2.5*np.log10(2.350443 * 10**(-5) *pixscale**2/3631) #- 2.5*np.log10(flux_mjsr) #zp for flux
# Split the mosaic into cut_p x cut_p sub-frames (with a 100-pixel margin) for the PSF search.
rx, ry = img.shape
cut_p = 4 #cut_p
x_size, y_size = int(rx/4+100), int(ry/4+100)
all_PSF_pos_list_ = []
FWHM_filer = [2.1,2.25,4.5, 5.5][filt_i] #FWHM upper limit per filter (e.g. 2.1 for F115W)
# flux_filt = [[], []]
rerun = False
if rerun == True:
    # Search every sub-frame for PSF-like stars and collect their mosaic positions.
    for i in range(cut_p):
        for j in range(cut_p):
            print('x',x_size*i,x_size*(i+1), 'y', y_size*j,y_size*(j+1))
            img_i = img[x_size*i:x_size*(i+1), y_size*j:y_size*(j+1)]
            ct_pos_i, ct_pos_j = x_size*i, y_size*j
            data_process = DataProcess(fov_image = img_i, target_pos = [10,10], pos_type = 'pixel', header = header,
                                    rm_bkglight = False, if_plot=False, zp = zp)#, fov_noise_map = fov_noise_map)
            # data_process.generate_target_materials(radius=30, create_mask = False, nsigma=2.8, if_select_obj=False,
            #                       exp_sz= 1.2, npixels = 15, if_plot=True)
            #PSF works.
            if img_i.max() == 0:
                print("ignore", i,j)
                continue
            data_process.find_PSF(radius = 50, user_option = True, if_filter=True,
                                  FWHM_filer = FWHM_filer, flux_filter= [50,20000],
                                  nearyby_obj_filter=False, FWHM_sort=True)
            # data_process.plot_overview()
            PSFs = data_process.PSF_list
            # plt_many_fits(PSFs)
            # PSF_list = data_process.PSF_list
            PSF_pos_list = data_process.PSF_pos_list
            # Translate sub-frame positions back into full-mosaic coordinates.
            fov_PSF_pos_list = [PSF_pos_list[k]+np.array([ct_pos_j, ct_pos_i]) for k in range(len(PSF_pos_list))]
            all_PSF_pos_list_ = all_PSF_pos_list_ + fov_PSF_pos_list
#%%
# Cross-match candidate PSF positions against the Chandra COSMOS legacy catalog,
# keeping the pixel positions of cataloged counterparts that land on valid data.
cata_folder = 'Chandra_COSMOS_Catalog/'
cata_file = 'chandra_COSMOS_legacy_opt_NIR_counterparts_20160113_4d.fits'
hdul = pyfits.open(cata_folder+cata_file)
table = hdul[1].data
name = hdul[1].columns
cata_list = []
frame_flux = []
target_pos_list = []
for i in range(len(table)):
    RA, Dec = table[i][24], table[i][25]
    if RA != -99:
        pos = wcs.all_world2pix([[RA, Dec]], 1)[0]
        if pos[0]>0 and pos[1]>0 :
            try:
                flux = img[int(pos[1]), int(pos[0]) ] #!!! To be confirm if pos[1], pos[0]
                if flux!=0:
                    target_pos_list.append(pos)
            except:
                continue
target_pos_list = np.array(target_pos_list)
# Flag PSF candidates that sit within 10 pixels of a cataloged source.
remove_i = []
for i in range(len(all_PSF_pos_list_)-1):
    if np.min(np.sqrt( np.sum( (all_PSF_pos_list_[i] - target_pos_list)**2,axis=1 ) )) < 10:
        remove_i.append(i)
        print('This PSF id{0} is repeated!'.format(i))
#%%Remove the repeat
# Flag duplicated PSF candidates (two detections within 10 pixels of each other).
all_PSF_pos_list = np.array(all_PSF_pos_list_)
remove_i = []
for i in range(len(all_PSF_pos_list)-1):
    if np.min(np.sqrt(np.sum( (all_PSF_pos_list[i] - all_PSF_pos_list[i+1:])**2,axis=1 ) )) < 10:
        remove_i.append(i)
        print('This PSF id{0} is repeated!'.format(i))
#%%
all_PSF_pos_list = [all_PSF_pos_list[i] for i in range(len(all_PSF_pos_list)) if i not in remove_i]
# Re-center each PSF position onto the brightest pixel of a 60x60 stamp.
for i,pos in enumerate(all_PSF_pos_list):
    pos[0], pos[1] = int(pos[0]), int(pos[1])
    # print(img[int(pos[1]),int(pos[0])])
    test_img = img[int(pos[1])-30:int(pos[1])+30, int(pos[0])-30:int(pos[0])+30]
    ct_pos = len(test_img)/2
    shift_pos = np.where(test_img == test_img.max())[0]-ct_pos, np.where(test_img == test_img.max())[1]-ct_pos,
    pos[0] = pos[0]+shift_pos[1]
    pos[1] = pos[1]+shift_pos[0]
#Clean up PSF: cut 100x100 stamps and remove nearby contaminants with psf_clean.
clean_up = True
PSF_org_list = []
PSF_clean_list = []
PSF_RA_DEC_list = []
if clean_up == True:
    # lines = np.loadtxt('target_info.txt', dtype='str')
    for pos in all_PSF_pos_list:
        psf = img[int(pos[1])-50:int(pos[1])+50, int(pos[0])-50:int(pos[0])+50]
        PSF_org_list.append(psf)
        psf = psf_clean(psf,if_plot=False, nsigma=3, npixels=45, ratio_to_replace=0.005,
                        if_print_fluxratio=True)
        RA, Dec = wcs.all_pix2world([[pos[0], pos[1]]], 1)[0]
        PSF_clean_list.append(psf)
        PSF_RA_DEC_list.append([RA, Dec])
    # print("Before remove candidates")
    # plt_many_fits(PSF_org_list)
    plt_many_fits(PSF_clean_list)
# pickle.dump([PSF_org_list, PSF_clean_list, all_PSF_pos_list, PSF_RA_DEC_list],
#             open('material/'+filt+'_PSF_Library.pkl', 'wb'))
#%%Refine the PSF (interactive second pass, kept disabled)
# new_PSF_org_list, new_PSF_clean_list, new_all_PSF_pos_list, new_PSF_RA_DEC_list = [],[],[],[]
# from galight.tools.cutout_tools import cutout
# for i,pos in enumerate(all_PSF_pos_list):
#     image = cutout(image = img, center = pos, radius=120)
#     # plt_fits(image)
#     psf = psf_clean(image,if_plot=False, nsigma=3, npixels=45, ratio_to_replace=0.005,
#                   if_print_fluxratio=True)
#     plt_fits(psf)
#     ifsave = input('input any string to not save:\n')
#     if ifsave == '':
#         new_PSF_org_list.append(image)
#         new_PSF_clean_list.append(psf)
#         new_all_PSF_pos_list.append(all_PSF_pos_list[i])
#         new_PSF_RA_DEC_list.append(PSF_RA_DEC_list[i])
# pickle.dump([new_PSF_org_list, new_PSF_clean_list, new_all_PSF_pos_list, new_PSF_RA_DEC_list],
#             open('material/'+filt+'_PSF_Library_v2.pkl', 'wb'))
# plt_many_fits(new_PSF_clean_list)
#%%
# Reload the previously saved PSF library and display the cleaned PSFs.
print("After remove candidates")
PSF_lib_files = glob.glob('material/'+filt+'_PSF_Library.pkl')[0]
PSF_org_list, PSF_clean_list, all_PSF_pos_list, PSF_RA_DEC_list = pickle.load(open(PSF_lib_files,'rb'))
plt_many_fits(PSF_clean_list) | dartoon/my_code | projects/2022_COSMOSweb/0_build_PSF_library.py | 0_build_PSF_library.py | py | 6,988 | python | en | code | 0 | github-code | 13 |
10472743106 | import sys
import PyQt5.QtWidgets as QtWidgets
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QApplication, QMessageBox, QAction, QFileDialog
from vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
import vtk
from models import model
from models_bezier import model_bezier
class MainWindow(object):  # builds the main-window UI
    """UI builder: embeds a VTK render widget into a QMainWindow."""
    def __init__(self):
        # Widgets are created lazily in setup_ui().
        self.central_widget = None
        self.grid_layout = None
        self.vtk_widget = None
    def setup_ui(self, view):  # populate *view* (a QMainWindow) with widgets
        view.setObjectName("MainWindow")
        view.resize(1500, 1500)  # initial window size
        self.central_widget = QtWidgets.QWidget(view)
        self.grid_layout = QtWidgets.QGridLayout(self.central_widget)
        self.vtk_widget = QVTKRenderWindowInteractor(self.central_widget)  # embedded VTK view
        view.setCentralWidget(self.vtk_widget)
        view.statusBar().showMessage("successfully...")  # "loaded successfully" hint
        view.setWindowTitle("Free-Form Deformation")  # window title
class SimpleView(QtWidgets.QMainWindow):
    """Main window of the Free-Form Deformation (FFD) demo.

    Displays an OBJ mesh inside an embedded VTK widget and lets the user
    load saved .FFD control-point files with either B-spline or Bezier
    deformation, optionally with an extra spotlight.

    Refactor note: the original class contained three nearly identical
    ~45-line ``load_ffd*`` methods plus a duplicated triple-nested loop for
    re-applying control-point offsets; those now share ``_load_ffd_file``
    and ``_apply_control_point_offsets``.  Public method names and
    signatures are unchanged.
    """

    def __init__(self, parent=None):
        QtWidgets.QMainWindow.__init__(self, parent)
        self.ui = MainWindow()
        self.ui.setup_ui(self)
        self.create_actions()  # build menu actions
        self.create_menus()    # build menu bar / tool bars
        self.filename = "iphone_6_model.obj"  # default cover model
        self.show_obj()
        self.show_all()

    def show_obj(self):
        """Read self.filename as an OBJ mesh and render it."""
        self.reader = vtk.vtkOBJReader()
        self.reader.SetFileName(self.filename)
        self.reader.Update()
        self.data = self.reader.GetOutput()  # keep polydata for later use
        self.ren = vtk.vtkRenderer()
        self.ui.vtk_widget.GetRenderWindow().AddRenderer(self.ren)
        self.iren = self.ui.vtk_widget.GetRenderWindow().GetInteractor()
        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputData(self.data)
        self.actor = vtk.vtkActor()
        self.actor.SetMapper(mapper)
        self.ren.AddActor(self.actor)

    def show_vtk(self, dots=5, grid_size=[5, 5, 5], method="nolight", ffd_type="B"):
        """Create a fresh renderer and an FFD model around the current mesh.

        :param grid_size: number of grid nodes per axis (x, y, z).
        :param method: ``"light"`` adds a green spotlight to the scene.
        :param ffd_type: ``"B"`` for B-spline deformation, anything else
            selects the Bezier variant.
        """
        self.ren = vtk.vtkRenderer()
        self.ui.vtk_widget.GetRenderWindow().AddRenderer(self.ren)
        self.iren = self.ui.vtk_widget.GetRenderWindow().GetInteractor()
        if method == "light":  # optional spotlight effect
            mylight = vtk.vtkLight()
            mylight.SetColor(0, 1, 0)
            mylight.SetPosition(2, 3, 1)
            self.ren.AddLight(mylight)
        self.dots = dots
        self.grid_size = grid_size
        self.dot_xyz = [None, None, None]
        # Both model classes share the same constructor signature, so pick
        # the class once instead of duplicating the call.
        model_cls = model if ffd_type == "B" else model_bezier
        self.model = model_cls(
            ren=self.ren,
            iren=self.iren,
            filename=self.filename,
            cp_num_x=grid_size[0] - 1,
            cp_num_y=grid_size[1] - 1,
            cp_num_z=grid_size[2] - 1,
        )

    def show_all(self):
        """Initialise the interactor and show the window."""
        self.iren.Initialize()
        self.show()

    def create_actions(self):
        """Create the QActions used by the menus and tool bars."""
        self.load_obj_Action = QAction("Add_OBJ_file", self, triggered=self.load_obj)
        self.load_ffd_Action = QAction("Add_FFD_file", self, triggered=self.load_ffd)
        self.save_ffd_Action = QAction("Save_to_FFD", self, triggered=self.save_ffd)
        self.to_bezier_Action = QAction(
            "To_Bezier", self, triggered=self.load_ffd_bezier
        )
        self.turn_on_light_Action = QAction(
            "Turn_on_light", self, triggered=self.load_ffd_light
        )
        self.reset_Action = QAction(
            QIcon("reset.jpg"), "Reset", triggered=self.initial  # reset icon
        )
        self.exit_Action = QAction(
            QIcon("exit.jpg"), "Exit", triggered=QApplication.instance().quit  # exit icon
        )

    def create_menus(self):
        """Assemble the menu bar and tool bars from the actions."""
        menubar = self.menuBar()
        self.toolbar_reset = self.addToolBar("Initial")
        self.toolbar_reset.addAction(self.reset_Action)
        self.toolbar_exit = self.addToolBar("Exit")
        self.toolbar_exit.addAction(self.exit_Action)
        self.loadMenu_obj = menubar.addMenu("Change_obj_background")
        self.loadMenu_ffd = menubar.addMenu("Add_FFD_file")
        self.saveMenu = menubar.addMenu("To_FFD_file")
        self.to_bezier = menubar.addMenu("To_Bezier")
        self.turn_on_light = menubar.addMenu("Turn_on_light")
        self.loadMenu_obj.addAction(self.load_obj_Action)
        self.loadMenu_ffd.addAction(self.load_ffd_Action)
        self.saveMenu.addAction(self.save_ffd_Action)
        self.to_bezier.addAction(self.to_bezier_Action)
        self.turn_on_light.addAction(self.turn_on_light_Action)

    def load_obj(self):
        """Ask the user for an .OBJ file and display it."""
        filename, ok = QFileDialog.getOpenFileName(self, "Add_OBJ_file", "")
        if not filename.upper().endswith(".OBJ"):  # extension check
            reply = QMessageBox.information(
                self, "Info", "This file is not .OBJ", QMessageBox.Yes
            )
        else:
            if ok:
                self.filename = filename
                self.show_obj()
                self.show_all()
                reply = QMessageBox.information(
                    self, "Info", "Successfully added obj file", QMessageBox.Yes
                )
                print("Done Load OBJ")

    def load_control_size(self, filename):
        """Read the control-grid size (three ints) from an .FFD file.

        The three integers follow the ``#control grid size#`` marker line.
        """
        num = []
        start = False
        with open(filename, "r") as f:
            while True:
                line = f.readline()
                if "#control grid size#" in line:
                    start = True
                    continue
                if len(num) == 3:
                    break
                if start:
                    num.append(int(line))
        return num

    def _apply_control_point_offsets(self):
        """Render a sphere for every control point the loaded file displaced."""
        ffd = self.model.ffd
        for x in range(len(ffd.control_points)):
            for y in range(len(ffd.control_points[x])):
                for z in range(len(ffd.control_points[x][y])):
                    x_new, y_new, z_new = ffd.new_control_points_location[x][y][z]
                    x_old, y_old, z_old = ffd.control_points_location[x][y][z]
                    if (x_old != x_new) or (y_old != y_new) or (z_old != z_new):
                        self.model.render_sphere(
                            (x, y, z), ffd.new_control_points_location[x][y][z]
                        )

    def _load_ffd_file(self, method="nolight", ffd_type="B"):
        """Shared body of the three FFD-loading actions.

        The three public entry points below differ only in the lighting
        mode and the deformation type; the file dialog, extension check,
        control-point restore and message boxes live here once.
        """
        filename, ok = QFileDialog.getOpenFileName(self, "Add_FFD_file", "")
        if not filename.upper().endswith(".FFD"):
            QMessageBox.information(
                self, "Info", "This file is not .FFDAlgorithm", QMessageBox.Yes
            )
            return
        if ok:
            grid_size = self.load_control_size(filename)
            self.show_vtk(grid_size=grid_size, method=method, ffd_type=ffd_type)
            self.model.ffd.read_ffd(filename)
            self._apply_control_point_offsets()
            QMessageBox.information(
                self, "Info", "Successfully added ffd_algo file", QMessageBox.Yes
            )
            print("Done Load FFDAlgorithm")
        self.show_all()

    def load_ffd(self):
        """Load an .FFD file with the default B-spline deformation."""
        self._load_ffd_file()

    def load_ffd_light(self):
        """Load an .FFD file and light the scene with a spotlight."""
        self._load_ffd_file(method="light")

    def load_ffd_bezier(self):
        """Load an .FFD file using Bezier deformation."""
        self._load_ffd_file(ffd_type="bezier")

    def save_ffd(self):
        """Ask for a destination path and save the current control points."""
        filename, ok = QFileDialog.getSaveFileName(self, "Save_to_FFD", "")
        if ok:
            self.model.ffd.save_cp(filename)
            reply = QMessageBox.information(
                self, "Info", "Successfully saved ffd_algo file", QMessageBox.Yes
            )
            print("Done Save FFDAlgorithm")
        return

    def initial(self):
        """Reset the view back to the plain OBJ model."""
        self.show_obj()
        self.show_all()
        reply = QMessageBox.information(  # confirmation dialog
            self, "Info", "Successfully reset", QMessageBox.Yes
        )
if __name__ == "__main__":
app = QApplication(sys.argv)
window = SimpleView()
window.show()
window.iren.Initialize() #将vtk嵌入pyqt5界面显示
sys.exit(app.exec_())
| James0231/Data-Visualization-PJ | GUI.py | GUI.py | py | 13,920 | python | en | code | 5 | github-code | 13 |
19334297026 | import bz2
import csv
import errno
import os
from typing import Any, List
def ensure_exists(file_path: str) -> None:
    """Create the parent directory of *file_path* if it does not already exist.

    Replaces the racy try/except-EEXIST idiom with ``exist_ok=True``, and
    tolerates bare filenames (empty dirname), which previously raised.

    :param file_path: path to a file whose containing directory must exist.
    """
    path: str = os.path.dirname(file_path)
    if path:
        os.makedirs(path, exist_ok=True)
def file_exists(path: str) -> bool:
    """Return True if *path* exists on the filesystem (file or directory)."""
    return os.path.exists(path)
def write_csv_data(path: str, header: List[str], data: List[Any], update_type: str = "w", as_bz2: bool = False) -> None:
    """Write *header* and *data* rows to *path* as CSV, optionally bz2-compressed.

    Note: ``update_type`` only applies to the plain-text path; bz2 output is
    always opened in text-write mode.
    """
    if as_bz2:
        out = bz2.open(filename=path, mode="wt")
    else:
        out = open(file=path, mode=update_type, newline="\n")
    with out:
        write(obj=out, header=header, data=data)
def write(obj: Any, header: List[str], data: List[Any]) -> None:
    """Stream *header* (when non-empty) followed by every row of *data*
    through a minimally-quoting CSV writer attached to *obj*."""
    writer = csv.writer(obj, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
    if header:
        writer.writerow(header)
    writer.writerows(data)
| mhowell234/robinhood_commons | robinhood_commons/util/io_utils.py | io_utils.py | py | 989 | python | en | code | 0 | github-code | 13 |
1246720985 | """
Read server.py first, and heed the warning there
"""
import asyncio
from threading import Lock
import queue
import logging
import itertools
from .server import Room, GameScheduler
from .utils import generate_id
from .othello_core import BLACK, WHITE, EMPTY, OUTER
from ..apps.tournament.models import GameModel, SetModel, MoveModel
from ..apps.tournament.utils import add_game_to_set, calc_set_winner, \
get_player, count_completed_games, create_set, safely_call
log = logging.getLogger(__name__)
class AutomaticGameScheduler(GameScheduler):
    """
    A GameScheduler that doesn't allow anyone to start new games, and is
    instead seeded with a list of initial games to run automatically.

    Methods needed to be implemented by the subclass:
      * populate_game_queue()
      * check_game_queue()
    Optional methods to be implemented by the subclass:
      * log_move(room_id, board, to_move)
      * log_game(room_id, black_ai, white_ai, black_score, white_score, winner, by_forfeit)
    Exposed internal variables:
      * ai_list: list of AIs, supposedly in order from strongest to weakest
      * timelimit: float
      * max_games: int, maximum number of games to play at once
      * num_games: int, current number of games running

    Use add_new_game to add new games to the game queue.
    """

    def __init__(self, loop, ai_list=[], timelimit=5, max_games=2):
        super().__init__(loop)
        self.ai_list = ai_list
        self.timelimit = timelimit
        self.max_games = max_games
        self.num_games = 0
        self.game_queue = queue.Queue()
        # populate queue with initial matchups to play
        self.populate_game_queue()
        # Start the first batch of games ourselves, because doing it after
        # every event is done way too often otherwise.
        while self.num_games < self.max_games and not self.game_queue.empty():
            self.play_next_game()

    def add_new_game(self, black, white, timelimit=None):
        """Queue a (black, white, timelimit) matchup; defaults to self.timelimit."""
        if timelimit is None:
            timelimit = self.timelimit
        self.game_queue.put_nowait((black, white, timelimit))

    def populate_game_queue(self):
        # Called in __init__, must be defined by subclasses in order to start
        # the initial matches.
        raise NotImplementedError

    def check_game_queue(self):
        # Called after each game ends, when the result has been recorded.
        raise NotImplementedError

    def log_move(self, room_id, board, to_move):
        """Hook: called whenever board_update intercepts a valid board.

        Fix: the original definition was missing ``self``, so it could never
        be called as a bound method without a TypeError.
        """
        pass

    def log_game(self, room_id, black_ai, white_ai, black_score, white_score, winner, by_forfeit):
        """Hook: called whenever game_end sees that a game has ended.

        Fix: the original definition was missing ``self``; game_end calls
        ``self.log_game(...)``, which raised TypeError for subclasses that
        did not override this method (e.g. RRTournamentScheduler).
        """
        pass

    def play_game(self, parsed_data, room_id):
        # Reject user-initiated games while a tournament is running.
        log.warn("{} tried to play during a tournament".format(room_id))
        self.game_error({'error': "You cannot start a game during a tournament."}, room_id)
        self.game_end(dict(), room_id)

    def play_next_game(self):
        """Dequeue and start the next game, tracking the running-game count."""
        if not self.game_queue.empty():
            self.play_automatic_game(*self.game_queue.get_nowait())
            self.num_games += 1
            if self.num_games > self.max_games:
                log.warn("Playing more games at a time than allowed...")

    # The number of arguments is deliberately unspecified so subclasses can
    # thread extra arguments through to their own _play_automatic_game.
    def play_automatic_game(self, *args):
        self.loop.call_soon_threadsafe(self._play_automatic_game, *args)

    def _play_automatic_game(self, black, white, timelimit):
        """Create a fresh room and start the (black, white) game in it.

        Returns the new room id so subclasses can associate extra state.
        """
        new_id = generate_id()
        # extremely low chance to block, we accept that
        while new_id in self.rooms: new_id = generate_id()
        log.info("{} playing next game: {} v {}".format(new_id, black, white))
        room = Room()
        room.id = new_id
        self.rooms[new_id] = room
        log.debug("{} starting to play".format(new_id))
        self.play_game_actual(black, white, timelimit, new_id)
        return new_id

    def game_end(self, parsed_data, room_id):
        """Record the result of a finished game and schedule follow-up games."""
        log.debug("{} overridded game_end called".format(room_id))
        # log result
        if room_id not in self.rooms:
            log.debug("{} tried to end room, which might've already ended".format(room_id))
            return
        board = parsed_data.get('board', "")
        forfeit = parsed_data.get('forfeit', False)
        winner = parsed_data.get('winner', OUTER)
        black_score = board.count(BLACK)
        white_score = board.count(WHITE)
        black_ai = self.rooms[room_id].black_ai
        white_ai = self.rooms[room_id].white_ai
        if black_ai is None or white_ai is None:
            log.info("{} ignoring room with blank AI...".format(room_id))
            return
        super().game_end(parsed_data, room_id)
        self.num_games -= 1
        # log game
        self.log_game(room_id, black_ai, white_ai, black_score, white_score, winner, forfeit)
        # handle putting new games into queue, if necessary
        self.check_game_queue()
        while self.num_games < self.max_games and not self.game_queue.empty():
            self.play_next_game()
# Simple class to test AutomaticGameScheduler functionality
class RRTournamentScheduler(AutomaticGameScheduler):
    """Round-robin scheduler: every AI plays every other AI in both colors."""
    def populate_game_queue(self):
        """Queue one game for each ordered pair of distinct AIs."""
        matchups = itertools.permutations(self.ai_list, 2)
        for black_ai, white_ai in matchups:
            self.add_new_game(black_ai, white_ai)
    def check_game_queue(self):
        """After each game, start the next queued game if any remain."""
        if self.game_queue.empty():
            return
        self.play_next_game()
class SetTournamentScheduler(AutomaticGameScheduler):
    """Runs a bracket of SetModel objects, playing ``games_per_set`` games in
    each color per set and advancing winners/losers to their follow-up sets.
    Django ORM work is funneled through safely_call on the event-loop thread.
    """
    # completed_callback and record_callback are called with a list of SetModel
    # object as their first argument, protected by a threading.Lock object as
    # their second argument
    def __init__(self, tournament, *args, sets=[], games_per_set=1, completed_callback=lambda *x: None, record_callback=None, **kwargs):
        self.tournament = tournament
        self.sets = sets
        self.results_lock = Lock()
        # room_id -> GameModel for games currently in flight, guarded by games_lock
        self.games = dict()
        self.games_lock = Lock()
        self.games_per_set = games_per_set
        # indices into self.sets of sets whose games are in progress
        self.currently_playing = set()
        self.completed_callback = completed_callback
        self.record_callback = record_callback if not (record_callback is None) else completed_callback
        self.completed = False
        super().__init__(*args, **kwargs)
    def log_move(self, room_id, board, to_move):
        # Called whenever board_update intercepts a valid board
        pass
    def log_game(self, *args):
        # Need to have django operations be in a seperate thread
        safely_call(self.unsafe_log_game, *args)
    def unsafe_log_game(self, room_id, black_ai, white_ai, black_score, white_score, winner, by_forfeit):
        """Persist a finished game's result into its pre-created GameModel,
        falling back to creating a fresh GameModel if the room is unknown."""
        with self.games_lock:
            log.info("{} game over {} v {}".format(room_id, black_ai, white_ai))
            if room_id in self.games:
                game = self.games[room_id]
                game.black = get_player(black_ai)
                game.white = get_player(white_ai)
                game.black_score = black_score
                game.white_score = white_score
                game.winner = winner
                game.by_forfeit = by_forfeit
                game.completed = True
                game.save()
                # Maybe I need to do this? idk throw it out and accept the leak if
                # bad things happen
                del self.games[room_id]
            else:
                log.warn("Couldn't find existing game! Falling back to old method...")
                game = GameModel(
                    black=black_ai,
                    white=white_ai,
                    timelimit=int(self.timelimit),
                    black_score=black_score,
                    white_score=white_score,
                    winner=winner,
                    by_forfeit=by_forfeit,
                    completed=True
                )
                for i in self.currently_playing:
                    s = self.sets[i]
                    # No room_id -> game mapping anymore, need to search for set
                    # to add it to
                    if (s.black == black_ai and s.white == white_ai) or \
                       s.black == white_ai and s.white == black_ai:
                        add_game_to_set(s, game)
    # I wish there was a better way to do this, i.e. have GameModels correspond
    # to room_ids so we can record moves that get captured, and have us know
    # which set tried to add them at creation time.
    # But there isn't. So now we have to live with this.
    def add_new_game(self, black, white, setm, timelimit=None):
        """Queue a game, additionally carrying the SetModel it belongs to."""
        if timelimit is None:
            self.game_queue.put_nowait((black, white, self.timelimit, setm))
        else:
            self.game_queue.put_nowait((black, white, timelimit, setm))
    def _play_automatic_game(self, black, white, timelimit, setm):
        # Start the game in the base class, then pre-create the GameModel so
        # unsafe_log_game can find it by room id later.
        room_id = super()._play_automatic_game(black.id, white.id, timelimit)
        with self.games_lock:
            self.games[room_id] = GameModel(
                black=black,
                white=white,
                in_set=setm,
            )
            safely_call(self.games[room_id].save)
    def play_next_set(self, next_set_index):
        """Resolve the set's players from its feeder sets and queue its games."""
        if next_set_index in self.currently_playing:
            return
        next_set = self.sets[next_set_index]
        # TODO: check that `is None` works for fields set to NULL in the database
        # (because that's what these are)
        if not (next_set.black_from_set is None):
            black_prev_set = next_set.black_from_set
            winner = black_prev_set.winner
            if black_prev_set.winner_set.id == next_set.id:
                if winner == WHITE:
                    next_set.black = black_prev_set.white
                else:
                    # I guess this is kind of like a tiebreaker,
                    # black continues if previous set was a tie?
                    # idk, tie handling is hard
                    next_set.black = black_prev_set.black
            elif black_prev_set.loser_set.id == next_set.id:
                if winner == WHITE:
                    next_set.black = black_prev_set.black
                else:
                    next_set.black = black_prev_set.white
            else:
                log.warn("Set {}'s black previous set ({}) does not have a pointer to it".format(next_set_index, black_prev_set.num))
        if not (next_set.white_from_set is None):
            white_prev_set = next_set.white_from_set
            winner = white_prev_set.winner
            # TODO: Consider using some other method besides direct object
            # comparison to tell if two sets are the same
            if white_prev_set.winner_set.id == next_set.id:
                if winner == WHITE:
                    next_set.white = white_prev_set.white
                else:
                    next_set.white = white_prev_set.black
            elif white_prev_set.loser_set.id == next_set.id:
                if winner == WHITE:
                    next_set.white = white_prev_set.black
                else:
                    next_set.white = white_prev_set.white
            else:
                log.warn("Set {}'s white previous set ({}) does not have a pointer to it".format(next_set_index, white_prev_set.num))
        safely_call(next_set.save)
        # games_per_set games in each color assignment
        for g in range(self.games_per_set):
            self.add_new_game(next_set.black, next_set.white, next_set)
            self.add_new_game(next_set.white, next_set.black, next_set)
        self.currently_playing.add(next_set_index)
    # TODO: adapt this to allow checking to see if sets are constructed
    # incorrectly, i.e. there is no way to finish a tournament because
    # the results of two games depend on each other somehow
    def populate_game_queue(self):
        """Start every set whose feeder sets are complete; finish or record."""
        all_played = True
        for i in range(len(self.sets)):
            s = self.sets[i]
            all_played = all_played and s.completed
            if s.completed or i in self.currently_playing:
                continue
            black_set_done = (s.black_from_set is None) or (s.black_from_set.completed)
            white_set_done = (s.white_from_set is None) or (s.white_from_set.completed)
            if black_set_done and white_set_done:
                self.play_next_set(i)
        if all_played:
            self.tournament_end()
        else:
            self.tournament_record()
    def check_game_queue(self):
        """Mark sets whose games are all done as completed, then reschedule."""
        new_currently_playing = self.currently_playing.copy()
        for i in self.currently_playing:
            s = self.sets[i]
            log.debug("{}".format(s))
            num_games = safely_call(count_completed_games, s)
            log.debug("num_completed_games: {}".format(num_games))
            if num_games >= 2*self.games_per_set:
                s.completed = True
                safely_call(calc_set_winner, s)
                new_currently_playing.remove(i)
        self.currently_playing = new_currently_playing
        self.populate_game_queue()
    def tournament_record(self):
        """Report intermediate results via record_callback on the loop thread."""
        log.info("Recording tournament results...")
        self.loop.call_soon_threadsafe(self.record_callback, self.sets, self.results_lock)
    def tournament_end(self):
        """Report final results via completed_callback on the loop thread."""
        log.info("Tournament completed! Returning results...")
        self.loop.call_soon_threadsafe(self.completed_callback, self.sets, self.results_lock)
class SwissTournamentScheduler(SetTournamentScheduler):
    """Swiss-system tournament: after each round finishes, AIs are ranked by
    accumulated wins and re-paired top-down for the next round, up to
    ``rounds`` rounds. New sets are created lazily, one round at a time."""
    def __init__(self, *args, rounds=8, **kwargs):
        self.rounds = rounds
        self.current_round = 0
        # ai id -> accumulated win count across recorded rounds
        self.num_wins = dict()
        # index into self.sets up to which wins have already been tallied
        self.last_recorded_index = 0
        super().__init__(*args, **kwargs)
    def unsafe_create_set(self, black, white):
        """Create and return a new SetModel (must run via safely_call)."""
        black_player = get_player(black)
        white_player = get_player(white)
        new_set = create_set(self.tournament, black_player, white_player)
        return new_set
    def populate_game_queue(self):
        """When the current round is done, tally wins, re-pair, and queue
        the next round's sets; then defer to the base scheduler."""
        # First, check if the current round is over
        all_played = True
        for i in range(len(self.sets)):
            s = self.sets[i]
            if not s.completed:
                all_played = False
                break
        # If it is, go to the next round
        if all_played and self.current_round < self.rounds:
            # First, calculate number of wins each AI has
            for i in range(self.last_recorded_index, len(self.sets)):
                s = self.sets[i]
                if s.winner == BLACK:
                    black_wins = self.num_wins.get(s.black.id, 0)
                    self.num_wins[s.black.id] = black_wins + 1
                elif s.winner == WHITE:
                    white_wins = self.num_wins.get(s.white.id, 0)
                    self.num_wins[s.white.id] = white_wins + 1
            self.last_recorded_index = len(self.sets)
            # Sort by number of wins
            ranking = sorted(self.ai_list, key=lambda ai: -self.num_wins.get(ai, 0))
            log.info("Swiss ranking: {}".format(ranking))
            log.info("Num wins: {}".format(self.num_wins))
            # Pair adjacent entries in the ranking: 1v2, 3v4, ...
            for i in range(0, len(ranking), 2):
                black = ranking[i]
                if i+1 >= len(ranking):
                    white = "random" # lowest player gets equivalent of a "bye"
                else:
                    white = ranking[i+1]
                # TODO: This doesn't record all the information I would like, i.e.
                # the set each person came from, but this should do for now
                new_set = safely_call(self.unsafe_create_set, black, white)
                self.sets.append(new_set)
            self.current_round += 1
        super().populate_game_queue()
| duvallj/othello_tourney | othello/gamescheduler/tournament_server.py | tournament_server.py | py | 16,101 | python | en | code | 1 | github-code | 13 |
6478526201 | from turtle import Turtle, Screen
# Draw nested regular polygons (triangle through decagon) with one turtle.
t = Turtle()
t.shape("turtle")
for sides in range(3, 11):
    angles = 360 /sides  # exterior angle of a regular polygon with `sides` sides
    for i in range(sides):
        t.forward(100)
        t.right(angles)
screen = Screen()
screen.exitonclick()  # keep the window open until it is clicked
| Mohammad-Shiblu/Python_project | python turtle graphics/Drawing_different_shape.py | Drawing_different_shape.py | py | 241 | python | en | code | 0 | github-code | 13 |
3713001752 | import numpy as np
try:
import cm #dev
except:
import cloudmrhub.cm as cm #runtime
import matplotlib.pyplot as plt
import scipy
from types import MethodType
class cm2DRecon(cm.cmOutput):
"""
Python implementation of the cm2DRecon MATLAB class
:author:
Dr. Eros Montin, Ph.D. <eros.montin@gmail.com>
:date:
16/06/2023
:note:
This work was supported in part by the National Institute of Biomedical Imaging and Bioengineering (NIBIB) of the National Institutes of Health under Award Number R01 EB024536 and P41 EB017183. The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institutes of Health.
:Attributes:
Dimension (int): The dimension of the reconstruction.
SignalKSpace (cm.k2d): The signal k-space data.
NoiseKSpace (cm.k2d): The noise k-space data.
NoiseCovariance (np.ndarray): The noise covariance matrix.
InverseNoiseCovariance (np.ndarray): The Inverse Noise Covariance matrix.
SignalPrewhitened (np.ndarray): The prewhitened signal k-space data.
HasSensitivity (bool): Whether the reconstruction contains sensitivity information.
HasAcceleration (bool): Whether the reconstruction has accelerations.
complexType (np.complex128): The complex type of the reconstruction.
"""
    def __init__(self):
        """
        Initializes the cm2DRecon object with empty k-spaces and defaults.
        """
        self.dimesion=2  # NOTE(review): attribute name keeps the original (misspelled) spelling; external code may rely on it
        self.SignalKSpace = cm.k2d()  # signal k-space container
        self.NoiseKSpace = cm.k2d()  # noise k-space container
        self.NoiseCovariance = np.array([])  # noise covariance matrix (empty until computed/set)
        self.InverseNoiseCovariance = np.array([])  # inverse of the noise covariance
        self.SignalPrewhitened = cm.k2d()  # cached prewhitened signal k-space
        self.HasSensitivity = False  # whether the recon carries coil-sensitivity information
        self.HasAcceleration = False  # whether the recon handles accelerated (undersampled) data
        self.HasAutocalibration = False  # whether the recon uses autocalibration data
        self.NoiseBandWidth = None  # lazily computed by getNoiseBandWidth()
        self.complexType=np.complex128  # dtype every k-space is coerced to
def checkKSpacePixelType(self,s):
if s.dtype != self.complexType:
s = s.astype(self.complexType)
return s
def setSignalKSpace(self, signalKSpace):
"""
Sets the signal k-space data.
:param signalkspace: The signal k-space data
:type: np.ndarray
"""
# check pixel type
signalKSpace=self.checkKSpacePixelType(signalKSpace)
self.SignalKSpace.set(signalKSpace)
# i've set a new signal, so the prewhitened signal is not valid anymore
self.SignalPrewhitened.reset()
def getSignalKSpace(self):
"""
Gets the signal k-space data.
Returns:
_type_: nd.array(f,p,c)
"""
return self.SignalKSpace.get()
def getSignalKSpaceSize(self):
"""
Gets the signal k-space size.
Returns:
NoiseKspace: nd.array(f,p,c)
"""
return self.SignalKSpace.getSize()
def getsignalNCoils(self):
"""
Gets the number of coils in the signal k-space data.
Returns:
int: ncoils
"""
return self.SignalKSpace.getNCoils()
def setNoiseKSpace(self, noiseKSpace):
""" Sets the noise k-space data.
:param noiseKspace: The noise k-space data
:type: np.ndarray(f,p,c)
"""
noiseKSpace=self.checkKSpacePixelType(noiseKSpace)
self.NoiseKSpace.set(noiseKSpace)
def getNoiseKSpace(self):
"""
Gets the noise k-space data.
Returns:
NoiseKspace: nd.array(f,p)
"""
return self.NoiseKSpace.get()
def getNoiseKSpaceSize(self):
"""
Gets the noise k-space size.
Returns:
NoiseKspace: nd.array(f,p)
"""
return self.NoiseKSpace.getSize()
def getNoiseNCoils(self):
"""
Gets the number of coils in the noise k-space data.
Returns:
int: ncoils
"""
return self.NoiseKSpace.getNCoils()
def getNoiseBandWidth(self):
if(self.NoiseBandWidth is None):
noise_bandwidth = cm.mrir_noise_bandwidth(self.getNoiseKSpace());
self.NoiseBandWidth=noise_bandwidth
try:
return self.NoiseBandWidth
except:
print("---problem in the noisebadwidth----")
self.appendLog("---problem in the noisebadwidth----","warning")
return 1.0
def getInverseNoiseCovariancePrewhitened(self):
"""when prewhitened the correlation is an eye
"""
return np.eye(self.getsignalNCoils())
def setNoiseCovariance(self, noiseCovariance):
"""
Sets the noise covariance matrix.
:param noiseCovariance: The noise covariance matrix.
:type: mp.ndarray(c,c)
"""
noiseCovariance=self.checkKSpacePixelType(noiseCovariance)
self.NoiseCovariance = noiseCovariance
self.InverseNoiseCovariance = np.linalg.inv(noiseCovariance)
def getNoiseCovariance(self):
"""
Return the covariance matrix
Returns:
np.ndarray(c,c): Covariance Matrix
"""
if not self.NoiseCovariance.any():
self.NoiseCovariance = cm.calculate_covariance_matrix(
self.getNoiseKSpace(),self.getNoiseBandWidth()
)
return self.NoiseCovariance
def getNoiseCovarianceCoefficients(self):
noise_covariance=self.getNoiseCovariance()
return cm.calculate_covariance_coefficient_matrix(noise_covariance)
def getInverseNoiseCovariance(self):
"""
Gets the inverse noise covariance matrix.
Returns:
np.ndarray(c,c): inverse of the covariance matrix
"""
return self.InverseNoiseCovariance
def getPrewhitenedSignal(self):
"""
Gets the prewhitened signal.
Returns:
np.ndarray(f,p,c): prewhitened signal
"""
if self.SignalPrewhitened.isEmpty():
self.SignalPrewhitened.set(cm.prewhiteningSignal(self.getSignalKSpace(), self.getNoiseCovariance()))
return self.SignalPrewhitened.get()
def setPrewhitenedSignal(self, prewhitenedSignal):
prewhitenedSignal=self.checkKSpacePixelType(prewhitenedSignal)
self.SignalPrewhitened.set(prewhitenedSignal)
def plotImageAfterTest(self,IM,tit):
# Create the figure and subplots
fig, axarr = plt.subplots(2, 1)
axarr[0].imshow(IM)
axarr[0].set_title(tit)
axarr[1].imshow(abs(IM))
axarr[1].set_title(tit + ' abs')
ha = plt.axes([0, 0, 1, 1], frameon=False, visible=False,
xlim=[0, 1], ylim=[0, 1], aspect='equal')
# Add text to the axes object
text = ha.text(0.5, 0.98, f'{IM.shape[0]}x{IM.shape[1]}',
transform=ha.transAxes, horizontalalignment='center',
verticalalignment='top')
plt.show()
def test(self):
TEST = self.testrecon()
self.plotImageAfterTest(TEST.getOutput(), "recon")
return TEST
def get2DKSIFFT(self,K=None):
if K is None:
K=self.getPrewhitenedSignal()
SC=np.sqrt(np.prod(np.array(K.shape[0:2])))
return cm.MRifft(K,[0,1])*SC
def getOutput(self):
pass
def resetAfterSignal(self):
self.SignalPrewhitened.reset()
def resetAfterNoise(self):
if not isinstance(self.NoiseBandWidth,str):
self.NoiseBandWidth=None
print('reset the NBW')
self.NoiseCovariance = np.array([])
self.InverseNoiseCovariance = np.array([])
self.SignalPrewhitened.reset()
class cm2DReconRSS(cm2DRecon):
    """
    Root-sum-of-squares (RSS) coil combination.

    The coil images are combined as sqrt(sum_c |I_c|^2), which requires no
    coil sensitivity information.

    :author:
        Dr. Eros Montin, Ph.D. <eros.montin@gmail.com>
    :date:
        16/06/2023
    :note:
        This work was supported in part by the National Institute of Biomedical Imaging and Bioengineering (NIBIB) of the National Institutes of Health under Award Number R01 EB024536 and P41 EB017183. The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institutes of Health.
    """
    def __init__(self):
        """Initializes the RSS reconstruction (no sensitivities, no acceleration)."""
        super().__init__()
        self.HasAcceleration = False
        self.HasSensitivity = False
        self.HasAutocalibration = False
    def getOutput(self):
        """Return the RSS-combined magnitude image."""
        coil_images = self.get2DKSIFFT()
        # Euclidean norm over the coil axis == sqrt(sum |image|^2).
        return np.linalg.norm(coil_images, axis=-1)
    @staticmethod
    def testrecon():
        """Build a ready-to-run RSS reconstructor on the packaged test data."""
        recon = cm2DReconRSS()
        kspace, _, covariance = cm.getMRoptimumTestData()
        recon.setSignalKSpace(kspace)
        recon.setNoiseCovariance(covariance)
        return recon
class cm2DReconSS(cm2DReconRSS):
    """
    Sum-of-squares (SS) combination: sum_c I_c^2 over the coil axis,
    without magnitude or square root (complex-valued output).

    :author:
        Dr. Eros Montin, Ph.D. <eros.montin@gmail.com>
    :date:
        16/06/2023
    :note:
        This work was supported in part by the National Institute of Biomedical Imaging and Bioengineering (NIBIB) of the National Institutes of Health under Award Number R01 EB024536 and P41 EB017183. The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institutes of Health.
    """
    def getOutput(self):
        """Return the coil-wise sum of squared (complex) images."""
        coil_images = self.get2DKSIFFT()
        return np.square(coil_images).sum(axis=-1)
    @staticmethod
    def testrecon():
        """Build a ready-to-run SS reconstructor on the packaged test data."""
        recon = cm2DReconSS()
        kspace, _, covariance = cm.getMRoptimumTestData()
        recon.setSignalKSpace(kspace)
        recon.setNoiseCovariance(covariance)
        return recon
class cm2DReconRSSunAbs(cm2DReconSS):
    """
    RSS combination without the magnitude step: sqrt(sum_c I_c^2), i.e. the
    complex square root of the sum-of-squares image.

    :author:
        Dr. Eros Montin, Ph.D. <eros.montin@gmail.com>
    :date:
        16/06/2023
    :note:
        This work was supported in part by the National Institute of Biomedical Imaging and Bioengineering (NIBIB) of the National Institutes of Health under Award Number R01 EB024536 and P41 EB017183. The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institutes of Health.
    """
    def getOutput(self):
        """Return the (complex) square root of the sum-of-squares image."""
        sum_of_squares = super().getOutput()
        return np.sqrt(sum_of_squares)
    @staticmethod
    def testrecon():
        """Build a ready-to-run reconstructor on the packaged test data."""
        recon = cm2DReconRSSunAbs()
        kspace, _, covariance = cm.getMRoptimumTestData()
        recon.setSignalKSpace(kspace)
        recon.setNoiseCovariance(covariance)
        return recon
class cm2DKellmanRSS(cm2DReconRSS):
    """Kellman SNR map for the RSS reconstruction.

    For prewhitened data the per-pixel SNR of the RSS combination is
    |sqrt(2 * p^H p)| = sqrt(2 * sum_c |p_c|^2), where p is the coil vector
    of the pixel (Kellman & McVeigh, MRM 2005).
    """
    def __init__(self):
        super().__init__()
    def getOutput(self):
        """Return the per-pixel Kellman SNR map.

        Vectorized replacement for the original per-pixel double loop:
        p^H p is exactly sum_c |p_c|^2, so the whole map is computed in one
        array expression. Result is identical; the historical complex dtype
        of the returned map is preserved for backward compatibility.
        """
        img_matrix = self.get2DKSIFFT()
        snr = np.sqrt(2.0 * np.sum(np.abs(img_matrix) ** 2, axis=-1))
        return snr.astype(self.complexType)
class cm2DReconWithSensitivity(cm2DRecon):
    """
    Base class for reconstructions that need coil sensitivity maps,
    estimated from a reference k-space (Python port of the
    cm2DReconWithSensitivity MATLAB class).

    :author:
        Dr. Eros Montin, Ph.D. <eros.montin@gmail.com>
    :date:
        16/06/2003
    :note:
        This work was supported in part by the National Institute of Biomedical Imaging and Bioengineering (NIBIB) of the National Institutes of Health under Award Number R01 EB024536 and P41 EB017183. The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institutes of Health.
    """
    def __init__(self):
        """
        Initializes the reconstruction.
        """
        super().__init__()
        self.HasAcceleration= False
        self.HasSensitivity=True
        self.HasAutocalibration=False
        self.CoilSensitivityMatrix=cm.k2d()
        self.ReferenceKSpace=cm.k2d()
        self.PrewhitenedReferenceKSpace=cm.k2d()
        # mask selection strategy: 'reference', False (no mask), or a boolean array
        self.MaskCoilSensitivityMatrix='reference'
    def setNoMask(self):
        # Disable masking of the sensitivity maps.
        self.setMaskCoilSensitivityMatrix(False)
    def setMaskCoilSensitivityMatrixBasedOnEspirit(self,k=4,r=12,t=0.01,c=0.997):
        # Derive a support mask from ESPIRiT sensitivities of the reference k-space.
        SENS=cm.sensitivitiesEspirit2D(self.getReferenceKSpace(),k=k,r=r,t=t,c=c)
        SENS=np.squeeze(SENS)
        self.setMaskCoilSensitivityMatrix(abs(SENS[...])>0)
    def setMaskCoilSensitivityMatrixDefault(self):
        # Restore the default 'reference'-based masking.
        self.setMaskCoilSensitivityMatrix('reference')
    def setCoilSensitivityMatrix(self, S):
        self.CoilSensitivityMatrix.set(S)
    def resetCoilSensitivityMatrix(self):
        self.CoilSensitivityMatrix.reset()
    def getCoilSensitivityMatrix(self):
        # if a coil sensitivity matrix has not yet been set, compute simple-SENSE
        # sensitivities from the prewhitened reference k-space and cache them.
        if self.CoilSensitivityMatrix.isEmpty():
            # NOTE: the double assignment ('coilsens_set = s = ...') is redundant;
            # only coilsens_set is used.
            coilsens_set = s=cm.calculate_simple_sense_sensitivitymaps(
            self.getPrewhitenedReferenceKSpace(),self.MaskCoilSensitivityMatrix)
            self.setCoilSensitivityMatrix(coilsens_set)
        return self.CoilSensitivityMatrix.get()
    def setPrewhitenedReferenceKSpace(self, x):
        self.PrewhitenedReferenceKSpace.set(x)
    def getReferenceKSpace(self):
        return self.ReferenceKSpace.get()
    def getReferenceKSpaceSize(self):
        return self.ReferenceKSpace.getSize()
    def getReferenceKSpaceNCoils(self):
        return self.ReferenceKSpace.getNCoils()
    def getPrewhitenedReferenceKSpace(self):
        # Prewhiten the reference k-space with the noise covariance (cached).
        if self.PrewhitenedReferenceKSpace.isEmpty():
            S = self.getReferenceKSpace()
            Rn = self.getNoiseCovariance()
            pw_S = cm.prewhiteningSignal(S, Rn)
            self.setPrewhitenedReferenceKSpace(pw_S)
        else:
            pw_S = self.PrewhitenedReferenceKSpace.get()
        return pw_S
    def setReferenceKSpace(self, IM):
        # A new reference invalidates the cached prewhitened reference.
        IM=self.checkKSpacePixelType(IM)
        self.ReferenceKSpace.set(IM)
        self.PrewhitenedReferenceKSpace.reset()
    def setMaskCoilSensitivityMatrix(self, x):
        self.MaskCoilSensitivityMatrix = x
    def getMaskCoilSensitivityMatrix(self):
        return self.MaskCoilSensitivityMatrix
    def resetAfterSignal(self):
        # triggered after setting the signal
        super().resetAfterSignal()
    def resetAfterNoise(self):
        # triggered after setting the noise: the covariance changes, therefore
        # the prewhitened sensitivities/reference are not valid anymore
        super().resetAfterNoise()
        self.CoilSensitivityMatrix.reset()
        self.PrewhitenedReferenceKSpace.reset()
    def prepareCoilSensitivityMatrixPlot(self,title='Coil Sensitivity Maps',newplot=True):
        # Tile the per-coil magnitude maps on a near-square subplot grid.
        if newplot:
            plt.figure()
        S=self.getCoilSensitivityMatrix()
        NC=S.shape[-1]
        SNC=int(np.ceil(np.sqrt(NC)))
        for t in range(NC):
            #place the subplot closer to each other
            plt.subplot(SNC,SNC,t+1)
            plt.imshow(np.abs(S[:,:,t]))
            # set title padding to 0
            plt.title('Coil '+str(t),fontdict={'fontsize': 7},pad =0)
            #remove axis
            plt.axis('off')
            #remove ticks
            plt.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False)
        # add a common colorbar for all the subplots in the left side of the figure
        plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1)
        plt.colorbar(plt.gcf().axes[0].images[0], ax=plt.gcf().axes)
        # add a title for the figure
        plt.suptitle(title, fontsize=16)
    def plotCoilSensitivityMatrix(self,fn=None, title_addition='',newplot=True):
        # Render the sensitivity maps; save to file when fn is given.
        self.prepareCoilSensitivityMatrixPlot(title='Coil Sensitivity Maps '+title_addition,newplot=newplot)
        if fn is not None:
            plt.savefig(fn)
        else:
            plt.show()
class cm2DReconB1(cm2DReconWithSensitivity):
    """
    B1-weighted (matched-filter / unaccelerated SENSE) coil combination
    (Python port of the cm2DReconB1 MATLAB class).

    Each pixel is combined as s^H Rn^-1 p where s is the coil sensitivity
    vector and p the coil image vector (both prewhitened).

    :author:
        Dr. Eros Montin, Ph.D. <eros.montin@gmail.com>
    :date:
        16/06/2003
    :note:
        This work was supported in part by the National Institute of Biomedical Imaging and Bioengineering (NIBIB) of the National Institutes of Health under Award Number R01 EB024536 and P41 EB017183. The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institutes of Health.
    """
    def __init__(self):
        """
        Initializes the B1-weighted reconstruction.
        """
        super().__init__()
        self.HasAcceleration= False
        self.HasSensitivity=True
        self.HasAutocalibration=False
    def getOutput(self):
        # Matched-filter combination per pixel; pixels with an all-zero
        # sensitivity vector (outside the mask) stay 0.
        img_matrix = self.get2DKSIFFT()
        pw_sensmap=self.getCoilSensitivityMatrix()
        # identity after prewhitening
        invRn=self.getInverseNoiseCovariancePrewhitened()
        nf,nph =self.getSignalKSpaceSize()
        im = np.zeros((nf,nph),dtype=self.complexType)
        for irow in range(nf):
            for icol in range(nph):
                s_matrix=pw_sensmap[irow,icol,:]
                if s_matrix.sum() !=0:
                    im[irow,icol] = s_matrix.conj().T @ invRn @ img_matrix[irow,icol,:]
        return im
    @staticmethod
    def testrecon():
        # Build a ready-to-run B1 reconstructor on the packaged test data.
        TEST = cm2DReconB1()
        [K, N, nc] = cm.getMRoptimumTestData()
        TEST.setSignalKSpace(K)
        TEST.setNoiseCovariance(nc)
        return TEST
class cm2DKellmanB1(cm2DReconB1):
    """
    Kellman SNR map for the B1-weighted combination
    (Python port of the MATLAB class).

    Per pixel: SNR = sqrt(2) * |s^H Rn^-1 p| / sqrt(|s^H Rn^-1 s|)
    (Kellman & McVeigh, MRM 2005), with prewhitened data.

    :author:
        Dr. Eros Montin, Ph.D. <eros.montin@gmail.com>
    :date:
        16/06/2003
    :note:
        This work was supported in part by the National Institute of Biomedical Imaging and Bioengineering (NIBIB) of the National Institutes of Health under Award Number R01 EB024536 and P41 EB017183. The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institutes of Health.
    """
    def getOutput(self):
        img_matrix = self.get2DKSIFFT()
        pw_sensmap=self.getCoilSensitivityMatrix()
        # identity after prewhitening
        invRn=self.getInverseNoiseCovariancePrewhitened()
        nf,nph =self.getSignalKSpaceSize()
        im = np.zeros((nf,nph),dtype=self.complexType)
        SR=np.sqrt(2.0)
        for irow in range(nf):
            for icol in range(nph):
                s_matrix=pw_sensmap[irow,icol,:]
                # skip pixels outside the sensitivity mask (all-zero vector)
                if s_matrix.sum() !=0:
                    S=s_matrix
                    ST=s_matrix.conj().T
                    I=img_matrix[irow,icol,:]
                    num=np.dot(SR,np.abs(ST @ invRn @ I))
                    den=np.sqrt(np.abs(ST @ invRn @ S))
                    im[irow,icol] = np.divide(num,den)
        return im
class cm2DReconWithSensitivityAutocalibrated(cm2DReconWithSensitivity):
    """
    Reconstruction with coil sensitivities derived from autocalibration
    lines (ACL), supporting acceleration (Python port of the
    m2DReconWithSensitivityAutocalibrated MATLAB class).

    :author:
        Dr. Eros Montin, Ph.D. <eros.montin@gmail.com>
    :date:
        16/06/2003
    :note:
        This work was supported in part by the National Institute of Biomedical Imaging and Bioengineering (NIBIB) of the National Institutes of Health under Award Number R01 EB024536 and P41 EB017183. The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institutes of Health.

    Attributes:
        Autocalibration (list): ACL count per direction [frequency, phase];
            NaN entries mean "use the full k-space extent".
        Acceleration (list): acceleration factor per direction [frequency, phase].
    """
    def __init__(self):
        """
        Initializes the reconstruction.
        """
        super().__init__()
        self.HasAcceleration= True
        self.HasSensitivity=True
        self.HasAutocalibration=True
        self.Autocalibration=[np.nan]*self.dimesion
        self.Acceleration=[1]*self.dimesion
    def setAutocalibrationLines(self,ACL):
        """Set the number of autocalibration lines.

        :param ACL: either a [frequency, phase] pair, or a scalar applied to
            the phase direction (the frequency direction then defaults to
            the full k-space extent). NaN entries are replaced by the
            corresponding signal k-space size.
        """
        # if acl is a tuple or list
        if isinstance(ACL, (list, tuple)):
            self.Autocalibration=ACL
        else:
            # BUGFIX: np.NaN was removed in NumPy 2.0; np.nan is the portable spelling.
            self.Autocalibration=[np.nan,ACL]
        s=self.getSignalKSpaceSize()
        for i,a in enumerate(self.Autocalibration):
            if np.isnan(a):
                self.Autocalibration[i]=s[i]
    def setAcceleration(self,ACL):
        """Set the acceleration factor(s); a scalar applies to the phase
        direction only (frequency stays unaccelerated)."""
        # if acceleration is a tuple or list
        if isinstance(ACL, (list, tuple)):
            self.Acceleration=ACL
        else:
            self.Acceleration=[1,ACL]
    def getCoilSensitivityMatrixReferenceKSpace(self):
        """Compute (once) and return prewhitened simple-SENSE sensitivity
        maps from the reference k-space."""
        # MASK
        if self.CoilSensitivityMatrix.isEmpty():
            s= self.getReferenceKSpace()
            self.setCoilSensitivityMatrix(cm.prewhiteningSignal(cm.calculate_simple_sense_sensitivitymaps(s,self.MaskCoilSensitivityMatrix), self.getNoiseCovariance() ))
        return self.CoilSensitivityMatrix.get()
class cm2DReconSENSE(cm2DReconWithSensitivityAutocalibrated):
    """
    SENSE reconstruction of a 2D k-space.

    Works with a zero-padded signal and reference k-space: for every folded
    pixel the aliased coil values are unfolded with the pseudo-inverse of
    the stacked sensitivity vectors.

    :author:
        Dr. Eros Montin, Ph.D. <eros.montin@gmail.com>
    :date:
        16/06/2023
    :note:
        This work was supported in part by the National Institute of Biomedical Imaging and Bioengineering (NIBIB) of the National Institutes of Health under Award Number R01 EB024536 and P41 EB017183. The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institutes of Health.
    Attributes:
        Acceleration [int,int]: Acceleration factor in the Frequency and Phase direction
        Autocalibration [int,int]: Number of autocalibration lines in the Frequency and Phase direction
    """
    def __init__(self):
        super().__init__()
        self.HasAutocalibration=True
    def setSignalKSpace(self, signalKSpace):
        # the frequency-direction ACL always spans the full readout
        super().setSignalKSpace(signalKSpace)
        self.Autocalibration[0]=signalKSpace.shape[0]
    def getOutput(self):
        # Accept either a [R1, R2] pair or a scalar phase acceleration.
        if isinstance(self.Acceleration, (list, tuple)):
            R1,R2=self.Acceleration
        else:
            R1=1
            R2=self.Acceleration
        Rtot = R1 * R2
        nc=self.getsignalNCoils()
        # prepare the matrix size
        nf,nph = self.getSignalKSpaceSize()
        # ideally after prewhitening the noise covariance matrix should be the identity
        invRn=self.getInverseNoiseCovariancePrewhitened()
        # get the prewhitened signal
        pw_signalrawdata=self.getPrewhitenedSignal()
        # get the image ifft (size is the size of the full image);
        # sqrt(Rtot) compensates the intensity loss from undersampling
        img_matrix = self.get2DKSIFFT(pw_signalrawdata) * np.sqrt(Rtot)
        # now the image is folded
        imfold=cm.shrinkToAliasedMatrix2D(img_matrix,[R1,R2])
        pw_sensmap=self.getCoilSensitivityMatrix()
        MRimage = np.zeros((nf, nph),dtype=self.complexType)
        for irow in range(nf // R1):
            for icol in range(nph//R2):
                # full-image coordinates that alias onto this folded pixel
                r1=np.arange(irow,nf,nf//R1)
                r2=np.arange(icol,nph,nph//R2)
                current_R1 = len(r1)
                current_R2 = len(r2)
                current_Rtot = current_R1 * current_R2
                # stack the sensitivity vectors of the aliased positions
                s=np.zeros((current_R1,current_R2,nc),dtype=self.complexType)
                for i,_x in enumerate(r1):
                    for j,_y in enumerate(r2):
                        s[i,j,:] = pw_sensmap[_x,_y, :]
                s = s.reshape((current_Rtot, nc),order='F')
                s = np.transpose(s,[1,0])
                s[np.isnan(s)] = 0 + 1j*0
                # SENSE unfolding matrix: pinv(S^H Rn^-1 S) S^H Rn^-1
                u = np.linalg.pinv(s.conj().T @ invRn @ s) @ (s.conj().T @ invRn)
                u[np.isnan(u)] = 0 + 1j*0
                U=np.reshape(u @ imfold[irow,icol],(current_R1,current_R2),order='F')
                for i,_x in enumerate(r1):
                    for j,_y in enumerate(r2):
                        MRimage[_x,_y] = U[i,j]
        if (cm.needs_regridding(self.getSignalKSpace(),[R1,R2])):
            MRimage=cm.resizeIM2D(MRimage,self.getSignalKSpaceSize());
        return MRimage
class cm2DKellmanSENSE(cm2DReconSENSE):
    """Kellman SNR map for the SENSE reconstruction: each unfolded pixel is
    normalized by its unfolding noise amplification sqrt(diag(u Rn u^H))
    and scaled by sqrt(2) (Kellman & McVeigh, MRM 2005)."""
    def getOutput(self):
        # Accept either a [R1, R2] pair or a scalar phase acceleration.
        if isinstance(self.Acceleration, (list, tuple)):
            R1,R2=self.Acceleration
        else:
            R1=1
            R2=self.Acceleration
        Rtot = R1 * R2
        nc=self.getsignalNCoils()
        nf,nph = self.getSignalKSpaceSize()
        invRn=self.getInverseNoiseCovariancePrewhitened()
        pw_signalrawdata=self.getPrewhitenedSignal()
        img_matrix = self.get2DKSIFFT(pw_signalrawdata) * np.sqrt(Rtot)
        imfold=cm.shrinkToAliasedMatrix2D(img_matrix,[R1,R2])
        pw_sensmap=self.getCoilSensitivityMatrix()
        Rn = np.eye(nc,dtype=self.complexType); #it's prewhitened
        MRimage = np.zeros((nf, nph),dtype=self.complexType)
        _c=np.sqrt(2.0)
        for irow in range(nf // R1):
            for icol in range(nph//R2):
                # full-image coordinates that alias onto this folded pixel
                r1=np.arange(irow,nf,nf//R1)
                r2=np.arange(icol,nph,nph//R2)
                current_R1 = len(r1)
                current_R2 = len(r2)
                current_Rtot = current_R1 * current_R2
                s=np.zeros((current_R1,current_R2,nc),dtype=self.complexType)
                for i,_x in enumerate(r1):
                    for j,_y in enumerate(r2):
                        s[i,j,:] = pw_sensmap[_x,_y, :]
                s = s.reshape((current_Rtot, nc),order='F')
                s = np.transpose(s,[1,0])
                s[np.isnan(s)] = 0 + 1j*0
                # SENSE unfolding matrix: pinv(S^H Rn^-1 S) S^H Rn^-1
                u = np.linalg.pinv(s.conj().T @ invRn @ s) @ (s.conj().T @ invRn)
                u[np.isnan(u)] = 0 + 1j*0
                # unfold and normalize by the per-pixel noise amplification
                U=np.reshape(u @ imfold[irow,icol]/np.diag(np.sqrt(u @ Rn @ u.conj().T)),(current_R1,current_R2),order='F')
                for i,_x in enumerate(r1):
                    for j,_y in enumerate(r2):
                        MRimage[_x,_y] = _c*U[i,j]
        if (cm.needs_regridding(self.getSignalKSpace(),[R1,R2])):
            MRimage=cm.resizeIM2D(MRimage,self.getSignalKSpaceSize())
        return MRimage
import pygrappa
class cm2DGFactorv2(cm2DReconSENSE):
    """Geometry-factor (g-factor) map computed through pygrappa.gfactor."""
    def getOutput(self):
        """Return the g-factor map for the current sensitivities and acceleration."""
        accel = self.Acceleration
        if isinstance(accel, (list, tuple)):
            R1, R2 = accel
        else:
            R1, R2 = 1, accel
        sens = self.getCoilSensitivityMatrix()
        gmap = pygrappa.gfactor(sens, R1, R2)
        # bring the map back onto the acquisition grid when needed
        if cm.needs_regridding(self.getSignalKSpace(), [R1, R2]):
            gmap = cm.resizeIM2D(gmap, self.getSignalKSpaceSize())
        return gmap
class cm2DGFactorSENSE(cm2DReconSENSE):
    """Analytic SENSE g-factor map:
    g = sqrt(diag(pinv(S^H Rn^-1 S)) * diag(S^H Rn^-1 S)) per aliased pixel set."""
    def getOutput(self):
        # Accept either a [R1, R2] pair or a scalar phase acceleration.
        if isinstance(self.Acceleration, (list, tuple)):
            R1,R2=self.Acceleration
        else:
            R1=1
            R2=self.Acceleration
        nc=self.getsignalNCoils()
        nf,nph = self.getSignalKSpaceSize()
        invRn=self.getInverseNoiseCovariancePrewhitened()
        pw_sensmap=self.getCoilSensitivityMatrix()
        MRimage = np.zeros((nf, nph))
        # NOTE(review): _c = sqrt(2) is applied to the g-factor below; the
        # classic definition has no such factor — confirm against the MATLAB original.
        _c=np.sqrt(2.0)
        for irow in range(nf // R1):
            for icol in range(nph//R2):
                # full-image coordinates that alias onto this folded pixel
                r1=np.arange(irow,nf,nf//R1)
                r2=np.arange(icol,nph,nph//R2)
                current_R1 = len(r1)
                current_R2 = len(r2)
                current_Rtot = current_R1 * current_R2
                s=np.zeros((current_R1,current_R2,nc),dtype=self.complexType)
                for i,_x in enumerate(r1):
                    for j,_y in enumerate(r2):
                        s[i,j,:] = pw_sensmap[_x,_y, :]
                s = s.reshape((current_Rtot, nc),order='F')
                s = np.transpose(s,[1,0])
                s[np.isnan(s)] = 0 + 1j*0
                u1=s.conj().T @ invRn @ s
                # element-wise product of the diagonals of pinv(u1) and u1
                u = np.diag(np.linalg.pinv(u1))*np.diag(u1)
                u[np.isnan(u)] = 0 + 1j*0
                U=np.reshape(u,(current_R1,current_R2),order='F')
                for i,_x in enumerate(r1):
                    for j,_y in enumerate(r2):
                        MRimage[_x,_y] = np.abs(_c*U[i,j])
        if (cm.needs_regridding(self.getSignalKSpace(),[R1,R2])):
            MRimage=cm.resizeIM2D(MRimage,self.getSignalKSpaceSize())
        return MRimage
from pygrappa import sense1d,cgsense
class cm2DReconSENSEv1(cm2DReconSENSE):
    """SENSE reconstruction delegated to pygrappa.sense1d (phase direction only)."""
    def getOutput(self):
        """Return the unfolded image computed by pygrappa's 1D SENSE."""
        kspace = self.getPrewhitenedSignal()
        sens = self.getCoilSensitivityMatrix()
        return sense1d(kspace, sens, Ry=self.Acceleration, coil_axis=-1, imspace=False)
class cm2DReconCGSENSE(cm2DReconSENSE):
    """Conjugate-gradient SENSE reconstruction delegated to pygrappa.cgsense."""
    def getOutput(self):
        """Return the CG-SENSE image for the prewhitened k-space."""
        kspace = self.getPrewhitenedSignal()
        sens = self.getCoilSensitivityMatrix()
        return cgsense(kspace, sens, coil_axis=-1)
class cm2DReconGRAPPA(cm2DReconWithSensitivityAutocalibrated):
    """GRAPPA reconstruction.

    Missing k-space lines are synthesized with a GRAPPA kernel calibrated on
    the autocalibration (ACL) region of the reference k-space; the completed
    k-space is then coil-combined with RSS.
    """
    def __init__(self):
        super().__init__()
        # GRAPPA works directly in k-space: no explicit sensitivity maps.
        self.HasSensitivity = False
        self.GRAPPAKernel = [3, 2]
        self.PrewhitenedSignalKspaceACL = cm.k2d()
        # coil combination applied to the GRAPPA-completed k-space
        self.reconstructor = cm2DReconRSS()
    def getPrewhitenedReferenceKSpaceACL(self):
        """
        Return the autocalibration region of the prewhitened reference k-space.
        Returns:
            np.ndarray(f,p,c): prewhitened ACL k-space
        """
        RF = self.getPrewhitenedReferenceKSpace()
        return cm.getAutocalibrationsLines2DKSpace(RF, self.Autocalibration)
    def setGRAPPAKernel(self, GK):
        """Set the GRAPPA kernel size [frequency, phase]."""
        self.GRAPPAKernel = GK
    def getR(self):
        """Return an acceleration factor that exactly divides the number of
        phase-encode lines, reducing the requested one if necessary.

        BUGFIX: the original bound a local variable named ``np`` (the number
        of phase lines), shadowing the numpy module inside this method; the
        local has been renamed. The parameter was also renamed from ``this``
        to the conventional ``self`` (call sites are unaffected).
        """
        SS = self.getSignalKSpaceSize()
        R = self.Acceleration[-1]
        n_phase = SS[1]
        if n_phase % R != 0:
            tempR = R
            while n_phase % tempR != 0:
                tempR -= 1
            R = tempR
            ss = f'Acceleration R reduced to {R} ' \
                 f', so the number of lines can be exactly divided by R'
            print(ss)
        return R
    def getOutput(self):
        """Return the RSS image of the GRAPPA-reconstructed k-space."""
        grappa_kernel = self.GRAPPAKernel
        data_acs = self.getPrewhitenedReferenceKSpaceACL()
        pw_signalrawdata = self.getPrewhitenedSignal()
        K = cm.getGRAPPAKspace(pw_signalrawdata, data_acs, grappa_kernel)
        K = self.checkKSpacePixelType(K)
        R = self.reconstructor
        R.setPrewhitenedSignal(K)
        return R.getOutput()
##MR PMR
class cm2DSignalToNoiseRatio(cm.cmOutput):
    """Base class for 2D SNR calculations."""
    def __init__(self, message=None):
        super().__init__(message)
        """
        the class expects a 3D matrix composed by a tile of 2D numpy images
        """
        self.appendLog("SNR Calculation started instantiated", "start")
        self.SNR = None
        self.Type = "MR"
        self.SubType = ""
    # NOTE: resetASPMR/resetANPMR are written to be grafted onto a cm2DRecon
    # instance via types.MethodType (see cm2DSignalToNoiseRatioMultipleReplicas
    # in this file): 'self' is expected to be a reconstructor, which defines
    # SignalPrewhitened / NoiseBandWidth / NoiseCovariance — this class does not.
    # Unlike the reconstructor's own reset hooks, they deliberately do NOT
    # reset the coil sensitivity maps, so replicas can reuse them.
    def resetASPMR(self):
        self.SignalPrewhitened.reset()
    def resetANPMR(self):
        if not isinstance(self.NoiseBandWidth,str):
            self.NoiseBandWidth=None
            print('reset the NBW')
        self.NoiseCovariance = np.array([])
        self.InverseNoiseCovariance = np.array([])
        self.SignalPrewhitened.reset()
class cm2DSignalToNoiseRatioMultipleReplicas(cm2DSignalToNoiseRatio):
    """SNR estimation from a stack of 2D replica images (multiple replicas).

    SNR is computed pixel-wise as mean(replicas) / std(replicas).
    """
    def __init__(self,x=None,message=None):
        """
        the class expects a 3D matrix composed by a tile of 2D images
        """
        super().__init__(message)
        self.imageArray=cm.i3d()
        # cached statistics, invalidated whenever a replica is added
        self.Mean=None
        self.STD=None
        self.Max=None
        self.Type = "MR"
        self.reconstructor=None
        self.referenceImage=cm.i2d()
        if x is not None:
            self.add2DStackOfImages(x)
    def reset(self):
        # a new replica invalidates every cached statistic
        self.Max=None
        self.Mean=None
        self.STD=None
        self.SNR=None
    def getReferenceImage(self):
        return self.referenceImage.get()
    def setReferenceImage(self,x):
        self.referenceImage.set(x)
    def add2DImage(self, x):
        """
        add a 2D image (or a stack of them) to the replica array
        """
        self.reset()
        if len(x.shape)==2:
            x=np.expand_dims(x,axis=-1)
        if self.imageArray.isEmpty():
            self.setImageArray(x)
        else:
            self.setImageArray(np.concatenate((self.getImageArray(), x), axis=-1))
    def add2DStackOfImages(self, x):
        """
        add a stack of 2D images to the class
        """
        self.add2DImage(x)
    def setSignalKSpace(self,signal):
        # reconstruct the k-space and queue the result as a new replica
        self.reconstructor.setSignalKSpace(signal)
        self.add2DImage(self.reconstructor.getOutput())
    def add2DKspace(self,signal):
        self.reconstructor.setSignalKSpace(signal)
        self.add2DImage(self.reconstructor.getOutput())
    def setReconstructor(self,recon):
        """We don't want to recalculate the sensitivity map at every replica:
        the reconstructor's reset hooks are replaced with lightweight
        versions that preserve the coil sensitivities.

        Args:
            recon (cm2DRecon): reconstructor instance

        BUGFIX: the original referenced the bare names ``resetASPMR`` /
        ``resetANPMR`` (a NameError at call time); they are defined on
        cm2DSignalToNoiseRatio and must be bound through the class.
        """
        self.reconstructor=recon
        self.reconstructor.resetAfterSignal=MethodType(
            cm2DSignalToNoiseRatio.resetASPMR, self.reconstructor)
        self.reconstructor.resetAfterNoise=MethodType(
            cm2DSignalToNoiseRatio.resetANPMR, self.reconstructor)
    def getReconstructor(self):
        return self.reconstructor
    def resetAfterNoise(self,keepsensitivity=False):
        """Reset the reconstructor's noise-derived state; optionally keep
        the coil sensitivity maps.

        BUGFIX: the original called ``super().resetAfterNoise()`` (not
        defined on the parent) and ``self.setCoilSensitivityMatrix.reset()``
        (an AttributeError); the apparent intent — resetting the
        reconstructor and, unless requested, its sensitivities — is
        implemented instead.
        """
        if self.reconstructor is not None:
            self.reconstructor.resetAfterNoise()
            if not keepsensitivity and hasattr(self.reconstructor, 'resetCoilSensitivityMatrix'):
                self.reconstructor.resetCoilSensitivityMatrix()
    def getImageArray(self):
        """
        return the image array
        """
        return self.imageArray.get()
    def setImageArray(self,x):
        """
        set the image array
        """
        self.imageArray.set(x)
    def getOutput(self):
        """
        pixel-wise SNR: replica mean divided by replica standard deviation
        """
        if self.SNR is None:
            self.SNR = np.divide(self.getImageArrayMean(), self.getImageArraySTD())
        return self.SNR
    def getImageArrayMean(self):
        """
        return the mean of the image array
        """
        if self.Mean is None:
            self.Mean=np.nanmean(np.abs(self.getImageArray()), axis=-1)
        return self.Mean
    def getImageArraySTD(self):
        """
        return the standard deviation of the image array
        """
        if self.STD is None:
            # ddof=1: sample standard deviation, matching MATLAB's default
            self.STD=np.nanstd(np.abs(self.getImageArray()), axis=-1,ddof=1)
        return self.STD
    def getImageArrayMax(self):
        """
        return the maximum of the image array
        """
        if self.Max is None:
            self.Max=np.nanmax(np.abs(self.getImageArray()), axis=-1)
        return self.Max
    def plotImageArray(self, p=0.5):
        """
        plot each replica next to its difference with the previous one
        """
        im = self.getImageArray()
        for t in range(im.shape[-1]):
            plt.subplot(121)
            plt.imshow(im[:, :, t])
            plt.colorbar()
            plt.title("Replicas number: " + str(t+1))
            if t>0:
                plt.subplot(122)
                plt.imshow(im[:, :, t]-im[:, :, t-1])
                plt.title(f'differerence {t+1} - {t}')
                plt.colorbar()
            plt.pause(interval=1)
        plt.show()
class cm2DSignalToNoiseRatioPseudoMultipleReplicas(cm2DSignalToNoiseRatioMultipleReplicas):
    """Pseudo multiple-replica SNR: replicas are synthesized by adding
    correlated pseudo-noise to the measured k-space and reconstructing each."""
    def __init__(self, x=None, message=None):
        super().__init__(x, message)
        self.numberOfReplicas=20
        # cached SNR denominator (noise map), invalidated by reset()
        self.D=None
    def createPseudoReplica(self,S,corr_noise_factor):
        # add correlated gaussian pseudo-noise to the signal and queue the recon
        sh=self.reconstructor.getSignalKSpaceSize()
        N= cm.get_pseudo_noise(msize=[*sh, self.reconstructor.getsignalNCoils()],corr_noise_factor=corr_noise_factor)
        self.add2DKspace(S+N)
    def getSNRDenumerator(self):
        if self.D is None:
            # NOTE(review): adding a constant (the global max) before nanstd does
            # not change the std along the replica axis — presumably meant to
            # stabilize the estimate; confirm against the MATLAB original.
            D=np.nanstd(np.abs(self.getImageArray())+np.max(np.abs(self.getImageArrayMax())), axis=-1,ddof=1)
            # avoid division by (numerically) zero noise
            D[D<=np.finfo(np.float64).eps]=1
            self.D=D
        return self.D
    def reset(self):
        super().reset()
        self.D=None
    def getOutput(self):
        # set the reference image
        # self.setReferenceImage(self.reconstructor.getOutput())
        corr_noise_factor=cm.get_correlation_factor(correlation_matrix=self.reconstructor.getNoiseCovariance())
        S=self.reconstructor.getSignalKSpace()
        for a in range(self.numberOfReplicas):
            #add in the queue
            self.createPseudoReplica(S,corr_noise_factor)
        if self.referenceImage.isEmpty():
            self.setReferenceImage(self.reconstructor.getOutput())
        D=self.getSNRDenumerator()
        SNR=np.divide(self.getReferenceImage(),D)
        return SNR
class cm2DSignalToNoiseRatioPseudoMultipleReplicasWein(cm2DSignalToNoiseRatioPseudoMultipleReplicas):
    """Pseudo multiple-replica SNR with a Wien-filtered local noise estimate."""
    def __init__(self, x=None, message=None):
        super().__init__(x, message)
        self.Type = 'CR'
        # side of the local box used for the noise estimation
        self.boxSize = 2
    def getSNRDenumerator(self):
        """Estimate the noise map from the residual between the reference
        image and the mean of the pseudo-replicas."""
        if self.referenceImage.isEmpty():
            self.setReferenceImage(self.reconstructor.getOutput())
        residual = self.getReferenceImage() - self.getImageArrayMean()
        return cm.get_wien_noise_image(residual, self.boxSize)
| cloudmrhub-com/cloudmrhub | cloudmrhub/cm2D.py | cm2D.py | py | 38,909 | python | en | code | 0 | github-code | 13 |
30478730301 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Importing libraries
# import tensorflow as tf
import os
import time
import numpy as np
import pickle
# load and show an image with Pillow
from PIL import Image
# import flask, flask_bootstrap, werkzeug
from flask import Flask, request, redirect, url_for, render_template
from flask_bootstrap import Bootstrap
from werkzeug.utils import secure_filename
# setting up folder structure for deployment
OUTPUT_DIR = 'uploads'          # staging folder for uploads (created on first run)
DOWNLOAD_DIR = "static/images"  # folder the saved images are served from
if not os.path.isdir(OUTPUT_DIR):
    print('Creating static folder..')
    os.mkdir(OUTPUT_DIR)
# Image width & length
img_size = 8
app = Flask(__name__)
# To setup Bootstrap templates
Bootstrap(app)
# NOTE(review): a hard-coded secret key is unsafe outside development;
# load it from the environment for any real deployment.
app.config['SECRET_KEY'] = 'your secret key'
app.config['UPLOAD_FOLDER'] = OUTPUT_DIR
app.config['DOWNLOAD_FOLDER'] = DOWNLOAD_DIR
# Load trained model
# NOTE: pickle.load executes arbitrary code if the file is untrusted; the
# model ships with the app, so loading it here is acceptable.
with open("model/classifier.pickle", "rb") as handle:
    classifier = pickle.load(handle)
@app.route('/', methods=['GET', 'POST'])
def load_image():
    """Upload endpoint: accept an image file, classify it, render the result.

    GET renders the upload form; POST validates the uploaded file, saves it
    under the download folder, runs the classifier on it and renders the
    prediction page.
    """
    if request.method == 'POST':
        # Check if the post request has the file part
        if 'file' not in request.files:
            print('No file part')
            return redirect(request.url)
        file = request.files['file']
        # Check if no file was submitted to the HTML form
        if file.filename == '':
            print('No selected file')
            return redirect(request.url)
        if file:
            # sanitize the user-supplied filename before writing it to disk
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['DOWNLOAD_FOLDER'], filename))
            print(os.path.join(app.config['DOWNLOAD_FOLDER'], filename))
            # re-open the saved file so the template can also reference its path
            image = Image.open(os.path.join(app.config['DOWNLOAD_FOLDER'], filename))
            output = make_prediction(image)
            path_to_image = os.path.join(app.config['DOWNLOAD_FOLDER'], filename)
            result = {
                'output': output,
                'path_to_image': path_to_image,
                'size': 200
            }
            return render_template('show.html', result=result)
    return render_template('index.html')
def make_prediction(image):
    """Run the trained classifier on a PIL image and return the prediction."""
    # collapse the RGB image to the 8x8 intensity map the model was trained on
    pixel_map = rbg_to_pixel_intensities(image)
    feature_row = img_reshape(pixel_map)
    # Generate predictions
    predictions = classifier.predict(feature_row)
    print('This is the given prediction' +
          ' = ' + str(predictions))
    return predictions
def rbg_to_pixel_intensities(image):
    """Convert a PIL RGB image to an 8x8 map of inverted pixel intensities.

    The image is resized to img_size x img_size, inverted (255 - value),
    scaled to [0, 1], and the three channels are collapsed into a single
    magnitude per pixel.

    NOTE(review): assumes a 3-channel input — an RGBA or grayscale upload
    would break the channel indexing below; confirm against the upload path.
    (The function name keeps its historical 'rbg' spelling because
    make_prediction calls it by that name.)
    """
    print('Image Format' + ' = ' + str(image.format))
    print('Image Mode' + ' = ' + str(image.mode))
    print('Image Size' + ' = ' + str(image.size))
    # Image resize to train model
    resize_img = image.resize((img_size, img_size))
    print('New Image Size' + ' = ' + str(resize_img.size))
    img_to_array = np.array(resize_img)
    # invert (white background -> low intensity) and normalize to [0, 1]
    scaled = (255 - img_to_array) / 255
    print('scaled shape' + ' = ' + str(scaled.shape))
    print('scaled max' + ' = ' + str(scaled.max()))
    print('scaled min' + ' = ' + str(scaled.min()))
    return np.sqrt(scaled[:, :, 0] ** 2 + scaled[:, :, 1] ** 2 + scaled[:, :, 2] ** 2)
def img_reshape(scaled):
    """Flatten a scaled intensity image to one sample row and rescale to [0, 16].

    :param scaled: 2-D numpy array of per-pixel intensities
    :return: array of shape (1, n_pixels) with values linearly mapped to 0..16
    """
    print('Entering Scaled Image' + ' = ' + str(scaled.shape))
    # Reshape array to fit training model; -1 generalises the original
    # hard-coded 64 so any square input resolution works.
    transformed_img = scaled.reshape(1, -1)
    lo, hi = transformed_img.min(), transformed_img.max()
    if hi > lo:
        # Map the observed intensity range onto 0..16 (the value range the
        # classifier was trained on).
        transformed_img = np.interp(transformed_img, (lo, hi), (0, 16))
    else:
        # Degenerate (constant) image: np.interp requires an increasing xp
        # pair, so map a flat image to all zeros instead of undefined output.
        transformed_img = np.zeros_like(transformed_img, dtype=float)
    print('transformed_img shape' + ' = ' + str(transformed_img.shape))
    print('transformed_img max' + ' = ' + str(transformed_img.max()))
    print('transformed_img min' + ' = ' + str(transformed_img.min()))
    return transformed_img
if __name__ == '__main__':
    # Development server only; serve through a WSGI server in production.
    app.run(debug=True)
| fabiogeraci/heroku | app.py | app.py | py | 3,660 | python | en | code | 0 | github-code | 13 |
19770400014 | """
An example model. Despite its lineage from the DeepSEA code base
(Zhou & Troyanskaya, 2015), the architecture below is a purely
fully connected (linear) classifier — it contains no convolutional layers.
When making a model architecture file of your own, please review this
file in its entirety. In addition to the model class, Selene expects
that `criterion` and `get_optimizer(lr)` are also specified in this file.
"""
import numpy as np
import torch
import torch.nn as nn
class DeepLIFT(nn.Module):
    """
    A fully connected classifier architecture.

    Parameters
    ----------
    sequence_length : int
        The length of the sequences on which the model trains and makes
        predictions.  Input is assumed to be one-hot encoded with 4 channels
        per position (shape (batch, 4, sequence_length)) — TODO confirm.
    n_targets : int
        The number of targets (classes) to predict.

    Attributes
    ----------
    classifier : torch.nn.Sequential
        The linear classifier and sigmoid transformation components of the
        model.
    """
    def __init__(self, sequence_length, n_targets):
        super(DeepLIFT, self).__init__()
        hidden_size1 = 8000
        hidden_size2 = 1000
        # The original hard-coded 4000 input features, which silently assumed
        # sequence_length == 1000; derive the width from the constructor
        # argument instead (backward compatible for sequence_length=1000).
        self._n_features = 4 * sequence_length
        # fully connected layer
        self.classifier = nn.Sequential(
            nn.Linear(self._n_features, hidden_size1),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_size1, hidden_size2),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(hidden_size2),
            nn.Linear(hidden_size2, n_targets),
            nn.Sigmoid())

    def forward(self, x):
        """
        Forward propagation of a batch of shape (batch, 4, sequence_length).
        Returns per-target probabilities in [0, 1].
        """
        reshape_x = x.reshape(x.size(0), self._n_features)
        out = self.classifier(reshape_x)
        return out
def criterion():
    """Return the loss function (criterion) appropriate for this model.

    Returns
    -------
    torch.nn._Loss
        A mean-squared-error loss instance.
    """
    loss_fn = nn.MSELoss()
    return loss_fn
def get_optimizer(lr):
    """Return the optimizer class and its constructor kwargs.

    Returns
    -------
    tuple(torch.optim.Optimizer, dict)
        The optimizer class and the dictionary of kwargs that should
        be passed in to the optimizer constructor.
    """
    optimizer_kwargs = {"lr": lr, "weight_decay": 1e-6, "momentum": 0.9}
    return (torch.optim.SGD, optimizer_kwargs)
| snwessel/NeuralNetworksForGWAS | models/deeplift.py | deeplift.py | py | 2,176 | python | en | code | 0 | github-code | 13 |
74936008656 | import os
import sys
import click
from flask import Flask,render_template
from flask_sqlalchemy import SQLAlchemy
WIN=sys.platform.startswith('win')
if WIN: # on Windows the sqlite URI prefix uses three slashes
    prefix='sqlite:///'
else: # other (POSIX) systems need four
    prefix='sqlite:////'
app=Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = prefix + os.path.join(app.root_path, 'data.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # turn off model-modification tracking (saves memory)
app.config['SECRET_KEY'] = 'dev' # equivalent to app.secret_key = 'dev'; NOTE(review): dev-only secret
# load configuration before the extension is instantiated
db = SQLAlchemy(app)
class Word(db.Model): # table name will be 'word' (auto-generated, lower-cased)
    """A dictionary entry pairing an English word with its Chinese translation."""
    id=db.Column(db.Integer,primary_key=True) # primary key
    en=db.Column(db.String(50)) # English text
    ch=db.Column(db.String(50)) # Chinese text
@app.cli.command() # register as a flask CLI command ("flask init")
def init():
    """
    Create the tables and seed the database with the initial word data.
    """
    db.create_all()
    words=[
        {'en':'tension','ch':'张力'},
        {'en':'python','ch':'蟒蛇'}
    ]
    for w in words:
        word=Word(en=w['en'],ch=w['ch'])
        db.session.add(word)
    db.session.commit()
    click.echo('已导入初始单词数据')
#init()
from flask import request, url_for, redirect, flash
@app.route('/',methods=['GET','POST'])
def index():
    """Main page: list all words; on POST validate and add a new word."""
    if request.method=='POST':
        # fetch the submitted form data
        en=request.form.get('en')
        ch=request.form.get('ch')
        # reject empty fields or over-long values (columns are String(50))
        if not en or not ch or len(en)>45 or len(ch)>45:
            flash('无效输入')
            return redirect(url_for('index'))
        # persist the form data to the database
        word=Word(en=en,ch=ch)
        db.session.add(word)
        db.session.commit()
        flash('新添单词:'+en)
        return redirect(url_for('index')) # redirect back to the main page
    words=Word.query.all()
    return render_template('index.html',words=words)
@app.route('/word/edit/<int:word_id>',methods=['GET','POST'])
def edit(word_id):
    """Edit page for a single word; 404s when the id does not exist."""
    word=Word.query.get_or_404(word_id)
    if request.method=='POST':
        en = request.form.get('en')
        ch = request.form.get('ch')
        if not en or not ch or len(en) > 45 or len(ch) > 45:
            flash('无效输入')
            # NOTE(review): invalid input redirects to the index page and so
            # loses the editing context — consider redirecting back to 'edit'.
            return redirect(url_for('index'))
        # persist the form data to the database
        word.en=en
        word.ch=ch
        db.session.commit()
        flash('更新单词:' + en)
        return redirect(url_for('index')) # redirect back to the main page
    return render_template('edit.html',word=word)
@app.route('/word/delete/<int:word_id>',methods=['POST'])
def delete(word_id):
    """Delete the word with the given id and redirect to the main page."""
    word=Word.query.get_or_404(word_id)
    # Capture the English text BEFORE delete+commit: after the commit the
    # instance is expired/detached and attribute access can raise.
    deleted_en = word.en
    db.session.delete(word)
    db.session.commit()
    flash('删除单词:'+deleted_en)
    return redirect(url_for('index'))
| Tensiont/dictionary | app.py | app.py | py | 2,733 | python | en | code | 0 | github-code | 13 |
28560367752 | __all__ = [
'HasLayout',
'MatchesAncestry',
'ContainsNoVfsCalls',
'ReturnsUnlockable',
'RevisionHistoryMatches',
]
from bzrlib import (
osutils,
revision as _mod_revision,
)
from bzrlib import lazy_import
lazy_import.lazy_import(globals(),
"""
from bzrlib.smart.request import request_handlers as smart_request_handlers
from bzrlib.smart import vfs
""")
from testtools.matchers import Equals, Mismatch, Matcher
class ReturnsUnlockable(Matcher):
    """A matcher that checks for the pattern we want lock* methods to have:

    They should return an object with an unlock() method.
    Calling that method should unlock the original object.

    :ivar lockable_thing: The object which can be locked that will be
        inspected.
    """

    def __init__(self, lockable_thing):
        Matcher.__init__(self)
        self.lockable_thing = lockable_thing

    def __str__(self):
        return ('ReturnsUnlockable(lockable_thing=%s)' %
            self.lockable_thing)

    def match(self, lock_method):
        # Take the lock, then release it through the returned object.
        lock_method().unlock()
        if not self.lockable_thing.is_locked():
            return None
        return _IsLocked(self.lockable_thing)
class _IsLocked(Mismatch):
    """Mismatch raised when something that should be unlocked is still locked."""

    def __init__(self, lockable_thing):
        self.lockable_thing = lockable_thing

    def describe(self):
        return "{0} is locked".format(self.lockable_thing)
class _AncestryMismatch(Mismatch):
    """Mismatch describing a wrong ancestry for a tip revision."""

    def __init__(self, tip_revision, got, expected):
        self.tip_revision = tip_revision
        self.got = got
        self.expected = expected

    def describe(self):
        return ("mismatched ancestry for revision {0!r} was {1!r}, "
                "expected {2!r}".format(self.tip_revision, self.got,
                                        self.expected))
class MatchesAncestry(Matcher):
    """A matcher that checks the ancestry of a particular revision.

    :ivar graph: Graph in which to check the ancestry
    :ivar revision_id: Revision id of the revision
    """
    def __init__(self, repository, revision_id):
        Matcher.__init__(self)
        self.repository = repository
        self.revision_id = revision_id
    def __str__(self):
        return ('MatchesAncestry(repository=%r, revision_id=%r)' % (
            self.repository, self.revision_id))
    def match(self, expected):
        # Hold a read lock for the whole graph walk; always release it.
        self.repository.lock_read()
        try:
            graph = self.repository.get_graph()
            got = [r for r, p in graph.iter_ancestry([self.revision_id])]
            # NULL_REVISION is a synthetic sentinel, not a real ancestor.
            if _mod_revision.NULL_REVISION in got:
                got.remove(_mod_revision.NULL_REVISION)
        finally:
            self.repository.unlock()
        # Order-insensitive comparison; implicit None return means "matched".
        if sorted(got) != sorted(expected):
            return _AncestryMismatch(self.revision_id, sorted(got),
                sorted(expected))
class HasLayout(Matcher):
    """A matcher that checks if a tree has a specific layout.

    :ivar entries: List of expected entries, as (path, file_id) pairs,
        or bare path strings.
    """
    def __init__(self, entries):
        Matcher.__init__(self)
        self.entries = entries
    def get_tree_layout(self, tree):
        """Get the (path, file_id) pairs for the current tree."""
        # Generator holds the tree read lock until exhausted.
        tree.lock_read()
        try:
            for path, ie in tree.iter_entries_by_dir():
                if ie.parent_id is None:
                    # Tree root: empty path.
                    yield (u"", ie.file_id)
                else:
                    # kind_character appends e.g. '/' for directories.
                    yield (path+ie.kind_character(), ie.file_id)
        finally:
            tree.unlock()
    @staticmethod
    def _strip_unreferenced_directories(entries):
        """Strip all directories that don't (in)directly contain any files.

        :param entries: List of path strings or (path, ie) tuples to process
        """
        # Pending directories are only emitted once a file inside them shows up.
        directories = []
        for entry in entries:
            # Python 2 file: basestring covers str and unicode.
            if isinstance(entry, basestring):
                path = entry
            else:
                path = entry[0]
            if not path or path[-1] == "/":
                # directory
                directories.append((path, entry))
            else:
                # Yield the referenced parent directories
                for dirpath, direntry in directories:
                    if osutils.is_inside(dirpath, path):
                        yield direntry
                directories = []
                yield entry
    def __str__(self):
        return 'HasLayout(%r)' % self.entries
    def match(self, tree):
        actual = list(self.get_tree_layout(tree))
        # When expectations are bare paths, compare paths only.
        if self.entries and isinstance(self.entries[0], basestring):
            actual = [path for (path, fileid) in actual]
        if not tree.has_versioned_directories():
            entries = list(self._strip_unreferenced_directories(self.entries))
        else:
            entries = self.entries
        return Equals(entries).match(actual)
class RevisionHistoryMatches(Matcher):
    """A matcher that checks if a branch has a specific revision history.

    :ivar history: Revision history, as list of revisions. Oldest first.
    """
    def __init__(self, history):
        Matcher.__init__(self)
        self.expected = history
    def __str__(self):
        return 'RevisionHistoryMatches(%r)' % self.expected
    def match(self, branch):
        branch.lock_read()
        try:
            graph = branch.repository.get_graph()
            # Walk the left-hand (mainline) ancestry from the tip.
            history = list(graph.iter_lefthand_ancestry(
                branch.last_revision(), [_mod_revision.NULL_REVISION]))
            # iter_lefthand_ancestry yields newest-first; expected is oldest-first.
            history.reverse()
        finally:
            branch.unlock()
        return Equals(self.expected).match(history)
class _NoVfsCallsMismatch(Mismatch):
    """Mismatch describing a list of HPSS calls which includes VFS requests."""

    def __init__(self, vfs_calls):
        self.vfs_calls = vfs_calls

    def describe(self):
        rendered = []
        for call in self.vfs_calls:
            arg_text = ", ".join([repr(arg) for arg in call.args])
            rendered.append("%s(%s)" % (call.method, arg_text))
        return "no VFS calls expected, got: %s" % ",".join(rendered)
class ContainsNoVfsCalls(Matcher):
    """Ensure that none of the specified calls are HPSS calls."""
    def __str__(self):
        return 'ContainsNoVfsCalls()'
    @classmethod
    def match(cls, hpss_calls):
        # smart_request_handlers / vfs come from the lazy_import block above.
        vfs_calls = []
        for call in hpss_calls:
            try:
                request_method = smart_request_handlers.get(call.call.method)
            except KeyError:
                # A method we don't know about doesn't count as a VFS method.
                continue
            if issubclass(request_method, vfs.VfsRequest):
                vfs_calls.append(call.call)
        # None signals a successful match to testtools.
        if len(vfs_calls) == 0:
            return None
        return _NoVfsCallsMismatch(vfs_calls)
| ag1455/OpenPLi-PC | pre/python/lib/python2.7/dist-packages/bzrlib/tests/matchers.py | matchers.py | py | 6,664 | python | en | code | 19 | github-code | 13 |
# Map each grammar rule's index in the full grammar file to its index in the
# top-2500 grammar file, and dump the mapping as JSON.
with open("grammar_wsj_cnf_top_2500.txt") as rule_file:
    rules_orig = rule_file.readlines()
with open("grammar_wsj_cnf.txt") as rule_file_nt:
    rules_nt = rule_file_nt.readlines()
# Index of the FIRST occurrence of each rule in the full grammar; matches the
# list.index() semantics of the original O(n^2) implementation, now O(n).
first_pos = {}
for nt_idx, rule in enumerate(rules_nt):
    first_pos.setdefault(rule, nt_idx)
rule_map = {}
for idx, r in enumerate(rules_orig):
    if r in first_pos:
        rule_map[first_pos[r]] = idx
import json
with open("rules_non_terminal_mapping_wsj_2500.json", "w") as out_file:
    json.dump(rule_map, out_file)
import time
print(rule_map)
# NOTE(review): this pause looks like leftover debugging — safe to remove?
time.sleep(5)
print("")
print("") | anshuln/Diora_with_rules | rulesets/data_preprocessing/Basic-CYK-Parser/map_non_terminal_rules.py | map_non_terminal_rules.py | py | 508 | python | en | code | 4 | github-code | 13 |
15706684074 | from django.db import models
from django.contrib.auth.models import AbstractUser
from utils.models import BaseModel
# Create your models here.
class User(AbstractUser):
    """Custom user model: Django's AbstractUser plus a unique mobile number
    and an optional default shipping address."""
    mobile = models.CharField(max_length=11, unique=True, verbose_name='手机')
    # The foreign key targets the Address model declared below. Using the
    # string form 'Address' avoids the circular-reference problem caused by
    # the two models referring to each other.
    default_address = models.ForeignKey('Address', related_name='users', null=True, blank=True,
                                        on_delete=models.SET_NULL, verbose_name='默认地址')
    class Meta:
        db_table = 'tb_users'
        verbose_name = '用户'
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.username
class Address(BaseModel):
    """
    A user's shipping address. Province/city/district all reference the
    shared areas.Area table; deletion is logical (is_deleted flag).
    """
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='addresses', verbose_name='用户')
    title = models.CharField(max_length=20, verbose_name='地址名称')
    receiver = models.CharField(max_length=20, verbose_name='收货人')
    province = models.ForeignKey('areas.Area', on_delete=models.PROTECT, related_name='province_addresses', verbose_name='省')
    city = models.ForeignKey('areas.Area', on_delete=models.PROTECT, related_name='city_addresses', verbose_name='市')
    district = models.ForeignKey('areas.Area', on_delete=models.PROTECT, related_name='district_addresses', verbose_name='区')
    place = models.CharField(max_length=50, verbose_name='详细地址')
    mobile = models.CharField(max_length=11, verbose_name='手机')
    tel = models.CharField(max_length=20, null=True, blank=True, default='', verbose_name='固定电话')
    email = models.EmailField(max_length=30, null=True, blank=True, default='', verbose_name='电子邮箱')
    is_deleted = models.BooleanField(default=False, verbose_name='逻辑删除')
    class Meta:
        db_table = 'tb_address'
        # default ordering is by id; here queries return newest updates first
        ordering = ['-update_time']
        verbose_name = '收获地址'
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.place
"""
Address模型类中的外键指向Areas / models里面的Area,指明外键ForeignKey时,可以使用字符串应用名.模型类名来定义
related_name
在进行反向关联查询时使用的属性,如
city = models.ForeignKey(‘areas.Area’, related_name =‘city_addresses’)表示可以通过Area对象.city_addresses属性获取所有相关的city数据。
ordering
表名在进行Address查询时,默认使用的排序方式
models.PROTECT: 保护模式,如果采用该选项,删除的时候,会抛出ProtectedError错误。
""" | juehuan182/MeiduoShopping | meiduo_mall/users/models.py | models.py | py | 2,811 | python | en | code | 0 | github-code | 13 |
20399789982 | #!/usr/bin/env python3
import json
import numpy
from types import SimpleNamespace
import subprocess
import pandas as pd
class Empty(SimpleNamespace):
    """A SimpleNamespace that auto-creates nested Empty children on first access,
    so chained attribute paths like cfg.a.b.c work without pre-declaration."""

    def __getattr__(self, name):
        # Only invoked for missing attributes: materialise and memoise a child.
        child = Empty()
        setattr(self, name, child)
        return child
class OrbitalElements:
    """Classical Keplerian orbital elements, tagged with type="keplerian"
    so the JSON serializer emits a tudat-compatible state description."""

    def __init__(self, semiMajorAxis:float, eccentricity:float, inclination:float, argumentOfPeriapsis:float, longitudeOfAscendingNode:float, trueAnomaly:float):
        # Bulk-assign in declaration order; attribute order matters for the
        # JSON output produced from __dict__.
        self.__dict__.update(
            type="keplerian",
            semiMajorAxis=semiMajorAxis,
            eccentricity=eccentricity,
            inclination=inclination,
            argumentOfPeriapsis=argumentOfPeriapsis,
            longitudeOfAscendingNode=longitudeOfAscendingNode,
            trueAnomaly=trueAnomaly,
        )
class tudatConfig(Empty):
    """Builder for a tudat json_interface settings object.

    Relies on Empty's auto-vivification: paths like self.spice.useStandardKernels
    create the nested namespaces on the fly. "@path(...)" strings are tudat
    placeholders resolved relative to the settings file.
    (Parameter names 'sateliteName'/'intialPosition' keep their original
    spelling for caller compatibility.)
    """
    def __init__(self, sateliteName: str, finalEpoch : int, intialPosition: OrbitalElements):
        self.initialEpoch = 0
        self.finalEpoch = finalEpoch
        self.globalFrameOrientation = "J2000"
        self.spice.useStandardKernels = True
        self.spice.preloadEphemeris = False
        self.bodies.Earth.useDefaultSettings = True
        # The satellite body is keyed by its (dynamic) name.
        getattr(self.bodies, sateliteName).initialState = intialPosition
        self.propagators = []
        self.propagators.append(Empty(integratedStateType = "translational",
                                      centralBodies = ["Earth"],
                                      bodiesToPropagate = [sateliteName]))
        # Point-mass Earth gravity acting on the satellite.
        getattr(self.propagators[0].accelerations, sateliteName).Earth = [Empty(type="pointMassGravity")]
        self.integrator.type = "rungeKutta4"
        self.integrator.stepSize = 10
        self.export =[Empty(file = "@path(stateHistory.txt)", variables = [ Empty(type="state") ])]
        self.options.fullSettingsFile = "@path(fullSettings.json)"
def save(config: tudatConfig):
    """Serialise *config* to main.json in the working directory, recursing
    into nested namespaces via their __dict__."""
    with open("main.json", mode='w') as out_file:
        out_file.write(json.dumps(config, indent=2, default=lambda o: o.__dict__))
def run(config: tudatConfig):
    """Write the settings file, invoke the external tudat json_interface
    binary, and load the propagated state history into a DataFrame
    indexed by epoch.

    NOTE(review): the assert is stripped under `python -O`; consider raising
    explicitly on a non-zero return code.
    """
    save(config)
    a = subprocess.run(["../tudatBundle/tudat/bin/json_interface", "main.json"])
    assert(a.returncode==0)
    # Column 0 is the epoch; the remaining six are position and velocity.
    data = numpy.loadtxt("stateHistory.txt")
    return pd.DataFrame(data = data[:,1:], index = data[:,0], columns = ["p_x", "p_y", "p_z", "v_x", "v_y", "v_z"])
| przecze/pytudat | pytudat.py | pytudat.py | py | 2,308 | python | en | code | 0 | github-code | 13 |
import numpy as np
# 5x5 occupancy grid used as the augmentation seed pattern.
arr =[
    [1,2,0,0,0],
    [1,0,0,0,0],
    [1,0,0,0,0],
    [1,0,0,0,0],
    [0,0,0,0,0]
]
# Generate the eight dihedral-group symmetries of the pattern:
# four rotations (0/90/180/270 degrees) ...
arr_n = np.array(arr)
arr_n90 = np.rot90(arr_n)
arr_n180 = np.rot90(arr_n90)
arr_n270 = np.rot90(arr_n180)
# ... and their left-right mirrored counterparts.
arr_n_lr = np.fliplr(arr_n)
arr_n90_lr = np.fliplr(arr_n90)
arr_n180_lr = np.fliplr(arr_n180)
arr_n270_lr = np.fliplr(arr_n270)
print(arr_n)
print(arr_n90)
print(arr_n180)
print(arr_n270)
print(arr_n_lr)
print(arr_n90_lr)
print(arr_n180_lr)
print(arr_n270_lr)
30842082157 | import copy
import ctypes
import os
import pickle
import struct
from collections import OrderedDict
from ctypes import cdll
import numpy as np
from sklearn.model_selection import train_test_split
from torchvision import datasets, transforms
import torch
from sampling import cifar_iid, cifar_noniid, mnist_iid, mnist_noniid_unequal, mnist_noniid, purchase100_noniid, \
purchase100_iid
# Native SGX crypto/aggregation library.
# NOTE(review): absolute Windows dev path — should be made configurable.
LIB_CRYPTO = cdll.LoadLibrary("D:\\VisualCode\\SecureAggregation\\x64\\Debug\\App.dll")
COUNTER_LEN = 16
DATA_SET_DIR = 'dataset'
RESULTS_DIR = 'exp/results'
# presumably the length of the flattened update vector exchanged with the enclave — TODO confirm
d=10
def sgx_encrypt(raw_data):
    """Encrypt *raw_data* via the native SGX library.

    :param raw_data: plaintext byte string
    :return: ciphertext byte string of the same length
    """
    # NOTE(review): hard-coded demo key — confirm it is replaced in production.
    key = b'1234567812345678'
    # Output buffer the native call fills in place.
    dst = (ctypes.c_uint8 * len(raw_data))()
    LIB_CRYPTO.encrypt_app(raw_data,len(raw_data),key,dst,len(dst))
    return bytes(dst)
def sgx_decrypt(encode_data):
    """Decrypt *encode_data* via the native SGX library.

    :param encode_data: ciphertext byte string
    :return: plaintext byte string of the same length
    """
    # NOTE(review): hard-coded demo key — confirm it is replaced in production.
    key = b'1234567812345678'
    # Output buffer the native call fills in place.
    dst = (ctypes.c_uint8 * len(encode_data))()
    LIB_CRYPTO.decrypt_app(encode_data, key, dst, len(encode_data))
    # bytes(dst) copies the whole ctypes array in one step; the original
    # element-by-element list comprehension was a redundant O(n) detour
    # (and this now mirrors sgx_encrypt).
    return bytes(dst)
def sgx_aggregate(encode_all_client_data_bytes,clients):
    """Aggregate the encrypted local updates of several clients inside the enclave.

    :param encode_all_client_data_bytes: concatenated encrypted update bytes of all clients
    :param clients: number of participating clients
    :return: ctypes float array of length ``d`` holding the aggregated update
    """
    encode_data_len=len(encode_all_client_data_bytes)
    # Output buffer for the aggregated (plaintext) parameter vector.
    update_param_bytes = (ctypes.c_float * d)()
    LIB_CRYPTO.aggregate(encode_all_client_data_bytes,encode_data_len,update_param_bytes,clients)
    return update_param_bytes
def count_parameters(model):
    """Count the trainable (requires_grad) parameters of a torch model.

    :param model: torch.nn.Module
    :return: total number of trainable scalar parameters
    """
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
def get_buffer_names(model):
    """Return the names of all registered (non-learnable) buffers in *model*."""
    buffer_names = []
    for buffer_name, _buffer in model.named_buffers():
        buffer_names.append(buffer_name)
    return buffer_names
# NOTE(review): flatten_params is redefined (identically) later in this
# module; that later definition is the one in effect at import time.
def flatten_params(learnable_parameters):
    """
    Args:
        learnable_parameters (OrderedDict): parameters without buffers (such as bn.running_mean)
    Returns:
        flat (torch.Tensor):
            whose dim is one, like [0.1, ..., 0.2]
    """
    ir = [torch.flatten(p) for _, p in learnable_parameters.items()]
    # The .view(-1, 1).flatten() round-trip is a no-op on the 1-D concat.
    flat = torch.cat(ir).view(-1, 1).flatten()
    return flat
def get_learnable_parameters(state_dict, buffer_names):
    """Return a copy of *state_dict* with every buffer entry removed,
    preserving the original key order."""
    return OrderedDict(
        (name, value)
        for name, value in state_dict.items()
        if name not in buffer_names
    )
def get_dataset(args, path_project, num_of_label_k, is_random_num_label):
    """Load the dataset named by args.dataset, split it across args.num_users
    clients (IID or non-IID per args.data_dist), and return
    (train_dataset, test_dataset, user_groups, class_labels).

    user_groups maps each client id to the set of training-sample indices it
    owns; num_of_label_k / is_random_num_label control the non-IID sampling.
    """
    if args.dataset == 'cifar10':
        data_dir = os.path.join(path_project, DATA_SET_DIR, 'cifar10')
        apply_transform = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
            ]
        )
        train_dataset = datasets.CIFAR10(
            data_dir,
            train=True,
            download=True,
            transform=apply_transform)
        test_dataset = datasets.CIFAR10(
            data_dir,
            train=False,
            download=True,
            transform=apply_transform)
        if args.data_dist == 'IID':
            user_groups = cifar_iid(train_dataset, args.num_users)
        else: # args.data_dist == 'non-IID':
            if args.unequal:
                # Unequal non-IID split is not implemented for CIFAR.
                raise NotImplementedError()
            else:
                user_groups = cifar_noniid(train_dataset, args.num_users, num_of_label_k, is_random_num_label)
        class_labels = set(test_dataset.class_to_idx.values())
    elif args.dataset == 'mnist' or args.dataset == 'fmnist':
        data_dir = os.path.join(path_project, DATA_SET_DIR, args.dataset)
        apply_transform = transforms.Compose([
            transforms.ToTensor(),
            # https://discuss.pytorch.org/t/normalization-in-the-mnist-example/457
            transforms.Normalize((0.1307,), (0.3081,))
        ])
        train_dataset = datasets.MNIST(
            data_dir,
            train=True,
            download=True,
            transform=apply_transform)
        test_dataset = datasets.MNIST(
            data_dir,
            train=False,
            download=True,
            transform=apply_transform)
        if args.data_dist == 'IID':
            user_groups = mnist_iid(train_dataset, args.num_users)
        else: # args.data_dist == 'non-IID':
            if args.unequal:
                user_groups = mnist_noniid_unequal(
                    train_dataset, args.num_users)
            else:
                user_groups = mnist_noniid(train_dataset, args.num_users, num_of_label_k, is_random_num_label)
        class_labels = set(test_dataset.train_labels.numpy())
    elif args.dataset == 'cifar100':
        data_dir = os.path.join(path_project, DATA_SET_DIR, 'cifar100')
        CIFAR100_TRAIN_MEAN = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)
        CIFAR100_TRAIN_STD = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)
        # Training pipeline includes augmentation; test pipeline is normalize-only.
        transform_train = transforms.Compose([
            # transforms.ToPILImage(),
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(15),
            transforms.ToTensor(),
            transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD)
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD)
        ])
        train_dataset = datasets.CIFAR100(data_dir, train=True, download=True, transform=transform_train)
        test_dataset = datasets.CIFAR100(data_dir, train=False, download=True, transform=transform_test)
        if args.data_dist == 'IID':
            user_groups = cifar_iid(train_dataset, args.num_users)
        else: # args.data_dist == 'non-IID':
            if args.unequal:
                raise NotImplementedError()
            else:
                user_groups = cifar_noniid(train_dataset, args.num_users, num_of_label_k, is_random_num_label)
        class_labels = set(test_dataset.class_to_idx.values())
    elif args.dataset == 'purchase100':
        data_dir = os.path.join(path_project, DATA_SET_DIR, 'purchase100')
        # Materialise the cached split on first use.
        save_purchase100(120000, 0.2, data_dir)
        train_dataset, test_dataset = load_purchase100(120000, 0.2, data_dir)
        if args.data_dist == 'IID':
            user_groups = purchase100_iid(train_dataset, args.num_users)
        else: # args.data_dist == 'non-IID':
            if args.unequal:
                raise NotImplementedError()
            else:
                user_groups = purchase100_noniid(train_dataset, args.num_users, num_of_label_k, is_random_num_label)
        class_labels = set([label for _, label in test_dataset])
    else:
        exit('Error: unrecognized model')
    return train_dataset, test_dataset, user_groups, class_labels
def save_purchase100(target_size, target_test_train_ratio, data_dir, seed=0, force_update=False):
    """Sample a stratified train/test split of Purchase-100 and cache it as
    target_data.npz in *data_dir*.

    :param target_size: number of training samples to draw
    :param target_test_train_ratio: test size as a fraction of target_size
    :param data_dir: directory containing the pickled features/labels
    :param seed: base random_state for the two stratified splits
    :param force_update: when True, rebuild the cache even if it exists
    """
    # Skip the expensive resampling when a cached split already exists, unless
    # the caller explicitly forces a rebuild. The original condition
    # ('if force_update or exists') had the flag inverted: force_update=True
    # returned early instead of regenerating.
    if not force_update and os.path.exists(data_dir + '/target_data.npz'):
        print("data already prepared")
        return
    print('-' * 10 + 'Saving purchase100 data' + '-' * 10 + '\n')
    gamma = target_test_train_ratio
    x = pickle.load(open(data_dir + '/purchase_100_features.p', 'rb'))
    y = pickle.load(open(data_dir + '/purchase_100_labels.p', 'rb'))
    x = np.array(x, dtype=np.float32)
    y = np.array(y, dtype=np.int64)
    print(x.shape, y.shape)
    # assert if data is enough for sampling target data
    assert(len(x) >= (1 + gamma) * target_size)
    x, train_x, y, train_y = train_test_split(x, y, test_size=target_size, stratify=y, random_state=seed)
    print("Training set size: X: {}, y: {}".format(train_x.shape, train_y.shape))
    x, test_x, y, test_y = train_test_split(x, y, test_size=int(gamma*target_size), stratify=y, random_state=seed+1)
    print("Test set size: X: {}, y: {}".format(test_x.shape, test_y.shape))
    # save target data
    np.savez(data_dir + '/target_data.npz', train_x, train_y, test_x, test_y)
def load_purchase100(target_size, target_test_train_ratio, data_dir):
    """Load the cached Purchase-100 split written by save_purchase100.

    Returns (train_dataset, test_dataset) as lists of (feature, label)
    pairs; the test set is truncated to gamma * target_size samples.
    """
    gamma = target_test_train_ratio
    with np.load(data_dir + '/target_data.npz') as f:
        # Arrays were saved positionally: train_x, train_y, test_x, test_y.
        train_x, train_y, test_x, test_y = [f['arr_%d' % i] for i in range(len(f.files))]
    train_x = np.array(train_x, dtype=np.float32)
    test_x = np.array(test_x, dtype=np.float32)
    train_y = np.array(train_y, dtype=np.int32)
    test_y = np.array(test_y, dtype=np.int32)
    # Labels are cast to int64 for torch loss functions.
    train_dataset = [(feature, np.int64(label)) for feature, label in zip(train_x, train_y)]
    test_dataset = [(feature, np.int64(label)) for feature, label in zip(test_x[:int(gamma*target_size)], test_y[:int(gamma*target_size)])]
    return train_dataset, test_dataset
def zero_except_top_k_weights(state_dict, buffer_names, k):
    """Given dense weights and set all parameters except top-k (by absolute
    value) to zero.

    Args:
        state_dict: OrderedDict
            ex. model.state_dict()
        buffer_names: [str]
            buffer keys to exclude from the ranking
        k: int
            number of parameters to keep
    Returns:
        new_state: OrderedDict
            ex. model.state_dict()
        top_k_indices: [int]
            indices of top-k parameters (positions in the flattened vector)
    """
    learnable_parameters = get_learnable_parameters(state_dict, buffer_names)
    tensor_flat_params = flatten_params(learnable_parameters)
    float_flat_params = tensor_flat_params.tolist()
    # convert dense weights to sparse form
    float_flat_sparse_params = [(idx, val)
                                for idx, val in enumerate(float_flat_params)]
    # Sort by magnitude, descending; stable sort keeps earlier indices first on ties.
    float_flat_sparse_params.sort(key=lambda x: abs(x[1]), reverse=True)
    top_k_float_flat_sparse_params = [0.0] * len(float_flat_sparse_params)
    top_k_indices = []
    for i in range(k):
        idx, val = float_flat_sparse_params[i]
        top_k_float_flat_sparse_params[idx] = val
        top_k_indices.append(idx)
    # Rebuild a state_dict-shaped structure; buffers come from state_dict.
    return recover_flattened(torch.Tensor(top_k_float_flat_sparse_params), state_dict,
                             learnable_parameters), top_k_indices
def recover_flattened(flat_params, base_state_dict, learnable_parameters):
    """
    Args:
        flat_params (torch.Tensor):
            whose dim is one, like [0.1, ..., 0.2]
        base_state_dict (OrderedDict)
            ex. model.state_dict():
            buffers are inherent
        learnable_parameters (OrderedDict):
            parameters without buffers (such as bn.running_mean)
    Returns:
        new_state: OrderedDict
            ex. model.state_dict()
    """
    index_ranges = get_index_ranges(learnable_parameters)
    # Slice the flat vector back into one chunk per parameter.
    ir = [flat_params[s:e] for (s, e) in index_ranges]
    # Deep copy keeps buffer entries; learnable keys are overwritten below.
    new_state = copy.deepcopy(base_state_dict)
    for flat, (key, value) in zip(ir, learnable_parameters.items()):
        if len(value.shape) == 0:
            # Scalar (0-dim) parameter: restore from the single element.
            new_state[key] = flat[0]
        else:
            new_state[key] = flat.view(*value.shape)
    return new_state
def get_index_ranges(learnable_parameters):
    """
    Args:
        learnable_parameters (OrderedDict): parameters without buffers (such as bn.running_mean)
    Returns:
        index_ranges: [(int, int)]
            [(start, end)] half-open slices of each parameter inside the
            flattened vector, in state-dict order.
    """
    index_ranges = []
    offset = 0
    for tensor in learnable_parameters.values():
        size = tensor.numel()
        index_ranges.append((offset, offset + size))
        offset += size
    return index_ranges
# NOTE: this redefines the identical flatten_params declared earlier in the
# module; this later definition is the one in effect.
def flatten_params(learnable_parameters):
    """
    Args:
        learnable_parameters (OrderedDict): parameters without buffers (such as bn.running_mean)
    Returns:
        flat (torch.Tensor):
            whose dim is one, like [0.1, ..., 0.2]
    """
    # torch.flatten already yields 1-D tensors, so the concatenation is 1-D;
    # the original's trailing .view(-1, 1).flatten() round-trip was a no-op.
    return torch.cat([torch.flatten(p) for p in learnable_parameters.values()])
def serialize_sparse(state_dict, buffer_names, top_k_indices):
    """
    Args:
        state_dict: OrderedDict
            ex. model.state_dict()
        buffer_names: [str]
            buffer keys to exclude from the flattened vector
        top_k_indices: [int]
            top-k indices
    Returns:
        bytes_buffer: bytes
            bytes format is "Index (4bytes unsigned int) Value (4bytes float)"
            repeated once per top-k entry
    """
    learnable_parameters = get_learnable_parameters(state_dict, buffer_names)
    tensor_flat_params = flatten_params(learnable_parameters)
    # Interleave (index, value) pairs into one flat argument list for struct.pack.
    unpakced_flat_params = [elm for tupl in zip(top_k_indices, tensor_flat_params[top_k_indices]) for elm in tupl]
    bytes_buffer = struct.pack(len(top_k_indices) * 'If', *unpakced_flat_params)
    return bytes_buffer
def serialize_dense(state_dict, buffer_names, d):
    """
    Args:
        state_dict: OrderedDict
            ex. model.state_dict()
        buffer_names: [str]
            buffer keys to exclude from the flattened vector
        d: int
            how many parameters are in original model
    Returns:
        bytes_buffer: bytes
            bytes format is "Index (4bytes unsigned int) Value (4bytes float)"
            for every parameter 0..d-1
    """
    learnable_parameters = get_learnable_parameters(state_dict, buffer_names)
    tensor_flat_params = flatten_params(learnable_parameters)
    float_flat_params = tensor_flat_params.tolist()
    # Interleave (index, value) pairs into one flat argument list for struct.pack.
    unpakced_flat_params = [element for tupl in enumerate(
        float_flat_params) for element in tupl]
    bytes_buffer = struct.pack(
        d * 'If',
        *unpakced_flat_params)
    return bytes_buffer
def index_privacy(top_k_indices, num_of_params, random_state, r):
    """Hide the true top-k indices by mixing in len(top_k_indices) * r decoy
    indices drawn (without replacement) from the non-top-k positions, then
    shuffling the combined list."""
    # Keep the exact construction order of the original so identical seeds
    # produce identical output.
    decoy_candidates = np.array(list(set(range(num_of_params)) - set(top_k_indices)))
    n_decoys = int(len(top_k_indices) * r)
    decoys = random_state.choice(decoy_candidates, size=n_decoys, replace=False)
    obfuscated = list(np.concatenate((decoys, np.array(top_k_indices))))
    random_state.shuffle(obfuscated)
    return obfuscated
def FedAvg(w):
    """Federated averaging: element-wise mean over a list of state dicts.

    The input dicts are never mutated; the result starts from a deep copy
    of the first one.
    """
    avg = copy.deepcopy(w[0])
    for key in avg.keys():
        total = avg[key]
        for state in w[1:]:
            total = total + state[key]
        avg[key] = torch.div(total, len(w))
    return avg
| MJXXGPF/SecureAggregation_GPF | client/utils.py | utils.py | py | 14,040 | python | en | code | 0 | github-code | 13 |
22592578539 | import subprocess
import logging
import os
logger = logging.getLogger(__name__)
def get_cmd_stdout(cmd, log_func=print, check=True, cmd_dir=None, cmd_env=None, quiet=False):
    """Run *cmd*, stream its combined stdout/stderr through *log_func*, and
    return the full decoded output (stripped).

    :param cmd: str (run through the shell) or argv list
    :param log_func: callable invoked with each output line; None to disable
    :param check: raise CalledProcessError on non-zero exit when True
    :param cmd_dir: working directory for the child process
    :param cmd_env: environment for the child process
    :param quiet: suppress the module-level logger line when True
    """
    is_shell = isinstance(cmd, str)
    cmd_str = ' '.join(cmd) if not is_shell else cmd
    if not quiet:
        logger.info('run shell commond. [cmd=%s], [cmd_dir=%s]' % (cmd_str, cmd_dir))
    p = subprocess.Popen(cmd,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         shell=is_shell,
                         cwd=cmd_dir,
                         env=cmd_env,
                         )
    # Collect lines and join once: O(n) instead of quadratic string +=.
    lines = []
    for raw_line in p.stdout:
        line_text = raw_line.rstrip().decode('utf8')
        lines.append(line_text)
        if log_func:
            log_func(line_text)
    # communicate() avoids the PIPE deadlock that p.wait() can hit.
    p.communicate()
    returncode = p.returncode
    if check and returncode:
        raise subprocess.CalledProcessError(
            returncode=returncode,
            cmd=cmd_str,
        )
    return '\n'.join(lines).strip()
def run_cmd_async(cmd, cmd_dir=None, cmd_env=None, quiet=False):
    """Launch *cmd* through the shell without waiting and return the child pid.

    The child is put in its own process group so it survives the parent.
    NOTE(review): os.setpgrp is POSIX-only — this will fail on Windows;
    confirm the tool only runs on Unix hosts.
    """
    if not quiet:
        logger.info('async run shell commond. [cmd=%s], [cmd_dir=%s]' % (cmd, cmd_dir))
    return subprocess.Popen(cmd,
                            shell=True,
                            cwd=cmd_dir,
                            env=cmd_env,
                            preexec_fn=os.setpgrp
                            ).pid
# @perf_log
def run_cmd_sync(cmd, log_func=print, check=True, cmd_dir=None, cmd_env=None, quiet=False):
    """run a cmd and log stdout/stderr using the same logger

    Returns the child's return code; when *check* is set, a non-zero exit
    replays the captured output to stdout (if it went elsewhere) and raises
    CalledProcessError.
    NOTE(review): lines are logged as raw bytes (b'...'), unlike
    get_cmd_stdout which decodes them — confirm this is intended.

    @params:
        - cmd: both list and string mode are supported
    """
    # https://docs.python.org/3.7/library/subprocess.html#frequently-used-arguments
    # If you wish to capture and combine both streams into one,
    # use stdout=PIPE and stderr=STDOUT
    is_shell = isinstance(cmd, str)
    cmd_str = ' '.join(cmd) if not is_shell else cmd
    if not quiet:
        logger.info('run sync shell commond. [cmd=%s], [cmd_dir=%s]' % (cmd_str, cmd_dir))
    p = subprocess.Popen(cmd,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         shell=is_shell,
                         cwd=cmd_dir,
                         env=cmd_env,
                         )
    # Keep a copy of every line so failures can be replayed to stdout below.
    log_doc = []
    if log_func:
        log_func("Start print stdout for bash: ")
        for line in iter(p.stdout.readline, b''):
            log_doc.append(line)
            line_text = line.rstrip()
            log_func(line_text)
    # https://docs.python.org/3.7/library/subprocess.html#subprocess.Popen.wait
    # p.wait deadlock when using stdout=PIPE or stderr=PIPE
    # and the child process generates enough output to a pipe
    # such that it blocks waiting for the OS pipe buffer to accept more data.
    # Use Popen.communicate() when using pipes to avoid that.
    p.communicate()
    returncode = p.returncode
    if check and returncode:
        if log_func != print:
            print("Start print stdout for bash: ")
            for line in log_doc: # if cmd failed, log will print to stdout
                line_text = line.rstrip()
                print(line_text)
        raise subprocess.CalledProcessError(
            returncode=returncode,
            cmd=cmd_str,
        )
    return p.returncode
if __name__ == '__main__':
    # Ad-hoc manual smoke test; expects fake_long_running_shell.sh beside this file.
    print('=== test1 ===')
    try:
        ret = run_cmd_sync(['sh', 'fake_long_running_shell.sh'])
        print('return code: %s' % ret)
    except subprocess.CalledProcessError as e:
        print(e)
    print('=== test2 ===')
    try:
        run_cmd_sync('sh fake_long_running_shell.sh')
    except subprocess.CalledProcessError as e:
        print(e)
    print('=== run without print ===')
    run_cmd_sync('sh fake_long_running_shell.sh', check=False, log_func=None)
    print('=== done ===')
| JackonYang/web-shell | utils/shell_runner.py | shell_runner.py | py | 4,018 | python | en | code | 0 | github-code | 13 |
17045553754 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOverseasTravelFliggyShopTransferModel(object):
    """Request model for alipay.overseas.travel.fliggy.shop.transfer.

    All fields default to None; to_alipay_dict() serialises only truthy
    fields, recursing into nested models that expose to_alipay_dict().
    """

    # Serialisation order mirrors the original hand-written implementation.
    _FIELDS = ('data', 'open_id', 'unique_id', 'user_id', 'user_id_type')

    def __init__(self):
        for field_name in self._FIELDS:
            setattr(self, '_' + field_name, None)

    # Plain pass-through accessors, one per API field.
    data = property(lambda self: self._data,
                    lambda self, value: setattr(self, '_data', value))
    open_id = property(lambda self: self._open_id,
                       lambda self, value: setattr(self, '_open_id', value))
    unique_id = property(lambda self: self._unique_id,
                         lambda self, value: setattr(self, '_unique_id', value))
    user_id = property(lambda self: self._user_id,
                       lambda self, value: setattr(self, '_user_id', value))
    user_id_type = property(lambda self: self._user_id_type,
                            lambda self, value: setattr(self, '_user_id_type', value))

    def to_alipay_dict(self):
        """Serialise the model to a plain dict, omitting falsy fields."""
        params = dict()
        for field_name in self._FIELDS:
            value = getattr(self, '_' + field_name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[field_name] = value.to_alipay_dict()
                else:
                    params[field_name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayOverseasTravelFliggyShopTransferModel()
        for field_name in AlipayOverseasTravelFliggyShopTransferModel._FIELDS:
            if field_name in d:
                setattr(o, field_name, d[field_name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayOverseasTravelFliggyShopTransferModel.py | AlipayOverseasTravelFliggyShopTransferModel.py | py | 2,774 | python | en | code | 241 | github-code | 13 |
37655061384 | import tensorflow.compat.v1 as tf
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
class lg_dataset:
    """Synthetic 2-feature dataset of two clusters for the SVM demo.

    100 points are drawn in one (param1, param2) box with positive labels and
    100 in a shifted box with negative labels; load_data() returns the stacked
    (200, 2) features and (200,) labels.
    """
    def __init__(self):
        # NOTE(review): train_size/test_size are set but never read — the
        # actual sample count is the hard-coded 100 per cluster; confirm intent.
        self.train_size=2000
        self.train_param1=tf.random.uniform([100],minval=2,maxval=10)
        self.train_param2=tf.random.uniform([100],minval=50,maxval=110)
        # (100, 2) feature matrix for the first cluster.
        self.train_data=tf.stack([self.train_param1,self.train_param2],axis=1)
        self.test_size=200
        # Second cluster drawn from a shifted box, appended below.
        self.train_param11=tf.random.uniform([100],minval=12,maxval=20)
        self.train_param22=tf.random.uniform([100],minval=60,maxval=120)
        self.train_data = tf.concat([self.train_data,tf.stack([self.train_param11,self.train_param22],axis=1)],0)
        self.train_label = tf.random.uniform([100],minval=1,maxval=5)
        # NOTE(review): minval=-1 > maxval=-5 here (bounds reversed) — this
        # still yields values in (-5, -1] numerically, but confirm it is intended.
        self.train_label = tf.concat([self.train_label,tf.random.uniform([100],minval=-1,maxval=-5)],0)
    def load_data(self):
        """Return the (features, labels) pair for training."""
        return (self.train_data,self.train_label)
    def plot(self):
        """Scatter the two features against the labels in a 3D figure."""
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.set_xlabel('param1')
        ax.set_ylabel('param2')
        ax.set_zlabel('targets')
        ax.scatter(self.train_data[:,0],self.train_data[:,1],self.train_label,c='g',label="ground truth")
        ax.legend()
        plt.show()
plt.show()
def load_data(dataset_name,plot=False):
    """Return (train_data, train_label) for the named dataset.

    Only the 'svm' toy dataset is available; any other name raises ValueError.
    Set plot=True to display the dataset before returning it.
    """
    if dataset_name != 'svm':
        raise ValueError('Dataset not found')
    dataset = lg_dataset()
    if plot:
        dataset.plot()
    return dataset.load_data()
class SVM:
    """Linear soft-margin SVM trained with Adam on a hinge-style loss."""
    def __init__(self,train_data,train_label,epochs):
        # train_data: (n, 2) features; train_label: (n,) +/- targets
        self.train_data = train_data
        self.train_label = train_label
        self.initialize_parameter()
        # Wrap the initial values in tf.Variables so the optimizer can update them.
        self.weights = tf.Variable(self.weights,dtype=tf.float32)
        self.bias = tf.Variable(self.bias,dtype=tf.float32)
        # Margin constant used in the hinge term max(0, epsilon - y * f(x)).
        self.epsilon = 1
        self.epochs = epochs
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.1)
    def model(self,data):
        """Linear decision function f(x) = x @ w + b."""
        return tf.linalg.matmul(data,self.weights)+self.bias
    @tf.function
    def update_param(self,data,label):
        """One optimizer step on the regularised hinge loss."""
        # 0.5*||w||^2 regulariser plus the summed hinge penalty; kept as a
        # zero-arg lambda because optimizer.minimize re-evaluates it.
        self.loss = lambda : tf.reduce_sum(0.5*((self.weights)**2) + tf.reduce_sum(tf.maximum(0.0,self.epsilon-(label*self.model(data)))))
        self.optimizer.minimize(self.loss,var_list=[self.weights,self.bias])
    def train(self):
        """Run update_param for the configured number of epochs (consumes self.epochs)."""
        data = self.train_data
        label = self.train_label
        while self.epochs:
            self.update_param(data,label)
            self.epochs -= 1
    def plot3D(self):
        """3D scatter of the data together with the decision plane and both margins."""
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        clas_1 = self.model(self.train_data)
        ax.set_xlabel('x1')
        ax.set_ylabel('x2')
        ax.set_zlabel('targets')
        ax.scatter(self.train_data[:,0],self.train_data[:,1],self.train_label,c='g',label="train_data")
        ax.scatter(self.train_data[:,0],self.train_data[:,1],clas_1-1,c='r',label='boundary > 1')
        ax.scatter(self.train_data[:,0],self.train_data[:,1],clas_1,c='b',label = 'boundary = 0')
        ax.scatter(self.train_data[:,0],self.train_data[:,1],clas_1+1,c='r',label='boundary < 1')
        ax.legend()
        plt.show()
    def plot2D(self):
        """2D scatter of the raw features only."""
        plt.scatter(self.train_data[:,0],self.train_data[:,1],c='g',label='test_data')
        plt.xlabel('x1')
        plt.ylabel('x2')
        plt.legend()
        plt.plot()
        plt.show()
    def plot_data(self):
        """3D scatter of the training data without the decision surface."""
        fig = plt.figure()
        ax = fig.add_subplot(111,projection='3d')
        ax.set_xlabel('x1')
        ax.set_ylabel('x2')
        ax.set_zlabel('targets')
        ax.scatter(self.train_data[:,0],self.train_data[:,1],self.train_label,c='g',label="train_data")
        ax.legend()
        plt.show()
    def initialize_parameter(self):
        """Start from unit weights (one per feature) and zero bias."""
        self.weights = tf.ones([self.train_data.shape[1],1],dtype=tf.float32)
        self.bias = [0]
if __name__=='__main__':
    # Load the toy dataset, train the SVM, and show the margin before/after.
    features, labels = load_data('svm')
    n_epochs = int(input("Enter Epochs:- "))
    model = SVM(features, labels, n_epochs)
    tf.print('width: ', 2 / tf.norm(model.weights))  # margin width pre-training
    model.plot_data()
    model.train()
    tf.print('width: ', 2 / tf.norm(model.weights))  # margin width post-training
    model.plot3D()
| DextroLaev/Machine-Learning-And-Deep-Learning | Basic ML ALgo/SVM/svm.py | svm.py | py | 3,814 | python | en | code | 1 | github-code | 13 |
36724937202 | from core.robot import get_robot_wrapper, Shelf
from py_trees.behaviour import Behaviour
from core.logger import log, LogLevel
from py_trees.common import Status
import numpy as np
"""
Drive forward until the object is within grabbing range.
"""
class DriveToWithinRangeOfTarget(Behaviour):
    """Behaviour node: creep forward until the target object is within grab range."""
    def __init__(self, name=None):
        super(DriveToWithinRangeOfTarget, self).__init__(name)
        self.robot = get_robot_wrapper()
    def update(self):
        """RUNNING while driving, SUCCESS once close enough, FAILURE with no target."""
        target_obj, shelf = self.robot.get_target_object()
        if target_obj is None:
            log("No target object found to drive to.")
            return Status.FAILURE
        # Planar (x, y) distance from the robot to the target object.
        distance_to_target = np.linalg.norm([target_obj.getPosition()[0], target_obj.getPosition()[1]])
        # NOTE(review): both branches evaluate to 0.65, so the shelf-dependent
        # threshold currently has no effect — confirm the intended TOP value.
        drive_target_distance = 0.65 if shelf == Shelf.TOP else 0.65
        # If the object is too far, drive forward slowly
        if distance_to_target > drive_target_distance:
            # 1/10th of max wheel speed on both wheels: slow straight-line approach.
            self.robot.parts["wheel_left_joint"].setVelocity(
                self.robot.parts["wheel_left_joint"].getMaxVelocity() / 10.0)
            self.robot.parts["wheel_right_joint"].setVelocity(
                self.robot.parts["wheel_right_joint"].getMaxVelocity() / 10.0)
            return Status.RUNNING
        # Object within range.
        else:
            # Stop both wheels before handing over to the grasping behaviour.
            self.robot.parts["wheel_left_joint"].setVelocity(0.0)
            self.robot.parts["wheel_right_joint"].setVelocity(0.0)
            return Status.SUCCESS
    def terminate(self, new_status):
        log("Finished driving towards object", LogLevel.DEBUG)
| tannerleise/RoboticsFinal | final_project/controllers/grocery_shopper/behavior/drive_to_within_range_of_target.py | drive_to_within_range_of_target.py | py | 1,554 | python | en | code | 0 | github-code | 13 |
25213558328 | import numpy as np
import pandas as pd
import re
from pitci.base import LeafNodeScaledConformalPredictor
import pitci
import pytest
from unittest.mock import Mock
class DummyLeafNodeScaledConformalPredictor(LeafNodeScaledConformalPredictor):
    """Dummy class inheriting from LeafNodeScaledConformalPredictor so its
    functionality can be tested.
    """
    def __init__(self, model="abcd"):
        # "abcd" is a stand-in for a real model object; the base class only stores it.
        super().__init__(model=model)
    def _generate_predictions(self, data):
        """Dummy function that returns 0s of shape (n,) where data has n rows."""
        return np.zeros(data.shape[0])
    def _generate_leaf_node_predictions(self, data):
        """Dummy function for returning leaf node index predictions, not implemented in
        DummyLeafNodeScaledConformalPredictor so it has to be implemented specifically in
        each test requiring it (typically by mocking it).
        """
        raise NotImplementedError(
            "_generate_leaf_node_predictions not implemented in DummyLeafNodeScaledConformalPredictor"
        )
class TestCalibrate:
    """Tests for the LeafNodeScaledConformalPredictor.calibrate method."""
    # --- argument validation ------------------------------------------------
    @pytest.mark.parametrize("alpha", [(-0.0001), (-1), (1.0001), (2), (55)])
    def test_alpha_value_error(self, dmatrix_2x1_with_label, alpha):
        """Test an exception is raised if alpha is below 0 or greater than 1."""
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        with pytest.raises(
            ValueError, match=re.escape("alpha must be in range [0 ,1]")
        ):
            dummy_confo_model.calibrate(
                data=dmatrix_2x1_with_label, alpha=alpha, response=np.array([0, 1])
            )
    def test_alpha_incorrect_type_error(self, dmatrix_2x1_with_label):
        """Test an exception is raised if alpha is not an int or float."""
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        with pytest.raises(
            TypeError,
            match=re.escape(
                f"alpha is not in expected types {[int, float]}, got {str}"
            ),
        ):
            dummy_confo_model.calibrate(
                data=dmatrix_2x1_with_label, alpha="abc", response=np.array([0, 1])
            )
    def test_response_incorrect_type_error(self, dmatrix_2x1_with_label):
        """Test an exception is raised if response is not a pd.Series or np.ndarray."""
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        with pytest.raises(
            TypeError,
            match=re.escape(
                f"response is not in expected types {[pd.Series, np.ndarray]}, got {bool}"
            ),
        ):
            dummy_confo_model.calibrate(
                data=dmatrix_2x1_with_label, alpha=0.5, response=False
            )
    # --- delegation to the two internal calibration steps -------------------
    def test_calibrate_calls_no_train_data(self, mocker, dmatrix_2x1_with_label):
        """Test the calls to _calibrate_interval and _calibrate_leaf_node_counts methods
        when train_data is None.
        """
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        # Attaching both patches to one Mock lets us assert on their relative
        # call order via mock_manager.mock_calls.
        mock_manager = Mock()
        mocked = mocker.patch.object(
            pitci.base.LeafNodeScaledConformalPredictor, "_calibrate_leaf_node_counts"
        )
        mocked2 = mocker.patch.object(
            pitci.base.LeafNodeScaledConformalPredictor, "_calibrate_interval"
        )
        mock_manager.attach_mock(mocked, "the_calibrate_leaf_node_counts")
        mock_manager.attach_mock(mocked2, "the_calibrate_interval")
        dummy_confo_model.calibrate(
            data=dmatrix_2x1_with_label,
            alpha=0.1,
            response=dmatrix_2x1_with_label.get_label(),
            train_data=None,
        )
        # test each function is called the correct number of times
        assert (
            mocked.call_count == 1
        ), "incorrect number of calls to _calibrate_leaf_node_counts"
        assert (
            mocked2.call_count == 1
        ), "incorrect number of calls to _calibrate_interval"
        # test the order of calls to functions
        assert (
            mock_manager.mock_calls[0][0] == "the_calibrate_leaf_node_counts"
        ), "_calibrate_leaf_node_counts not called first"
        assert (
            mock_manager.mock_calls[1][0] == "the_calibrate_interval"
        ), "_calibrate_interval not called second"
        # test the argumnets in the _calibrate_leaf_node_counts call
        call_args = mocked.call_args_list[0]
        call_pos_args = call_args[0]
        call_kwargs = call_args[1]
        assert (
            call_pos_args == ()
        ), "positional args incorrect in _calibrate_leaf_node_counts call"
        assert (
            call_kwargs["data"] == dmatrix_2x1_with_label
        ), "data arg incorrect in _calibrate_leaf_node_counts call"
        # test the arguments in the _calibrate_interval call
        call_args = mocked2.call_args_list[0]
        call_pos_args = call_args[0]
        call_kwargs = call_args[1]
        assert (
            call_pos_args == ()
        ), "positional args incorrect in _calibrate_interval call"
        np.testing.assert_array_equal(
            call_kwargs["response"], dmatrix_2x1_with_label.get_label()
        )
        assert (
            call_kwargs["alpha"] == 0.1
        ), "alpha arg incorrect in _calibrate_interval call"
        assert (
            call_kwargs["data"] == dmatrix_2x1_with_label
        ), "data arg incorrect in _calibrate_interval call"
    def test_calibrate_calls_with_train_data(
        self, mocker, dmatrix_2x1_with_label, dmatrix_2x1_with_label_gamma
    ):
        """Test the calls to _calibrate_interval and _calibrate_leaf_node_counts methods
        when train_data is specified.
        """
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        mock_manager = Mock()
        mocked = mocker.patch.object(
            pitci.base.LeafNodeScaledConformalPredictor, "_calibrate_leaf_node_counts"
        )
        mocked2 = mocker.patch.object(
            pitci.base.LeafNodeScaledConformalPredictor, "_calibrate_interval"
        )
        mock_manager.attach_mock(mocked, "the_calibrate_leaf_node_counts")
        mock_manager.attach_mock(mocked2, "the_calibrate_interval")
        dummy_confo_model.calibrate(
            data=dmatrix_2x1_with_label,
            alpha=0.1,
            response=dmatrix_2x1_with_label.get_label(),
            train_data=dmatrix_2x1_with_label_gamma,
        )
        # test each function is called the correct number of times
        assert (
            mocked.call_count == 1
        ), "incorrect number of calls to _calibrate_leaf_node_counts"
        assert (
            mocked2.call_count == 1
        ), "incorrect number of calls to _calibrate_interval"
        # test the order of calls to functions
        assert (
            mock_manager.mock_calls[0][0] == "the_calibrate_leaf_node_counts"
        ), "_calibrate_leaf_node_counts not called first"
        assert (
            mock_manager.mock_calls[1][0] == "the_calibrate_interval"
        ), "_calibrate_interval not called second"
        # test the argumnets in the _calibrate_leaf_node_counts call
        # NOTE: leaf node counts should be calibrated on train_data here, not data.
        call_args = mocked.call_args_list[0]
        call_pos_args = call_args[0]
        call_kwargs = call_args[1]
        assert (
            call_pos_args == ()
        ), "positional args incorrect in _calibrate_leaf_node_counts call"
        assert (
            call_kwargs["data"] == dmatrix_2x1_with_label_gamma
        ), "data arg incorrect in _calibrate_leaf_node_counts call"
        # test the arguments in the _calibrate_interval call
        call_args = mocked2.call_args_list[0]
        call_pos_args = call_args[0]
        call_kwargs = call_args[1]
        assert (
            call_pos_args == ()
        ), "positional args incorrect in _calibrate_interval call"
        np.testing.assert_array_equal(
            call_kwargs["response"], dmatrix_2x1_with_label.get_label()
        )
        assert (
            call_kwargs["alpha"] == 0.1
        ), "alpha arg incorrect in _calibrate_interval call"
        assert (
            call_kwargs["data"] == dmatrix_2x1_with_label
        ), "data arg incorrect in _calibrate_interval call"
class TestCalculateScalingFactors:
    """Tests for the LeafNodeScaledConformalPredictor._calculate_scaling_factors method."""
    def test_leaf_node_counts_exception(self):
        """Test an exception is raised if the leaf_node_counts attribute does not exist."""
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        # Guard: the attribute must genuinely be absent for the test to be meaningful.
        assert not hasattr(
            dummy_confo_model, "leaf_node_counts"
        ), "dummy_confo_model already has leaf_node_counts attribute"
        with pytest.raises(
            AttributeError,
            match="leaf_node_counts attribute missing, run calibrate first.",
        ):
            dummy_confo_model._calculate_scaling_factors(np.array([0, 1, 3, -9]))
    def test_generate_leaf_node_predictions(self, mocker):
        """Test _generate_leaf_node_predictions is called with the data arg and the output
        from this method is passed to the _count_leaf_node_visits_from_calibration
        method.
        """
        leaf_nodes_return_value = np.array([1, 0, 1 / 3, 2])
        # set return value from _generate_leaf_node_predictions
        mocked = mocker.patch.object(
            DummyLeafNodeScaledConformalPredictor,
            "_generate_leaf_node_predictions",
            return_value=leaf_nodes_return_value,
        )
        mocked2 = mocker.patch.object(
            DummyLeafNodeScaledConformalPredictor,
            "_count_leaf_node_visits_from_calibration",
            return_value=np.array([1]),
        )
        # set a dummy value for leaf_node_counts attribute as
        # _count_leaf_node_visits_from_calibration is mocked
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        dummy_confo_model.leaf_node_counts = 1234
        data_arg = np.array([0, 1, 3, -9])
        dummy_confo_model._calculate_scaling_factors(data_arg)
        # test the call to _generate_leaf_node_predictions
        assert (
            mocked.call_count == 1
        ), "incorrect number of calls to _generate_leaf_node_predictions"
        call_args = mocked.call_args_list[0]
        call_pos_args = call_args[0]
        call_kwargs = call_args[1]
        assert (
            call_kwargs == {}
        ), "keyword args incorrect in _generate_leaf_node_predictions call"
        assert len(call_pos_args) == 1, "incorrect number of positional args"
        np.testing.assert_array_equal(call_pos_args[0], data_arg)
        # test _count_leaf_node_visits_from_calibration called with
        # _generate_leaf_node_predictions outputs
        assert (
            mocked2.call_count == 1
        ), "incorrect number of calls to _count_leaf_node_visits_from_calibration"
        call_args = mocked2.call_args_list[0]
        call_pos_args = call_args[0]
        call_kwargs = call_args[1]
        assert (
            call_pos_args == ()
        ), "positional args incorrect in _count_leaf_node_visits_from_calibration call"
        assert list(call_kwargs.keys()) == [
            "leaf_node_predictions"
        ], "incorrect kwargs in _count_leaf_node_visits_from_calibration call"
        np.testing.assert_array_equal(
            call_kwargs["leaf_node_predictions"], leaf_nodes_return_value
        )
    def test_expected_output(self, mocker):
        """Test that the output from the function is calculated as 1 / _count_leaf_node_visits_from_calibration
        method output.
        """
        # Includes negative, zero (-> inf), fractional and positive counts to
        # exercise the element-wise reciprocal.
        count_leaf_nodes_return_value = np.array([-4, 0, 1 / 3, 2])
        # set return value from _count_leaf_node_visits_from_calibration
        mocker.patch.object(
            DummyLeafNodeScaledConformalPredictor,
            "_count_leaf_node_visits_from_calibration",
            return_value=count_leaf_nodes_return_value,
        )
        # mock _generate_leaf_node_predictions so it doesn't run
        mocker.patch.object(
            DummyLeafNodeScaledConformalPredictor, "_generate_leaf_node_predictions"
        )
        expected_results = 1 / count_leaf_nodes_return_value
        # set a dummy value for leaf_node_counts attribute as
        # _count_leaf_node_visits_from_calibration is mocked
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        dummy_confo_model.leaf_node_counts = 1234
        results = dummy_confo_model._calculate_scaling_factors(np.array([0]))
        np.testing.assert_array_equal(results, expected_results)
class TestCountLeafNodeVisitsFromCalibration:
    """Tests for the LeafNodeScaledConformalPredictor._count_leaf_node_visits_from_calibration method."""
    def test_sum_dict_values(self, mocker):
        """Test that _sum_dict_values is applied to every row in the passed
        leaf_node_predictions args.
        """
        mocked = mocker.patch.object(
            LeafNodeScaledConformalPredictor, "_sum_dict_values"
        )
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        # set leaf_node_counts attribute so np.apply_along_axis can run
        dummy_confo_model.leaf_node_counts = {"a": 1}
        leaf_node_predictions_value = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        dummy_confo_model._count_leaf_node_visits_from_calibration(
            leaf_node_predictions_value
        )
        # one _sum_dict_values call per row of leaf_node_predictions
        assert (
            mocked.call_count == leaf_node_predictions_value.shape[0]
        ), "incorrect number of calls to _sum_dict_values"
        for call_no in range(leaf_node_predictions_value.shape[0]):
            call_args = mocked.call_args_list[call_no]
            call_pos_args = call_args[0]
            call_kwargs = call_args[1]
            assert call_kwargs == {
                "counts": dummy_confo_model.leaf_node_counts
            }, f"keyword args in _sum_dict_values call {call_no} incorrect"
            assert (
                len(call_pos_args) == 1
            ), f"number of positional args in _sum_dict_values call {call_no} incorrect"
            # each call receives the corresponding row of the predictions array
            np.testing.assert_array_equal(
                call_pos_args[0], leaf_node_predictions_value[call_no, :]
            )
    def test_sum_dict_values_returned(self, mocker):
        """Test the output of running _sum_dict_values on each row is returned from the method."""
        # set the return value from _sum_dict_values calls
        # (side_effect yields one value per successive call)
        sum_dict_values_return_values = [-2, 1, 0]
        mocker.patch.object(
            LeafNodeScaledConformalPredictor,
            "_sum_dict_values",
            side_effect=sum_dict_values_return_values,
        )
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        # set leaf_node_counts attribute so np.apply_along_axis can run
        dummy_confo_model.leaf_node_counts = {"a": 1}
        # set leaf_node_predictions arg so _sum_dict_values will be called 3 times
        leaf_node_predictions_value = np.array([[1], [2], [3]])
        results = dummy_confo_model._count_leaf_node_visits_from_calibration(
            leaf_node_predictions_value
        )
        np.testing.assert_array_equal(results, np.array(sum_dict_values_return_values))
class TestCalibrateLeafNodeCounts:
    """Tests for the LeafNodeScaledConformalPredictor._calibrate_leaf_node_counts method."""
    def test_leaf_node_counts_calculated_correctly(self, mocker):
        """Test that leaf_node_counts are calculated as expected."""
        # Rows are samples, columns are trees; values are visited leaf indices.
        leaf_node_preds = np.array(
            [[1, 2, 3, 1, 3], [2, 2, 4, 2, 1], [1, 2, 5, 1, 7], [1, 2, 0, -4, 1]]
        )
        # set return value from _generate_leaf_node_predictions
        mocker.patch.object(
            DummyLeafNodeScaledConformalPredictor,
            "_generate_leaf_node_predictions",
            return_value=leaf_node_preds,
        )
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        dummy_confo_model._calibrate_leaf_node_counts(np.array([0]))
        # leaf_node_counts should be a tabulation of each column in leaf_node_preds
        # (one {leaf_index: visit_count} dict per tree)
        expected_leaf_node_counts = [
            {1: 3, 2: 1},
            {2: 4},
            {0: 1, 3: 1, 4: 1, 5: 1},
            {-4: 1, 1: 2, 2: 1},
            {1: 2, 3: 1, 7: 1},
        ]
        assert (
            dummy_confo_model.leaf_node_counts == expected_leaf_node_counts
        ), "leaf_node_counts not calculated correctly"
class TestSumDictValues:
    """Tests for the LeafNodeScaledConformalPredictor._sum_dict_values method."""
    # arr holds one leaf index per tree; counts maps tree index ->
    # {leaf_index: count}; the expected output is the sum of the looked-up counts.
    @pytest.mark.parametrize(
        "arr, counts, expected_output",
        [
            (np.array([1]), {0: {1: 123}}, 123),
            (
                np.array([1, 1, 1]),
                {0: {1: 123, 0: 21}, 1: {3: -1, 1: 100}, 2: {1: 5}},
                228,
            ),
            (
                np.array([1, 2, 3]),
                {0: {1: -1}, 1: {3: 21, 1: 100, 2: -1}, 2: {1: 5, 2: 99, 3: -1}},
                -3,
            ),
        ],
    )
    def test_expected_output(self, arr, counts, expected_output):
        """Test the correct values are summed in function."""
        output = LeafNodeScaledConformalPredictor._sum_dict_values(arr, counts)
        assert output == expected_output, "_sum_dict_values produced incorrect output"
class TestCalculateNonconformityScores:
    """Tests for the LeafNodeScaledConformalPredictor._calculate_nonconformity_scores method."""
    def test_scaled_absolute_error_call(self, mocker):
        """Test the nonconformity.scaled_absolute_error function is called correctly."""
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        # Sentinel values: only pass-through wiring is under test here.
        nonconformity_scores_return_value = 1234
        predictions_value = 1
        response_value = 2
        scaling_factors_value = 3
        mocker.patch.object(
            pitci.nonconformity,
            "scaled_absolute_error",
            return_value=nonconformity_scores_return_value,
        )
        result = dummy_confo_model._calculate_nonconformity_scores(
            predictions_value, response_value, scaling_factors_value
        )
        assert (
            pitci.nonconformity.scaled_absolute_error.call_count == 1
        ), "nonconformity.scaled_absolute_error function not called the correct number of times"
        assert (
            pitci.nonconformity.scaled_absolute_error.call_args_list[0][0] == ()
        ), "positional arguments in nonconformity.scaled_absolute_error call incorrect"
        expected_call_kwargs = {
            "predictions": predictions_value,
            "response": response_value,
            "scaling": scaling_factors_value,
        }
        assert (
            pitci.nonconformity.scaled_absolute_error.call_args_list[0][1]
            == expected_call_kwargs
        ), "keyword arguments in nonconformity.scaled_absolute_error call incorrect"
        assert result == nonconformity_scores_return_value, (
            "return value from _calculate_nonconformity_scores is not the output from "
            "nonconformity.scaled_absolute_error function"
        )
| richardangell/pitci | tests/base/test_LeafNodeScaledConformalPredictor.py | test_LeafNodeScaledConformalPredictor.py | py | 19,186 | python | en | code | 7 | github-code | 13 |
10438095906 | # #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
from __future__ import absolute_import
from __future__ import print_function
import os
import string
import textwrap
from sys import stderr as STDERR
from six.moves import map
def read_fasta(source, obj=None, header_delimiter="\t", fix_duplicates=True):
    """Reads a collection of sequences encoded in FASTA format.

    :param source: path to a FASTA file (plain or gzipped) or a string
        containing the FASTA text itself.
    :param obj: optional SeqGroup-like object to populate (must expose the
        id2seq/id2name/name2id/id2comment dicts); a new SeqGroup is created
        when None.
    :param header_delimiter: separator splitting header lines into the
        sequence name and trailing comment fields.
    :param fix_duplicates: when True, duplicated sequence names are renamed
        (prefixed with a counter) instead of overwriting previous entries.
    :returns: the populated container, or None if the last entry is empty.
    :raises Exception: on a header with no sequence or sequence data before
        any header.
    """
    if obj is None:
        from ..coretype import seqgroup
        SC = seqgroup.SeqGroup()
    else:
        SC = obj
    names = set([])
    seq_id = -1
    # Prepares handle from which read sequences
    _close_handle = False
    if os.path.isfile(source):
        if source.endswith('.gz'):
            import gzip
            # "rt" so iteration yields str, not bytes (py3 gzip default is binary,
            # which would break the startswith('>') checks below)
            _source = gzip.open(source, "rt")
        else:
            _source = open(source, "r")
        _close_handle = True
    else:
        _source = iter(source.split("\n"))
    seq_name = None
    try:
        for line in _source:
            line = line.strip()
            if line.startswith('#') or not line:
                continue
            # Reads seq number
            elif line.startswith('>'):
                # Checks if previous name had seq
                if seq_id > -1 and SC.id2seq[seq_id] == "":
                    raise Exception("No sequence found for " + seq_name)
                seq_id += 1
                # Takes header info
                seq_header_fields = [_f.strip() for _f in line[1:].split(header_delimiter)]
                seq_name = seq_header_fields[0]
                # Checks for duplicated seq names
                if fix_duplicates and seq_name in names:
                    tag = str(len([k for k in list(SC.name2id.keys()) if k.endswith(seq_name)]))
                    old_name = seq_name
                    seq_name = tag + "_" + seq_name
                    print("Duplicated entry [%s] was renamed to [%s]" % (old_name, seq_name), file=STDERR)
                # stores seq_name
                SC.id2seq[seq_id] = ""
                SC.id2name[seq_id] = seq_name
                SC.name2id[seq_name] = seq_id
                SC.id2comment[seq_id] = seq_header_fields[1:]
                names.add(seq_name)
            else:
                if seq_name is None:
                    raise Exception("Error reading sequences: Wrong format.")
                # removes all white spaces in line
                s = line.strip().replace(" ", "")
                # append to seq_string
                SC.id2seq[seq_id] += s
    finally:
        # close the file handle even on error (it was previously leaked)
        if _close_handle:
            _source.close()
    if seq_name and SC.id2seq[seq_id] == "":
        print(seq_name, "has no sequence", file=STDERR)
        return None
    # Everything ok
    return SC
def write_fasta(sequences, outfile=None, seqwidth=80):
    """ Writes a SeqGroup python object using FASTA format.

    :param sequences: iterable of (name, sequence, comment_fields) tuples.
    :param outfile: path to write to; when None the FASTA text is returned.
    :param seqwidth: maximum number of residues per sequence line.
    """
    wrapper = textwrap.TextWrapper()
    wrapper.break_on_hyphens = False
    wrapper.replace_whitespace = False
    wrapper.expand_tabs = False
    wrapper.break_long_words = True
    # honour the seqwidth argument (it was previously ignored: the width was
    # hard-coded to 80 regardless of the parameter)
    wrapper.width = seqwidth
    text = '\n'.join([">%s\n%s\n" % ("\t".join([name] + comment), wrapper.fill(seq)) for
                      name, seq, comment in sequences])
    if outfile is not None:
        # context manager guarantees the handle is closed even on write errors
        with open(outfile, "w") as OUT:
            OUT.write(text)
    else:
        return text
| dongzhang0725/PhyloSuite | PhyloSuite/ete3/parser/fasta.py | fasta.py | py | 4,449 | python | en | code | 118 | github-code | 13 |
35554795465 | import requests
from pyquery import PyQuery as pq
from flask import Flask
import re
import time
import json
sess = requests.session()
def search_company(keywords,
                   longi=None,
                   lati=None,
                   dis=None,
                   biztype=None,
                   beginPage=1):
    """Search 1688.com for companies matching the keywords.

    :param keywords: search keywords (GBK-quoted into the URL)
    :param longi: longitude of the search centre
    :param lati: latitude of the search centre
    :param dis: radius filter: 100, 200, 300 or unlimited
    :param biztype: business type filter (manufacturer, wholesaler, agent, ...)
    :param beginPage: 1-based result page number
    :return: list of {'name': ..., 'page': ...} dicts, one per company
    """
    # NOTE: the original concatenation produced "company_search.htm??button_click=..."
    # (a doubled '?'); fixed to a single query-string separator.
    url = ("https://s.1688.com/company/company_search.htm"
           "?button_click=top"
           "&n=y"
           "&keywords=" + requests.utils.quote(keywords, encoding='gbk') +
           "&beginPage=" + str(beginPage))
    if longi:
        url += "&longi=" + str(longi) + \
               "&lati=" + str(lati) + \
               "&dis=" + str(dis)
    if biztype:
        url += "&biztype=" + str(biztype)
    resp = sess.get(url)
    print(resp.url)
    resp.encoding = 'gbk'
    html = pq(resp.text)
    company_items = html(".company-list-item")
    companies = []
    for i in range(company_items.length):
        co = company_items.eq(i)
        it = dict()
        a = co(".list-item-title .list-item-title-text")
        it['name'] = a.text()
        it['page'] = a.attr("href")
        # strip any query string from the company page URL; guard against a
        # missing href (attr() returns None when the attribute is absent)
        if it['page'] and '?' in it['page']:
            it['page'] = it['page'][:it['page'].index("?")]
        companies.append(it)
    return companies
def search_all_company(keywords="羽绒服女",
                       longi="116.4075",
                       lati="39.904030",
                       dis="200",
                       biztype="2"):
    """Collect companies from every result page until an empty page appears."""
    results = []
    page = 1
    while True:
        batch = search_company(keywords, longi, lati, dis, biztype, page)
        if not batch:
            return results
        results.extend(batch)
        page += 1
def get_product_cnt(company, keyword="羽绒服"):
    """Fetch the number of products matching *keyword* on a company's offer page.

    Stores the count (as a string, straight from the regex) under
    company['product_cnt'].

    :param company: dict with a 'page' key (company page URL, no query string)
    :param keyword: product keyword, GBK-quoted into the offer-list URL
    :raises AttributeError: if the count pattern is not found in the page
    """
    # Fixed: the original referenced self.keywords, but this is a plain
    # function (no self) — it now uses the keyword parameter as intended.
    resp = sess.get(
        company['page'] + "/page/offerlist.htm?keywords=" + requests.utils.quote(keyword, encoding='gbk'))
    resp.encoding = 'gbk'
    print(resp.url)
    # "found <em>N</em> matching products" banner on the offer-list page
    res = re.search(r"共搜索到<em>\s*(\d+)\s*</span></em>个符合条件的产品", resp.text)
    cnt = res.group(1)
    company['product_cnt'] = cnt
if __name__ == "__main__":
    # Run the crawl only when executed as a script, not on import (the
    # original fired a network crawl at module import time).
    companies = search_all_company()
    for company in companies:
        # get_product_cnt(company, '羽绒服')  # optionally enrich with product counts
        print(company)
| weiyinfu/java-python-crawler | tao/使用requests.py | 使用requests.py | py | 2,500 | python | en | code | 4 | github-code | 13 |
14547996265 | # -*- coding: utf-8 -*-
# @Time : 2020/11/22 12:56
# @Author : ooooo
from typing import *
class Solution:
def isAnagram(self, s: str, t: str) -> bool:
m = dict()
for i in s:
if i in m:
m[i] += 1
else:
m[i] = 1
for i in t:
if i not in m or m[i] == 0:
return False
m[i] -= 1
if m[i] == 0:
del m[i]
return len(m) == 0
if __name__ == '__main__':
    sol = Solution()
    print(sol.isAnagram('anagram', 'nagaram'))  # expect True
    print(sol.isAnagram('rat', 'cat'))          # expect False
    print(sol.isAnagram('ab', 'a'))             # expect False
| ooooo-youwillsee/leetcode | 0000-0500/0242-Valid-Anagram/py_0242/solution1.py | solution1.py | py | 670 | python | en | code | 7 | github-code | 13 |
10441630462 | from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
class InitializingWP(WaitPage):
    # Synchronisation point: every group must arrive before the round proceeds.
    wait_for_all_groups = True
class ScoreWP(WaitPage):
    def after_all_players_arrive(self):
        # Once the whole group arrives: aggregate scores, rank the groups,
        # then assign player types from the ranking.
        self.group.sum_score()
        self.group.ranking_for_groups()
        self.group.given_types()
class Score(Page):
    def before_next_page(self):
        # Carry participant-level variables over to the next app in the session.
        self.player.var_between_apps()
# Order in which the pages above are shown to participants each round.
page_sequence = [
    InitializingWP,
    ScoreWP,
    Score,
]
| manumunoz/Switch_Ranking_ES | given_type/pages.py | pages.py | py | 513 | python | en | code | 0 | github-code | 13 |
42231041902 | # filebot.py
import os
import json
from difflib import get_close_matches
from unidecode import unidecode
import logging
class FileBot:
    """Simple retrieval chatbot: stores question/answer pairs in a JSON file
    and answers by fuzzy-matching the user's question against trained ones.
    """
    def __init__(self, qa_pairs_file='qa_pairs.json'):
        self.qa_pairs_file = qa_pairs_file
        self.qa_pairs = self.load_qa_pairs()
    def preprocess_text(self, text):
        """Normalise text (lowercase, strip accents) before comparison."""
        return unidecode(text.lower())
    def load_qa_pairs(self):
        """Load question/answer pairs from the JSON file; returns {} on any failure."""
        try:
            if os.path.exists(self.qa_pairs_file):
                with open(self.qa_pairs_file, 'r', encoding='utf-8') as file:
                    return json.load(file)
            return {}
        except Exception as e:
            logging.error(f"Erro ao carregar pares de pergunta e resposta: {str(e)}")
            return {}
    def save_qa_pairs(self):
        """Persist the current question/answer pairs to the JSON file."""
        try:
            with open(self.qa_pairs_file, 'w', encoding='utf-8') as file:
                json.dump(self.qa_pairs, file, ensure_ascii=False, indent=4)
        except Exception as e:
            logging.error(f"Erro ao salvar pares de pergunta e resposta: {str(e)}")
    def train_with_text(self, text):
        """Train the FileBot from lines of the form 'question: answer'.

        Splits each line on the FIRST colon only, so answers may themselves
        contain colons (the previous unbounded split raised ValueError on
        such lines when unpacking).
        """
        try:
            qa_pairs = [line.strip().split(':', 1) for line in text.split('\n') if ':' in line]
            for question, answer in qa_pairs:
                preprocessed_question = self.preprocess_text(question)
                if preprocessed_question in self.qa_pairs:
                    logging.warning(f"A pergunta '{preprocessed_question}' já foi treinada. Substituindo resposta.")
                self.qa_pairs[preprocessed_question] = answer
            self.save_qa_pairs()
        except Exception as e:
            logging.error(f"Erro durante o treinamento: {str(e)}")
    def find_most_similar_question(self, user_question):
        """Return the closest trained question (difflib, cutoff 0.6) or None."""
        try:
            user_question_processed = self.preprocess_text(user_question)
            possible_questions = get_close_matches(user_question_processed, self.qa_pairs.keys(), n=1, cutoff=0.6)
            if possible_questions:
                return possible_questions[0]
            else:
                return None
        except Exception as e:
            logging.error(f"Erro ao encontrar pergunta mais similar: {str(e)}")
            return None
    def answer_question(self, user_question):
        """Return the stored answer for the most similar trained question,
        or a fallback message when nothing matches."""
        try:
            similar_question = self.find_most_similar_question(user_question)
            if similar_question is not None:
                return self.qa_pairs[similar_question]
            else:
                return "Desculpe, não sei a resposta para essa pergunta."
        except Exception as e:
            logging.error(f"Erro ao gerar resposta: {str(e)}")
            return "Desculpe, ocorreu um erro ao processar a pergunta."
| gustavojskk/chat-bot | filebot.py | filebot.py | py | 3,102 | python | pt | code | 0 | github-code | 13 |
70038531218 | import cv2
import numpy as np
import AIT1000_walkman
def box_bounding_to_box_center(box_bounding):
"""
用于把输入的边界值转换为中心点的xy坐标以及box的高度和宽度
box_bounding:[left, top, right, bottom]
:param box_bounding: 边界值
:return: 中心点的xy坐标以及box的高度和宽度
"""
center_x = (int(box_bounding[0]) + int(box_bounding[2])) / 2
center_y = (int(box_bounding[1]) + int(box_bounding[3])) / 2
box_width = (int(box_bounding[2]) - int(box_bounding[0]))
box_height = (int(box_bounding[3]) - int(box_bounding[1]))
return (center_x,center_y,box_width,box_height)
def box_center_to_box_bounding(box_center):
"""
用于把输入的中心点xy位置以及宽高转化为边界像素值
box_center:[center_x,center_y,width,height]
:param box_bounding: 边界值
:return: 四个边界值
"""
x1 = box_center[0] - box_center[2]//2
y1 = box_center[1] - box_center[3]//2
x2 = box_center[0] + box_center[2] // 2
y2 = box_center[1] + box_center[3] // 2
return [x1, y1, x2, y2]
def plot_one_box(box_bounding, img, color=(0, 200, 0), target=False):
"""
用于绘制边框
:param box_bounding:边框的边界值
:param img:绘制的图像
:param color:颜色
:param target:是否是我们正在跟踪的目标,如果是,则绘制为红色
:return: NONE
"""
right_bottom = (int(box_bounding[0]), int(box_bounding[1]))
# xy1是右边框和下边框
left_top = (int(box_bounding[2]), int(box_bounding[3]))
# xy2是左边框和上边框
if target:
color = (0, 0, 255)
cv2.rectangle(img, right_bottom, left_top, color, 1, cv2.LINE_AA) # filled
# 调用cv2的矩形绘制函数绘制矩形,cv2.LINE_AA是绘制的线的样式,1表示线的磅数
def updata_trace_list(box_center, trace_list, max_list_len=50):
"""
用于更新轨迹列表,这个trace_list是在draw_trace函数里面使用的,用于绘制轨迹
:param box_center:box的中心
:param trace_list:一系列box的center,包括之前的很多个帧的跟踪目标的box_center
:param max_list_len:最大列表长度,直接决定了绘制的轨迹的长度
:return: NONE
"""
if len(trace_list) <= max_list_len:
trace_list.append(box_center)
# 如果长度不够,也就是在跟踪刚开始的阶段,直接往里面加就行
else:
trace_list.pop(0)
trace_list.append(box_center)
# 后面的阶段,为了保证轨迹为定长度,因此先pop后append
return trace_list
def draw_trace(img, trace_list):
"""
用于绘制轨迹列表
:param img: 图像
:param trace_list: 轨迹列表
:return: NONE
"""
for i, item in enumerate(trace_list):
# i == index
# item = trace_list[i]
if i < 1:
continue
cv2.line(img,
(item[0], item[1]), (trace_list[i - 1][0], trace_list[i - 1][1]),
(255, 255, 0), 3, cv2.LINE_AA)
# 调用cv2的line函数进行画线
# parameters:img + start_point + end_point + color + width + line_style
def cal_iou(box1, box2):
"""
计算两个box的iou,iou越大说明两个box的重合度越高,重合度最高的box就认为是我们的观测值,用于后面的KF
:param box1: box1
:param box2: box2
:return: iou
"""
# box1 是第一个box的左上右下
# box2 是第二个box的左上右下
x1min, y1min, x1max, y1max = box1[0], box1[1], box1[2], box1[3]
x2min, y2min, x2max, y2max = box2[0], box2[1], box2[2], box2[3]
# print('--------')
# print(x1max, x1min, y1max, y1min)
# print(x2max, x2min, y2max, y2min)
# print('--------')
# 计算两个框的面积
s1 = (y1max - y1min + 1.) * (x1max - x1min + 1.)
# +1.是框的偏移
s2 = (y2max - y2min + 1.) * (x2max - x2min + 1.)
# 计算相交部分的坐标
xmin = max(x1min, x2min)
ymin = max(y1min, y2min)
xmax = min(x1max, x2max)
ymax = min(y1max, y2max)
# 这里的xmin,xmax,ymin,ymax构成了一个box,这个box是box1和box2的重合部分
inter_h = max(ymax - ymin + 1, 0)
inter_w = max(xmax - xmin + 1, 0)
# 计算box1 & box2的重合部分的宽高(如果有重合部分的话)
intersection = inter_h * inter_w
# 重合部分的面积
union = s1 + s2 - intersection
# union == 总面积 - 重合部分的面积,也就是没有重合的总面积
# 计算iou
iou = intersection / union
# print(iou)
# iou == box1和box2相交部分的面积 / box1和box2不相交部分的面积
# iou越高,就说明两个box的契合度越高
return iou
def cal_distance(box1, box2):
"""
用于计算两个box的中心点的距离
:param box1: box1
:param box2: box2
:return: distance
"""
center1 = ((box1[0] + box1[2]) // 2, (box1[1] + box1[3]) // 2)
center2 = ((box2[0] + box2[2]) // 2, (box2[1] + box2[3]) // 2)
dis = ((center1[0] - center2[0]) ** 2 + (center1[1] - center2[1]) ** 2) ** 0.5
return dis
def Kalman_walkman(last_best_estimated_pos_center, last_best_estimated_P, frame, cascade_classifier='/Users/a111/Downloads/haarcascade_fullbody.xml'):
"""
:param last_best_estimated_pos: 上一帧的最优估计的目标位置,box_bounding形式
:param last_best_estimated_P: 上一帧的最优估计的矩阵P
:param frame: 视频帧
:return: new_best_estimated_pos, new_best_estimated_P这一帧的最优估计的目标位置
"""
'''
这里需要先用yolo或者其他方法识别出frame中的所有行人框位置,
做成一个list,假设为walkman_pos_list
'''
last_best_estimated_pos_bounding = box_center_to_box_bounding(last_best_estimated_pos_center)
walkman_pos_list = []
max_iou = 0.3
# 在这里设置IOU的阈值
target_matched_judgement = False
last_best_estimated_status = np.array([[last_best_estimated_pos_center[0],
last_best_estimated_pos_center[1],
last_best_estimated_pos_center[2],
last_best_estimated_pos_center[3],
0,
0]]).T
observated_status = np.array([[0,
0,
0,
0,
0,
0]]).T
# 初始化X和Z
# 这里的walkman_pos_list就是我们需要调用算法得到的,frame中所有的行人的位置,bounding形式
walkman_pos_list = AIT1000_walkman.get_persons_pos(frame)
for index, box_pos in enumerate(walkman_pos_list):
box_pos = box_center_to_box_bounding(box_pos)
iou = cal_iou(last_best_estimated_pos_bounding, box_pos)
# 把这一帧里的所有行人框和上一帧的最优估计进行契合度的计算,
# 把iou最高的box作为这一帧的观测值
if iou >= max_iou:
target_pos_bounding = box_pos
# 记录box位置
max_iou = iou
# 更新max_iou
target_matched_judgement = True
if target_matched_judgement:
# 如果匹配上,则进行kalman filter的预测
# 先计算dx,dy
target_pos_center = box_bounding_to_box_center(target_pos_bounding)
dx = target_pos_center[0] - last_best_estimated_pos_center[0]
dy = target_pos_center[1] - last_best_estimated_pos_center[1]
# 补全X和Z
last_best_estimated_status[4], last_best_estimated_status[5] = dx, dy
# print(target_pos_center)
observated_status[0:4] = np.array(target_pos_center).reshape(4, 1)
observated_status[4], observated_status[5] = dx, dy
X_last = last_best_estimated_status
Z = observated_status
# 也就是观测值直接都用这一帧的检测值即可
# 其中H、R、B都是不变的,直接定义就可以(或者传递过来,这里图简单,就直接定义了)
H = np.eye(6)
R = np.eye(6)
P_last = last_best_estimated_P
Q = np.eye(6) * 0.1
B = None
# 状态转移矩阵
A = np.array([[1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
'''
状态转移矩阵,上一时刻的状态转移到当前时刻
x2 = x1 + dx
y2 = y1 + dy
w2 = w1 认为矩形框高度宽度不变
h2 = h1
dx2 = dx1
dy2 = dy1 认为目标匀速运动?
'''
# -----进行先验估计-----------------
X_prior = np.dot(A, X_last)
# box_prior = box_center_to_box_bounding(X_prior[0:4])
# plot_one_box(box_prior, frame, color=(0, 0, 0), target=False)
# -----计算状态估计协方差矩阵P--------
P_prior = np.dot(np.dot(A, P_last), A.T) + Q
# ------计算卡尔曼增益---------------------
k1 = np.dot(P_prior, H.T)
k2 = np.dot(np.dot(H, P_prior), H.T) + R
K = np.dot(k1, np.linalg.inv(k2))
# --------------后验估计------------
# X_posterior_1 = Z - np.dot(H, X_prior)
X_best_estimated = X_prior + np.dot(K, Z - np.dot(H, X_prior))
box_posterior = box_center_to_box_bounding(X_best_estimated[0:4])
# plot_one_box(box_posterior, frame, color=(255, 255, 255), target=False)
# ---------更新状态估计协方差矩阵P-----
# P_posterior_1 = np.eye(6) - np.dot(K, H)
I = np.eye(6)
P_best_estimated = np.dot(I-np.dot(K,H), P_prior)
return box_posterior, P_best_estimated, False
# 有匹配上的行人,FIRST_FLAG置为False,表示下一帧不需要重新选择目标
else:
# 如果没有匹配上,则认为这一帧的最优估计与上一帧相比,保持不变
# 没有匹配上的情况对应的就是目标丢失,目标丢失则需要获取一个新的追踪目标,调用算法获取危险距离内的最近目标即可
# P也进行初始化
new_best_estimated_P = np.eye(6)
target_dis, target_pos = AIT1000_walkman.get_target_pos(frame)
if target_pos == 'nobody' or target_pos == []:
# 如果没有需要追踪的target:则直接返回上一帧的目标位置
new_best_estimated_pos = last_best_estimated_pos_bounding
else:
# 如果有需要追踪的目标,就把需要追踪的目标作为下一次检测的目标位置
new_best_estimated_pos = box_center_to_box_bounding(target_pos)
return new_best_estimated_pos, new_best_estimated_P, True
# 没有匹配上目标,因此下一帧需要重新选择目标
"""
@9.4 @Fox
目前存在的问题:
1. 如何识别图片中每一帧的所有行人的位置
1.1. YOLO:识别率高,算法复杂度也高一些?
1.2. 通过其他算法?找找看
2. 如何确定我们要跟踪的目标
2.1. 计算Box面积,自动选择面积最大的那个Box进行跟踪预测
2.2. 多目标跟踪预测
3. 如何把我们的YOLO的预测结果和我们的预警系统结合起来?也就是,什么情况下我们认为是危险情况?
4. 如何获取新的一帧?@Line 251
@9.6 @Fox
解决9.4的问题:
1. 目前使用HOG+OpenCV处理
2. & 3. 用单目测距地方法进行处理
2. 对于first_frame的距离最近的行人(且该行人小于危险阈值)进行跟踪;如果跟踪丢失,则换一个最近的其他的距离小于危险阈值的目标进行跟踪
3. 行人目标 < 5m: 警戒
行人目标 < 2m: 进行报警
3. 直接把新的一个frame传入即可
""" | fromthefox/Pedestrian-distance-prediction-based-on-Kalman-filtering | AIT1000_kalman.py | AIT1000_kalman.py | py | 11,951 | python | zh | code | 1 | github-code | 13 |
38659326962 | import pickle
import numpy as np
import os
import matplotlib.pyplot as plt
import cv2
from scipy import misc
def map_loss(dot_org_img,x,y,size,feature_value):
return np.sum(dot_org_img[x:x+size,y:y+size]) - feature_value
channel_map_list = pickle.load(open('feature_map_list.p','rb'))
final = np.zeros((20,30))
images_path = pickle.load(open('dataset.p','rb'))
list_path = 'imglist.p'
image_list=[]
if not os.path.exists(list_path):
for num,i in enumerate(images_path):
img=misc.imread(name = i,flatten = False , mode = 'RGB')
tiny_image=misc.imresize(img,(64,64,3))
image_list += [tiny_image]
pickle.dump(image_list,open('imglist.p','wb'))
else:
image_list = pickle.load(open('imglist.p','rb'))
# for i in channel_map_list:
# print(i)
org = pickle.load(open('feature_map_list.p','rb'))
org = np.array(org)
print(org.shape)
width = channel_map_list[0].shape[0]
height = channel_map_list[0].shape[1]
image = np.zeros((width,height))
#image[:]=1300
print(len(channel_map_list))
for i in range(0,width):
for j in range(0,height):
max_value = map_loss(org,i,j,64,channel_map_list[0][i][j])
#max_value = channel_map_list[0][i][j]
index = 0
for k in range(0,len(channel_map_list)):
if max_value < map_loss(org,i,j,64,channel_map_list[k][i][j]):
max_value = map_loss(org,i,j,64,channel_map_list[k][i][j])
index = k
image[i][j] = index
print(image)
final_image = np.zeros((1280,1920,3),dtype = 'uint8')
for i in range(image.shape[0]):
for j in range(image.shape[1]):
#print(image_list[1].shape)
#print(image[i,j])
if int(image[i,j]) != 1300:
final_image[i*64:(i+1)*64,j*64:(j+1)*64,:] = image_list[int(image[i,j])]
else:
final_image[i*64:(i+1)*64,j*64:(j+1)*64,:] = 0
plt.imshow(final_image)
final_save = np.zeros((1280,1920,3),dtype = 'uint8')
final_save[:,:,0] = final_image[:,:,2]
final_save[:,:,1] = final_image[:,:,0]
final_save[:,:,2] = final_image[:,:,1]
cv2.imwrite('final.jpg',final_save)
plt.show()
| pohanchi/2018_DSP_FINAL_Mosaic | DSP/DSP/mapping.py | mapping.py | py | 2,184 | python | en | code | 0 | github-code | 13 |
28188254585 | from misc import allstrings,functions
from misc.dbfunctions import WebtyDb
import datetime,ntplib,time,os,sys
from PyQt4 import QtGui,QtCore
class TimeMonitor():
def __init__(self):
self.currentTime = ''
self.allowedTimeBackdatedInSeconds = functions.calculateAllowedTimeInSeconds(numOfMins=20)
self.allowedForwardTimeInSeconds = functions.calculateAllowedTimeInSeconds(numOfDays=10)
self.db = WebtyDb()
def setCurrentTime(self,currentTime):
self.currentTime = currentTime
def getCurrentTime(self):
return self.currentTime
def confirmTimeIntegrity(self):
mainTime = self.getStoredTimeFromDb()
internetTime = self.getCurrentTimeFromInternet()
if internetTime or mainTime=='':
mainTime = functions.convertTimeToDatetime(internetTime)
systemTime = self.getCurrentTimeOfSystem()
# timeDiff = max(systemTime,mainTime) - min(systemTime,mainTime)
if mainTime>systemTime and (mainTime - systemTime).seconds > self.allowedTimeBackdatedInSeconds:
return False
elif systemTime>mainTime and (systemTime - mainTime).seconds > self.allowedForwardTimeInSeconds:
return False
else:
mainTime = systemTime
self.saveCurrentTimeToDb(mainTime)
return True
def getCurrentTimeOfSystem(self):
d = datetime.datetime.now()
return d
# ****************************** DATABASE ************************************
def getDbPrefTableName(self):
return allstrings.webtyprefsTableName
def getAllPrefsData(self):
d = self.db.retrieveAllVals(self.getDbPrefTableName())
return d
def getStoredTimeFromDb(self):
targetDict = {allstrings.webtyprefs_item_column:allstrings.webtyprefs_item_timestamp}
e = self.db.retrieveRecord(self.getDbPrefTableName(),targetDict)
e = functions.dbListTupleToListDict(e,self.getDbPrefTableName())
if e == []:
return ''
e = e[0]
e = e[allstrings.date_added_column]
return e
def saveCurrentTimeToDb(self,currentTime):
found = False
recordDict = {allstrings.webtyprefs_item_column:allstrings.webtyprefs_item_timestamp,
allstrings.date_added_column:currentTime}
data = self.getAllPrefsData()
data = functions.dbListTupleToListDict(data,self.getDbPrefTableName())
for i in data:
if allstrings.webtyprefs_item_timestamp in i.values():
found = True
dataId = i[allstrings.allid_column]
self.db.updateRecord(self.getDbPrefTableName(),recordDict,dataId)
break
if found == False:
self.db.insertNewRecord(self.getDbPrefTableName(),recordDict)
return True
# **************************************************************************
#********************************** INTERNET *******************************
def getCurrentTimeFromInternet(self):
try:
client = ntplib.NTPClient()
response = client.request('pool.ntp.org')
# response = time.strftime('%Y-%m-%d-%H:%M',time.localtime(response.tx_time))
response = time.localtime(response.tx_time)
return response
except:
return False
# **************************************************************************
if __name__ == '__main__':
t = TimeMonitor()
r = t.confirmTimeIntegrity() | brownharryb/webtydesk | time_monitor.py | time_monitor.py | py | 3,529 | python | en | code | 0 | github-code | 13 |
30753449238 | import json
import pymysql
import boto3
def get_meme_by_id(event, context):
# Fetch RDS connection details from Parameter Store
parameter_store = boto3.client('ssm')
rds_host = parameter_store.get_parameter(Name='memify-db-url')['Parameter']['Value']
username = parameter_store.get_parameter(Name='memify-db-username')['Parameter']['Value']
password = parameter_store.get_parameter(Name='memify-db-password', WithDecryption=True)['Parameter']['Value']
db_name = parameter_store.get_parameter(Name='memify-db-name')['Parameter']['Value']
# Extract meme ID from path parameters
meme_id = event['pathParameters']['id']
# Connect to the RDS database
conn = pymysql.connect(
host=rds_host,
user=username,
password=password,
db=db_name,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor
)
try:
with conn.cursor() as cursor:
# Return picture URL, title, views, likes and shares for the meme.
sql = '''
SELECT
m.id AS meme_id,
p.image_url,
m.date_created,
m.name,
COUNT(DISTINCT l.id) AS like_count,
COUNT(DISTINCT s.id) AS share_count,
COUNT(DISTINCT v.id) AS view_count
FROM memes m
JOIN pictures p ON m.picture_id = p.id
LEFT JOIN likes l ON l.meme_id = m.id
LEFT JOIN shares s ON s.meme_id = m.id
LEFT JOIN views v ON v.meme_id = m.id
WHERE m.id = %s
GROUP BY m.id, p.image_url
'''
cursor.execute(sql, (meme_id,))
result = cursor.fetchone()
if result:
body = {
"message": "Meme retrieved successfully",
"meme": result
}
status_code = 200
else:
body = {
"message": "Meme not found"
}
status_code = 404
response = {
"statusCode": status_code,
"body": json.dumps(body, default=str)
}
return response
finally:
# Close the database connection
conn.close() | AjdinBajric/memify-backend | memes/get-by-id/handler.py | handler.py | py | 2,283 | python | en | code | 0 | github-code | 13 |
7858934564 | # -*- coding: utf-8 -*-
"""
A helper module to work with CloudWatch Logs Group, Stream, put log events,
and query logs insights.
Requirements:
- Python: 3.7+
- Dependencies:
# content of requirements.txt
boto3
func_args>=0.1.1,<1.0.0
Usage:
.. code-block:: python
from aws_cloudwatch_logs_insights_query import (
get_log_group,
create_log_group,
delete_log_group,
get_log_stream,
create_log_stream,
delete_log_stream,
Event,
BaseJsonMessage,
put_log_events,
get_ts_in_second,
get_ts_in_millisecond,
QueryStatusEnum,
wait_logs_insights_query_to_succeed,
run_query,
reformat_query_results,
)
"""
import typing as T
import time
import json
import enum
import dataclasses
from datetime import datetime, timezone, timedelta
import botocore.exceptions
from func_args import NOTHING, resolve_kwargs
if T.TYPE_CHECKING: # pragma: no cover
from mypy_boto3_logs import CloudWatchLogsClient
from mypy_boto3_logs.type_defs import (
LogGroupTypeDef,
LogStreamTypeDef,
PutLogEventsResponseTypeDef,
GetQueryResultsResponseTypeDef,
)
# ------------------------------------------------------------------------------
# Idempotent API
#
# CRUD for log group and stream in boto3 is not idempotent, they don't check
# if the resource already exists or not. So we made some improvements.
#
# - get_xyz:
# - create_xyz:
# - delete_xyz:
# ------------------------------------------------------------------------------
# --- Log Group ---
def get_log_group(
logs_client: "CloudWatchLogsClient",
group_name: str,
) -> T.Optional[T.Union[dict, "LogGroupTypeDef"]]:
"""
Get a log group details by name, if it doesn't exist, return None.
Ref:
- describe_log_groups: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs/client/describe_log_groups.html
:return: A dict with the log group details, or None if it doesn't exist.
"""
res = logs_client.describe_log_groups(
logGroupNamePrefix=group_name,
)
groups = [
dct
for dct in res.get("logGroups", [])
if dct.get("logGroupName", "******") == group_name
]
if len(groups) == 1:
return groups[0]
else:
return None
def create_log_group(
logs_client: "CloudWatchLogsClient",
group_name: str,
kms_key_id: str = NOTHING,
tags: T.Dict[str, str] = NOTHING,
) -> bool:
"""
Create a log group, if it already exists, do nothing.
Ref:
- create_log_group: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs/client/create_log_group.html
:return: True if the log group was created, False if it already existed.
"""
try:
logs_client.create_log_group(
**resolve_kwargs(
logGroupName=group_name,
kmsKeyId=kms_key_id,
tags=tags,
)
)
return True
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "ResourceAlreadyExistsException":
return False
else: # pragma: no cover
raise e
def delete_log_group(
logs_client: "CloudWatchLogsClient",
group_name: str,
) -> bool:
"""
Delete a log group, if it doesn't exist, do nothing.
Ref:
- delete_log_group: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs/client/delete_log_group.html
:return: True if the log group was deleted, False if it didn't exist.
"""
try:
logs_client.delete_log_group(
logGroupName=group_name,
)
return True
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "ResourceNotFoundException":
return False
else: # pragma: no cover
raise e
# --- Log Stream ---
def get_log_stream(
logs_client: "CloudWatchLogsClient",
group_name: str,
stream_name: str,
) -> T.Optional[T.Union[dict, "LogStreamTypeDef"]]:
"""
Get a log stream details by name, if it doesn't exist, return None.
Ref:
- describe_log_streams: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs/client/describe_log_streams.html
:return: A dict with the log stream details, or None if it doesn't exist.
"""
res = logs_client.describe_log_streams(
logGroupName=group_name,
logStreamNamePrefix=stream_name,
)
streams = [
dct
for dct in res.get("logStreams", [])
if dct.get("logStreamName", "unknown-log-stream-name") == stream_name
]
if len(streams):
return streams[0]
else:
return None
def create_log_stream(
logs_client: "CloudWatchLogsClient",
group_name: str,
stream_name: str,
) -> bool:
"""
Create a log stream, if it already exists, do nothing.
Ref:
- create_log_stream: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs/client/create_log_stream.html
:return: True if the log stream was created, False if it already existed.
"""
try:
logs_client.create_log_stream(
logGroupName=group_name,
logStreamName=stream_name,
)
return True
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "ResourceAlreadyExistsException":
return False
else: # pragma: no cover
raise e
def delete_log_stream(
logs_client: "CloudWatchLogsClient",
group_name: str,
stream_name: str,
) -> bool:
"""
Delete a log stream, if it doesn't exist, do nothing.
Ref:
- delete_log_stream: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs/client/delete_log_stream.html
:return: True if the log stream was deleted, False if it didn't exist.
"""
try:
logs_client.delete_log_stream(
logGroupName=group_name,
logStreamName=stream_name,
)
return True
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "ResourceNotFoundException":
return False
else: # pragma: no cover
raise e
# ------------------------------------------------------------------------------
# Idempotent API
#
# CRUD for log group and stream in boto3 is not idempotent, they don't check
# if the resource already exists or not. So we made some improvements.
#
# - get_xyz:
# - create_xyz:
# - delete_xyz:
# ------------------------------------------------------------------------------
EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
def get_utc_now() -> datetime:
return datetime.utcnow().replace(tzinfo=timezone.utc)
def get_utc_now_ts() -> int:
"""
The put log events API expects a timestamp in milliseconds since epoch.
"""
return int((get_utc_now() - EPOCH).total_seconds() * 1000)
@dataclasses.dataclass
class Event:
"""
Log event data model.
"""
message: str = dataclasses.field()
timestamp: int = dataclasses.field(default_factory=get_utc_now_ts)
@dataclasses.dataclass
class BaseJsonMessage:
"""
Base class for json encoded log message.
"""
def to_json(self) -> str:
"""
Convert the object to a json string.
You can override this method to customize the json serialization.
"""
return json.dumps(dataclasses.asdict(self))
@classmethod
def from_json(cls, json_str: str): # pragma: no cover
"""
You can override this module to customize the json deserialization.
"""
dct = json.loads(json_str)
return cls(**dct)
def put_log_events(
logs_client: "CloudWatchLogsClient",
group_name: str,
stream_name: str,
events: T.List[Event],
auto_create_stream: bool = True,
) -> T.Optional[T.Union[dict, "PutLogEventsResponseTypeDef"]]:
"""
Put a list of events into a log stream.
Ref:
- put_log_events: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs/client/put_log_events.html
:param logs_client: The boto3 logs client.
:param group_name: The log group name.
:param stream_name: The log stream name.
:param events: A list of :class:`Event` objects.
:param auto_create_stream: if True, f log stream doesn't exist,
automatically create it.
:return: A dict with the response from the put_log_events call.
"""
if len(events) == 0: # pragma: no cover
return None
kwargs = dict(
logGroupName=group_name,
logStreamName=stream_name,
logEvents=[dataclasses.asdict(event) for event in events],
)
try:
res = logs_client.put_log_events(**kwargs)
return res
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "ResourceNotFoundException":
if "log stream" in str(e):
if auto_create_stream:
create_log_stream(logs_client, group_name, stream_name)
res = logs_client.put_log_events(**kwargs)
return res
raise e # pragma: no cover
def get_ts(dt: datetime) -> float:
"""
Convert a datetime object to a timestamp in seconds since epoch.
It assumes the datetime object is in UTC if it doesn't have a timezone.
"""
if dt.tzinfo is None:
dt = dt.replace(tzinfo=timezone.utc)
else:
dt = dt.astimezone(timezone.utc)
return (dt - EPOCH).total_seconds()
def get_ts_in_second(dt: datetime) -> int:
"""
Convert a datetime object to a timestamp in seconds since epoch.
"""
return int(get_ts(dt))
def get_ts_in_millisecond(dt: datetime) -> int:
"""
Convert a datetime object to a timestamp in milliseconds since epoch.
"""
return int(get_ts(dt) * 1000)
class QueryStatusEnum(str, enum.Enum):
"""
Enum for the query status.
Ref: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs/client/get_query_results.html
"""
Scheduled = "Scheduled"
Running = "Running"
Complete = "Complete"
Failed = "Failed"
Cancelled = "Cancelled"
Timeout = "Timeout"
Unknown = "Unknown"
def wait_logs_insights_query_to_succeed(
logs_client: "CloudWatchLogsClient",
query_id: str,
delta: int = 1,
timeout: int = 30,
) -> T.Union[dict, "GetQueryResultsResponseTypeDef"]:
"""
Wait a given athena query to reach ``Complete`` status. If failed,
raise ``RuntimeError`` immediately. If timeout, raise ``TimeoutError``.
Ref:
- get_query_results: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs/client/get_query_results.html
:param logs_client: The boto3 cloudwatch logs client.
:param query_id: The query id from the response of ``start_query`` API call.
:param delta: The time interval in seconds between each query status check.
:param timeout: The maximum time in seconds to wait for the query to succeed.
"""
elapsed = 0
for _ in range(999999):
res = logs_client.get_query_results(queryId=query_id)
status = res["status"]
if status == QueryStatusEnum.Complete.value:
return res
elif status in [
QueryStatusEnum.Failed.value,
QueryStatusEnum.Cancelled.value,
QueryStatusEnum.Timeout.value,
]: # pragma: no cover
raise RuntimeError(f"query {query_id} reached status: {status}")
else:
time.sleep(delta)
elapsed += delta
if elapsed > timeout: # pragma: no cover
raise TimeoutError(f"logs insights query timeout in {timeout} seconds!")
def strip_out_limit_clause(query: str) -> str:
"""
Strip out the limit clause from a query string.
"""
lines = query.splitlines()
return "\n".join([line for line in lines if not line.startswith("| limit")])
def get_time_range(
last_n_minutes: T.Union[int, float] = 0,
last_n_hours: T.Union[int, float] = 0,
last_n_days: T.Union[int, float] = 0,
) -> T.Tuple[datetime, datetime]:
"""
Calculate the start and end datetime for a given time range.
"""
if all(
[last_n_minutes == 0, last_n_hours == 0, last_n_days == 0]
): # pragma: no cover
raise ValueError
end_datetime = get_utc_now()
start_datetime = end_datetime - timedelta(
days=last_n_days,
hours=last_n_hours,
minutes=last_n_minutes,
)
return start_datetime, end_datetime
def run_query(
logs_client: "CloudWatchLogsClient",
query: str,
log_group_name: T.Optional[str] = NOTHING,
log_group_name_list: T.Optional[T.List[str]] = NOTHING,
log_group_id_list: T.Optional[T.List[str]] = NOTHING,
start_datetime: T.Optional[datetime] = None,
end_datetime: T.Optional[datetime] = None,
last_n_minutes: T.Optional[int] = 0,
last_n_hours: T.Optional[int] = 0,
last_n_days: T.Optional[int] = 0,
limit: int = 1000,
wait: bool = True,
delta: int = 1,
timeout: int = 30,
) -> T.Tuple[str, T.Optional[T.Union[dict, "GetQueryResultsResponseTypeDef"]]]:
"""
Run a logs insights query and wait for the query to succeed. It is a more
human friendly wrapper of the ``start_query`` and ``get_query_results`` API.
Ref: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs/client/start_query.html
:param logs_client: The boto3 cloudwatch logs client.
:param query: The query string. don't use ``| limit abc`` in your query,
use the ``limit`` parameter instead.
:param log_group_name: see ``start_query`` API.
:param log_group_name_list: see ``start_query`` API.
:param log_group_id_list: see ``start_query`` API.
:param start_datetime: python datetime object for start time,
if timezone is not set, it assumes UTC.
:param end_datetime: python datetime object for end time,
if timezone is not set, it assumes UTC.
:param last_n_minutes: query the time range from now to ``last_n_minutes`` ago.
:param last_n_hours: query the time range from now to ``last_n_hours`` ago.
:param last_n_days: query the time range from now to ``last_n_days`` ago.
:param wait: if True, wait until query succeeded and return the query result,
otherwise return the query id only and set query result as None.
:param limit: see ``start_query`` API.
:param delta: The time interval in seconds between each query status check.
:param timeout: The maximum time in seconds to wait for the query to succeed.
"""
# resolve start and end time
if start_datetime is None and end_datetime is None:
start_datetime, end_datetime = get_time_range(
last_n_minutes=last_n_minutes,
last_n_hours=last_n_hours,
last_n_days=last_n_days,
)
# resolve start_query kwargs
start_ts = get_ts_in_second(start_datetime)
end_ts = get_ts_in_second(end_datetime)
kwargs = dict(
logGroupName=log_group_name,
logGroupNames=log_group_name_list,
logGroupIds=log_group_id_list,
startTime=start_ts,
endTime=end_ts,
queryString=query,
limit=limit,
)
# run query
res = logs_client.start_query(**resolve_kwargs(**kwargs))
# get results
query_id = res["queryId"]
if wait:
res = wait_logs_insights_query_to_succeed(logs_client, query_id, delta, timeout)
else: # pragma: no cover
res = None
return query_id, res
def extract_query_results(response: dict) -> T.List[dict]:
"""
The ``get_query_results`` API response returns the query results in a
list of key value pair format. Human usually prefer dict format. This function
can extract the ``results`` field and reformat it to a list of dict.
.. code-block:: python
{
'results': [
[
{
'field': 'string',
'value': 'string'
},
{
'field': 'string',
'value': 'string'
},
...
],
],
...
}
:param response: the response from ``get_query_results`` API call.
:return: a list of dict.
"""
return [
{dct["field"]: dct["value"] for dct in result}
for result in response.get("results", [])
]
| MacHu-GWU/fixa-project | fixa/aws/aws_cloudwatch_logs_insights_query.py | aws_cloudwatch_logs_insights_query.py | py | 16,721 | python | en | code | 0 | github-code | 13 |
73295909776 | from collections import defaultdict
from datetime import datetime, timedelta, timezone
from connect.client import ConnectClient, R
TCR_UPDATE_TYPE_MAPPING = {
'setup': 'new',
'update': 'update',
'adjustment': 'update',
}
def remove_properties(obj: dict, properties: list):
for prop in properties:
if prop in obj.keys():
obj.pop(prop)
def verify_property(obj: dict, properties: dict[str, str]):
for prop in properties.keys():
if prop not in obj.keys():
obj[prop] = properties[prop]
def populate_dependents(parameters: list):
"""
Approx. the following reversal logic is directly applied to the list of parameters for deps:
[
{'id': 'PRM-1', 'constraints': {'dependency': {'parameter': {'id': 'PRM-2'}}}},
{'id': 'PRM-2'},
]
->
[
{'id': 'PRM-1'},
{'id': 'PRM-2', 'constraints': {'dependents': [{'id': 'PRM-2'}]}},
]
"""
dependency_exists = False
dependency_map = defaultdict(list)
for param in parameters:
dep = ((param.get('constraints') or {}).get('dependency') or {}).pop('parameter', None)
if dep:
dependency_exists = True
dependent_object = param['constraints'].pop('dependency')
dependent_object['id'] = param['id']
dependent_object['name'] = param.get('name')
dependent_object['value'] = dependent_object.pop('values', [])
dependency_map[dep['id']].append(dependent_object)
if not dependency_exists:
return
for param in parameters:
dependents = dependency_map.get(param['id'])
if dependents:
param.setdefault('constraints', {})
param['constraints']['dependents'] = dependents
def sanitize_product(product: dict):
remove_properties(
product,
[
'changes_description',
'public',
'events',
'configurations',
'usage_rule',
'stats',
'extensions',
],
)
verify_property(
product,
{
'published_at': datetime.now(
tz=timezone(timedelta(hours=0)),
).isoformat(timespec='seconds'),
},
)
return product
def sanitize_parameters(parameters: list):
populate_dependents(parameters)
for parameter in parameters:
remove_properties(
parameter,
['events'],
)
return parameters
def prepare_product_data_from_listing_request(
client: ConnectClient,
listing_request: dict,
):
product_id = listing_request['product']['id']
data = {
'table_name': 'cmp_connect_product',
'update_type': listing_request['type'],
'product': listing_request['product'],
}
if listing_request['type'] != 'remove':
data['product'] = sanitize_product(client.products[product_id].get())
data['product']['parameters'] = sanitize_parameters(
list(client.products[product_id].parameters.all()),
)
else:
verify_property(
listing_request['product'],
{
'published_at': datetime.now(
tz=timezone(timedelta(hours=0)),
).isoformat(timespec='seconds'),
},
)
return data
def prepare_product_data_from_product(
client: ConnectClient,
product: dict,
):
product_id = product['id']
data = {
'table_name': 'cmp_connect_product',
'update_type': 'update',
'product': sanitize_product(product),
}
data['product']['parameters'] = sanitize_parameters(
list(client.products[product_id].parameters.all()),
)
return data
def clear_gdpr_data(tc: dict):
    """Remove personally identifiable contact data (GDPR) from a tier
    configuration: the account itself plus any tier1/tier2 entries."""
    remove_properties(tc['account'], ['contact_info'])
    if 'tiers' in tc:
        for level in ('tier1', 'tier2'):
            if level in tc['tiers'].keys():
                remove_properties(tc['tiers'][level], ['contact_info'])
def fix_param_id_and_name(client: ConnectClient, tc: dict):
    """Rewrite tier config parameter ids to the product's canonical ids.

    NOTE(review): the incoming ``tc['params']`` entries appear to carry
    the parameter *name* in their ``id`` field — this function looks the
    names up in the product definition and swaps in the real ids, keeping
    the original value in ``name``. Confirm against the Connect API.
    """
    # The tc params' 'id' values are matched against product parameter names.
    parameter_names = [param['id'] for param in tc['params']]
    parameters = client.products[tc['product']['id']].parameters.filter(
        R().name.in_(parameter_names),
    )
    param_name_id_map = {param['name']: param['id'] for param in parameters}
    for param in tc['params']:
        sanitize_tc_param(param)
        # Preserve the original identifier as the display name, then
        # replace the id with the canonical product parameter id if known.
        param['name'] = param['id']
        if param['id'] in param_name_id_map.keys():
            param['id'] = param_name_id_map[param['id']]
def sanitize_tcr(tcr: dict):
    """Drop request-level bookkeeping fields from a tier config request
    (mutates *tcr* in place)."""
    fields_to_drop = [
        'parent_configuration',
        'events',
        'previous_approved_request',
        'assignee',
    ]
    remove_properties(tcr, fields_to_drop)
def include_last_tcr_request(client: ConnectClient, tc: dict):
    """Attach the most recent tier config request to ``tc['last_request']``.

    Queries the Connect 'tier' namespace for requests belonging to this
    configuration, excluding the heavy ``tiers``/``configuration``
    sub-objects, newest first.
    """
    last_tcr = client('tier').config_requests.filter(
        R().configuration.id.eq(tc['id']),
    ).select(
        '-tiers',
        '-configuration',
    ).order_by('-created').first()
    # NOTE(review): if no request exists, first() presumably returns None
    # and sanitize_tcr would fail on it — confirm upstream guarantees.
    sanitize_tcr(last_tcr)
    tc['last_request'] = last_tcr
def sanitize_tc(client: ConnectClient, tc: dict):
    """Prepare a tier configuration for streaming: strip internal fields,
    scrub GDPR contact data, normalize parameter ids and guarantee a
    ``published_at`` timestamp. Mutates and returns *tc*.
    """
    remove_properties(tc, [
        'connection',
        'events',
        'template',
        'open_request',
    ])
    clear_gdpr_data(tc)
    fix_param_id_and_name(client, tc)
    timestamp = datetime.now(
        tz=timezone(timedelta(hours=0)),
    ).isoformat(timespec='seconds')
    verify_property(tc, {'published_at': timestamp})
    return tc
def sanitize_tc_param(tc_param: dict):
    """Drop the validation-error field from a tier config parameter and
    return the (mutated) dict."""
    remove_properties(tc_param, ['value_error'])
    return tc_param
def prepare_tc_data_from_tcr(client: ConnectClient, tcr: dict):
    """Build the datalake payload for a tier config request event.

    The full tier configuration is re-fetched from Connect, enriched with
    its latest request, sanitized, and wrapped with an update type mapped
    from the request type via TCR_UPDATE_TYPE_MAPPING.
    """
    tc_id = tcr['configuration']['id']
    tcr_type = tcr['type']
    tc = client('tier').configs[tc_id].get()
    include_last_tcr_request(client, tc)
    return {
        'table_name': 'cmp_connect_tierconfig',
        'update_type': TCR_UPDATE_TYPE_MAPPING[tcr_type],
        'tier_config': sanitize_tc(client, tc),
    }
def prepare_tc_data(client: ConnectClient, tc: dict):
    """Build the datalake payload for a plain tier config update.

    Same envelope as prepare_tc_data_from_tcr, but the config object is
    already in hand and the update type is always 'update'.
    """
    include_last_tcr_request(client, tc)
    return {
        'table_name': 'cmp_connect_tierconfig',
        'update_type': 'update',
        'tier_config': sanitize_tc(client, tc),
    }
def sanitize_translation(translation: dict):
    """Normalize a translation object for streaming.

    Drops internal fields, guarantees a ``published_at`` timestamp and
    flattens the product/locale references into top-level ids. Mutates
    and returns the same dict.
    """
    remove_properties(translation, ['events', 'comment'])
    timestamp = datetime.now(
        tz=timezone(timedelta(hours=0)),
    ).isoformat(timespec='seconds')
    verify_property(translation, {'published_at': timestamp})
    # Flatten nested references for the datalake schema.
    translation['product_id'] = translation['context']['instance_id']
    translation['locale_id'] = translation['locale']['id']
    return translation
def sanitize_translation_attribute(attribute: dict):
    """Drop fields that are irrelevant downstream from a translation
    attribute and return the (mutated) dict."""
    remove_properties(attribute, ['events', 'auto_translated'])
    return attribute
def include_translation_attributes(client: ConnectClient, translation: dict):
    """Fetch every attribute of *translation* from the localization API
    and attach the sanitized results under ``translation['attributes']``."""
    translation['attributes'] = []
    for item in client('localization').translations[translation['id']].attributes.all():
        translation['attributes'].append(sanitize_translation_attribute(item))
def prepare_translation_data(client: ConnectClient, translation: dict):
    """Build the datalake payload for a translation update: fetch its
    attributes, sanitize everything and wrap in the streaming envelope."""
    include_translation_attributes(client, translation)
    return {
        'table_name': 'cmp_connect_translation',
        'update_type': 'update',
        'translation': sanitize_translation(translation),
    }
| cloudblue/extension-xv-datalake | connect_ext_datalake/services/payloads.py | payloads.py | py | 7,774 | python | en | code | 1 | github-code | 13 |
12051054872 | #!/usr/bin/env python
# coding: utf-8
# In[2]:
import numpy as np
import sklearn as sk
from sklearn import ensemble
from sklearn import tree
from sklearn.metrics import accuracy_score
import graphviz
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
from collections import Counter
import os #to read files we use library os
def make_dict(root_dir):
    """Build the training vocabulary from the mail corpus under *root_dir*.

    Every line of every file is split on single spaces; tokens that are
    not purely alphabetic (this also discards newline-carrying tokens)
    or are a single character are dropped.

    Returns a list of ``(word, count)`` pairs — the 3000 most frequent
    words, most frequent first.
    """
    tokens = []
    for filename in os.listdir(root_dir):
        path = os.path.join(root_dir, filename)
        with open(path) as handle:
            for line in handle:
                # split(' ') keeps the trailing '\n' on the last token;
                # the isalpha() filter below removes those tokens
                tokens += line.split(' ')
    vocabulary = Counter(tokens)
    for token in list(vocabulary):
        if not token.isalpha() or len(token) == 1:
            del vocabulary[token]
    return vocabulary.most_common(3000)
# Hard-coded local corpus locations -- adjust to your environment.
TRAIN_DIR = "/home/prajakta/Desktop/train-mails"
TEST_DIR = "/home/prajakta/Desktop/test-mails"
# Module-level vocabulary: extract_features() below reads this global.
dictionary = make_dict(TRAIN_DIR)
print(dictionary)
# In[3]:
from sklearn.metrics import accuracy_score
def extract_features(mail_dir):
    """Vectorize every mail under *mail_dir* against the global vocabulary.

    Only line index 2 of each file (the message body in this corpus) is
    scanned. Returns ``(features_matrix, train_labels)``: row *d* holds
    the per-word counts of document *d* over the 3000-word vocabulary,
    and the label is 1 for spam (file name starting with "spmsg"), 0
    otherwise.

    NOTE(review): relies on the module-level ``dictionary`` produced by
    make_dict() -- a list of (word, count) pairs.
    """
    files = [os.path.join(mail_dir, name) for name in os.listdir(mail_dir)]
    features_matrix = np.zeros((len(files), 3000))
    train_labels = np.zeros(len(files))
    spam_total = 0
    for doc_id, path in enumerate(files):
        with open(path) as handle:
            for line_no, line in enumerate(handle):
                if line_no != 2:
                    continue
                words = line.split()
                for word in words:
                    # linear scan of the 3000-entry vocabulary; a word
                    # that is absent simply leaves its column untouched
                    for vocab_id, entry in enumerate(dictionary):
                        if entry[0] == word:
                            features_matrix[doc_id, vocab_id] = words.count(word)
        train_labels[doc_id] = 0
        last_token = path.split('/')[-1]
        if last_token.startswith("spmsg"):
            train_labels[doc_id] = 1
            spam_total = spam_total + 1
    return features_matrix, train_labels
# NOTE(review): these notebook cells repeat the TRAIN_DIR/TEST_DIR constants
# and rebuild the vocabulary already created above -- harmless but redundant.
TRAIN_DIR = "/home/prajakta/Desktop/train-mails"
TEST_DIR = "/home/prajakta/Desktop/test-mails"
features_matrix,labels = extract_features(TRAIN_DIR)
print(features_matrix,labels)
# In[5]:
from sklearn.ensemble import RandomForestClassifier
TRAIN_DIR = "/home/prajakta/Desktop/train-mails"
TEST_DIR = "/home/prajakta/Desktop/test-mails"
# rebuild the global vocabulary used by extract_features()
dictionary = make_dict(TRAIN_DIR)
print("reading and processing emails from file")
#creating train data set
features_matrix, labels = extract_features(TRAIN_DIR)
#creating test data set
test_feature_matrix, test_labels = extract_features(TEST_DIR)
#Random forest classifier
model = RandomForestClassifier()
print("Training model")
#train model
model.fit(features_matrix, labels)#spam and not spam labels
predicted_labels = model.predict(test_feature_matrix)
print("FINISHED classifying.accuracy score:")
print(accuracy_score(test_labels, predicted_labels))
# In[ ]:
# In[ ]:
# In[ ]:
| sawantprajakta/Machine_Learning | MachineLearning/Supervised_Algorithms/EnsembleAlgorithm_RandomForest(using mails dataset).py | EnsembleAlgorithm_RandomForest(using mails dataset).py | py | 3,446 | python | en | code | 0 | github-code | 13 |
70160337937 | # coding=utf8
"""Finite analog input task with a reference trigger.
Demo script for acquiring a finite (but unknown) number of analog
values with a National Instruments DAQ device, where both the start
and end of the acquisition is given by triggers.
To test this script, the NI MAX (Measurement & Automation
Explorer) has been used to create simulated devices.
In this test, a simulated device NI PCIe-6321 with 16 analog input (AI)
ports was created and named "Dev1".
The start trigger can watch a raising or falling edge of an analog
or digital signal. If an analog signal is chosen, a certain threshold
value has to be given, too (see parameter 'startTriggerLevel').
The stop trigger is given by a so called reference trigger. This
can only be enabled for finite, input tasks. However, such a trigger
will implicitely let the finite task behave like a continuous task.
This means, that you have to continuously retrieve the newest data using
'getVal' or 'copyVal' such that the internal buffer does not overflow.
The stop event for the task is defined by three conditions, that have
to be met: At first, a certain number of samples (refTriggerPreTriggerSamples)
have to be acquired, before the raising or falling edge of the given
refTriggerSource is monitored. Then, this source must have the requested
signal change. Once, this change is detected, the task will record further
samples, whose number is called postTriggerSamples. They are calculated by
"samplesPerChannel" - "refTriggerPreTriggerSamples". Then, the task is
stopped and the parameter "taskStarted" becomes 0.
Hint: It depends on the NI DAQ devices, if they allow
integrating different devices into the same measurement
task or not. Many devices do not allow this.
Hint: The developer could only test the reference trigger with a
simulated NI device. Such a simulated device fires the refTriggerSource
immediately, so complete end-to-end testing was not possible.
"""
import time
# Demo 1: Analog input task, finite acquisition, 80 samples / sec
plugin = dataIO(
"NI-DAQmx",
"analogInput",
taskName="myTaskName",
taskMode="finite",
samplingRate=200
)
plugin.showToolbox()
# Each getVal / copyVal command will retrieve 800 samples per
# channel. This is also the number used to calculate the post-trigger
# samples ("samplesPerChannel" - "refTriggerPreTriggerSamples")
plugin.setParam("samplesPerChannel", 800)
# Configure the channels:
plugin.setParam("channels", "Dev1/ai0,2,-10.0,10.0;Dev1/ai2,0,-5,5")
# enable a start trigger: here acquisition starts with a raising
# edge on the digital trigger input PFI0 (simulated devices will
# automatically send this trigger).
plugin.setParam("startTriggerMode", "digitalEdge")
plugin.setParam("startTriggerSource", "PFI0")
plugin.setParam("startTriggerRisingEdge", 1)
# enable a reference trigger using a digital, falling edge of PFI0 as
# trigger signal. The task is only stopped, if the trigger has been
# detected, at least pre-trigger samples have been acquired and after
# the trigger signal, another ("samplesPerChannel" - preTriggerSamples)
# will be acquired.
plugin.setParam("refTriggerMode", "digitalEdge")
plugin.setParam("refTriggerSource", "PFI0")
plugin.setParam("refTriggerRisingEdge", 0)
plugin.setParam("refTriggerPreTriggerSamples", 200)
# enable the on-board clock as continuous trigger
plugin.setParam("sampleClockSource", "OnboardClock")
# after having configured the task, start the device.
# The task is then configured in the device. It will be
# started with plugin.acquire() later.
plugin.startDevice()
# start the acquisition
plugin.acquire()
a = []
# continuously obtain new data until the task is not started
# any more (since the ref. trigger conditions are all met):
# NOTE(review): each copyVal presumably blocks until another
# 'samplesPerChannel' block is available -- confirm with the plugin docs.
while plugin.getParam("taskStarted"):
    print("retrieve subset of data...")
    d = dataObject()
    plugin.copyVal(d)
    a.append(d)
print("The ref. trigger conditions are fulfilled.")
# plot the acquired values from both channels from the last run.
# the output dataObject already contains the correct axes units,
# descriptions etc...
plot1(a[-1],
      properties={"legendPosition": "Right", "legendTitles": ("AI0", "AI2")})
# stop and remove the configured task
plugin.stopDevice()
| itom-project/plugins | niDAQmx/demo/demo_ai_finite_ref_trigger.py | demo_ai_finite_ref_trigger.py | py | 4,233 | python | en | code | 1 | github-code | 13 |
34794687539 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Reference: https://github.com/rm-hull/luma.examples
from pathlib import Path
from PIL import Image
from luma.core.interface.serial import i2c
from luma.oled.device import ssd1306, ssd1325, ssd1331, sh1106
import time
import threading
# Initialise the I2C-attached SSD1306 display.
# NOTE(review): if this fails, `device` stays undefined and main() below
# will raise NameError -- confirm whether the script should exit here.
try:
    serial = i2c(port=1, address=0x3C)  # Set the I2C address of the OLED
    device = ssd1306(serial, rotate=0)
except Exception:
    # Narrowed from a bare `except:` which also swallowed
    # KeyboardInterrupt/SystemExit during start-up.
    print('OLED disconnected')
def main():
    """Spin the Raspberry Pi logo on the OLED forever, in 2-degree steps."""
    # resolve the logo path relative to this script's directory
    img_path = str(Path(__file__).resolve().parent.joinpath('images', 'pi_logo.png'))
    logo = Image.open(img_path).convert("RGBA")
    # solid-white image, composited with the rotated logo each frame
    fff = Image.new(logo.mode, logo.size, (255,) * 4)
    background = Image.new("RGBA", device.size, "white")
    # horizontally centre the logo at the top of the display
    posn = ((device.width - logo.width) // 2, 0)
    while True:
        for angle in range(0, 360, 2):
            rot = logo.rotate(angle, resample=Image.BILINEAR)
            # the rotated image acts as its own alpha mask
            img = Image.composite(rot, fff, rot)
            background.paste(img, posn)
            device.display(background.convert(device.mode))
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| adeept/adeept_OLED | 2_pi_logo.py | 2_pi_logo.py | py | 1,113 | python | en | code | 2 | github-code | 13 |
73961871056 | import json
from ast import literal_eval
from pathlib import Path
import numpy as np
from minibuilder.file_config.parts import load_json
def find_vertices(mesh, *args):
    """Resolve a marker description into a mesh location dict.

    Dispatches on the shape of the first argument:
    * a 3-element coordinate -> match mesh vertices within tolerance and
      return ``{"vertex": i}`` (single hit) or ``{"vertex_list": [...]}``;
    * a dict -> returned unchanged;
    * a string starting with "'face'" -> parsed with literal_eval;
    * a string starting with "'plan'" -> mapped to the facet containing
      the given vertex, falling back to a 'face' entry.
    """
    li = []
    if len(args[0]) == 3:
        for i, (x, y, z) in enumerate(mesh.vertices):
            for value in args:
                check = np.array(value) - np.array([x, y, z])
                # tolerance of 10e-04 (= 1e-3) per coordinate
                if (-10e-04 < check).all() and (check < 10e-04).all():
                    li.append(i)
        if len(li) == 1:
            return {"vertex": li[0]}
        else:
            return {"vertex_list": li}
    elif isinstance(args[0][0], dict):
        return args[0][0]
    elif args[0][0].startswith("'face'"):
        # e.g. "'face': 12" becomes {'face': 12}
        return literal_eval("{" + args[0][0] + "}")
    elif args[0][0].startswith("'plan'"):
        # the trailing number is a vertex id; find the facet containing it
        for i, facet in enumerate(mesh.facets):
            if int(args[0][0].split(':')[-1]) in facet:
                return {"facet": i}
        print('Marker is not on face but on facet')
        return literal_eval("{" + args[0][0].replace('plan', 'face') + "}")
def find_mesh_connector(mesh, graph, form_result, mesh_info):
    """Fill *mesh_info* with connector locations resolved from the form.

    Every ``marker_*`` entry in *form_result* is translated by
    find_vertices() and stored under the corresponding node folder (or
    'bitz' for bitz markers). Returns the mutated *mesh_info*.

    NOTE(review): the marker value is spliced into a literal_eval'd
    string -- assumes the form produces well-formed Python literals.
    """
    # TODO dextral when not needed (ex: node larm)
    if form_result.get('marker_bitz'):
        mesh_info[form_result.get('file_name')]['bitz'] = find_vertices(mesh, *literal_eval(f"[[{form_result.get('marker_bitz')}]]"))
    else:
        for k, v in form_result.items():
            if k.startswith('marker_') and v:
                node = graph.nodes[k.replace('marker_', '')]
                # if the marker names a predecessor of the current node,
                # store the connector under the current node instead
                if k.replace('marker_', '') in list(graph.predecessors(form_result.get('node'))):
                    node = graph.nodes[form_result.get('node')]
                    folder = node['folder']
                    if node.get('dextral'):
                        mesh_info[form_result.get('file_name')]['dextral'] = node.get('dextral')
                    mesh_info[form_result.get('file_name')][folder] = find_vertices(mesh, *literal_eval(f"[[{form_result.get(k)}]]"))
                else:
                    folder = node['folder']
                    if node.get('dex_type'):
                        # dextral-typed nodes keep one entry per handedness
                        if not mesh_info[form_result.get('file_name')].get(folder):
                            mesh_info[form_result.get('file_name')][folder] = {}
                        mesh_info[form_result.get('file_name')][folder].update({node.get('dextral'): find_vertices(mesh, *literal_eval(f"[[{form_result.get(k)}]]"))})
                    else:
                        mesh_info[form_result.get('file_name')][folder] = find_vertices(mesh, *literal_eval(f"[[{form_result.get(k)}]]"))
    return mesh_info
def save_file_config_json(graph, data_folder, builder_name, conf_json, form_result, mesh_info):
    """Merge *mesh_info* into the builder's part configuration JSON.

    Updates (or creates) the category entry inside ``{conf_json}``,
    registers *conf_json* in the builder's ``conf.json`` node/bitz file
    lists, and writes the category file to its dotted-path location.

    BUGFIX: conf.json is opened in "r+" and rewritten in place. The
    original code did ``seek(0); json.dump(...)`` without truncating, so
    whenever the new JSON was shorter than the old one the file was left
    with trailing garbage and became unparseable. A ``truncate()`` call
    now follows the dump.
    """
    if form_result.get('marker_bitz'):
        folder = 'bitz'
    else:
        folder = graph.nodes[form_result.get('node')]['folder']
    try:
        conf = load_json(f"{data_folder}/{builder_name}/configuration/{conf_json}")
        if conf.get(form_result.get('category')):
            # Category exists: merge per-file, or add the whole mesh_info.
            if conf[form_result.get('category')]['stl'].get(form_result.get('file_name')):
                conf[form_result.get('category')]['stl'][form_result.get('file_name')].update(mesh_info[form_result.get('file_name')])
            else:
                conf[form_result.get('category')]['stl'].update(mesh_info)
            print(f"{data_folder}/{builder_name}/configuration/{conf_json} has been updated !")
        else:
            # New category inside an existing configuration file.
            conf[form_result.get('category')] = {
                "desc": {
                    "display": form_result.get('category'),
                    "path": f"{builder_name}/{folder}/{form_result.get('category')}/"
                },
                "stl": mesh_info
            }
            print(f"{data_folder}/{builder_name}/configuration/{conf_json} has been updated with {form_result.get('category')}!")
            if folder == 'bitz':
                conf[form_result.get('category')]["desc"]['bitz'] = True
            else:
                conf[form_result.get('category')]["desc"]["nodes"] = [folder]
    except Exception as e:
        # The configuration file is missing or unreadable: start fresh.
        print(e)
        conf = {
            form_result.get('category'): {
                "desc": {
                    "display": form_result.get('category'),
                    "path": f"{builder_name}/{folder}/{form_result.get('category')}/"
                },
                "stl": mesh_info
            }
        }
        if folder == 'bitz':
            conf[form_result.get('category')]["desc"]['bitz'] = True
        else:
            conf[form_result.get('category')]["desc"]["nodes"] = [folder]
        print(f"{data_folder}/{builder_name}/configuration/{conf_json} has been created with {form_result.get('category')}!")
    with open(f"{data_folder}/{builder_name}/configuration/conf.json", "r+") as node_file:
        data = json.load(node_file)
        if folder == 'bitz':
            data['graph']['bitz_files'].append(conf_json)
            print(f"{conf_json} added to bitz_files !")
        else:
            # Resolve the folder of the selected node, then register the
            # file on every node sharing that folder.
            for i, node in enumerate(data['nodes']):
                if node.get('id') == form_result.get('node'):
                    folder = node.get('folder')
                    break
            for i, node in enumerate(data['nodes']):
                if node.get('folder') == folder:
                    data['nodes'][i]['files'].append(conf_json)
                    print(f"{conf_json} added to files of {form_result.get('node')}!")
        node_file.seek(0)
        json.dump(data, node_file, indent=4)
        node_file.truncate()  # drop any leftover bytes from the old content
    # conf_json's dots (except the extension's) become path separators.
    json_file_path = f"{data_folder}/{builder_name}/configuration/{conf_json}"
    json_file_path = Path(json_file_path.replace('.', '/', json_file_path.count('.') - 1))
    json_file_path.parent.mkdir(parents=True, exist_ok=True)
    with open(str(json_file_path), "w") as outfile:
        json.dump(conf, outfile, indent=4)
| LeoGrosjean/minibuilder_old | minibuilder/utils/mesh_config.py | mesh_config.py | py | 5,904 | python | en | code | 3 | github-code | 13 |
31015613953 | #-*- codeing=utf-8 -*-
#@time: 2020/8/19 12:52
#@Author: Shang-gang Lee
import build_model
import ProcessingData
import training
import pandas as pd
import torch
if __name__ == '__main__':
    # data
    # Load TSV corpora (tab-separated, no header); presumably the CoLA
    # acceptability dataset -- confirm. Paths are Windows-style and relative.
    train_data = pd.read_csv(r'.\data\raw\in_domain_train.tsv',
                           delimiter='\t', header=None, names=['sentence_source', 'label', 'label_notes', 'sentence'])
    dev_data = pd.read_csv(r'.\data\raw\in_domain_dev.tsv',
                         delimiter='\t', header=None, names=['sentence_source', 'label', 'label_notes', 'sentence'])
    # print('train_data.shape:',train_data.shape)
    # print('train_data.isnull.size:',train_data.isnull().sum())
    # print('dev_data.shape:',dev_data.shape)
    # print('dev_data.isnull.size:',dev_data.isnull().sum())
    # NOTE(review): dev_data is loaded but never used below -- validation
    # data comes from get_loader()'s split of the training set instead.
    sentences = train_data.sentence.values
    labels = train_data.label.values
    #getting inputdata
    input_ids, attention_masks =ProcessingData.get_inputdata(sentences)
    labels = torch.tensor(labels)
    #get loader
    train_loader, validation_dataloader=ProcessingData.get_loader(input_ids, attention_masks,labels)
    #build model
    model=build_model.build_model()
    #training
    train_result=training.train(model,train_loader,validation_dataloader)
    print(train_result)
25306710446 | """
This script allows plotting the surface of the bed.
"""
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import random
# With reset of probe
# With reset of probe: Z-probe readings (mm) taken at various (X, Y)
# positions on the bed.
points = [{'Y': 0.0, 'X': 0.0, 'Z': 0.0}, {'Y': 0.0, 'X': 50.0, 'Z': -3.7500000000000207e-05}, {'Y': 50.0, 'X': 0.0, 'Z': 4.9999999999999697e-05}, {'Y': 0.0, 'X': -50.0, 'Z': -3.7500000000000207e-05}, {'Y': -40.0, 'X': 0.0, 'Z': 4.9999999999999697e-05}, {'Y': 0.0, 'X': 25.0, 'Z': 4.9999999999999697e-05}, {'Y': 25.0, 'X': 0.0, 'Z': 7.5000000000000414e-05}, {'Y': 0.0, 'X': -25.0, 'Z': 1.2500000000000358e-05}, {'Y': -25.0, 'X': 0.0, 'Z': 1.2500000000000358e-05}]

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

# Split the probe points into coordinate lists; Z is scaled to microns.
x = []
y = []
z = []
for p in points:
    x.append(p["X"])
    y.append(p["Y"])
    z.append(p["Z"]*1000)
z2 = z  # kept for the (commented-out) diff printout below

# Design matrix for the plane fit z = c0 + c1*x + c2*y.
# (The original np.vander/hstack setup was dead code -- its result was
# immediately overwritten by this column_stack -- so it was removed.)
A = np.column_stack((np.ones(len(x)), x, y))

# Solve for a least squares estimate of the plane coefficients.
(coeffs, residuals, rank, sing_vals) = np.linalg.lstsq(A, z)

# Evaluate the fitted plane on a coarse grid spanning the probed area.
X = np.linspace(min(x), max(x), 3)
Y = np.linspace(min(y), max(y), 3)
X, Y = np.meshgrid(X, Y)
Z = coeffs[0]+coeffs[1]*X + coeffs[2]*Y

# Raw probe points as red circles, fitted plane as a surface.
ax.plot(x, y, z, linestyle="none", marker="o", mfc="none", markeredgecolor="red")
ax.plot_surface(X, Y, Z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')

#print "Diff Z: "+str(np.array(z2)-np.array(z))
plt.show()
| Sciumo/redeem | tools/bed_compensation.py | bed_compensation.py | py | 1,480 | python | en | code | 0 | github-code | 13 |
19442373257 | from past.builtins import basestring
import json
import logging
import pika
# This module provides functions and constants to implement the core protocol
# used by the timer, dispatcher, and ETL services.
ANNOUNCE_SERVICE_EXCHANGE = 'mettle_announce_service'
ANNOUNCE_PIPELINE_RUN_EXCHANGE = 'mettle_announce_pipeline_run'
ACK_PIPELINE_RUN_EXCHANGE = 'mettle_ack_pipeline_run'
NACK_PIPELINE_RUN_EXCHANGE = 'mettle_nack_pipeline_run'
CLAIM_JOB_EXCHANGE = 'mettle_claim_job'
END_JOB_EXCHANGE = 'mettle_end_job'
JOB_LOGS_EXCHANGE = 'mettle_job_logs'
PIKA_PERSISTENT_MODE = 2
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def declare_exchanges(rabbit):
    """Declare every durable topic exchange the mettle protocol uses.

    Safe to call repeatedly; exchange declaration is idempotent.
    """
    all_exchanges = (
        ANNOUNCE_SERVICE_EXCHANGE,
        ANNOUNCE_PIPELINE_RUN_EXCHANGE,
        ACK_PIPELINE_RUN_EXCHANGE,
        NACK_PIPELINE_RUN_EXCHANGE,
        CLAIM_JOB_EXCHANGE,
        END_JOB_EXCHANGE,
        JOB_LOGS_EXCHANGE,
    )
    for name in all_exchanges:
        rabbit.exchange_declare(exchange=name, type='topic', durable=True)
def pipeline_routing_key(service_name, pipeline_name):
    """Return the "<service>.<pipeline>" topic routing key."""
    return '{0}.{1}'.format(service_name, pipeline_name)
def service_queue_name(service_name):
    """Return the queue name used by the ETL service *service_name*."""
    return 'etl_service_{0}'.format(service_name)
def mq_escape(chars):
    """
    Given a string that you might want to use in a RabbitMQ routing key,
    replace any dots, stars, or hashes with underscores, so it won't throw
    off Rabbit's bindings.
    """
    escaped = chars
    for special in ('*', '.', '#'):
        escaped = escaped.replace(special, '_')
    return escaped
def announce_service(rabbit, service_name, pipeline_names):
    """Broadcast that *service_name* is alive and owns *pipeline_names*.

    Publishes a persistent JSON message to the service announcement
    exchange, routed by the service name.
    """
    logger.info("Announcing service %s:%s." % (service_name,
                                               ', '.join(pipeline_names)))
    body = json.dumps({
        'service': service_name,
        'pipeline_names': pipeline_names,
    })
    rabbit.basic_publish(
        exchange=ANNOUNCE_SERVICE_EXCHANGE,
        routing_key=service_name,
        body=body,
        properties=pika.BasicProperties(delivery_mode=PIKA_PERSISTENT_MODE),
    )
def announce_pipeline_run(rabbit, service_name, pipeline_name, target_time,
                          run_id):
    """Announce that a pipeline run should happen for *target_time*.

    Publishes a persistent JSON message routed by
    "<service>.<pipeline>"; ETL services respond with ack/nack.
    """
    payload = {
        'service': service_name,
        'pipeline': pipeline_name,
        'run_id': run_id,
        'target_time': target_time,
    }
    logger.info("Announcing pipeline run %s:%s:%s." % (service_name,
                                                       pipeline_name,
                                                       target_time))
    rabbit.basic_publish(
        exchange=ANNOUNCE_PIPELINE_RUN_EXCHANGE,
        routing_key=pipeline_routing_key(service_name, pipeline_name),
        body=json.dumps(payload),
        properties=pika.BasicProperties(delivery_mode=PIKA_PERSISTENT_MODE)
    )
def find_cycle(targets):
    """
    Given a dict representing a target dependency graph, return a list of any
    nodes involved in a dependency cycle. Returns the first cycle found.
    """
    # Thank you Guido
    # http://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed
    # -graph.html
    # Iterative depth-first search keeping the current path in `stack`;
    # a node reappearing in its own path closes a cycle.
    # NOTE(review): which cycle is "first" depends on set iteration order,
    # so the result is not deterministic across interpreter runs.
    todo = set(targets.keys())
    while todo:
        node = todo.pop()
        stack = [node]
        while stack:
            top = stack[-1]
            for node in targets[top]:
                if node in stack:
                    # cycle found: return the path slice from its start
                    return stack[stack.index(node):]
                if node in todo:
                    stack.append(node)
                    todo.remove(node)
                    break
            else:
                # all of top's dependencies explored; backtrack
                node = stack.pop()
    return None
def validate_targets_graph(targets):
    """Assert that *targets* is a well-formed dependency graph.

    Checks that every key is a string, every value is a list, every
    dependency is itself a key, and that the graph is acyclic. Raises
    AssertionError describing the first violation found.
    """
    for k, v in targets.items():
        # all keys are strings
        assert isinstance(k, basestring), "%s is not a string" % k
        # all values are lists
        assert isinstance(v, list), "%s is not a list" % v
        # each item in each value list is also a key in the dict
        for dep in v:
            assert dep in targets, "%s is not a target" % dep
    # No cycles
    cycle_nodes = find_cycle(targets)
    if cycle_nodes:
        # BUGFIX: the message was built from two adjacent string literals
        # ("...targets:" ", ".join(...)), which Python concatenated FIRST
        # and then used the whole text as the join separator, garbling the
        # error. Concatenate explicitly instead.
        raise AssertionError(
            "Found cycle in target graph involving these targets: "
            + ", ".join(cycle_nodes))
def ack_pipeline_run(rabbit, service_name, pipeline_name, target_time, run_id,
                     targets, target_parameters):
    """Accept an announced pipeline run, publishing its target graph.

    The graph is validated before publishing; see the comment below for
    its expected shape.
    """
    # targets should be a dictionary like this:
    # targets = {
    #     "file1.txt": [],
    #     "file2.txt": [],
    #     "file3.txt": [],
    #     "manifest.txt": ["file1.txt", "file2.txt", "file3.txt"]
    # }
    # Where the key in each dict is a string representing the target to be made,
    # and each value is a list of that target's dependencies.
    #
    # Each dependency must itself be a target (key) in the dict, and cyclical
    # dependencies are not allowed.
    logger.info("Acking pipeline run %s:%s:%s." % (service_name, pipeline_name,
                                                   run_id))
    validate_targets_graph(targets)
    payload = {
        'service': service_name,
        'pipeline': pipeline_name,
        'run_id': run_id,
        'target_time': target_time,
        'targets': targets,
        'target_parameters': target_parameters,
    }
    rabbit.basic_publish(
        exchange=ACK_PIPELINE_RUN_EXCHANGE,
        routing_key=pipeline_routing_key(service_name, pipeline_name),
        body=json.dumps(payload),
        properties=pika.BasicProperties(delivery_mode=PIKA_PERSISTENT_MODE)
    )
def nack_pipeline_run(rabbit, service_name, pipeline_name, run_id,
                      reannounce_time, message):
    """Refuse an announced pipeline run.

    *reannounce_time* tells the announcer when to try again and *message*
    explains the refusal.
    """
    logger.info("Nacking pipeline run %s:%s:%s." % (service_name, pipeline_name,
                                                    run_id))
    body = json.dumps({
        'service': service_name,
        'pipeline': pipeline_name,
        'run_id': run_id,
        'reannounce_time': reannounce_time,
        'message': message,
    })
    rabbit.basic_publish(
        exchange=NACK_PIPELINE_RUN_EXCHANGE,
        routing_key=pipeline_routing_key(service_name, pipeline_name),
        body=body,
        properties=pika.BasicProperties(delivery_mode=PIKA_PERSISTENT_MODE),
    )
def queue_job(rabbit, queue_name, service_name, pipeline_name, target_time, target,
              target_parameters, run_id, job_id):
    """Queue one job directly onto *queue_name* (default exchange).

    Unlike the announce/ack functions, this publishes with exchange=''
    so the message lands straight in the named queue.
    """
    # 'target' should be a string that includes all the information that the ETL
    # service worker will need to produce this output. If it's a particular
    # slice of rows in a DB table, for example, then 'target' should include the
    # LIMIT and OFFSET parameters.
    logger.info("Announcing job %s:%s:%s:%s:%s." % (service_name, pipeline_name,
                                                    run_id, target, job_id))
    payload = {
        'service': service_name,
        'pipeline': pipeline_name,
        'target_time': target_time,
        'target': target,
        'target_parameters': target_parameters,
        'run_id': run_id,
        'job_id': job_id,
    }
    rabbit.basic_publish(
        exchange='',
        routing_key=queue_name,
        body=json.dumps(payload),
        properties=pika.BasicProperties(
            delivery_mode=PIKA_PERSISTENT_MODE,
        ),
    )
def claim_job(rabbit, job_id, worker_name, start_time, expires, corr_id):
    """Publish a worker's claim on a job.

    The message carries reply_to/correlation_id so the dispatcher can
    answer on the worker's own queue via grant_job().
    """
    logger.info("Claiming job %s:%s:%s" % (job_id, worker_name, corr_id))
    payload = {
        'job_id': job_id,
        'worker_name': worker_name,
        'start_time': start_time,
        'expires': expires,
    }
    rabbit.basic_publish(
        exchange=CLAIM_JOB_EXCHANGE,
        routing_key=worker_name,
        body=json.dumps(payload),
        properties=pika.BasicProperties(reply_to=worker_name,
                                        correlation_id=corr_id, )
    )
def grant_job(rabbit, worker_name, corr_id, granted):
    """Answer a worker's job claim: body '1' if granted, '0' otherwise."""
    # This method is not like the others. While those messages publish to topic
    # exchanges, bound to shared queues, and have JSON payloads, this message
    # publishes to the special "default" exchange, directly to a worker-specific
    # queue, and has a payload of '0' or '1', letting a specific worker know
    # whether its job claim has been granted or not.

    # In other words, while the other messages are broadcast and queued, this
    # message is sent directly as an RPC response.
    rabbit.basic_publish(
        exchange='',
        routing_key=worker_name,
        properties=pika.BasicProperties(correlation_id=corr_id),
        body='1' if granted else '0',
    )
def end_job(rabbit, service_name, pipeline_name, target_time, target, job_id,
            end_time, succeeded):
    """Report that a job finished, with its end time and success flag."""
    logger.info("Ending job %s:%s:%s:%s." % (service_name, pipeline_name,
                                             target, job_id))
    body = json.dumps({
        'service': service_name,
        'pipeline': pipeline_name,
        'target_time': target_time,
        'target': target,
        'job_id': job_id,
        'end_time': end_time,
        'succeeded': succeeded,
    })
    rabbit.basic_publish(
        exchange=END_JOB_EXCHANGE,
        routing_key=pipeline_routing_key(service_name, pipeline_name),
        body=body,
        properties=pika.BasicProperties(delivery_mode=PIKA_PERSISTENT_MODE),
    )
def send_log_msg(rabbit, service_name, pipeline_name, run_id, target, job_id,
                 line_num, msg):
    """Publish one numbered log line for a job.

    The routing key embeds service/pipeline/run/target/job so consumers
    can bind selectively; the target is mq_escape()d since it may
    contain characters meaningful to topic routing.
    """
    logger.info("Job msg %s:%s:%s:%s:%s" % (service_name, pipeline_name, job_id,
                                            run_id, msg))
    routing_key = '.'.join([
        service_name,
        pipeline_name,
        str(run_id),
        mq_escape(target),
        str(job_id),
    ])
    payload = {
        'service': service_name,
        'pipeline': pipeline_name,
        'run_id': run_id,
        'job_id': job_id,
        'line_num': line_num,
        'msg': msg,
    }
    rabbit.basic_publish(
        exchange=JOB_LOGS_EXCHANGE,
        routing_key=routing_key,
        body=json.dumps(payload),
        properties=pika.BasicProperties(delivery_mode=PIKA_PERSISTENT_MODE)
    )
| yougov/mettle-protocol | mettle_protocol/messages.py | messages.py | py | 10,076 | python | en | code | 0 | github-code | 13 |
19496235018 | #!/usr/bin/env python3
import argparse, urllib.request, os, sys
sqli = ["'", "\"", "`", "and 1=0", "or 1=0", "' and 1=0", "' or 1=0", "\" and 1=0", "\" or 1=0", "`and 1=0", "` or 1=0"]
xss = ["'<SCRIPT>alert(0)</SCRIPT>#",
"'<SCRIPT>alert(0)</SCRIPT>//",
"';alert(String.fromCharCode(88,83,83))//",
"\";alert(String.fromCharCode(88,83,83))//",
"--></SCRIPT>\">'><SCRIPT>alert(String.fromCharCode(88,83,83))</SCRIPT>",
"'';!--\"<XSS>=&{()}",
"<IMG SRC=\"javascript:alert('XSS');\">",
"<IMG SRC=javascript:alert('XSS')>",
"<IMG SRC=JaVaScRiPt:alert('XSS')>",
"<IMG SRC=`javascript:alert(\"Test, 'XSS'\")`>",
"<a onmouseover=\"alert(document.cookie)\">xxs link</a>",
"<a onmouseover=alert(document.cookie)>xxs link</a>",
"<IMG \"\"\"><SCRIPT>alert(\"XSS\")</SCRIPT>\">",
"<IMG SRC=javascript:alert(String.fromCharCode(88,83,83))>"]
lfi = ["C:\\boot.ini",
"C:\\WINDOWS\\win.ini",
"C:\\WINNT\\win.ini",
"C:\\WINDOWS\\Repair\SAM",
"C:\\WINDOWS\\php.ini",
"C:\\WINNT\\php.ini",
"C:\\Program Files\\Apache Group\\Apache\\conf\\httpd.conf",
"C:\\Program Files\\Apache Group\\Apache2\\conf\\httpd.conf",
"C:\\Program Files\\xampp\\apache\\conf\\httpd.conf",
"C:\\php\\php.ini",
"C:\\php5\\php.ini",
"C:\\php4\\php.ini",
"C:\\apache\\php\\php.ini",
"C:\\xampp\\apache\\bin\\php.ini",
"C:\\home2\\bin\\stable\\apache\\php.ini",
"C:\\home\\bin\\stable\\apache\\php.ini",
"C:\\Program Files\\Apache Group\\Apache\\logs\\access.log",
"C:\\Program Files\\Apache Group\\Apache\\logs\\error.log",
"C:\\WINDOWS\\TEMP\\",
"C\\php\\sessions\\",
"C:\\php5\\sessions\\",
"C:\\php4\\sessions\\"]
rfi = ["http://www.snailbook.com/docs/publickey-file.txt"]
rce = []
def clearScreen(): # Clears the screen and re-prints the logo
    """Clear the terminal (cls on Windows, clear elsewhere) and print the banner."""
    if sys.platform == 'win32':os.system('cls')
    else:os.system('clear')
    print('''
                   __         __    ____ ____
   ____  ____  ____/ /_ __  __/ /__  ) \ / (
  / __ \/ __ \/ ___/ __/ / / / / _ \ )_ \_V_/ _(
 / / / / /_/ / /__/ /_/ /_/ / / __/   )__ __(
/_/ /_/\____/\___/\__/\__,_/_/\___/    `-'
    ''')
def get(): # Function for the -m get parameter
    """Run the chosen GET-based scan against ``args.url``.

    Prompts for an attack class, substitutes/appends payloads from the
    module-level lists (sqli/xss/lfi/rfi/rce) into the target URL and
    inspects the HTTP responses for tell-tale strings. Reads the
    module-level ``args`` namespace; results are printed only.
    """
    x = input("\n\t1. SQLi\n\t2. XSS\n\t3. LFI\n\t4. RFI\n\t5. RCE\n")
    if x=="1":
        # SQLi: append each payload and look for known DB error strings.
        for i in sqli:
            print("Testing: " + i)
            req = args.url + i
            try:
                with urllib.request.urlopen(req) as response:
                    html = str(response.read())
                with open("sql_errors.txt", "r") as f:
                    for line in f.readlines():
                        line = line.rstrip('\n')
                        if line in html:
                            print("Likely vulnerable.\n")
                            break
            except Exception as e:
                print(e)
    elif x=="2":
        # XSS: a payload reflected verbatim in the response body.
        for i in xss:
            req = args.url + i
            try:
                with urllib.request.urlopen(req) as response:
                    html = str(response.read())
                if i in html:
                    print("\nLikely vulnerable.")
                    break
            except Exception as e:
                print(e)
    elif x=="3":
        # LFI: replace the parameter value with well-known Windows paths.
        n = args.url.find('=') + 1
        overall = False
        for i in lfi:
            req = args.url[:n] + i
            try:
                print("Trying: " + i)
                with urllib.request.urlopen(req) as response:
                    html = str(response.read())
                with open("lfi_404.txt", "r") as f:
                    # BUGFIX: the old code set vuln=True after the FIRST
                    # non-matching 404-marker line, so a match on a later
                    # line still reported "File detected". The for/else
                    # reports a hit only when NO marker line matched.
                    for line in f.readlines():
                        line = line.rstrip('\n')
                        if line in html:
                            print("Not found.\n")
                            break
                    else:
                        print("File detected.\n")
                        overall = True
            except Exception as e:
                print(str(e) + "\n")
        # BUGFIX: the verdict used to be printed unconditionally,
        # ignoring the `overall` flag tracked above.
        if overall:
            print("\nOverall: Vulnerable")
    elif x=="4":
        # RFI: point the parameter at a remote file and check whether its
        # contents appear in the response.
        n = args.url.find('=') + 1
        for i in rfi:
            print("Trying: " + i)
            req = args.url[:n] + i
            try:
                with urllib.request.urlopen(req) as response:
                    html = str(response.read())
                with urllib.request.urlopen(i) as response:
                    file = str(response.read())
                if file in html:print("Likely vulnerable.\n")
            except Exception as e:
                print(str(e) + "\n")
    elif x=="5":atk = rce # RCE payload list is empty; scanning not implemented
def post(): # Function for the -m post parameter (unfinished)
    """Placeholder: POST-based scanning is not implemented yet."""
    print("POST functionality not yet developed.")
parser = argparse.ArgumentParser() # Begin argument setup
parser.add_argument('-m', '--method', dest='method', help='get or post', default='get')
parser.add_argument('-u', '--url', dest='url', help='url to target')
parser.add_argument('-p', '--parameter', dest='parameter', help='parameter to use', default=None)
parser.add_argument('-d', '--data', dest='data', help='data to use for the post module', default=None)
args = parser.parse_args() # End argument setup

clearScreen()
# Change color on Windows and other (hopefully)
if sys.platform == 'win32':os.system('color a')
else:os.system('setterm -foreground green -store')

# BUGFIX: this used `is not 'h'`, comparing object identity against a
# string literal (a SyntaxWarning on modern CPython and never a reliable
# comparison); use value inequality instead.
# NOTE(review): args.url is None when -u is omitted and .lower() would
# crash here -- consider making -u required.
if args.url.lower()[0] != 'h':args.url = 'http://' + args.url # Add 'http://' to the beginning of the url if it's not there
try:
    if args.method.lower() == 'get':get()
    elif args.method.lower() == 'post':post()
    else:parser.print_help()
except Exception:
    # Narrowed from a bare `except:`; still falls back to printing usage.
    parser.print_help()
| dxeheh/noctule | noctule.py | noctule.py | py | 6,335 | python | en | code | 0 | github-code | 13 |
10275853932 | from django.db.models.functions import Coalesce
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.response import Response
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.urls import resolve
from base.models import Product, Review
from base.serializers import ProductSerializer
# we get all ours products in json format, and show them on /products/ urls
@api_view(['GET'])
def get_products(request):
    """List products filtered by optional keyword/price/brand/category
    query params, paginated 8 per page.

    Fix: the original rebuilt the queryset from ``Product.objects`` for each
    filter, so only the last non-empty filter ever applied; filters are now
    chained and combine as expected.

    Returns a Response dict with 'products', 'page' and 'pages'.
    """
    name_query = request.query_params.get('keyword') or ''
    price_query = request.query_params.get('price') or ''
    brand_query = request.query_params.get('brand') or ''
    category_query = request.query_params.get('category') or ''

    products = Product.objects.all()

    if brand_query:
        # brand param arrives as a '/'-wrapped comma-separated list
        brands = brand_query.replace('/', '').split(',')
        products = products.filter(brand__in=brands)

    if price_query:
        # price param arrives as 'low-high'
        bounds = price_query.replace('/', '').split('-')
        products = products.filter(price__range=(int(bounds[0]), int(bounds[1])))

    if name_query:
        # case-insensitive substring match on the product name
        products = products.filter(name__icontains=name_query)

    if category_query:
        categories = category_query.replace('/', '').split(',')
        products = products.filter(category__in=categories)

    page = request.query_params.get('page')
    paginator = Paginator(products, 8)

    try:
        products = paginator.page(page)
    except PageNotAnInteger:
        products = paginator.page(1)
    except EmptyPage:
        products = paginator.page(paginator.num_pages)

    page = int(page) if page is not None else 1

    serializer = ProductSerializer(products, many=True)
    return Response({'products': serializer.data, 'page': page, 'pages': paginator.num_pages})
@api_view(['GET'])
def get_top_products(request):
    """Return up to five products rated 4 or higher, best-rated first."""
    top_rated = Product.objects.filter(rating__gte=4).order_by('-rating')[:5]
    return Response(ProductSerializer(top_rated, many=True).data)
# we get all ours products by category
@api_view(['GET'])
def get_products_by_category(request):
    """Return every product, ordered by its category field."""
    ordered = Product.objects.all().order_by('category')
    return Response({'products': ProductSerializer(ordered, many=True).data})
# we get all ours products by brand
@api_view(['GET'])
def get_products_by_brand(request):
    """Return every product ordered by brand.

    The original returned an unordered queryset despite the name and the
    "by brand" comment; order_by('brand') makes it consistent with the
    sibling get_products_by_category.
    """
    products = Product.objects.all().order_by('brand')
    serializer = ProductSerializer(products, many=True)
    return Response({'products': serializer.data})
# we get just one our product in json format, filtered by pk (e.g _id) and show her on /products/<str:pk>/ urls
@api_view(['GET'])
def get_product(request, pk):
    """Return one product looked up by its _id (raises if absent)."""
    item = Product.objects.get(_id=pk)
    return Response(ProductSerializer(item, many=False).data)
# we get all ours products by category
@api_view(['GET'])
def get_all_products(request):
    """Return the full, unfiltered product list."""
    everything = Product.objects.all()
    return Response({'products': ProductSerializer(everything, many=True).data})
@api_view(['GET'])
def sort_all_products_by_high_price(request):
    """Return all products sorted from highest to lowest price."""
    by_price_desc = Product.objects.order_by('-price')
    return Response({'products': ProductSerializer(by_price_desc, many=True).data})
# we get just one our product in json format, filtered by pk (e.g _id) and create product on admin screen
@api_view(['POST'])
@permission_classes([IsAdminUser])
def create_product(request):
    """Create a placeholder product owned by the requesting admin user.

    The client is expected to edit the sample values afterwards via
    update_product.
    """
    placeholder = Product.objects.create(
        user=request.user,
        name='Sample Name',
        price=0,
        brand='Sample Brand',
        countInStock=0,
        category='Sample Category',
        description='',
    )
    return Response(ProductSerializer(placeholder, many=False).data)
@api_view(['PUT'])
@permission_classes([IsAdminUser])
def update_product(request, pk):
    """Overwrite the editable fields of product `pk` with the submitted data.

    All six fields are required in the payload (a missing key raises
    KeyError, as before).
    """
    payload = request.data
    product = Product.objects.get(_id=pk)
    for field in ('name', 'price', 'brand', 'countInStock', 'category', 'description'):
        setattr(product, field, payload[field])
    product.save()
    return Response(ProductSerializer(product, many=False).data)
# we get just one our product in json format, filtered by pk (e.g _id) and create product on admin screen
@api_view(['DELETE'])
@permission_classes([IsAdminUser])
def delete_product(request, pk):
    """Remove product `pk` from the database."""
    Product.objects.get(_id=pk).delete()
    return Response('Product Deleted')
# we save our image, from the frontend side, to our static folder
@api_view(['POST'])
def upload_image(request):
    """Attach the uploaded file (form key 'image') to the product whose
    _id is sent as 'product_id'."""
    product = Product.objects.get(_id=request.data['product_id'])
    product.image = request.FILES.get('image')
    product.save()
    return Response('Image was uploaded')
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def create_product_review(request, pk):
    """Create a review for product `pk` and refresh its cached rating stats.

    Rejects duplicate reviews per user and a rating of 0; otherwise stores
    the review and recomputes numReviews and the mean rating.
    """
    user = request.user
    product = Product.objects.get(_id=pk)
    data = request.data

    # 1 - one review per user per product
    if product.review_set.filter(user=user).exists():
        return Response({'detail': 'Product already reviewed'},
                        status=status.HTTP_400_BAD_REQUEST)

    # 2 - a rating must actually be selected
    if data['rating'] == 0:
        return Response({'detail': 'Please select a rating'},
                        status=status.HTTP_400_BAD_REQUEST)

    # 3 - create the review, then update the product's aggregates
    Review.objects.create(
        user=user,
        product=product,
        name=user.first_name,
        rating=data['rating'],
        comment=data['comment'],
    )

    reviews = product.review_set.all()
    product.numReviews = len(reviews)
    product.rating = sum(r.rating for r in reviews) / len(reviews)
    product.save()
    return Response('Review Added')
| barserkaua/Django_React_ecommerce_project | backend/base/views/product_views.py | product_views.py | py | 7,638 | python | en | code | 0 | github-code | 13 |
31269619999 | # -*- coding: utf-8 -*-
"""
Symbolic differentiation of prefix expressions
This code calculates derivatives in prefix notation
This code only accepts strings of expressions in prefix notation with proper spacing and nested parenthesis indicating the order of operations
Expression operators, functions and arguments must all be lowercase
The expressions must have balanced parentesis and with spaces between list items
Expressions are single variable expressions using x as the variable
This code calculates the correct derivative but does not simplify the result
This code is built to manage any combination of these operators and functions (+ - * / ^ cos sin tan exp ln)
Examples
5(x+3) -> ('(* (+ x 3) 5)') returns 5
5cos(x^2) -> ('(* 5 (cos (^ x 2)))') returns (* 5 (* (* 2 x) (* -1 (sin (^ x 2))))) -> -10sin(x^2)
"""
def main():
    """Prompt for a prefix expression and print its derivative."""
    expression = input("Enter the expression of x here in prefix notation: ")
    print(diff(expression))
def parse_f(s):
    """Split one parenthesised prefix expression into [op, arg...].

    Binary operators yield [op, arg1, arg2]; named functions (cos, sin,
    tan, exp, ln) yield [op, arg]. Arguments are returned as raw
    substrings and may themselves be parenthesised expressions.
    """
    op = s[1]
    if op in ("+", "-", "*", "/", "^"):
        # Locate the spaces at depth 0 (relative to the outer paren) that
        # separate the operator from its two arguments.
        depth = -1
        split_points = []
        for idx, ch in enumerate(s):
            if ch == "(":
                depth += 1
            elif ch == ")":
                depth -= 1
            elif ch == " " and depth == 0:
                split_points.append(idx)
        return [op,
                s[split_points[0] + 1: split_points[1]],
                s[split_points[1] + 1: -1]]
    # Named function: accumulate its name until the separating space.
    for idx, ch in enumerate(s):
        if ch == " ":
            return [op, s[idx + 1: -1]]
        if idx > 1:
            op += ch
def type_f(arg):
    """Classify a prefix-notation token.

    Returns 'ex' for a parenthesised sub-expression, 'nm' for an integer
    constant (now including negatives such as "-1", which the original
    misclassified as variables -- cosine() emits "-1" factors, so second
    derivatives were differentiated incorrectly), and 'vr' otherwise.
    """
    if "(" in arg:
        return "ex"
    digits = arg.lstrip("-")
    if digits and digits.isnumeric():
        return "nm"
    return "vr"
def addition(tup):
    """Differentiate both operands of a '+' node: (u + v)' = u' + v'."""
    derivs = []
    for term in tup:
        kind = type_f(term)
        if kind == "ex":
            derivs.append(diff(term))
        elif kind == "nm":
            derivs.append("0")
        else:  # bare variable
            derivs.append("1")
    return "(+ " + derivs[0] + " " + derivs[1] + ")"
def subtraction(tup):
    """Differentiate both operands of a '-' node: (u - v)' = u' - v'."""
    derivs = []
    for term in tup:
        kind = type_f(term)
        if kind == "ex":
            derivs.append(diff(term))
        elif kind == "nm":
            derivs.append("0")
        else:  # bare variable
            derivs.append("1")
    return "(- " + derivs[0] + " " + derivs[1] + ")"
def cosine(tup):
    """d/dx cos(u) = -sin(u) * u' (chain rule); constants give 0."""
    u = tup[0]
    kind = type_f(u)
    if kind == "vr":
        return "(* -1 (sin " + u + "))"
    if kind == "nm":
        return "0"
    return "(* " + diff(u) + " (* -1 (sin " + u + ")))"
def sine(tup):
    """d/dx sin(u) = cos(u) * u' (chain rule); constants give 0."""
    u = tup[0]
    kind = type_f(u)
    if kind == "vr":
        return "(cos " + u + ")"
    if kind == "nm":
        return "0"
    return "(* " + diff(u) + " (cos " + u + "))"
def tangent(tup):
    """d/dx tan(u) = sec^2(u) * u', written as 1/cos^2(u); constants give 0."""
    u = tup[0]
    kind = type_f(u)
    if kind == "vr":
        return "(/ 1 (^ (cos " + u + ") 2))"
    if kind == "nm":
        return "0"
    return "(* " + diff(u) + " (^ (cos " + u + ") -2))"
def nat_log(tup):
    """d/dx ln(u) = u' / u; constants give 0."""
    u = tup[0]
    kind = type_f(u)
    if kind == "vr":
        return "(/ 1 " + u + ")"
    if kind == "nm":
        return "0"
    return "(/ " + diff(u) + " " + u + ")"
def e_to_the(tup):
    """d/dx exp(u) = exp(u) * u'; constants give 0."""
    u = tup[0]
    kind = type_f(u)
    if kind == "vr":
        return "(exp " + u + ")"
    if kind == "nm":
        return "0"
    return "(* " + diff(u) + " (exp " + u + "))"
def multiplication(tup):
    """Product rule, with shortcut forms for constant/variable operands."""
    a, b = tup[0], tup[1]
    kinds = (type_f(a), type_f(b))
    if kinds == ("ex", "ex"):
        # full product rule: (uv)' = u v' + u' v
        return "(+ (* " + a + " " + diff(b) + ") (* " + diff(a) + " " + b + "))"
    if kinds == ("nm", "vr"):
        return a
    if kinds == ("vr", "nm"):
        return b
    if kinds == ("nm", "nm"):
        return "0"
    if kinds == ("vr", "vr"):
        # d/dx x*x = 2x
        return "(* 2 " + a + ")"
    if kinds == ("ex", "nm"):
        return "(* " + b + " " + diff(a) + ")"
    if kinds == ("nm", "ex"):
        return "(* " + a + " " + diff(b) + ")"
    if kinds == ("ex", "vr"):
        return "(+ " + a + " (* " + b + " " + diff(a) + "))"
    if kinds == ("vr", "ex"):
        return "(+ " + b + " (* " + a + " " + diff(b) + "))"
def quotient(tup):
    """Quotient rule: (u/v)' = (u'v - v'u) / v^2."""
    u, v = tup[0], tup[1]
    return ("(/ (- (* " + diff(u) + " " + v + ") (* " + diff(v) + " " + u
            + ")) (^ " + v + " 2))")
def power(tup):
    """Power rule for x^n; general powers are rewritten as exp(b*ln(a)).

    Fix: removed a leftover debug print of the rewritten expression that
    polluted stdout on every non-trivial power.
    """
    if type_f(tup[0]) == "vr" and type_f(tup[1]) == "nm":
        return "(* " + tup[1] + " (^ " + tup[0] + " " + dec(tup[1]) + "))"
    return diff("(exp (* " + tup[1] + " (ln " + tup[0] + ")))")
def dec(s):
    """Return the string form of int(s) - 1 (used to lower an exponent)."""
    return str(int(s) - 1)
def comb(s):
    """Pair each '(' with its matching ')' as (open, close) index tuples.

    Opens are processed right-to-left, each matched with the first unused
    ')' to its right; this ordering is relied on by simplify(), so the
    original algorithm is kept.
    """
    opens = [i for i, ch in enumerate(s) if ch == "("]
    closes = [i for i, ch in enumerate(s) if ch == ")"]
    pairs = []
    for o in reversed(opens):
        for c in closes:
            if c > o:
                pairs.append((o, c))
                closes.remove(c)
                break
    return pairs
def simplify(s):
    """Apply one algebraic rewrite and recurse until no rule matches.

    Scans every parenthesised sub-expression (in the order produced by
    comb) and rewrites the first match: constant folding for + - * / ^,
    and the identities x*0=0, x*1=x, x^1=x, x^0=1. Returns s unchanged
    when nothing applies.
    """
    combos = comb(s)
    # The trailing space is consumed again by the [:-1] in each rewrite
    # slice below, so the rebuilt string has no extra padding.
    temp_s = s + " "
    for c in combos:
        # t = [op, arg1, arg2] for the sub-expression at this paren pair.
        t = parse_f(temp_s[c[0]: c[1]+1])
        if t[0] == "+":
            if type_f(t[1]) == "nm" and type_f(t[2]) == "nm":
                v = int(t[1]) + int(t[2])
                new_s = temp_s[:c[0]] + str(v) + temp_s[c[1]+1:-1]
                return simplify(new_s)
        if t[0] == "*":
            if type_f(t[1]) == "nm" and type_f(t[2]) == "nm":
                v = int(t[1]) * int(t[2])
                new_s = temp_s[:c[0]] + str(v) + temp_s[c[1]+1:-1]
                return simplify(new_s)
            # multiplication by 0 collapses the whole sub-expression
            if t[1] == "0" or t[2] == "0":
                new_s = temp_s[:c[0]] + "0" + temp_s[c[1]+1:-1]
                return simplify(new_s)
            # multiplication by 1 keeps the other operand
            if t[1] == "1":
                new_s = temp_s[:c[0]] + t[2] + temp_s[c[1]+1:-1]
                return simplify(new_s)
            elif t[2] == "1":
                new_s = temp_s[:c[0]] + t[1] + temp_s[c[1]+1:-1]
                return simplify(new_s)
        if t[0] == "-":
            if type_f(t[1]) == "nm" and type_f(t[2]) == "nm":
                v = int(t[1]) - int(t[2])
                new_s = temp_s[:c[0]] + str(v) + temp_s[c[1]+1:-1]
                return simplify(new_s)
        if t[0] == "^":
            # x^1 -> x
            if type_f(t[1]) == "vr" and t[2] == "1":
                new_s = temp_s[:c[0]] + t[1] + temp_s[c[1]+1:-1]
                return simplify(new_s)
            # anything^0 -> 1
            if t[2] == "0":
                new_s = temp_s[:c[0]] + "1" + temp_s[c[1]+1:-1]
                return simplify(new_s)
            if type_f(t[1]) == "nm" and type_f(t[2]) == "nm":
                v = int(t[1]) ** int(t[2])
                new_s = temp_s[:c[0]] + str(v) + temp_s[c[1]+1:-1]
                return simplify(new_s)
        if t[0] == "/":
            # NOTE(review): true division yields a float string (e.g. "2.0"),
            # unlike the int results above -- confirm this is intended.
            if type_f(t[1]) == "nm" and type_f(t[2]) == "nm":
                v = int(t[1]) / int(t[2])
                new_s = temp_s[:c[0]] + str(v) + temp_s[c[1]+1:-1]
                return simplify(new_s)
    return s
def diff(s):
    """Differentiate prefix expression s with respect to x and simplify.

    Numeric literals differentiate to "0" and a single-character token
    (the variable) to "1"; otherwise the operator from parse_f dispatches
    to the matching rule function, and the result is simplified to a
    fixed point before being returned.
    """
    if s.isnumeric():
        return "0"
    elif len(s) == 1:
        return "1"
    tup = parse_f(s)
    # All 10 functions
    if tup[0] == "+":
        s = addition(tup[1:])
    if tup[0] == "-":
        s = subtraction(tup[1:])
    if tup[0] == "cos":
        s = cosine(tup[1:])
    if tup[0] == "sin":
        s = sine(tup[1:])
    if tup[0] == "tan":
        s = tangent(tup[1:])
    if tup[0] == "ln":
        s = nat_log(tup[1:])
    if tup[0] == "exp":
        s = e_to_the(tup[1:])
    if tup[0] == "*":
        s = multiplication(tup[1:])
    if tup[0] == "/":
        s = quotient(tup[1:])
    if tup[0] == "^":
        s = power(tup[1:])
    # Iterate simplify() until the expression stops changing.
    new_s = simplify(s)
    while new_s != s:
        s = new_s
        new_s = simplify(s)
    return new_s
if __name__ == "__main__":
    main()
18028014195 |
from __future__ import print_function
from sklearn import datasets
import matplotlib.pyplot as plt
import math
import numpy as np
# Import helper functions
from ravdl.neural_networks import NeuralNetwork
from ravdl.neural_networks.layers import Conv2D, Dense, Dropout, BatchNormalization, Activation, Flatten
from ravdl.neural_networks.optimizers import Adam
from ravdl.neural_networks.loss_functions import CrossEntropy
from sklearn.model_selection import train_test_split
import ravop as R
# Connect to the Ravenverse network (the token placeholder must be replaced)
# and register this run as a distributed CNN graph.
R.initialize(ravenverse_token='<ravenverse_token>',username='cnn_test')
algo = R.Graph(name='cnn', algorithm='convolutional_neural_network', approach='distributed')
def to_categorical(x, n_col=None):
    """One-hot encode an integer vector; width defaults to max(x) + 1."""
    if not n_col:
        n_col = np.amax(x) + 1
    encoded = np.zeros((x.shape[0], n_col))
    encoded[np.arange(x.shape[0]), x] = 1
    return encoded
#----------
# Conv Net
#----------
# Build and train a small CNN on the 8x8 sklearn digits via the ravdl
# distributed framework; exact statement order matters (data prep before
# model construction, algo.end() last).
optimizer = Adam()
data = datasets.load_digits()
X = data.data
y = data.target
# Convert to one-hot encoding
y = to_categorical(y.astype("int"))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)
# Reshape X to (n_samples, channels, height, width)
X_train = X_train.reshape((-1,1,8,8))
X_test = X_test.reshape((-1,1,8,8))
# Two conv blocks (16 then 32 filters, each with relu/dropout/batchnorm),
# then a 256-unit dense layer and a 10-way softmax head.
clf = NeuralNetwork(optimizer=optimizer,
                    loss=CrossEntropy,
                    validation_data=(X_test, y_test))
clf.add(Conv2D(n_filters=16, filter_shape=(3,3), stride=1, input_shape=(1,8,8), padding='same'))
clf.add(Activation('relu'))
clf.add(Dropout(0.25))
clf.add(BatchNormalization())
clf.add(Conv2D(n_filters=32, filter_shape=(3,3), stride=1, padding='same'))
clf.add(Activation('relu'))
clf.add(Dropout(0.25))
clf.add(BatchNormalization())
clf.add(Flatten())
clf.add(Dense(256))
clf.add(Activation('relu'))
clf.add(Dropout(0.4))
clf.add(BatchNormalization())
clf.add(Dense(10))
clf.add(Activation('softmax'))
clf.summary()
train_err, val_err = clf.fit(X_train, y_train, n_epochs=5, batch_size=256)
# # Training and validation error plot
# n = len(train_err)
# training, = plt.plot(range(n), train_err, label="Training Error")
# validation, = plt.plot(range(n), val_err, label="Validation Error")
# plt.legend(handles=[training, validation])
# plt.title("Error Plot")
# plt.ylabel('Error')
# plt.xlabel('Iterations')
# plt.show()

# _, accuracy = clf.test_on_batch(X_test, y_test)
# print ("Accuracy:", accuracy)

# y_pred = np.argmax(clf.predict(X_test), axis=1)
# X_test = X_test.reshape(-1, 8*8)
# # Reduce dimension to 2D using PCA and plot the results
# Plot().plot_in_2d(X_test, y_pred, title="Convolutional Neural Network", accuracy=accuracy, legend_labels=range(10))

# Close the distributed computation graph on the Ravenverse side.
algo.end()
29294016638 | from collections import Counter
input_file = 'day-01/input.txt'
def part1(input):
    """Final floor: each '(' goes up one level, each ')' down one."""
    return input.count('(') - input.count(')')
def part2(input):
    """1-based index of the instruction that first reaches floor -1."""
    delta = {"(": 1, ")": -1}
    floor = 0
    for position, step in enumerate(input, 1):
        floor += delta[step]
        if floor == -1:
            return position
if __name__ == "__main__":
with open(input_file) as f:
data = f.read()
print("Part 1: ", part1(data))
print("Part 2: ", part2(data)) | stevenhorsman/advent-of-code-2015 | day-01/not_quite_lisp.py | not_quite_lisp.py | py | 426 | python | en | code | 0 | github-code | 13 |
3392894450 | import pandas as pd
from faker import Faker
from IPython.display import display
from collections import defaultdict
import random
from datetime import datetime
# Generate 100 fake cinema showings ("funcion" rows) from existing movie and
# room CSVs, then dump them to funcion.csv.
fake = Faker()
select_data = defaultdict(list)
pCSV = pd.read_csv("peliculas.csv")
pId = list(pCSV["id"])
sCSV = pd.read_csv("sala.csv")
sID = list(sCSV["id"])
tuples = 100
for i in range(tuples):
    # zero-padded id like FUN000001
    select_data["funcion_id"].append("FUN" + "0" * (6 - len(str(i + 1))) + str(i + 1))
    select_data["sala_id"].append(sID[random.randint(0, len(sID)-1)])
    idPel = pId[random.randint(0, len(pId) - 1)]
    select_data["pelicula_id"].append(idPel)
    # showing date: between the movie's release date and 30 days after it
    fEstreno = datetime.strptime(list(pCSV.loc[(pCSV.id == idPel)]["fecha_estreno"])[0], '%Y-%m-%d').date()
    fFuncion = fake.date_between(start_date=fEstreno, end_date='+30d')
    select_data["fecha"].append(fFuncion)
    select_data["hora"].append(fake.time())
df_select_data = pd.DataFrame(select_data)
df_select_data.to_csv('funcion.csv', index=False)
# NOTE(review): duplicates are dropped only AFTER the CSV is written, so the
# file may still contain them -- confirm this ordering is intended.
df_select_data.drop_duplicates(keep='first', inplace=True)
display(df_select_data)
"""
2da FASE -> MATAR SALAS QUE INCUMPLAN RESTRICCION
for id_ in list(pCSV["id"]):
fEstreno = list(pCSV.loc[(pCSV.id == id_)]["fecha_estreno"])[0]
fEstreno = datetime.strptime(fEstreno, '%Y-%m-%d').date()
fFuncion = fake.date_between(start_date=fEstreno, end_date='+30d')
print(id_, " -> ", fEstreno, " ", fFuncion)
"""
| alexandermoralesp/Cinemania-BD | Data-Generator/funcion.py | funcion.py | py | 1,439 | python | en | code | 0 | github-code | 13 |
37832596001 | import numpy as np
import pandas as pd
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Load the Statoil iceberg dataset and visualise one radar image both as a
# 2D heat map and as a 3D surface.
train = pd.read_json('./data/train.json')
test = pd.read_json('./data/test.json')
# Each band is a flat list of 75*75 radar returns; reshape to images and
# stack band1, band2 and their mean as three channels.
X_band_1 = np.array([np.array(band).astype(np.float32).reshape(75,75) for band in train['band_1']])
X_band_2 = np.array([np.array(band).astype(np.float32).reshape(75,75) for band in train['band_2']])
X_train = np.concatenate([X_band_1[:,:,:,np.newaxis], X_band_2[:,:,:,np.newaxis], ((X_band_1+X_band_2)/2)[:,:,:,np.newaxis]], axis=-1)
# Pick sample index 12 from band 1 for plotting.
pic = X_band_1[12,:,:]
z = pic[::1, ::1]
x, y = np.mgrid[:z.shape[0], :z.shape[1]]
title = 'iceberg'
# 2d color map
fig = plt.figure()
plt.imshow(z)
plt.title(title)
plt.show()
# 3d surface plot
fig = plt.figure()
# NOTE(review): fig.gca(projection='3d') is deprecated in newer matplotlib
# (use fig.add_subplot(projection='3d')) -- confirm the pinned version.
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm, linewidth=0, antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.title(title)
plt.show()
| p768lwy3/ml_selfstudy | Kaggle/StatoilCCOREIcebergClassifierChallenge/data_visulization.py | data_visulization.py | py | 953 | python | en | code | 0 | github-code | 13 |
1071365353 | import json
import threading
import kafka
from kafka.client_async import selectors
import kafka.errors
from oslo_log import log as logging
from oslo_utils import eventletutils
import tenacity
from oslo_messaging._drivers import base
from oslo_messaging._drivers import common as driver_common
from oslo_messaging._drivers.kafka_driver import kafka_options
from oslo_messaging._i18n import _LE
from oslo_messaging._i18n import _LW
from oslo_serialization import jsonutils
# Module-level logging setup: quiet the chatty kafka/stevedore loggers.
import logging as l
l.basicConfig(level=l.INFO)
l.getLogger("kafka").setLevel(l.WARN)
l.getLogger("stevedore").setLevel(l.WARN)
# Pick the selector class used by kafka-python: when eventlet has monkey
# patched 'select', only a green select-based selector is safe.
if eventletutils.is_monkey_patched('select'):
    # monkeypatch the vendored SelectSelector._select like eventlet does
    # https://github.com/eventlet/eventlet/blob/master/eventlet/green/selectors.py#L32
    from eventlet.green import select
    selectors.SelectSelector._select = staticmethod(select.select)

    # Force to use the select selectors
    KAFKA_SELECTOR = selectors.SelectSelector
else:
    KAFKA_SELECTOR = selectors.DefaultSelector

LOG = logging.getLogger(__name__)
def unpack_message(msg):
    """Deserialize a raw Kafka payload and split out the oslo context.

    :param msg: JSON-encoded serialized message string/bytes
    :returns: (context, message) tuple; '_context' is removed from the
        message dict after extraction.
    """
    # The original pre-initialized context/message and then immediately
    # overwrote them; pop() replaces the read-then-del pair.
    message = driver_common.deserialize_msg(json.loads(msg))
    context = message.pop('_context')
    return context, message
def pack_message(ctxt, msg):
    """Embed the request context into msg under '_context' and serialize it."""
    context_d = ctxt if isinstance(ctxt, dict) else ctxt.to_dict()
    msg['_context'] = context_d
    return driver_common.serialize_msg(msg)
def concat(sep, items):
    """Join the truthy items with sep, skipping empty/None segments."""
    return sep.join(item for item in items if item)
def target_to_topic(target, priority=None, vhost=None):
    """Build the Kafka topic name '<topic>[.priority][.vhost]'.

    :param target: Message destination target
    :type target: oslo_messaging.Target
    :param priority: Notification priority
    :type priority: string
    :param vhost: Notification vhost
    :type vhost: string
    """
    segments = [target.topic, priority, vhost]
    return concat(".", segments)
def retry_on_retriable_kafka_error(exc):
    """True when exc is a KafkaError that kafka-python marks as retriable."""
    return isinstance(exc, kafka.errors.KafkaError) and exc.retriable
def with_reconnect(retries=None):
    """Decorator factory: retry the wrapped call on retriable KafkaErrors.

    Waits 1s between attempts and re-raises the original exception when
    attempts are exhausted. NOTE(review): retries=None is passed straight
    to stop_after_attempt -- confirm the pinned tenacity version treats
    that as "retry forever".

    Fix: the wrapper now carries functools.wraps(func) so the decorated
    function keeps its name/docstring.
    """
    import functools

    def decorator(func):
        @tenacity.retry(
            retry=tenacity.retry_if_exception(retry_on_retriable_kafka_error),
            wait=tenacity.wait_fixed(1),
            stop=tenacity.stop_after_attempt(retries),
            reraise=True
        )
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper
    return decorator
class Connection(object):
    """Shared base for Kafka connections: caches driver config values and
    parses the transport URL into host addresses and credentials."""

    def __init__(self, conf, url):
        self.driver_conf = conf.oslo_messaging_kafka
        self.security_protocol = self.driver_conf.security_protocol
        self.sasl_mechanism = self.driver_conf.sasl_mechanism
        self.ssl_cafile = self.driver_conf.ssl_cafile

        self.url = url
        self.virtual_host = url.virtual_host
        self._parse_url()

    def _parse_url(self):
        """Collect 'host:port' strings and pick credentials from the first
        host; warns when hosts carry different usernames."""
        self.hostaddrs = []
        self.username = None
        self.password = None

        for host in self.url.hosts:
            # NOTE(ansmith): connections and failover are transparently
            # managed by the client library. Credentials will be
            # selected from first host encountered in transport_url
            if self.username is None:
                self.username = host.username
                self.password = host.password
            else:
                if self.username != host.username:
                    LOG.warning(_LW("Different transport usernames detected"))

            if host.hostname:
                self.hostaddrs.append("%s:%s" % (host.hostname, host.port))

    def reset(self):
        """Reset a connection so it can be used again."""
        pass
class ConsumerConnection(Connection):
    """Consumer-side Kafka connection: polls subscribed topics for
    notification payloads."""

    def __init__(self, conf, url):

        super(ConsumerConnection, self).__init__(conf, url)
        self.consumer = None
        self.consumer_timeout = self.driver_conf.kafka_consumer_timeout
        self.max_fetch_bytes = self.driver_conf.kafka_max_fetch_bytes
        self.group_id = self.driver_conf.consumer_group
        self.enable_auto_commit = self.driver_conf.enable_auto_commit
        self.max_poll_records = self.driver_conf.max_poll_records
        # flag flipped by stop_consuming() to break the consume() loop
        self._consume_loop_stopped = False

    @with_reconnect()
    def _poll_messages(self, timeout):
        # poll() returns {partition: [records]}; flatten to raw payloads.
        messages = self.consumer.poll(timeout * 1000.0)
        messages = [record.value
                    for records in messages.values() if records
                    for record in records]
        if not messages:
            # NOTE(sileht): really ? you return payload but no messages...
            # simulate timeout to consume message again
            raise kafka.errors.ConsumerNoMoreData()
        if not self.enable_auto_commit:
            self.consumer.commit()
        return messages

    def consume(self, timeout=None):
        """Receive up to 'max_fetch_messages' messages.

        :param timeout: poll timeout in seconds
        """
        def _raise_timeout(exc):
            raise driver_common.Timeout(str(exc))

        timer = driver_common.DecayingTimer(duration=timeout)
        timer.start()

        # Each poll waits at most consumer_timeout, so stop_consuming()
        # is honored reasonably quickly even with a long overall timeout.
        poll_timeout = (self.consumer_timeout if timeout is None
                        else min(timeout, self.consumer_timeout))

        while True:
            if self._consume_loop_stopped:
                return
            try:
                return self._poll_messages(poll_timeout)
            except kafka.errors.ConsumerNoMoreData as exc:
                poll_timeout = timer.check_return(
                    _raise_timeout, exc, maximum=self.consumer_timeout)
            except Exception:
                LOG.exception(_LE("Failed to consume messages"))
                return

    def stop_consuming(self):
        # Signal consume() to exit its polling loop.
        self._consume_loop_stopped = True

    def close(self):
        if self.consumer:
            self.consumer.close()
            self.consumer = None

    @with_reconnect()
    def declare_topic_consumer(self, topics, group=None):
        """Create the KafkaConsumer subscribed to topics; group defaults to
        the configured consumer_group."""
        self.consumer = kafka.KafkaConsumer(
            *topics, group_id=(group or self.group_id),
            enable_auto_commit=self.enable_auto_commit,
            bootstrap_servers=self.hostaddrs,
            max_partition_fetch_bytes=self.max_fetch_bytes,
            max_poll_records=self.max_poll_records,
            security_protocol=self.security_protocol,
            sasl_mechanism=self.sasl_mechanism,
            sasl_plain_username=self.username,
            sasl_plain_password=self.password,
            ssl_cafile=self.ssl_cafile,
            selector=KAFKA_SELECTOR
        )
)
class ProducerConnection(Connection):
    """Producer-side Kafka connection: lazily creates one shared
    KafkaProducer and publishes notification messages."""

    def __init__(self, conf, url):

        super(ProducerConnection, self).__init__(conf, url)
        self.batch_size = self.driver_conf.producer_batch_size
        self.linger_ms = self.driver_conf.producer_batch_timeout * 1000
        self.producer = None
        # guards lazy creation/teardown of self.producer across threads
        self.producer_lock = threading.Lock()

    def notify_send(self, topic, ctxt, msg, retry):
        """Send messages to Kafka broker.

        :param topic: String of the topic
        :param ctxt: context for the messages
        :param msg: messages for publishing
        :param retry: the number of retry (negative means retry forever)
        """
        # negative retry counts are normalized to None (unlimited)
        retry = retry if retry >= 0 else None
        message = pack_message(ctxt, msg)
        message = jsonutils.dumps(message).encode('utf-8')

        @with_reconnect(retries=retry)
        def wrapped_with_reconnect():
            self._ensure_producer()
            # NOTE(sileht): This returns a future, we can use get()
            # if we want to block like other driver
            future = self.producer.send(topic, message)
            future.get()

        try:
            wrapped_with_reconnect()
        except Exception:
            # NOTE(sileht): if something goes wrong close the producer
            # connection
            self._close_producer()
            raise

    def close(self):
        self._close_producer()

    def _close_producer(self):
        with self.producer_lock:
            if self.producer:
                self.producer.close()
                self.producer = None

    def _ensure_producer(self):
        # Double-checked creation under producer_lock so concurrent
        # senders share a single KafkaProducer instance.
        if self.producer:
            return
        with self.producer_lock:
            if self.producer:
                return
            self.producer = kafka.KafkaProducer(
                bootstrap_servers=self.hostaddrs,
                linger_ms=self.linger_ms,
                batch_size=self.batch_size,
                security_protocol=self.security_protocol,
                sasl_mechanism=self.sasl_mechanism,
                sasl_plain_username=self.username,
                sasl_plain_password=self.password,
                ssl_cafile=self.ssl_cafile,
                selector=KAFKA_SELECTOR)
class OsloKafkaMessage(base.RpcIncomingMessage):
    """Incoming message for the Kafka driver.

    This driver handles notifications only, so the RPC-oriented
    operations (requeue/reply/heartbeat) are stubs that log a warning.
    """

    def __init__(self, ctxt, message):
        super(OsloKafkaMessage, self).__init__(ctxt, message)

    def requeue(self):
        LOG.warning(_LW("requeue is not supported"))

    def reply(self, reply=None, failure=None):
        LOG.warning(_LW("reply is not supported"))

    def heartbeat(self):
        LOG.warning(_LW("heartbeat is not supported"))
class KafkaListener(base.PollStyleListener):
    """Poll-style listener that drains a ConsumerConnection into a local
    in-memory queue and hands out one message per poll() call."""

    def __init__(self, conn):
        super(KafkaListener, self).__init__()
        self._stopped = threading.Event()
        self.conn = conn
        self.incoming_queue = []

        # FIXME(sileht): We do a first poll to ensure we topics are created
        # This is a workaround mainly for functional tests, in real life
        # this is fine if topics are not created synchroneously
        self.poll(5)

    @base.batch_poll_helper
    def poll(self, timeout=None):
        """Return the next OsloKafkaMessage, fetching from Kafka when the
        local queue is empty; returns None on timeout or after stop()."""
        while not self._stopped.is_set():
            if self.incoming_queue:
                return self.incoming_queue.pop(0)
            try:
                messages = self.conn.consume(timeout=timeout) or []
                for message in messages:
                    msg = OsloKafkaMessage(*unpack_message(message))
                    self.incoming_queue.append(msg)
            except driver_common.Timeout:
                return None

    def stop(self):
        self._stopped.set()
        self.conn.stop_consuming()

    def cleanup(self):
        self.conn.close()
class KafkaDriver(base.BaseDriver):
    """Note: Current implementation of this driver is experimental.
    We will have functional and/or integrated testing enabled for this driver.

    Notification-only driver: the RPC entry points (send/listen) raise
    NotImplementedError.
    """

    def __init__(self, conf, url, default_exchange=None,
                 allowed_remote_exmods=None):
        conf = kafka_options.register_opts(conf, url)
        super(KafkaDriver, self).__init__(
            conf, url, default_exchange, allowed_remote_exmods)

        # listener connections created by listen_for_notifications
        self.listeners = []
        self.virtual_host = url.virtual_host
        # single shared producer connection for all notifications
        self.pconn = ProducerConnection(conf, url)

    def cleanup(self):
        """Close the producer and every listener connection."""
        self.pconn.close()
        for c in self.listeners:
            c.close()
        self.listeners = []

    def send(self, target, ctxt, message, wait_for_reply=None, timeout=None,
             call_monitor_timeout=None, retry=None):
        raise NotImplementedError(
            'The RPC implementation for Kafka is not implemented')

    def send_notification(self, target, ctxt, message, version, retry=None):
        """Send notification to Kafka brokers

        :param target: Message destination target
        :type target: oslo_messaging.Target
        :param ctxt: Message context
        :type ctxt: dict
        :param message: Message payload to pass
        :type message: dict
        :param version: Messaging API version (currently not used)
        :type version: str
        :param call_monitor_timeout: Maximum time the client will wait for the
            call to complete before or receive a message heartbeat indicating
            the remote side is still executing.
        :type call_monitor_timeout: float
        :param retry: an optional default kafka consumer retries configuration
                      None means to retry forever
                      0 means no retry
                      N means N retries
        :type retry: int
        """
        self.pconn.notify_send(target_to_topic(target,
                                               vhost=self.virtual_host),
                               ctxt, message, retry)

    def listen(self, target, batch_size, batch_timeout):
        raise NotImplementedError(
            'The RPC implementation for Kafka is not implemented')

    def listen_for_notifications(self, targets_and_priorities, pool,
                                 batch_size, batch_timeout):
        """Listen to a specified list of targets on Kafka brokers

        :param targets_and_priorities: List of pairs (target, priority)
                                       priority is not used for kafka driver
                                       target.exchange_target.topic is used as
                                       a kafka topic
        :type targets_and_priorities: list
        :param pool: consumer group of Kafka consumers
        :type pool: string
        """
        conn = ConsumerConnection(self.conf, self._url)
        topics = set()
        for target, priority in targets_and_priorities:
            topics.add(target_to_topic(target, priority))

        conn.declare_topic_consumer(topics, pool)

        listener = KafkaListener(conn)
        return base.PollStyleListenerAdapter(listener, batch_size,
                                             batch_timeout)
| ualberta-smr/PyMigBench | data/codefile/openstack@oslo.messaging__5a842ae__oslo_messaging$_drivers$impl_kafka.py.source.py | openstack@oslo.messaging__5a842ae__oslo_messaging$_drivers$impl_kafka.py.source.py | py | 13,641 | python | en | code | 3 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.