max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
assignments/07-csv/blastomatic.py | marissalopezpier/biosys-analytics | 0 | 6622051 | #!/usr/bin/env python3
"""
Author : <NAME>
Date : 3 13 2019
Purpose: Python program for blastomics
"""
import os
import sys
import argparse
import csv
#import Bio
#from Bio import SeqIO
from collections import defaultdict
#-------------------------------------
def main():
    """Annotate BLAST tabular hits with genus/species from an annotation CSV.

    Reads the annotation file into centroid -> genus/species lookups, then
    streams the BLAST -outfmt 6 hits, writing one annotated line per hit to
    the output file (or stdout). Hits whose subject id is not in the lookup
    are reported on stderr.
    """
    args = get_args()
    hits_header = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gapopen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']
    annotations_filename = args.annotations
    hits_filename = args.hits_file
    outfile = args.outfile

    # Validate inputs *before* opening the output file, so a bad invocation
    # does not create/truncate the output (the original opened it first).
    if not os.path.isfile(hits_filename):
        print('"{}" is not a file'.format(hits_filename), file=sys.stderr)
        sys.exit(1)  # bug fix: use sys.exit, not the bare exit() helper
    if not os.path.isfile(annotations_filename):
        print('"{}" is not a file'.format(annotations_filename), file=sys.stderr)
        sys.exit(1)

    # Build centroid -> genus and centroid -> species lookups; empty cells
    # become the literal string 'NA'.
    genus_dict = {}
    species_dict = {}
    with open(annotations_filename, 'r') as f_annot:
        csv_reader = csv.DictReader(f_annot)  # first line supplies the header
        for row in csv_reader:
            centroid = row['centroid']
            genus_dict[centroid] = row['genus'] or 'NA'
            species_dict[centroid] = row['species'] or 'NA'

    f_out = sys.stdout if outfile == '' else open(outfile, 'w')
    print('seq_id\tpident\tgenus\tspecies', file=f_out)

    with open(hits_filename, 'r') as f_hits:
        # BLAST -outfmt 6 has no header row, so supply the field names.
        csv_reader = csv.DictReader(f_hits, fieldnames=hits_header, delimiter='\t')
        for row in csv_reader:
            sseqid = row["sseqid"]
            pident = row["pident"]
            if sseqid in genus_dict:
                print('{}\t{}\t{}\t{}'.format(sseqid, pident, genus_dict[sseqid], species_dict[sseqid]), file=f_out)
            else:
                print('Cannot find seq "{}" in lookup'.format(sseqid), file=sys.stderr)

    if f_out is not sys.stdout:  # bug fix: the original closed sys.stdout
        f_out.close()
# --------------------------------------------------
def get_args():
    """Parse and return the command-line arguments."""
    parser = argparse.ArgumentParser(
        description='Annotate BLAST output',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        'hits_file',
        metavar='FILE',
        help='BLAST output(-outfmt 6)',
        type=str
    )
    parser.add_argument(
        '-a',
        '--annotations',
        help='Annotation file',  # fixed user-facing typo: "Annotaiton"
        metavar='FILE',
        default='',
    )
    parser.add_argument(
        '-o',
        '--outfile',
        help='Output file',
        metavar='FILE',
        default='',
    )
    return parser.parse_args()
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
"""
Author : <NAME>
Date : 3 13 2019
Purpose: Python program for blastomics
"""
import os
import sys
import argparse
import csv
#import Bio
#from Bio import SeqIO
from collections import defaultdict
#-------------------------------------
def main():
args = get_args()
hits_header = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gapopen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']
annotations_filename = args.annotations
hits_filename = args.hits_file
outfile = args.outfile
if outfile=='':
f_out = sys.stdout
else:
f_out = open(outfile,'w')
if not os.path.isfile(hits_filename):
print('"{}" is not a file'.format(hits_filename),file=sys.stderr)
exit(1)
if not os.path.isfile(annotations_filename):
print('"{}" is not a file'.format(annotations_filename),file=sys.stderr)
exit(1)
f_annot = open(annotations_filename,'r')
genus_dict = {}
species_dict = {}
csv_reader = csv.DictReader(f_annot) #using first line as hits_header
for row in csv_reader:
centroid = row['centroid']
genus = row['genus']
if not genus:
genus = 'NA'
species = row['species']
if not species:
species = 'NA'
genus_dict[centroid] = genus
species_dict[centroid] = species
f_annot.close()
print('seq_id\tpident\tgenus\tspecies',file=f_out)
f_hits = open(hits_filename,'r')
#sseqid = []
#pident = []
csv_reader = csv.DictReader(f_hits,fieldnames=hits_header,delimiter='\t')
for row in csv_reader:
#print(row)
sseqid = row["sseqid"]
pident = row["pident"]
if sseqid in genus_dict:
genus = genus_dict[sseqid]
species = species_dict[sseqid]
print('{}\t{}\t{}\t{}'.format(sseqid,pident,genus,species),file=f_out)
else:
print('Cannot find seq "{}" in lookup'.format(sseqid),file=sys.stderr)
f_out.close()
# --------------------------------------------------
def get_args():
"""get arguments"""
parser = argparse.ArgumentParser(
description='Annotate BLAST output',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'hits_file',
metavar='FILE',
help='BLAST output(-outfmt 6)',
type=str
)
parser.add_argument(
'-a',
'--annotations',
help='Annotaiton file',
metavar='FILE',
default='',
)
parser.add_argument(
'-o',
'--outfile',
help='Output file',
metavar='FILE',
default='',
)
return parser.parse_args()
if __name__ == "__main__":
main()
| en | 0.309554 | #!/usr/bin/env python3 Author : <NAME> Date : 3 13 2019 Purpose: Python program for blastomics #import Bio #from Bio import SeqIO #------------------------------------- #using first line as hits_header #sseqid = [] #pident = [] #print(row) # -------------------------------------------------- get arguments | 2.679633 | 3 |
5. WEB/CustomTransform/distanceEncoder.py | doyaguillo1997/Data2Gether | 1 | 6622052 | <filename>5. WEB/CustomTransform/distanceEncoder.py
from sklearn.base import BaseEstimator, TransformerMixin
import numpy as np
class DistanceEncoder(BaseEstimator, TransformerMixin):
    """Transformer that adds the distance (in km) from each row to a fixed origin.

    Latitudes/longitudes are expressed in minutes. The Euclidean distance in
    minutes is divided by 60 to convert to degrees and multiplied by 6371
    (Earth's radius in km) to obtain kilometres.

    :param new_columns: name of the column created with the distance.
    :param transformed_columns: two columns holding (lat, lng) in minutes.
    :param origin: (lat, lng) reference point, in minutes.
    :return: transform() returns a copy of the DataFrame with the new column.
    """

    def __init__(self, new_columns, transformed_columns, origin):
        super().__init__()
        self.new_columns = new_columns
        self.transformed_columns = transformed_columns
        self.origin = origin

    def fit(self, X, y=None):
        # Stateless transformer: there is nothing to learn.
        return self

    def transform(self, X, y=None):
        result = X.copy()
        lat_col = self.transformed_columns[0]
        lng_col = self.transformed_columns[1]
        # Euclidean distance in minutes to the origin point.
        minutes = np.sqrt((result[lat_col] - self.origin[0]) ** 2
                          + (result[lng_col] - self.origin[1]) ** 2)
        # minutes -> degrees (/60) -> kilometres (* Earth radius).
        result[self.new_columns] = (minutes / 60) * 6371
        return result
| <filename>5. WEB/CustomTransform/distanceEncoder.py
from sklearn.base import BaseEstimator, TransformerMixin
import numpy as np
class DistanceEncoder(BaseEstimator, TransformerMixin):
"""
Clase que devuelve la distancia a un punto específico según latitud/longitud.
MAGNITUDES: Distancias en <km> y latitudes/longitudes en <minutos>.
LÓGICA: Se suma el cuadrado de la diferencias de las latitudes/longitudes y se obtiene la raiz cuadrada.
El resultado se obtiene en <minutos>, por lo que se devide entre 60 para transformarlo en <grados> y
se multiplica por 6370 (radio de la Tierra en <km>) para pasarlo a <km>.
:param X: DataFrame sobre el que se van a realizar los cambios.
:param new_columns: Nombre con el se crea la nueva columna.
:param transformed_columns: Columnas de donde obtener los datos. Las magnitudes de estas columnas deben de ser en
"minutos".
:param origin: Punto de referencia para obtener las distancias.
:return: Devuelve el DataFrame modificado con la nueva columna, que contiene la distancia al punto seleccionado en
kilometros.
"""
def __init__(self, new_columns, transformed_columns, origin):
super().__init__()
self.new_columns = new_columns
self.transformed_columns = transformed_columns
self.origin = origin
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X_ = X.copy()
X_[self.new_columns] = (np.sqrt((X_[self.transformed_columns[0]] - self.origin[0]) ** 2 +
(X_[self.transformed_columns[1]] - self.origin[1]) ** 2)
/ 60) * 6371
return X_
| es | 0.955222 | Clase que devuelve la distancia a un punto específico según latitud/longitud.
MAGNITUDES: Distancias en <km> y latitudes/longitudes en <minutos>.
LÓGICA: Se suma el cuadrado de la diferencias de las latitudes/longitudes y se obtiene la raiz cuadrada.
El resultado se obtiene en <minutos>, por lo que se devide entre 60 para transformarlo en <grados> y
se multiplica por 6370 (radio de la Tierra en <km>) para pasarlo a <km>.
:param X: DataFrame sobre el que se van a realizar los cambios.
:param new_columns: Nombre con el se crea la nueva columna.
:param transformed_columns: Columnas de donde obtener los datos. Las magnitudes de estas columnas deben de ser en
"minutos".
:param origin: Punto de referencia para obtener las distancias.
:return: Devuelve el DataFrame modificado con la nueva columna, que contiene la distancia al punto seleccionado en
kilometros. | 3.026276 | 3 |
BeesEtAl/F3_Fly.py | FJFranklin/BeesEtAl | 1 | 6622053 | import numpy as np
from .Base_Automaton import Base_Automaton
class F3_Fly(object):
    """One fly in the F3 'garden' swarm.

    Each fly keeps a current position, a personal best, and an associated
    MESO position, and explores via three mechanisms: local 'bee' probes
    (shell radius chosen by a learning automaton), attraction towards
    better-ranked flies, and global restarts from banked scout solutions.
    """

    def __init__(self, garden, id_no, gender, orientation):
        self.G = garden                  # the F3_Garden object
        self.id_no = id_no               # a reference number to identify this fly
        self.gender = gender             # 'M', 'N' or 'F'
        self.orientation = orientation   # list of one or more genders
        # Automaton with (2 + bee_shells) cells; it learns which search
        # radius "shell" to use for bee probes via reward/punish feedback.
        self.automaton = Base_Automaton(2 + self.G.bee_shells, self.G.bee_reward, self.G.bee_punish)
        self.X = None                    # current position
        self.best_X = None               # best personal position
        self.best_XM = None              # associated MESO position

    def X_from_MESO(self):
        """Return best_X with each differing coordinate swapped to the MESO
        value with 50% probability (a random crossover of best_X/best_XM)."""
        indices = []
        if np.array_equal(self.best_X, self.best_XM):
            X = self.best_X
        else:
            X = np.copy(self.best_X)
            for ix in range(0, len(X)):
                if X[ix] != self.best_XM[ix]:
                    if np.random.rand(1) < 0.5:
                        X[ix] = self.best_XM[ix]
                        indices.append(ix)
            if self.G.costfn.verbose:
                print(' >8< Bee: MESO = {i}'.format(i=indices))
        return X

    def bees(self, count):
        """Send out `count` bee probes around the MESO-mixed personal best.

        The automaton cell selects the neighbourhood: cell 0 is a Gaussian
        jitter at the base radius, intermediate cells are uniform spheres of
        growing radius, and the last cell is a long-range exponential jump.
        The chosen cell is rewarded when the probe improves the personal
        best, punished otherwise.
        """
        if self.G.costfn.verbose:
            print('==== Fly {p} (gender={g}, orientation={o}): #bees={b}, radius={r}'.format(p=self.id_no, g=self.gender, o=self.orientation, b=count, r=self.G.bee_radius))
        for b in range(0, count):
            meso_X = self.X_from_MESO()
            cell = self.automaton.cell()
            if cell == 0:
                new_X = self.G.new_position_in_neighbourhood(meso_X, self.G.bee_radius, 'gauss')
            elif cell < (self.automaton.count - 1):
                new_X = self.G.new_position_in_neighbourhood(meso_X, self.G.bee_radius * cell, 'sphere')
            else:
                # Outermost cell: extend the radius by an exponential draw
                # so occasional very long jumps are possible.
                radius = self.G.bee_radius * (self.automaton.count - 1)
                radius = radius + self.G.rand_exp(radius)
                new_X = self.G.new_position_in_neighbourhood(meso_X, radius, 'sphere')
            if self.G.costfn.calculate_cost(new_X) is not None:
                if self.G.plotter is not None:
                    self.G.plotter.bee(self.G.costfn.XA)
                if self.G.compare(self.G.costfn.XA, self.best_X):
                    if self.G.costfn.verbose:
                        print('(updating personal best)')
                    if self.G.plotter is not None:
                        self.G.plotter.fly(self.gender, self.G.costfn.XA, self.X, None)
                    self.best_X = self.G.costfn.XA
                    self.best_XM = self.G.costfn.XM
                    self.X = self.G.costfn.XA
                    self.automaton.reward(cell)
                else:
                    self.automaton.punish(cell)
        if False:  # this is very noisy
            self.automaton.summarise()

    def new_local_search(self, flies, ranks, radius, jitter):
        """Move towards better-ranked flies and update the personal best.

        flies[0]/ranks[0] refer to this fly itself; the attraction is a
        rank-weighted sum over the better flies. The result is jittered,
        evaluated, and may update the current position and personal best.
        Returns the (possibly unchanged) personal best.
        """
        if self.G.costfn.verbose:
            print('==== Fly {p} (gender={g}, orientation={o}): rank={k}, radius={r}'.format(p=self.id_no, g=self.gender, o=self.orientation, k=ranks[0], r=radius))
        if ranks[0] == 0:  # self-fly is superior to any it is attracted to; let's be narcissistic
            new_X = self.best_X
        else:
            old_X = self.G.baseline(self.X, radius)
            new_X = np.zeros(self.G.Ndim)
            weight = np.zeros(len(flies))
            for f in range(1, len(flies)):
                if ranks[f] < ranks[0]:  # a better fly than self-fly
                    weight[f] = 1 / (1 + ranks[f])
            weight = weight / sum(weight)  # weight must sum to 1; it's a probability set
            for f in range(1, len(flies)):
                if ranks[f] < ranks[0]:  # a better fly than self-fly
                    new_X = new_X + weight[f] * self.G.attraction(flies[f].best_X - old_X, radius)
            new_X = new_X + old_X
        new_X = self.G.new_position_in_neighbourhood(new_X, jitter)
        if self.G.costfn.calculate_cost(new_X) is not None:
            if self.G.compare(self.G.costfn.XA, self.best_X):
                if self.G.costfn.verbose:
                    print('(updating personal best)')
                if self.G.plotter is not None:
                    self.G.plotter.fly(self.gender, self.G.costfn.XA, self.X, None)
                self.best_X = self.G.costfn.XA
                self.best_XM = self.G.costfn.XM
                self.X = self.G.costfn.XA
            else:
                if self.G.plotter is not None:
                    self.G.plotter.fly(self.gender, self.G.costfn.XA, self.X, self.best_X)
                self.X = self.G.costfn.XA
        else:
            if self.G.plotter is not None:
                self.G.plotter.fly(self.gender, self.X, None, self.best_X)
        self.gender = self.G.transition(self.gender)
        return self.best_X  # return the local best solution, even if old

    def new_global_search(self):
        """Restart from a banked scout solution and adopt it as the new
        current position and personal best; returns the personal best."""
        cost, XA, XM = self.G.scout.pop()
        while cost is None:  # shouldn't happen, but could (if solution space is small), so just in case...
            print('* * * No scouts banked! * * *')
            self.G.scout.schedule(1)
            self.G.scout.evaluate(1)
            cost, XA, XM = self.G.scout.pop()  # although, if we exhaust all of space, this will go infinite
        self.X = XA
        self.best_X = XA
        self.best_XM = XM
        if self.G.plotter is not None:
            self.G.plotter.fly(self.gender, self.X, None, None)
        return self.best_X  # return the local best solution, even if old
| import numpy as np
from .Base_Automaton import Base_Automaton
class F3_Fly(object):
def __init__(self, garden, id_no, gender, orientation):
self.G = garden # the F3_Garden object
self.id_no = id_no # a reference number to identify this fly
self.gender = gender # 'M', 'N' or 'F'
self.orientation = orientation # list of one or more genders
self.automaton = Base_Automaton(2 + self.G.bee_shells, self.G.bee_reward, self.G.bee_punish)
self.X = None # current position
self.best_X = None # best personal position
self.best_XM = None # associated MESO position
def X_from_MESO(self):
indices = []
if np.array_equal(self.best_X, self.best_XM):
X = self.best_X
else:
X = np.copy(self.best_X)
for ix in range(0, len(X)):
if X[ix] != self.best_XM[ix]:
if np.random.rand(1) < 0.5:
X[ix] = self.best_XM[ix]
indices.append(ix)
if self.G.costfn.verbose:
print(' >8< Bee: MESO = {i}'.format(i=indices))
return X
def bees(self, count):
if self.G.costfn.verbose:
print('==== Fly {p} (gender={g}, orientation={o}): #bees={b}, radius={r}'.format(p=self.id_no, g=self.gender, o=self.orientation, b=count, r=self.G.bee_radius))
for b in range(0, count):
meso_X = self.X_from_MESO()
cell = self.automaton.cell()
if cell == 0:
new_X = self.G.new_position_in_neighbourhood(meso_X, self.G.bee_radius, 'gauss')
elif cell < (self.automaton.count - 1):
new_X = self.G.new_position_in_neighbourhood(meso_X, self.G.bee_radius * cell, 'sphere')
else:
radius = self.G.bee_radius * (self.automaton.count - 1)
radius = radius + self.G.rand_exp(radius)
new_X = self.G.new_position_in_neighbourhood(meso_X, radius, 'sphere')
if self.G.costfn.calculate_cost(new_X) is not None:
if self.G.plotter is not None:
self.G.plotter.bee(self.G.costfn.XA)
if self.G.compare(self.G.costfn.XA, self.best_X):
if self.G.costfn.verbose:
print('(updating personal best)')
if self.G.plotter is not None:
self.G.plotter.fly(self.gender, self.G.costfn.XA, self.X, None)
self.best_X = self.G.costfn.XA
self.best_XM = self.G.costfn.XM
self.X = self.G.costfn.XA
self.automaton.reward(cell)
else:
self.automaton.punish(cell)
if False: # this is very noisy
self.automaton.summarise()
def new_local_search(self, flies, ranks, radius, jitter):
if self.G.costfn.verbose:
print('==== Fly {p} (gender={g}, orientation={o}): rank={k}, radius={r}'.format(p=self.id_no, g=self.gender, o=self.orientation, k=ranks[0], r=radius))
if ranks[0] == 0: # self-fly is superior to any it is attracted to; let's be narcissistic
new_X = self.best_X
else:
old_X = self.G.baseline(self.X, radius)
new_X = np.zeros(self.G.Ndim)
weight = np.zeros(len(flies))
for f in range(1, len(flies)):
if ranks[f] < ranks[0]: # a better fly than self-fly
weight[f] = 1 / (1 + ranks[f])
weight = weight / sum(weight) # weight must sum to 1; it's a probability set
for f in range(1, len(flies)):
if ranks[f] < ranks[0]: # a better fly than self-fly
new_X = new_X + weight[f] * self.G.attraction(flies[f].best_X - old_X, radius)
new_X = new_X + old_X
new_X = self.G.new_position_in_neighbourhood(new_X, jitter)
if self.G.costfn.calculate_cost(new_X) is not None:
if self.G.compare(self.G.costfn.XA, self.best_X):
if self.G.costfn.verbose:
print('(updating personal best)')
if self.G.plotter is not None:
self.G.plotter.fly(self.gender, self.G.costfn.XA, self.X, None)
self.best_X = self.G.costfn.XA
self.best_XM = self.G.costfn.XM
self.X = self.G.costfn.XA
else:
if self.G.plotter is not None:
self.G.plotter.fly(self.gender, self.G.costfn.XA, self.X, self.best_X)
self.X = self.G.costfn.XA
else:
if self.G.plotter is not None:
self.G.plotter.fly(self.gender, self.X, None, self.best_X)
self.gender = self.G.transition(self.gender)
return self.best_X # return the local best solution, even if old
def new_global_search(self):
cost, XA, XM = self.G.scout.pop()
while cost is None: # shouldn't happen, but could (if solution space is small), so just in case...
print('* * * No scouts banked! * * *')
self.G.scout.schedule(1)
self.G.scout.evaluate(1)
cost, XA, XM = self.G.scout.pop() # although, if we exhaust all of space, this will go infinite
self.X = XA
self.best_X = XA
self.best_XM = XM
if self.G.plotter is not None:
self.G.plotter.fly(self.gender, self.X, None, None)
return self.best_X # return the local best solution, even if old
| en | 0.806999 | # the F3_Garden object # a reference number to identify this fly # 'M', 'N' or 'F' # list of one or more genders # current position # best personal position # associated MESO position #bees={b}, radius={r}'.format(p=self.id_no, g=self.gender, o=self.orientation, b=count, r=self.G.bee_radius)) # this is very noisy # self-fly is superior to any it is attracted to; let's be narcissistic # a better fly than self-fly # weight must sum to 1; it's a probability set # a better fly than self-fly # return the local best solution, even if old # shouldn't happen, but could (if solution space is small), so just in case... # although, if we exhaust all of space, this will go infinite # return the local best solution, even if old | 3.045636 | 3 |
util/control_flow.py | Elric2718/HierGMM | 0 | 6622054 | # -*- coding: utf-8 -*-
"""
Utilities for control flow
"""
class Register:
    """Decorator-based registry (a small "factory pattern" helper).

    Callables register themselves under a name; later the name alone is
    enough to look them up or invoke them, which gets rid of the tedious
    if-else dispatch clauses.
    """

    def __init__(self):
        self._register_map = {}

    def get(self, name):
        """Return the callable registered under *name*, or None."""
        return self._register_map.get(name)

    def build(self, name, *args, **kwargs):
        """Invoke the callable registered under *name* with the given arguments."""
        return self._register_map[name](*args, **kwargs)

    def __call__(self, name):
        def decorator(fn):
            # Refuse duplicate names so registrations cannot silently clobber
            # one another.
            if name in self._register_map:
                raise KeyError("{} has been registered".format(name))
            if fn is not None:
                self._register_map[name] = fn
            return fn
        return decorator
| # -*- coding: utf-8 -*-
"""
Utilities for control flow
"""
class Register:
"""
Register instances are usually used as decorators, which behaves like "factory pattern".
It helps get rid of the tedious if-else clauses.
"""
def __init__(self):
self._register_map = dict()
def get(self, name):
return self._register_map.get(name)
def build(self, name, *args, **kwargs):
return self._register_map[name](*args, **kwargs)
def __call__(self, name):
def _register(func):
if name in self._register_map:
raise KeyError("{} has been registered".format(name))
if func is not None:
self._register_map[name] = func
return func
return _register
| en | 0.892641 | # -*- coding: utf-8 -*- Utilities for control flow Register instances are usually used as decorators, which behaves like "factory pattern". It helps get rid of the tedious if-else clauses. | 3.966385 | 4 |
other_handlers/BaseModelHandler.py | Plawn/petit_ts | 1 | 6622055 | <filename>other_handlers/BaseModelHandler.py
from petit_ts.ts_store import TSTypeStore
from typing import List, Tuple, Optional, Dict, Any, get_type_hints
from petit_type_system import ClassHandler
from petit_ts import TSTypeStore
from pydantic import BaseModel
class BaseModelHandler(ClassHandler):
    """petit_ts handler that renders pydantic BaseModel subclasses as
    TypeScript interfaces (name + field mapping)."""

    def is_mapping(self) -> bool:
        # BaseModel subclasses are emitted as field mappings (interfaces).
        return True

    def should_handle(self, cls: Any, origin: Any, args: List[Any]) -> bool:
        # Bug fix: issubclass() raises TypeError when `cls` is not a class
        # (e.g. a generic alias such as List[int]); guard with isinstance.
        return isinstance(cls, type) and issubclass(cls, BaseModel)

    def build(self, cls: BaseModel, origin, args) -> Tuple[Optional[str], Dict[str, Any]]:
        name = cls.__name__
        fields = get_type_hints(cls)  # resolves forward references too
        return name, fields
| <filename>other_handlers/BaseModelHandler.py
from petit_ts.ts_store import TSTypeStore
from typing import List, Tuple, Optional, Dict, Any, get_type_hints
from petit_type_system import ClassHandler
from petit_ts import TSTypeStore
from pydantic import BaseModel
class BaseModelHandler(ClassHandler):
def is_mapping(self) -> bool:
return True
def should_handle(self, cls: Any, origin: Any, args: List[Any]) -> bool:
return issubclass(cls, BaseModel)
def build(self, cls: BaseModel, origin, args) -> Tuple[Optional[str], Dict[str, Any]]:
name = cls.__name__
fields = get_type_hints(cls)
return name, fields
| none | 1 | 2.069968 | 2 | |
aws_text_insight/lbd/response.py | MacHu-GWU/aws_text_insight-project | 0 | 6622056 | <filename>aws_text_insight/lbd/response.py
# -*- coding: utf-8 -*-
"""
Response object.
"""
import typing
import attr
from attrs_mate import AttrsClass
@attr.s
class Error(AttrsClass):
    """Serializable error details attached to a failed Response."""
    traceback: str = attr.ib()  # full traceback text captured at failure time
@attr.s
class Response(AttrsClass):
    """Standard response envelope: a message plus optional payload/error."""
    message: str = attr.ib()  # human-readable status message
    data: typing.Union[dict, None] = attr.ib(default=None)  # payload on success
    error: typing.Union[Error, None] = Error.ib_nested(default=None)  # details on failure
| <filename>aws_text_insight/lbd/response.py
# -*- coding: utf-8 -*-
"""
Response object.
"""
import typing
import attr
from attrs_mate import AttrsClass
@attr.s
class Error(AttrsClass):
traceback: str = attr.ib()
@attr.s
class Response(AttrsClass):
message: str = attr.ib()
data: typing.Union[dict, None] = attr.ib(default=None)
error: typing.Union[Error, None] = Error.ib_nested(default=None)
| en | 0.822329 | # -*- coding: utf-8 -*- Response object. | 2.20751 | 2 |
medium_test.py | devsearchcomponent/redux-python | 1 | 6622057 | from typing import *
import asyncio
import pytest
import redux
@pytest.fixture(scope="session", autouse=True)
def setup_environment():
redux.RemoteManager().RECONNECT_TIMEOUT = 0.1
@redux.behavior("sub:local:", redux.SubscribeRecycleOption())
class LocalMediumSubscribeReducer(redux.Reducer):
def __init__(self):
mapping = {
"name": self.name,
}
super(LocalMediumSubscribeReducer, self).__init__(mapping)
async def name(self, action, state=None):
if action == "setName":
state = "Kenny"
return state
class L(redux.Listener):
async def on_changed(self, changed_key: List[str], state: Dict[str, Any]):
print(changed_key)
@redux.behavior("listener:", redux.IdleTimeoutRecycleOption(5))
class LocalMediumListenerReducer(redux.Reducer):
async def action_received(self, action: redux.Action):
if action == "sub":
await redux.LocalMedium(self.store).subscribe(self.key, "sub:local:1", L())
async def local_subscribe():
store = redux.Store([LocalMediumSubscribeReducer, LocalMediumListenerReducer])
await store.dispatch("listener:1", redux.Action("sub"))
await store.dispatch("sub:local:1", redux.Action("setName"))
await asyncio.sleep(5)
def test_idle():
asyncio.get_event_loop().run_until_complete(local_subscribe())
| from typing import *
import asyncio
import pytest
import redux
@pytest.fixture(scope="session", autouse=True)
def setup_environment():
redux.RemoteManager().RECONNECT_TIMEOUT = 0.1
@redux.behavior("sub:local:", redux.SubscribeRecycleOption())
class LocalMediumSubscribeReducer(redux.Reducer):
def __init__(self):
mapping = {
"name": self.name,
}
super(LocalMediumSubscribeReducer, self).__init__(mapping)
async def name(self, action, state=None):
if action == "setName":
state = "Kenny"
return state
class L(redux.Listener):
async def on_changed(self, changed_key: List[str], state: Dict[str, Any]):
print(changed_key)
@redux.behavior("listener:", redux.IdleTimeoutRecycleOption(5))
class LocalMediumListenerReducer(redux.Reducer):
async def action_received(self, action: redux.Action):
if action == "sub":
await redux.LocalMedium(self.store).subscribe(self.key, "sub:local:1", L())
async def local_subscribe():
store = redux.Store([LocalMediumSubscribeReducer, LocalMediumListenerReducer])
await store.dispatch("listener:1", redux.Action("sub"))
await store.dispatch("sub:local:1", redux.Action("setName"))
await asyncio.sleep(5)
def test_idle():
asyncio.get_event_loop().run_until_complete(local_subscribe())
| none | 1 | 2.102119 | 2 | |
experiments/problems/functions/hyperellipsoid.py | QuintonWeenink/investigating-cpso-for-nn-training | 2 | 6622058 | import numpy as np
from mlpy.numberGenerator.bounds import Bounds
from experiments.problems.functions.structure.function import Function
class Hyperellipsoid(Function):
    """Axis-weighted hyper-ellipsoid benchmark: f(x) = sum_i i * x_i^2
    (1-based i), evaluated on the box [-5.12, 5.12]^n."""

    def function(self, x):
        weights = np.arange(1, len(x) + 1)
        return np.sum(weights * np.square(x))

    def getBounds(self):
        return Bounds(-5.12, 5.12)

    def test(self):
        # Sanity checks: f([1,1]) = 1+2 = 3, f([2,2]) = 4+8 = 12.
        assert self.function(np.array([1, 1])) == 3
        assert self.function(np.array([2, 2])) == 12
from mlpy.numberGenerator.bounds import Bounds
from experiments.problems.functions.structure.function import Function
class Hyperellipsoid(Function):
def function(self, x):
return np.sum(np.arange(1, len(x) + 1) * np.power(x, 2))
def getBounds(self):
return Bounds(-5.12, 5.12)
def test(self):
assert(3 == self.function(np.array([1, 1])))
assert(12 == self.function(np.array([2, 2]))) | none | 1 | 2.833435 | 3 | |
python version/testing.py | asiddi24/box_model | 0 | 6622059 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 6 10:27:14 2019
@author: asiddi24
"""
'''Running the box_model'''
'''Initializing variables'''
from box_model import fourbox

# Inputs passed positionally to fourbox() below.
# NOTE(review): parameter meanings/units are not documented in this file;
# the guesses in these comments should be confirmed against box_model.fourbox.
N=4000        # presumably the number of integration steps
Kv=1e-5       # vertical diffusivity? -- TODO confirm
AI=1000
Mek=25e6
Aredi=1000
M_s=15e6
D0=500        # initial depth? -- TODO confirm
T0s=2         # T0* : initial temperatures (south / north / low-lat / deep boxes?)
T0n=4
T0l=17
T0d=3
S0s=34        # S0* : initial salinities for the same four boxes?
S0n=35
S0l=36
S0d=34.5
Fws=1e6       # Fw* : freshwater fluxes (south / north)? -- TODO confirm
Fwn=1e6
epsilon=1.2e-4
# Run the model; the unpacked names suggest overturning/upwelling/eddy
# transports, a depth, temperature, salinity and density output.
(M_n, M_upw, M_eddy, D_low, T, S, sigma0) = fourbox(N,Kv,AI,Mek,Aredi,M_s,D0,T0s,T0n,T0l,T0d,S0s,S0n,S0l,S0d,Fws,Fwn,epsilon)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 6 10:27:14 2019
@author: asiddi24
"""
'''Running the box_model'''
'''Initializing variables'''
from box_model import fourbox
N=4000
Kv=1e-5
AI=1000
Mek=25e6
Aredi=1000
M_s=15e6
D0=500
T0s=2
T0n=4
T0l=17
T0d=3
S0s=34
S0n=35
S0l=36
S0d=34.5
Fws=1e6
Fwn=1e6
epsilon=1.2e-4
(M_n, M_upw, M_eddy, D_low, T, S, sigma0) = fourbox(N,Kv,AI,Mek,Aredi,M_s,D0,T0s,T0n,T0l,T0d,S0s,S0n,S0l,S0d,Fws,Fwn,epsilon) | en | 0.520058 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Thu Jun 6 10:27:14 2019 @author: asiddi24 Running the box_model Initializing variables | 2.073694 | 2 |
test_pysolar.py | JohnOmernik/solarpi | 1 | 6622060 | #!/usr/bin/python3
from pysolar import solar
import time
import datetime
import pytz
import os.path
import sys  # bug fix: sys.exit()/sys usage below -- sys was never imported

# Sentinels meaning "not configured yet"; env.list must override all three.
MYLAT = 1000.0
MYLNG = 1000.0
STRTZ = ""
ENV_FILE = "env.list"

if not os.path.isfile(ENV_FILE):
    # Bug fix: the original printed the literal "%s" (no format argument).
    print("ENV_FILE at %s not found - exiting" % ENV_FILE)
    sys.exit(1)

with open(ENV_FILE, "r") as e:  # context manager: no leaked file handle
    lines = e.read()

for line in lines.split("\n"):
    myline = line.strip()
    if myline.startswith("#") or myline == "":
        continue  # skip comment and blank lines
    # Split on the *first* '=' only, so values may themselves contain '='.
    key, _, value = myline.partition("=")
    if key == "MYLAT":
        MYLAT = float(value)
    elif key == "MYLNG":
        MYLNG = float(value)
    elif key == "STRTZ":
        STRTZ = value

if MYLAT == 1000.0 or MYLNG == 1000.0 or STRTZ == "":
    print("ENV Values not found please check your env.list file to ensure valid values exist for MYLAT, MYLNG, and STRTZ")
    sys.exit(1)

print("==================")
print("Starting with values:")
print("MYLAT: %s" % MYLAT)
print("MYLNG: %s" % MYLNG)
print("STRTZ: %s" % STRTZ)
print("=================")
print("")
def main():
    """Print the sun's altitude/azimuth for the configured location every
    10 seconds, forever (stop with Ctrl-C)."""
    timezone = pytz.timezone(STRTZ)
    #date = datetime.datetime(2018, 10, 22, 13, 20, 10, 130320)
    while True:
        date = datetime.datetime.now()
        mydate = timezone.localize(date)  # attach the configured timezone
        mystrtime = mydate.strftime("%Y-%m-%d %H:%M:%S")
        curalt, curaz = get_alt_az(mydate)
        print("%s - Alt: %s - Az: %s" % (mystrtime, curalt, curaz))
        time.sleep(10)
def get_alt_az(dt):
    """Return the sun's (altitude, azimuth) at MYLAT/MYLNG for datetime *dt*,
    as reported by pysolar."""
    altitude = solar.get_altitude(MYLAT, MYLNG, dt)
    azimuth = solar.get_azimuth(MYLAT, MYLNG, dt)
    return altitude, azimuth
if __name__ == '__main__':
main()
| #!/usr/bin/python3
from pysolar import solar
import time
import datetime
import pytz
import os.path
MYLAT = 1000.0
MYLNG = 1000.0
STRTZ = ""
ENV_FILE = "env.list"
if not os.path.isfile(ENV_FILE):
print("ENV_FILE at %s not found - exiting")
sys.exit(1)
e = open(ENV_FILE, "r")
lines = e.read()
e.close()
for line in lines.split("\n"):
myline = line.strip()
if myline.find("#") == 0:
pass
elif myline != "":
arline = myline.split("=")
if arline[0] == "MYLAT":
MYLAT = float(arline[1])
if arline[0] == "MYLNG":
MYLNG = float(arline[1])
if arline[0] == "STRTZ":
STRTZ = arline[1]
if MYLAT == 1000.0 or MYLNG == 1000.0 or STRTZ == "":
print("ENV Values not found please check your env.list file to ensure valid values exist for MYLAT, MYLNG, and STRTZ")
sys.exit(1)
print("==================")
print("Starting with values:")
print("MYLAT: %s" % MYLAT)
print("MYLNG: %s" % MYLNG)
print("STRTZ: %s" % STRTZ)
print("=================")
print("")
def main():
timezone = pytz.timezone(STRTZ)
#date = datetime.datetime(2018, 10, 22, 13, 20, 10, 130320)
while True:
date = datetime.datetime.now()
mydate = timezone.localize(date)
mystrtime = mydate.strftime("%Y-%m-%d %H:%M:%S")
curalt, curaz = get_alt_az(mydate)
print("%s - Alt: %s - Az: %s" % (mystrtime, curalt, curaz))
time.sleep(10)
def get_alt_az(dt):
alt = solar.get_altitude(MYLAT, MYLNG, dt)
az = solar.get_azimuth(MYLAT, MYLNG, dt)
return alt, az
if __name__ == '__main__':
main()
| en | 0.44471 | #!/usr/bin/python3 #date = datetime.datetime(2018, 10, 22, 13, 20, 10, 130320) | 2.9041 | 3 |
array/0922_sort_array_by_parity_ii/0922_sort_array_by_parity_ii.py | zdyxry/LeetCode | 6 | 6622061 | <reponame>zdyxry/LeetCode
# -*- coding: utf-8 -*-
class Solution(object):
def sortArrayByParityII(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
j = 1
for i in xrange(0, len(A), 2):
if A[i] % 2:
while A[j] % 2:
j += 2
A[i], A[j] = A[j], A[i]
print(A)
return A
# Ad-hoc smoke test: prints the rearranged array.
A = [4,2,5,7]
print(Solution().sortArrayByParityII(A))
class Solution(object):
def sortArrayByParityII(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
j = 1
for i in xrange(0, len(A), 2):
if A[i] % 2:
while A[j] % 2:
j += 2
A[i], A[j] = A[j], A[i]
print(A)
return A
A = [4,2,5,7]
print(Solution().sortArrayByParityII(A)) | en | 0.504075 | # -*- coding: utf-8 -*- :type A: List[int] :rtype: List[int] | 3.455213 | 3 |
etl/scripts.py | GrishenkovP/flask_unit_economy | 1 | 6622062 | # импорт библиотек
from typing import List, Tuple, Dict, Set
import pandas as pd
import sqlite3
import datetime as dt
def func_extract_transform(path: str) -> pd.DataFrame:
    """Load the raw sales CSV and aggregate it down to one row per receipt.

    Steps: normalise column names to lower case, parse dates (dropping the
    time component), compute per-line amounts, fill missing customer ids
    with the sentinel 777777, round amounts to whole numbers, then group
    to the (invoice, date, customer, country) level.
    """
    df = pd.read_csv(path, sep=',')
    # Lower-case all column names.
    df.columns = [column.lower() for column in df.columns]
    # Drop the time component, then parse the remaining date part.
    df['invoicedate'] = df['invoicedate'].apply(lambda value: value.split(' ')[0])
    df['invoicedate'] = pd.to_datetime(df['invoicedate'], format='%m/%d/%Y')
    # Per-line purchase amount.
    df['amount'] = df['quantity'] * df['unitprice']
    # Columns not needed for further analysis.
    df = df.drop(['stockcode', 'description', 'quantity', 'unitprice'], axis=1)
    # Sentinel customer id for purchases without a known customer.
    df = df.fillna(value={'customerid': 777777})
    df['customerid'] = df['customerid'].astype('int')
    # Round line amounts to whole numbers.
    df = df.round({'amount': 0})
    df['amount'] = df['amount'].astype('int')
    # Discard any remaining incomplete rows before grouping.
    df = df.dropna()
    # Aggregate to one row per receipt.
    grouped = (df.groupby(by=['invoiceno', 'invoicedate', 'customerid', 'country'])
                 .agg({'amount': sum})
                 .reset_index())
    # Dates back to text for easy loading into the database.
    grouped['invoicedate'] = grouped['invoicedate'].dt.strftime('%Y-%m-%d')
    return grouped
def func_val_list(df: pd.DataFrame) -> List:
"""Трансформируем датафрейм в список списков"""
val_list = df.values.tolist()
return val_list
def func_sqlite_connection(con: str, records: List):
"""Создаем базу данных. Создаем таблицу для записей фактов продаж и заполняем ее значениями из датасета"""
sqlite_connection = None
try:
# Создаем соединение
sqlite_connection = sqlite3.connect(con)
# Создаем курсор
cur = sqlite_connection.cursor()
# Создаем таблицу
cur.execute("""CREATE TABLE IF NOT EXISTS sales (
invoiceno TEXT NOT NULL,
invoicedate TEXT NOT NULL,
customerid INTEGER NOT NULL,
country TEXT NOT NULL,
amount INTEGER NOT NULL)""")
# Добавляем записи
cur.executemany("INSERT INTO sales VALUES(?,?,?,?,?)", records)
# Сохраняем транзакцию
sqlite_connection.commit()
# Закрываем курсор
cur.close()
except sqlite3.Error as err:
print("Ошибка выполнения запроса", err)
finally:
# Закрываем соединение
if sqlite_connection is not None:
sqlite_connection.close()
print("Соединение закрыто!")
| # импорт библиотек
from typing import List, Tuple, Dict, Set
import pandas as pd
import sqlite3
import datetime as dt
def func_extract_transform(path: str) -> pd.DataFrame:
"""Предварительная обработка датасета"""
# Считываем датасет
df = pd.read_csv(path, sep=',')
# Приводим названия столбцов датасета к нижнему регистру
list_col = list(map(str.lower, df.columns))
df.columns = list_col
# Избавляемся от времени и трансформируем строку-дату в правильный формат
df['invoicedate'] = df['invoicedate'].apply(lambda x: x.split(' ')[0])
df['invoicedate'] = pd.to_datetime(df['invoicedate'], format='%m/%d/%Y')
# Рассчитываем сумму покупки по каждому товару
df['amount'] = df['quantity'] * df['unitprice']
# Удаляем ненужные для дальнейшего анализа столбцы
df = df.drop(['stockcode', 'description', 'quantity', 'unitprice'], axis=1)
# Заполняем строки, где не указан номер покупателя, константой 777777
values = {'customerid': 777777}
df = df.fillna(value=values)
df['customerid'] = df['customerid'].astype('int')
# Округляем общую сумму покупки до целового числа
df = df.round({'amount': 0})
df['amount'] = df['amount'].astype('int')
# Удаляем все строки, в которых есть пропуски перед группировкой
df = df.dropna()
# Группируем строки, чтобы прийти к детализации до уровня одного чека
df_result = df.groupby(by=['invoiceno', 'invoicedate', 'customerid', 'country']).agg({'amount': sum}).reset_index()
# Трансформируем даты в текст для упрощения загрузки в БД
df_result['invoicedate'] = df_result['invoicedate'].dt.strftime('%Y-%m-%d')
return df_result
def func_val_list(df: pd.DataFrame) -> List:
"""Трансформируем датафрейм в список списков"""
val_list = df.values.tolist()
return val_list
def func_sqlite_connection(con: str, records: List):
"""Создаем базу данных. Создаем таблицу для записей фактов продаж и заполняем ее значениями из датасета"""
sqlite_connection = None
try:
# Создаем соединение
sqlite_connection = sqlite3.connect(con)
# Создаем курсор
cur = sqlite_connection.cursor()
# Создаем таблицу
cur.execute("""CREATE TABLE IF NOT EXISTS sales (
invoiceno TEXT NOT NULL,
invoicedate TEXT NOT NULL,
customerid INTEGER NOT NULL,
country TEXT NOT NULL,
amount INTEGER NOT NULL)""")
# Добавляем записи
cur.executemany("INSERT INTO sales VALUES(?,?,?,?,?)", records)
# Сохраняем транзакцию
sqlite_connection.commit()
# Закрываем курсор
cur.close()
except sqlite3.Error as err:
print("Ошибка выполнения запроса", err)
finally:
# Закрываем соединение
if sqlite_connection is not None:
sqlite_connection.close()
print("Соединение закрыто!")
| ru | 0.980142 | # импорт библиотек Предварительная обработка датасета # Считываем датасет # Приводим названия столбцов датасета к нижнему регистру # Избавляемся от времени и трансформируем строку-дату в правильный формат # Рассчитываем сумму покупки по каждому товару # Удаляем ненужные для дальнейшего анализа столбцы # Заполняем строки, где не указан номер покупателя, константой 777777 # Округляем общую сумму покупки до целового числа # Удаляем все строки, в которых есть пропуски перед группировкой # Группируем строки, чтобы прийти к детализации до уровня одного чека # Трансформируем даты в текст для упрощения загрузки в БД Трансформируем датафрейм в список списков Создаем базу данных. Создаем таблицу для записей фактов продаж и заполняем ее значениями из датасета # Создаем соединение # Создаем курсор # Создаем таблицу CREATE TABLE IF NOT EXISTS sales (
invoiceno TEXT NOT NULL,
invoicedate TEXT NOT NULL,
customerid INTEGER NOT NULL,
country TEXT NOT NULL,
amount INTEGER NOT NULL) # Добавляем записи # Сохраняем транзакцию # Закрываем курсор # Закрываем соединение | 3.045467 | 3 |
opportune/tests/test_search.py | Mildly-Sketchy/Mildly-Sketchy | 0 | 6622063 | from pyramid import testing
def test_render_search_view(dummy_request):
"""Test search view"""
from ..views.search import search_view
response = search_view(dummy_request)
assert type(response) == dict
def test_search_view_no_keywords(dummy_request):
"""Test search view response when the user does not give any keywords"""
from ..views.search import search_view
response = search_view(dummy_request)
len(response) == 0
assert type(response) == dict
def test_search_view_with_no_keywords(dummy_request):
"""Test search view with no keywords"""
from ..views.search import search_view
dummy_request.method = 'GET'
response = search_view(dummy_request)
assert response == {'message': 'You do not have any keywords saved. Add one!'}
def test_search_view_gets_keywords(dummy_request):
'''Test search view returns keywords with fake authenticated user'''
from ..views.search import search_view
from ..models.accounts import Account
from ..models.keywords import Keyword
from ..models.association import Association
config = testing.setUp()
config.testing_securitypolicy(
userid='codefellows', permissive=True
)
new_account = Account(
username='codefellows',
password='password',
email='<EMAIL>'
)
dummy_request.dbsession.add(new_account)
new_keyword = Keyword()
new_keyword.keyword = 'developer'
dummy_request.dbsession.add(new_keyword)
dummy_request.dbsession.commit()
new_association = Association()
new_association.user_id = 'codefellows'
new_association.keyword_id = 'developer'
dummy_request.dbsession.add(new_association)
dummy_request.dbsession.commit()
response = search_view(dummy_request)
assert response['keywords'][0].keyword == 'developer'
def test_handle_keywords_view_bad_request(dummy_request):
'''test handle keywords bad request'''
from ..views.search import handle_keywords
from pyramid.httpexceptions import HTTPBadRequest
dummy_request.method = 'POST'
response = handle_keywords(dummy_request)
assert response.status_code == 400
assert isinstance(response, HTTPBadRequest)
def test_handle_keywords_gets_keyword(dummy_request):
'''test that it gets the key word'''
from ..views.search import handle_keywords
from pyramid.httpexceptions import HTTPFound
dummy_request.POST = {'keyword': 'web developer'}
dummy_request.method = 'POST'
response = handle_keywords(dummy_request)
assert isinstance(response, HTTPFound)
def test_handle_keywords_number_as_a_keyword_throws_error(dummy_request):
'''test that a number throws the correct error'''
from ..views.search import handle_keywords
dummy_request.POST = {'keyword': '4'}
dummy_request.method = 'POST'
response = handle_keywords(dummy_request)
assert response == {'error': 'Search term cannot be a number.'}
def test_delete_keyword_view_bad_request(dummy_request):
'''test delete keywords bad request'''
from ..views.search import delete_keyword
from pyramid.httpexceptions import HTTPBadRequest
dummy_request.method = 'POST'
response = delete_keyword(dummy_request)
assert response.status_code == 400
assert isinstance(response, HTTPBadRequest)
| from pyramid import testing
def test_render_search_view(dummy_request):
"""Test search view"""
from ..views.search import search_view
response = search_view(dummy_request)
assert type(response) == dict
def test_search_view_no_keywords(dummy_request):
"""Test search view response when the user does not give any keywords"""
from ..views.search import search_view
response = search_view(dummy_request)
len(response) == 0
assert type(response) == dict
def test_search_view_with_no_keywords(dummy_request):
"""Test search view with no keywords"""
from ..views.search import search_view
dummy_request.method = 'GET'
response = search_view(dummy_request)
assert response == {'message': 'You do not have any keywords saved. Add one!'}
def test_search_view_gets_keywords(dummy_request):
'''Test search view returns keywords with fake authenticated user'''
from ..views.search import search_view
from ..models.accounts import Account
from ..models.keywords import Keyword
from ..models.association import Association
config = testing.setUp()
config.testing_securitypolicy(
userid='codefellows', permissive=True
)
new_account = Account(
username='codefellows',
password='password',
email='<EMAIL>'
)
dummy_request.dbsession.add(new_account)
new_keyword = Keyword()
new_keyword.keyword = 'developer'
dummy_request.dbsession.add(new_keyword)
dummy_request.dbsession.commit()
new_association = Association()
new_association.user_id = 'codefellows'
new_association.keyword_id = 'developer'
dummy_request.dbsession.add(new_association)
dummy_request.dbsession.commit()
response = search_view(dummy_request)
assert response['keywords'][0].keyword == 'developer'
def test_handle_keywords_view_bad_request(dummy_request):
'''test handle keywords bad request'''
from ..views.search import handle_keywords
from pyramid.httpexceptions import HTTPBadRequest
dummy_request.method = 'POST'
response = handle_keywords(dummy_request)
assert response.status_code == 400
assert isinstance(response, HTTPBadRequest)
def test_handle_keywords_gets_keyword(dummy_request):
'''test that it gets the key word'''
from ..views.search import handle_keywords
from pyramid.httpexceptions import HTTPFound
dummy_request.POST = {'keyword': 'web developer'}
dummy_request.method = 'POST'
response = handle_keywords(dummy_request)
assert isinstance(response, HTTPFound)
def test_handle_keywords_number_as_a_keyword_throws_error(dummy_request):
'''test that a number throws the correct error'''
from ..views.search import handle_keywords
dummy_request.POST = {'keyword': '4'}
dummy_request.method = 'POST'
response = handle_keywords(dummy_request)
assert response == {'error': 'Search term cannot be a number.'}
def test_delete_keyword_view_bad_request(dummy_request):
'''test delete keywords bad request'''
from ..views.search import delete_keyword
from pyramid.httpexceptions import HTTPBadRequest
dummy_request.method = 'POST'
response = delete_keyword(dummy_request)
assert response.status_code == 400
assert isinstance(response, HTTPBadRequest)
| en | 0.794286 | Test search view Test search view response when the user does not give any keywords Test search view with no keywords Test search view returns keywords with fake authenticated user test handle keywords bad request test that it gets the key word test that a number throws the correct error test delete keywords bad request | 2.51929 | 3 |
storymaker/english.py | bitcraft/storymaker | 6 | 6622064 | from pygoap.precepts import *
def name(p):
try:
return p.name
except AttributeError:
return p
def make_english(caller, p):
"""
create an english phrase from a precept
very simple!!
:rtype : str
"""
if isinstance(p, DatumPrecept):
if p.entity is caller:
if p.name == "name":
return "My name is {}.".format(p.value)
return "I {} is {}.".format(p.name, name(p.value))
elif p.entity is None:
return "Did you know that {} is {}?".format(p.name,
name(p.value))
else:
if p.name == "name":
return "His name is {}.".format(p.value)
return "Did you know that {}\'s {} is {}?".format(
name(p.entity), p.name, name(p.value))
elif isinstance(p, ActionPrecept):
if p.entity is caller:
if p.object is None:
return "I did {}!".format(p.action)
else:
return "I did {} with {}!".format(p.action, name(p.object))
else:
if p.object is None:
return "I saw {} doing {}!".format(p.entity.name, p.action)
else:
return "I saw {} doing {} with {}!".format(p.entity.name,
p.action,
name(p.object))
elif isinstance(p, SpeechPrecept):
if p.entity is caller:
return 'I said "{}"'.format(p.message)
else:
return 'I heard {} say "{}"'.format(p.entity.name, p.message)
elif isinstance(p, TimePrecept):
return "The time is now {}.".format(p.time)
elif isinstance(p, MoodPrecept):
if p.entity is caller:
if p.value < .5:
return 'I am not {}.'.format(p.name)
else:
return 'I am {}.'.format(p.name)
else:
if p.value < .5:
return '{} is feeling not {}.'.format(name(p.entity),
p.name)
else:
return '{} is feeling {}.'.format(name(p.entity), p.name)
else:
return "I don't know how to express [{}].".format(p)
| from pygoap.precepts import *
def name(p):
try:
return p.name
except AttributeError:
return p
def make_english(caller, p):
"""
create an english phrase from a precept
very simple!!
:rtype : str
"""
if isinstance(p, DatumPrecept):
if p.entity is caller:
if p.name == "name":
return "My name is {}.".format(p.value)
return "I {} is {}.".format(p.name, name(p.value))
elif p.entity is None:
return "Did you know that {} is {}?".format(p.name,
name(p.value))
else:
if p.name == "name":
return "His name is {}.".format(p.value)
return "Did you know that {}\'s {} is {}?".format(
name(p.entity), p.name, name(p.value))
elif isinstance(p, ActionPrecept):
if p.entity is caller:
if p.object is None:
return "I did {}!".format(p.action)
else:
return "I did {} with {}!".format(p.action, name(p.object))
else:
if p.object is None:
return "I saw {} doing {}!".format(p.entity.name, p.action)
else:
return "I saw {} doing {} with {}!".format(p.entity.name,
p.action,
name(p.object))
elif isinstance(p, SpeechPrecept):
if p.entity is caller:
return 'I said "{}"'.format(p.message)
else:
return 'I heard {} say "{}"'.format(p.entity.name, p.message)
elif isinstance(p, TimePrecept):
return "The time is now {}.".format(p.time)
elif isinstance(p, MoodPrecept):
if p.entity is caller:
if p.value < .5:
return 'I am not {}.'.format(p.name)
else:
return 'I am {}.'.format(p.name)
else:
if p.value < .5:
return '{} is feeling not {}.'.format(name(p.entity),
p.name)
else:
return '{} is feeling {}.'.format(name(p.entity), p.name)
else:
return "I don't know how to express [{}].".format(p)
| en | 0.724139 | create an english phrase from a precept very simple!! :rtype : str | 3.083351 | 3 |
dashboard/layout/navbar.py | PPBP-2021/photogrammetry | 0 | 6622065 | import dash
import dash_bootstrap_components as dbc
from dash import dcc
from dash import html
layout = dbc.NavbarSimple(
children=[
dbc.DropdownMenu(
children=[
dbc.DropdownMenuItem("Image Segmentation", href="segmentation"),
dbc.DropdownMenuItem("Litophane", href="litophane"),
dbc.DropdownMenuItem("Stereo Litophane", href="litophane_from_stereo"),
],
nav=True,
in_navbar=True,
label="Modules",
),
],
brand="👁👅👁 🗿 Photogrammetry Practical",
brand_href="/",
color="primary",
dark=True,
)
| import dash
import dash_bootstrap_components as dbc
from dash import dcc
from dash import html
layout = dbc.NavbarSimple(
children=[
dbc.DropdownMenu(
children=[
dbc.DropdownMenuItem("Image Segmentation", href="segmentation"),
dbc.DropdownMenuItem("Litophane", href="litophane"),
dbc.DropdownMenuItem("Stereo Litophane", href="litophane_from_stereo"),
],
nav=True,
in_navbar=True,
label="Modules",
),
],
brand="👁👅👁 🗿 Photogrammetry Practical",
brand_href="/",
color="primary",
dark=True,
)
| none | 1 | 2.174537 | 2 | |
tests/Transform/test_Transform.py | kamilazdybal/multipy | 0 | 6622066 | import unittest
import numpy as np
import multipy
################################################################################
################################################################################
####
#### Class: Transform
####
################################################################################
################################################################################
class Transform(unittest.TestCase):
def test__Transform__allowed_calls(self):
try:
transform = multipy.Transform()
except Exception:
self.assertTrue(False)
| import unittest
import numpy as np
import multipy
################################################################################
################################################################################
####
#### Class: Transform
####
################################################################################
################################################################################
class Transform(unittest.TestCase):
def test__Transform__allowed_calls(self):
try:
transform = multipy.Transform()
except Exception:
self.assertTrue(False)
| de | 0.8686 | ################################################################################ ################################################################################ #### #### Class: Transform #### ################################################################################ ################################################################################ | 2.684736 | 3 |
taxi_domain/methods/train_autoencoder_for_kmeans.py | CORE-Robotics-Lab/Personalized_Neural_Trees | 3 | 6622067 | """
trains autoencoder
"""
import torch
import sys
import torch.nn as nn
import pickle
import os
# sys.path.insert(0, '/home/Anonymous/PycharmProjects/bayesian_prolo')
import numpy as np
from torch.autograd import Variable
from utils.global_utils import save_pickle
import itertools
sys.path.insert(0, '../')
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(0)
np.random.seed(0)
class Autoencoder(nn.Module):
"""
autoencoder torch model
"""
def __init__(self):
super(Autoencoder, self).__init__()
self.encoder = nn.Sequential(
nn.Linear(242, 128),
nn.Tanh(),
nn.Linear(128, 64),
nn.Tanh(),
nn.Linear(64, 32),
nn.Tanh(),
nn.Linear(32, 11),
)
self.decoder = nn.Sequential(
nn.Linear(11, 32),
nn.Tanh(),
nn.Linear(32, 64),
nn.Tanh(),
nn.Linear(64, 128),
nn.Tanh(),
nn.Linear(128, 242)
)
def forward(self, x):
"""
forward pass
:param x:
:return:
"""
x = self.encoder(x)
x = self.decoder(x)
return x
def forward_only_encoding(self, x):
"""
produce encoding
:param x:
:return:
"""
z = self.encoder(x)
return z
class AutoEncoderTrain:
"""
create and train the autoencoder
"""
def __init__(self):
self.states, self.actions, self.failed_list, self.mturkcodes, self.indices_of_failed = self.load_in_data()
self.mean_embedding = None
self.embedding_np = None
self.matrixes = None
self.total_binary_embeddings = None
self.counter_splits = []
# self.states = None
@staticmethod
def load_in_data():
"""
loads in train data
:return:
"""
states, actions, failed_list, mturkcodes = pickle.load(open(os.path.join('../datasets/', 'training_data_from_all_users.pkl'), 'rb'))
indices_of_failed = []
for i in failed_list:
if i[0] not in indices_of_failed:
indices_of_failed.append(i[0])
return states, actions, failed_list, mturkcodes, indices_of_failed
# noinspection PyArgumentList
def compute_mean(self):
"""
computes the mean embedding by first computing all embeddings for every step of the schedule,
adding them to a numpy array and computing the avg
:return:
"""
# load_in_all_parameters(self.save_directory, self.auto_encoder)
counter = 0
for i, data_row in enumerate(self.states):
for e, data in enumerate(data_row):
input_nn = data
prediction_embedding = input_nn
print(prediction_embedding)
if counter == 0:
self.embedding_np = prediction_embedding
else:
self.embedding_np = np.vstack((self.embedding_np, prediction_embedding))
counter += 1
self.mean_embedding = np.average(self.embedding_np, axis=0)
print('mean embedding is ', self.mean_embedding)
def create_iterables(self):
"""
adds all possible state combinations
:return:
"""
iterables = [[0, 1], [0, 1], [0, 1],
[0, 1], [0, 1]]
self.iter_states = []
for t in itertools.product(*iterables):
self.iter_states.append(t)
# noinspection PyArgumentList
def round_each_encoding_and_create_array(self):
"""
rounds each encoding by comparing it to the mean, and then stacks these in an array
:return:
"""
self.total_binary_embeddings = np.zeros((0))
counter = 0
for i, data_row in enumerate(self.states):
self.counter_splits.append(counter)
for e, data in enumerate(data_row):
prediction_embedding = data
embedding_copy = np.zeros((1, 5))
for j, each_element in enumerate(self.mean_embedding):
if each_element > prediction_embedding[j]:
embedding_copy[0][j] = 0
else:
embedding_copy[0][j] = 1
if counter == 0:
self.total_binary_embeddings = embedding_copy
else:
self.total_binary_embeddings = np.vstack((self.total_binary_embeddings, embedding_copy))
counter += 1
# This should generate n schedules of binary data
print('finished turning all elements of schedule into binary')
def pass_in_embedding_out_state_ID(self, binary):
"""
pass in a binary embedding, and itll return the state id
:param binary:
:return:
"""
binary_as_tuple = tuple(binary)
index = self.iter_states.index(binary_as_tuple)
return index
def populate_a_matrix_per_schedule(self):
"""
creates matrixes bases on the binary embeddings
:return:
"""
self.matrixes = []
for i in range(len(self.states)):
m = np.zeros((32, 3))
self.matrixes.append(m)
for i, each_matrix in enumerate(self.matrixes):
# lets look at elements of schedule 1
for j in range(len(self.states[i])):
binary_embedding = self.total_binary_embeddings[j]
index = self.pass_in_embedding_out_state_ID(binary_embedding)
# action taken at this instance
action = self.actions[i][j]
each_matrix[index][action] += 1
total_sum = each_matrix.sum()
self.matrixes[i] = np.divide(each_matrix, total_sum)
print('n matrices have been generated')
# def cluster_matrixes(self):
# # vectorize each matrix
# vectorized_set = []
# for i in self.matrixes:
# vectorized = i.reshape(20 * 2048, 1)
# vectorized_set.append(vectorized)
# kmeans = KMeans(n_clusters=3)
# # Fitting the input data
# new_set = np.hstack(tuple(vectorized_set)).reshape(self.num_schedules, 20 * 2048)
# self.kmeans = kmeans.fit(np.asarray(new_set))
# self.label = self.kmeans.predict(np.asarray(new_set))
def save_matrixes(self):
"""
saves the matrixes so these can be used to cluster in the gmm etc.
:return:
"""
save_pickle('/home/Anonymous/PycharmProjects/bayesian_prolo/taxi_domain/methods', self.matrixes, 'taxi_matrixes.pkl')
def main():
"""
entry point for file
:return:
"""
trainer = AutoEncoderTrain()
trainer.compute_mean()
trainer.create_iterables()
trainer.round_each_encoding_and_create_array()
trainer.populate_a_matrix_per_schedule()
trainer.save_matrixes()
if __name__ == '__main__':
main()
| """
trains autoencoder
"""
import torch
import sys
import torch.nn as nn
import pickle
import os
# sys.path.insert(0, '/home/Anonymous/PycharmProjects/bayesian_prolo')
import numpy as np
from torch.autograd import Variable
from utils.global_utils import save_pickle
import itertools
sys.path.insert(0, '../')
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(0)
np.random.seed(0)
class Autoencoder(nn.Module):
"""
autoencoder torch model
"""
def __init__(self):
super(Autoencoder, self).__init__()
self.encoder = nn.Sequential(
nn.Linear(242, 128),
nn.Tanh(),
nn.Linear(128, 64),
nn.Tanh(),
nn.Linear(64, 32),
nn.Tanh(),
nn.Linear(32, 11),
)
self.decoder = nn.Sequential(
nn.Linear(11, 32),
nn.Tanh(),
nn.Linear(32, 64),
nn.Tanh(),
nn.Linear(64, 128),
nn.Tanh(),
nn.Linear(128, 242)
)
def forward(self, x):
"""
forward pass
:param x:
:return:
"""
x = self.encoder(x)
x = self.decoder(x)
return x
def forward_only_encoding(self, x):
"""
produce encoding
:param x:
:return:
"""
z = self.encoder(x)
return z
class AutoEncoderTrain:
"""
create and train the autoencoder
"""
def __init__(self):
self.states, self.actions, self.failed_list, self.mturkcodes, self.indices_of_failed = self.load_in_data()
self.mean_embedding = None
self.embedding_np = None
self.matrixes = None
self.total_binary_embeddings = None
self.counter_splits = []
# self.states = None
@staticmethod
def load_in_data():
"""
loads in train data
:return:
"""
states, actions, failed_list, mturkcodes = pickle.load(open(os.path.join('../datasets/', 'training_data_from_all_users.pkl'), 'rb'))
indices_of_failed = []
for i in failed_list:
if i[0] not in indices_of_failed:
indices_of_failed.append(i[0])
return states, actions, failed_list, mturkcodes, indices_of_failed
# noinspection PyArgumentList
def compute_mean(self):
"""
computes the mean embedding by first computing all embeddings for every step of the schedule,
adding them to a numpy array and computing the avg
:return:
"""
# load_in_all_parameters(self.save_directory, self.auto_encoder)
counter = 0
for i, data_row in enumerate(self.states):
for e, data in enumerate(data_row):
input_nn = data
prediction_embedding = input_nn
print(prediction_embedding)
if counter == 0:
self.embedding_np = prediction_embedding
else:
self.embedding_np = np.vstack((self.embedding_np, prediction_embedding))
counter += 1
self.mean_embedding = np.average(self.embedding_np, axis=0)
print('mean embedding is ', self.mean_embedding)
def create_iterables(self):
"""
adds all possible state combinations
:return:
"""
iterables = [[0, 1], [0, 1], [0, 1],
[0, 1], [0, 1]]
self.iter_states = []
for t in itertools.product(*iterables):
self.iter_states.append(t)
# noinspection PyArgumentList
def round_each_encoding_and_create_array(self):
"""
rounds each encoding by comparing it to the mean, and then stacks these in an array
:return:
"""
self.total_binary_embeddings = np.zeros((0))
counter = 0
for i, data_row in enumerate(self.states):
self.counter_splits.append(counter)
for e, data in enumerate(data_row):
prediction_embedding = data
embedding_copy = np.zeros((1, 5))
for j, each_element in enumerate(self.mean_embedding):
if each_element > prediction_embedding[j]:
embedding_copy[0][j] = 0
else:
embedding_copy[0][j] = 1
if counter == 0:
self.total_binary_embeddings = embedding_copy
else:
self.total_binary_embeddings = np.vstack((self.total_binary_embeddings, embedding_copy))
counter += 1
# This should generate n schedules of binary data
print('finished turning all elements of schedule into binary')
def pass_in_embedding_out_state_ID(self, binary):
"""
pass in a binary embedding, and itll return the state id
:param binary:
:return:
"""
binary_as_tuple = tuple(binary)
index = self.iter_states.index(binary_as_tuple)
return index
def populate_a_matrix_per_schedule(self):
"""
creates matrixes bases on the binary embeddings
:return:
"""
self.matrixes = []
for i in range(len(self.states)):
m = np.zeros((32, 3))
self.matrixes.append(m)
for i, each_matrix in enumerate(self.matrixes):
# lets look at elements of schedule 1
for j in range(len(self.states[i])):
binary_embedding = self.total_binary_embeddings[j]
index = self.pass_in_embedding_out_state_ID(binary_embedding)
# action taken at this instance
action = self.actions[i][j]
each_matrix[index][action] += 1
total_sum = each_matrix.sum()
self.matrixes[i] = np.divide(each_matrix, total_sum)
print('n matrices have been generated')
# def cluster_matrixes(self):
# # vectorize each matrix
# vectorized_set = []
# for i in self.matrixes:
# vectorized = i.reshape(20 * 2048, 1)
# vectorized_set.append(vectorized)
# kmeans = KMeans(n_clusters=3)
# # Fitting the input data
# new_set = np.hstack(tuple(vectorized_set)).reshape(self.num_schedules, 20 * 2048)
# self.kmeans = kmeans.fit(np.asarray(new_set))
# self.label = self.kmeans.predict(np.asarray(new_set))
def save_matrixes(self):
"""
saves the matrixes so these can be used to cluster in the gmm etc.
:return:
"""
save_pickle('/home/Anonymous/PycharmProjects/bayesian_prolo/taxi_domain/methods', self.matrixes, 'taxi_matrixes.pkl')
def main():
"""
entry point for file
:return:
"""
trainer = AutoEncoderTrain()
trainer.compute_mean()
trainer.create_iterables()
trainer.round_each_encoding_and_create_array()
trainer.populate_a_matrix_per_schedule()
trainer.save_matrixes()
if __name__ == '__main__':
main()
| en | 0.604618 | trains autoencoder # sys.path.insert(0, '/home/Anonymous/PycharmProjects/bayesian_prolo') autoencoder torch model forward pass :param x: :return: produce encoding :param x: :return: create and train the autoencoder # self.states = None loads in train data :return: # noinspection PyArgumentList computes the mean embedding by first computing all embeddings for every step of the schedule, adding them to a numpy array and computing the avg :return: # load_in_all_parameters(self.save_directory, self.auto_encoder) adds all possible state combinations :return: # noinspection PyArgumentList rounds each encoding by comparing it to the mean, and then stacks these in an array :return: # This should generate n schedules of binary data pass in a binary embedding, and itll return the state id :param binary: :return: creates matrixes bases on the binary embeddings :return: # lets look at elements of schedule 1 # action taken at this instance # def cluster_matrixes(self): # # vectorize each matrix # vectorized_set = [] # for i in self.matrixes: # vectorized = i.reshape(20 * 2048, 1) # vectorized_set.append(vectorized) # kmeans = KMeans(n_clusters=3) # # Fitting the input data # new_set = np.hstack(tuple(vectorized_set)).reshape(self.num_schedules, 20 * 2048) # self.kmeans = kmeans.fit(np.asarray(new_set)) # self.label = self.kmeans.predict(np.asarray(new_set)) saves the matrixes so these can be used to cluster in the gmm etc. :return: entry point for file :return: | 2.641095 | 3 |
json_to_relation/test/test_mongodb.py | paepcke/json_to_relation | 4 | 6622068 | <filename>json_to_relation/test/test_mongodb.py
# Copyright (c) 2014, Stanford University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on Sep 15, 2013
@author: paepcke
'''
from json_to_relation.mongodb import MongoDB
import unittest
TEST_ALL = True
class MongoTest(unittest.TestCase):
'''
Test the mongodb.py module. Uses a library that fakes
a MongoDB server. See https://pypi.python.org/pypi/mongomock/1.0.1
'''
def setUp(self):
self.objs = [{"fname" : "Franco", "lname" : "Corelli"},
{"fname" : "Leonardo", "lname" : "DaVinci", "age" : 300},
{"fname" : "Franco", "lname" : "Gandolpho"}]
self.mongodb = MongoDB(dbName='unittest', collection='unittest')
self.mongodb.clearCollection(collection="unittest")
self.mongodb.clearCollection(collection="new_coll")
self.mongodb.setCollection("unittest")
def tearDown(self):
self.mongodb.dropCollection(collection='unittest')
self.mongodb.dropCollection(collection='new_coll')
self.mongodb.close()
@unittest.skipIf(not TEST_ALL, "Skipping")
def test_update_and_find_one(self):
self.mongodb.insert(self.objs[0])
# Get a generator for the results:
resGen = self.mongodb.query({"fname" : "Franco"}, limit=1, collection="unittest")
res = resGen.next()
self.assertEqual('Corelli', res['lname'], "Failed retrieval of single obj; expected '%s' but got '%s'" % ('Corelli', res['lname']))
@unittest.skipIf(not TEST_ALL, "Skipping")
def test_set_coll_use_different_coll(self):
# Insert into unittest:
self.mongodb.insert(self.objs[0])
# Switch to new_coll:
self.mongodb.setCollection('new_coll')
self.mongodb.insert({"recommendation" : "Hawaii"})
# We're in new_coll; the following should be empty result:
self.mongodb.query({"fname" : "Franco"}, limit=1)
resCount = self.mongodb.resultCount({"fname" : "Franco"})
self.assertIsNone(resCount, "Got non-null result that should be null: %s" % resCount)
# But this search is within new_coll, and should succeed:
resGen = self.mongodb.query({"recommendation" : {'$regex' : '.*'}}, limit=1)
res = resGen.next()
self.assertEqual('Hawaii', res['recommendation'], "Failed retrieval of single obj; expected '%s' but got '%s'" % ('Hawaii', res['recommendation']))
# Try inline collection switch:
resGen = self.mongodb.query({"fname" : "Franco"}, limit=1, collection="unittest")
res = resGen.next()
self.assertEqual('Corelli', res['lname'], "Failed retrieval of single obj; expected '%s' but got '%s'" % ('Corelli', res['lname']))
# But the default collection should still be new_coll,
# so a search with unspecified coll should be in new_coll:
resGen = self.mongodb.query({"recommendation" : {'$regex' : '.*'}}, limit=1)
res = resGen.next()
self.assertEqual('Hawaii', res['recommendation'], "Failed retrieval of single obj; expected '%s' but got '%s'" % ('Hawaii', res['recommendation']))
@unittest.skipIf(not TEST_ALL, "Skipping")
def test_multi_result(self):
# Insert two docs with fname == Franco:
self.mongodb.insert(self.objs[0])
self.mongodb.insert(self.objs[2])
resGen = self.mongodb.query({"fname" : "Franco"})
# To get result count, must retrieve at least one result first:
resGen.next()
resCount = self.mongodb.resultCount({"fname" : "Franco"})
if resCount != 2:
self.fail("Added two Franco objects, but only %s are found." % str(resCount))
@unittest.skipIf(not TEST_ALL, "Skipping")
def test_clear_collection(self):
self.mongodb.insert({"foo" : 10})
resGen = self.mongodb.query({"foo" : 10}, limit=1)
res = resGen.next()
self.assertIsNotNone(res, "Did not find document that was just inserted.")
self.mongodb.clearCollection()
resGen = self.mongodb.query({"foo" : 10}, limit=1)
self.assertRaises(StopIteration, resGen.next)
@unittest.skipIf(not TEST_ALL, "Skipping")
def test_only_some_return_columns(self):
# Also tests the suppression of _id col when desired:
self.mongodb.insert(self.objs[0])
self.mongodb.insert(self.objs[1])
resGen = self.mongodb.query({}, ("lname"))
names = []
for lnameDict in resGen:
resCount = self.mongodb.resultCount({})
self.assertEqual(2, resCount)
names.append(lnameDict['lname'])
self.assertItemsEqual(['Corelli','DaVinci'], names, "Did not receive expected lnames: %s" % str(names))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | <filename>json_to_relation/test/test_mongodb.py
# Copyright (c) 2014, Stanford University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on Sep 15, 2013
@author: paepcke
'''
from json_to_relation.mongodb import MongoDB
import unittest
TEST_ALL = True
class MongoTest(unittest.TestCase):
'''
Test the mongodb.py module. Uses a library that fakes
a MongoDB server. See https://pypi.python.org/pypi/mongomock/1.0.1
'''
def setUp(self):
self.objs = [{"fname" : "Franco", "lname" : "Corelli"},
{"fname" : "Leonardo", "lname" : "DaVinci", "age" : 300},
{"fname" : "Franco", "lname" : "Gandolpho"}]
self.mongodb = MongoDB(dbName='unittest', collection='unittest')
self.mongodb.clearCollection(collection="unittest")
self.mongodb.clearCollection(collection="new_coll")
self.mongodb.setCollection("unittest")
def tearDown(self):
self.mongodb.dropCollection(collection='unittest')
self.mongodb.dropCollection(collection='new_coll')
self.mongodb.close()
@unittest.skipIf(not TEST_ALL, "Skipping")
def test_update_and_find_one(self):
self.mongodb.insert(self.objs[0])
# Get a generator for the results:
resGen = self.mongodb.query({"fname" : "Franco"}, limit=1, collection="unittest")
res = resGen.next()
self.assertEqual('Corelli', res['lname'], "Failed retrieval of single obj; expected '%s' but got '%s'" % ('Corelli', res['lname']))
@unittest.skipIf(not TEST_ALL, "Skipping")
def test_set_coll_use_different_coll(self):
# Insert into unittest:
self.mongodb.insert(self.objs[0])
# Switch to new_coll:
self.mongodb.setCollection('new_coll')
self.mongodb.insert({"recommendation" : "Hawaii"})
# We're in new_coll; the following should be empty result:
self.mongodb.query({"fname" : "Franco"}, limit=1)
resCount = self.mongodb.resultCount({"fname" : "Franco"})
self.assertIsNone(resCount, "Got non-null result that should be null: %s" % resCount)
# But this search is within new_coll, and should succeed:
resGen = self.mongodb.query({"recommendation" : {'$regex' : '.*'}}, limit=1)
res = resGen.next()
self.assertEqual('Hawaii', res['recommendation'], "Failed retrieval of single obj; expected '%s' but got '%s'" % ('Hawaii', res['recommendation']))
# Try inline collection switch:
resGen = self.mongodb.query({"fname" : "Franco"}, limit=1, collection="unittest")
res = resGen.next()
self.assertEqual('Corelli', res['lname'], "Failed retrieval of single obj; expected '%s' but got '%s'" % ('Corelli', res['lname']))
# But the default collection should still be new_coll,
# so a search with unspecified coll should be in new_coll:
resGen = self.mongodb.query({"recommendation" : {'$regex' : '.*'}}, limit=1)
res = resGen.next()
self.assertEqual('Hawaii', res['recommendation'], "Failed retrieval of single obj; expected '%s' but got '%s'" % ('Hawaii', res['recommendation']))
@unittest.skipIf(not TEST_ALL, "Skipping")
def test_multi_result(self):
# Insert two docs with fname == Franco:
self.mongodb.insert(self.objs[0])
self.mongodb.insert(self.objs[2])
resGen = self.mongodb.query({"fname" : "Franco"})
# To get result count, must retrieve at least one result first:
resGen.next()
resCount = self.mongodb.resultCount({"fname" : "Franco"})
if resCount != 2:
self.fail("Added two Franco objects, but only %s are found." % str(resCount))
@unittest.skipIf(not TEST_ALL, "Skipping")
def test_clear_collection(self):
self.mongodb.insert({"foo" : 10})
resGen = self.mongodb.query({"foo" : 10}, limit=1)
res = resGen.next()
self.assertIsNotNone(res, "Did not find document that was just inserted.")
self.mongodb.clearCollection()
resGen = self.mongodb.query({"foo" : 10}, limit=1)
self.assertRaises(StopIteration, resGen.next)
@unittest.skipIf(not TEST_ALL, "Skipping")
def test_only_some_return_columns(self):
# Also tests the suppression of _id col when desired:
self.mongodb.insert(self.objs[0])
self.mongodb.insert(self.objs[1])
resGen = self.mongodb.query({}, ("lname"))
names = []
for lnameDict in resGen:
resCount = self.mongodb.resultCount({})
self.assertEqual(2, resCount)
names.append(lnameDict['lname'])
self.assertItemsEqual(['Corelli','DaVinci'], names, "Did not receive expected lnames: %s" % str(names))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | en | 0.724218 | # Copyright (c) 2014, Stanford University # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Created on Sep 15, 2013 @author: paepcke Test the mongodb.py module. Uses a library that fakes a MongoDB server. 
See https://pypi.python.org/pypi/mongomock/1.0.1 # Get a generator for the results: # Insert into unittest: # Switch to new_coll: # We're in new_coll; the following should be empty result: # But this search is within new_coll, and should succeed: # Try inline collection switch: # But the default collection should still be new_coll, # so a search with unspecified coll should be in new_coll: # Insert two docs with fname == Franco: # To get result count, must retrieve at least one result first: # Also tests the suppression of _id col when desired: #import sys;sys.argv = ['', 'Test.testName'] | 1.525905 | 2 |
reddit2telegram/channels/smilethoughts/app.py | mainyordle/reddit2telegram | 187 | 6622069 | <filename>reddit2telegram/channels/smilethoughts/app.py<gh_stars>100-1000
#encoding:utf-8
subreddit = 'MadeMeSmile'
t_channel = '@SmileThoughts'
def send_post(submission, r2t):
return r2t.send_simple(submission)
| <filename>reddit2telegram/channels/smilethoughts/app.py<gh_stars>100-1000
#encoding:utf-8
subreddit = 'MadeMeSmile'
t_channel = '@SmileThoughts'
def send_post(submission, r2t):
return r2t.send_simple(submission)
| en | 0.735217 | #encoding:utf-8 | 1.379314 | 1 |
scrape_korea.py | LiveCoronaDetector/Crawler | 8 | 6622070 | # -*- coding:utf-8 -*-
"""대한민국 (+ 세계) 환자수 수집"""
import re
import time
import requests
from bs4 import BeautifulSoup
from utils import postprocess, load_json
from slack_handler import SlackHandler
patients = load_json("./_data.json")
# def scrape_namuWiki():
# """나무위키에서 대한민국 확진환자수, 격리해제수, 사망자수 수집
#
# Returns:
# (dict) 한국의 세계 확진환자수(cc), 격리해제수(recovered), 사망자수(dead)
# """
# html = requests.get("https://namu.wiki/w/%EC%8B%A0%EC%A2%85%20%EC%BD%94%EB%A1%9C%EB%82%98%EB%B0%94%EC%9D%B4%EB%9F%AC%EC%8A%A4%EA%B0%90%EC%97%BC%EC%A6%9D")
# soup = BeautifulSoup(html.text, "lxml")
# table = soup.find("a", id=r's-3.2').parent.\
# findNext("div", class_="wiki-heading-content").\
# find("div", class_="wiki-table-wrap table-center").\
# find("tbody")
#
# data = table.find_all("tr")
# for datum in data:
# if "대한민국" in str(datum):
# country_info = datum.find_all("div", class_="wiki-paragraph")
# cc = country_info[1].text
# dead = country_info[2].text
# recovered = country_info[3].text
# postproc = postprocess([cc, recovered, dead])
# return_data = {"cc": postproc[0],
# "recovered": postproc[1],
# "dead": postproc[2]}
# push_scrape("scraper_korea.py >> scrape_namuWiki()",
# [("대한민국", return_data)])
# return return_data
# push_scrape("scrape_korea.py >> scrape_namuWiki()",
# [("대한민국", None)])
# return None
def scrape_worldOmeter(korea=True):
"""worldOmeter에서 세계 확진환자수, 격리해제수, 사망자수 수집
Args:
world: 대한민국 데이터만 수집하려면, True
세계 데이터를 수집하려면, False
Returns:
(dict) 한국의 확진환자수(cc_sum), 격리해제수(recovered), 사망자수(dead)
"""
html = requests.get("https://www.worldometers.info/coronavirus/")
soup = BeautifulSoup(html.text, "html.parser")
data = soup.select("#main_table_countries > tbody > tr")
world_data = {}
world_cc, world_recovered, world_dead = 0, 0, 0
push = []
for datum in data:
country = datum.find_all("td")[0].text.strip()
cc = datum.find_all("td")[1].text.strip()
recovered = datum.find_all("td")[5].text.strip()
dead = datum.find_all("td")[3].text.strip()
postproc = postprocess([cc, recovered, dead])
cc, recovered, dead = postproc[0], postproc[1], postproc[2]
if cc:
world_cc += cc
if recovered:
world_recovered += recovered
if dead:
world_dead += dead
if korea:
if country != "S. Korea":
continue
korea_patients = patients.copy()
korea_patients["cc_sum"] = cc
korea_patients["recovered"] = recovered
korea_patients["dead"] = dead
push.append(("대한민국", korea_patients))
SlackHandler().add_scraping_msg("scrape_korea.py >> scrape_worldOmeter()", push)
return korea_patients
world_data[country] = patients.copy()
world_data[country]["cc_sum"] = cc
world_data[country]["recovered"] = recovered
world_data[country]["dead"] = dead
push.append((country, world_data[country]))
time.sleep(0.2)
world_data["world"] = patients.copy()
world_data["world"]["cc_sum"] = world_cc
world_data["world"]["recovered"] = world_recovered
world_data["world"]["dead"] = world_dead
push.append(("world", world_data["world"]))
SlackHandler().add_scraping_msg(
"scrape_korea.py >> scrape_worldOmeter(korea=False)", push)
return world_data
def scrape_KCDC_korea():
"""KCDC에서 대한민국 확진환자수, 격리해제수, 사망자수 수집
Returns:
(dict) 한국의 세계 확진환자수(cc), 격리해제수(recovered), 사망자수(dead)
"""
html = requests.get("http://ncov.mohw.go.kr/index_main.jsp")
soup = BeautifulSoup(html.text, "lxml")
data = soup.select("div.co_cur > ul > li > a.num")
regex = re.compile(r"\d[,\d]+")
cc = regex.search(data[0].text).group()
recovered = regex.search(data[1].text).group()
dead = regex.search(data[2].text).group()
postproc = postprocess([cc, recovered, dead])
return_data = patients.copy()
return_data["cc_sum"] = postproc[0]
return_data["recovered"] = postproc[1]
return_data["dead"] = postproc[2]
SlackHandler().add_scraping_msg("scrape_korea.py >> scrape_KCDC_korea()",
[("대한민국", return_data)])
return return_data
def run_korea():
"""사이트에서 수집한 대한민국 확진환자수, 격리해제수, 사망자수 취합
사이트: KCDC, worldOmeter
Returns:
(dict) 각 사이트에서 취합한 대한민국 확진환자수, 격리해제수, 사망자수
"""
func_list = [scrape_KCDC_korea, scrape_worldOmeter]
base = patients.copy()
base["cc_sum"], base["recovered"], base["dead"] = 0, 0, 0
for func in func_list:
datum = None
for _ in range(3):
try:
datum = func()
except Exception as e: # TODO: 구채적인 error 처리
print(e)
print("[{}] scraping retry..".format(func.__name__))
else:
print("func [{}]: {}".format(func.__name__, datum))
break
time.sleep(1)
for key in base.keys():
if (datum is not None) and (datum[key] is not None):
if base[key] < datum[key]:
base[key] = datum[key]
return base
if __name__ == "__main__":
run_korea()
| # -*- coding:utf-8 -*-
"""대한민국 (+ 세계) 환자수 수집"""
import re
import time
import requests
from bs4 import BeautifulSoup
from utils import postprocess, load_json
from slack_handler import SlackHandler
patients = load_json("./_data.json")
# def scrape_namuWiki():
# """나무위키에서 대한민국 확진환자수, 격리해제수, 사망자수 수집
#
# Returns:
# (dict) 한국의 세계 확진환자수(cc), 격리해제수(recovered), 사망자수(dead)
# """
# html = requests.get("https://namu.wiki/w/%EC%8B%A0%EC%A2%85%20%EC%BD%94%EB%A1%9C%EB%82%98%EB%B0%94%EC%9D%B4%EB%9F%AC%EC%8A%A4%EA%B0%90%EC%97%BC%EC%A6%9D")
# soup = BeautifulSoup(html.text, "lxml")
# table = soup.find("a", id=r's-3.2').parent.\
# findNext("div", class_="wiki-heading-content").\
# find("div", class_="wiki-table-wrap table-center").\
# find("tbody")
#
# data = table.find_all("tr")
# for datum in data:
# if "대한민국" in str(datum):
# country_info = datum.find_all("div", class_="wiki-paragraph")
# cc = country_info[1].text
# dead = country_info[2].text
# recovered = country_info[3].text
# postproc = postprocess([cc, recovered, dead])
# return_data = {"cc": postproc[0],
# "recovered": postproc[1],
# "dead": postproc[2]}
# push_scrape("scraper_korea.py >> scrape_namuWiki()",
# [("대한민국", return_data)])
# return return_data
# push_scrape("scrape_korea.py >> scrape_namuWiki()",
# [("대한민국", None)])
# return None
def scrape_worldOmeter(korea=True):
"""worldOmeter에서 세계 확진환자수, 격리해제수, 사망자수 수집
Args:
world: 대한민국 데이터만 수집하려면, True
세계 데이터를 수집하려면, False
Returns:
(dict) 한국의 확진환자수(cc_sum), 격리해제수(recovered), 사망자수(dead)
"""
html = requests.get("https://www.worldometers.info/coronavirus/")
soup = BeautifulSoup(html.text, "html.parser")
data = soup.select("#main_table_countries > tbody > tr")
world_data = {}
world_cc, world_recovered, world_dead = 0, 0, 0
push = []
for datum in data:
country = datum.find_all("td")[0].text.strip()
cc = datum.find_all("td")[1].text.strip()
recovered = datum.find_all("td")[5].text.strip()
dead = datum.find_all("td")[3].text.strip()
postproc = postprocess([cc, recovered, dead])
cc, recovered, dead = postproc[0], postproc[1], postproc[2]
if cc:
world_cc += cc
if recovered:
world_recovered += recovered
if dead:
world_dead += dead
if korea:
if country != "S. Korea":
continue
korea_patients = patients.copy()
korea_patients["cc_sum"] = cc
korea_patients["recovered"] = recovered
korea_patients["dead"] = dead
push.append(("대한민국", korea_patients))
SlackHandler().add_scraping_msg("scrape_korea.py >> scrape_worldOmeter()", push)
return korea_patients
world_data[country] = patients.copy()
world_data[country]["cc_sum"] = cc
world_data[country]["recovered"] = recovered
world_data[country]["dead"] = dead
push.append((country, world_data[country]))
time.sleep(0.2)
world_data["world"] = patients.copy()
world_data["world"]["cc_sum"] = world_cc
world_data["world"]["recovered"] = world_recovered
world_data["world"]["dead"] = world_dead
push.append(("world", world_data["world"]))
SlackHandler().add_scraping_msg(
"scrape_korea.py >> scrape_worldOmeter(korea=False)", push)
return world_data
def scrape_KCDC_korea():
"""KCDC에서 대한민국 확진환자수, 격리해제수, 사망자수 수집
Returns:
(dict) 한국의 세계 확진환자수(cc), 격리해제수(recovered), 사망자수(dead)
"""
html = requests.get("http://ncov.mohw.go.kr/index_main.jsp")
soup = BeautifulSoup(html.text, "lxml")
data = soup.select("div.co_cur > ul > li > a.num")
regex = re.compile(r"\d[,\d]+")
cc = regex.search(data[0].text).group()
recovered = regex.search(data[1].text).group()
dead = regex.search(data[2].text).group()
postproc = postprocess([cc, recovered, dead])
return_data = patients.copy()
return_data["cc_sum"] = postproc[0]
return_data["recovered"] = postproc[1]
return_data["dead"] = postproc[2]
SlackHandler().add_scraping_msg("scrape_korea.py >> scrape_KCDC_korea()",
[("대한민국", return_data)])
return return_data
def run_korea():
"""사이트에서 수집한 대한민국 확진환자수, 격리해제수, 사망자수 취합
사이트: KCDC, worldOmeter
Returns:
(dict) 각 사이트에서 취합한 대한민국 확진환자수, 격리해제수, 사망자수
"""
func_list = [scrape_KCDC_korea, scrape_worldOmeter]
base = patients.copy()
base["cc_sum"], base["recovered"], base["dead"] = 0, 0, 0
for func in func_list:
datum = None
for _ in range(3):
try:
datum = func()
except Exception as e: # TODO: 구채적인 error 처리
print(e)
print("[{}] scraping retry..".format(func.__name__))
else:
print("func [{}]: {}".format(func.__name__, datum))
break
time.sleep(1)
for key in base.keys():
if (datum is not None) and (datum[key] is not None):
if base[key] < datum[key]:
base[key] = datum[key]
return base
if __name__ == "__main__":
run_korea()
| ko | 0.790841 | # -*- coding:utf-8 -*- 대한민국 (+ 세계) 환자수 수집 # def scrape_namuWiki(): # """나무위키에서 대한민국 확진환자수, 격리해제수, 사망자수 수집 # # Returns: # (dict) 한국의 세계 확진환자수(cc), 격리해제수(recovered), 사망자수(dead) # """ # html = requests.get("https://namu.wiki/w/%EC%8B%A0%EC%A2%85%20%EC%BD%94%EB%A1%9C%EB%82%98%EB%B0%94%EC%9D%B4%EB%9F%AC%EC%8A%A4%EA%B0%90%EC%97%BC%EC%A6%9D") # soup = BeautifulSoup(html.text, "lxml") # table = soup.find("a", id=r's-3.2').parent.\ # findNext("div", class_="wiki-heading-content").\ # find("div", class_="wiki-table-wrap table-center").\ # find("tbody") # # data = table.find_all("tr") # for datum in data: # if "대한민국" in str(datum): # country_info = datum.find_all("div", class_="wiki-paragraph") # cc = country_info[1].text # dead = country_info[2].text # recovered = country_info[3].text # postproc = postprocess([cc, recovered, dead]) # return_data = {"cc": postproc[0], # "recovered": postproc[1], # "dead": postproc[2]} # push_scrape("scraper_korea.py >> scrape_namuWiki()", # [("대한민국", return_data)]) # return return_data # push_scrape("scrape_korea.py >> scrape_namuWiki()", # [("대한민국", None)]) # return None worldOmeter에서 세계 확진환자수, 격리해제수, 사망자수 수집 Args: world: 대한민국 데이터만 수집하려면, True 세계 데이터를 수집하려면, False Returns: (dict) 한국의 확진환자수(cc_sum), 격리해제수(recovered), 사망자수(dead) KCDC에서 대한민국 확진환자수, 격리해제수, 사망자수 수집 Returns: (dict) 한국의 세계 확진환자수(cc), 격리해제수(recovered), 사망자수(dead) 사이트에서 수집한 대한민국 확진환자수, 격리해제수, 사망자수 취합 사이트: KCDC, worldOmeter Returns: (dict) 각 사이트에서 취합한 대한민국 확진환자수, 격리해제수, 사망자수 # TODO: 구채적인 error 처리 | 2.807457 | 3 |
src/data/wave_cir.py | poly-ai/fluid-surface-estimation | 2 | 6622071 | import numpy as np
import math
from matplotlib.image import AxesImage
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Fixed for the time being...
WAVE_FREQ = 10 # This scalar defines the speed of the wave
WAVE_NUMBER = 1 # This scalar defines the speed of the wave
X_CENTER = np.pi
Y_CENTER = np.pi
IMAGE_DIMENSION = 64
NUM_FRAMES = 400
dt = 0.01 # Constant now (Don't change this)
# This vector defines the direction of the travelling wave.
# The magnitude of this vector defines the "length" of the wave
def create_cir_wave(image_dimension=IMAGE_DIMENSION,
                    num_frames=NUM_FRAMES,
                    wave_freq=WAVE_FREQ,
                    wave_number=WAVE_NUMBER,
                    x_center=X_CENTER,
                    y_center=Y_CENTER):
    """Generate frames of a circular (radial) travelling wave.

    The surface height at pixel (i, j) and time t is
        cos(wave_number*r - wave_freq*t) / sqrt(r),  r = 1 + dist to center.

    Args:
        image_dimension: width/height of each square frame in pixels.
        num_frames: number of time steps (frames) to render.
        wave_freq: temporal frequency scalar (wave speed).
        wave_number: spatial frequency scalar.
        x_center, y_center: wave origin inside the [0, 2*pi] domain.

    Returns:
        numpy array of shape (num_frames, image_dimension, image_dimension).
    """
    # Grid spacing over the [0, 2*pi] domain and a time stamp per frame.
    dx = 2 * np.pi / (image_dimension - 1)
    dy = dx
    ticks = np.linspace(0, dt * num_frames, num_frames)

    # Pixel coordinates relative to the wave center. Row i = 0 is the top of
    # the image, so y decreases as i grows; the original mapping
    # y = (image_dimension - i)*dy - y_center is preserved exactly.
    cols = np.arange(image_dimension)
    rows = np.arange(image_dimension)
    x = dx * cols - x_center
    y = (image_dimension - rows) * dy - y_center
    xx, yy = np.meshgrid(x, y)          # xx varies along j, yy along i
    r = 1 + np.sqrt(xx * xx + yy * yy)  # shifted radial distance, always >= 1

    # Broadcast time over the 2-D radius grid instead of the original
    # triple Python loop: identical values, vectorized evaluation.
    phase = (wave_number * r[np.newaxis, :, :]
             - wave_freq * ticks[:, np.newaxis, np.newaxis])
    return np.cos(phase) / np.sqrt(r)[np.newaxis, :, :]
# ------------------------------------------------------------------------------
# Create 2D video
# ------------------------------------------------------------------------------
def animate_2D(frame_number, image_ref: AxesImage, data):
    """FuncAnimation callback: display frame ``frame_number`` of ``data``."""
    current_frame = data[frame_number, :, :]
    image_ref.set_array(current_frame)
    return frame_number
def show_2D_animation(data):
    """Play ``data`` -- shape (frames, height, width) -- as a grayscale
    animation. Blocks until the plot window is closed."""
    fig = plt.figure()
    plt.axes()  # create the Axes that imshow() below draws into
    im = plt.imshow(data[0, :, :], cmap="gray")
    num_frames = data.shape[0]
    # The FuncAnimation object must stay referenced while the window is
    # open; otherwise it is garbage-collected and the animation stops.
    anim = animation.FuncAnimation(fig, animate_2D, interval=2,
                                   fargs=(im, data), frames=num_frames)
    plt.show()
def main():
    """Build the circular-wave frames and play them back."""
    frames = create_cir_wave()
    show_2D_animation(frames)
# Run the demo only when this file is executed as a script (not on import).
if __name__ == "__main__":
    main()
| import numpy as np
import math
from matplotlib.image import AxesImage
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Fixed for the time being...
WAVE_FREQ = 10 # This scalar defines the speed of the wave
WAVE_NUMBER = 1 # This scalar defines the speed of the wave
X_CENTER = np.pi
Y_CENTER = np.pi
IMAGE_DIMENSION = 64
NUM_FRAMES = 400
dt = 0.01 # Constant now (Don't change this)
# This vector defines the direction of the travelling wave.
# The magnitude of this vector defines the "length" of the wave
def create_cir_wave(image_dimension=IMAGE_DIMENSION,
                    num_frames=NUM_FRAMES,
                    wave_freq=WAVE_FREQ,
                    wave_number=WAVE_NUMBER,
                    x_center=X_CENTER,
                    y_center=Y_CENTER):
    """Generate frames of a circular (radial) travelling wave.

    The surface height at pixel (i, j) and time t is
        cos(wave_number*r - wave_freq*t) / sqrt(r),  r = 1 + dist to center.

    Args:
        image_dimension: width/height of each square frame in pixels.
        num_frames: number of time steps (frames) to render.
        wave_freq: temporal frequency scalar (wave speed).
        wave_number: spatial frequency scalar.
        x_center, y_center: wave origin inside the [0, 2*pi] domain.

    Returns:
        numpy array of shape (num_frames, image_dimension, image_dimension).
    """
    # Grid spacing over the [0, 2*pi] domain and a time stamp per frame.
    dx = 2 * np.pi / (image_dimension - 1)
    dy = dx
    ticks = np.linspace(0, dt * num_frames, num_frames)

    # Pixel coordinates relative to the wave center. Row i = 0 is the top of
    # the image, so y decreases as i grows; the original mapping
    # y = (image_dimension - i)*dy - y_center is preserved exactly.
    cols = np.arange(image_dimension)
    rows = np.arange(image_dimension)
    x = dx * cols - x_center
    y = (image_dimension - rows) * dy - y_center
    xx, yy = np.meshgrid(x, y)          # xx varies along j, yy along i
    r = 1 + np.sqrt(xx * xx + yy * yy)  # shifted radial distance, always >= 1

    # Broadcast time over the 2-D radius grid instead of the original
    # triple Python loop: identical values, vectorized evaluation.
    phase = (wave_number * r[np.newaxis, :, :]
             - wave_freq * ticks[:, np.newaxis, np.newaxis])
    return np.cos(phase) / np.sqrt(r)[np.newaxis, :, :]
# ------------------------------------------------------------------------------
# Create 2D video
# ------------------------------------------------------------------------------
def animate_2D(frame_number, image_ref: AxesImage, data):
    """FuncAnimation callback: display frame ``frame_number`` of ``data``."""
    current_frame = data[frame_number, :, :]
    image_ref.set_array(current_frame)
    return frame_number
def show_2D_animation(data):
    """Play ``data`` -- shape (frames, height, width) -- as a grayscale
    animation. Blocks until the plot window is closed."""
    fig = plt.figure()
    plt.axes()  # create the Axes that imshow() below draws into
    im = plt.imshow(data[0, :, :], cmap="gray")
    num_frames = data.shape[0]
    # The FuncAnimation object must stay referenced while the window is
    # open; otherwise it is garbage-collected and the animation stops.
    anim = animation.FuncAnimation(fig, animate_2D, interval=2,
                                   fargs=(im, data), frames=num_frames)
    plt.show()
def main():
    """Build the circular-wave frames and play them back."""
    frames = create_cir_wave()
    show_2D_animation(frames)
# Run the demo only when this file is executed as a script (not on import).
if __name__ == "__main__":
    main()
| en | 0.54219 | # Fixed for the time being... # This scalar defines the speed of the wave # This scalar defines the speed of the wave # Constant now (Don't change this) # This vector defines the direction of the travelling wave. # The magnitude of this vector defines the "length" of the wave # Data to generate # Deltas, num ticks in example # Create frames # ------------------------------------------------------------------------------ # Create 2D video # ------------------------------------------------------------------------------ | 3.374965 | 3 |
flowerpath.py | laurentgrangeau/minecraft-pi | 1 | 6622072 | <filename>flowerpath.py
import mcpi.minecraft as minecraft
import time
mc = minecraft.Minecraft.create()
# Endlessly place a block (id 38 -- presumably a flower, per the script's
# name) at the player's current position, leaving a trail behind them.
while True:
    player_pos = mc.player.getPos()
    flower = 38  # Minecraft block id used for the trail
    mc.setBlock(player_pos.x, player_pos.y, player_pos.z, flower)
    time.sleep(0.2)  # throttle to ~5 updates per second
| <filename>flowerpath.py
import mcpi.minecraft as minecraft
import time
mc = minecraft.Minecraft.create()
# Endlessly place a block (id 38 -- presumably a flower, per the script's
# name) at the player's current position, leaving a trail behind them.
while True:
    player_pos = mc.player.getPos()
    flower = 38  # Minecraft block id used for the trail
    mc.setBlock(player_pos.x, player_pos.y, player_pos.z, flower)
    time.sleep(0.2)  # throttle to ~5 updates per second
| none | 1 | 2.114092 | 2 | |
src/luq.py | zfergus2/APLMOO | 1 | 6622073 | <reponame>zfergus2/APLMOO
"""
Compute the LUQ decomposition of a sparse square matrix.
Based on Pawel Kowal's MatLab code.
Written by: <NAME>
"""
import numpy
import scipy.sparse
import scipy.sparse.linalg
def luq(A, do_pivot, tol=1e-8):
    """
    PURPOSE: calculates the following decomposition

        A = L |Ubar 0 | Q
              |0    0 |

    where Ubar is a square invertible matrix and the matrices L, Q are
    invertible.

    USAGE: L, U, Q = luq(A, do_pivot, tol)

    INPUT:
        A        - a sparse matrix (must already be sparse; splu also
                   requires it to be square)
        do_pivot - truthy: with column pivoting; falsy: without
        tol      - tolerance used to separate zero from nonzero pivots

    OUTPUT:
        L, U, Q matrices

    COMMENTS:
        Based on LU decomposition,
        https://en.wikipedia.org/wiki/LU_decomposition, and on the MATLAB
        LREM_SOLVE toolbox by Pawel Kowal (2006), which is free for
        noncommercial academic use only.
    """
    n, m = A.shape

    ###########################################################################
    # SPECIAL CASES
    ###########################################################################
    if n == 0 or m == 0:
        # An empty dimension: L and Q are identities, U is A itself.
        return (scipy.sparse.identity(n), A, scipy.sparse.identity(m))

    ###########################################################################
    # LU DECOMPOSITION
    ###########################################################################
    # splu returns a SuperLU object exposing the factors L, U and the
    # row/column permutations perm_r / perm_c.
    LUDecomp = scipy.sparse.linalg.splu(A)
    L = LUDecomp.L
    U = LUDecomp.U
    # Row-permutation matrix P built from perm_r. P is n-by-n, so its rows
    # are indexed by arange(n) (the original used arange(m); splu only
    # accepts square input, but n is the semantically correct size).
    P = scipy.sparse.csr_matrix((n, n))
    P[numpy.arange(n), LUDecomp.perm_r] = 1
    if do_pivot:
        Q = scipy.sparse.csr_matrix((m, m))
        Q[numpy.arange(m), LUDecomp.perm_c] = 1
        Q = Q.T
    else:
        Q = scipy.sparse.identity(m)

    # If L is rectangular (fewer columns than rows), pad L and U so the
    # factorization keeps its n-by-n / n-by-m shapes.
    p = n - L.shape[1]
    if p != 0:
        L = scipy.sparse.hstack([P.T.dot(L), P[(n - p):n, :].T])
        U = scipy.sparse.vstack([U, scipy.sparse.csc_matrix((p, m))])

    ###########################################################################
    # FIND ROWS WITH ZERO AND NONZERO ELEMENTS ON THE DIAGONAL
    ###########################################################################
    if U.shape[0] == 1 or U.shape[1] == 1:
        # Single pivot: a 1x1 sparse matrix holding U[0, 0]. (The original
        # passed the bare scalar, which csc_matrix rejects.)
        S = scipy.sparse.csc_matrix([[U[0, 0]]])
    else:
        S = scipy.sparse.dia_matrix((U.diagonal(), [0]), shape=U.shape)
    # I is a (rows, cols) pair of equal index arrays marking nonzero pivots;
    # Jl / Jq collect the row / column indices whose pivot is numerically 0.
    I = (abs(S) > tol).nonzero()
    # Jl = (1:n)'; Jl(I) = []
    Jl = numpy.arange(0, n).reshape((1, n)).T
    Jl = numpy.delete(Jl, I[0])
    # Jq = (1:m)'; Jq(I) = []
    Jq = numpy.arange(0, m).reshape((1, m)).T
    Jq = numpy.delete(Jq, I[1])  # was numpy.delete(Jq, I): a tuple is not a valid index list
    Ubar1 = U[I]
    # NOTE(review): MATLAB U(Jl,Jq) selects a submatrix; the pairwise
    # indexing below is kept from the original port -- confirm against the
    # MATLAB reference before relying on the recursive branch.
    Ubar2 = U[Jl.flatten(), Jq.flatten()]
    Qbar1 = Q[I[0], :]
    Lbar1 = L[:, I[1]]

    ###########################################################################
    # ELIMINATE NONZERO ELEMENTS BELOW AND ON THE RIGHT OF THE
    # INVERTIBLE BLOCK OF THE MATRIX U; UPDATE MATRICES L, Q
    ###########################################################################
    # (Leftover "import pdb; pdb.set_trace()" debug statements removed here.)
    if I[0].shape[0] != 0:
        Utmp = U[I[0], Jq]
        # X = Ubar1' \ U(Jl,I)'
        X = scipy.sparse.linalg.spsolve(Ubar1.T, U[Jl, I].T)
        Ubar2 = Ubar2 - X.T.dot(Utmp)
        Lbar1 = Lbar1 + L[:, Jl].dot(X.T)
        # X = Ubar1 \ Utmp
        X = scipy.sparse.linalg.spsolve(Ubar1, Utmp)
        Qbar1 = Qbar1 + X.dot(Q[Jq, :])
        # Release the temporaries (MATLAB: Utmp = []; X = []).
        Utmp = numpy.empty(1)
        X = numpy.empty(1)

    ###########################################################################
    # FIND ROWS AND COLUMNS WITH ONLY ZERO ELEMENTS
    ###########################################################################
    I2 = ((abs(Ubar2)).max(1) > tol).nonzero()
    I5 = ((abs(Ubar2)).max(0) > tol).nonzero()
    I3 = Jl[I2]
    I4 = Jq[I5]
    # MATLAB: Jq(I5) = []; Jl(I2) = [] -- remove, not overwrite. (The
    # original assigned numpy.empty(1) into the slots and had a "J1" typo
    # for Jl, which raised NameError.)
    Jq = numpy.delete(Jq, I5)
    Jl = numpy.delete(Jl, I2)
    U = numpy.empty(1)

    ###########################################################################
    # THE PART OF THE MATRIX U WHICH IS NOT YET IN THE REQUIRED FORM
    ###########################################################################
    A = Ubar2[I2, I5]  # was "Ubar[I2, I5]": NameError typo for Ubar2

    ###########################################################################
    # LUQ DECOMPOSITION OF THE REMAINING BLOCK (recursive call)
    ###########################################################################
    L1, U1, Q1 = luq(A, do_pivot, tol)

    ###########################################################################
    # UPDATE MATRICES L, U, Q
    ###########################################################################
    Lbar2 = L[:, I3].dot(L1)
    Qbar2 = Q1.dot(Q[I4, :])
    L = scipy.sparse.hstack([Lbar1, Lbar2, L[:, Jl]])
    Q = scipy.sparse.vstack([Qbar1, Qbar2, Q[Jq, :]])
    n1 = I[0].shape[0]  # was I.shape[0]: I is a tuple of index arrays
    n2 = I3.shape[0]
    m2 = I4.shape[0]
    # U = [Ubar1 0; 0 U1 0; 0] -- empty blocks are built with the positional
    # (rows, cols) tuple; csc_matrix(shape=...) / csc_matrix(r, c) as in the
    # original are invalid constructor calls.
    U = scipy.sparse.vstack([
        scipy.sparse.hstack([Ubar1,
                             scipy.sparse.csc_matrix((n1, m - n1))]),
        scipy.sparse.hstack([scipy.sparse.csc_matrix((n2, n1)), U1,
                             scipy.sparse.csc_matrix((n2, m - n1 - m2))]),
        scipy.sparse.csc_matrix((n - n1 - n2, m))
    ])
    return L, U, Q
if __name__ == "__main__":
    # Demo: decompose a sparse 4x4 identity matrix and print the factors.
    demo = scipy.sparse.identity(4).tocsc()
    L, U, Q = luq(demo, True)
    print("L:\n%s" % L)
    print("U:\n%s" % U)
    print("Q:\n%s" % Q)
    print("A = L*U*Q:\n%s" % L.dot(U).dot(Q))
| """
Compute the LUQ decomposition of a sparse square matrix.
Based on Pawel Kowal's MatLab code.
Written by: <NAME>
"""
import numpy
import scipy.sparse
import scipy.sparse.linalg
def luq(A, do_pivot, tol = 1e-8):
"""
PURPOSE: calculates the following decomposition
A = L |Ubar 0 | Q
|0 0 |
where Ubar is a square invertible matrix
and matrices L, Q are invertible.
USAGE: [L,U,Q] = luq(A,do_pivot,tol)
INPUT:
A - a sparse matrix
do_pivot = 1 with column pivoting
= 0 without column pivoting
tol - uses the tolerance tol in separating zero and nonzero values
OUTPUT:
L,U,Q matrices
COMMENTS:
This method is based on lu decomposition,
https://en.wikipedia.org/wiki/LU_decomposition.
Based on LREM_SOLVE:
Copyright (c) <NAME> (2006)
All rights reserved
LREM_SOLVE toolbox is available free for noncommercial academic use only.
<EMAIL>
"""
n, m = A.shape
# Test if A is a sparse matrix
# if ~issparse(A)
# A = sparse(A)
# end
###########################################################################
# SPECIAL CASES
###########################################################################
if(n == 0 or m == 0):
# Return (L, U, Q) = (I(nxn), A, I(mxm))
return (scipy.sparse.identity(n), A, scipy.sparse.identity(m))
###########################################################################
# LU DECOMPOSITION
###########################################################################
# Perform a LU decomposition on A.
# Returns a scipy.sparse.linalg.SuperLU
LUDecomp = scipy.sparse.linalg.splu(A)
L = LUDecomp.L
U = LUDecomp.U
P = scipy.sparse.csr_matrix((n, n))
P[numpy.arange(m), LUDecomp.perm_r] = 1 # Construct a Permutation matrix
if do_pivot:
Q = scipy.sparse.csr_matrix((m, m))
Q[numpy.arange(m), LUDecomp.perm_c] = 1
Q = Q.T
else:
Q = scipy.sparse.identity(m)
# import pdb; pdb.set_trace()
p = n - L.shape[1]
if(p != 0):
LL = scipy.sparse.vstack([scipy.sparse.csc_matrix((n - p, p)),
scipy.sparse.identity(p).tocsc()])
L = scipy.sparse.hstack([P.T.dot(L), P[(n - p):n, :].T])
U = scipy.sparse.vstack([U, scipy.sparse.csc_matrix((p, m))])
###########################################################################
# FINDS ROWS WITH ZERO AND NONZERO ELEMENTS ON THE DIAGONAL
###########################################################################
if(U.shape[0] == 1 or U.shape[1] == 1):
S = scipy.sparse.csc_matrix(U[0, 0])
else:
S = scipy.sparse.dia_matrix((U.diagonal(), [0]), shape=U.shape)
# I = find(abs(S)>tol)
I = (abs(S) > tol).nonzero()
# Jl = (1:n)'
Jl = numpy.arange(0, n).reshape((1, n)).T
# Jl(I) = []
Jl = numpy.delete(Jl, I[0])
# Jq = (1:m)'
Jq = numpy.arange(0, m).reshape((1, m)).T
# Jq(I) = []
Jq = numpy.delete(Jq, I)
# Ubar1 = U(I,I)
Ubar1 = U[I]
# Ubar2 = U(Jl,Jq)
Ubar2 = U[Jl.flatten(), Jq.flatten()]
# Qbar1 = Q(I,:)
Qbar1 = Q[I[0], :]
# Lbar1 = L(:,I)
Lbar1 = L[:, I[1]]
###########################################################################
# ELIMINATES NONZEZO ELEMENTS BELOW AND ON THE RIGHT OF THE
# INVERTIBLE BLOCK OF THE MATRIX U
#
# UPDATES MATRICES L, Q
###########################################################################
# if ~isempty(I)
import pdb
pdb.set_trace()
if(I[0].shape[0] != 0):
# Utmp = U(I,Jq)
Utmp = U[I[0], Jq]
# X = Ubar1'\U(Jl,I)'
X = scipy.sparse.linalg.spsolve(Ubar1.T, U[Jl, I].T)
# Ubar2 = Ubar2-X'*Utmp
Ubar2 = Ubar2 - X.T.dot(Utmp)
# Lbar1 = Lbar1+L(:,Jl)*X'
Lbar1 = Lbar1 + L[:, Jl].dot(X.T)
# X = Ubar1\Utmp
X = scipy.sparse.linalg.spsolve(Ubar1, Utmp)
# Qbar1 = Qbar1+X*Q(Jq,:)
Qbar1 = Qbar1 + X.dot(Q[Jq, :])
# Utmp = []
Utmp = numpy.empty(1)
# X = []
X = numpy.empty(1)
# end
###########################################################################
# FINDS ROWS AND COLUMNS WITH ONLY ZERO ELEMENTS
###########################################################################
# I2 = find(max(abs(Ubar2),[],2)>tol)
I2 = ((abs(Ubar2)).max(1) > tol).nonzero()
# I5 = find(max(abs(Ubar2),[],1)>tol)
I5 = ((abs(Ubar2)).max(0) > tol).nonzero()
# I3 = Jl(I2)
I3 = Jl[I2]
# I4 = Jq(I5)
I4 = Jq[I5]
# Jq(I5) = []
Jq[I5] = numpy.empty(1)
# Jl(I2) = []
J1[I2] = numpy.empty(1)
# U = []
U = numpy.empty(1)
###########################################################################
# FINDS A PART OF THE MATRIX U WHICH IS NOT IN THE REQIRED FORM
###########################################################################
# A = Ubar2(I2,I5)
A = Ubar[I2, I5]
###########################################################################
# PERFORMS LUQ DECOMPOSITION OF THE MATRIX A
###########################################################################
# [L1,U1,Q1] = luq(A,do_pivot,tol)
L1, U1, Q1 = luq(A, do_pivot, tol)
###########################################################################
# UPDATES MATRICES L, U, Q
###########################################################################
# Lbar2 = L(:,I3)*L1
Lbar2 = L[:, I3].dot(L1)
# Qbar2 = Q1*Q(I4,:)
Qbar2 = Q1.dot(Q[I4, :])
# L = [Lbar1 Lbar2 L(:,Jl)]
L = scipy.sparse.hstack([Lbar1, Lbar2, L[:, Jl]])
# Q = [Qbar1; Qbar2; Q(Jq,:)]
Q = scipy.sparse.vstack([Qbar1, Qbar2, Q[Jq, :]])
# n1 = length(I)
n1 = I.shape[0]
# n2 = length(I3)
n2 = I3.shape[0]
# m2 = length(I4)
m2 = I4.shape[0]
# U = [Ubar1 sparse(n1,m-n1);sparse(n2,n1) U1 sparse(n2,m-n1-m2);
# sparse(n-n1-n2,m)]
U = scipy.sparse.vstack([
scipy.sparse.hstack([Ubar1, scipy.sparse.csc_matrix(
shape = (n1, m - n1))]),
scipy.sparse.hstack([scipy.sparse.csc_matrix(
shape = (n2, n1)), U1, scipy.sparse.csc_matrix(
shape = (n2, m - n1 - m2))]),
scipy.sparse.csc_matrix(n - n1 - n2, m)
])
return L, U, Q
if __name__ == "__main__":
# A = scipy.sparse.csc_matrix(numpy.ones((4, 4)))
A = scipy.sparse.identity(4).tocsc()
L, U, Q = luq(A, True)
print("L:\n%s" % L)
print("U:\n%s" % U)
print("Q:\n%s" % Q)
print("A = L*U*Q:\n%s" % L.dot(U).dot(Q)) | de | 0.392463 | Compute the LUQ decomposition of a sparse square matrix. Based on Pawel Kowal's MatLab code. Written by: <NAME> PURPOSE: calculates the following decomposition A = L |Ubar 0 | Q |0 0 | where Ubar is a square invertible matrix and matrices L, Q are invertible. USAGE: [L,U,Q] = luq(A,do_pivot,tol) INPUT: A - a sparse matrix do_pivot = 1 with column pivoting = 0 without column pivoting tol - uses the tolerance tol in separating zero and nonzero values OUTPUT: L,U,Q matrices COMMENTS: This method is based on lu decomposition, https://en.wikipedia.org/wiki/LU_decomposition. Based on LREM_SOLVE: Copyright (c) <NAME> (2006) All rights reserved LREM_SOLVE toolbox is available free for noncommercial academic use only. <EMAIL> # Test if A is a sparse matrix # if ~issparse(A) # A = sparse(A) # end ########################################################################### # SPECIAL CASES ########################################################################### # Return (L, U, Q) = (I(nxn), A, I(mxm)) ########################################################################### # LU DECOMPOSITION ########################################################################### # Perform a LU decomposition on A. 
# Returns a scipy.sparse.linalg.SuperLU # Construct a Permutation matrix # import pdb; pdb.set_trace() ########################################################################### # FINDS ROWS WITH ZERO AND NONZERO ELEMENTS ON THE DIAGONAL ########################################################################### # I = find(abs(S)>tol) # Jl = (1:n)' # Jl(I) = [] # Jq = (1:m)' # Jq(I) = [] # Ubar1 = U(I,I) # Ubar2 = U(Jl,Jq) # Qbar1 = Q(I,:) # Lbar1 = L(:,I) ########################################################################### # ELIMINATES NONZEZO ELEMENTS BELOW AND ON THE RIGHT OF THE # INVERTIBLE BLOCK OF THE MATRIX U # # UPDATES MATRICES L, Q ########################################################################### # if ~isempty(I) # Utmp = U(I,Jq) # X = Ubar1'\U(Jl,I)' # Ubar2 = Ubar2-X'*Utmp # Lbar1 = Lbar1+L(:,Jl)*X' # X = Ubar1\Utmp # Qbar1 = Qbar1+X*Q(Jq,:) # Utmp = [] # X = [] # end ########################################################################### # FINDS ROWS AND COLUMNS WITH ONLY ZERO ELEMENTS ########################################################################### # I2 = find(max(abs(Ubar2),[],2)>tol) # I5 = find(max(abs(Ubar2),[],1)>tol) # I3 = Jl(I2) # I4 = Jq(I5) # Jq(I5) = [] # Jl(I2) = [] # U = [] ########################################################################### # FINDS A PART OF THE MATRIX U WHICH IS NOT IN THE REQIRED FORM ########################################################################### # A = Ubar2(I2,I5) ########################################################################### # PERFORMS LUQ DECOMPOSITION OF THE MATRIX A ########################################################################### # [L1,U1,Q1] = luq(A,do_pivot,tol) ########################################################################### # UPDATES MATRICES L, U, Q ########################################################################### # Lbar2 = L(:,I3)*L1 # Qbar2 = Q1*Q(I4,:) # L = [Lbar1 Lbar2 L(:,Jl)] # Q = [Qbar1; Qbar2; Q(Jq,:)] 
# n1 = length(I) # n2 = length(I3) # m2 = length(I4) # U = [Ubar1 sparse(n1,m-n1);sparse(n2,n1) U1 sparse(n2,m-n1-m2); # sparse(n-n1-n2,m)] # A = scipy.sparse.csc_matrix(numpy.ones((4, 4))) | 3.567048 | 4 |
aperte/base/views.py | smevirtual/aperte | 0 | 6622074 | <reponame>smevirtual/aperte
# Copyright 2018 SME Virtual Network Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base views.
"""
# Third Party
from django.conf import settings
from django.shortcuts import render
def render_csrf_failure(request, reason: str = '', template_name: str = '403_csrf.html'):
"""
View for rendering CSRF verification failures.
Parameters
----------
request
The request object used to generate this response.
reason
The reason constant from `django.middleware.csrf` as to why the CSRF
verification failed.
template_name
The full name of the template to render.
Returns
-------
django.http.HttpResponse
A `HttpResponse` object with the rendered text.
"""
from django.middleware.csrf import REASON_NO_REFERER, REASON_NO_CSRF_COOKIE
context = {
'reason': reason,
'no_referer': reason == REASON_NO_REFERER,
'no_cookie': reason == REASON_NO_CSRF_COOKIE,
'DEBUG': settings.DEBUG,
}
return render(request, template_name=template_name, context=context, status=403)
def render_text_files(request, template_name: str):
"""
View for rendering text files.
Parameters
----------
request
The request object used to generate this response.
template_name
The full name of the text file template to render.
Returns
-------
django.http.HttpResponse
A `HttpResponse` object with the rendered text.
"""
return render(request, template_name, {}, content_type='text/plain')
| # Copyright 2018 SME Virtual Network Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base views.
"""
# Third Party
from django.conf import settings
from django.shortcuts import render
def render_csrf_failure(request, reason: str = '', template_name: str = '403_csrf.html'):
"""
View for rendering CSRF verification failures.
Parameters
----------
request
The request object used to generate this response.
reason
The reason constant from `django.middleware.csrf` as to why the CSRF
verification failed.
template_name
The full name of the template to render.
Returns
-------
django.http.HttpResponse
A `HttpResponse` object with the rendered text.
"""
from django.middleware.csrf import REASON_NO_REFERER, REASON_NO_CSRF_COOKIE
context = {
'reason': reason,
'no_referer': reason == REASON_NO_REFERER,
'no_cookie': reason == REASON_NO_CSRF_COOKIE,
'DEBUG': settings.DEBUG,
}
return render(request, template_name=template_name, context=context, status=403)
def render_text_files(request, template_name: str):
"""
View for rendering text files.
Parameters
----------
request
The request object used to generate this response.
template_name
The full name of the text file template to render.
Returns
-------
django.http.HttpResponse
A `HttpResponse` object with the rendered text.
"""
return render(request, template_name, {}, content_type='text/plain') | en | 0.686578 | # Copyright 2018 SME Virtual Network Contributors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Base views. # Third Party View for rendering CSRF verification failures. Parameters ---------- request The request object used to generate this response. reason The reason constant from `django.middleware.csrf` as to why the CSRF verification failed. template_name The full name of the template to render. Returns ------- django.http.HttpResponse A `HttpResponse` object with the rendered text. View for rendering text files. Parameters ---------- request The request object used to generate this response. template_name The full name of the text file template to render. Returns ------- django.http.HttpResponse A `HttpResponse` object with the rendered text. | 2.095135 | 2 |
py2pxd_/pxclass.py | secretyv/py2pxd | 0 | 6622075 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ast
import logging
from .pxreader import PXReader
from .pxvariable import PXVariable
from .pxfunction import PXFunction
LOGGER = logging.getLogger("INRS.IEHSS.Python.cython.class")
class PXClass(ast.NodeVisitor, PXReader):
def __init__(self):
super(PXClass, self).__init__()
self.node = None
self.name = None
self.type = None
self.bases = []
self.meths = []
self.attrs = {}
def __eq__(self, other):
return self.name == other.name
def merge(self, other):
assert self == other
LOGGER.debug('PXClass.merge: %s', self.name)
self.bases = self.bases + [i for i in other.bases if i not in self.bases]
for k in other.attrs:
self.attrs.setdefault(k, other.attrs[k])
for k in self.attrs:
try:
self.attrs[k].merge(other.attrs[k])
except KeyError:
pass
self.meths = self.meths + [i for i in other.meths if i not in self.meths]
for meth in self.meths:
try:
idx = other.meths.index(meth)
meth.merge(other.meths[idx])
except ValueError:
pass
#--------------------
# Python source code parser (ast visitors)
#--------------------
def getOneBaseName(self, node):
if isinstance(node, ast.Attribute):
return '.'.join((self.getOneBaseName(node.value), node.attr))
elif isinstance(node, ast.Name):
return node.id
def visit_ClassDef(self, node):
print(ValueError('Nested classes are not yet supported'))
def visit_FunctionDef(self, node):
LOGGER.debug('PXClass.visit_FunctionDef')
isSpecialName = False
if len(node.name) > 4 and node.name[:2] == '__' and node.name[-2:] == '__':
isSpecialName = True
v = PXFunction(self)
v.doVisit(node)
if not isSpecialName: self.meths.append(v)
self.attrs.update(v.attrs)
def visit_Assign(self, node):
"""Class attributes"""
LOGGER.debug('PXClass.visit_Assign')
try:
v = ast.literal_eval(node.value)
t = type(v)
except Exception as e:
LOGGER.debug('Exception: %s', str(e))
t = type(None)
for tgt in node.targets:
if tgt.id not in ['__slots__']:
a = PXVariable()
a.doVisit(tgt.id, type_name=t)
self.attrs[a.name] = a
def doVisit(self, node):
LOGGER.debug('PXClass.doVisit')
self.node = node
self.name = self.node.name
LOGGER.debug('PXClass.doVisit: class %s(...)', self.name)
self.bases = [self.getOneBaseName(n) for n in node.bases]
self.generic_visit(node)
def resolveHierarchy(self, knownClasses):
"""
"""
# --- Look for childrens
childs = []
for c in knownClasses:
if self.name in c.bases: childs.append(c)
# --- Resolve first childrens
for c in childs:
c.resolveHierarchy(knownClasses)
# --- Remove attributes allready defined in parent
for c in knownClasses:
if c.name in self.bases:
LOGGER.debug('PXClass.resolveHierarchy: %s is child of %s', self.name, c.name)
for a in c.attrs:
if a in self.attrs:
del self.attrs[a]
#--------------------
# Reader for pxd files
#--------------------
def read_attr(self, attr):
try:
attr = attr.split('cdef ')[1].strip()
attr = attr.split('public ')[1].strip()
except Exception:
pass
a = PXVariable()
a.read_arg(attr)
self.attrs[a.name] = a
def read_decl(self, decl):
assert decl[-1] == ':'
decl = decl[:-1]
decl = decl.split('class ')[1]
decl = decl.strip()
try:
d, h = decl.split('(', 1)
h = h[:-1]
self.bases = [h_.strip() for h_ in h.split(',')]
except Exception:
d = decl
try:
t, n = d.split(' ')
except Exception:
t, n = '', d
self.type = t.strip()
self.name = n.strip()
def read(self, decl, fi):
self.read_decl(decl)
LOGGER.debug('PXClass.read: %s', self.name)
lcls = {}
for l in PXReader.read_line(fi):
if l == 'pass':
pass
elif l[0:5] == 'cdef ':
self.read_attr(l)
elif l[0:6] == 'cpdef ':
f = PXFunction(self)
f.read(l, lcls)
LOGGER.debug(' append method %s', f.name)
self.meths.append(f)
lcls = {}
elif l[0:14] == '@cython.locals':
lcls = PXReader.read_locals(l)
elif l == '':
return
#--------------------
# Writer for pxd file
#--------------------
def write(self, fo, indent=0):
bases = ''
if self.bases:
bases = '(%s)' % ', '.join(self.bases)
fmt = '{indent}cdef class {name}{bases}:\n'
s = fmt.format(indent=' '*indent, name=self.name, bases=bases)
fo.write(s)
indent += 4
if self.attrs or self.meths:
fmt = '{indent}cdef public {type:12s} {name}\n'
for k in sorted(self.attrs.keys()):
s = fmt.format(indent=' '*indent, type=self.attrs[k].type, name=self.attrs[k].name)
fo.write(s)
if self.attrs and self.meths:
s = '{indent}#\n'.format(indent=' '*indent)
fo.write(s)
for m in self.meths:
m.write(fo, indent=indent)
else:
s = '{indent}pass\n'.format(indent=' '*indent)
fo.write(s)
if __name__ == "__main__":
def main():
c = PXClass()
streamHandler = logging.StreamHandler()
LOGGER.addHandler(streamHandler)
LOGGER.setLevel(logging.DEBUG)
main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ast
import logging
from .pxreader import PXReader
from .pxvariable import PXVariable
from .pxfunction import PXFunction
LOGGER = logging.getLogger("INRS.IEHSS.Python.cython.class")
class PXClass(ast.NodeVisitor, PXReader):
def __init__(self):
super(PXClass, self).__init__()
self.node = None
self.name = None
self.type = None
self.bases = []
self.meths = []
self.attrs = {}
def __eq__(self, other):
return self.name == other.name
def merge(self, other):
assert self == other
LOGGER.debug('PXClass.merge: %s', self.name)
self.bases = self.bases + [i for i in other.bases if i not in self.bases]
for k in other.attrs:
self.attrs.setdefault(k, other.attrs[k])
for k in self.attrs:
try:
self.attrs[k].merge(other.attrs[k])
except KeyError:
pass
self.meths = self.meths + [i for i in other.meths if i not in self.meths]
for meth in self.meths:
try:
idx = other.meths.index(meth)
meth.merge(other.meths[idx])
except ValueError:
pass
#--------------------
# Python source code parser (ast visitors)
#--------------------
def getOneBaseName(self, node):
if isinstance(node, ast.Attribute):
return '.'.join((self.getOneBaseName(node.value), node.attr))
elif isinstance(node, ast.Name):
return node.id
def visit_ClassDef(self, node):
print(ValueError('Nested classes are not yet supported'))
def visit_FunctionDef(self, node):
LOGGER.debug('PXClass.visit_FunctionDef')
isSpecialName = False
if len(node.name) > 4 and node.name[:2] == '__' and node.name[-2:] == '__':
isSpecialName = True
v = PXFunction(self)
v.doVisit(node)
if not isSpecialName: self.meths.append(v)
self.attrs.update(v.attrs)
def visit_Assign(self, node):
"""Class attributes"""
LOGGER.debug('PXClass.visit_Assign')
try:
v = ast.literal_eval(node.value)
t = type(v)
except Exception as e:
LOGGER.debug('Exception: %s', str(e))
t = type(None)
for tgt in node.targets:
if tgt.id not in ['__slots__']:
a = PXVariable()
a.doVisit(tgt.id, type_name=t)
self.attrs[a.name] = a
def doVisit(self, node):
LOGGER.debug('PXClass.doVisit')
self.node = node
self.name = self.node.name
LOGGER.debug('PXClass.doVisit: class %s(...)', self.name)
self.bases = [self.getOneBaseName(n) for n in node.bases]
self.generic_visit(node)
def resolveHierarchy(self, knownClasses):
"""
"""
# --- Look for childrens
childs = []
for c in knownClasses:
if self.name in c.bases: childs.append(c)
# --- Resolve first childrens
for c in childs:
c.resolveHierarchy(knownClasses)
# --- Remove attributes allready defined in parent
for c in knownClasses:
if c.name in self.bases:
LOGGER.debug('PXClass.resolveHierarchy: %s is child of %s', self.name, c.name)
for a in c.attrs:
if a in self.attrs:
del self.attrs[a]
#--------------------
# Reader for pxd files
#--------------------
def read_attr(self, attr):
try:
attr = attr.split('cdef ')[1].strip()
attr = attr.split('public ')[1].strip()
except Exception:
pass
a = PXVariable()
a.read_arg(attr)
self.attrs[a.name] = a
def read_decl(self, decl):
assert decl[-1] == ':'
decl = decl[:-1]
decl = decl.split('class ')[1]
decl = decl.strip()
try:
d, h = decl.split('(', 1)
h = h[:-1]
self.bases = [h_.strip() for h_ in h.split(',')]
except Exception:
d = decl
try:
t, n = d.split(' ')
except Exception:
t, n = '', d
self.type = t.strip()
self.name = n.strip()
def read(self, decl, fi):
self.read_decl(decl)
LOGGER.debug('PXClass.read: %s', self.name)
lcls = {}
for l in PXReader.read_line(fi):
if l == 'pass':
pass
elif l[0:5] == 'cdef ':
self.read_attr(l)
elif l[0:6] == 'cpdef ':
f = PXFunction(self)
f.read(l, lcls)
LOGGER.debug(' append method %s', f.name)
self.meths.append(f)
lcls = {}
elif l[0:14] == '@cython.locals':
lcls = PXReader.read_locals(l)
elif l == '':
return
#--------------------
# Writer for pxd file
#--------------------
def write(self, fo, indent=0):
bases = ''
if self.bases:
bases = '(%s)' % ', '.join(self.bases)
fmt = '{indent}cdef class {name}{bases}:\n'
s = fmt.format(indent=' '*indent, name=self.name, bases=bases)
fo.write(s)
indent += 4
if self.attrs or self.meths:
fmt = '{indent}cdef public {type:12s} {name}\n'
for k in sorted(self.attrs.keys()):
s = fmt.format(indent=' '*indent, type=self.attrs[k].type, name=self.attrs[k].name)
fo.write(s)
if self.attrs and self.meths:
s = '{indent}#\n'.format(indent=' '*indent)
fo.write(s)
for m in self.meths:
m.write(fo, indent=indent)
else:
s = '{indent}pass\n'.format(indent=' '*indent)
fo.write(s)
if __name__ == "__main__":
def main():
c = PXClass()
streamHandler = logging.StreamHandler()
LOGGER.addHandler(streamHandler)
LOGGER.setLevel(logging.DEBUG)
main()
| en | 0.339342 | #!/usr/bin/env python # -*- coding: utf-8 -*- #-------------------- # Python source code parser (ast visitors) #-------------------- Class attributes # --- Look for childrens # --- Resolve first childrens # --- Remove attributes allready defined in parent #-------------------- # Reader for pxd files #-------------------- #-------------------- # Writer for pxd file #-------------------- #\n'.format(indent=' '*indent) | 2.566183 | 3 |
translation/filters.py | paxenarius/ajiragis-api | 0 | 6622076 | import django_filters
from .models import Word
class WordFilter(django_filters.FilterSet):
iso_639_2_code = django_filters.CharFilter(method='word_iso_code_fiiler')
class Meta:
model = Word
fields = ['word', 'language', 'iso_639_2_code', 'part_of_speech']
def word_iso_code_fiiler(self, queryset, name, value):
return queryset.filter(language__iso_639_2_code=value)
| import django_filters
from .models import Word
class WordFilter(django_filters.FilterSet):
iso_639_2_code = django_filters.CharFilter(method='word_iso_code_fiiler')
class Meta:
model = Word
fields = ['word', 'language', 'iso_639_2_code', 'part_of_speech']
def word_iso_code_fiiler(self, queryset, name, value):
return queryset.filter(language__iso_639_2_code=value)
| none | 1 | 2.332231 | 2 | |
src/evaluation/evaluator_loe.py | tiefenauer/ip7-python | 0 | 6622077 | from src.database.classification_results import LoeClassificationResults
from src.evaluation.evaluator import Evaluator
from src.scoring.loe_scorer_linear import LinearLoeScorer
from src.scoring.loe_scorer_strict import StrictLoeScorer
from src.scoring.loe_scorer_tolerant import TolerantLoeScorer
class LoeEvaluator(Evaluator):
def __init__(self, args):
super(LoeEvaluator, self).__init__(args, LoeClassificationResults())
self.scorer_strict = StrictLoeScorer()
self.scorer_tolerant = TolerantLoeScorer()
self.scorer_linear = LinearLoeScorer()
def get_scorers(self):
return [self.scorer_strict, self.scorer_tolerant, self.scorer_linear]
def calculate_scores(self, class_expected, class_predicted):
score_strict = self.scorer_strict.calculate_score(class_expected, class_predicted)
score_tolerant = self.scorer_tolerant.calculate_score(class_expected, class_predicted)
score_linear = self.scorer_linear.calculate_score(class_expected, class_predicted)
return score_strict, score_tolerant, score_linear
def is_classified(self, predicted_class):
return predicted_class and len(predicted_class) > 0
| from src.database.classification_results import LoeClassificationResults
from src.evaluation.evaluator import Evaluator
from src.scoring.loe_scorer_linear import LinearLoeScorer
from src.scoring.loe_scorer_strict import StrictLoeScorer
from src.scoring.loe_scorer_tolerant import TolerantLoeScorer
class LoeEvaluator(Evaluator):
def __init__(self, args):
super(LoeEvaluator, self).__init__(args, LoeClassificationResults())
self.scorer_strict = StrictLoeScorer()
self.scorer_tolerant = TolerantLoeScorer()
self.scorer_linear = LinearLoeScorer()
def get_scorers(self):
return [self.scorer_strict, self.scorer_tolerant, self.scorer_linear]
def calculate_scores(self, class_expected, class_predicted):
score_strict = self.scorer_strict.calculate_score(class_expected, class_predicted)
score_tolerant = self.scorer_tolerant.calculate_score(class_expected, class_predicted)
score_linear = self.scorer_linear.calculate_score(class_expected, class_predicted)
return score_strict, score_tolerant, score_linear
def is_classified(self, predicted_class):
return predicted_class and len(predicted_class) > 0
| none | 1 | 2.429915 | 2 | |
newsletter/models.py | elyak123/imagewrite | 0 | 6622078 | <reponame>elyak123/imagewrite
from django.db import models
from .storages import upload_to_image_1, OverwriteStorage, Image1Storage
class Newsletter(models.Model):
def upload_to_image_2(self, filename):
if self.pk:
pk = self.pk
else:
instance = Newsletter.objects.all().order_by("-pk").first()
if not instance:
pk = 1
else:
pk = instance.pk + 1
return f"newsletter/volume{pk}/volume{pk}-image-2.{filename.split('.')[-1]}"
def upload_to_image_3(self, filename):
if self.pk:
pk = self.pk
else:
instance = Newsletter.objects.all().order_by("-pk").first()
if not instance:
pk = 1
else:
pk = instance.pk + 1
return f"newsletter/volume{pk}/volume{pk}-image-3.{filename.split('.')[-1]}"
def upload_to_image_4(self, filename):
if self.pk:
pk = self.pk
else:
instance = Newsletter.objects.all().order_by("-pk").first()
if not instance:
pk = 1
else:
pk = instance.pk + 1
return f"newsletter/volume{pk}/volume{pk}-image-4.{filename.split('.')[-1]}"
image_1 = models.ImageField(upload_to=upload_to_image_1, storage=Image1Storage)
image_2 = models.ImageField(upload_to=upload_to_image_2, storage=OverwriteStorage(), null=True, blank=True)
image_3 = models.ImageField(upload_to=upload_to_image_3, storage=OverwriteStorage(), null=True, blank=True)
image_4 = models.ImageField(upload_to=upload_to_image_4, storage=OverwriteStorage(), null=True, blank=True)
| from django.db import models
from .storages import upload_to_image_1, OverwriteStorage, Image1Storage
class Newsletter(models.Model):
def upload_to_image_2(self, filename):
if self.pk:
pk = self.pk
else:
instance = Newsletter.objects.all().order_by("-pk").first()
if not instance:
pk = 1
else:
pk = instance.pk + 1
return f"newsletter/volume{pk}/volume{pk}-image-2.{filename.split('.')[-1]}"
def upload_to_image_3(self, filename):
if self.pk:
pk = self.pk
else:
instance = Newsletter.objects.all().order_by("-pk").first()
if not instance:
pk = 1
else:
pk = instance.pk + 1
return f"newsletter/volume{pk}/volume{pk}-image-3.{filename.split('.')[-1]}"
def upload_to_image_4(self, filename):
if self.pk:
pk = self.pk
else:
instance = Newsletter.objects.all().order_by("-pk").first()
if not instance:
pk = 1
else:
pk = instance.pk + 1
return f"newsletter/volume{pk}/volume{pk}-image-4.{filename.split('.')[-1]}"
image_1 = models.ImageField(upload_to=upload_to_image_1, storage=Image1Storage)
image_2 = models.ImageField(upload_to=upload_to_image_2, storage=OverwriteStorage(), null=True, blank=True)
image_3 = models.ImageField(upload_to=upload_to_image_3, storage=OverwriteStorage(), null=True, blank=True)
image_4 = models.ImageField(upload_to=upload_to_image_4, storage=OverwriteStorage(), null=True, blank=True) | none | 1 | 2.259729 | 2 | |
server/framework/lib/classes/__init__.py | tetelevm/OrdeRPG | 0 | 6622079 | """
A submodule with individual classes.
"""
from .env_parser import __all_for_module__ as __env_parser_all__
from .hasher import __all_for_module__ as __hasher_all__
from .singleton import __all_for_module__ as __singleton_all__
from .env_parser import *
from .hasher import *
from .singleton import *
__all_for_module__ = (
__env_parser_all__ +
__hasher_all__ +
__singleton_all__
)
__all__ = __all_for_module__
| """
A submodule with individual classes.
"""
from .env_parser import __all_for_module__ as __env_parser_all__
from .hasher import __all_for_module__ as __hasher_all__
from .singleton import __all_for_module__ as __singleton_all__
from .env_parser import *
from .hasher import *
from .singleton import *
__all_for_module__ = (
__env_parser_all__ +
__hasher_all__ +
__singleton_all__
)
__all__ = __all_for_module__
| en | 0.637494 | A submodule with individual classes. | 1.389239 | 1 |
simulator/customer.py | animilimina/customer_activity_simulator | 1 | 6622080 | <reponame>animilimina/customer_activity_simulator
import settings
from random import random
from numpy.random import normal
class Customer:
def __init__(self, first_period: int, last_period: int, average_returning_rate: float, average_reactivation_rate:float, average_survival_rate: float):
self.__first_period = first_period
self.__last_period = last_period
self.__seniority = 0
self.__returning_rate = self.__generate_rate(average_returning_rate)
self.__reactivation_rate = self.__generate_rate(average_reactivation_rate)
self.__survival_rate = self.__generate_rate(average_survival_rate)
self.__is_active = True
self.__is_alive = True
self.__active_periods = []
self.__list_active_periods()
@staticmethod
def __generate_rate(rate) -> float:
sd = (1 - rate) / 2
x = normal(rate, sd)
return max(0, min(x, .99))
def __list_active_periods(self):
while self.__is_alive and self.__first_period + self.__seniority < self.__last_period:
self.__test_if_active()
self.__build_active_periods_list()
self.__test_if_survives()
self.__increase_seniority()
def __test_if_active(self):
if self.__seniority == 0:
pass
elif self.__is_active:
self.__activity_if_previously_active()
else:
self.__activity_if_previously_inactive()
def __activity_if_previously_active(self):
if random() > self.__returning_rate:
self.__is_active = False
def __activity_if_previously_inactive(self):
if random() <= self.__reactivation_rate:
self.__is_active = True
def __build_active_periods_list(self):
if self.__is_active:
self.__active_periods.append(self.__first_period + self.__seniority)
def __test_if_survives(self):
if random() > self.__survival_rate:
self.__is_alive = False
def __increase_seniority(self):
self.__seniority += 1
def get_active_periods(self) -> list:
return self.__active_periods
| import settings
from random import random
from numpy.random import normal
class Customer:
def __init__(self, first_period: int, last_period: int, average_returning_rate: float, average_reactivation_rate:float, average_survival_rate: float):
self.__first_period = first_period
self.__last_period = last_period
self.__seniority = 0
self.__returning_rate = self.__generate_rate(average_returning_rate)
self.__reactivation_rate = self.__generate_rate(average_reactivation_rate)
self.__survival_rate = self.__generate_rate(average_survival_rate)
self.__is_active = True
self.__is_alive = True
self.__active_periods = []
self.__list_active_periods()
@staticmethod
def __generate_rate(rate) -> float:
sd = (1 - rate) / 2
x = normal(rate, sd)
return max(0, min(x, .99))
def __list_active_periods(self):
while self.__is_alive and self.__first_period + self.__seniority < self.__last_period:
self.__test_if_active()
self.__build_active_periods_list()
self.__test_if_survives()
self.__increase_seniority()
def __test_if_active(self):
if self.__seniority == 0:
pass
elif self.__is_active:
self.__activity_if_previously_active()
else:
self.__activity_if_previously_inactive()
def __activity_if_previously_active(self):
if random() > self.__returning_rate:
self.__is_active = False
def __activity_if_previously_inactive(self):
if random() <= self.__reactivation_rate:
self.__is_active = True
def __build_active_periods_list(self):
if self.__is_active:
self.__active_periods.append(self.__first_period + self.__seniority)
def __test_if_survives(self):
if random() > self.__survival_rate:
self.__is_alive = False
def __increase_seniority(self):
self.__seniority += 1
def get_active_periods(self) -> list:
return self.__active_periods | none | 1 | 2.980429 | 3 | |
experiments/ZxH.py | Vrekrer/magdynlab | 6 | 6622081 | # -*- coding: utf-8 -*-
import numpy
import time
import os
import magdynlab.instruments
import magdynlab.controllers
import magdynlab.data_types
import threading_decorators as ThD
import matplotlib.pyplot as plt
@ThD.gui_safe
def Plot_ColorMap(Data):
f = plt.figure('ZxH', (5, 4))
extent = numpy.array([Data['h'].min(),
Data['h'].max(),
Data['f'].min()/1E3,
Data['f'].max()/1E3])
if not(f.axes):
plt.subplot()
ax = f.axes[0]
ax.clear()
ax.imshow(Data['ColorMap'].T,
aspect='auto',
origin='lower',
extent=extent)
ax.set_xlabel('Field (Oe)')
ax.set_ylabel('Freq (kHz)')
f.tight_layout()
f.canvas.draw()
@ThD.gui_safe
def Plot_ColorMapTime(Data):
f = plt.figure('Zxt', (5, 4))
extent = numpy.array([Data['t'].min(),
Data['t'].max(),
Data['f'].min()/1E3,
Data['f'].max()/1E3])
if not(f.axes):
plt.subplot()
ax = f.axes[0]
ax.clear()
ax.imshow(Data['ColorMap'].T,
aspect='auto',
origin='lower',
extent=extent)
ax.set_xlabel('Time (s)')
ax.set_ylabel('Freq (kHz)')
f.tight_layout()
f.canvas.draw()
@ThD.gui_safe
def Plot_ResFreq(Data):
f = plt.figure('ResFreq', (5, 4))
if not(f.axes):
plt.subplot()
ax = f.axes[0]
ymax = numpy.nanmax(Data['ResFreq'])/1E3
ymin = numpy.nanmin(Data['ResFreq'])/1E3
dy = numpy.max([ymax - ymin, 1E-6])
if not(ax.lines):
ax.plot([],[],'b.-')
ax.set_xlim([Data['t'].min(), Data['t'].max()])
ax.set_ylim([ymax+dy, ymin-dy])
line = ax.lines[-1]
line.set_data(Data['t'], Data['ResFreq']/1E3)
ax.set_xlabel('Time (s)')
ax.set_ylabel('ResFreq (kHz)')
ax.grid(True)
#check Y scale
yc = (ymax + ymin)/2
ymin, ymax = ax.get_ylim()
ymax = numpy.max([yc + dy*1.1/2, ymax])
ymin = numpy.min([yc - dy*1.1/2, ymin])
ax.set_ylim([ymin, ymax])
f.tight_layout()
f.canvas.draw()
class ZxH(object):
def __init__(self, ResouceNames={}):
logFile = os.path.expanduser('~/MagDynLab.log')
defaultRN = dict(RN_Kepco = 'TCPIP0::192.168.13.7::KepcoBOP2020::INSTR',
RN_IA = 'TCPIP::192.168.13.3::INSTR')
defaultRN.update(ResouceNames)
RN_Kepco = defaultRN['RN_Kepco']
RN_IA = defaultRN['RN_IA']
PowerSource = magdynlab.instruments.KEPCO_BOP(ResourceName=RN_Kepco,
logFile=logFile)
IA = magdynlab.instruments.KEYSIGHT_E4990A(ResourceName=RN_IA,
logFile=logFile)
self.IAC = magdynlab.controllers.IA_Controller(IA)
self.FC = magdynlab.controllers.FieldController(PowerSource)
self.FC.Kepco.Voltage = 5
#Experimental/plot data
self.Data = magdynlab.data_types.DataContainer()
self.Data.file_id = '.ZxH_Raw' #Z vs hs vs fs
self.DataTime = magdynlab.data_types.DataContainer()
self.DataTime.file_id = '.Zxt_Raw' #Z vs ts vs fs
self.ColorMapData = magdynlab.data_types.DataContainer()
self.ColorMapData.file_id = '.ZxH_ColorMap' #|Z| vs hs vs fs
self.SaveFormat = 'npy'
self.Info = ''
self.PlotFunct = numpy.abs
def PlotColorMap(self, i=None):
Z_ref = self.PlotFunct(self.Data['Ref'])
if i is not None:
# Update up to i column
for j in range(i+1):
Z = self.PlotFunct(self.Data['Z'][j])
if self.Data['h'][0] > self.Data['h'][-1]:
j = -1 - j
self.ColorMapData['ColorMap'][j] = Z - Z_ref
else:
Z = self.PlotFunct(self.Data['Z'])
self.ColorMapData['ColorMap'] = Z - Z_ref[None,:]
if self.Data['h'][0] > self.Data['h'][-1]:
self.ColorMapData['ColorMap'] = Z[::-1]
Plot_ColorMap(self.ColorMapData)
def PlotColorMapTime(self, i=None):
Z_ref = self.PlotFunct(self.Data['Ref'])
if i is not None:
# Update up to i column
for j in range(i+1):
Z = self.PlotFunct(self.DataTime['Z'][j])
self.ColorMapData['ColorMap'][j] = Z - Z_ref
else:
Z = self.PlotFunct(self.DataTime['Z'])
self.ColorMapData['ColorMap'] = Z - Z_ref[None,:]
dt = self.DataTime['t'][1] - self.DataTime['t'][0]
if dt < 0:
dt = 1
self.ColorMapData['t'] = numpy.arange(0, len(self.DataTime['t'])) * dt
Plot_ColorMapTime(self.ColorMapData)
if i is not None:
# Update up to i column
for j in range(i+1):
posPeak = self.ColorMapData['ColorMap'][j].argmax()
self.ColorMapData['ResFreq'][j] = self.DataTime['f'][posPeak]
if i >= 1:
Plot_ResFreq(self.ColorMapData)
def MeasureRef(self):
self.Data['Ref'] = self.IAC.getRData(True)
@ThD.as_thread
def Measure(self, fields, file_name, hold_time=0.0):
self.Data['h'] = fields
self.Data['f'] = self.IAC.frequencies
data_shape = (len(self.Data['h']), len(self.Data['f']))
self.Data['Z'] = numpy.zeros(data_shape, dtype=complex)
self.Data.info = self.Info
self.ColorMapData['h'] = self.Data['h']
self.ColorMapData['f'] = self.Data['f']
self.ColorMapData['ColorMap'] = numpy.zeros(data_shape, dtype=float)
self.ColorMapData['ColorMap'] += numpy.nan
self.ColorMapData.info = self.Info
# Loop for each field
for i, h in enumerate(fields):
self.FC.setField(h)
time.sleep(hold_time)
self.Data['Z'][i] = self.IAC.getRData(True)
self.PlotColorMap(i)
ThD.check_stop()
if file_name is not None:
self.Data.save(file_name)
self.FC.TurnOff()
self.FC.Kepco.BEEP()
@ThD.as_thread
def MeasureVsTime(self, field, time_step, n_steps, file_name):
self.DataTime['t'] = numpy.zeros((n_steps))
self.DataTime['f'] = self.IAC.frequencies
data_shape = (len(self.DataTime['t']), len(self.DataTime['f']))
self.DataTime['Z'] = numpy.zeros(data_shape, dtype=complex)
self.ColorMapData['t'] = numpy.arange(0, n_steps)
self.ColorMapData['ResFreq'] = numpy.arange(0, n_steps) + numpy.nan
self.ColorMapData['f'] = self.DataTime['f']
self.ColorMapData['ColorMap'] = numpy.zeros(data_shape, dtype=float)
self.ColorMapData['ColorMap'] += numpy.nan
self.ColorMapData.info = self.Info
self.FC.setField(field)
# Loop for each field
for i in range(n_steps):
time.sleep(time_step)
self.DataTime['t'][i] = time.time()
self.DataTime['Z'][i] = self.IAC.getRData(True)
self.PlotColorMapTime(i)
ThD.check_stop()
self.DataTime.info = self.Info
if file_name is not None:
self.DataTime.save(file_name)
def Stop(self, TurnOff=True):
print('Stoping...')
self.FC.BEEP()
if self.Measure.thread is not None:
self.Measure.stop()
self.Measure.thread.join()
if self.MeasureVsTime.thread is not None:
self.MeasureVsTime.stop()
self.MeasureVsTime.thread.join()
time.sleep(1)
self.FC.BEEP()
time.sleep(0.1)
self.FC.BEEP()
print('DONE')
if TurnOff:
print('Turning field OFF')
self.FC.TurnOff()
print('DONE')
| # -*- coding: utf-8 -*-
import numpy
import time
import os
import magdynlab.instruments
import magdynlab.controllers
import magdynlab.data_types
import threading_decorators as ThD
import matplotlib.pyplot as plt
@ThD.gui_safe
def Plot_ColorMap(Data):
f = plt.figure('ZxH', (5, 4))
extent = numpy.array([Data['h'].min(),
Data['h'].max(),
Data['f'].min()/1E3,
Data['f'].max()/1E3])
if not(f.axes):
plt.subplot()
ax = f.axes[0]
ax.clear()
ax.imshow(Data['ColorMap'].T,
aspect='auto',
origin='lower',
extent=extent)
ax.set_xlabel('Field (Oe)')
ax.set_ylabel('Freq (kHz)')
f.tight_layout()
f.canvas.draw()
@ThD.gui_safe
def Plot_ColorMapTime(Data):
f = plt.figure('Zxt', (5, 4))
extent = numpy.array([Data['t'].min(),
Data['t'].max(),
Data['f'].min()/1E3,
Data['f'].max()/1E3])
if not(f.axes):
plt.subplot()
ax = f.axes[0]
ax.clear()
ax.imshow(Data['ColorMap'].T,
aspect='auto',
origin='lower',
extent=extent)
ax.set_xlabel('Time (s)')
ax.set_ylabel('Freq (kHz)')
f.tight_layout()
f.canvas.draw()
@ThD.gui_safe
def Plot_ResFreq(Data):
f = plt.figure('ResFreq', (5, 4))
if not(f.axes):
plt.subplot()
ax = f.axes[0]
ymax = numpy.nanmax(Data['ResFreq'])/1E3
ymin = numpy.nanmin(Data['ResFreq'])/1E3
dy = numpy.max([ymax - ymin, 1E-6])
if not(ax.lines):
ax.plot([],[],'b.-')
ax.set_xlim([Data['t'].min(), Data['t'].max()])
ax.set_ylim([ymax+dy, ymin-dy])
line = ax.lines[-1]
line.set_data(Data['t'], Data['ResFreq']/1E3)
ax.set_xlabel('Time (s)')
ax.set_ylabel('ResFreq (kHz)')
ax.grid(True)
#check Y scale
yc = (ymax + ymin)/2
ymin, ymax = ax.get_ylim()
ymax = numpy.max([yc + dy*1.1/2, ymax])
ymin = numpy.min([yc - dy*1.1/2, ymin])
ax.set_ylim([ymin, ymax])
f.tight_layout()
f.canvas.draw()
class ZxH(object):
def __init__(self, ResouceNames={}):
logFile = os.path.expanduser('~/MagDynLab.log')
defaultRN = dict(RN_Kepco = 'TCPIP0::192.168.13.7::KepcoBOP2020::INSTR',
RN_IA = 'TCPIP::192.168.13.3::INSTR')
defaultRN.update(ResouceNames)
RN_Kepco = defaultRN['RN_Kepco']
RN_IA = defaultRN['RN_IA']
PowerSource = magdynlab.instruments.KEPCO_BOP(ResourceName=RN_Kepco,
logFile=logFile)
IA = magdynlab.instruments.KEYSIGHT_E4990A(ResourceName=RN_IA,
logFile=logFile)
self.IAC = magdynlab.controllers.IA_Controller(IA)
self.FC = magdynlab.controllers.FieldController(PowerSource)
self.FC.Kepco.Voltage = 5
#Experimental/plot data
self.Data = magdynlab.data_types.DataContainer()
self.Data.file_id = '.ZxH_Raw' #Z vs hs vs fs
self.DataTime = magdynlab.data_types.DataContainer()
self.DataTime.file_id = '.Zxt_Raw' #Z vs ts vs fs
self.ColorMapData = magdynlab.data_types.DataContainer()
self.ColorMapData.file_id = '.ZxH_ColorMap' #|Z| vs hs vs fs
self.SaveFormat = 'npy'
self.Info = ''
self.PlotFunct = numpy.abs
def PlotColorMap(self, i=None):
Z_ref = self.PlotFunct(self.Data['Ref'])
if i is not None:
# Update up to i column
for j in range(i+1):
Z = self.PlotFunct(self.Data['Z'][j])
if self.Data['h'][0] > self.Data['h'][-1]:
j = -1 - j
self.ColorMapData['ColorMap'][j] = Z - Z_ref
else:
Z = self.PlotFunct(self.Data['Z'])
self.ColorMapData['ColorMap'] = Z - Z_ref[None,:]
if self.Data['h'][0] > self.Data['h'][-1]:
self.ColorMapData['ColorMap'] = Z[::-1]
Plot_ColorMap(self.ColorMapData)
def PlotColorMapTime(self, i=None):
Z_ref = self.PlotFunct(self.Data['Ref'])
if i is not None:
# Update up to i column
for j in range(i+1):
Z = self.PlotFunct(self.DataTime['Z'][j])
self.ColorMapData['ColorMap'][j] = Z - Z_ref
else:
Z = self.PlotFunct(self.DataTime['Z'])
self.ColorMapData['ColorMap'] = Z - Z_ref[None,:]
dt = self.DataTime['t'][1] - self.DataTime['t'][0]
if dt < 0:
dt = 1
self.ColorMapData['t'] = numpy.arange(0, len(self.DataTime['t'])) * dt
Plot_ColorMapTime(self.ColorMapData)
if i is not None:
# Update up to i column
for j in range(i+1):
posPeak = self.ColorMapData['ColorMap'][j].argmax()
self.ColorMapData['ResFreq'][j] = self.DataTime['f'][posPeak]
if i >= 1:
Plot_ResFreq(self.ColorMapData)
def MeasureRef(self):
self.Data['Ref'] = self.IAC.getRData(True)
@ThD.as_thread
def Measure(self, fields, file_name, hold_time=0.0):
self.Data['h'] = fields
self.Data['f'] = self.IAC.frequencies
data_shape = (len(self.Data['h']), len(self.Data['f']))
self.Data['Z'] = numpy.zeros(data_shape, dtype=complex)
self.Data.info = self.Info
self.ColorMapData['h'] = self.Data['h']
self.ColorMapData['f'] = self.Data['f']
self.ColorMapData['ColorMap'] = numpy.zeros(data_shape, dtype=float)
self.ColorMapData['ColorMap'] += numpy.nan
self.ColorMapData.info = self.Info
# Loop for each field
for i, h in enumerate(fields):
self.FC.setField(h)
time.sleep(hold_time)
self.Data['Z'][i] = self.IAC.getRData(True)
self.PlotColorMap(i)
ThD.check_stop()
if file_name is not None:
self.Data.save(file_name)
self.FC.TurnOff()
self.FC.Kepco.BEEP()
@ThD.as_thread
def MeasureVsTime(self, field, time_step, n_steps, file_name):
self.DataTime['t'] = numpy.zeros((n_steps))
self.DataTime['f'] = self.IAC.frequencies
data_shape = (len(self.DataTime['t']), len(self.DataTime['f']))
self.DataTime['Z'] = numpy.zeros(data_shape, dtype=complex)
self.ColorMapData['t'] = numpy.arange(0, n_steps)
self.ColorMapData['ResFreq'] = numpy.arange(0, n_steps) + numpy.nan
self.ColorMapData['f'] = self.DataTime['f']
self.ColorMapData['ColorMap'] = numpy.zeros(data_shape, dtype=float)
self.ColorMapData['ColorMap'] += numpy.nan
self.ColorMapData.info = self.Info
self.FC.setField(field)
# Loop for each field
for i in range(n_steps):
time.sleep(time_step)
self.DataTime['t'][i] = time.time()
self.DataTime['Z'][i] = self.IAC.getRData(True)
self.PlotColorMapTime(i)
ThD.check_stop()
self.DataTime.info = self.Info
if file_name is not None:
self.DataTime.save(file_name)
def Stop(self, TurnOff=True):
print('Stoping...')
self.FC.BEEP()
if self.Measure.thread is not None:
self.Measure.stop()
self.Measure.thread.join()
if self.MeasureVsTime.thread is not None:
self.MeasureVsTime.stop()
self.MeasureVsTime.thread.join()
time.sleep(1)
self.FC.BEEP()
time.sleep(0.1)
self.FC.BEEP()
print('DONE')
if TurnOff:
print('Turning field OFF')
self.FC.TurnOff()
print('DONE')
| en | 0.741633 | # -*- coding: utf-8 -*- #check Y scale #Experimental/plot data #Z vs hs vs fs #Z vs ts vs fs #|Z| vs hs vs fs # Update up to i column # Update up to i column # Update up to i column # Loop for each field # Loop for each field | 2.125227 | 2 |
test_grader_lib/testPolarTransform.py | rmok57/sketchresponse | 11 | 6622082 | <filename>test_grader_lib/testPolarTransform.py
from __future__ import absolute_import
from __future__ import division
import unittest
from . import TestDataPolar
from grader_lib import GradeableFunction
from grader_lib import Point
from math import pi, sqrt
class TestPolarTransform(TestDataPolar.TestDataPolar):
def test_polar_transform_points_true(self):
data = self.loadData('test_grader_lib/polar_points_true.csv')
for answer in data:
pt1 = GradeableFunction.GradeableFunction(answer['pt1'])
pt2 = GradeableFunction.GradeableFunction(answer['pt2'])
pt3 = GradeableFunction.GradeableFunction(answer['pt3'])
self.assertTrue(pt1.has_point_at(x=(11 * pi / 6), y=2))
self.assertTrue(pt2.has_point_at(x=(5 * pi / 4), y=sqrt(2)))
self.assertTrue(pt3.has_point_at(x=(2 * pi / 3), y=2))
def test_polar_transform_points_false(self):
data = self.loadData('test_grader_lib/polar_points_false.txt')
for answer in data:
pt1 = GradeableFunction.GradeableFunction(answer['pt1'])
pt2 = GradeableFunction.GradeableFunction(answer['pt2'])
pt3 = GradeableFunction.GradeableFunction(answer['pt3'])
isCorrect = True
isCorrect = isCorrect and pt1.has_point_at(x=(11 * pi / 6), y=2)
isCorrect = isCorrect and pt2.has_point_at(x=(5 * pi / 4), y=sqrt(2))
isCorrect = isCorrect and pt3.has_point_at(x=(2 * pi / 3), y=2)
self.assertFalse(isCorrect)
def test_polar_transform_quartercircle_true(self):
data = self.loadData('test_grader_lib/polar_quartercircle_true.txt')
for answer in data:
f = GradeableFunction.GradeableFunction(answer['f'])
self.assertTrue(f.is_straight_between(pi, (3 * pi / 2)))
self.assertFalse(f.does_exist_between(0, pi))
self.assertFalse(f.does_exist_between((3 * pi / 2), 2 * pi))
def test_polar_transform_quartercircle_false(self):
data = self.loadData('test_grader_lib/polar_quartercircle_false.txt')
for answer in data:
f = GradeableFunction.GradeableFunction(answer['f'])
isCorrect = True
isCorrect = isCorrect and f.is_straight_between(pi, (3 * pi / 2))
isCorrect = isCorrect and not f.does_exist_between(0, pi)
isCorrect = isCorrect and not f.does_exist_between((3 * pi / 2), 2 * pi)
self.assertFalse(isCorrect)
def test_polar_transform_threelobe_true(self):
data = self.loadData('test_grader_lib/polar_threelobe_true.txt')
for answer in data:
f = GradeableFunction.GradeableFunction(answer['f'])
allowedFails = 4
self.assertTrue(f.is_increasing_between(0, (pi / 6), failureTolerance=allowedFails))
self.assertTrue(f.is_decreasing_between((pi / 6), (pi / 3), failureTolerance=allowedFails))
self.assertTrue(f.is_increasing_between((4 * pi / 6), (5 * pi / 6), failureTolerance=allowedFails))
self.assertTrue(f.is_decreasing_between((5 * pi / 6), pi, failureTolerance=allowedFails))
self.assertTrue(f.is_increasing_between((8 * pi / 6), (3 * pi / 2), failureTolerance=allowedFails))
self.assertTrue(f.is_decreasing_between((3 * pi / 2), (10 * pi / 6), failureTolerance=allowedFails))
def test_polar_transform_threelobe_false(self):
data = self.loadData('test_grader_lib/polar_threelobe_false.txt')
for answer in data:
f = GradeableFunction.GradeableFunction(answer['f'])
allowedFails = 4
isCorrect = True
isCorrect = isCorrect and f.is_increasing_between(0, (pi / 6), failureTolerance=allowedFails)
isCorrect = isCorrect and f.is_decreasing_between((pi / 6), (pi / 3), failureTolerance=allowedFails)
isCorrect = isCorrect and f.is_increasing_between((4 * pi / 6), (5 * pi / 6), failureTolerance=allowedFails)
isCorrect = isCorrect and f.is_decreasing_between((5 * pi / 6), pi, failureTolerance=allowedFails)
isCorrect = isCorrect and f.is_increasing_between((8 * pi / 6), (3 * pi / 2), failureTolerance=allowedFails)
isCorrect = isCorrect and f.is_decreasing_between((3 * pi / 2), (10 * pi / 6), failureTolerance=allowedFails)
self.assertFalse(isCorrect)
if __name__ == '__main__':
testPolar = TestPolarTransformMethods()
testPolar.test_polar_transform_points_true()
testPolar.test_polar_transform_points_false()
| <filename>test_grader_lib/testPolarTransform.py
from __future__ import absolute_import
from __future__ import division
import unittest
from . import TestDataPolar
from grader_lib import GradeableFunction
from grader_lib import Point
from math import pi, sqrt
class TestPolarTransform(TestDataPolar.TestDataPolar):
def test_polar_transform_points_true(self):
data = self.loadData('test_grader_lib/polar_points_true.csv')
for answer in data:
pt1 = GradeableFunction.GradeableFunction(answer['pt1'])
pt2 = GradeableFunction.GradeableFunction(answer['pt2'])
pt3 = GradeableFunction.GradeableFunction(answer['pt3'])
self.assertTrue(pt1.has_point_at(x=(11 * pi / 6), y=2))
self.assertTrue(pt2.has_point_at(x=(5 * pi / 4), y=sqrt(2)))
self.assertTrue(pt3.has_point_at(x=(2 * pi / 3), y=2))
def test_polar_transform_points_false(self):
data = self.loadData('test_grader_lib/polar_points_false.txt')
for answer in data:
pt1 = GradeableFunction.GradeableFunction(answer['pt1'])
pt2 = GradeableFunction.GradeableFunction(answer['pt2'])
pt3 = GradeableFunction.GradeableFunction(answer['pt3'])
isCorrect = True
isCorrect = isCorrect and pt1.has_point_at(x=(11 * pi / 6), y=2)
isCorrect = isCorrect and pt2.has_point_at(x=(5 * pi / 4), y=sqrt(2))
isCorrect = isCorrect and pt3.has_point_at(x=(2 * pi / 3), y=2)
self.assertFalse(isCorrect)
def test_polar_transform_quartercircle_true(self):
data = self.loadData('test_grader_lib/polar_quartercircle_true.txt')
for answer in data:
f = GradeableFunction.GradeableFunction(answer['f'])
self.assertTrue(f.is_straight_between(pi, (3 * pi / 2)))
self.assertFalse(f.does_exist_between(0, pi))
self.assertFalse(f.does_exist_between((3 * pi / 2), 2 * pi))
def test_polar_transform_quartercircle_false(self):
data = self.loadData('test_grader_lib/polar_quartercircle_false.txt')
for answer in data:
f = GradeableFunction.GradeableFunction(answer['f'])
isCorrect = True
isCorrect = isCorrect and f.is_straight_between(pi, (3 * pi / 2))
isCorrect = isCorrect and not f.does_exist_between(0, pi)
isCorrect = isCorrect and not f.does_exist_between((3 * pi / 2), 2 * pi)
self.assertFalse(isCorrect)
def test_polar_transform_threelobe_true(self):
data = self.loadData('test_grader_lib/polar_threelobe_true.txt')
for answer in data:
f = GradeableFunction.GradeableFunction(answer['f'])
allowedFails = 4
self.assertTrue(f.is_increasing_between(0, (pi / 6), failureTolerance=allowedFails))
self.assertTrue(f.is_decreasing_between((pi / 6), (pi / 3), failureTolerance=allowedFails))
self.assertTrue(f.is_increasing_between((4 * pi / 6), (5 * pi / 6), failureTolerance=allowedFails))
self.assertTrue(f.is_decreasing_between((5 * pi / 6), pi, failureTolerance=allowedFails))
self.assertTrue(f.is_increasing_between((8 * pi / 6), (3 * pi / 2), failureTolerance=allowedFails))
self.assertTrue(f.is_decreasing_between((3 * pi / 2), (10 * pi / 6), failureTolerance=allowedFails))
def test_polar_transform_threelobe_false(self):
data = self.loadData('test_grader_lib/polar_threelobe_false.txt')
for answer in data:
f = GradeableFunction.GradeableFunction(answer['f'])
allowedFails = 4
isCorrect = True
isCorrect = isCorrect and f.is_increasing_between(0, (pi / 6), failureTolerance=allowedFails)
isCorrect = isCorrect and f.is_decreasing_between((pi / 6), (pi / 3), failureTolerance=allowedFails)
isCorrect = isCorrect and f.is_increasing_between((4 * pi / 6), (5 * pi / 6), failureTolerance=allowedFails)
isCorrect = isCorrect and f.is_decreasing_between((5 * pi / 6), pi, failureTolerance=allowedFails)
isCorrect = isCorrect and f.is_increasing_between((8 * pi / 6), (3 * pi / 2), failureTolerance=allowedFails)
isCorrect = isCorrect and f.is_decreasing_between((3 * pi / 2), (10 * pi / 6), failureTolerance=allowedFails)
self.assertFalse(isCorrect)
if __name__ == '__main__':
testPolar = TestPolarTransformMethods()
testPolar.test_polar_transform_points_true()
testPolar.test_polar_transform_points_false()
| none | 1 | 3.001001 | 3 | |
run.py | slackr31337/home-agent | 1 | 6622083 | <filename>run.py
#!/usr/bin/env python3
"""Run the HomeAgent as a service"""
import sys
import threading
import traceback
import logging
from utilities.log import LOGGER
from utilities.states import ThreadSafeDict
from scheduler import Scheduler
from agent_args import parse_args
from agent import LOG_PREFIX, HomeAgent
from config import APP_NAME, Config, load_config
LOG_PREFIX = "[HomeAgent]"
#########################################
def run_service(config: Config, _sensors=None):
"""Run Home Agent Service"""
LOGGER.info("%s is starting", LOG_PREFIX)
state = ThreadSafeDict()
running = threading.Event()
running.set()
sched = Scheduler(state, running)
agent = HomeAgent(config, running, sched, _sensors)
sched.run(agent.start)
sched.queue(agent.collector, 10)
sched.start()
LOGGER.info("%s Stopping", LOG_PREFIX)
agent.stop()
running.clear()
LOGGER.info("%s has stopped", LOG_PREFIX)
#########################################
def main():
"""Main run function"""
LOGGER.info("Starting %s", APP_NAME)
_args = parse_args(sys.argv[1:], APP_NAME)
if _args.debug:
level = logging.getLevelName("DEBUG")
LOGGER.setLevel(level)
LOGGER.debug("Debug enabled")
if not _args.service:
LOGGER.error("Must use -s argument to run as a service")
sys.exit(2)
LOGGER.info("%s Loading config file: %s", LOG_PREFIX, _args.config)
_config = Config(load_config(_args.config))
try:
run_service(_config)
except Exception as err: # pylint: disable=broad-except
LOGGER.error(err)
LOGGER.error(traceback.format_exc())
LOGGER.info("Quit %s", APP_NAME)
#########################################
if __name__ == "__main__":
main()
| <filename>run.py
#!/usr/bin/env python3
"""Run the HomeAgent as a service"""
import sys
import threading
import traceback
import logging
from utilities.log import LOGGER
from utilities.states import ThreadSafeDict
from scheduler import Scheduler
from agent_args import parse_args
from agent import LOG_PREFIX, HomeAgent
from config import APP_NAME, Config, load_config
LOG_PREFIX = "[HomeAgent]"
#########################################
def run_service(config: Config, _sensors=None):
"""Run Home Agent Service"""
LOGGER.info("%s is starting", LOG_PREFIX)
state = ThreadSafeDict()
running = threading.Event()
running.set()
sched = Scheduler(state, running)
agent = HomeAgent(config, running, sched, _sensors)
sched.run(agent.start)
sched.queue(agent.collector, 10)
sched.start()
LOGGER.info("%s Stopping", LOG_PREFIX)
agent.stop()
running.clear()
LOGGER.info("%s has stopped", LOG_PREFIX)
#########################################
def main():
"""Main run function"""
LOGGER.info("Starting %s", APP_NAME)
_args = parse_args(sys.argv[1:], APP_NAME)
if _args.debug:
level = logging.getLevelName("DEBUG")
LOGGER.setLevel(level)
LOGGER.debug("Debug enabled")
if not _args.service:
LOGGER.error("Must use -s argument to run as a service")
sys.exit(2)
LOGGER.info("%s Loading config file: %s", LOG_PREFIX, _args.config)
_config = Config(load_config(_args.config))
try:
run_service(_config)
except Exception as err: # pylint: disable=broad-except
LOGGER.error(err)
LOGGER.error(traceback.format_exc())
LOGGER.info("Quit %s", APP_NAME)
#########################################
if __name__ == "__main__":
main()
| de | 0.630523 | #!/usr/bin/env python3 Run the HomeAgent as a service ######################################### Run Home Agent Service ######################################### Main run function # pylint: disable=broad-except ######################################### | 2.494845 | 2 |
app.py | andreasca/covid-dashboard | 0 | 6622084 | import dash
import dash_core_components as dcc
import dash_flexbox_grid as dfx
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_table
import flask
import glob
import os
import pathlib
import numpy as np
import pandas as pd
import re
scriptdir = pathlib.Path(os.getcwd()) # this notebook
image_directory_us = scriptdir / 'plots_gp/US/'
list_of_images_us = sorted([f.name for f in image_directory_us.rglob('*.png')])
static_image_route_us = '/staticUS/'
image_directory_world = scriptdir / 'plots_gp/World/'
list_of_images_world = sorted([f.name for f in image_directory_world.rglob('*.png')])
static_image_route_world = '/staticWD/'
image_directory_italy = scriptdir / 'plots_gp/Italy/'
list_of_images_italy = sorted([f.name for f in image_directory_italy.rglob('*.png')])
static_image_route_italy = '/staticIT/'
image_directory_canada = scriptdir / 'plots_gp/Canada/'
list_of_images_canada = sorted([f.name for f in image_directory_canada.rglob('*.png')])
static_image_route_canada = '/staticCA/'
image_directory_s_america = scriptdir / 'plots_gp/South_America/'
list_of_images_s_america = sorted([f.name for f in image_directory_s_america.rglob('*.png')])
static_image_route_s_america = '/staticSA/'
outputdir = scriptdir / 'data' # directory where the csv files are
# # world
# csv_path = outputdir / 'Staight_Line_COVID_Prediction_Table_world.csv'
# print(csv_path)
# df = pd.read_csv(csv_path)
# # US
# csv_path = outputdir / 'Staight_Line_COVID_Prediction_Table_us.csv'
# print(csv_path)
# df_us = pd.read_csv(csv_path)
# # # CA
# # csv_path = outputdir / 'Staight_Line_COVID_Prediction_Table_ca.csv'
# # print(csv_path)
# df_ca = pd.read_csv(csv_path)
# Create the Dash application; passing __name__ lets Dash locate the assets/ folder.
app = dash.Dash(__name__)
# Section for Google analytics
# 'DYNO' exists only inside a Heroku dyno's environment, so this branch runs
# only on Heroku deployments: switch to externally-hosted scripts and inject
# the two Google Analytics tag scripts.
if 'DYNO' in os.environ:
    app.scripts.config.serve_locally = False
    app.scripts.append_script({
        'external_url': 'https://raw.githubusercontent.com/csblab/covid-dashboard/master/assets/async_tag.js'
    })
    app.scripts.append_script({
        'external_url': 'https://raw.githubusercontent.com/csblab/covid-dashboard/master/assets/gtag.js'
    })
server = app.server #for server deployment (WSGI entry point, e.g. `gunicorn app:server`)
# NOTE(review): this unconditionally overrides the serve_locally = False set in
# the Heroku branch above, so the analytics scripts may never load externally --
# confirm which setting is actually intended.
app.scripts.config.serve_locally = True
# Shared inline styles for the dcc.Tabs navigation bar.
tabs_styles = {
    'height': '44px'
}
# Base look of an unselected tab.
# Bug fix: the original dict listed 'padding' twice ('6px' then '10px');
# Python keeps only the last value, so '10px' was already the effective one
# and the dead duplicate entry has been removed.
tab_style = {
    'borderBottom': '1px solid #d6d6d6',
    'fontWeight': 'bold',
    'padding': '10px',
}
# Look of the currently selected tab: red background with white bold text.
tab_selected_style = {
    'borderTop': '1px solid #d6d6d6',
    'borderBottom': '1px solid #d6d6d6',
    'backgroundColor': '#ff7a7a',
    'color': 'white',
    'padding': '10px',
    'align-items': 'center',
    'fontWeight': 'bold',
}
# Conditional-formatting rules for the prediction DataTable.
# Data columns are named '15'..'255'; each cell holds a marker value
# (1, 2, '>', '|' or '_') that is rendered as a solid colour swatch by
# setting the text colour equal to the background colour.
_DAY_COLUMNS = range(15, 256)

# (cell value, swatch colour) pairs, in the same precedence order as before.
_MARKER_COLORS = (
    ('1', '#E091E1'),   # magenta
    ('2', '#DBFCC3'),   # pale green
    ('>', '#05F969'),   # bright green
    ('|', '#858684'),   # grey
    ('_', 'white'),     # blank / background
)


def _swatch_rule(column, value, color):
    """Build one style_data_conditional rule that paints *column*'s cell a
    solid *color* whenever the cell's value equals *value*."""
    return {
        'if': {
            'column_id': str(column),
            'filter_query': '{{{}}} = {}'.format(column, value),
        },
        'backgroundColor': color,
        'color': color,
    }


# First rule: left-align and size the 'Location' column.
cell_styles = [{'if': {'column_id': 'Location'}, 'width': '8%', 'textAlign': 'left'}]
# Bug fix: the original `for i in range(3)` loop matched neither branch on its
# i == 0 pass and appended 241 references to the same empty dict -- no-op style
# rules that added nothing.  Those dead entries are dropped here; every
# meaningful rule is generated in the same order as before.
for _value, _color in _MARKER_COLORS:
    for _column in _DAY_COLUMNS:
        cell_styles.append(_swatch_rule(_column, _value, _color))
app.layout = html.Div([
dcc.Tabs(
id="tabs-styled-with-inline",
value ='tab-2',
children=[
dcc.Tab(
label='WORLD',
value='tab-2',
style=tab_style,
selected_style=tab_selected_style,
children=[
dfx.Grid(
id='gridw',
fluid=True,
children=[
dfx.Row(
id='row1-1-1',
children=[
dfx.Col(
id='col1-1-1',
xs=6,
lg=6,
children=[
html.H3('Location 1'),
dcc.Dropdown(
id='image-dropdownWorld1',
options=[{'label': i, 'value': i} for i in list_of_images_world],
placeholder="Select Country",
value=list_of_images_world[0],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageworld1', style={'height':'90%', 'width':'81%'})
#'width': '600px'
],
),
dfx.Col(
id='col1-1-2',
xs=6,
lg=6,
children=[
html.H3('Location 2'),
dcc.Dropdown(
id='image-dropdownWorld2',
options=[{'label': i, 'value': i} for i in list_of_images_world],
placeholder="Select Country",
value=list_of_images_world[1],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageworld2', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row1-1-2',
children=[
dfx.Col(
id='col1-2-1',
xs=6,
lg=6,
children=[
html.H3('Location 3'),
dcc.Dropdown(
id='image-dropdownWorld3',
options=[{'label': i, 'value': i} for i in list_of_images_world],
placeholder="Select Country",
value=list_of_images_world[2],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageworld3', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col1-2-2',
xs=6,
lg=6,
children=[
html.H3('Location 4'),
dcc.Dropdown(
id='image-dropdownWorld4',
options=[{'label': i, 'value': i} for i in list_of_images_world],
placeholder="Select Country",
value=list_of_images_world[3],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageworld4', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row1-1-3',
children=[
dfx.Col(
id='col1-3-1',
xs=6,
lg=6,
children=[
html.H3('Location 5'),
dcc.Dropdown(
id='image-dropdownWorld5',
options=[{'label': i, 'value': i} for i in list_of_images_world],
placeholder="Select Country",
value=list_of_images_world[4],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageworld5', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col1-3-2',
xs=6,
lg=6,
children=[
html.H3('Location 6'),
dcc.Dropdown(
id='image-dropdownWorld6',
options=[{'label': i, 'value': i} for i in list_of_images_world],
placeholder="Select Country",
value=list_of_images_world[5],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageworld6', style={'height':'90%', 'width':'81%'})
],
),
],
),
],
),
],
),
dcc.Tab(
label='US',
value='tab-3',
style=tab_style,
selected_style=tab_selected_style,
children=[
dfx.Grid(
id='gridus',
fluid=True,
children=[
dfx.Row(
id='row2-1-1',
children=[
dfx.Col(
id='col2-1-1',
xs=6,
lg=6,
children=[
html.H3('Location 1'),
dcc.Dropdown(
id='image-dropdownUS1',
options=[{'label': i, 'value': i} for i in list_of_images_us],
placeholder="Select Country",
value=list_of_images_us[0],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageus1', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col2-1-2',
xs=6,
lg=6,
children=[
html.H3('Location 2'),
dcc.Dropdown(
id='image-dropdownUS2',
options=[{'label': i, 'value': i} for i in list_of_images_us],
placeholder="Select Country",
value=list_of_images_us[1],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageus2', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row2-1-2',
children=[
dfx.Col(
id='col2-2-1',
xs=6,
lg=6,
children=[
html.H3('Location 3'),
dcc.Dropdown(
id='image-dropdownUS3',
options=[{'label': i, 'value': i} for i in list_of_images_us],
placeholder="Select Country",
value=list_of_images_us[2],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageus3', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col2-2-2',
xs=6,
lg=6,
children=[
html.H3('Location 4'),
dcc.Dropdown(
id='image-dropdownUS4',
options=[{'label': i, 'value': i} for i in list_of_images_us],
placeholder="Select Country",
value=list_of_images_us[3],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageus4', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row2-1-3',
children=[
dfx.Col(
id='col2-3-1',
xs=6,
lg=6,
children=[
html.H3('Location 5'),
dcc.Dropdown(
id='image-dropdownUS5',
options=[{'label': i, 'value': i} for i in list_of_images_us],
placeholder="Select Country",
value=list_of_images_us[4],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageus5', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col2-3-2',
xs=6,
lg=6,
children=[
html.H3('Location 6'),
dcc.Dropdown(
id='image-dropdownUS6',
options=[{'label': i, 'value': i} for i in list_of_images_us],
placeholder="Select Country",
value=list_of_images_us[5],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageus6', style={'height':'90%', 'width':'81%'})
],
),
],
),
],
),
]
),
dcc.Tab(
label='ITALY',
value='tab-4',
style=tab_style,
selected_style=tab_selected_style,
children=[
dfx.Grid(
id='gridit',
fluid=True,
children=[
dfx.Row(
id='row3-1-1',
children=[
dfx.Col(
id='col3-1-1',
xs=6,
lg=6,
children=[
html.H3('Location 1'),
dcc.Dropdown(
id='image-dropdownIT1',
options=[{'label': i, 'value': i} for i in list_of_images_italy],
placeholder="Select Country",
value=list_of_images_italy[0],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageit1', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col3-1-2',
xs=6,
lg=6,
children=[
html.H3('Location 2'),
dcc.Dropdown(
id='image-dropdownIT2',
options=[{'label': i, 'value': i} for i in list_of_images_italy],
placeholder="Select Country",
value=list_of_images_italy[1],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageit2', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row3-1-2',
children=[
dfx.Col(
id='col3-2-1',
xs=6,
lg=6,
children=[
html.H3('Location 3'),
dcc.Dropdown(
id='image-dropdownIT3',
options=[{'label': i, 'value': i} for i in list_of_images_italy],
placeholder="Select Country",
value=list_of_images_italy[2],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageit3', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col3-2-2',
xs=6,
lg=6,
children=[
html.H3('Location 4'),
dcc.Dropdown(
id='image-dropdownIT4',
options=[{'label': i, 'value': i} for i in list_of_images_italy],
placeholder="Select Country",
value=list_of_images_italy[3],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageit4', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row3-1-3',
children=[
dfx.Col(
id='col3-3-1',
xs=6,
lg=6,
children=[
html.H3('Location 5'),
dcc.Dropdown(
id='image-dropdownIT5',
options=[{'label': i, 'value': i} for i in list_of_images_italy],
placeholder="Select Country",
value=list_of_images_italy[4],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageit5', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col3-3-2',
xs=6,
lg=6,
children=[
html.H3('Location 6'),
dcc.Dropdown(
id='image-dropdownIT6',
options=[{'label': i, 'value': i} for i in list_of_images_italy],
placeholder="Select Country",
value=list_of_images_italy[5],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageit6', style={'height':'90%', 'width':'81%'})
],
),
],
),
],
),
]
),
dcc.Tab(
label='CANADA',
value='tab-5',
style=tab_style,
selected_style=tab_selected_style,
children=[
dfx.Grid(
id='gridca',
fluid=True,
children=[
dfx.Row(
id='row4-1-1',
children=[
dfx.Col(
id='col4-1-1',
xs=6,
lg=6,
children=[
html.H3('Location 1'),
dcc.Dropdown(
id='image-dropdownCA1',
options=[{'label': i, 'value': i} for i in list_of_images_canada],
placeholder="Select Country",
value=list_of_images_canada[0],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imageca1', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col4-1-2',
xs=6,
lg=6,
children=[
html.H3('Location 2'),
dcc.Dropdown(
id='image-dropdownCA2',
options=[{'label': i, 'value': i} for i in list_of_images_canada],
placeholder="Select Country",
value=list_of_images_canada[1],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imageca2', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row4-1-2',
children=[
dfx.Col(
id='col4-2-1',
xs=6,
lg=6,
children=[
html.H3('Location 3'),
dcc.Dropdown(
id='image-dropdownCA3',
options=[{'label': i, 'value': i} for i in list_of_images_canada],
placeholder="Select Country",
value=list_of_images_canada[2],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imageca3', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col4-2-2',
xs=6,
lg=6,
children=[
html.H3('Location 4'),
dcc.Dropdown(
id='image-dropdownCA4',
options=[{'label': i, 'value': i} for i in list_of_images_canada],
placeholder="Select Country",
value=list_of_images_canada[3],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imageca4', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row4-1-3',
children=[
dfx.Col(
id='col4-3-1',
xs=6,
lg=6,
children=[
html.H3('Location 5'),
dcc.Dropdown(
id='image-dropdownCA5',
options=[{'label': i, 'value': i} for i in list_of_images_canada],
placeholder="Select Country",
value=list_of_images_canada[4],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imageca5', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col4-3-2',
xs=6,
lg=6,
children=[
html.H3('Location 6'),
dcc.Dropdown(
id='image-dropdownCA6',
options=[{'label': i, 'value': i} for i in list_of_images_canada],
placeholder="Select Country",
value=list_of_images_canada[4],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imageca6', style={'height':'90%', 'width':'81%'})
],
),
],
),
],
),
]
),
dcc.Tab(
label='SOUTH AMERICA',
value='tab-6',
style=tab_style,
selected_style=tab_selected_style,
children=[
dfx.Grid(
id='grid',
fluid=True,
children=[
dfx.Row(
id='row5-1-1',
children=[
dfx.Col(
id='col5-1-1',
xs=6,
lg=6,
children=[
html.H3('Location 1'),
dcc.Dropdown(
id='image-dropdownSA1',
options=[{'label': i, 'value': i} for i in list_of_images_s_america],
placeholder="Select Country",
value=list_of_images_s_america[0],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imagesa1', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col5-1-2',
xs=6,
lg=6,
children=[
html.H3('Location 2'),
dcc.Dropdown(
id='image-dropdownSA2',
options=[{'label': i, 'value': i} for i in list_of_images_s_america],
placeholder="Select Country",
value=list_of_images_s_america[1],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imagesa2', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row5-1-2',
children=[
dfx.Col(
id='col5-2-1',
xs=6,
lg=6,
children=[
html.H3('Location 3'),
dcc.Dropdown(
id='image-dropdownSA3',
options=[{'label': i, 'value': i} for i in list_of_images_s_america],
placeholder="Select Country",
value=list_of_images_s_america[2],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imagesa3', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col5-2-2',
xs=6,
lg=6,
children=[
html.H3('Location 4'),
dcc.Dropdown(
id='image-dropdownSA4',
options=[{'label': i, 'value': i} for i in list_of_images_s_america],
placeholder="Select Country",
value=list_of_images_s_america[3],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imagesa4', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row5-1-3',
children=[
dfx.Col(
id='col5-3-1',
xs=6,
lg=6,
children=[
html.H3('Location 5'),
dcc.Dropdown(
id='image-dropdownSA5',
options=[{'label': i, 'value': i} for i in list_of_images_s_america],
placeholder="Select Country",
value=list_of_images_s_america[4],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imagesa5', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col5-3-2',
xs=6,
lg=6,
children=[
html.H3('Location 6'),
dcc.Dropdown(
id='image-dropdownSA6',
options=[{'label': i, 'value': i} for i in list_of_images_s_america],
placeholder="Select Country",
value=list_of_images_s_america[5],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imagesa6', style={'height':'90%', 'width':'81%'})
],
),
],
),
],
),
]
),
],
style=tabs_styles,
),
html.Div(id='tabs-content-inline')
])
#callbacks
# @app.callback(Output('tabs-content-inline', 'children'),
# [Input('tabs-styled-with-inline', 'value')])
#WORLD
# --- WORLD tab callbacks: each dropdown selection updates its <img> src ----

@app.callback(
    dash.dependencies.Output('imageworld1', 'src'),
    [dash.dependencies.Input('image-dropdownWorld1', 'value')]
)
def update_image_srcWorld1(value):
    """Build the static-route URL of the World plot selected in slot 1."""
    image_url = static_image_route_world + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageworld2', 'src'),
    [dash.dependencies.Input('image-dropdownWorld2', 'value')]
)
def update_image_srcWorld2(value):
    """Build the static-route URL of the World plot selected in slot 2."""
    image_url = static_image_route_world + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageworld3', 'src'),
    [dash.dependencies.Input('image-dropdownWorld3', 'value')]
)
def update_image_srcWorld3(value):
    """Build the static-route URL of the World plot selected in slot 3."""
    image_url = static_image_route_world + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageworld4', 'src'),
    [dash.dependencies.Input('image-dropdownWorld4', 'value')]
)
def update_image_srcWorld4(value):
    """Build the static-route URL of the World plot selected in slot 4."""
    image_url = static_image_route_world + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageworld5', 'src'),
    [dash.dependencies.Input('image-dropdownWorld5', 'value')]
)
def update_image_srcWorld5(value):
    """Build the static-route URL of the World plot selected in slot 5."""
    image_url = static_image_route_world + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageworld6', 'src'),
    [dash.dependencies.Input('image-dropdownWorld6', 'value')]
)
def update_image_srcWorld6(value):
    """Build the static-route URL of the World plot selected in slot 6."""
    image_url = static_image_route_world + value
    return image_url
#US
# --- US tab callbacks: each dropdown selection updates its <img> src -------

@app.callback(
    dash.dependencies.Output('imageus1', 'src'),
    [dash.dependencies.Input('image-dropdownUS1', 'value')]
)
def update_image_srcUS1(value):
    """Build the static-route URL of the US plot selected in slot 1."""
    image_url = static_image_route_us + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageus2', 'src'),
    [dash.dependencies.Input('image-dropdownUS2', 'value')]
)
def update_image_srcUS2(value):
    """Build the static-route URL of the US plot selected in slot 2."""
    image_url = static_image_route_us + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageus3', 'src'),
    [dash.dependencies.Input('image-dropdownUS3', 'value')]
)
def update_image_srcUS3(value):
    """Build the static-route URL of the US plot selected in slot 3."""
    image_url = static_image_route_us + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageus4', 'src'),
    [dash.dependencies.Input('image-dropdownUS4', 'value')]
)
def update_image_srcUS4(value):
    """Build the static-route URL of the US plot selected in slot 4."""
    image_url = static_image_route_us + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageus5', 'src'),
    [dash.dependencies.Input('image-dropdownUS5', 'value')]
)
def update_image_srcUS5(value):
    """Build the static-route URL of the US plot selected in slot 5."""
    image_url = static_image_route_us + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageus6', 'src'),
    [dash.dependencies.Input('image-dropdownUS6', 'value')]
)
def update_image_srcUS6(value):
    """Build the static-route URL of the US plot selected in slot 6."""
    image_url = static_image_route_us + value
    return image_url
#Italy
# --- ITALY tab callbacks: each dropdown selection updates its <img> src ----

@app.callback(
    dash.dependencies.Output('imageit1', 'src'),
    [dash.dependencies.Input('image-dropdownIT1', 'value')]
)
def update_image_srcIT1(value):
    """Build the static-route URL of the Italy plot selected in slot 1."""
    image_url = static_image_route_italy + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageit2', 'src'),
    [dash.dependencies.Input('image-dropdownIT2', 'value')]
)
def update_image_srcIT2(value):
    """Build the static-route URL of the Italy plot selected in slot 2."""
    image_url = static_image_route_italy + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageit3', 'src'),
    [dash.dependencies.Input('image-dropdownIT3', 'value')]
)
def update_image_srcIT3(value):
    """Build the static-route URL of the Italy plot selected in slot 3."""
    image_url = static_image_route_italy + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageit4', 'src'),
    [dash.dependencies.Input('image-dropdownIT4', 'value')]
)
def update_image_srcIT4(value):
    """Build the static-route URL of the Italy plot selected in slot 4."""
    image_url = static_image_route_italy + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageit5', 'src'),
    [dash.dependencies.Input('image-dropdownIT5', 'value')]
)
def update_image_srcIT5(value):
    """Build the static-route URL of the Italy plot selected in slot 5."""
    image_url = static_image_route_italy + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageit6', 'src'),
    [dash.dependencies.Input('image-dropdownIT6', 'value')]
)
def update_image_srcIT6(value):
    """Build the static-route URL of the Italy plot selected in slot 6."""
    image_url = static_image_route_italy + value
    return image_url
#Canada
# --- CANADA tab callbacks: each dropdown selection updates its <img> src ---

@app.callback(
    dash.dependencies.Output('imageca1', 'src'),
    [dash.dependencies.Input('image-dropdownCA1', 'value')]
)
def update_image_srcCA1(value):
    """Build the static-route URL of the Canada plot selected in slot 1."""
    image_url = static_image_route_canada + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageca2', 'src'),
    [dash.dependencies.Input('image-dropdownCA2', 'value')]
)
def update_image_srcCA2(value):
    """Build the static-route URL of the Canada plot selected in slot 2."""
    image_url = static_image_route_canada + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageca3', 'src'),
    [dash.dependencies.Input('image-dropdownCA3', 'value')]
)
def update_image_srcCA3(value):
    """Build the static-route URL of the Canada plot selected in slot 3."""
    image_url = static_image_route_canada + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageca4', 'src'),
    [dash.dependencies.Input('image-dropdownCA4', 'value')]
)
def update_image_srcCA4(value):
    """Build the static-route URL of the Canada plot selected in slot 4."""
    image_url = static_image_route_canada + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageca5', 'src'),
    [dash.dependencies.Input('image-dropdownCA5', 'value')]
)
def update_image_srcCA5(value):
    """Build the static-route URL of the Canada plot selected in slot 5."""
    image_url = static_image_route_canada + value
    return image_url

@app.callback(
    dash.dependencies.Output('imageca6', 'src'),
    [dash.dependencies.Input('image-dropdownCA6', 'value')]
)
def update_image_srcCA6(value):
    """Build the static-route URL of the Canada plot selected in slot 6."""
    image_url = static_image_route_canada + value
    return image_url
#South America
# --- SOUTH AMERICA tab callbacks: each dropdown updates its <img> src ------

@app.callback(
    dash.dependencies.Output('imagesa1', 'src'),
    [dash.dependencies.Input('image-dropdownSA1', 'value')]
)
def update_image_srcSA1(value):
    """Build the static-route URL of the S. America plot selected in slot 1."""
    image_url = static_image_route_s_america + value
    return image_url

@app.callback(
    dash.dependencies.Output('imagesa2', 'src'),
    [dash.dependencies.Input('image-dropdownSA2', 'value')]
)
def update_image_srcSA2(value):
    """Build the static-route URL of the S. America plot selected in slot 2."""
    image_url = static_image_route_s_america + value
    return image_url

@app.callback(
    dash.dependencies.Output('imagesa3', 'src'),
    [dash.dependencies.Input('image-dropdownSA3', 'value')]
)
def update_image_srcSA3(value):
    """Build the static-route URL of the S. America plot selected in slot 3."""
    image_url = static_image_route_s_america + value
    return image_url

@app.callback(
    dash.dependencies.Output('imagesa4', 'src'),
    [dash.dependencies.Input('image-dropdownSA4', 'value')]
)
def update_image_srcSA4(value):
    """Build the static-route URL of the S. America plot selected in slot 4."""
    image_url = static_image_route_s_america + value
    return image_url

@app.callback(
    dash.dependencies.Output('imagesa5', 'src'),
    [dash.dependencies.Input('image-dropdownSA5', 'value')]
)
def update_image_srcSA5(value):
    """Build the static-route URL of the S. America plot selected in slot 5."""
    image_url = static_image_route_s_america + value
    return image_url

@app.callback(
    dash.dependencies.Output('imagesa6', 'src'),
    [dash.dependencies.Input('image-dropdownSA6', 'value')]
)
def update_image_srcSA6(value):
    """Build the static-route URL of the S. America plot selected in slot 6."""
    image_url = static_image_route_s_america + value
    return image_url
# Dead code removed: two dangling expression statements were left over from
# an abandoned North-America callback that was never registered with
# @app.callback:
#     dash.dependencies.Output('imageNAmerica', 'src'),
#     [dash.dependencies.Input('image-dropdownNAmerica', 'value')]
# They were bare expressions with no effect, so dropping them changes no
# behavior.
# Add a static image route that serves images from desktop
# Be *very* careful here - you don't want to serve arbitrary files
# from your computer or server
# Static route for World plots.  Be *very* careful here -- only file names
# present in the pre-computed whitelist are ever served from disk.
@app.server.route('{}<image_path>.png'.format(static_image_route_world))
def serve_imageWorld(image_path):
    """Serve a whitelisted World plot PNG, or raise for any other name."""
    image_name = '{}.png'.format(image_path)
    if image_name in list_of_images_world:
        return flask.send_from_directory(image_directory_world, image_name)
    raise Exception('"{}" is excluded from the allowed static files'.format(image_path))
# Static route for US plots; same whitelist discipline as the World route.
@app.server.route('{}<image_path>.png'.format(static_image_route_us))
def serve_imageUS(image_path):
    """Serve a whitelisted US plot PNG, or raise for any other name."""
    image_name = '{}.png'.format(image_path)
    if image_name in list_of_images_us:
        return flask.send_from_directory(image_directory_us, image_name)
    raise Exception('"{}" is excluded from the allowed static files'.format(image_path))
# Static route for Italy plots; same whitelist discipline as the World route.
@app.server.route('{}<image_path>.png'.format(static_image_route_italy))
def serve_imageIT(image_path):
    """Serve a whitelisted Italy plot PNG, or raise for any other name."""
    image_name = '{}.png'.format(image_path)
    if image_name in list_of_images_italy:
        return flask.send_from_directory(image_directory_italy, image_name)
    raise Exception('"{}" is excluded from the allowed static files'.format(image_path))
# Static route for Canada plots; same whitelist discipline as the World route.
@app.server.route('{}<image_path>.png'.format(static_image_route_canada))
def serve_imageCA(image_path):
    """Serve a whitelisted Canada plot PNG, or raise for any other name."""
    image_name = '{}.png'.format(image_path)
    if image_name in list_of_images_canada:
        return flask.send_from_directory(image_directory_canada, image_name)
    raise Exception('"{}" is excluded from the allowed static files'.format(image_path))
# Static route for South-America plots; same whitelist discipline as above.
@app.server.route('{}<image_path>.png'.format(static_image_route_s_america))
def serve_imageSA(image_path):
    """Serve a whitelisted South-America plot PNG, or raise for any other name."""
    image_name = '{}.png'.format(image_path)
    if image_name in list_of_images_s_america:
        return flask.send_from_directory(image_directory_s_america, image_name)
    raise Exception('"{}" is excluded from the allowed static files'.format(image_path))
# Entry point: start the Dash development server (debug off for
# deployment-like local runs; production serving goes through `server`).
if __name__ == '__main__':
    app.run_server(debug=False)
import dash
import dash_core_components as dcc
import dash_flexbox_grid as dfx
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_table
import flask
import glob
import os
import pathlib
import numpy as np
import pandas as pd
import re
# Resolve every plot folder relative to the launch directory and list the
# PNG files it contains; each region also gets the URL prefix under which
# its images are served by the flask routes below.
scriptdir = pathlib.Path(os.getcwd())  # this notebook

image_directory_us = scriptdir / 'plots_gp/US/'
list_of_images_us = sorted(f.name for f in image_directory_us.rglob('*.png'))
static_image_route_us = '/staticUS/'

image_directory_world = scriptdir / 'plots_gp/World/'
list_of_images_world = sorted(f.name for f in image_directory_world.rglob('*.png'))
static_image_route_world = '/staticWD/'

image_directory_italy = scriptdir / 'plots_gp/Italy/'
list_of_images_italy = sorted(f.name for f in image_directory_italy.rglob('*.png'))
static_image_route_italy = '/staticIT/'

image_directory_canada = scriptdir / 'plots_gp/Canada/'
list_of_images_canada = sorted(f.name for f in image_directory_canada.rglob('*.png'))
static_image_route_canada = '/staticCA/'

image_directory_s_america = scriptdir / 'plots_gp/South_America/'
list_of_images_s_america = sorted(f.name for f in image_directory_s_america.rglob('*.png'))
static_image_route_s_america = '/staticSA/'

outputdir = scriptdir / 'data'  # directory where the csv files are
# # world
# csv_path = outputdir / 'Staight_Line_COVID_Prediction_Table_world.csv'
# print(csv_path)
# df = pd.read_csv(csv_path)
# # US
# csv_path = outputdir / 'Staight_Line_COVID_Prediction_Table_us.csv'
# print(csv_path)
# df_us = pd.read_csv(csv_path)
# # # CA
# # csv_path = outputdir / 'Staight_Line_COVID_Prediction_Table_ca.csv'
# # print(csv_path)
# df_ca = pd.read_csv(csv_path)
app = dash.Dash(__name__)

# Section for Google analytics.
# 'DYNO' only exists in a Heroku dyno's environment, so this branch
# effectively means "running on Heroku": switch to externally hosted
# analytics scripts there.
if 'DYNO' in os.environ:
    app.scripts.config.serve_locally = False
    app.scripts.append_script({
        'external_url': 'https://raw.githubusercontent.com/csblab/covid-dashboard/master/assets/async_tag.js'
    })
    app.scripts.append_script({
        'external_url': 'https://raw.githubusercontent.com/csblab/covid-dashboard/master/assets/gtag.js'
    })
server = app.server  # for server deployment (e.g. gunicorn entry point)
# NOTE(review): this unconditionally re-enables local script serving and so
# overrides the serve_locally = False set in the Heroku branch above --
# confirm which setting is actually intended in production.
app.scripts.config.serve_locally = True
# Inline styling for the dcc.Tabs header bar.
tabs_styles = {
    'height': '44px'
}
# Base look of an unselected tab.
# Fix: the original dict listed 'padding' twice ('6px' then '10px'); Python
# keeps only the last value, so '10px' was the effective one and the dead
# duplicate has been dropped.
tab_style = {
    'borderBottom': '1px solid #d6d6d6',
    'fontWeight': 'bold',
    'padding': '10px',
}
# Highlighted look of the currently selected tab.
# Fix: React inline-style properties must be camelCase, so the ignored
# 'align-items' key is now 'alignItems'.
tab_selected_style = {
    'borderTop': '1px solid #d6d6d6',
    'borderBottom': '1px solid #d6d6d6',
    'backgroundColor': '#ff7a7a',
    'color': 'white',
    'padding': '10px',
    'alignItems': 'center',
    'fontWeight': 'bold',
}
# Conditional style rules for the dash_table heat-map columns ('15'..'255').
# Every cell holds a marker value; background and text are painted the same
# colour so each cell reads as a solid block of that colour.
#
# Fix vs. the original: the outer `for i in range(3)` loop appended the same
# *empty* dict 241 times on the i == 0 pass (neither `if i == 1` nor
# `if i == 2` matched), bloating the list with no-op rules; those empty
# entries are no longer emitted.
cell_styles = []
stuff = {}
cell_styles.append({'if': {'column_id': 'Location'}, 'width': '8%', 'textAlign': 'left'})

# Numeric markers 1 and 2 -> their fill colours.
for marker, colour in ((1, '#E091E1'), (2, '#DBFCC3')):
    for j in range(15, 256):
        stuff = {'if': {
                    'column_id': str(j),
                    # e.g. '{15} = 1' -- dash_table filter-query syntax
                    'filter_query': '{} = {}'.format('{' + str(j) + '}', marker)
                },
                'backgroundColor': colour,
                'color': colour
            }
        cell_styles.append(stuff)

# Literal markers '>', '|' and '_' -> their fill colours ('_' renders as
# an empty/white cell).
for marker, colour in (('>', '#05F969'), ('|', '#858684'), ('_', 'white')):
    for j in range(15, 256):
        stuff = {'if': {
                    'column_id': str(j),
                    'filter_query': '{} = {}'.format('{' + str(j) + '}', marker)
                },
                'backgroundColor': colour,
                'color': colour
            }
        cell_styles.append(stuff)
app.layout = html.Div([
dcc.Tabs(
id="tabs-styled-with-inline",
value ='tab-2',
children=[
dcc.Tab(
label='WORLD',
value='tab-2',
style=tab_style,
selected_style=tab_selected_style,
children=[
dfx.Grid(
id='gridw',
fluid=True,
children=[
dfx.Row(
id='row1-1-1',
children=[
dfx.Col(
id='col1-1-1',
xs=6,
lg=6,
children=[
html.H3('Location 1'),
dcc.Dropdown(
id='image-dropdownWorld1',
options=[{'label': i, 'value': i} for i in list_of_images_world],
placeholder="Select Country",
value=list_of_images_world[0],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageworld1', style={'height':'90%', 'width':'81%'})
#'width': '600px'
],
),
dfx.Col(
id='col1-1-2',
xs=6,
lg=6,
children=[
html.H3('Location 2'),
dcc.Dropdown(
id='image-dropdownWorld2',
options=[{'label': i, 'value': i} for i in list_of_images_world],
placeholder="Select Country",
value=list_of_images_world[1],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageworld2', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row1-1-2',
children=[
dfx.Col(
id='col1-2-1',
xs=6,
lg=6,
children=[
html.H3('Location 3'),
dcc.Dropdown(
id='image-dropdownWorld3',
options=[{'label': i, 'value': i} for i in list_of_images_world],
placeholder="Select Country",
value=list_of_images_world[2],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageworld3', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col1-2-2',
xs=6,
lg=6,
children=[
html.H3('Location 4'),
dcc.Dropdown(
id='image-dropdownWorld4',
options=[{'label': i, 'value': i} for i in list_of_images_world],
placeholder="Select Country",
value=list_of_images_world[3],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageworld4', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row1-1-3',
children=[
dfx.Col(
id='col1-3-1',
xs=6,
lg=6,
children=[
html.H3('Location 5'),
dcc.Dropdown(
id='image-dropdownWorld5',
options=[{'label': i, 'value': i} for i in list_of_images_world],
placeholder="Select Country",
value=list_of_images_world[4],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageworld5', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col1-3-2',
xs=6,
lg=6,
children=[
html.H3('Location 6'),
dcc.Dropdown(
id='image-dropdownWorld6',
options=[{'label': i, 'value': i} for i in list_of_images_world],
placeholder="Select Country",
value=list_of_images_world[5],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageworld6', style={'height':'90%', 'width':'81%'})
],
),
],
),
],
),
],
),
dcc.Tab(
label='US',
value='tab-3',
style=tab_style,
selected_style=tab_selected_style,
children=[
dfx.Grid(
id='gridus',
fluid=True,
children=[
dfx.Row(
id='row2-1-1',
children=[
dfx.Col(
id='col2-1-1',
xs=6,
lg=6,
children=[
html.H3('Location 1'),
dcc.Dropdown(
id='image-dropdownUS1',
options=[{'label': i, 'value': i} for i in list_of_images_us],
placeholder="Select Country",
value=list_of_images_us[0],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageus1', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col2-1-2',
xs=6,
lg=6,
children=[
html.H3('Location 2'),
dcc.Dropdown(
id='image-dropdownUS2',
options=[{'label': i, 'value': i} for i in list_of_images_us],
placeholder="Select Country",
value=list_of_images_us[1],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageus2', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row2-1-2',
children=[
dfx.Col(
id='col2-2-1',
xs=6,
lg=6,
children=[
html.H3('Location 3'),
dcc.Dropdown(
id='image-dropdownUS3',
options=[{'label': i, 'value': i} for i in list_of_images_us],
placeholder="Select Country",
value=list_of_images_us[2],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageus3', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col2-2-2',
xs=6,
lg=6,
children=[
html.H3('Location 4'),
dcc.Dropdown(
id='image-dropdownUS4',
options=[{'label': i, 'value': i} for i in list_of_images_us],
placeholder="Select Country",
value=list_of_images_us[3],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageus4', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row2-1-3',
children=[
dfx.Col(
id='col2-3-1',
xs=6,
lg=6,
children=[
html.H3('Location 5'),
dcc.Dropdown(
id='image-dropdownUS5',
options=[{'label': i, 'value': i} for i in list_of_images_us],
placeholder="Select Country",
value=list_of_images_us[4],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageus5', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col2-3-2',
xs=6,
lg=6,
children=[
html.H3('Location 6'),
dcc.Dropdown(
id='image-dropdownUS6',
options=[{'label': i, 'value': i} for i in list_of_images_us],
placeholder="Select Country",
value=list_of_images_us[5],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageus6', style={'height':'90%', 'width':'81%'})
],
),
],
),
],
),
]
),
dcc.Tab(
label='ITALY',
value='tab-4',
style=tab_style,
selected_style=tab_selected_style,
children=[
dfx.Grid(
id='gridit',
fluid=True,
children=[
dfx.Row(
id='row3-1-1',
children=[
dfx.Col(
id='col3-1-1',
xs=6,
lg=6,
children=[
html.H3('Location 1'),
dcc.Dropdown(
id='image-dropdownIT1',
options=[{'label': i, 'value': i} for i in list_of_images_italy],
placeholder="Select Country",
value=list_of_images_italy[0],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageit1', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col3-1-2',
xs=6,
lg=6,
children=[
html.H3('Location 2'),
dcc.Dropdown(
id='image-dropdownIT2',
options=[{'label': i, 'value': i} for i in list_of_images_italy],
placeholder="Select Country",
value=list_of_images_italy[1],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageit2', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row3-1-2',
children=[
dfx.Col(
id='col3-2-1',
xs=6,
lg=6,
children=[
html.H3('Location 3'),
dcc.Dropdown(
id='image-dropdownIT3',
options=[{'label': i, 'value': i} for i in list_of_images_italy],
placeholder="Select Country",
value=list_of_images_italy[2],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageit3', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col3-2-2',
xs=6,
lg=6,
children=[
html.H3('Location 4'),
dcc.Dropdown(
id='image-dropdownIT4',
options=[{'label': i, 'value': i} for i in list_of_images_italy],
placeholder="Select Country",
value=list_of_images_italy[3],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageit4', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row3-1-3',
children=[
dfx.Col(
id='col3-3-1',
xs=6,
lg=6,
children=[
html.H3('Location 5'),
dcc.Dropdown(
id='image-dropdownIT5',
options=[{'label': i, 'value': i} for i in list_of_images_italy],
placeholder="Select Country",
value=list_of_images_italy[4],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageit5', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col3-3-2',
xs=6,
lg=6,
children=[
html.H3('Location 6'),
dcc.Dropdown(
id='image-dropdownIT6',
options=[{'label': i, 'value': i} for i in list_of_images_italy],
placeholder="Select Country",
value=list_of_images_italy[5],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle",
margin="auto"
)
),
html.Img(id='imageit6', style={'height':'90%', 'width':'81%'})
],
),
],
),
],
),
]
),
dcc.Tab(
label='CANADA',
value='tab-5',
style=tab_style,
selected_style=tab_selected_style,
children=[
dfx.Grid(
id='gridca',
fluid=True,
children=[
dfx.Row(
id='row4-1-1',
children=[
dfx.Col(
id='col4-1-1',
xs=6,
lg=6,
children=[
html.H3('Location 1'),
dcc.Dropdown(
id='image-dropdownCA1',
options=[{'label': i, 'value': i} for i in list_of_images_canada],
placeholder="Select Country",
value=list_of_images_canada[0],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imageca1', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col4-1-2',
xs=6,
lg=6,
children=[
html.H3('Location 2'),
dcc.Dropdown(
id='image-dropdownCA2',
options=[{'label': i, 'value': i} for i in list_of_images_canada],
placeholder="Select Country",
value=list_of_images_canada[1],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imageca2', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row4-1-2',
children=[
dfx.Col(
id='col4-2-1',
xs=6,
lg=6,
children=[
html.H3('Location 3'),
dcc.Dropdown(
id='image-dropdownCA3',
options=[{'label': i, 'value': i} for i in list_of_images_canada],
placeholder="Select Country",
value=list_of_images_canada[2],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imageca3', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col4-2-2',
xs=6,
lg=6,
children=[
html.H3('Location 4'),
dcc.Dropdown(
id='image-dropdownCA4',
options=[{'label': i, 'value': i} for i in list_of_images_canada],
placeholder="Select Country",
value=list_of_images_canada[3],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imageca4', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row4-1-3',
children=[
dfx.Col(
id='col4-3-1',
xs=6,
lg=6,
children=[
html.H3('Location 5'),
dcc.Dropdown(
id='image-dropdownCA5',
options=[{'label': i, 'value': i} for i in list_of_images_canada],
placeholder="Select Country",
value=list_of_images_canada[4],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imageca5', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col4-3-2',
xs=6,
lg=6,
children=[
html.H3('Location 6'),
dcc.Dropdown(
id='image-dropdownCA6',
options=[{'label': i, 'value': i} for i in list_of_images_canada],
placeholder="Select Country",
value=list_of_images_canada[4],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imageca6', style={'height':'90%', 'width':'81%'})
],
),
],
),
],
),
]
),
dcc.Tab(
label='SOUTH AMERICA',
value='tab-6',
style=tab_style,
selected_style=tab_selected_style,
children=[
dfx.Grid(
id='grid',
fluid=True,
children=[
dfx.Row(
id='row5-1-1',
children=[
dfx.Col(
id='col5-1-1',
xs=6,
lg=6,
children=[
html.H3('Location 1'),
dcc.Dropdown(
id='image-dropdownSA1',
options=[{'label': i, 'value': i} for i in list_of_images_s_america],
placeholder="Select Country",
value=list_of_images_s_america[0],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imagesa1', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col5-1-2',
xs=6,
lg=6,
children=[
html.H3('Location 2'),
dcc.Dropdown(
id='image-dropdownSA2',
options=[{'label': i, 'value': i} for i in list_of_images_s_america],
placeholder="Select Country",
value=list_of_images_s_america[1],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imagesa2', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row5-1-2',
children=[
dfx.Col(
id='col5-2-1',
xs=6,
lg=6,
children=[
html.H3('Location 3'),
dcc.Dropdown(
id='image-dropdownSA3',
options=[{'label': i, 'value': i} for i in list_of_images_s_america],
placeholder="Select Country",
value=list_of_images_s_america[2],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imagesa3', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col5-2-2',
xs=6,
lg=6,
children=[
html.H3('Location 4'),
dcc.Dropdown(
id='image-dropdownSA4',
options=[{'label': i, 'value': i} for i in list_of_images_s_america],
placeholder="Select Country",
value=list_of_images_s_america[3],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imagesa4', style={'height':'90%', 'width':'81%'})
],
),
],
),
html.Br(),
dfx.Row(
id='row5-1-3',
children=[
dfx.Col(
id='col5-3-1',
xs=6,
lg=6,
children=[
html.H3('Location 5'),
dcc.Dropdown(
id='image-dropdownSA5',
options=[{'label': i, 'value': i} for i in list_of_images_s_america],
placeholder="Select Country",
value=list_of_images_s_america[4],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imagesa5', style={'height':'90%', 'width':'81%'})
],
),
dfx.Col(
id='col5-3-2',
xs=6,
lg=6,
children=[
html.H3('Location 6'),
dcc.Dropdown(
id='image-dropdownSA6',
options=[{'label': i, 'value': i} for i in list_of_images_s_america],
placeholder="Select Country",
value=list_of_images_s_america[5],
style=dict(
width='90%',
#display='inline-block',
verticalAlign="middle"
)
),
html.Img(id='imagesa6', style={'height':'90%', 'width':'81%'})
],
),
],
),
],
),
]
),
],
style=tabs_styles,
),
html.Div(id='tabs-content-inline')
])
#callbacks
# @app.callback(Output('tabs-content-inline', 'children'),
# [Input('tabs-styled-with-inline', 'value')])
#WORLD
@app.callback(
dash.dependencies.Output('imageworld1', 'src'),
[dash.dependencies.Input('image-dropdownWorld1', 'value')]
)
def update_image_srcWorld1(value):
return static_image_route_world + value
@app.callback(
dash.dependencies.Output('imageworld2', 'src'),
[dash.dependencies.Input('image-dropdownWorld2', 'value')]
)
def update_image_srcWorld2(value):
return static_image_route_world + value
@app.callback(
dash.dependencies.Output('imageworld3', 'src'),
[dash.dependencies.Input('image-dropdownWorld3', 'value')]
)
def update_image_srcWorld3(value):
return static_image_route_world + value
@app.callback(
dash.dependencies.Output('imageworld4', 'src'),
[dash.dependencies.Input('image-dropdownWorld4', 'value')]
)
def update_image_srcWorld4(value):
return static_image_route_world + value
@app.callback(
dash.dependencies.Output('imageworld5', 'src'),
[dash.dependencies.Input('image-dropdownWorld5', 'value')]
)
def update_image_srcWorld5(value):
return static_image_route_world + value
@app.callback(
dash.dependencies.Output('imageworld6', 'src'),
[dash.dependencies.Input('image-dropdownWorld6', 'value')]
)
def update_image_srcWorld6(value):
return static_image_route_world + value
#US
@app.callback(
dash.dependencies.Output('imageus1', 'src'),
[dash.dependencies.Input('image-dropdownUS1', 'value')]
)
def update_image_srcUS1(value):
return static_image_route_us + value
@app.callback(
dash.dependencies.Output('imageus2', 'src'),
[dash.dependencies.Input('image-dropdownUS2', 'value')]
)
def update_image_srcUS2(value):
return static_image_route_us + value
@app.callback(
dash.dependencies.Output('imageus3', 'src'),
[dash.dependencies.Input('image-dropdownUS3', 'value')]
)
def update_image_srcUS3(value):
return static_image_route_us + value
@app.callback(
dash.dependencies.Output('imageus4', 'src'),
[dash.dependencies.Input('image-dropdownUS4', 'value')]
)
def update_image_srcUS4(value):
return static_image_route_us + value
@app.callback(
dash.dependencies.Output('imageus5', 'src'),
[dash.dependencies.Input('image-dropdownUS5', 'value')]
)
def update_image_srcUS5(value):
return static_image_route_us + value
@app.callback(
dash.dependencies.Output('imageus6', 'src'),
[dash.dependencies.Input('image-dropdownUS6', 'value')]
)
def update_image_srcUS6(value):
return static_image_route_us + value
#Italy
@app.callback(
dash.dependencies.Output('imageit1', 'src'),
[dash.dependencies.Input('image-dropdownIT1', 'value')]
)
def update_image_srcIT1(value):
return static_image_route_italy + value
@app.callback(
dash.dependencies.Output('imageit2', 'src'),
[dash.dependencies.Input('image-dropdownIT2', 'value')]
)
def update_image_srcIT2(value):
return static_image_route_italy + value
@app.callback(
dash.dependencies.Output('imageit3', 'src'),
[dash.dependencies.Input('image-dropdownIT3', 'value')]
)
def update_image_srcIT3(value):
return static_image_route_italy + value
@app.callback(
dash.dependencies.Output('imageit4', 'src'),
[dash.dependencies.Input('image-dropdownIT4', 'value')]
)
def update_image_srcIT4(value):
return static_image_route_italy + value
@app.callback(
dash.dependencies.Output('imageit5', 'src'),
[dash.dependencies.Input('image-dropdownIT5', 'value')]
)
def update_image_srcIT5(value):
return static_image_route_italy + value
@app.callback(
dash.dependencies.Output('imageit6', 'src'),
[dash.dependencies.Input('image-dropdownIT6', 'value')]
)
def update_image_srcIT6(value):
return static_image_route_italy + value
#Canada
@app.callback(
dash.dependencies.Output('imageca1', 'src'),
[dash.dependencies.Input('image-dropdownCA1', 'value')]
)
def update_image_srcCA1(value):
return static_image_route_canada + value
@app.callback(
dash.dependencies.Output('imageca2', 'src'),
[dash.dependencies.Input('image-dropdownCA2', 'value')]
)
def update_image_srcCA2(value):
return static_image_route_canada + value
@app.callback(
dash.dependencies.Output('imageca3', 'src'),
[dash.dependencies.Input('image-dropdownCA3', 'value')]
)
def update_image_srcCA3(value):
return static_image_route_canada + value
@app.callback(
dash.dependencies.Output('imageca4', 'src'),
[dash.dependencies.Input('image-dropdownCA4', 'value')]
)
def update_image_srcCA4(value):
return static_image_route_canada + value
@app.callback(
dash.dependencies.Output('imageca5', 'src'),
[dash.dependencies.Input('image-dropdownCA5', 'value')]
)
def update_image_srcCA5(value):
return static_image_route_canada + value
@app.callback(
dash.dependencies.Output('imageca6', 'src'),
[dash.dependencies.Input('image-dropdownCA6', 'value')]
)
def update_image_srcCA6(value):
return static_image_route_canada + value
#South America
@app.callback(
dash.dependencies.Output('imagesa1', 'src'),
[dash.dependencies.Input('image-dropdownSA1', 'value')]
)
def update_image_srcSA1(value):
return static_image_route_s_america + value
@app.callback(
dash.dependencies.Output('imagesa2', 'src'),
[dash.dependencies.Input('image-dropdownSA2', 'value')]
)
def update_image_srcSA2(value):
return static_image_route_s_america + value
@app.callback(
dash.dependencies.Output('imagesa3', 'src'),
[dash.dependencies.Input('image-dropdownSA3', 'value')]
)
def update_image_srcSA3(value):
return static_image_route_s_america + value
@app.callback(
dash.dependencies.Output('imagesa4', 'src'),
[dash.dependencies.Input('image-dropdownSA4', 'value')]
)
def update_image_srcSA4(value):
return static_image_route_s_america + value
@app.callback(
dash.dependencies.Output('imagesa5', 'src'),
[dash.dependencies.Input('image-dropdownSA5', 'value')]
)
def update_image_srcSA5(value):
return static_image_route_s_america + value
@app.callback(
dash.dependencies.Output('imagesa6', 'src'),
[dash.dependencies.Input('image-dropdownSA6', 'value')]
)
def update_image_srcSA6(value):
return static_image_route_s_america + value
dash.dependencies.Output('imageNAmerica', 'src'),
[dash.dependencies.Input('image-dropdownNAmerica', 'value')]
# Add a static image route that serves images from desktop
# Be *very* careful here - you don't want to serve arbitrary files
# from your computer or server
@app.server.route('{}<image_path>.png'.format(static_image_route_world))
def serve_imageWorld(image_path):
image_name = '{}.png'.format(image_path)
if image_name not in list_of_images_world:
raise Exception('"{}" is excluded from the allowed static files'.format(image_path))
return flask.send_from_directory(image_directory_world, image_name)
@app.server.route('{}<image_path>.png'.format(static_image_route_us))
def serve_imageUS(image_path):
image_name = '{}.png'.format(image_path)
if image_name not in list_of_images_us:
raise Exception('"{}" is excluded from the allowed static files'.format(image_path))
return flask.send_from_directory(image_directory_us, image_name)
@app.server.route('{}<image_path>.png'.format(static_image_route_italy))
def serve_imageIT(image_path):
image_name = '{}.png'.format(image_path)
if image_name not in list_of_images_italy:
raise Exception('"{}" is excluded from the allowed static files'.format(image_path))
return flask.send_from_directory(image_directory_italy, image_name)
@app.server.route('{}<image_path>.png'.format(static_image_route_canada))
def serve_imageCA(image_path):
image_name = '{}.png'.format(image_path)
if image_name not in list_of_images_canada:
raise Exception('"{}" is excluded from the allowed static files'.format(image_path))
return flask.send_from_directory(image_directory_canada, image_name)
@app.server.route('{}<image_path>.png'.format(static_image_route_s_america))
def serve_imageSA(image_path):
image_name = '{}.png'.format(image_path)
if image_name not in list_of_images_s_america:
raise Exception('"{}" is excluded from the allowed static files'.format(image_path))
return flask.send_from_directory(image_directory_s_america, image_name)
if __name__ == '__main__':
app.run_server(debug=False)
| en | 0.263304 | # this notebook # directory where the csv files are # # world # csv_path = outputdir / 'Staight_Line_COVID_Prediction_Table_world.csv' # print(csv_path) # df = pd.read_csv(csv_path) # # US # csv_path = outputdir / 'Staight_Line_COVID_Prediction_Table_us.csv' # print(csv_path) # df_us = pd.read_csv(csv_path) # # # CA # # csv_path = outputdir / 'Staight_Line_COVID_Prediction_Table_ca.csv' # # print(csv_path) # df_ca = pd.read_csv(csv_path) # Section for Google analytics #for server deployment #d6d6d6', #d6d6d6', #d6d6d6', #display='inline-block', #'width': '600px' #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #display='inline-block', #callbacks # @app.callback(Output('tabs-content-inline', 'children'), # [Input('tabs-styled-with-inline', 'value')]) #WORLD #US #Italy #Canada #South America # Add a static image route that serves images from desktop # Be *very* careful here - you don't want to serve arbitrary files # from your computer or server | 2.287803 | 2 |
tests/collections/asr/test_asr_datasets.py | vadam5/NeMo | 10 | 6622085 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
from nemo.collections.asr.data.audio_to_text import TarredAudioToBPEDataset, TarredAudioToCharDataset
from nemo.collections.asr.parts.features import WaveformFeaturizer
from nemo.collections.common import tokenizers
class TestASRDatasets:
labels = [
" ",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"'",
]
@pytest.mark.unit
def test_tarred_dataset(self, test_data_dir):
manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/tarred_audio_manifest.json'))
# Test braceexpand loading
tarpath = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/audio_{0..1}.tar'))
ds_braceexpand = TarredAudioToCharDataset(
audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, labels=self.labels, sample_rate=16000
)
assert len(ds_braceexpand) == 32
count = 0
for _ in ds_braceexpand:
count += 1
assert count == 32
# Test loading via list
tarpath = [os.path.abspath(os.path.join(test_data_dir, f'asr/tarred_an4/audio_{i}.tar')) for i in range(2)]
ds_list_load = TarredAudioToCharDataset(
audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, labels=self.labels, sample_rate=16000
)
count = 0
for _ in ds_list_load:
count += 1
assert count == 32
@pytest.mark.unit
def test_tarred_bpe_dataset(self, test_data_dir):
manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/tarred_audio_manifest.json'))
tokenizer_path = os.path.join(test_data_dir, "asr", "tokenizers", "an4_wpe_128", 'vocab.txt')
tokenizer = tokenizers.AutoTokenizer(pretrained_model_name='bert-base-cased', vocab_file=tokenizer_path)
# Test braceexpand loading
tarpath = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/audio_{0..1}.tar'))
ds_braceexpand = TarredAudioToBPEDataset(
audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, tokenizer=tokenizer, sample_rate=16000
)
assert len(ds_braceexpand) == 32
count = 0
for _ in ds_braceexpand:
count += 1
assert count == 32
# Test loading via list
tarpath = [os.path.abspath(os.path.join(test_data_dir, f'asr/tarred_an4/audio_{i}.tar')) for i in range(2)]
ds_list_load = TarredAudioToBPEDataset(
audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, tokenizer=tokenizer, sample_rate=16000
)
count = 0
for _ in ds_list_load:
count += 1
assert count == 32
| # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
from nemo.collections.asr.data.audio_to_text import TarredAudioToBPEDataset, TarredAudioToCharDataset
from nemo.collections.asr.parts.features import WaveformFeaturizer
from nemo.collections.common import tokenizers
class TestASRDatasets:
labels = [
" ",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"'",
]
@pytest.mark.unit
def test_tarred_dataset(self, test_data_dir):
manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/tarred_audio_manifest.json'))
# Test braceexpand loading
tarpath = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/audio_{0..1}.tar'))
ds_braceexpand = TarredAudioToCharDataset(
audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, labels=self.labels, sample_rate=16000
)
assert len(ds_braceexpand) == 32
count = 0
for _ in ds_braceexpand:
count += 1
assert count == 32
# Test loading via list
tarpath = [os.path.abspath(os.path.join(test_data_dir, f'asr/tarred_an4/audio_{i}.tar')) for i in range(2)]
ds_list_load = TarredAudioToCharDataset(
audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, labels=self.labels, sample_rate=16000
)
count = 0
for _ in ds_list_load:
count += 1
assert count == 32
@pytest.mark.unit
def test_tarred_bpe_dataset(self, test_data_dir):
manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/tarred_audio_manifest.json'))
tokenizer_path = os.path.join(test_data_dir, "asr", "tokenizers", "an4_wpe_128", 'vocab.txt')
tokenizer = tokenizers.AutoTokenizer(pretrained_model_name='bert-base-cased', vocab_file=tokenizer_path)
# Test braceexpand loading
tarpath = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/audio_{0..1}.tar'))
ds_braceexpand = TarredAudioToBPEDataset(
audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, tokenizer=tokenizer, sample_rate=16000
)
assert len(ds_braceexpand) == 32
count = 0
for _ in ds_braceexpand:
count += 1
assert count == 32
# Test loading via list
tarpath = [os.path.abspath(os.path.join(test_data_dir, f'asr/tarred_an4/audio_{i}.tar')) for i in range(2)]
ds_list_load = TarredAudioToBPEDataset(
audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, tokenizer=tokenizer, sample_rate=16000
)
count = 0
for _ in ds_list_load:
count += 1
assert count == 32
| en | 0.824047 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Test braceexpand loading # Test loading via list # Test braceexpand loading # Test loading via list | 1.834524 | 2 |
demo/PYTHON3/zouwu.py | Yunlei-AI/ZouWu | 8 | 6622086 | <reponame>Yunlei-AI/ZouWu
import os
import ctypes
from ctypes import CDLL
class ZouWu(object):
def __init__(
self,
library_path):
if not os.path.exists(library_path):
raise IOError("Zouwu library path erro at '%s'" % library_path)
#
#self.zouwulib = CDLL(library_path)
self.zouwulib = ctypes.cdll.LoadLibrary(library_path)
# init
self.zouwu_pInst = ctypes.c_void_p(0)
self.pInst = ctypes.byref(self.zouwu_pInst)
self.zouwulib.ZouwuInit(self.pInst)
# load model
#self.zouwulib.ZouwuLoadModel(self.zouwu_pInst, model_path.encode())
#set Param
#c_sensitivity = ctypes.c_float(sensitivity)
#pcmd=ctypes.c_char_p(b'sensitivity')
#mdlid=ctypes.c_int(1)
#self.zouwulib.ZouwuSetParam(self.zouwu_pInst,pcmd,ctypes.byref(c_sensitivity),mdlid)
def release(self):
self.zouwulib.ZouwuFinal(self.pInst)
def RegModel(self,data,datalen,id=1):
mdlid = ctypes.c_int(id)
nSample = ctypes.c_int(datalen)
self.zouwulib.ZouwuRegModel(self.zouwu_pInst, (ctypes.c_short * len(data))(*data), nSample, mdlid)
def LoadModel(self,model_path):
if not os.path.exists(model_path):
raise IOError("Zouwu model path erro at '%s'" % model_path)
# load model
ret = self.zouwulib.ZouwuLoadModel(self.zouwu_pInst, model_path.encode())
if ret != 0:
print("load model erro!")
return 1
return 0
def SetParam(self,sensitivity=0.3,mdl_id=1):
c_sensitivity = ctypes.c_float(sensitivity)
pcmd=ctypes.c_char_p(b'sensitivity')
mdlid=ctypes.c_int(mdl_id)
self.zouwulib.ZouwuSetParam(self.zouwu_pInst,pcmd,ctypes.byref(c_sensitivity),mdlid)
def Proc(self,data):
c_ret = ctypes.c_int(0)
self.zouwulib.ZouwuProc(self.zouwu_pInst,(ctypes.c_short * len(data))(*data),ctypes.byref(c_ret))
return c_ret.value
| import os
import ctypes
from ctypes import CDLL
class ZouWu(object):
def __init__(
self,
library_path):
if not os.path.exists(library_path):
raise IOError("Zouwu library path erro at '%s'" % library_path)
#
#self.zouwulib = CDLL(library_path)
self.zouwulib = ctypes.cdll.LoadLibrary(library_path)
# init
self.zouwu_pInst = ctypes.c_void_p(0)
self.pInst = ctypes.byref(self.zouwu_pInst)
self.zouwulib.ZouwuInit(self.pInst)
# load model
#self.zouwulib.ZouwuLoadModel(self.zouwu_pInst, model_path.encode())
#set Param
#c_sensitivity = ctypes.c_float(sensitivity)
#pcmd=ctypes.c_char_p(b'sensitivity')
#mdlid=ctypes.c_int(1)
#self.zouwulib.ZouwuSetParam(self.zouwu_pInst,pcmd,ctypes.byref(c_sensitivity),mdlid)
def release(self):
self.zouwulib.ZouwuFinal(self.pInst)
def RegModel(self,data,datalen,id=1):
mdlid = ctypes.c_int(id)
nSample = ctypes.c_int(datalen)
self.zouwulib.ZouwuRegModel(self.zouwu_pInst, (ctypes.c_short * len(data))(*data), nSample, mdlid)
def LoadModel(self,model_path):
if not os.path.exists(model_path):
raise IOError("Zouwu model path erro at '%s'" % model_path)
# load model
ret = self.zouwulib.ZouwuLoadModel(self.zouwu_pInst, model_path.encode())
if ret != 0:
print("load model erro!")
return 1
return 0
def SetParam(self,sensitivity=0.3,mdl_id=1):
c_sensitivity = ctypes.c_float(sensitivity)
pcmd=ctypes.c_char_p(b'sensitivity')
mdlid=ctypes.c_int(mdl_id)
self.zouwulib.ZouwuSetParam(self.zouwu_pInst,pcmd,ctypes.byref(c_sensitivity),mdlid)
def Proc(self,data):
c_ret = ctypes.c_int(0)
self.zouwulib.ZouwuProc(self.zouwu_pInst,(ctypes.c_short * len(data))(*data),ctypes.byref(c_ret))
return c_ret.value | en | 0.276556 | # #self.zouwulib = CDLL(library_path) # init # load model #self.zouwulib.ZouwuLoadModel(self.zouwu_pInst, model_path.encode()) #set Param #c_sensitivity = ctypes.c_float(sensitivity) #pcmd=ctypes.c_char_p(b'sensitivity') #mdlid=ctypes.c_int(1) #self.zouwulib.ZouwuSetParam(self.zouwu_pInst,pcmd,ctypes.byref(c_sensitivity),mdlid) # load model | 2.322797 | 2 |
playground/server test.py | antonloof/lnpsController | 0 | 6622087 | import socket, json, serial, time
## ps communication constants
MSG_TYPE_SEND = 0xC0
MSG_TYPE_QUERY = 0x40
MSG_TYPE_ANSWER = 0x80
CAST_TYPE_ANSWER_FROM_DEV = 0x00
CAST_TYPE_BROADCAST = 0x20
DIRECTION_FROM_PC = 0x10
DIRECTION_FROM_DEV = 0x00
TRANSMISSION_LATENCY = 0.050 #50 ms
class ComObject:
DEV_TYPE = 0
DEV_SER_NO = 1
NOM_VOLTAGE = 2
NOM_CURRENT = 3
NOM_POWER = 4
DEV_PART_NO = 6
DEV_MANUFACTURER = 8
DEV_SW_VERSION = 9
DEV_CLASS = 19
OVP_THRESHOLD = 38
OCP_THRESHOLD = 39
VOLTAGE = 50
CURRENT = 51
DEV_CONTROL = 54
DEV_STATUS = 71
DEV_CONF = 72
DEV_CONTROL_BUG_FIX = 77
ERROR_CODE = 0xFF
PS_ERRORS = {0:"OK",
3:"INCORRECT_CHECKSUM",
4:"INCORRECT_START_DELIMITER",
5:"WRONG_OUTPUT_ADDRESS",
7:"UNDEFINED_OBJECT",
8:"INCORRECT_OBJECT_LENGTH",
9:"VIOLATED_READ_WRITE_PERMISSION",
15:"DEVICE_IN_LOCK_STATE",
48:"EXCEEDED_OBJECT_UPPER_LIMIT",
49:"EXCEEDED_OBJECT_LOWER_LIMIT"}
## ps communication helpers
def calculateChecksum(h, d):
checksum = h[0] + h[1] + h[2]
for i in d:
checksum += i
return checksum.to_bytes(2, 'big')
def createHeader(sd, dn, obj)
return bytes([sd, dn, obj])
class PowerSupply():
def __init__(self, serial):
self.serial = serial
self.lastSend = 0
def recv(self, expectedObj):
startDelim = int.from_bytes(self.serial.read(), 'big')
if startDelim & 0xC0 != MSG_TYPE_ANSWER:
return
if startDelim & CAST_TYPE_BROADCAST != CAST_TYPE_ANSWER_FROM_DEV:
return
if srartDelim % DIRECTION_FROM_PC != DIRECTION_FROM_DEV:
return
deviceNode = int.from_bytes(self.serial.read(), 'big')
obj = int.from_bytes(self.serial.read(), 'big')
data = self.serial.read(startDelim & 0xF + 1)
checksum = self.serial.read(2)
if checksum != calculateChecksum(createHeader(startDelim, deviceNode, obj), data):
return
if obj != expectedObj && obj != ComObject.ERROR_CODE:
return
def send(self, startdelim, deviceNode, obj, data=b''):
# wait for some time to pass in order not to bombard the ps with requests
timeSinceLastSent = time.time() - self.lastSend
if timeSinceLastSent < TRANSMISSION_LATENCY:
time.sleep(TRANSMISSION_LATENCY - timeSinceLastSent)
header = createHeader(startdelim, deviceNode, obj)
self.serial.write(header + data + calculateChecksum(header, data))
def get(self, obj):
startdelim = MSG_TYPE_QUERY + CAST_TYPE_BROADCAST + DIRECTION_FROM_PC
deviceNode = 0
self.send(startDelim, deviceNode, obj)
return self.recv(obj)
## load json
CONF_PATH = "ps_config.json"
with open(CONF_PATH, "r") as f:
config = json.load(f)
print(config)
## establish which port talks to which ps
BAUD_RATE = 115200
for port in config.ports:
ps = PowerSupply(serial.Serial(port, BAUD_RATE, timeout=1, parity=serial.PARITY_ODD))
## start threads for com ports
## start web server
## delegate
"""
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind(('localhost', 5039))
serversocket.listen(5)
while True:
cs, addr = serversocket.accept()
request = cs.recv(2048)
headerbody = request.decode("utf-8").split("\r\n\r\n")
if len(headerbody) == 1:
headers = headerbody[0].split("\r\n")
body = ""
elif len(headerbody) == 2:
headers, body = headerbody
headers = headers.split("\r\n")
else:
resp = b"HTTP/1.1 400 Bad Request"
cs.send(resp)
continue
print(headers, body)
resp = b"HTTP/1.1 200 OK\r\nContent-Type: text/json; charset=UTF-8\r\n\r\n{success: 4}"
cs.send(resp)
""" | import socket, json, serial, time
## ps communication constants
MSG_TYPE_SEND = 0xC0
MSG_TYPE_QUERY = 0x40
MSG_TYPE_ANSWER = 0x80
CAST_TYPE_ANSWER_FROM_DEV = 0x00
CAST_TYPE_BROADCAST = 0x20
DIRECTION_FROM_PC = 0x10
DIRECTION_FROM_DEV = 0x00
TRANSMISSION_LATENCY = 0.050 #50 ms
class ComObject:
DEV_TYPE = 0
DEV_SER_NO = 1
NOM_VOLTAGE = 2
NOM_CURRENT = 3
NOM_POWER = 4
DEV_PART_NO = 6
DEV_MANUFACTURER = 8
DEV_SW_VERSION = 9
DEV_CLASS = 19
OVP_THRESHOLD = 38
OCP_THRESHOLD = 39
VOLTAGE = 50
CURRENT = 51
DEV_CONTROL = 54
DEV_STATUS = 71
DEV_CONF = 72
DEV_CONTROL_BUG_FIX = 77
ERROR_CODE = 0xFF
PS_ERRORS = {0:"OK",
3:"INCORRECT_CHECKSUM",
4:"INCORRECT_START_DELIMITER",
5:"WRONG_OUTPUT_ADDRESS",
7:"UNDEFINED_OBJECT",
8:"INCORRECT_OBJECT_LENGTH",
9:"VIOLATED_READ_WRITE_PERMISSION",
15:"DEVICE_IN_LOCK_STATE",
48:"EXCEEDED_OBJECT_UPPER_LIMIT",
49:"EXCEEDED_OBJECT_LOWER_LIMIT"}
## ps communication helpers
def calculateChecksum(h, d):
checksum = h[0] + h[1] + h[2]
for i in d:
checksum += i
return checksum.to_bytes(2, 'big')
def createHeader(sd, dn, obj)
return bytes([sd, dn, obj])
class PowerSupply():
def __init__(self, serial):
self.serial = serial
self.lastSend = 0
def recv(self, expectedObj):
startDelim = int.from_bytes(self.serial.read(), 'big')
if startDelim & 0xC0 != MSG_TYPE_ANSWER:
return
if startDelim & CAST_TYPE_BROADCAST != CAST_TYPE_ANSWER_FROM_DEV:
return
if srartDelim % DIRECTION_FROM_PC != DIRECTION_FROM_DEV:
return
deviceNode = int.from_bytes(self.serial.read(), 'big')
obj = int.from_bytes(self.serial.read(), 'big')
data = self.serial.read(startDelim & 0xF + 1)
checksum = self.serial.read(2)
if checksum != calculateChecksum(createHeader(startDelim, deviceNode, obj), data):
return
if obj != expectedObj && obj != ComObject.ERROR_CODE:
return
def send(self, startdelim, deviceNode, obj, data=b''):
# wait for some time to pass in order not to bombard the ps with requests
timeSinceLastSent = time.time() - self.lastSend
if timeSinceLastSent < TRANSMISSION_LATENCY:
time.sleep(TRANSMISSION_LATENCY - timeSinceLastSent)
header = createHeader(startdelim, deviceNode, obj)
self.serial.write(header + data + calculateChecksum(header, data))
def get(self, obj):
startdelim = MSG_TYPE_QUERY + CAST_TYPE_BROADCAST + DIRECTION_FROM_PC
deviceNode = 0
self.send(startDelim, deviceNode, obj)
return self.recv(obj)
## load json
CONF_PATH = "ps_config.json"
with open(CONF_PATH, "r") as f:
config = json.load(f)
print(config)
## establish which port talks to which ps
BAUD_RATE = 115200
for port in config.ports:
ps = PowerSupply(serial.Serial(port, BAUD_RATE, timeout=1, parity=serial.PARITY_ODD))
## start threads for com ports
## start web server
## delegate
"""
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind(('localhost', 5039))
serversocket.listen(5)
while True:
cs, addr = serversocket.accept()
request = cs.recv(2048)
headerbody = request.decode("utf-8").split("\r\n\r\n")
if len(headerbody) == 1:
headers = headerbody[0].split("\r\n")
body = ""
elif len(headerbody) == 2:
headers, body = headerbody
headers = headers.split("\r\n")
else:
resp = b"HTTP/1.1 400 Bad Request"
cs.send(resp)
continue
print(headers, body)
resp = b"HTTP/1.1 200 OK\r\nContent-Type: text/json; charset=UTF-8\r\n\r\n{success: 4}"
cs.send(resp)
""" | en | 0.491018 | ## ps communication constants #50 ms ## ps communication helpers # wait for some time to pass in order not to bombard the ps with requests ## load json ## establish which port talks to which ps ## start threads for com ports ## start web server ## delegate serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) serversocket.bind(('localhost', 5039)) serversocket.listen(5) while True: cs, addr = serversocket.accept() request = cs.recv(2048) headerbody = request.decode("utf-8").split("\r\n\r\n") if len(headerbody) == 1: headers = headerbody[0].split("\r\n") body = "" elif len(headerbody) == 2: headers, body = headerbody headers = headers.split("\r\n") else: resp = b"HTTP/1.1 400 Bad Request" cs.send(resp) continue print(headers, body) resp = b"HTTP/1.1 200 OK\r\nContent-Type: text/json; charset=UTF-8\r\n\r\n{success: 4}" cs.send(resp) | 2.253575 | 2 |
ana/OpticksIdentity.py | hanswenzel/opticks | 11 | 6622088 | <gh_stars>10-100
#!/usr/bin/env python
"""
In [80]: trpo[trpo[:,1] == 5]
Out[80]:
array([[83886080, 5, 0, 0],
[83886081, 5, 0, 1],
[83886082, 5, 0, 2],
...,
[84057858, 5, 671, 2],
[84057859, 5, 671, 3],
[84057860, 5, 671, 4]], dtype=uint32)
"""
import numpy as np
class OpticksIdentity(object):
"""
cf okc/OpticksIdentity.cc
"""
@classmethod
def Encode(cls, ridx, pidx, oidx):
if ridx > 0:
assert (ridx & 0xff) == ridx
assert (pidx & 0xffff) == pidx
assert (oidx & 0xff) == oidx
return (ridx << 24) | (pidx << 8) | (oidx << 0)
else:
assert (ridx & 0xff) == ridx
assert pidx == 0
assert (oidx & 0xffffff) == oidx
return (ridx << 24) | (oidx << 0)
pass
@classmethod
def Decode(cls, tid):
ridx = ( tid >> 24 ) & 0xff
pidx = np.where( ridx == 0, 0, ( tid >> 8 ) & 0xffff )
oidx = np.where( ridx == 0, ( tid >> 0 ) & 0xffffff, ( tid >> 0 ) & 0xff )
return ridx,pidx,oidx
@classmethod
def NRPO(cls, tid):
"""
Decode the triplet identifier to show nidx/ridx/pidx/oidx (node/repeat/placement/offset-idx)
of all volumes, see okc/OpticksIdentity::Decode::
In [44]: nrpo[nrpo[:,1] == 5]
Out[44]:
array([[ 3199, 5, 0, 0],
[ 3200, 5, 0, 1],
[ 3201, 5, 0, 2],
...,
[11410, 5, 671, 2],
[11411, 5, 671, 3],
[11412, 5, 671, 4]], dtype=uint32)
"""
nidx = np.arange(len(tid), dtype=np.uint32)
ridx,pidx,oidx = cls.Decode(tid)
nrpo = np.zeros( (len(tid),4), dtype=np.uint32 )
nrpo[:,0] = nidx
nrpo[:,1] = ridx
nrpo[:,2] = pidx
nrpo[:,3] = oidx
return nrpo
if __name__ == '__main__':
import os, numpy as np
from opticks.ana.key import keydir
avi = np.load(os.path.join(keydir(),"GNodeLib/all_volume_identity.npy"))
tid = avi[:,1]
nrpo = OpticksIdentity.NRPO(tid)
| #!/usr/bin/env python
"""
In [80]: trpo[trpo[:,1] == 5]
Out[80]:
array([[83886080, 5, 0, 0],
[83886081, 5, 0, 1],
[83886082, 5, 0, 2],
...,
[84057858, 5, 671, 2],
[84057859, 5, 671, 3],
[84057860, 5, 671, 4]], dtype=uint32)
"""
import numpy as np
class OpticksIdentity(object):
"""
cf okc/OpticksIdentity.cc
"""
@classmethod
def Encode(cls, ridx, pidx, oidx):
if ridx > 0:
assert (ridx & 0xff) == ridx
assert (pidx & 0xffff) == pidx
assert (oidx & 0xff) == oidx
return (ridx << 24) | (pidx << 8) | (oidx << 0)
else:
assert (ridx & 0xff) == ridx
assert pidx == 0
assert (oidx & 0xffffff) == oidx
return (ridx << 24) | (oidx << 0)
pass
@classmethod
def Decode(cls, tid):
ridx = ( tid >> 24 ) & 0xff
pidx = np.where( ridx == 0, 0, ( tid >> 8 ) & 0xffff )
oidx = np.where( ridx == 0, ( tid >> 0 ) & 0xffffff, ( tid >> 0 ) & 0xff )
return ridx,pidx,oidx
@classmethod
def NRPO(cls, tid):
"""
Decode the triplet identifier to show nidx/ridx/pidx/oidx (node/repeat/placement/offset-idx)
of all volumes, see okc/OpticksIdentity::Decode::
In [44]: nrpo[nrpo[:,1] == 5]
Out[44]:
array([[ 3199, 5, 0, 0],
[ 3200, 5, 0, 1],
[ 3201, 5, 0, 2],
...,
[11410, 5, 671, 2],
[11411, 5, 671, 3],
[11412, 5, 671, 4]], dtype=uint32)
"""
nidx = np.arange(len(tid), dtype=np.uint32)
ridx,pidx,oidx = cls.Decode(tid)
nrpo = np.zeros( (len(tid),4), dtype=np.uint32 )
nrpo[:,0] = nidx
nrpo[:,1] = ridx
nrpo[:,2] = pidx
nrpo[:,3] = oidx
return nrpo
if __name__ == '__main__':
import os, numpy as np
from opticks.ana.key import keydir
avi = np.load(os.path.join(keydir(),"GNodeLib/all_volume_identity.npy"))
tid = avi[:,1]
nrpo = OpticksIdentity.NRPO(tid) | en | 0.318317 | #!/usr/bin/env python In [80]: trpo[trpo[:,1] == 5] Out[80]: array([[83886080, 5, 0, 0], [83886081, 5, 0, 1], [83886082, 5, 0, 2], ..., [84057858, 5, 671, 2], [84057859, 5, 671, 3], [84057860, 5, 671, 4]], dtype=uint32) cf okc/OpticksIdentity.cc Decode the triplet identifier to show nidx/ridx/pidx/oidx (node/repeat/placement/offset-idx) of all volumes, see okc/OpticksIdentity::Decode:: In [44]: nrpo[nrpo[:,1] == 5] Out[44]: array([[ 3199, 5, 0, 0], [ 3200, 5, 0, 1], [ 3201, 5, 0, 2], ..., [11410, 5, 671, 2], [11411, 5, 671, 3], [11412, 5, 671, 4]], dtype=uint32) | 2.316205 | 2 |
displayio/_bitmap.py | adafruit/Adafruit_Blinka_displayio | 0 | 6622089 | <gh_stars>0
# SPDX-FileCopyrightText: 2020 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`displayio.bitmap`
================================================================================
displayio for Blinka
**Software and Dependencies:**
* Adafruit Blinka:
https://github.com/adafruit/Adafruit_Blinka/releases
* Author(s): <NAME>
"""
from __future__ import annotations
from typing import Union, Tuple
from PIL import Image
from ._structs import RectangleStruct
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_Blinka_displayio.git"
class Bitmap:
"""Stores values of a certain size in a 2D array"""
def __init__(self, width: int, height: int, value_count: int):
"""Create a Bitmap object with the given fixed size. Each pixel stores a value that is
used to index into a corresponding palette. This enables differently colored sprites to
share the underlying Bitmap. value_count is used to minimize the memory used to store
the Bitmap.
"""
self._bmp_width = width
self._bmp_height = height
self._read_only = False
if value_count < 0:
raise ValueError("value_count must be > 0")
bits = 1
while (value_count - 1) >> bits:
if bits < 8:
bits = bits << 1
else:
bits += 8
self._bits_per_value = bits
if (
self._bits_per_value > 8
and self._bits_per_value != 16
and self._bits_per_value != 32
):
raise NotImplementedError("Invalid bits per value")
self._image = Image.new("P", (width, height), 0)
self._dirty_area = RectangleStruct(0, 0, width, height)
def __getitem__(self, index: Union[Tuple[int, int], int]) -> int:
"""
Returns the value at the given index. The index can either be
an x,y tuple or an int equal to `y * width + x`.
"""
if isinstance(index, (tuple, list)):
x, y = index
elif isinstance(index, int):
x = index % self._bmp_width
y = index // self._bmp_width
else:
raise TypeError("Index is not an int, list, or tuple")
if x > self._image.width or y > self._image.height:
raise ValueError(f"Index {index} is out of range")
return self._image.getpixel((x, y))
def __setitem__(self, index: Union[Tuple[int, int], int], value: int) -> None:
"""
Sets the value at the given index. The index can either be
an x,y tuple or an int equal to `y * width + x`.
"""
if self._read_only:
raise RuntimeError("Read-only object")
if isinstance(index, (tuple, list)):
x = index[0]
y = index[1]
index = y * self._bmp_width + x
elif isinstance(index, int):
x = index % self._bmp_width
y = index // self._bmp_width
self._image.putpixel((x, y), value)
if self._dirty_area.x1 == self._dirty_area.x2:
self._dirty_area.x1 = x
self._dirty_area.x2 = x + 1
self._dirty_area.y1 = y
self._dirty_area.y2 = y + 1
else:
if x < self._dirty_area.x1:
self._dirty_area.x1 = x
elif x >= self._dirty_area.x2:
self._dirty_area.x2 = x + 1
if y < self._dirty_area.y1:
self._dirty_area.y1 = y
elif y >= self._dirty_area.y2:
self._dirty_area.y2 = y + 1
def _finish_refresh(self):
self._dirty_area.x1 = 0
self._dirty_area.x2 = 0
def fill(self, value: int) -> None:
"""Fills the bitmap with the supplied palette index value."""
self._image = Image.new("P", (self._bmp_width, self._bmp_height), value)
self._dirty_area = RectangleStruct(0, 0, self._bmp_width, self._bmp_height)
def blit(
self,
x: int,
y: int,
source_bitmap: Bitmap,
*,
x1: int,
y1: int,
x2: int,
y2: int,
skip_index: int,
) -> None:
# pylint: disable=unnecessary-pass, invalid-name
"""Inserts the source_bitmap region defined by rectangular boundaries"""
pass
def dirty(self, x1: int = 0, y1: int = 0, x2: int = -1, y2: int = -1) -> None:
# pylint: disable=unnecessary-pass, invalid-name
"""Inform displayio of bitmap updates done via the buffer protocol."""
pass
@property
def width(self) -> int:
"""Width of the bitmap. (read only)"""
return self._bmp_width
@property
def height(self) -> int:
"""Height of the bitmap. (read only)"""
return self._bmp_height
| # SPDX-FileCopyrightText: 2020 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`displayio.bitmap`
================================================================================
displayio for Blinka
**Software and Dependencies:**
* Adafruit Blinka:
https://github.com/adafruit/Adafruit_Blinka/releases
* Author(s): <NAME>
"""
from __future__ import annotations
from typing import Union, Tuple
from PIL import Image
from ._structs import RectangleStruct
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_Blinka_displayio.git"
class Bitmap:
"""Stores values of a certain size in a 2D array"""
def __init__(self, width: int, height: int, value_count: int):
"""Create a Bitmap object with the given fixed size. Each pixel stores a value that is
used to index into a corresponding palette. This enables differently colored sprites to
share the underlying Bitmap. value_count is used to minimize the memory used to store
the Bitmap.
"""
self._bmp_width = width
self._bmp_height = height
self._read_only = False
if value_count < 0:
raise ValueError("value_count must be > 0")
bits = 1
while (value_count - 1) >> bits:
if bits < 8:
bits = bits << 1
else:
bits += 8
self._bits_per_value = bits
if (
self._bits_per_value > 8
and self._bits_per_value != 16
and self._bits_per_value != 32
):
raise NotImplementedError("Invalid bits per value")
self._image = Image.new("P", (width, height), 0)
self._dirty_area = RectangleStruct(0, 0, width, height)
def __getitem__(self, index: Union[Tuple[int, int], int]) -> int:
"""
Returns the value at the given index. The index can either be
an x,y tuple or an int equal to `y * width + x`.
"""
if isinstance(index, (tuple, list)):
x, y = index
elif isinstance(index, int):
x = index % self._bmp_width
y = index // self._bmp_width
else:
raise TypeError("Index is not an int, list, or tuple")
if x > self._image.width or y > self._image.height:
raise ValueError(f"Index {index} is out of range")
return self._image.getpixel((x, y))
def __setitem__(self, index: Union[Tuple[int, int], int], value: int) -> None:
"""
Sets the value at the given index. The index can either be
an x,y tuple or an int equal to `y * width + x`.
"""
if self._read_only:
raise RuntimeError("Read-only object")
if isinstance(index, (tuple, list)):
x = index[0]
y = index[1]
index = y * self._bmp_width + x
elif isinstance(index, int):
x = index % self._bmp_width
y = index // self._bmp_width
self._image.putpixel((x, y), value)
if self._dirty_area.x1 == self._dirty_area.x2:
self._dirty_area.x1 = x
self._dirty_area.x2 = x + 1
self._dirty_area.y1 = y
self._dirty_area.y2 = y + 1
else:
if x < self._dirty_area.x1:
self._dirty_area.x1 = x
elif x >= self._dirty_area.x2:
self._dirty_area.x2 = x + 1
if y < self._dirty_area.y1:
self._dirty_area.y1 = y
elif y >= self._dirty_area.y2:
self._dirty_area.y2 = y + 1
def _finish_refresh(self):
self._dirty_area.x1 = 0
self._dirty_area.x2 = 0
def fill(self, value: int) -> None:
"""Fills the bitmap with the supplied palette index value."""
self._image = Image.new("P", (self._bmp_width, self._bmp_height), value)
self._dirty_area = RectangleStruct(0, 0, self._bmp_width, self._bmp_height)
def blit(
self,
x: int,
y: int,
source_bitmap: Bitmap,
*,
x1: int,
y1: int,
x2: int,
y2: int,
skip_index: int,
) -> None:
# pylint: disable=unnecessary-pass, invalid-name
"""Inserts the source_bitmap region defined by rectangular boundaries"""
pass
def dirty(self, x1: int = 0, y1: int = 0, x2: int = -1, y2: int = -1) -> None:
# pylint: disable=unnecessary-pass, invalid-name
"""Inform displayio of bitmap updates done via the buffer protocol."""
pass
@property
def width(self) -> int:
"""Width of the bitmap. (read only)"""
return self._bmp_width
@property
def height(self) -> int:
"""Height of the bitmap. (read only)"""
return self._bmp_height | en | 0.565492 | # SPDX-FileCopyrightText: 2020 <NAME> for Adafruit Industries # # SPDX-License-Identifier: MIT `displayio.bitmap` ================================================================================ displayio for Blinka **Software and Dependencies:** * Adafruit Blinka: https://github.com/adafruit/Adafruit_Blinka/releases * Author(s): <NAME> Stores values of a certain size in a 2D array Create a Bitmap object with the given fixed size. Each pixel stores a value that is used to index into a corresponding palette. This enables differently colored sprites to share the underlying Bitmap. value_count is used to minimize the memory used to store the Bitmap. Returns the value at the given index. The index can either be an x,y tuple or an int equal to `y * width + x`. Sets the value at the given index. The index can either be an x,y tuple or an int equal to `y * width + x`. Fills the bitmap with the supplied palette index value. # pylint: disable=unnecessary-pass, invalid-name Inserts the source_bitmap region defined by rectangular boundaries # pylint: disable=unnecessary-pass, invalid-name Inform displayio of bitmap updates done via the buffer protocol. Width of the bitmap. (read only) Height of the bitmap. (read only) | 2.675762 | 3 |
plastering/inferencers/algorithm/GeneticAlgorithm/colocation/utils/paths.py | MingzheWu418/plastering | 0 | 6622090 | <reponame>MingzheWu418/plastering<gh_stars>0
"""Helper function that deal with paths
"""
import pathlib
def create_dir(dir_name):
"""Create a dir if it does not already exist
Args:
dir_name (str): the directory name
Raises:
FileExistsError: If the path specified already exists and is a file
"""
path = pathlib.Path(dir_name)
if not path.exists():
path.mkdir(parents=True, exist_ok=True)
elif path.is_file():
raise FileExistsError('Log Path already exists and is not a dir')
| """Helper function that deal with paths
"""
import pathlib
def create_dir(dir_name):
"""Create a dir if it does not already exist
Args:
dir_name (str): the directory name
Raises:
FileExistsError: If the path specified already exists and is a file
"""
path = pathlib.Path(dir_name)
if not path.exists():
path.mkdir(parents=True, exist_ok=True)
elif path.is_file():
raise FileExistsError('Log Path already exists and is not a dir') | en | 0.712992 | Helper function that deal with paths Create a dir if it does not already exist Args: dir_name (str): the directory name Raises: FileExistsError: If the path specified already exists and is a file | 4.096374 | 4 |
algorithms/graphs/dijkstra.py | mchao409/python-algorithms | 3 | 6622091 | <filename>algorithms/graphs/dijkstra.py
"""
Dijkstra's algorithm is an algorithm for finding the shortest paths between nodes in a graph,
which may represent, for example, road networks.
It was conceived by computer scientist <NAME> in 1956 and published three years later [Wikipedia]
Worst-case Performance: O(|E|+|V| log |V|)
"""
import queue
def dijkstra(graph, start, target):
"""
Solves shortest path problem using Dijkstra algorithm
Args:
graph: graph representation
start: start node
target: target node
Returns:
int: distance between start and target nodes
Examples:
>>> graph = prepare_weighted_undirect_graph(
[(1, 2, 7), (1, 3, 9), (1, 6, 14), (6, 3, 2), (6, 5, 9), (3, 2, 10), (3, 4, 11),
(2, 4, 15), (6, 5, 9), (5, 4, 6)])
>>> dijkstra(graph, 1, 6)
11
"""
dist = dict()
dist[start] = 0
q = queue.PriorityQueue()
q.put(start)
while not q.empty():
node = q.get()
for adjacent_node, edge_weigth in graph[node].items():
length = dist[node] + edge_weigth
if adjacent_node not in dist or length < dist[adjacent_node]:
dist[adjacent_node] = length
q.put(adjacent_node, dist[adjacent_node])
if target not in dist:
return -1
return dist[target]
| <filename>algorithms/graphs/dijkstra.py
"""
Dijkstra's algorithm is an algorithm for finding the shortest paths between nodes in a graph,
which may represent, for example, road networks.
It was conceived by computer scientist <NAME> in 1956 and published three years later [Wikipedia]
Worst-case Performance: O(|E|+|V| log |V|)
"""
import queue
def dijkstra(graph, start, target):
"""
Solves shortest path problem using Dijkstra algorithm
Args:
graph: graph representation
start: start node
target: target node
Returns:
int: distance between start and target nodes
Examples:
>>> graph = prepare_weighted_undirect_graph(
[(1, 2, 7), (1, 3, 9), (1, 6, 14), (6, 3, 2), (6, 5, 9), (3, 2, 10), (3, 4, 11),
(2, 4, 15), (6, 5, 9), (5, 4, 6)])
>>> dijkstra(graph, 1, 6)
11
"""
dist = dict()
dist[start] = 0
q = queue.PriorityQueue()
q.put(start)
while not q.empty():
node = q.get()
for adjacent_node, edge_weigth in graph[node].items():
length = dist[node] + edge_weigth
if adjacent_node not in dist or length < dist[adjacent_node]:
dist[adjacent_node] = length
q.put(adjacent_node, dist[adjacent_node])
if target not in dist:
return -1
return dist[target]
| en | 0.87643 | Dijkstra's algorithm is an algorithm for finding the shortest paths between nodes in a graph, which may represent, for example, road networks. It was conceived by computer scientist <NAME> in 1956 and published three years later [Wikipedia] Worst-case Performance: O(|E|+|V| log |V|) Solves shortest path problem using Dijkstra algorithm Args: graph: graph representation start: start node target: target node Returns: int: distance between start and target nodes Examples: >>> graph = prepare_weighted_undirect_graph( [(1, 2, 7), (1, 3, 9), (1, 6, 14), (6, 3, 2), (6, 5, 9), (3, 2, 10), (3, 4, 11), (2, 4, 15), (6, 5, 9), (5, 4, 6)]) >>> dijkstra(graph, 1, 6) 11 | 3.904464 | 4 |
scripts/make_simulations.py | Karthikprabhu22/lynx | 0 | 6622092 | <filename>scripts/make_simulations.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#######################################################################
#
# This script is used to generate simulations for B-mode forecasting.
#
#
#
#
#
#
#
#######################################################################
import logging
from pathlib import Path
import sys
import click
import h5py
import lynx
from hoover.tools import WhiteNoise
import pysm
import pysm.units as u
import jax.numpy as np
import numpy as old_np
import yaml
import healpy as hp
_logger = logging.getLogger(__name__)
@click.command()
@click.option('-d', '--config', 'cfg_path', required=True,
type=click.Path(exists=True), help='path to config file')
@click.option('--quiet', 'log_level', flag_value=logging.WARNING, default=True)
@click.option('-v', '--verbose', 'log_level', flag_value=logging.INFO)
@click.option('-vv', '--very-verbose', 'log_level', flag_value=logging.DEBUG)
@click.version_option(lynx.__version__)
def main(cfg_path: Path, log_level: int):
logging.basicConfig(stream=sys.stdout,
level=log_level,
datefmt='%Y-%m-%d %H:%M',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
with open(cfg_path) as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
freqs = old_np.array(cfg['frequencies']) * u. GHz
nside = cfg['nside']
components = cfg['skymodel']['args']
sensitivities = cfg['sensitivities']
nmc = cfg['monte_carlo']
beams = cfg['fwhm']
outpath = cfg['hdf5_path']
half_mission_noise = cfg['half_mission_noise']
cosmo_path = cfg['cosmo_path']
if half_mission_noise:
sensitivities = [s * np.sqrt(2.) for s in sensitivities]
logging.info(f"""
Frequencies: {freqs!s}
Nside: {nside:04d}
Components: {components!s}
Sensitivities: {sensitivities!s}
Number of Monte Carlo Simulations: {nmc:05d}
""")
# Generate sky signal
sky = pysm.Sky(nside=nside, **components)
fgnd = (sky.get_emission(f) for f in freqs)
fgnd = (hp.smoothing(s, fwhm=b / 60. * np.pi / 180., verbose=False)[None, 1:, ...] for b, s in zip(beams, fgnd))
fgnd = np.concatenate(list(fgnd))
# Make noise generator
sens = np.array(sensitivities) * u.uK_CMB
sens = np.array([w.to(u.uK_RJ, equivalencies=u.cmb_equivalencies(f)) for w, f in zip(sens, freqs)])
noise_generator = WhiteNoise(sens=sens)
cov = noise_generator.get_pix_var_map(nside)
logging.info(f"Output path: {outpath}")
with h5py.File(outpath, 'a') as f:
f.attrs.update({'config': yaml.dump(cfg)})
maps = f.require_group('maps')
monte_carlo = maps.require_group('monte_carlo')
components = maps.require_group('components')
data_dset = monte_carlo.require_dataset('data', shape=(nmc, len(freqs), 2, hp.nside2npix(nside)), dtype=np.float32)
cov_dset = monte_carlo.require_dataset('cov', shape=(nmc, len(freqs), 2, hp.nside2npix(nside)), dtype=np.float32)
cov_dset[...] = cov.astype(np.float32)
for imc in np.arange(nmc)[::2]:
logging.info(f"Working on CMB MC: {imc:04d}")
cmb = get_cmb_realization(nside, cosmo_path, beams, freqs, seed=imc)
for j in range(imc, imc + 2):
logging.info(f"Working on noise MC: {j:04d}")
data = fgnd + cmb + noise_generator.map(nside, seed=j)
logging.debug(f"Data shape: {data.shape!r}")
data_dset[j] = data
def get_cmb_realization(nside, cl_path, beams, frequencies, seed=100):
with h5py.File(f"{cl_path}", 'r') as f:
cl_total = np.swapaxes(f['lensed_scalar'][...], 0, 1)
cmb = hp.synfast(cl_total, nside, new=True, verbose=False)
cmb = [hp.smoothing(cmb, fwhm=b / 60. * np.pi/180., verbose=False)[1:] * u.uK_CMB for b in beams]
return np.array([c.to(u.uK_RJ, equivalencies=u.cmb_equivalencies(f)) for c, f in zip(cmb, frequencies)])
if __name__ == '__main__':
main() | <filename>scripts/make_simulations.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#######################################################################
#
# This script is used to generate simulations for B-mode forecasting.
#
#
#
#
#
#
#
#######################################################################
import logging
from pathlib import Path
import sys
import click
import h5py
import lynx
from hoover.tools import WhiteNoise
import pysm
import pysm.units as u
import jax.numpy as np
import numpy as old_np
import yaml
import healpy as hp
_logger = logging.getLogger(__name__)
@click.command()
@click.option('-d', '--config', 'cfg_path', required=True,
type=click.Path(exists=True), help='path to config file')
@click.option('--quiet', 'log_level', flag_value=logging.WARNING, default=True)
@click.option('-v', '--verbose', 'log_level', flag_value=logging.INFO)
@click.option('-vv', '--very-verbose', 'log_level', flag_value=logging.DEBUG)
@click.version_option(lynx.__version__)
def main(cfg_path: Path, log_level: int):
logging.basicConfig(stream=sys.stdout,
level=log_level,
datefmt='%Y-%m-%d %H:%M',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
with open(cfg_path) as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
freqs = old_np.array(cfg['frequencies']) * u. GHz
nside = cfg['nside']
components = cfg['skymodel']['args']
sensitivities = cfg['sensitivities']
nmc = cfg['monte_carlo']
beams = cfg['fwhm']
outpath = cfg['hdf5_path']
half_mission_noise = cfg['half_mission_noise']
cosmo_path = cfg['cosmo_path']
if half_mission_noise:
sensitivities = [s * np.sqrt(2.) for s in sensitivities]
logging.info(f"""
Frequencies: {freqs!s}
Nside: {nside:04d}
Components: {components!s}
Sensitivities: {sensitivities!s}
Number of Monte Carlo Simulations: {nmc:05d}
""")
# Generate sky signal
sky = pysm.Sky(nside=nside, **components)
fgnd = (sky.get_emission(f) for f in freqs)
fgnd = (hp.smoothing(s, fwhm=b / 60. * np.pi / 180., verbose=False)[None, 1:, ...] for b, s in zip(beams, fgnd))
fgnd = np.concatenate(list(fgnd))
# Make noise generator
sens = np.array(sensitivities) * u.uK_CMB
sens = np.array([w.to(u.uK_RJ, equivalencies=u.cmb_equivalencies(f)) for w, f in zip(sens, freqs)])
noise_generator = WhiteNoise(sens=sens)
cov = noise_generator.get_pix_var_map(nside)
logging.info(f"Output path: {outpath}")
with h5py.File(outpath, 'a') as f:
f.attrs.update({'config': yaml.dump(cfg)})
maps = f.require_group('maps')
monte_carlo = maps.require_group('monte_carlo')
components = maps.require_group('components')
data_dset = monte_carlo.require_dataset('data', shape=(nmc, len(freqs), 2, hp.nside2npix(nside)), dtype=np.float32)
cov_dset = monte_carlo.require_dataset('cov', shape=(nmc, len(freqs), 2, hp.nside2npix(nside)), dtype=np.float32)
cov_dset[...] = cov.astype(np.float32)
for imc in np.arange(nmc)[::2]:
logging.info(f"Working on CMB MC: {imc:04d}")
cmb = get_cmb_realization(nside, cosmo_path, beams, freqs, seed=imc)
for j in range(imc, imc + 2):
logging.info(f"Working on noise MC: {j:04d}")
data = fgnd + cmb + noise_generator.map(nside, seed=j)
logging.debug(f"Data shape: {data.shape!r}")
data_dset[j] = data
def get_cmb_realization(nside, cl_path, beams, frequencies, seed=100):
with h5py.File(f"{cl_path}", 'r') as f:
cl_total = np.swapaxes(f['lensed_scalar'][...], 0, 1)
cmb = hp.synfast(cl_total, nside, new=True, verbose=False)
cmb = [hp.smoothing(cmb, fwhm=b / 60. * np.pi/180., verbose=False)[1:] * u.uK_CMB for b in beams]
return np.array([c.to(u.uK_RJ, equivalencies=u.cmb_equivalencies(f)) for c, f in zip(cmb, frequencies)])
if __name__ == '__main__':
main() | en | 0.250339 | #!/usr/bin/env python # -*- coding: utf-8 -*- ####################################################################### # # This script is used to generate simulations for B-mode forecasting. # # # # # # # ####################################################################### Frequencies: {freqs!s} Nside: {nside:04d} Components: {components!s} Sensitivities: {sensitivities!s} Number of Monte Carlo Simulations: {nmc:05d} # Generate sky signal # Make noise generator | 2.09422 | 2 |
tests/unit_tests.py | peterjiz/pytorch-colors | 187 | 6622093 | import torch
import torch.cuda
from torch.autograd import Variable
import unittest
import sys
import pytorch_colors as colors
class TestColorConversion(unittest.TestCase):
def test_yuv(self):
a = torch.randn(1, 3, 512, 512).clamp(0, 1)
a_yuv = colors.rgb_to_yuv(a)
a_ = colors.yuv_to_rgb(a_yuv)
mean_err = (a-a_).abs().mean().item()
self.assertAlmostEqual(mean_err, 0, 6)
def test_ycbcr(self):
a = torch.randn(1, 3, 512, 512).clamp(0, 1)
a_ycbcr = colors.rgb_to_ycbcr(a)
a_ = colors.ycbcr_to_rgb(a_ycbcr)
mean_err = (a-a_).abs().mean().item()
self.assertAlmostEqual(mean_err, 0, 6)
def test_cielab(self):
a = torch.randn(1, 3, 512, 512).clamp(0, 1)
a_lab = colors.rgb_to_lab(a)
a_ = colors.lab_to_rgb(a_lab)
mean_err = (a-a_).abs().mean().item()
self.assertAlmostEqual(mean_err, 0, 6)
def test_hsv(self):
a = torch.randn(1, 3, 512, 512).clamp(0, 1)
a_lab = colors.rgb_to_hsv(a)
a_ = colors.hsv_to_rgb(a_lab)
mean_err = (a-a_).abs().mean().item()
self.assertAlmostEqual(mean_err, 0, 6)
def test_hed(self):
a = torch.randn(1, 3, 512, 512).clamp(0, 1)
a_lab = colors.rgb_to_hed(a)
a_ = colors.hed_to_rgb(a_lab)
mean_err = (a-a_).abs().mean().item()
self.assertAlmostEqual(mean_err, 0, 6)
def test_xyz(self):
a = torch.randn(1, 3, 512, 512).clamp(0, 1)
a_lab = colors.rgb_to_xyz(a)
a_ = colors.xyz_to_rgb(a_lab)
mean_err = (a-a_).abs().mean().item()
self.assertAlmostEqual(mean_err, 0, 6)
class TestCudaConversion(unittest.TestCase):
def test_keep_cuda(self):
a = torch.randn(3, 512, 512).clamp(0, 1).cuda()
a_yuv = colors.rgb_to_yuv(a)
self.assertTrue(a_yuv.is_cuda)
a_ = colors.yuv_to_rgb(a_yuv)
self.assertTrue(a_.is_cuda)
def test_no_cuda(self):
a = torch.randn(3, 512, 512).clamp(0, 1)
a_yuv = colors.rgb_to_yuv(a)
self.assertFalse(a_yuv.is_cuda)
a_ = colors.yuv_to_rgb(a_yuv)
self.assertFalse(a_.is_cuda)
class Test3dTo4dConversion(unittest.TestCase):
def test_3d(self):
a = torch.randn(3, 512, 512).clamp(0, 1)
a_yuv = colors.rgb_to_yuv(a)
self.assertEqual(a_yuv.dim(), 3, 1)
a_ = colors.yuv_to_rgb(a_yuv)
self.assertEqual(a_.dim(), 3, 1)
def test_4d(self):
a = torch.randn(1, 3, 512, 512).clamp(0, 1)
a_yuv = colors.rgb_to_yuv(a)
self.assertEqual(a_yuv.dim(), 4, 1)
a_ = colors.yuv_to_rgb(a_yuv)
self.assertEqual(a_.dim(), 4, 1)
if __name__ == '__main__':
unittest.main()
| import torch
import torch.cuda
from torch.autograd import Variable
import unittest
import sys
import pytorch_colors as colors
class TestColorConversion(unittest.TestCase):
def test_yuv(self):
a = torch.randn(1, 3, 512, 512).clamp(0, 1)
a_yuv = colors.rgb_to_yuv(a)
a_ = colors.yuv_to_rgb(a_yuv)
mean_err = (a-a_).abs().mean().item()
self.assertAlmostEqual(mean_err, 0, 6)
def test_ycbcr(self):
a = torch.randn(1, 3, 512, 512).clamp(0, 1)
a_ycbcr = colors.rgb_to_ycbcr(a)
a_ = colors.ycbcr_to_rgb(a_ycbcr)
mean_err = (a-a_).abs().mean().item()
self.assertAlmostEqual(mean_err, 0, 6)
def test_cielab(self):
a = torch.randn(1, 3, 512, 512).clamp(0, 1)
a_lab = colors.rgb_to_lab(a)
a_ = colors.lab_to_rgb(a_lab)
mean_err = (a-a_).abs().mean().item()
self.assertAlmostEqual(mean_err, 0, 6)
def test_hsv(self):
a = torch.randn(1, 3, 512, 512).clamp(0, 1)
a_lab = colors.rgb_to_hsv(a)
a_ = colors.hsv_to_rgb(a_lab)
mean_err = (a-a_).abs().mean().item()
self.assertAlmostEqual(mean_err, 0, 6)
def test_hed(self):
a = torch.randn(1, 3, 512, 512).clamp(0, 1)
a_lab = colors.rgb_to_hed(a)
a_ = colors.hed_to_rgb(a_lab)
mean_err = (a-a_).abs().mean().item()
self.assertAlmostEqual(mean_err, 0, 6)
def test_xyz(self):
a = torch.randn(1, 3, 512, 512).clamp(0, 1)
a_lab = colors.rgb_to_xyz(a)
a_ = colors.xyz_to_rgb(a_lab)
mean_err = (a-a_).abs().mean().item()
self.assertAlmostEqual(mean_err, 0, 6)
class TestCudaConversion(unittest.TestCase):
def test_keep_cuda(self):
a = torch.randn(3, 512, 512).clamp(0, 1).cuda()
a_yuv = colors.rgb_to_yuv(a)
self.assertTrue(a_yuv.is_cuda)
a_ = colors.yuv_to_rgb(a_yuv)
self.assertTrue(a_.is_cuda)
def test_no_cuda(self):
a = torch.randn(3, 512, 512).clamp(0, 1)
a_yuv = colors.rgb_to_yuv(a)
self.assertFalse(a_yuv.is_cuda)
a_ = colors.yuv_to_rgb(a_yuv)
self.assertFalse(a_.is_cuda)
class Test3dTo4dConversion(unittest.TestCase):
def test_3d(self):
a = torch.randn(3, 512, 512).clamp(0, 1)
a_yuv = colors.rgb_to_yuv(a)
self.assertEqual(a_yuv.dim(), 3, 1)
a_ = colors.yuv_to_rgb(a_yuv)
self.assertEqual(a_.dim(), 3, 1)
def test_4d(self):
a = torch.randn(1, 3, 512, 512).clamp(0, 1)
a_yuv = colors.rgb_to_yuv(a)
self.assertEqual(a_yuv.dim(), 4, 1)
a_ = colors.yuv_to_rgb(a_yuv)
self.assertEqual(a_.dim(), 4, 1)
if __name__ == '__main__':
unittest.main()
| none | 1 | 2.663749 | 3 | |
googledevices/utils/__init__.py | vlebourl/googledevices | 19 | 6622094 | <reponame>vlebourl/googledevices<filename>googledevices/utils/__init__.py
"""Initialize the utils."""
| """Initialize the utils.""" | en | 0.603895 | Initialize the utils. | 1.215801 | 1 |
LeReS/Train/tools/parse_arg_base.py | chensjtu/AdelaiDepth | 0 | 6622095 | import argparse
class BaseOptions():
    """Command-line option container for depth-estimation training.

    Usage: ``opt = BaseOptions().parse()``.  Subclasses may override
    ``initialize`` to register additional arguments before parsing.
    """

    def __init__(self):
        # Flipped to True once ``initialize`` has registered all arguments.
        self.initialized = False

    def initialize(self, parser):
        """Register every training option on *parser* and return it.

        Args:
            parser: an ``argparse.ArgumentParser`` to populate.

        Returns:
            argparse.ArgumentParser: the same parser with all options added.
        """
        # Model / optimization settings.
        parser.add_argument('--backbone', type=str, default='resnet50', help='Select backbone type, resnet50 or resnext101')
        parser.add_argument('--batchsize', type=int, default=2, help='Batch size')
        parser.add_argument('--base_lr', type=float, default=0.001, help='Initial learning rate')
        parser.add_argument('--load_ckpt', help='Checkpoint path to load')
        parser.add_argument('--resume', action='store_true', help='Resume to train')
        parser.add_argument('--epoch', default=50, type=int, help='Total training epochs')
        parser.add_argument('--dataset_list', default=None, nargs='+', help='The names of multiple datasets')
        parser.add_argument('--loss_mode', default='_vnl_ssil_ranking_', help='Select loss to supervise, joint or ranking')
        parser.add_argument('--lr_scheduler_multiepochs', default=[10, 25, 40], nargs='+', type=int, help='Learning rate scheduler step')
        # Logging / checkpointing cadence.
        parser.add_argument('--val_step', default=5000, type=int, help='Validation steps')
        parser.add_argument('--snapshot_iters', default=5000, type=int, help='Checkpoint save iters')
        parser.add_argument('--log_interval', default=10, type=int, help='Log print iters')
        parser.add_argument('--output_dir', type=str, default='./output', help='Output dir')
        parser.add_argument('--use_tfboard', action='store_true', help='Tensorboard to log training info')
        # Data loading.
        parser.add_argument('--dataroot', default='./datasets', required=True, help='Path to images')
        parser.add_argument('--dataset', default='multi', help='Dataset loader name')
        parser.add_argument('--scale_decoder_lr', type=float, default=1, help='Scale learning rate for the decoder')
        parser.add_argument('--thread', default=0, type=int, help='Thread for loading data')
        parser.add_argument('--start_step', default=0, type=int, help='Set start training steps')
        parser.add_argument('--sample_ratio_steps', default=10000, type=int, help='Step for increasing sample ratio')
        parser.add_argument('--sample_start_ratio', default=0.1, type=float, help='Start sample ratio')
        # Distributed-training settings.
        parser.add_argument('--local_rank', type=int, default=0, help='Rank ID for processes')
        parser.add_argument('--nnodes', type=int, default=1, help='Amount of nodes')
        parser.add_argument('--node_rank', type=int, default=0, help='Rank of current node')
        parser.add_argument('--dist_url', type=str, default='tcp://127.0.0.1:22', help='URL specifying how to initialize the process group')
        # parser.add_argument('--optim', default='SGD', help='Select optimizer, SGD or Adam')
        # parser.add_argument('--start_epoch', default=0, type=int, help='Set training epochs')
        # parser.add_argument('--results_dir', type=str, default='./evaluation', help='Output dir')
        # parser.add_argument('--diff_loss_weight', default=1, type=int, help='Step for increasing sample ratio')
        self.initialized = True
        return parser

    def parse(self, args=None):
        """Build the parser, parse options, and return the namespace.

        Args:
            args: optional list of argument strings to parse; when None
                (the default) argparse reads ``sys.argv[1:]``, matching
                the previous behaviour.  Passing an explicit list makes
                the class usable programmatically (e.g. from tests).

        Returns:
            argparse.Namespace: the parsed options (also stored on ``self.opt``).
        """
        parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        self.parser = self.initialize(parser)
        self.opt = self.parser.parse_args(args)
        return self.opt
def print_options(opt, logger=None):
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
message += '{:>25}: {}\n'.format(str(k), str(v))
message += '----------------- End -------------------'
logger.info(message) | import argparse
class BaseOptions():
def __init__(self):
self.initialized = False
def initialize(self, parser):
parser.add_argument('--backbone', type=str, default='resnet50', help='Select backbone type, resnet50 or resnext101')
parser.add_argument('--batchsize', type=int, default=2, help='Batch size')
parser.add_argument('--base_lr', type=float, default=0.001, help='Initial learning rate')
parser.add_argument('--load_ckpt', help='Checkpoint path to load')
parser.add_argument('--resume', action='store_true', help='Resume to train')
parser.add_argument('--epoch', default=50, type=int, help='Total training epochs')
parser.add_argument('--dataset_list', default=None, nargs='+', help='The names of multiple datasets')
parser.add_argument('--loss_mode', default='_vnl_ssil_ranking_', help='Select loss to supervise, joint or ranking')
parser.add_argument('--lr_scheduler_multiepochs', default=[10, 25, 40], nargs='+', type=int, help='Learning rate scheduler step')
parser.add_argument('--val_step', default=5000, type=int, help='Validation steps')
parser.add_argument('--snapshot_iters', default=5000, type=int, help='Checkpoint save iters')
parser.add_argument('--log_interval', default=10, type=int, help='Log print iters')
parser.add_argument('--output_dir', type=str, default='./output', help='Output dir')
parser.add_argument('--use_tfboard', action='store_true', help='Tensorboard to log training info')
parser.add_argument('--dataroot', default='./datasets', required=True, help='Path to images')
parser.add_argument('--dataset', default='multi', help='Dataset loader name')
parser.add_argument('--scale_decoder_lr', type=float, default=1, help='Scale learning rate for the decoder')
parser.add_argument('--thread', default=0, type=int, help='Thread for loading data')
parser.add_argument('--start_step', default=0, type=int, help='Set start training steps')
parser.add_argument('--sample_ratio_steps', default=10000, type=int, help='Step for increasing sample ratio')
parser.add_argument('--sample_start_ratio', default=0.1, type=float, help='Start sample ratio')
parser.add_argument('--local_rank', type=int, default=0, help='Rank ID for processes')
parser.add_argument('--nnodes', type=int, default=1, help='Amount of nodes')
parser.add_argument('--node_rank', type=int, default=0, help='Rank of current node')
parser.add_argument('--dist_url', type=str, default='tcp://127.0.0.1:22', help='URL specifying how to initialize the process group')
# parser.add_argument('--optim', default='SGD', help='Select optimizer, SGD or Adam')
# parser.add_argument('--start_epoch', default=0, type=int, help='Set training epochs')
# parser.add_argument('--results_dir', type=str, default='./evaluation', help='Output dir')
# parser.add_argument('--diff_loss_weight', default=1, type=int, help='Step for increasing sample ratio')
self.initialized = True
return parser
def parse(self):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.parser = self.initialize(parser)
self.opt = parser.parse_args()
return self.opt
def print_options(opt, logger=None):
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
message += '{:>25}: {}\n'.format(str(k), str(v))
message += '----------------- End -------------------'
logger.info(message) | en | 0.048483 | # parser.add_argument('--optim', default='SGD', help='Select optimizer, SGD or Adam') # parser.add_argument('--start_epoch', default=0, type=int, help='Set training epochs') # parser.add_argument('--results_dir', type=str, default='./evaluation', help='Output dir') # parser.add_argument('--diff_loss_weight', default=1, type=int, help='Step for increasing sample ratio') | 2.273029 | 2 |
catalog/bindings/csw/polygon.py | NIVANorge/s-enda-playground | 0 | 6622096 | <gh_stars>0
from dataclasses import dataclass
from bindings.csw.polygon_type import PolygonType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class Polygon(PolygonType):
class Meta:
namespace = "http://www.opengis.net/gml"
| from dataclasses import dataclass
from bindings.csw.polygon_type import PolygonType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class Polygon(PolygonType):
class Meta:
namespace = "http://www.opengis.net/gml" | none | 1 | 2.231394 | 2 | |
example/gen_chinese_data.py | johnson7788/OpenNRE | 5 | 6622097 | <reponame>johnson7788/OpenNRE<filename>example/gen_chinese_data.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2021/3/19 10:58 上午
# @File : gen_chinese_data.py
# @Author: johnson
# @Contact : github: johnson7788
# @Desc : 根据Chinese-Literature-NER-RE-Dataset提供的数据格式,生成我们需要的训练数据格式
# 由于Chinese-Literature-NER-RE-Dataset是文档级的数据,所以其实需要更高效的训练和预测方法
import os
import json
import re
import random
def gen_rel2id(train_dir, destination='/Users/admin/git/OpenNRE/benchmark/liter/liter_rel2id.json'):
"""
根据Chinese-Literature-NER-RE-Dataset的训练目录生成关系到id的映射
:param train_dir: *.ann和*.txt结尾的文件
:param destination: 输出的目标json文件
:return:
"""
relations = []
files = os.listdir(train_dir)
#过滤出标注的文件
files = [f for f in files if f.endswith('.ann')]
for file in files:
annfile = os.path.join(train_dir,file)
with open(annfile, 'r') as f:
for line in f:
if line.startswith('R'):
line = line.strip()
line_split = re.split('[\t ]', line)
relation = line_split[1]
if relation == 'Coreference':
print(f"文件{annfile},行 {line}是有问题的")
if relation not in relations:
print(f'加入关系: {relation}')
relations.append(relation)
desdir = os.path.dirname(destination)
if not os.path.exists(desdir):
os.makedirs(desdir)
assert len(relations) == 9, "关系必须是9个才对"
rel2id = {rel:idx for idx, rel in enumerate(relations)}
with open(destination, 'w', encoding='utf-8') as f:
json.dump(rel2id, f)
def gen_data(source_dir, des_dir, mini_data = False, truncate=-1):
"""
根据原始目录生成目标训练或测试等文件
:param source_dir: eg: /Users/admin/git/Chinese-Literature-NER-RE-Dataset/relation_extraction/Training
:param des_dir: eg: /Users/admin/git/OpenNRE/benchmark/liter
:return:
"""
#保存处理好的数据
data = []
files = os.listdir(source_dir)
# 过滤出标注的文件
ann_files = [f for f in files if f.endswith('.ann')]
text_files = [f for f in files if f.endswith('.txt')]
#转出成不带文件后缀的key和文件名为value的字典
ann_file_dict = {f.split('.')[0]:f for f in ann_files}
text_file_dict = {f.split('.')[0]: f for f in text_files}
for k, v in ann_file_dict.items():
if text_file_dict.get(k) is None:
print(f"文件{v} 不存在对应的txt文件,错误")
continue
#开始读取ann 文件
annfile = os.path.join(source_dir, v)
text_name = text_file_dict.get(k)
textfile = os.path.join(source_dir, text_name)
with open(textfile, 'r') as f:
text = ""
text_len = []
for line in f:
text_len.append(len(line))
if len(line) == 61:
#固定的行长度是61
line = line.strip()
text += line
# text = f.read()
#保存所有实体
entities = []
#保存所有关系
rels = []
with open(annfile, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('R'):
line_split = re.split('[\t ]', line)
assert len(line_split) == 4, f"关系{annfile}的行 {line}不为4项"
rels.append(line_split)
if line.startswith('T'):
line_split = re.split('[\t ]', line)
if len(line_split) == 7:
# 如果不为5,那么是有逗号隔开的,例如 T81 Metric 539 540;541 542 百 鸟
# 只需要T81 Metric 539 540 百
pos_stop = line_split[3].split(';')[0]
line_split = line_split[:3] + [pos_stop] + [line_split[5]]
elif len(line_split) == 5:
pass
else:
raise Exception(f"实体 {annfile} 的行 {line} 不为5项或者7项,有问题,请检查")
#把实体的索引,进行减法,因为每61个字符一行,我们去掉了一部分'\n',所以做减法
pos_start = int(line_split[2])
pos_stop = int(line_split[3])
if pos_start > 61:
pos_remind1 = pos_start // 61
pos_start = pos_start -pos_remind1
if pos_stop > 61:
pos_remind2 = pos_stop //61
pos_stop = pos_stop - pos_remind2
line_split = line_split[:2] + [pos_start, pos_stop] + [line_split[-1]]
entities.append(line_split)
#检查实体, 保存成实体id:实体的type,实体start_idx, 实体stop_idx,实体的值
ent_dict = {}
for entity in entities:
entity_id = entity[0]
if ent_dict.get(entity_id) is not None:
print(f"{annfile}: 实体id已经存在过了,冲突的id,请检查 {entity}")
ent_dict[entity_id] = entity[1:]
#开始分析所有关系
for rel in rels:
relation = rel[1]
arg1, h1_entityid = rel[2].split(':')
assert arg1 == 'Arg1', f"{rel}分隔的首个字符不是Arg1"
#实体1的id处理
h1_entity = ent_dict.get(h1_entityid)
if h1_entity is None:
print(f"关系{rel}中对应的实体id{h1_entityid}是不存在的,请检查")
h1_type,h1_pos_start, h1_pos_stop, h1_entity_value = h1_entity
h1_pos_start = int(h1_pos_start)
h1_pos_stop = int(h1_pos_stop)
arg2, h2_entityid = rel[3].split(':')
assert arg2 == 'Arg2', f"{rel}分隔的首个字符不是Arg2"
#实体2的id处理
h2_entity = ent_dict.get(h2_entityid)
if h2_entity is None:
print(f"关系{rel}中对应的实体id{h2_entityid}是不存在的,请检查")
h2_type, h2_pos_start, h2_pos_stop, h2_entity_value = h2_entity
h2_pos_start = int(h2_pos_start)
h2_pos_stop = int(h2_pos_stop)
# 检查关键字的位置是否匹配
def get_true_pos(text, value, pos1, pos2, rnum=16):
#从上下加8个字符获取真实的位置
index_true_text = text[pos1-rnum:pos2+rnum]
print(f"实体1: {value}位置不匹配, 上下的2个位置是: {index_true_text},尝试修复")
newpos1, newpos2 = pos1, pos2
if value in index_true_text:
sres = re.finditer(re.escape(value), text)
for sv in sres:
if sv.start() > pos1-rnum and sv.end() < pos2+rnum:
newpos1, newpos2 = sv.start(), sv.end()
break
else:
print("通过正则没有匹配到,请检查,用最后一个位置作为索引")
newpos1, newpos2 = sv.start(), sv.end()
else:
print("上下浮动了16个,仍然没有匹配,请检查")
sres = re.finditer(re.escape(value), text)
min_dist = 100
for sv in sres:
min_dist = min(min_dist, sv.start() - pos1, sv.end() - pos2)
if min_dist in [sv.start() - pos1, sv.end() - pos2]:
newpos1, newpos2 = sv.start(), sv.end()
if text[newpos1:newpos2] != value:
assert text[newpos1:newpos2] == value, "仍然是匹配错误的位置,请检查"
return newpos1, newpos2
# 验证下文本中的实体在文档中的位置时正确的
if text[h1_pos_start:h1_pos_stop] != h1_entity_value:
h1_pos_start, h1_pos_stop = get_true_pos(text=text,value=h1_entity_value, pos1=h1_pos_start, pos2=h1_pos_stop)
if text[h2_pos_start:h2_pos_stop] != h2_entity_value:
h2_pos_start, h2_pos_stop = get_true_pos(text=text,value=h2_entity_value, pos1=h2_pos_start, pos2=h2_pos_stop)
if truncate != -1:
if abs(h1_pos_start - h2_pos_stop) > truncate:
print(f'2个实体间的距离很大,超过了{truncate}长度')
else:
#开始截断数据, 只保留最大长度
add_length = truncate - abs(h1_pos_start - h2_pos_stop)
added = int(add_length/2)
if h1_pos_start < h2_pos_stop:
truncate_start = h1_pos_start - added
truncate_end = h2_pos_stop + added
else:
truncate_start = h2_pos_stop - added
truncate_end = h1_pos_start + added
if truncate_start <0:
truncate_start = 0
truncate_text = text[truncate_start:truncate_end]
else:
truncate_text = text
# 开始整理成一条数据
one_data = {
'text': truncate_text,
'h': {
'name': h1_entity_value,
'id': h1_entityid,
'pos': [h1_pos_start, h1_pos_stop]
},
't': {
'name': h2_entity_value,
'id': h2_entityid,
'pos': [h2_pos_start, h2_pos_stop]
},
'relation': relation
}
data.append(one_data)
train_file = os.path.join(des_dir, 'liter_train.txt')
dev_file = os.path.join(des_dir, 'liter_test.txt')
test_file = os.path.join(des_dir, 'liter_val.txt')
print(f"一共处理了{len(ann_files)}个文件,生成{len(data)}条数据")
random.shuffle(data)
train_num = int(len(data) * 0.8)
dev_num = int(len(data) * 0.1)
train_data = data[:train_num]
dev_data = data[train_num:train_num+dev_num]
test_data = data[train_num+dev_num:]
if mini_data:
#选择前500条样本测试
train_data = train_data[:500]
dev_data = dev_data[:100]
test_data = test_data[:100]
with open(train_file, 'w', encoding='utf-8') as f:
for d in train_data:
f.write(json.dumps(d) + '\n')
with open(dev_file, 'w', encoding='utf-8') as f:
for d in dev_data:
f.write(json.dumps(d)+ '\n')
with open(test_file, 'w', encoding='utf-8') as f:
for d in test_data:
f.write(json.dumps(d)+ '\n')
print(f"训练集数量{len(train_data)}, 测试集数量{len(test_data)},开发集数量{len(dev_data)}")
if __name__ == '__main__':
# gen_rel2id(train_dir='/Users/admin/git/Chinese-Literature-NER-RE-Dataset/relation_extraction/Training')
gen_data(source_dir='/Users/admin/git/Chinese-Literature-NER-RE-Dataset/relation_extraction/Training', des_dir='/Users/admin/git/OpenNRE/benchmark/liter', mini_data=False, truncate=196) | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2021/3/19 10:58 上午
# @File : gen_chinese_data.py
# @Author: johnson
# @Contact : github: johnson7788
# @Desc : 根据Chinese-Literature-NER-RE-Dataset提供的数据格式,生成我们需要的训练数据格式
# 由于Chinese-Literature-NER-RE-Dataset是文档级的数据,所以其实需要更高效的训练和预测方法
import os
import json
import re
import random
def gen_rel2id(train_dir, destination='/Users/admin/git/OpenNRE/benchmark/liter/liter_rel2id.json'):
"""
根据Chinese-Literature-NER-RE-Dataset的训练目录生成关系到id的映射
:param train_dir: *.ann和*.txt结尾的文件
:param destination: 输出的目标json文件
:return:
"""
relations = []
files = os.listdir(train_dir)
#过滤出标注的文件
files = [f for f in files if f.endswith('.ann')]
for file in files:
annfile = os.path.join(train_dir,file)
with open(annfile, 'r') as f:
for line in f:
if line.startswith('R'):
line = line.strip()
line_split = re.split('[\t ]', line)
relation = line_split[1]
if relation == 'Coreference':
print(f"文件{annfile},行 {line}是有问题的")
if relation not in relations:
print(f'加入关系: {relation}')
relations.append(relation)
desdir = os.path.dirname(destination)
if not os.path.exists(desdir):
os.makedirs(desdir)
assert len(relations) == 9, "关系必须是9个才对"
rel2id = {rel:idx for idx, rel in enumerate(relations)}
with open(destination, 'w', encoding='utf-8') as f:
json.dump(rel2id, f)
def gen_data(source_dir, des_dir, mini_data = False, truncate=-1):
"""
根据原始目录生成目标训练或测试等文件
:param source_dir: eg: /Users/admin/git/Chinese-Literature-NER-RE-Dataset/relation_extraction/Training
:param des_dir: eg: /Users/admin/git/OpenNRE/benchmark/liter
:return:
"""
#保存处理好的数据
data = []
files = os.listdir(source_dir)
# 过滤出标注的文件
ann_files = [f for f in files if f.endswith('.ann')]
text_files = [f for f in files if f.endswith('.txt')]
#转出成不带文件后缀的key和文件名为value的字典
ann_file_dict = {f.split('.')[0]:f for f in ann_files}
text_file_dict = {f.split('.')[0]: f for f in text_files}
for k, v in ann_file_dict.items():
if text_file_dict.get(k) is None:
print(f"文件{v} 不存在对应的txt文件,错误")
continue
#开始读取ann 文件
annfile = os.path.join(source_dir, v)
text_name = text_file_dict.get(k)
textfile = os.path.join(source_dir, text_name)
with open(textfile, 'r') as f:
text = ""
text_len = []
for line in f:
text_len.append(len(line))
if len(line) == 61:
#固定的行长度是61
line = line.strip()
text += line
# text = f.read()
#保存所有实体
entities = []
#保存所有关系
rels = []
with open(annfile, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('R'):
line_split = re.split('[\t ]', line)
assert len(line_split) == 4, f"关系{annfile}的行 {line}不为4项"
rels.append(line_split)
if line.startswith('T'):
line_split = re.split('[\t ]', line)
if len(line_split) == 7:
# 如果不为5,那么是有逗号隔开的,例如 T81 Metric 539 540;541 542 百 鸟
# 只需要T81 Metric 539 540 百
pos_stop = line_split[3].split(';')[0]
line_split = line_split[:3] + [pos_stop] + [line_split[5]]
elif len(line_split) == 5:
pass
else:
raise Exception(f"实体 {annfile} 的行 {line} 不为5项或者7项,有问题,请检查")
#把实体的索引,进行减法,因为每61个字符一行,我们去掉了一部分'\n',所以做减法
pos_start = int(line_split[2])
pos_stop = int(line_split[3])
if pos_start > 61:
pos_remind1 = pos_start // 61
pos_start = pos_start -pos_remind1
if pos_stop > 61:
pos_remind2 = pos_stop //61
pos_stop = pos_stop - pos_remind2
line_split = line_split[:2] + [pos_start, pos_stop] + [line_split[-1]]
entities.append(line_split)
#检查实体, 保存成实体id:实体的type,实体start_idx, 实体stop_idx,实体的值
ent_dict = {}
for entity in entities:
entity_id = entity[0]
if ent_dict.get(entity_id) is not None:
print(f"{annfile}: 实体id已经存在过了,冲突的id,请检查 {entity}")
ent_dict[entity_id] = entity[1:]
#开始分析所有关系
for rel in rels:
relation = rel[1]
arg1, h1_entityid = rel[2].split(':')
assert arg1 == 'Arg1', f"{rel}分隔的首个字符不是Arg1"
#实体1的id处理
h1_entity = ent_dict.get(h1_entityid)
if h1_entity is None:
print(f"关系{rel}中对应的实体id{h1_entityid}是不存在的,请检查")
h1_type,h1_pos_start, h1_pos_stop, h1_entity_value = h1_entity
h1_pos_start = int(h1_pos_start)
h1_pos_stop = int(h1_pos_stop)
arg2, h2_entityid = rel[3].split(':')
assert arg2 == 'Arg2', f"{rel}分隔的首个字符不是Arg2"
#实体2的id处理
h2_entity = ent_dict.get(h2_entityid)
if h2_entity is None:
print(f"关系{rel}中对应的实体id{h2_entityid}是不存在的,请检查")
h2_type, h2_pos_start, h2_pos_stop, h2_entity_value = h2_entity
h2_pos_start = int(h2_pos_start)
h2_pos_stop = int(h2_pos_stop)
# 检查关键字的位置是否匹配
def get_true_pos(text, value, pos1, pos2, rnum=16):
#从上下加8个字符获取真实的位置
index_true_text = text[pos1-rnum:pos2+rnum]
print(f"实体1: {value}位置不匹配, 上下的2个位置是: {index_true_text},尝试修复")
newpos1, newpos2 = pos1, pos2
if value in index_true_text:
sres = re.finditer(re.escape(value), text)
for sv in sres:
if sv.start() > pos1-rnum and sv.end() < pos2+rnum:
newpos1, newpos2 = sv.start(), sv.end()
break
else:
print("通过正则没有匹配到,请检查,用最后一个位置作为索引")
newpos1, newpos2 = sv.start(), sv.end()
else:
print("上下浮动了16个,仍然没有匹配,请检查")
sres = re.finditer(re.escape(value), text)
min_dist = 100
for sv in sres:
min_dist = min(min_dist, sv.start() - pos1, sv.end() - pos2)
if min_dist in [sv.start() - pos1, sv.end() - pos2]:
newpos1, newpos2 = sv.start(), sv.end()
if text[newpos1:newpos2] != value:
assert text[newpos1:newpos2] == value, "仍然是匹配错误的位置,请检查"
return newpos1, newpos2
# 验证下文本中的实体在文档中的位置时正确的
if text[h1_pos_start:h1_pos_stop] != h1_entity_value:
h1_pos_start, h1_pos_stop = get_true_pos(text=text,value=h1_entity_value, pos1=h1_pos_start, pos2=h1_pos_stop)
if text[h2_pos_start:h2_pos_stop] != h2_entity_value:
h2_pos_start, h2_pos_stop = get_true_pos(text=text,value=h2_entity_value, pos1=h2_pos_start, pos2=h2_pos_stop)
if truncate != -1:
if abs(h1_pos_start - h2_pos_stop) > truncate:
print(f'2个实体间的距离很大,超过了{truncate}长度')
else:
#开始截断数据, 只保留最大长度
add_length = truncate - abs(h1_pos_start - h2_pos_stop)
added = int(add_length/2)
if h1_pos_start < h2_pos_stop:
truncate_start = h1_pos_start - added
truncate_end = h2_pos_stop + added
else:
truncate_start = h2_pos_stop - added
truncate_end = h1_pos_start + added
if truncate_start <0:
truncate_start = 0
truncate_text = text[truncate_start:truncate_end]
else:
truncate_text = text
# 开始整理成一条数据
one_data = {
'text': truncate_text,
'h': {
'name': h1_entity_value,
'id': h1_entityid,
'pos': [h1_pos_start, h1_pos_stop]
},
't': {
'name': h2_entity_value,
'id': h2_entityid,
'pos': [h2_pos_start, h2_pos_stop]
},
'relation': relation
}
data.append(one_data)
train_file = os.path.join(des_dir, 'liter_train.txt')
dev_file = os.path.join(des_dir, 'liter_test.txt')
test_file = os.path.join(des_dir, 'liter_val.txt')
print(f"一共处理了{len(ann_files)}个文件,生成{len(data)}条数据")
random.shuffle(data)
train_num = int(len(data) * 0.8)
dev_num = int(len(data) * 0.1)
train_data = data[:train_num]
dev_data = data[train_num:train_num+dev_num]
test_data = data[train_num+dev_num:]
if mini_data:
#选择前500条样本测试
train_data = train_data[:500]
dev_data = dev_data[:100]
test_data = test_data[:100]
with open(train_file, 'w', encoding='utf-8') as f:
for d in train_data:
f.write(json.dumps(d) + '\n')
with open(dev_file, 'w', encoding='utf-8') as f:
for d in dev_data:
f.write(json.dumps(d)+ '\n')
with open(test_file, 'w', encoding='utf-8') as f:
for d in test_data:
f.write(json.dumps(d)+ '\n')
print(f"训练集数量{len(train_data)}, 测试集数量{len(test_data)},开发集数量{len(dev_data)}")
if __name__ == '__main__':
# gen_rel2id(train_dir='/Users/admin/git/Chinese-Literature-NER-RE-Dataset/relation_extraction/Training')
gen_data(source_dir='/Users/admin/git/Chinese-Literature-NER-RE-Dataset/relation_extraction/Training', des_dir='/Users/admin/git/OpenNRE/benchmark/liter', mini_data=False, truncate=196) | zh | 0.785852 | #!/usr/bin/env python # -*- coding: utf-8 -*- # @Date : 2021/3/19 10:58 上午 # @File : gen_chinese_data.py # @Author: johnson # @Contact : github: johnson7788 # @Desc : 根据Chinese-Literature-NER-RE-Dataset提供的数据格式,生成我们需要的训练数据格式 # 由于Chinese-Literature-NER-RE-Dataset是文档级的数据,所以其实需要更高效的训练和预测方法 根据Chinese-Literature-NER-RE-Dataset的训练目录生成关系到id的映射 :param train_dir: *.ann和*.txt结尾的文件 :param destination: 输出的目标json文件 :return: #过滤出标注的文件 根据原始目录生成目标训练或测试等文件 :param source_dir: eg: /Users/admin/git/Chinese-Literature-NER-RE-Dataset/relation_extraction/Training :param des_dir: eg: /Users/admin/git/OpenNRE/benchmark/liter :return: #保存处理好的数据 # 过滤出标注的文件 #转出成不带文件后缀的key和文件名为value的字典 #开始读取ann 文件 #固定的行长度是61 # text = f.read() #保存所有实体 #保存所有关系 # 如果不为5,那么是有逗号隔开的,例如 T81 Metric 539 540;541 542 百 鸟 # 只需要T81 Metric 539 540 百 #把实体的索引,进行减法,因为每61个字符一行,我们去掉了一部分'\n',所以做减法 #检查实体, 保存成实体id:实体的type,实体start_idx, 实体stop_idx,实体的值 #开始分析所有关系 #实体1的id处理 #实体2的id处理 # 检查关键字的位置是否匹配 #从上下加8个字符获取真实的位置 # 验证下文本中的实体在文档中的位置时正确的 #开始截断数据, 只保留最大长度 # 开始整理成一条数据 #选择前500条样本测试 # gen_rel2id(train_dir='/Users/admin/git/Chinese-Literature-NER-RE-Dataset/relation_extraction/Training') | 2.56416 | 3 |
bias_detector/fuzzy_names_from_emails/FullName.py | yairhoresh/bias-detector | 50 | 6622098 | <reponame>yairhoresh/bias-detector<gh_stars>10-100
import pandas as pd
class FullName:
def __init__(self, first_name: str, last_name: str) -> None:
self.first_name = first_name
self.last_name = last_name
def is_empty(self) -> bool:
return (self.first_name is None or self.first_name == '') \
and (self.last_name is None or self.last_name == '')
def is_full(self):
return self.first_name is not None and self.first_name != '' \
and \
self.last_name is not None and self.last_name != ''
def to_series(self) -> pd.Series:
return pd.Series({'first_name': self.first_name, 'last_name': self.last_name})
| import pandas as pd
class FullName:
def __init__(self, first_name: str, last_name: str) -> None:
self.first_name = first_name
self.last_name = last_name
def is_empty(self) -> bool:
return (self.first_name is None or self.first_name == '') \
and (self.last_name is None or self.last_name == '')
def is_full(self):
return self.first_name is not None and self.first_name != '' \
and \
self.last_name is not None and self.last_name != ''
def to_series(self) -> pd.Series:
return pd.Series({'first_name': self.first_name, 'last_name': self.last_name}) | none | 1 | 3.276055 | 3 | |
datasets/general_dataset.py | huhongjun/3d-semantic-segmentation | 98 | 6622099 | import os
import numpy as np
import datasets.color_constants as cc
from tools.lazy_decorator import *
from typing import Tuple, List, Dict
import logging
class GeneralDataset:
"""
Class used for reading in datasets for training/testing.
Parameterized in order to handle different kinds of datasets (e.g. k-fold datasets)
"""
@property
def data_path(self) -> str:
return self._data_path
@property
def data(self) -> List[np.ndarray]:
return self._data
@property
def full_sized_data(self) -> Dict[str, np.ndarray]:
return self._full_sized_data
@property
def file_names(self) -> List[str]:
return self._file_names
@property
def train_pc_idx(self) -> List[int]:
return self._train_pc_idx
@property
def test_pc_idx(self) -> List[int]:
return self._test_pc_idx
def __init__(self, data_path: str, is_train: bool, test_sets: list,
downsample_prefix: str, is_colors: bool, is_laser: bool, n_classes=None):
self._test_sets = test_sets
self._downsample_prefix = downsample_prefix
self._is_colors = is_colors
self._is_laser = is_laser
# it is possible that there is no class information given for test sets
if n_classes is None:
self._is_class = True
else:
self._is_class = False
self._num_classes = n_classes
self._data_path = data_path
self._data, self._file_names, self._full_sized_data = self._load(is_train)
# log some dataset properties
logging.debug(f"number of features: {self.num_features}")
logging.debug(f"number of classes: {self.num_classes}")
logging.debug(f"number of training samples: {len(self.train_pc_idx)}")
logging.debug(f"number of test samples: {len(self.test_pc_idx)}")
@lazy_property
def num_classes(self) -> int:
"""
calculate the number of unique class labels if class information is given in npy-file.
Otherwise, just return the number of classes which have been defined in the constructor
:return: number of classes for this dataset
"""
if self._is_class:
# assuming that labels are in the last column
# counting unique class labels of all pointclouds
_num_classes = len(np.unique(np.concatenate([np.unique(pointcloud[:, -1])
for pointcloud in self.data])))
if _num_classes > len(self.label_colors()):
logging.warning(f"There are more classes than label colors for this dataset. "
f"If you want to plot your results, this will not work.")
return _num_classes
else:
return self._num_classes
@lazy_property
def normalization(self) -> np.ndarray:
"""
before blob is fed into the neural network some normalization takes place in the batch generator
normalization factors specific for each dataset have to be provided
note: this property can be overriden by subclasses if another normalization is needed
:return: np.ndarray with normalization factors
"""
_normalizer = np.array([1. for _ in range(self.num_features)])
if self._is_colors:
_normalizer[3:6] = 255. # normalize colors to [0,1]
if self._is_laser:
_normalizer[6] = 2048. # normalize laser [-1, 1]
elif self._is_laser:
_normalizer[3] = 2048. # normalize laser [-1, 1]
return _normalizer
@lazy_property
def num_features(self) -> int:
return 3 + self._is_colors * 3 + self._is_laser
@staticmethod
def label_colors() -> np.ndarray:
return np.array([cc.colors['brown'].npy,
cc.colors['darkgreen'].npy,
cc.colors['springgreen'].npy,
cc.colors['red1'].npy,
cc.colors['darkgray'].npy,
cc.colors['gray'].npy,
cc.colors['pink'].npy,
cc.colors['yellow1'].npy,
cc.colors['violet'].npy,
cc.colors['hotpink'].npy,
cc.colors['blue'].npy,
cc.colors['lightblue'].npy,
cc.colors['orange'].npy,
cc.colors['black'].npy])
def _load(self, is_train: bool) -> Tuple[List[np.ndarray], List[str], Dict[str, np.ndarray]]:
"""
Note that we assume a folder hierarchy of DATA_PATH/SET_NO/{full_size, sample_X_Y, ...}/POINTCLOUD.npy
:param is_train: true iff training mode
:return: list of pointclouds and list of filenames
"""
data_training_test = {}
full_sized_test_data = {}
names = set()
train_pc_names = set()
test_pc_names = set()
# pick
pick = [0, 1, 2]
if self._is_colors:
pick = pick + [3, 4, 5]
if self._is_laser:
pick = pick + [6]
if self._is_laser:
pick = pick + [3]
pick = pick + [-1]
for dirpath, dirnames, filenames in os.walk(self.data_path):
for filename in [f for f in filenames if f.endswith(".npy")]:
is_test_set = os.path.dirname(dirpath).split('/')[-1] in self._test_sets
if not is_test_set and not is_train:
# we do not have to load training examples if we only want to evaluate
continue
name = None
if os.path.basename(dirpath) == self._downsample_prefix:
# dimension of a single npy file: (number of points, number of features + label)
pointcloud_data = np.load(os.path.join(dirpath, filename))
pointcloud_data = pointcloud_data[:, pick]
pointcloud_data = pointcloud_data.astype(np.float32) # just to be sure!
name = filename.replace('.npy', '')
data_training_test[name] = pointcloud_data
elif os.path.basename(dirpath) == 'full_size':
if not is_train:
# for testing we consider full scale point clouds
if is_test_set:
# dimension of a single npy file: (number of points, number of features + label)
pointcloud_data = np.load(os.path.join(dirpath, filename))
pointcloud_data = pointcloud_data[:, pick]
pointcloud_data = pointcloud_data.astype(np.float32) # just to be sure!
name = filename.replace('.npy', '')
full_sized_test_data[name] = pointcloud_data
if name is not None:
names.add(name)
if is_test_set:
test_pc_names.add(name)
else:
train_pc_names.add(name)
names = sorted(names)
data_training_test = [data_training_test[key] for key in names]
self._train_pc_idx = sorted([names.index(name) for name in train_pc_names])
self._test_pc_idx = sorted([names.index(name) for name in test_pc_names])
# short sanity check to ensure that data could be read in
if len(data_training_test) == 0 or len(names) == 0:
# error
raise ValueError(f"Dataset could not be found under {self.data_path}")
else:
if (not is_train) and len(full_sized_test_data) == 0:
# error
raise ValueError(f"Dataset could not be found in {self.data_path}")
return data_training_test, names, full_sized_test_data
if __name__ == '__main__':
from tools.tools import setup_logger
setup_logger()
dataset = GeneralDataset(data_path='/fastwork/schult/stanford_indoor',
is_train=False,
test_sets=['area_3', 'area_2'],
downsample_prefix='sample_1_1',
is_colors=True,
is_laser=True)
| import os
import numpy as np
import datasets.color_constants as cc
from tools.lazy_decorator import *
from typing import Tuple, List, Dict
import logging
class GeneralDataset:
"""
Class used for reading in datasets for training/testing.
Parameterized in order to handle different kinds of datasets (e.g. k-fold datasets)
"""
@property
def data_path(self) -> str:
return self._data_path
@property
def data(self) -> List[np.ndarray]:
return self._data
@property
def full_sized_data(self) -> Dict[str, np.ndarray]:
return self._full_sized_data
@property
def file_names(self) -> List[str]:
return self._file_names
@property
def train_pc_idx(self) -> List[int]:
return self._train_pc_idx
@property
def test_pc_idx(self) -> List[int]:
return self._test_pc_idx
def __init__(self, data_path: str, is_train: bool, test_sets: list,
downsample_prefix: str, is_colors: bool, is_laser: bool, n_classes=None):
self._test_sets = test_sets
self._downsample_prefix = downsample_prefix
self._is_colors = is_colors
self._is_laser = is_laser
# it is possible that there is no class information given for test sets
if n_classes is None:
self._is_class = True
else:
self._is_class = False
self._num_classes = n_classes
self._data_path = data_path
self._data, self._file_names, self._full_sized_data = self._load(is_train)
# log some dataset properties
logging.debug(f"number of features: {self.num_features}")
logging.debug(f"number of classes: {self.num_classes}")
logging.debug(f"number of training samples: {len(self.train_pc_idx)}")
logging.debug(f"number of test samples: {len(self.test_pc_idx)}")
@lazy_property
def num_classes(self) -> int:
"""
calculate the number of unique class labels if class information is given in npy-file.
Otherwise, just return the number of classes which have been defined in the constructor
:return: number of classes for this dataset
"""
if self._is_class:
# assuming that labels are in the last column
# counting unique class labels of all pointclouds
_num_classes = len(np.unique(np.concatenate([np.unique(pointcloud[:, -1])
for pointcloud in self.data])))
if _num_classes > len(self.label_colors()):
logging.warning(f"There are more classes than label colors for this dataset. "
f"If you want to plot your results, this will not work.")
return _num_classes
else:
return self._num_classes
@lazy_property
def normalization(self) -> np.ndarray:
"""
before blob is fed into the neural network some normalization takes place in the batch generator
normalization factors specific for each dataset have to be provided
note: this property can be overriden by subclasses if another normalization is needed
:return: np.ndarray with normalization factors
"""
_normalizer = np.array([1. for _ in range(self.num_features)])
if self._is_colors:
_normalizer[3:6] = 255. # normalize colors to [0,1]
if self._is_laser:
_normalizer[6] = 2048. # normalize laser [-1, 1]
elif self._is_laser:
_normalizer[3] = 2048. # normalize laser [-1, 1]
return _normalizer
@lazy_property
def num_features(self) -> int:
return 3 + self._is_colors * 3 + self._is_laser
@staticmethod
def label_colors() -> np.ndarray:
return np.array([cc.colors['brown'].npy,
cc.colors['darkgreen'].npy,
cc.colors['springgreen'].npy,
cc.colors['red1'].npy,
cc.colors['darkgray'].npy,
cc.colors['gray'].npy,
cc.colors['pink'].npy,
cc.colors['yellow1'].npy,
cc.colors['violet'].npy,
cc.colors['hotpink'].npy,
cc.colors['blue'].npy,
cc.colors['lightblue'].npy,
cc.colors['orange'].npy,
cc.colors['black'].npy])
def _load(self, is_train: bool) -> Tuple[List[np.ndarray], List[str], Dict[str, np.ndarray]]:
    """
    Read all pointcloud npy files below ``self.data_path``.

    Note that we assume a folder hierarchy of
    DATA_PATH/SET_NO/{full_size, <downsample_prefix>, ...}/POINTCLOUD.npy.
    Downsampled clouds are collected for training/testing; full-size clouds
    are loaded only in evaluation mode and only for the configured test sets.
    As a side effect, ``self._train_pc_idx`` and ``self._test_pc_idx`` are
    set to the sorted indices of train/test clouds in the returned list.

    :param is_train: true iff training mode
    :return: (list of pointclouds, sorted list of their names,
              dict name -> full-size pointcloud for the test sets)
    :raises ValueError: if no data could be found under ``self.data_path``
    """
    data_training_test = {}
    full_sized_test_data = {}
    names = set()
    train_pc_names = set()
    test_pc_names = set()
    # columns to keep: xyz, then optional rgb/laser, label column always last.
    # The nesting mirrors the feature layout used by `normalization`: with
    # colors the laser value sits at column 6, without colors at column 3.
    # (The original flat if/if would have picked column 3 twice when both
    # colors and laser are enabled.)
    pick = [0, 1, 2]
    if self._is_colors:
        pick += [3, 4, 5]
        if self._is_laser:
            pick += [6]
    elif self._is_laser:
        pick += [3]
    pick += [-1]

    def read_cloud(path):
        # dimension of a single npy file: (number of points, number of features + label)
        cloud = np.load(path)
        return cloud[:, pick].astype(np.float32)  # float32 just to be sure!

    for dirpath, dirnames, filenames in os.walk(self.data_path):
        for filename in [f for f in filenames if f.endswith(".npy")]:
            # SET_NO is the parent folder of the current leaf directory;
            # os.path.basename instead of split('/') keeps this portable
            set_no = os.path.basename(os.path.dirname(dirpath))
            is_test_set = set_no in self._test_sets
            if not is_test_set and not is_train:
                # we do not have to load training examples if we only want to evaluate
                continue
            name = None
            leaf = os.path.basename(dirpath)
            if leaf == self._downsample_prefix:
                name = filename.replace('.npy', '')
                data_training_test[name] = read_cloud(os.path.join(dirpath, filename))
            elif leaf == 'full_size' and not is_train and is_test_set:
                # for testing we consider full scale point clouds
                name = filename.replace('.npy', '')
                full_sized_test_data[name] = read_cloud(os.path.join(dirpath, filename))
            if name is not None:
                names.add(name)
                if is_test_set:
                    test_pc_names.add(name)
                else:
                    train_pc_names.add(name)
    names = sorted(names)
    data_training_test = [data_training_test[key] for key in names]
    self._train_pc_idx = sorted(names.index(name) for name in train_pc_names)
    self._test_pc_idx = sorted(names.index(name) for name in test_pc_names)
    # short sanity check to ensure that data could be read in
    if not data_training_test or not names:
        raise ValueError(f"Dataset could not be found under {self.data_path}")
    if not is_train and not full_sized_test_data:
        raise ValueError(f"Dataset could not be found in {self.data_path}")
    return data_training_test, names, full_sized_test_data
if __name__ == '__main__':
    # Manual smoke test: load the Stanford indoor dataset in evaluation mode
    # (is_train=False), holding out area_2 and area_3 as test sets.
    # NOTE(review): the data path is machine-specific — adjust before running.
    from tools.tools import setup_logger
    setup_logger()
    dataset = GeneralDataset(data_path='/fastwork/schult/stanford_indoor',
                             is_train=False,
                             test_sets=['area_3', 'area_2'],
                             downsample_prefix='sample_1_1',
                             is_colors=True,
                             is_laser=True)
| en | 0.842776 | Class used for reading in datasets for training/testing. Parameterized in order to handle different kinds of datasets (e.g. k-fold datasets) # it is possible that there is no class information given for test sets # log some dataset properties calculate the number of unique class labels if class information is given in npy-file. Otherwise, just return the number of classes which have been defined in the constructor :return: number of classes for this dataset # assuming that labels are in the last column # counting unique class labels of all pointclouds before blob is fed into the neural network some normalization takes place in the batch generator normalization factors specific for each dataset have to be provided note: this property can be overriden by subclasses if another normalization is needed :return: np.ndarray with normalization factors # normalize colors to [0,1] # normalize laser [-1, 1] # normalize laser [-1, 1] Note that we assume a folder hierarchy of DATA_PATH/SET_NO/{full_size, sample_X_Y, ...}/POINTCLOUD.npy :param is_train: true iff training mode :return: list of pointclouds and list of filenames # pick # we do not have to load training examples if we only want to evaluate # dimension of a single npy file: (number of points, number of features + label) # just to be sure! # for testing we consider full scale point clouds # dimension of a single npy file: (number of points, number of features + label) # just to be sure! # short sanity check to ensure that data could be read in # error # error | 2.595157 | 3 |
musicstore/musicapp/views.py | hannahclee/recordreview | 0 | 6622100 | <filename>musicstore/musicapp/views.py
from django.shortcuts import render
from .models import Artist
from .models import Record
from .models import Review
from django.shortcuts import render, get_object_or_404
from .forms import RecordForm, ReviewForm
from django.contrib.auth.decorators import login_required
# Create your views here.
def index(request):
return render(request, 'musicapp/index.html')
def records (request):
records_list=Record.objects.all()
return render (request, 'musicapp/records.html' , {'records_list': records_list})
def artists (request):
artist_list=Artist.objects.all()
return render (request, 'musicapp/artists.html' , {'artist_list': artist_list})
def reviews (request):
review_list=Review.objects.all()
return render (request, 'musicapp/reviews.html' , {'review_list': review_list})
def recorddetail (request, id):
detail=get_object_or_404(Record, pk=id)
context = {'detail': detail}
return render (request, 'musicapp/details.html' , context=context)
@login_required
def newRecord(request):
form=RecordForm
if request.method=='POST':
form=RecordForm(request.POST)
if form.is_valid():
post=form.save(commit=True)
post.save()
form=RecordForm()
else:
form=RecordForm()
return render(request, 'musicapp/newrecord.html', {'form': form})
def newReview(request):
form=ReviewForm
if request.method=='POST':
form=ReviewForm(request.POST)
if form.is_valid():
post=form.save(commit=True)
post.save()
form=ReviewForm()
else:
form=ReviewForm()
return render(request, 'musicapp/newreview.html', {'form': form})
def loginmessage(request):
return render(request, 'musicapp/loginmessage.html')
def logoutmessage(request):
return render(request, 'musicapp/logoutmessage.html') | <filename>musicstore/musicapp/views.py
from django.shortcuts import render
from .models import Artist
from .models import Record
from .models import Review
from django.shortcuts import render, get_object_or_404
from .forms import RecordForm, ReviewForm
from django.contrib.auth.decorators import login_required
# Create your views here.
def index(request):
return render(request, 'musicapp/index.html')
def records (request):
records_list=Record.objects.all()
return render (request, 'musicapp/records.html' , {'records_list': records_list})
def artists (request):
artist_list=Artist.objects.all()
return render (request, 'musicapp/artists.html' , {'artist_list': artist_list})
def reviews (request):
review_list=Review.objects.all()
return render (request, 'musicapp/reviews.html' , {'review_list': review_list})
def recorddetail (request, id):
detail=get_object_or_404(Record, pk=id)
context = {'detail': detail}
return render (request, 'musicapp/details.html' , context=context)
@login_required
def newRecord(request):
form=RecordForm
if request.method=='POST':
form=RecordForm(request.POST)
if form.is_valid():
post=form.save(commit=True)
post.save()
form=RecordForm()
else:
form=RecordForm()
return render(request, 'musicapp/newrecord.html', {'form': form})
def newReview(request):
form=ReviewForm
if request.method=='POST':
form=ReviewForm(request.POST)
if form.is_valid():
post=form.save(commit=True)
post.save()
form=ReviewForm()
else:
form=ReviewForm()
return render(request, 'musicapp/newreview.html', {'form': form})
def loginmessage(request):
return render(request, 'musicapp/loginmessage.html')
def logoutmessage(request):
return render(request, 'musicapp/logoutmessage.html') | en | 0.968116 | # Create your views here. | 2.140226 | 2 |
LBP_SVM/classifier.py | VieVie31/kaggle_invasive_species | 1 | 6622101 | import random
import numpy as np
import pandas as pd
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
import warnings
warnings.filterwarnings('ignore')
#reproductibility
random.seed(1996)
np.random.seed(1996)
#load training labels
train_labels = pd.read_csv('../input/train_labels.csv')
training_labels = np.array(list(train_labels.drop("name", axis=1)["invasive"]))
#load training data (allready normalized)
training_data = joblib.load("invasive_species_lbp_training_data.pkl")
print("training set size : ", len(training_data))
#shuffling data
training_set = list(zip(training_labels, training_data))
random.shuffle(training_set)
#split training set
train_set, test_set = train_test_split(training_set, test_size=.1)
Y_train, X_train = zip(*train_set)
Y_test, X_test = zip(*test_set)
X_train = np.array(X_train)
Y_train = np.array(Y_train)
X_test = np.array(X_test)
Y_test = np.array(Y_test)
print("nb training set : ", len(Y_train))
print("nb testing set : ", len(Y_test))
#train a SVC classifier
clf_svc = SVC(probability=True)
clf_svc.fit(X_train, Y_train)
print("SVC accuracy : ", sum(clf_svc.predict(X_test) == Y_test) / float(len(Y_test)))
#train a RandomForestClassifier classifier
clf_rfc = RandomForestClassifier()
clf_rfc.fit(X_train, Y_train)
print("RandomForestClassifier accuracy : ", sum(clf_rfc.predict(X_test) == Y_test) / float(len(Y_test)))
#load testing data (allready normalized)
testing_data = joblib.load("invasive_species_lbp_testing_data.pkl")
testing_predicted_labels_proba = clf_svc.predict_proba(testing_data)[:,1]
#save the ouput for kaggle in csv
s = "name,invasive\n"
for i, v in enumerate(testing_predicted_labels_proba):
s += str(i + 1) + ',' + str(v) + chr(10)
f = open('submit.csv', 'w')
f.write(s)
f.close()
print("done !")
| import random
import numpy as np
import pandas as pd
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
import warnings
warnings.filterwarnings('ignore')
#reproductibility
random.seed(1996)
np.random.seed(1996)
#load training labels
train_labels = pd.read_csv('../input/train_labels.csv')
training_labels = np.array(list(train_labels.drop("name", axis=1)["invasive"]))
#load training data (allready normalized)
training_data = joblib.load("invasive_species_lbp_training_data.pkl")
print("training set size : ", len(training_data))
#shuffling data
training_set = list(zip(training_labels, training_data))
random.shuffle(training_set)
#split training set
train_set, test_set = train_test_split(training_set, test_size=.1)
Y_train, X_train = zip(*train_set)
Y_test, X_test = zip(*test_set)
X_train = np.array(X_train)
Y_train = np.array(Y_train)
X_test = np.array(X_test)
Y_test = np.array(Y_test)
print("nb training set : ", len(Y_train))
print("nb testing set : ", len(Y_test))
#train a SVC classifier
clf_svc = SVC(probability=True)
clf_svc.fit(X_train, Y_train)
print("SVC accuracy : ", sum(clf_svc.predict(X_test) == Y_test) / float(len(Y_test)))
#train a RandomForestClassifier classifier
clf_rfc = RandomForestClassifier()
clf_rfc.fit(X_train, Y_train)
print("RandomForestClassifier accuracy : ", sum(clf_rfc.predict(X_test) == Y_test) / float(len(Y_test)))
#load testing data (allready normalized)
testing_data = joblib.load("invasive_species_lbp_testing_data.pkl")
testing_predicted_labels_proba = clf_svc.predict_proba(testing_data)[:,1]
#save the ouput for kaggle in csv
s = "name,invasive\n"
for i, v in enumerate(testing_predicted_labels_proba):
s += str(i + 1) + ',' + str(v) + chr(10)
f = open('submit.csv', 'w')
f.write(s)
f.close()
print("done !")
| en | 0.694168 | #reproductibility #load training labels #load training data (allready normalized) #shuffling data #split training set #train a SVC classifier #train a RandomForestClassifier classifier #load testing data (allready normalized) #save the ouput for kaggle in csv | 2.801479 | 3 |
src/selfcoin/common/globall.py | wangyubin112/selfcoin | 0 | 6622102 | <reponame>wangyubin112/selfcoin<gh_stars>0
'''
c : card
ch : chain
s : socket
f : file
fo : folder
l : line
m : mutual
i : index
p : position/pointer
G : globall
T : tune
'''
'''
TRADE protocol:
deferred trade (need server to involve):
(pay) demand newest mutual card --> server
(server) demand ack --> pay
(pay) pay --> server
(server) pay ack --> pay
(earn) demand newest mutual card --> server
(server) demand ack --> earn
(earn) earn --> server
(server) earn ack --> earn
(earn) earn --> group(multicast)
immediate trade ():
(pay) pay --> earn
(earn) pay ack --> pay
(earn) earn --> group(multicast)
interact with server for deferred trade:
(earn) earn --> server
(server) earn ack --> earn
(pay) pay --> server
(server) pay ack --> pay
POST protocol:
(post) launch --> server
(server) close --> post
if post does not receive close card
(post) demand --> server
(server) demand ack --> post
WATCH protocol:
Below is details of each card with protocol of ROOT, POST, CHARGE, REDEEM, TRADE, DEMAND:
ROOT:
god ROOT:
version: --> P_VER: 0
time: --> P_TIME: 1
type: ROOT --> P_TYPE: 2
god ID: --> P_ID_GOD: 3
god ID: --> P_ID_GOD: 4
mutual index: --> P_I_M: 5
root content hash: --> P_POST: 6
remained coin: b58encode_int(0) --> P_COIN_REST: -6
previous mutual chain index: --> P_I_CH_M_PRE: -5
chain index: b58encode_int(0) --> P_I_CH: -4
previous hash: real ID hash --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
POST:
node POST:
version: --> P_VER: 0
time: --> P_TIME: 1
type: POST --> P_TYPE: 2
god ID: --> P_ID_GOD: 3
post ID: --> P_ID_POST: 4
mutual index: --> P_I_M: 5
post content hash: --> P_POST: 6
post sign: --> P_SIGN: -2
post hash: --> P_HASH: -1
god POST:
c_post_node: post sign and hash is discard
remained coin: --> P_COIN_REST: -6
previous mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
previous hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
node POST:
c_post_god: ack hash is discard
remained coin: --> P_COIN_REST: -6
previous mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
previous hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
CHARGE: (used in Init: the first root card)
node charge:
version: --> P_VER: 0
time: --> P_TIME: 1
type: CHARGE --> P_TYPE: 2
god ID: --> P_ID_GOD: 3
node ID: --> P_ID_node: 4
mutual index: b58encode_int(0) --> P_I_M: 5
charge content hash: hash_ID_real --> P_POST: 6
sign:
hash:
GOD charge: (If TX to charge node directly, use ACK and no need to TX c_charge_node part)
c_charge_node: hash is discard --> P_CHARG_NODE: 7
charge coin: COIN_CREDIT --> P_COIN_CHRE: 8
remained coin: --> P_COIN_REST: -6
pre mutual chain index: b58encode_int(0) --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
pre hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
node charge:
c_charge_god: hash is discard
remained coin: b58encode_int(0) --> P_COIN_REST: -6
pre mutual chain index: b58encode_int(0) --> P_I_CH_M_PRE: -5
chain index: b58encode_int(0) --> P_I_CH: -4
pre hash: b58encode_int(0) --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
REDEEM:
node redeem:
version: --> P_VER: 0
time: --> P_TIME: 1
type: REDEEM --> P_TYPE: 2
god ID: --> P_ID_GOD: 3
node ID: --> P_ID_node: 4
mutual index: --> P_I_M: 5
redeem content hash: --> P_POST: 6
sign:
hash:
GOD redeem: (If TX to redeem node directly, use ACK and no need to TX c_charge_node part)
c_redeem_node: hash is discard
redeem coin: COIN_CREDIT --> P_COIN_CHRE: 8
remained coin: --> P_COIN_REST: -6
pre mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
pre hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
node redeem:
c_redeem_god: hash is discard
remained coin: --> P_COIN_REST: -6
pre mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
pre hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
TRADE:
pay:
version: --> P_VER: 0
time: --> P_TIME: 1
type: PAY --> P_TYPE: 2
pay ID: --> P_ID_PAY: 3
earn ID: --> P_ID_EARN: 4
mutual index: --> P_I_M: 5
trade coin: --> P_COIN_TRADE: 6
pay remained coin: --> P_COIN_REST: -6
previous mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
previous hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
earn: (If TX to pay node directly, use ACK and no need to TX c_charge_node part)
card_pay: pay hash is discarded
card subtype is change from PAY --> EARN
earn remained coin: --> P_COIN_REST: -6
previous mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
previous hash:
sign:
hash:
pay/earn ack(from Aid in deferred trade):
version:
time:
type: ACK
acker(server) ID: --> P_ID_ACK
source card hash: --> P_HASH_SRC
content: success or fail --> P_CONTENT
acker(server) sign:
acker(server) hash:
DEMAND:
demand: (TX to Aid or other nodes)
version:
time:
type: DEMAND
demand(earn) ID: --> P_ID_DEMAND: 3
demanded(pay) ID: --> P_ID_DEMANDED: 4
mutual index: --> P_I_M: 5
chain index: -->
own(earn) sign:
own(earn) hash:
demand ack:
version:
time:
type: ACK
acker(server) ID: --> P_ID_ACK
source card hash: --> P_HASH_SRC
content: --> P_CONTENT
acker(server) sign:
acker(server) hash:
'''
# card type
ROOT = b'0'
PAY = b'1'
EARN = b'2'
POST = b'3'
CHARGE = b'4'
REDEEM = b'5'
WATCH = b'6' # i_ch
DEMAND = b'7' # i_m
ACK = b'8'
## position of specific attribute in a card
P_VER = 0
P_TIME = 1
P_TYPE = 2
P_ID = 3
P_ID_DEMAND = 3
P_ID_DEMANDED = 4
P_ID_PAY = 3
P_ID_EARN = 4
P_ID_GOD = 3
P_ID_NODE = 4
P_I_M = 5
P_POST = 6
P_COIN_TRADE = 6
P_COIN_REST = -6
P_COIN_CHRE = 8
P_I_CH_M_PRE = -5
P_I_CH = -4
P_HASH_PRE = -3
P_SIGN = -2
P_HASH = -1
# for ack
P_ID_ACK = 3
P_HASH_SRC = 4
P_CONTENT = 5
'''
chain file name:
example: 'version_ID_index'
version: for upgrade (different version may have different line len and key len, even change of structure of chain records)
ID: pub_key (may upgrade to increase len)
index: for organization (file only contains no more than fixed num of line based on version)
chain file content:
head:
line 0 (chain info) version of chain file
line 1: the next close line need to be checked
line 2-3: reserved
body:
line 4-end:
'''
# LEN_L: len of line, visiable and invisible character, include '\n'
# NUM_L_HEAD: the number of line in head of chain file
# NUM_L_BODY: the number of line in body of chain file
# INDEX_L_MAX = 2**32
VER_0 = b'0' # ver 0 for test
VER_1 = b'1'
VER_PRIVACY = b'10' # to do fo privacy
VER = VER_0
NUM_L_HEAD = 4
NUM_L_BODY = 10**5
LEN_L = 1024 # all character, include '\n'
LEN_ID = 44
LEN_NAME = 20
LEN_KEY = LEN_ID
# CHAIN = {
# VER_0: {
# TRADE: {
# 'LEN_L': 1024, 'NUM_L_HEAD': 4, 'NUM_L_BODY': 10**5
# },
# POST: {
# 'LEN_L': 1024, 'NUM_L_HEAD': 4, 'NUM_L_BODY': 10**5
# }
# },
# VER_1: {
# TRADE: {
# 'LEN_L': 1024, 'NUM_L_HEAD': 4, 'NUM_L_BODY': 10**5
# },
# POST: {
# 'LEN_L': 1024, 'NUM_L_HEAD': 4, 'NUM_L_BODY': 10**5
# }
# }
# }
NUM_I_M = 2**32
# god node
COIN_CREDIT = 100000
NAME_GOD = b'god'
ID_REAL_GOD_TEST = b'XXXXXX19901211XXXX' # this is for test
ID_REAL_GOD = ID_REAL_GOD_TEST # real ID for god
# key
NUM_B_ODEV = 1 | '''
c : card
ch : chain
s : socket
f : file
fo : folder
l : line
m : mutual
i : index
p : position/pointer
G : globall
T : tune
'''
'''
TRADE protocol:
deferred trade (need server to involve):
(pay) demand newest mutual card --> server
(server) demand ack --> pay
(pay) pay --> server
(server) pay ack --> pay
(earn) demand newest mutual card --> server
(server) demand ack --> earn
(earn) earn --> server
(server) earn ack --> earn
(earn) earn --> group(multicast)
immediate trade ():
(pay) pay --> earn
(earn) pay ack --> pay
(earn) earn --> group(multicast)
interact with server for deferred trade:
(earn) earn --> server
(server) earn ack --> earn
(pay) pay --> server
(server) pay ack --> pay
POST protocol:
(post) launch --> server
(server) close --> post
if post does not receive close card
(post) demand --> server
(server) demand ack --> post
WATCH protocol:
Below is details of each card with protocol of ROOT, POST, CHARGE, REDEEM, TRADE, DEMAND:
ROOT:
god ROOT:
version: --> P_VER: 0
time: --> P_TIME: 1
type: ROOT --> P_TYPE: 2
god ID: --> P_ID_GOD: 3
god ID: --> P_ID_GOD: 4
mutual index: --> P_I_M: 5
root content hash: --> P_POST: 6
remained coin: b58encode_int(0) --> P_COIN_REST: -6
previous mutual chain index: --> P_I_CH_M_PRE: -5
chain index: b58encode_int(0) --> P_I_CH: -4
previous hash: real ID hash --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
POST:
node POST:
version: --> P_VER: 0
time: --> P_TIME: 1
type: POST --> P_TYPE: 2
god ID: --> P_ID_GOD: 3
post ID: --> P_ID_POST: 4
mutual index: --> P_I_M: 5
post content hash: --> P_POST: 6
post sign: --> P_SIGN: -2
post hash: --> P_HASH: -1
god POST:
c_post_node: post sign and hash is discard
remained coin: --> P_COIN_REST: -6
previous mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
previous hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
node POST:
c_post_god: ack hash is discard
remained coin: --> P_COIN_REST: -6
previous mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
previous hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
CHARGE: (used in Init: the first root card)
node charge:
version: --> P_VER: 0
time: --> P_TIME: 1
type: CHARGE --> P_TYPE: 2
god ID: --> P_ID_GOD: 3
node ID: --> P_ID_node: 4
mutual index: b58encode_int(0) --> P_I_M: 5
charge content hash: hash_ID_real --> P_POST: 6
sign:
hash:
GOD charge: (If TX to charge node directly, use ACK and no need to TX c_charge_node part)
c_charge_node: hash is discard --> P_CHARG_NODE: 7
charge coin: COIN_CREDIT --> P_COIN_CHRE: 8
remained coin: --> P_COIN_REST: -6
pre mutual chain index: b58encode_int(0) --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
pre hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
node charge:
c_charge_god: hash is discard
remained coin: b58encode_int(0) --> P_COIN_REST: -6
pre mutual chain index: b58encode_int(0) --> P_I_CH_M_PRE: -5
chain index: b58encode_int(0) --> P_I_CH: -4
pre hash: b58encode_int(0) --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
REDEEM:
node redeem:
version: --> P_VER: 0
time: --> P_TIME: 1
type: REDEEM --> P_TYPE: 2
god ID: --> P_ID_GOD: 3
node ID: --> P_ID_node: 4
mutual index: --> P_I_M: 5
redeem content hash: --> P_POST: 6
sign:
hash:
GOD redeem: (If TX to redeem node directly, use ACK and no need to TX c_charge_node part)
c_redeem_node: hash is discard
redeem coin: COIN_CREDIT --> P_COIN_CHRE: 8
remained coin: --> P_COIN_REST: -6
pre mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
pre hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
node redeem:
c_redeem_god: hash is discard
remained coin: --> P_COIN_REST: -6
pre mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
pre hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
TRADE:
pay:
version: --> P_VER: 0
time: --> P_TIME: 1
type: PAY --> P_TYPE: 2
pay ID: --> P_ID_PAY: 3
earn ID: --> P_ID_EARN: 4
mutual index: --> P_I_M: 5
trade coin: --> P_COIN_TRADE: 6
pay remained coin: --> P_COIN_REST: -6
previous mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
previous hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
earn: (If TX to pay node directly, use ACK and no need to TX c_charge_node part)
card_pay: pay hash is discarded
card subtype is change from PAY --> EARN
earn remained coin: --> P_COIN_REST: -6
previous mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
previous hash:
sign:
hash:
pay/earn ack(from Aid in deferred trade):
version:
time:
type: ACK
acker(server) ID: --> P_ID_ACK
source card hash: --> P_HASH_SRC
content: success or fail --> P_CONTENT
acker(server) sign:
acker(server) hash:
DEMAND:
demand: (TX to Aid or other nodes)
version:
time:
type: DEMAND
demand(earn) ID: --> P_ID_DEMAND: 3
demanded(pay) ID: --> P_ID_DEMANDED: 4
mutual index: --> P_I_M: 5
chain index: -->
own(earn) sign:
own(earn) hash:
demand ack:
version:
time:
type: ACK
acker(server) ID: --> P_ID_ACK
source card hash: --> P_HASH_SRC
content: --> P_CONTENT
acker(server) sign:
acker(server) hash:
'''
# card type
ROOT = b'0'
PAY = b'1'
EARN = b'2'
POST = b'3'
CHARGE = b'4'
REDEEM = b'5'
WATCH = b'6' # i_ch
DEMAND = b'7' # i_m
ACK = b'8'
## position of specific attribute in a card
P_VER = 0
P_TIME = 1
P_TYPE = 2
P_ID = 3
P_ID_DEMAND = 3
P_ID_DEMANDED = 4
P_ID_PAY = 3
P_ID_EARN = 4
P_ID_GOD = 3
P_ID_NODE = 4
P_I_M = 5
P_POST = 6
P_COIN_TRADE = 6
P_COIN_REST = -6
P_COIN_CHRE = 8
P_I_CH_M_PRE = -5
P_I_CH = -4
P_HASH_PRE = -3
P_SIGN = -2
P_HASH = -1
# for ack
P_ID_ACK = 3
P_HASH_SRC = 4
P_CONTENT = 5
'''
chain file name:
example: 'version_ID_index'
version: for upgrade (different version may have different line len and key len, even change of structure of chain records)
ID: pub_key (may upgrade to increase len)
index: for organization (file only contains no more than fixed num of line based on version)
chain file content:
head:
line 0 (chain info) version of chain file
line 1: the next close line need to be checked
line 2-3: reserved
body:
line 4-end:
'''
# LEN_L: len of line, visiable and invisible character, include '\n'
# NUM_L_HEAD: the number of line in head of chain file
# NUM_L_BODY: the number of line in body of chain file
# INDEX_L_MAX = 2**32
VER_0 = b'0' # ver 0 for test
VER_1 = b'1'
VER_PRIVACY = b'10' # to do fo privacy
VER = VER_0
NUM_L_HEAD = 4
NUM_L_BODY = 10**5
LEN_L = 1024 # all character, include '\n'
LEN_ID = 44
LEN_NAME = 20
LEN_KEY = LEN_ID
# CHAIN = {
# VER_0: {
# TRADE: {
# 'LEN_L': 1024, 'NUM_L_HEAD': 4, 'NUM_L_BODY': 10**5
# },
# POST: {
# 'LEN_L': 1024, 'NUM_L_HEAD': 4, 'NUM_L_BODY': 10**5
# }
# },
# VER_1: {
# TRADE: {
# 'LEN_L': 1024, 'NUM_L_HEAD': 4, 'NUM_L_BODY': 10**5
# },
# POST: {
# 'LEN_L': 1024, 'NUM_L_HEAD': 4, 'NUM_L_BODY': 10**5
# }
# }
# }
NUM_I_M = 2**32
# god node
COIN_CREDIT = 100000
NAME_GOD = b'god'
ID_REAL_GOD_TEST = b'XXXXXX19901211XXXX' # this is for test
ID_REAL_GOD = ID_REAL_GOD_TEST # real ID for god
# key
NUM_B_ODEV = 1 | en | 0.671314 | c : card
ch : chain
s : socket
f : file
fo : folder
l : line
m : mutual
i : index
p : position/pointer
G : globall
T : tune TRADE protocol:
deferred trade (need server to involve):
(pay) demand newest mutual card --> server
(server) demand ack --> pay
(pay) pay --> server
(server) pay ack --> pay
(earn) demand newest mutual card --> server
(server) demand ack --> earn
(earn) earn --> server
(server) earn ack --> earn
(earn) earn --> group(multicast)
immediate trade ():
(pay) pay --> earn
(earn) pay ack --> pay
(earn) earn --> group(multicast)
interact with server for deferred trade:
(earn) earn --> server
(server) earn ack --> earn
(pay) pay --> server
(server) pay ack --> pay
POST protocol:
(post) launch --> server
(server) close --> post
if post does not receive close card
(post) demand --> server
(server) demand ack --> post
WATCH protocol:
Below is details of each card with protocol of ROOT, POST, CHARGE, REDEEM, TRADE, DEMAND:
ROOT:
god ROOT:
version: --> P_VER: 0
time: --> P_TIME: 1
type: ROOT --> P_TYPE: 2
god ID: --> P_ID_GOD: 3
god ID: --> P_ID_GOD: 4
mutual index: --> P_I_M: 5
root content hash: --> P_POST: 6
remained coin: b58encode_int(0) --> P_COIN_REST: -6
previous mutual chain index: --> P_I_CH_M_PRE: -5
chain index: b58encode_int(0) --> P_I_CH: -4
previous hash: real ID hash --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
POST:
node POST:
version: --> P_VER: 0
time: --> P_TIME: 1
type: POST --> P_TYPE: 2
god ID: --> P_ID_GOD: 3
post ID: --> P_ID_POST: 4
mutual index: --> P_I_M: 5
post content hash: --> P_POST: 6
post sign: --> P_SIGN: -2
post hash: --> P_HASH: -1
god POST:
c_post_node: post sign and hash is discard
remained coin: --> P_COIN_REST: -6
previous mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
previous hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
node POST:
c_post_god: ack hash is discard
remained coin: --> P_COIN_REST: -6
previous mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
previous hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
CHARGE: (used in Init: the first root card)
node charge:
version: --> P_VER: 0
time: --> P_TIME: 1
type: CHARGE --> P_TYPE: 2
god ID: --> P_ID_GOD: 3
node ID: --> P_ID_node: 4
mutual index: b58encode_int(0) --> P_I_M: 5
charge content hash: hash_ID_real --> P_POST: 6
sign:
hash:
GOD charge: (If TX to charge node directly, use ACK and no need to TX c_charge_node part)
c_charge_node: hash is discard --> P_CHARG_NODE: 7
charge coin: COIN_CREDIT --> P_COIN_CHRE: 8
remained coin: --> P_COIN_REST: -6
pre mutual chain index: b58encode_int(0) --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
pre hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
node charge:
c_charge_god: hash is discard
remained coin: b58encode_int(0) --> P_COIN_REST: -6
pre mutual chain index: b58encode_int(0) --> P_I_CH_M_PRE: -5
chain index: b58encode_int(0) --> P_I_CH: -4
pre hash: b58encode_int(0) --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
REDEEM:
node redeem:
version: --> P_VER: 0
time: --> P_TIME: 1
type: REDEEM --> P_TYPE: 2
god ID: --> P_ID_GOD: 3
node ID: --> P_ID_node: 4
mutual index: --> P_I_M: 5
redeem content hash: --> P_POST: 6
sign:
hash:
GOD redeem: (If TX to redeem node directly, use ACK and no need to TX c_charge_node part)
c_redeem_node: hash is discard
redeem coin: COIN_CREDIT --> P_COIN_CHRE: 8
remained coin: --> P_COIN_REST: -6
pre mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
pre hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
node redeem:
c_redeem_god: hash is discard
remained coin: --> P_COIN_REST: -6
pre mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
pre hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
TRADE:
pay:
version: --> P_VER: 0
time: --> P_TIME: 1
type: PAY --> P_TYPE: 2
pay ID: --> P_ID_PAY: 3
earn ID: --> P_ID_EARN: 4
mutual index: --> P_I_M: 5
trade coin: --> P_COIN_TRADE: 6
pay remained coin: --> P_COIN_REST: -6
previous mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
previous hash: --> P_HASH_PRE: -3
sign: --> P_SIGN: -2
hash: --> P_HASH: -1
earn: (If TX to pay node directly, use ACK and no need to TX c_charge_node part)
card_pay: pay hash is discarded
card subtype is change from PAY --> EARN
earn remained coin: --> P_COIN_REST: -6
previous mutual chain index: --> P_I_CH_M_PRE: -5
chain index: --> P_I_CH: -4
previous hash:
sign:
hash:
pay/earn ack(from Aid in deferred trade):
version:
time:
type: ACK
acker(server) ID: --> P_ID_ACK
source card hash: --> P_HASH_SRC
content: success or fail --> P_CONTENT
acker(server) sign:
acker(server) hash:
DEMAND:
demand: (TX to Aid or other nodes)
version:
time:
type: DEMAND
demand(earn) ID: --> P_ID_DEMAND: 3
demanded(pay) ID: --> P_ID_DEMANDED: 4
mutual index: --> P_I_M: 5
chain index: -->
own(earn) sign:
own(earn) hash:
demand ack:
version:
time:
type: ACK
acker(server) ID: --> P_ID_ACK
source card hash: --> P_HASH_SRC
content: --> P_CONTENT
acker(server) sign:
acker(server) hash: # card type # i_ch # i_m ## position of specific attribute in a card # for ack chain file name:
example: 'version_ID_index'
version: for upgrade (different version may have different line len and key len, even change of structure of chain records)
ID: pub_key (may upgrade to increase len)
index: for organization (file only contains no more than fixed num of line based on version)
chain file content:
head:
line 0 (chain info) version of chain file
line 1: the next close line need to be checked
line 2-3: reserved
body:
line 4-end: # LEN_L: len of line, visiable and invisible character, include '\n' # NUM_L_HEAD: the number of line in head of chain file # NUM_L_BODY: the number of line in body of chain file # INDEX_L_MAX = 2**32 # ver 0 for test # to do fo privacy # all character, include '\n' # CHAIN = { # VER_0: { # TRADE: { # 'LEN_L': 1024, 'NUM_L_HEAD': 4, 'NUM_L_BODY': 10**5 # }, # POST: { # 'LEN_L': 1024, 'NUM_L_HEAD': 4, 'NUM_L_BODY': 10**5 # } # }, # VER_1: { # TRADE: { # 'LEN_L': 1024, 'NUM_L_HEAD': 4, 'NUM_L_BODY': 10**5 # }, # POST: { # 'LEN_L': 1024, 'NUM_L_HEAD': 4, 'NUM_L_BODY': 10**5 # } # } # } # god node # this is for test # real ID for god # key | 2.150824 | 2 |
reviews/management/commands/create_reviews.py | Flict-dev/Boxes-api | 1 | 6622103 | from django.core.exceptions import ValidationError
from django.core.management import BaseCommand
from items.models import Item
from reviews.models import Reviews
from users.models import User
import requests
class Command(BaseCommand):
    """Management command: import review fixtures from the course GitHub repo
    into the local `Reviews` table."""
    def handle(self, *args, **options):
        # Fetch the published reviews fixture (a JSON list of review dicts).
        response_review = requests.get(
            'https://raw.githubusercontent.com/stepik-a-w/drf-project-boxes/master/reviews.json'
        )
        if response_review.status_code == 200:
            json_data_review = response_review.json()
            for review in json_data_review:
                try:
                    # NOTE(review): every field is used as a lookup kwarg (no
                    # defaults=), so this creates-or-matches exact rows rather
                    # than updating existing ones -- confirm intended.
                    Reviews.objects.update_or_create(
                        text=review['content'],
                        author_id=review['author'],
                        created_at=review['created_at'],
                        published_at=review['published_at'],
                        status=review['status']
                    )
                    print('success')
                except ValidationError:
                    # Some fixtures carry an invalid published_at; retry with a
                    # fixed fallback date instead of dropping the review.
                    Reviews.objects.update_or_create(
                        text=review['content'],
                        author_id=review['author'],
                        created_at=review['created_at'],
                        published_at='2021-01-03',
                        status=review['status']
                    )
        else:
            print('URL (reviews) не поддерживается!')
| from django.core.exceptions import ValidationError
from django.core.management import BaseCommand
from items.models import Item
from reviews.models import Reviews
from users.models import User
import requests
class Command(BaseCommand):
def handle(self, *args, **options):
response_review = requests.get(
'https://raw.githubusercontent.com/stepik-a-w/drf-project-boxes/master/reviews.json'
)
if response_review.status_code == 200:
json_data_review = response_review.json()
for review in json_data_review:
try:
Reviews.objects.update_or_create(
text=review['content'],
author_id=review['author'],
created_at=review['created_at'],
published_at=review['published_at'],
status=review['status']
)
print('success')
except ValidationError:
Reviews.objects.update_or_create(
text=review['content'],
author_id=review['author'],
created_at=review['created_at'],
published_at='2021-01-03',
status=review['status']
)
else:
print('URL (reviews) не поддерживается!')
| none | 1 | 2.249508 | 2 | |
relaax/common/algorithms/subgraph.py | deeplearninc/relaax | 71 | 6622104 | from builtins import object
import tensorflow as tf
import re
class Subgraph(object):
    """Base class for TF graph fragments, each built in its own variable scope."""
    def __init__(self, *args, **kwargs):
        # A unique scope named after the concrete class keeps variables from
        # different subgraph instances from colliding.
        with tf.variable_scope(None, default_name=type(self).__name__):
            self.__node = self.build_graph(*args, **kwargs)  # build_graph is expected to be provided by subclasses
    @property
    def node(self):
        # The node (or structure of nodes) returned by build_graph.
        return self.__node
    def Op(self, op, **feed_dict):
        # Wrap a single op/structure together with its placeholder mapping.
        return Op(op, feed_dict)
    def Ops(self, *ops, **feed_dict):
        # Like Op, but bundles several fetches into one tuple.
        return Op(ops, feed_dict)
    def Call(self, f):
        # Wrap a plain Python callable so it can be invoked like an Op.
        return Call(f)
class Call(object):
    """Callable wrapper that forwards a session plus any extra arguments to *f*."""

    def __init__(self, f):
        # Keep the wrapped callable; it is invoked later through __call__.
        self.f = f

    def __call__(self, session, *args, **kwargs):
        target = self.f
        return target(session, *args, **kwargs)
class Op(object):
    """Bundles TF fetches with a kwarg->placeholder mapping and runs them.

    The fetch structure may be arbitrarily nested tuples/lists/dicts of
    tensors (or Subgraphs); it is flattened for session.run and the flat
    results are reconstructed into the original shape.
    """
    def __init__(self, op, feed_dict):
        self.op = op
        self.feed_dict = feed_dict
    def __call__(self, session, **kwargs):
        # Map each declared kwarg name to its placeholder structure; the caller
        # supplies matching values by keyword.
        feed_dict = {v: kwargs[k] for k, v in self.feed_dict.items()}
        # print('feed_dict')
        # for k, v in self.flatten_feed_dict(feed_dict).items():
        #     import numpy as np
        #     print(repr(k), repr(np.asarray(v).shape))
        # Run the flattened fetches, then rebuild the nested result shape.
        return self.reconstruct(session._tf_session.run(list(self.flatten(self.op)),
                feed_dict=self.flatten_feed_dict(feed_dict)), self.op)
    @classmethod
    def flatten_feed_dict(cls, feed_dict):
        """Flatten a structured {placeholders: values} mapping to tensor->value."""
        return {k: v for k, v in cls.flatten_fd(feed_dict)}
    @classmethod
    def flatten_fd(cls, feed_dict):
        # Pairs each leaf placeholder with the matching leaf value via izip2.
        for k, v in feed_dict.items():
            for kk, vv in cls.izip2(k, v):
                yield kk, vv
    @classmethod
    def map(cls, v, mapping):
        """Apply *mapping* to every leaf of a nested tuple/list/dict structure."""
        def _map(v):
            v = cls.cast(v)
            if isinstance(v, (tuple, list)):
                return [_map(v1) for v1 in v]
            if isinstance(v, dict):
                return {k: _map(v1) for k, v1 in v.items()}
            return mapping(v)
        return _map(v)
    @classmethod
    def flatten(cls, v):
        """Yield the leaves of a nested structure in deterministic order."""
        v = cls.cast(v)
        if isinstance(v, (tuple, list)):
            for vv in v:
                for vvv in cls.flatten(vv):
                    yield vvv
        elif isinstance(v, dict):
            for vv in v.values():
                for vvv in cls.flatten(vv):
                    yield vvv
        else:
            yield v
    @classmethod
    def reconstruct(cls, v, pattern):
        """Pour the flat values *v* back into the shape of *pattern*.

        Relies on map() visiting leaves in the same order flatten() yields
        them. The trailing next(i) asserts every value was consumed.
        """
        i = iter(v)
        result = cls.map(pattern, lambda v: next(i))
        try:
            next(i)
            # NOTE(review): sanity check only; stripped under python -O.
            assert False
        except StopIteration:
            pass
        return result
    @classmethod
    def izip2(cls, v1, v2):
        """Zip two parallel nested structures, yielding leaf pairs."""
        v1 = cls.cast(v1)
        if isinstance(v1, (tuple, list)):
            assert isinstance(v2, (tuple, list))
            assert len(v1) == len(v2), 'len(v1) = {}, len(v2) = {}'.format(len(v1), len(v2))
            for vv1, vv2 in zip(v1, v2):
                for vvv1, vvv2 in cls.izip2(vv1, vv2):
                    yield vvv1, vvv2
        elif isinstance(v1, dict):
            assert isinstance(v2, dict)
            assert len(v1) == len(v2)
            for k1, vv1 in v1.items():
                vv2 = v2[k1]
                for vvv1, vvv2 in cls.izip2(vv1, vv2):
                    yield vvv1, vvv2
        else:
            yield v1, v2
    @staticmethod
    def cast(v):
        # Unwrap Subgraph instances to their underlying node structure.
        if isinstance(v, Subgraph):
            return v.node
        return v
def only_brackets(s):
    """Reduce *s* to its square brackets, separating back-to-back pairs.

    Every character that is not '[' or ']' is removed, then each "]["
    boundary is rewritten as "], [" so the result reads like a
    comma-separated list of bracket groups, e.g. "a[b][c]" -> "[], []".
    """
    # Raw string: the previous non-raw pattern "[^\[\]]+" relies on the
    # invalid "\[" string escape, which warns on modern Python.
    s1 = re.sub(r"[^\[\]]+", "", s)
    s2 = s1.replace("][", "], [")
    return s2
| from builtins import object
import tensorflow as tf
import re
class Subgraph(object):
def __init__(self, *args, **kwargs):
with tf.variable_scope(None, default_name=type(self).__name__):
self.__node = self.build_graph(*args, **kwargs)
@property
def node(self):
return self.__node
def Op(self, op, **feed_dict):
return Op(op, feed_dict)
def Ops(self, *ops, **feed_dict):
return Op(ops, feed_dict)
def Call(self, f):
return Call(f)
class Call(object):
def __init__(self, f):
self.f = f
def __call__(self, session, *args, **kwargs):
return self.f(session, *args, **kwargs)
class Op(object):
def __init__(self, op, feed_dict):
self.op = op
self.feed_dict = feed_dict
def __call__(self, session, **kwargs):
feed_dict = {v: kwargs[k] for k, v in self.feed_dict.items()}
# print('feed_dict')
# for k, v in self.flatten_feed_dict(feed_dict).items():
# import numpy as np
# print(repr(k), repr(np.asarray(v).shape))
return self.reconstruct(session._tf_session.run(list(self.flatten(self.op)),
feed_dict=self.flatten_feed_dict(feed_dict)), self.op)
@classmethod
def flatten_feed_dict(cls, feed_dict):
return {k: v for k, v in cls.flatten_fd(feed_dict)}
@classmethod
def flatten_fd(cls, feed_dict):
for k, v in feed_dict.items():
for kk, vv in cls.izip2(k, v):
yield kk, vv
@classmethod
def map(cls, v, mapping):
def _map(v):
v = cls.cast(v)
if isinstance(v, (tuple, list)):
return [_map(v1) for v1 in v]
if isinstance(v, dict):
return {k: _map(v1) for k, v1 in v.items()}
return mapping(v)
return _map(v)
@classmethod
def flatten(cls, v):
v = cls.cast(v)
if isinstance(v, (tuple, list)):
for vv in v:
for vvv in cls.flatten(vv):
yield vvv
elif isinstance(v, dict):
for vv in v.values():
for vvv in cls.flatten(vv):
yield vvv
else:
yield v
@classmethod
def reconstruct(cls, v, pattern):
i = iter(v)
result = cls.map(pattern, lambda v: next(i))
try:
next(i)
assert False
except StopIteration:
pass
return result
@classmethod
def izip2(cls, v1, v2):
v1 = cls.cast(v1)
if isinstance(v1, (tuple, list)):
assert isinstance(v2, (tuple, list))
assert len(v1) == len(v2), 'len(v1) = {}, len(v2) = {}'.format(len(v1), len(v2))
for vv1, vv2 in zip(v1, v2):
for vvv1, vvv2 in cls.izip2(vv1, vv2):
yield vvv1, vvv2
elif isinstance(v1, dict):
assert isinstance(v2, dict)
assert len(v1) == len(v2)
for k1, vv1 in v1.items():
vv2 = v2[k1]
for vvv1, vvv2 in cls.izip2(vv1, vv2):
yield vvv1, vvv2
else:
yield v1, v2
@staticmethod
def cast(v):
if isinstance(v, Subgraph):
return v.node
return v
def only_brackets(s):
s1 = re.sub("[^\[\]]+", "", s)
s2 = s1.replace("][", "], [")
return s2
| en | 0.298673 | # print('feed_dict') # for k, v in self.flatten_feed_dict(feed_dict).items(): # import numpy as np # print(repr(k), repr(np.asarray(v).shape)) | 2.598403 | 3 |
src/player.py | Jakub21/Disk-Game | 0 | 6622105 | class Player:
def __init__(self, name, pwd):
self.username = name
self.clr_choice = 'green'
def add_to_session(self, session):
self.session = session
self.color = session.get_color(self)
self.blnd_count = 0
self.unit_count = 0
self.r_wood = session.app.GAME.starting_wood
self.r_iron = session.app.GAME.starting_iron
self.r_fuel = session.app.GAME.starting_fuel
def defeat(self):
self.session.rem_player(self)
def leave_session(self):
del self.session
del self.color
del self.blnd_count
del self.unit_count
del self.r_wood
del self.r_iron
del self.r_fuel
def check_rsrc(self, resources):
wood, iron, fuel = resources
if wood > self.r_wood:
return False, 'wood'
if iron > self.r_iron:
return False, 'iron'
if fuel > self.r_fuel:
return False, 'fuel'
return True, None
def charge_rsrc(self, resources):
wood, iron, fuel = resources
self.r_wood -= wood
self.r_iron -= iron
self.r_fuel -= fuel
def refund_rsrc(self, resources):
wood, iron, fuel = resources
self.r_wood += wood
self.r_iron += iron
self.r_fuel += fuel
| class Player:
def __init__(self, name, pwd):
self.username = name
self.clr_choice = 'green'
def add_to_session(self, session):
self.session = session
self.color = session.get_color(self)
self.blnd_count = 0
self.unit_count = 0
self.r_wood = session.app.GAME.starting_wood
self.r_iron = session.app.GAME.starting_iron
self.r_fuel = session.app.GAME.starting_fuel
def defeat(self):
self.session.rem_player(self)
def leave_session(self):
del self.session
del self.color
del self.blnd_count
del self.unit_count
del self.r_wood
del self.r_iron
del self.r_fuel
def check_rsrc(self, resources):
wood, iron, fuel = resources
if wood > self.r_wood:
return False, 'wood'
if iron > self.r_iron:
return False, 'iron'
if fuel > self.r_fuel:
return False, 'fuel'
return True, None
def charge_rsrc(self, resources):
wood, iron, fuel = resources
self.r_wood -= wood
self.r_iron -= iron
self.r_fuel -= fuel
def refund_rsrc(self, resources):
wood, iron, fuel = resources
self.r_wood += wood
self.r_iron += iron
self.r_fuel += fuel
| none | 1 | 2.784716 | 3 | |
ReportarHardware.py | wisrovi/NurcallApp | 0 | 6622106 | <gh_stars>0
umbralTempCPU = 80
umbralTempGPU = 70
umbralRamUsada = 75
segundos = 600
import requests
import json
import platform
import subprocess as commands
import sched, time
class ObtenerIP:
    """Discover this host's outward-facing LAN IP address."""
    def __init__(self):
        import socket
        # Connecting a UDP socket sends no packets; it only makes the OS pick
        # the local interface/address that would route towards 8.8.8.8.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("8.8.8.8", 80))
        self.ipEquipo = s.getsockname()[0]
        s.close()
    def getIP(self):
        # Return the address captured at construction time.
        return self.ipEquipo
# Module-level setup: resolve our IP and ask the Nurcall server which lamp
# devices belong to this Raspberry, capturing the station name.
myIP = ObtenerIP().getIP()
nombreEstacion = ""
# NOTE(review): verify=False disables TLS certificate verification -- the
# server identity is not checked; confirm this is acceptable on this network.
req = requests.get(
    'https://paul.fcv.org:8443/NurcallApp/NurcallAppServlet?Proceso=listNurcall&Estacion=00&raspberry=' + myIP,
    verify=False,
    timeout=5)
respuesta = str(req.text)
listaDispositivosNurcall = []
if len(req.text)>5:
    res = json.loads(req.text)
    nombreEstacion = ""
    for objeto in res:
        listaDispositivosNurcall.append((objeto["ipLampara"], objeto["descripcionLampara"]))
        # Station name is taken from the first device only (break below).
        nombreEstacion = objeto["nombreEstacion"]
        break
# Telegram bot credentials and destination group chat id for alerts.
TOKEN = "673863930:<KEY>"
DestinatarioTelegram = -356316070
class TelegramService():
    """
    AUTHOR: WISROVI

    Thin wrapper around the Telegram Bot HTTP API for sending messages
    and inspecting pending bot updates.
    """
    Token = ""  # class-level default; replaced per instance in __init__
    def __init__(self, token):
        self.Token = token
    def sendMessageForUrl(self, Id_group, Mensaje):
        """Send *Mensaje* to chat *Id_group*; True on HTTP 200, else False."""
        url = "https://api.telegram.org/bot" + self.Token
        url += "/sendMessage?chat_id=" + str(Id_group)
        # NOTE(review): Mensaje is appended without URL-encoding; text with
        # '&', '#' or '+' may be truncated or mangled -- confirm inputs.
        url += "&text=" + Mensaje
        respuestaGet = requests.get(url, timeout=15)
        if respuestaGet.status_code == 200:
            return True
        else:
            return False
    def verActualizacionesBot(self):
        """Print the raw getUpdates payload (useful to discover chat ids)."""
        url = "https://api.telegram.org/bot" + self.Token
        url += "/getUpdates"
        respuestaGet = requests.get(url)
        if respuestaGet.status_code == 200:
            print(respuestaGet.content)
        else:
            print("Error solicitar info sobre los chat del bot")
telegram = TelegramService(TOKEN)
def get_cpu_temp():
    """Return the CPU temperature in degrees Celsius (sysfs millidegrees / 1000)."""
    with open("/sys/class/thermal/thermal_zone0/temp") as sensor:
        raw_millidegrees = sensor.read()
    return float(raw_millidegrees) / 1000
# To report in degrees Fahrenheit instead:
# return float(1.8*cpu_temp)+32
def get_gpu_temp():
    """Return the GPU temperature in degrees Celsius via vcgencmd."""
    raw = commands.getoutput('/opt/vc/bin/vcgencmd measure_temp')
    cleaned = raw.replace('temp=', " ").replace("'C", " ")
    return float(cleaned)
# To report in degrees Fahrenheit instead:
# return float(1.8* gpu_temp)+32
# Return RAM information (unit=kb) in a list
# Index 0: total RAM
# Index 1: used RAM
# Index 2: free RAM
def getRAMinfo():
    """Parse `free` output and return [total, used, free] RAM in kB (strings).

    Returns None when the expected second line is missing, instead of
    spinning forever at EOF like the previous `while 1` implementation.
    """
    # The context manager closes the pipe (the old code leaked the handle),
    # and iterating a finite line list cannot loop past end-of-output.
    with os.popen('free') as p:
        lines = p.readlines()
    if len(lines) < 2:
        return None
    # Second line is the "Mem:" row; fields 1-3 are total/used/free.
    return lines[1].split()[1:4]
def obtenerPorcentajeRamUsada():
    """Return the percentage of RAM currently in use, rounded to an int."""
    ram = getRAMinfo()
    # ram = [total, used, free] in kB (strings); percentage = used/total * 100.
    return round((float(ram[1]) * 100) / float(ram[0]))
# Scheduler used to re-run the health check every `segundos` seconds.
s = sched.scheduler(time.time, time.sleep)
import os
import os.path as path
# Seed the persisted "last reported" state files with the current readings
# on first run, so the first comparison has something to diff against.
if path.exists("CPU.rpi") == False:
    archivo = open("CPU.rpi", "w")
    archivo.write(str(round(get_cpu_temp())))
    archivo.close()
if path.exists("GPU.rpi") == False:
    archivo = open("GPU.rpi", "w")
    archivo.write(str(round(get_gpu_temp())))
    archivo.close()
if path.exists("RAM.rpi") == False:
    archivo = open("RAM.rpi", "w")
    archivo.write(str(obtenerPorcentajeRamUsada()))
    archivo.close()
# Flag intended to mark the first scheduler cycle (see do_something).
primerInicio = True
def do_something(sc):
    """Periodic health check: alert via Telegram when CPU/GPU temperature or
    RAM usage exceeds its threshold AND differs from the last stored reading,
    then persist the current readings and reschedule itself.
    """
    # Last reported values, persisted in *.rpi files between runs so the same
    # reading is not re-alerted.
    archivo = open("CPU.rpi", "r")
    cpuTemp = float(archivo.read())
    archivo.close()
    archivo = open("GPU.rpi", "r")
    gpuTemp = float(archivo.read())
    archivo.close()
    archivo = open("RAM.rpi", "r")
    gpuRam = float(archivo.read())
    archivo.close()
    # NOTE(review): primerInicio is module-level and never set to False, so
    # this branch zeroes the stored values on *every* cycle, which re-sends
    # an alert each run while a threshold stays exceeded -- confirm intended.
    if primerInicio:
        cpuTemp = 0
        gpuTemp = 0
        gpuRam = 0
    # Current readings.
    temperaturaCPUactual = round(get_cpu_temp())
    temperaturaGPUactual = round(get_gpu_temp())
    ramActual = obtenerPorcentajeRamUsada()
    hayMensajeReportar = False
    infoEnviar = "Esto es una alerta del sistema de:" + nombreEstacion + "\n"
    #infoEnviar += "Soy la raspberry con IP: " + myIP + "\n"
    #infoEnviar += "SO: " + platform.system() + "\n"
    #infoEnviar += "Nombre equipo: " + platform.node() + "\n"
    #infoEnviar += "Procesador: " + platform.machine() + "\n"
    #infoEnviar += "Arquitectura: " + platform.architecture()[0] + "\n"
    #infoEnviar += "Version Python: " + platform.python_version() + "\n"
    # Alert only when over threshold AND different from the stored reading.
    if temperaturaCPUactual >= umbralTempCPU:
        if cpuTemp != temperaturaCPUactual:
            hayMensajeReportar = True
            infoEnviar += "Mi temperatura de CPU es: " + str(temperaturaCPUactual) + "C" + "\n"
    if temperaturaGPUactual >= umbralTempGPU:
        if gpuTemp != temperaturaGPUactual:
            hayMensajeReportar = True
            infoEnviar += "Mi temperatura de GPU es: " + str(temperaturaGPUactual) + "C" + "\n"
    if ramActual >= umbralRamUsada:
        if gpuRam != ramActual:
            hayMensajeReportar = True
            infoEnviar += "Mi RAM usada es: " + str(ramActual) + "%" + "\n"
    if hayMensajeReportar:
        telegram.sendMessageForUrl(DestinatarioTelegram, infoEnviar)
    # Persist current readings for the next cycle's comparison.
    archivo = open("GPU.rpi", "w")
    archivo.write(str(temperaturaGPUactual))
    archivo.close()
    archivo = open("CPU.rpi", "w")
    archivo.write(str(temperaturaCPUactual))
    archivo.close()
    archivo = open("RAM.rpi", "w")
    archivo.write(str(ramActual))
    archivo.close()
    # Re-arm the scheduler to run again in `segundos` seconds.
    s.enter(segundos, 1, do_something, (sc,))
# Queue the first run and start the (blocking) scheduler loop.
s.enter(segundos, 1, do_something, (s,))
s.run()
| umbralTempCPU = 80
umbralTempGPU = 70
umbralRamUsada = 75
segundos = 600
import requests
import json
import platform
import subprocess as commands
import sched, time
class ObtenerIP:
def __init__(self):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
self.ipEquipo = s.getsockname()[0]
s.close()
def getIP(self):
return self.ipEquipo
myIP = ObtenerIP().getIP()
nombreEstacion = ""
req = requests.get(
'https://paul.fcv.org:8443/NurcallApp/NurcallAppServlet?Proceso=listNurcall&Estacion=00&raspberry=' + myIP,
verify=False,
timeout=5)
respuesta = str(req.text)
listaDispositivosNurcall = []
if len(req.text)>5:
res = json.loads(req.text)
nombreEstacion = ""
for objeto in res:
listaDispositivosNurcall.append((objeto["ipLampara"], objeto["descripcionLampara"]))
nombreEstacion = objeto["nombreEstacion"]
break
TOKEN = "673863930:<KEY>"
DestinatarioTelegram = -356316070
class TelegramService():
"""
AUTHOR: WISROVI
"""
Token = ""
def __init__(self, token):
self.Token = token
def sendMessageForUrl(self, Id_group, Mensaje):
url = "https://api.telegram.org/bot" + self.Token
url += "/sendMessage?chat_id=" + str(Id_group)
url += "&text=" + Mensaje
respuestaGet = requests.get(url, timeout=15)
if respuestaGet.status_code == 200:
return True
else:
return False
def verActualizacionesBot(self):
url = "https://api.telegram.org/bot" + self.Token
url += "/getUpdates"
respuestaGet = requests.get(url)
if respuestaGet.status_code == 200:
print(respuestaGet.content)
else:
print("Error solicitar info sobre los chat del bot")
telegram = TelegramService(TOKEN)
def get_cpu_temp():
tempFile = open("/sys/class/thermal/thermal_zone0/temp")
cpu_temp = tempFile.read()
tempFile.close()
return float(cpu_temp) / 1000
# Mostrar temperatura en grados Fahrenheit
# return float(1.8*cpu_temp)+32
def get_gpu_temp():
gpu_temp = commands.getoutput('/opt/vc/bin/vcgencmd measure_temp').replace('temp=', " ").replace("'C", " ")
return float(gpu_temp)
# Mostrar temperatura en grados Fahrenheit
# return float(1.8* gpu_temp)+32
# Return RAM information (unit=kb) in a list
# Index 0: total RAM
# Index 1: used RAM
# Index 2: free RAM
def getRAMinfo():
p = os.popen('free')
i = 0
while 1:
i = i + 1
line = p.readline()
if i==2:
return(line.split()[1:4])
def obtenerPorcentajeRamUsada():
ram = getRAMinfo()
return round((float(ram[1]) * 100) / float(ram[0]))
s = sched.scheduler(time.time, time.sleep)
import os
import os.path as path
if path.exists("CPU.rpi") == False:
archivo = open("CPU.rpi", "w")
archivo.write(str(round(get_cpu_temp())))
archivo.close()
if path.exists("GPU.rpi") == False:
archivo = open("GPU.rpi", "w")
archivo.write(str(round(get_gpu_temp())))
archivo.close()
if path.exists("RAM.rpi") == False:
archivo = open("RAM.rpi", "w")
archivo.write(str(obtenerPorcentajeRamUsada()))
archivo.close()
primerInicio = True
def do_something(sc):
archivo = open("CPU.rpi", "r")
cpuTemp = float(archivo.read())
archivo.close()
archivo = open("GPU.rpi", "r")
gpuTemp = float(archivo.read())
archivo.close()
archivo = open("RAM.rpi", "r")
gpuRam = float(archivo.read())
archivo.close()
if primerInicio:
cpuTemp = 0
gpuTemp = 0
gpuRam = 0
temperaturaCPUactual = round(get_cpu_temp())
temperaturaGPUactual = round(get_gpu_temp())
ramActual = obtenerPorcentajeRamUsada()
hayMensajeReportar = False
infoEnviar = "Esto es una alerta del sistema de:" + nombreEstacion + "\n"
#infoEnviar += "Soy la raspberry con IP: " + myIP + "\n"
#infoEnviar += "SO: " + platform.system() + "\n"
#infoEnviar += "Nombre equipo: " + platform.node() + "\n"
#infoEnviar += "Procesador: " + platform.machine() + "\n"
#infoEnviar += "Arquitectura: " + platform.architecture()[0] + "\n"
#infoEnviar += "Version Python: " + platform.python_version() + "\n"
if temperaturaCPUactual >= umbralTempCPU:
if cpuTemp != temperaturaCPUactual:
hayMensajeReportar = True
infoEnviar += "Mi temperatura de CPU es: " + str(temperaturaCPUactual) + "C" + "\n"
if temperaturaGPUactual >= umbralTempGPU:
if gpuTemp != temperaturaGPUactual:
hayMensajeReportar = True
infoEnviar += "Mi temperatura de GPU es: " + str(temperaturaGPUactual) + "C" + "\n"
if ramActual >= umbralRamUsada:
if gpuRam != ramActual:
hayMensajeReportar = True
infoEnviar += "Mi RAM usada es: " + str(ramActual) + "%" + "\n"
if hayMensajeReportar:
telegram.sendMessageForUrl(DestinatarioTelegram, infoEnviar)
archivo = open("GPU.rpi", "w")
archivo.write(str(temperaturaGPUactual))
archivo.close()
archivo = open("CPU.rpi", "w")
archivo.write(str(temperaturaCPUactual))
archivo.close()
archivo = open("RAM.rpi", "w")
archivo.write(str(ramActual))
archivo.close()
s.enter(segundos, 1, do_something, (sc,))
s.enter(segundos, 1, do_something, (s,))
s.run() | es | 0.139489 | AUTHOR: WISROVI # Mostrar temperatura en grados Fahrenheit # return float(1.8*cpu_temp)+32 # Mostrar temperatura en grados Fahrenheit # return float(1.8* gpu_temp)+32 # Return RAM information (unit=kb) in a list # Index 0: total RAM # Index 1: used RAM # Index 2: free RAM #infoEnviar += "Soy la raspberry con IP: " + myIP + "\n" #infoEnviar += "SO: " + platform.system() + "\n" #infoEnviar += "Nombre equipo: " + platform.node() + "\n" #infoEnviar += "Procesador: " + platform.machine() + "\n" #infoEnviar += "Arquitectura: " + platform.architecture()[0] + "\n" #infoEnviar += "Version Python: " + platform.python_version() + "\n" | 2.413883 | 2 |
Evaluation/packet_in_idle_new/merger.py | ManuelMeinen/DC-MONDRIAN | 0 | 6622107 | import pandas as pd
def merge(df1, df2):
    """Combine two packet-in reports by summing counts per second (0-600)."""
    total = df1['No_of_Packets'] + df2['No_of_Packets']
    frame = pd.DataFrame(
        {'second': range(601), 'No_of_Packets': total[0:601]},
        columns=['second', 'No_of_Packets'],
    )
    frame['No_of_Packets'] = frame['No_of_Packets'].astype(int)
    return frame
if __name__=='__main__':
    # Pass 1: merge the per-controller (ports 6633/6634) packet-in reports
    # for each idle-timeout setting into res_IDLE_<t>.csv.
    timeouts = [1, 2, 4]
    for t in timeouts:
        idle6633 = pd.read_csv('packet-in_report_6633_HARD_TIMEOUT_30_IDLE_TIMEOUT_'+str(t)+'.bench')
        idle6634 = pd.read_csv('packet-in_report_6634_HARD_TIMEOUT_30_IDLE_TIMEOUT_'+str(t)+'.bench')
        result = merge(idle6633, idle6634)
        result.to_csv('res_IDLE_'+str(t)+'.csv', index=False)
    # Pass 2: collect all settings into one wide CSV, one column per timeout.
    tot = {'second':range(601)}
    for t in timeouts:
        df = pd.read_csv('res_IDLE_'+str(t)+'.csv')
        tot['IDLE_TIMEOUT='+str(t)] = df['No_of_Packets']
    total_df = pd.DataFrame(tot)
    total_df.to_csv('res_IDLE.csv', index=False)
def merge(df1, df2):
total = df1['No_of_Packets']+df2['No_of_Packets']
result = {
'second': range(601),
'No_of_Packets': total[0:601]
}
res = pd.DataFrame(result,columns=['second','No_of_Packets'])
res['No_of_Packets'] = res['No_of_Packets'].astype(int)
return res
if __name__=='__main__':
timeouts = [1, 2, 4]
for t in timeouts:
idle6633 = pd.read_csv('packet-in_report_6633_HARD_TIMEOUT_30_IDLE_TIMEOUT_'+str(t)+'.bench')
idle6634 = pd.read_csv('packet-in_report_6634_HARD_TIMEOUT_30_IDLE_TIMEOUT_'+str(t)+'.bench')
result = merge(idle6633, idle6634)
result.to_csv('res_IDLE_'+str(t)+'.csv', index=False)
tot = {'second':range(601)}
for t in timeouts:
df = pd.read_csv('res_IDLE_'+str(t)+'.csv')
tot['IDLE_TIMEOUT='+str(t)] = df['No_of_Packets']
total_df = pd.DataFrame(tot)
total_df.to_csv('res_IDLE.csv', index=False) | none | 1 | 2.912253 | 3 | |
bomeba0/external/__init__.py | aloctavodia/bomeba0 | 0 | 6622108 | <filename>bomeba0/external/__init__.py
from .gaussian import gen_tripeptides | <filename>bomeba0/external/__init__.py
from .gaussian import gen_tripeptides | none | 1 | 1.102084 | 1 | |
usercustomize.py | aroberge/ideas | 36 | 6622109 | <gh_stars>10-100
from ideas import experimental_syntax_encoding
print(f" --> {__file__} was executed")
| from ideas import experimental_syntax_encoding
print(f" --> {__file__} was executed") | none | 1 | 1.27316 | 1 | |
python/get_twitter_followers_id.py | zixels/booklio | 0 | 6622110 | # gets user ids of twitter followers of a specific account (user_screen_name) and
# saves them in a csv file named user_screen_name+'_followers_twitter_ids.csv'
# Imports
import os
import codecs
import time
import tweepy
# Inputs
user_screen_name = "AminSarafraz"
consumer_key = os.getenv("TWITTER_CONSUMER_KEY")
consumer_secret = os.getenv("TWITTER_CONSUMER_SECRET")
access_token = os.getenv("TWITTER_ACCESS_TOKEN")
access_token_secret = os.getenv("TWITTER_TOKEN_SECRET")
# oAuth for twitter API
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Get user ids for the followers of a user
user_ids = []
total_number_user_ids = 0
for page in tweepy.Cursor(api.followers_ids, screen_name=user_screen_name).pages():
user_ids.extend(page)
with codecs.open(user_screen_name + '_followers_twitter_ids.csv', encoding='utf-8', mode='a+') as text_file:
for user_id in user_ids:
text_file.write(u'{} \n'.format(user_id))
total_number_user_ids += len(user_ids)
print('Total number of user ids extracted from the followers of', user_screen_name, ':', total_number_user_ids)
user_ids = []
time.sleep(61) # To avoid exceeding Twitter API rate limit (15 GETS every 15 minutes)
| # gets user ids of twitter followers of a specific account (user_screen_name) and
# saves them in a csv file named user_screen_name+'_followers_twitter_ids.csv'
# Imports
import os
import codecs
import time
import tweepy
# Inputs
user_screen_name = "AminSarafraz"
consumer_key = os.getenv("TWITTER_CONSUMER_KEY")
consumer_secret = os.getenv("TWITTER_CONSUMER_SECRET")
access_token = os.getenv("TWITTER_ACCESS_TOKEN")
access_token_secret = os.getenv("TWITTER_TOKEN_SECRET")
# oAuth for twitter API
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Get user ids for the followers of a user
user_ids = []
total_number_user_ids = 0
for page in tweepy.Cursor(api.followers_ids, screen_name=user_screen_name).pages():
user_ids.extend(page)
with codecs.open(user_screen_name + '_followers_twitter_ids.csv', encoding='utf-8', mode='a+') as text_file:
for user_id in user_ids:
text_file.write(u'{} \n'.format(user_id))
total_number_user_ids += len(user_ids)
print('Total number of user ids extracted from the followers of', user_screen_name, ':', total_number_user_ids)
user_ids = []
time.sleep(61) # To avoid exceeding Twitter API rate limit (15 GETS every 15 minutes)
| en | 0.765515 | # gets user ids of twitter followers of a specific account (user_screen_name) and # saves them in a csv file named user_screen_name+'_followers_twitter_ids.csv' # Imports # Inputs # oAuth for twitter API # Get user ids for the followers of a user # To avoid exceeding Twitter API rate limit (15 GETS every 15 minutes) | 3.098141 | 3 |
tests/mock/i2c_checks_addr.py | jontrulson/mraa | 1,167 | 6622111 | #!/usr/bin/env python
# Author: <NAME> <<EMAIL>>
# Copyright (c) 2016 <NAME>.
#
# SPDX-License-Identifier: MIT
import mraa as m
import unittest as u
from i2c_checks_shared import *
class I2cChecksAddr(u.TestCase):
    """Unit tests for mraa I2c.address() argument validation (mock platform)."""
    def setUp(self):
        # Fresh bus handle per test; bus number comes from the shared module.
        self.i2c = m.I2c(MRAA_I2C_BUS_NUM)
    def tearDown(self):
        del self.i2c
    def test_i2c_address(self):
        # 0x10 is a valid 7-bit slave address.
        self.assertEqual(self.i2c.address(0x10),
                         m.SUCCESS,
                         "Setting address to 0x10 did not return success")
    def test_i2c_address_invalid_bigger_than_max(self):
        # For standard 7-bit addressing 0x7F is max address
        self.assertEqual(self.i2c.address(0xFF),
                         m.ERROR_INVALID_PARAMETER,
                         "Setting address to 0xFF did not return INVALID_PARAMETER")
    def test_i2c_address_invalid_smaller_than_min(self):
        # Negative addresses are rejected with OverflowError by the binding.
        self.assertRaises(OverflowError, self.i2c.address, -100)
if __name__ == "__main__":
    u.main()
| #!/usr/bin/env python
# Author: <NAME> <<EMAIL>>
# Copyright (c) 2016 <NAME>.
#
# SPDX-License-Identifier: MIT
import mraa as m
import unittest as u
from i2c_checks_shared import *
class I2cChecksAddr(u.TestCase):
def setUp(self):
self.i2c = m.I2c(MRAA_I2C_BUS_NUM)
def tearDown(self):
del self.i2c
def test_i2c_address(self):
self.assertEqual(self.i2c.address(0x10),
m.SUCCESS,
"Setting address to 0x10 did not return success")
def test_i2c_address_invalid_bigger_than_max(self):
# For standard 7-bit addressing 0x7F is max address
self.assertEqual(self.i2c.address(0xFF),
m.ERROR_INVALID_PARAMETER,
"Setting address to 0xFF did not return INVALID_PARAMETER")
def test_i2c_address_invalid_smaller_than_min(self):
self.assertRaises(OverflowError, self.i2c.address, -100)
if __name__ == "__main__":
u.main()
| en | 0.492743 | #!/usr/bin/env python # Author: <NAME> <<EMAIL>> # Copyright (c) 2016 <NAME>. # # SPDX-License-Identifier: MIT # For standard 7-bit addressing 0x7F is max address | 2.669222 | 3 |
formsnext/formsnext/report/survey_all_results/survey_all_results.py | ElasticRun/FormsNext | 3 | 6622112 | <reponame>ElasticRun/FormsNext
# Copyright (c) 2013, ElasticRun and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import pandas as pd
def execute(filters=None):
    """Build the "Survey All Results" report.

    Args:
        filters: report filters; must contain the "survey" name to load.

    Returns:
        (columns, data): one row per responding user, one column per survey
        question, with the user's concatenated response values as cells.
    """
    survey = filters.get("survey")
    # Bound %(survey)s placeholder instead of str.format: the driver escapes
    # the value, preventing SQL injection through the report filter.
    query = """
        SELECT
            tuf.user,
            tq.question_string AS Question,
            tq.evaluate AS Evaluate,
            (CASE WHEN tq.evaluate THEN trl.score ELSE NULL END) AS Score,
            GROUP_CONCAT(trv.value) AS Responses
        FROM
            `tabUser Feedback` tuf
        INNER JOIN `tabUser Response` tur
            ON tur.parent = tuf.name
        INNER JOIN `tabSection Item` tq
            ON tq.name = tur.question
        INNER JOIN `tabResponse Value Link` trl
            ON trl.name = tur.response
        INNER JOIN `tabResponse Value` trv
            ON trl.name = trv.parent and
               trl.latest_version = trv.version
        WHERE tuf.survey = %(survey)s
        GROUP BY tuf.user, tq.name
    """
    results = frappe.db.sql(query, {"survey": survey}, as_dict=1)
    results_df = pd.DataFrame.from_records(results)
    # Wide format: users as rows, questions as columns; max() selects the
    # single grouped response string per (user, question) cell.
    pivoted_results = results_df.pivot_table(
        index='user', columns=['Question'], values='Responses',
        aggfunc={'Responses': max}).reset_index()
    columns = pivoted_results.columns.tolist()
    data = pivoted_results.values.tolist()
    return columns, data
| # Copyright (c) 2013, ElasticRun and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import pandas as pd
def execute(filters=None):
survey = filters.get("survey")
query = """
SELECT
tuf.user,
tq.question_string AS Question,
tq.evaluate AS Evaluate,
(CASE WHEN tq.evaluate THEN trl.score ELSE NULL END) AS Score,
GROUP_CONCAT(trv.value) AS Responses
FROM
`tabUser Feedback` tuf
INNER JOIN `tabUser Response` tur
ON tur.parent = tuf.name
INNER JOIN `tabSection Item` tq
ON tq.name = tur.question
INNER JOIN `tabResponse Value Link` trl
ON trl.name = tur.response
INNER JOIN `tabResponse Value` trv
ON trl.name = trv.parent and
trl.latest_version = trv.version
WHERE tuf.survey = '{survey}'
GROUP BY tuf.user, tq.name
""".format(survey=survey)
results = frappe.db.sql(query, as_dict = 1)
results_df = pd.DataFrame.from_records(results)
pivoted_results = results_df.pivot_table(index = 'user', columns = ['Question'], values = 'Responses', aggfunc={'Responses': max}).reset_index()
columns = pivoted_results.columns.tolist()
data = pivoted_results.values.tolist()
return columns, data | en | 0.429014 | # Copyright (c) 2013, ElasticRun and contributors # For license information, please see license.txt SELECT tuf.user, tq.question_string AS Question, tq.evaluate AS Evaluate, (CASE WHEN tq.evaluate THEN trl.score ELSE NULL END) AS Score, GROUP_CONCAT(trv.value) AS Responses FROM `tabUser Feedback` tuf INNER JOIN `tabUser Response` tur ON tur.parent = tuf.name INNER JOIN `tabSection Item` tq ON tq.name = tur.question INNER JOIN `tabResponse Value Link` trl ON trl.name = tur.response INNER JOIN `tabResponse Value` trv ON trl.name = trv.parent and trl.latest_version = trv.version WHERE tuf.survey = '{survey}' GROUP BY tuf.user, tq.name | 2.429164 | 2 |
forecast/endpoints.py | brennv/surf-api | 2 | 6622113 | <reponame>brennv/surf-api
from .data import (get_response, parse_data, get_forecast, get_swell, get_wave,
get_wind_direction, get_wind_speed)
from flask_restful import Resource
class Health(Resource):
    # Liveness probe endpoint; the docstring below is a flasgger/Swagger spec.
    def get(self):
        """
        API health check
        ---
        tags:
          - status
        responses:
          200:
            description: Status check
        """
        # Constant payload: if this handler runs at all, the API is up.
        return {'status': 'ok'}, 200
class Point(Resource):
    # Full forecast for a coordinate; docstring below is a flasgger/Swagger spec.
    def get(self, lat, lon):
        """
        Point forecast
        ---
        tags:
          - point
        parameters:
          - name: lat
            in: path
            type: string
            required: true
            default: 37.583
          - name: lon
            in: path
            type: string
            required: true
            default: -122.952
        responses:
          200:
            description: Point forecast
        """
        # Fetch raw upstream data for (lat, lon); helpers come from .data.
        response = get_response(lat, lon)
        if response.status_code == 200:
            data = parse_data(response)
            result = get_forecast(data)
        else:
            # Upstream failure: empty body, propagate its status code.
            result = {}
        return result, response.status_code
class PointSwell(Resource):
    # Swell subset of the forecast; docstring below is a flasgger/Swagger spec.
    def get(self, lat, lon):
        """
        Swell direction, height, period
        ---
        tags:
          - point
        parameters:
          - name: lat
            in: path
            type: string
            required: true
            default: 37.583
          - name: lon
            in: path
            type: string
            required: true
            default: -122.952
        responses:
          200:
            description: Swell direction, height, period
        """
        # Same fetch/parse pipeline as Point, extracting only swell fields.
        response = get_response(lat, lon)
        if response.status_code == 200:
            data = parse_data(response)
            result = get_swell(data)
        else:
            # Upstream failure: empty body, propagate its status code.
            result = {}
        return result, response.status_code
'''
class PointSwellDirection(Resource):
def get(self, lat, lon):
"""
Swell direction
---
tags:
- point
parameters:
- name: lat
in: path
type: string
required: true
default: 37.583
- name: lon
in: path
type: string
required: true
default: -122.952
responses:
200:
description: Swell direction
"""
return get_swell_direction(lat, lon), response.status_code
class PointSwellHeight(Resource):
def get(self, lat, lon):
"""
Swell height
---
tags:
- point
parameters:
- name: lat
in: path
type: string
required: true
default: 37.583
- name: lon
in: path
type: string
required: true
default: -122.952
responses:
200:
description: Swell height
"""
return get_swell_height(lat, lon), response.status_code
class PointSwellPeriod(Resource):
def get(self, lat, lon):
"""
Swell period
---
tags:
- point
parameters:
- name: lat
in: path
type: string
required: true
default: 37.583
- name: lon
in: path
type: string
required: true
default: -122.952
responses:
200:
description: Swell period
"""
return get_swell_period(lat, lon), response.status_code
'''
class PointWave(Resource):
    """Wave height for one point."""

    def get(self, lat, lon):
        """
        Wave height
        ---
        tags:
          - point
        parameters:
          - name: lat
            in: path
            type: string
            required: true
            default: 37.583
          - name: lon
            in: path
            type: string
            required: true
            default: -122.952
        responses:
          200:
            description: Wave height
        """
        upstream = get_response(lat, lon)
        # Non-200 upstream answers yield an empty body with the same status.
        if upstream.status_code != 200:
            return {}, upstream.status_code
        wave = get_wave(parse_data(upstream))
        return wave, upstream.status_code
# NOTE(review): dead code — the combined wind resource below is disabled by
# being wrapped in a module-level string literal. If re-enabled as-is it would
# fail: ``response`` is never defined inside the handler and get_wind is not
# imported. Prefer deleting this block outright rather than un-commenting it.
'''
class PointWind(Resource):
    def get(self, lat, lon):
        """
        Wind direction, speed
        ---
        tags:
          - point
        parameters:
          - name: lat
            in: path
            type: string
            required: true
            default: 37.583
          - name: lon
            in: path
            type: string
            required: true
            default: -122.952
        responses:
          200:
            description: Wind direction, speed
        """
        return get_wind(lat, lon), response.status_code
'''
class PointWindDirection(Resource):
    """Wind direction for one point."""

    def get(self, lat, lon):
        """
        Wind direction
        ---
        tags:
          - point
        parameters:
          - name: lat
            in: path
            type: string
            required: true
            default: 37.583
          - name: lon
            in: path
            type: string
            required: true
            default: -122.952
        responses:
          200:
            description: Wind direction
        """
        upstream = get_response(lat, lon)
        # Non-200 upstream answers yield an empty body with the same status.
        if upstream.status_code != 200:
            return {}, upstream.status_code
        direction = get_wind_direction(parse_data(upstream))
        return direction, upstream.status_code
class PointWindSpeed(Resource):
    """Wind speed for one point."""

    def get(self, lat, lon):
        """
        Wind speed
        ---
        tags:
          - point
        parameters:
          - name: lat
            in: path
            type: string
            required: true
            default: 37.583
          - name: lon
            in: path
            type: string
            required: true
            default: -122.952
        responses:
          200:
            description: Wind speed
        """
        upstream = get_response(lat, lon)
        # Non-200 upstream answers yield an empty body with the same status.
        if upstream.status_code != 200:
            return {}, upstream.status_code
        speed = get_wind_speed(parse_data(upstream))
        return speed, upstream.status_code
from .data import (get_response, parse_data, get_forecast, get_swell, get_wave,
get_wind_direction, get_wind_speed)
from flask_restful import Resource
class Health(Resource):
    """Liveness endpoint used to verify that the API is up."""

    def get(self):
        """
        API health check
        ---
        tags:
          - status
        responses:
          200:
            description: Status check
        """
        # Static payload: reaching this handler at all proves the service
        # is alive, so the answer is always 200.
        payload = {'status': 'ok'}
        return payload, 200
class Point(Resource):
    """Full point forecast for one latitude/longitude pair."""

    def get(self, lat, lon):
        """
        Point forecast
        ---
        tags:
          - point
        parameters:
          - name: lat
            in: path
            type: string
            required: true
            default: 37.583
          - name: lon
            in: path
            type: string
            required: true
            default: -122.952
        responses:
          200:
            description: Point forecast
        """
        upstream = get_response(lat, lon)
        # Guard clause: any non-200 upstream answer maps to an empty payload
        # while still propagating the upstream status code.
        if upstream.status_code != 200:
            return {}, upstream.status_code
        forecast = get_forecast(parse_data(upstream))
        return forecast, upstream.status_code
class PointSwell(Resource):
    """Swell direction, height and period for one point."""

    def get(self, lat, lon):
        """
        Swell direction, height, period
        ---
        tags:
          - point
        parameters:
          - name: lat
            in: path
            type: string
            required: true
            default: 37.583
          - name: lon
            in: path
            type: string
            required: true
            default: -122.952
        responses:
          200:
            description: Swell direction, height, period
        """
        upstream = get_response(lat, lon)
        # Non-200 upstream answers yield an empty body with the same status.
        if upstream.status_code != 200:
            return {}, upstream.status_code
        swell = get_swell(parse_data(upstream))
        return swell, upstream.status_code
# NOTE(review): dead code — the three per-field swell resources below are
# disabled by being wrapped in a module-level string literal. If re-enabled
# as-is they would fail: ``response`` is never defined inside the handlers,
# and get_swell_direction/get_swell_height/get_swell_period are not imported.
# Prefer deleting this block outright rather than un-commenting it.
'''
class PointSwellDirection(Resource):
    def get(self, lat, lon):
        """
        Swell direction
        ---
        tags:
          - point
        parameters:
          - name: lat
            in: path
            type: string
            required: true
            default: 37.583
          - name: lon
            in: path
            type: string
            required: true
            default: -122.952
        responses:
          200:
            description: Swell direction
        """
        return get_swell_direction(lat, lon), response.status_code
class PointSwellHeight(Resource):
    def get(self, lat, lon):
        """
        Swell height
        ---
        tags:
          - point
        parameters:
          - name: lat
            in: path
            type: string
            required: true
            default: 37.583
          - name: lon
            in: path
            type: string
            required: true
            default: -122.952
        responses:
          200:
            description: Swell height
        """
        return get_swell_height(lat, lon), response.status_code
class PointSwellPeriod(Resource):
    def get(self, lat, lon):
        """
        Swell period
        ---
        tags:
          - point
        parameters:
          - name: lat
            in: path
            type: string
            required: true
            default: 37.583
          - name: lon
            in: path
            type: string
            required: true
            default: -122.952
        responses:
          200:
            description: Swell period
        """
        return get_swell_period(lat, lon), response.status_code
'''
class PointWave(Resource):
    """Wave height for one point."""

    def get(self, lat, lon):
        """
        Wave height
        ---
        tags:
          - point
        parameters:
          - name: lat
            in: path
            type: string
            required: true
            default: 37.583
          - name: lon
            in: path
            type: string
            required: true
            default: -122.952
        responses:
          200:
            description: Wave height
        """
        upstream = get_response(lat, lon)
        # Non-200 upstream answers yield an empty body with the same status.
        if upstream.status_code != 200:
            return {}, upstream.status_code
        wave = get_wave(parse_data(upstream))
        return wave, upstream.status_code
# NOTE(review): dead code — the combined wind resource below is disabled by
# being wrapped in a module-level string literal. If re-enabled as-is it would
# fail: ``response`` is never defined inside the handler and get_wind is not
# imported. Prefer deleting this block outright rather than un-commenting it.
'''
class PointWind(Resource):
    def get(self, lat, lon):
        """
        Wind direction, speed
        ---
        tags:
          - point
        parameters:
          - name: lat
            in: path
            type: string
            required: true
            default: 37.583
          - name: lon
            in: path
            type: string
            required: true
            default: -122.952
        responses:
          200:
            description: Wind direction, speed
        """
        return get_wind(lat, lon), response.status_code
'''
class PointWindDirection(Resource):
    """Wind direction for one point."""

    def get(self, lat, lon):
        """
        Wind direction
        ---
        tags:
          - point
        parameters:
          - name: lat
            in: path
            type: string
            required: true
            default: 37.583
          - name: lon
            in: path
            type: string
            required: true
            default: -122.952
        responses:
          200:
            description: Wind direction
        """
        upstream = get_response(lat, lon)
        # Non-200 upstream answers yield an empty body with the same status.
        if upstream.status_code != 200:
            return {}, upstream.status_code
        direction = get_wind_direction(parse_data(upstream))
        return direction, upstream.status_code
class PointWindSpeed(Resource):
    """Wind speed for one point."""

    def get(self, lat, lon):
        """
        Wind speed
        ---
        tags:
          - point
        parameters:
          - name: lat
            in: path
            type: string
            required: true
            default: 37.583
          - name: lon
            in: path
            type: string
            required: true
            default: -122.952
        responses:
          200:
            description: Wind speed
        """
        upstream = get_response(lat, lon)
        # Non-200 upstream answers yield an empty body with the same status.
        if upstream.status_code != 200:
            return {}, upstream.status_code
        speed = get_wind_speed(parse_data(upstream))
        return speed, upstream.status_code
required: true default: 37.583 - name: lon in: path type: string required: true default: -122.952 responses: 200: description: Wind direction, speed """ return get_wind(lat, lon), response.status_code Wind direction --- tags: - point parameters: - name: lat in: path type: string required: true default: 37.583 - name: lon in: path type: string required: true default: -122.952 responses: 200: description: Wind direction Wind speed --- tags: - point parameters: - name: lat in: path type: string required: true default: 37.583 - name: lon in: path type: string required: true default: -122.952 responses: 200: description: Wind speed | 2.789472 | 3 |
crawlino/models/__init__.py | BBVA/crawlino | 1 | 6622114 | <gh_stars>1-10
from .bases import *
from .crawlino_model import *
from .plugins_models import *
from .input_model import *
| from .bases import *
from .crawlino_model import *
from .plugins_models import *
from .input_model import * | none | 1 | 1.127569 | 1 | |
circuitPython/examples/audio-playback/code.py | BRTSG-FOSS/pico-bteve | 1 | 6622115 | from brteve.brt_eve_bt817_8 import BrtEve
from brteve.brt_eve_rp2040 import BrtEveRP2040
from audio_playback.audio_playback import audio_playback
from audio_playback.widgets import widgets_dialog_yes_no
host = BrtEveRP2040()
eve = BrtEve(host)
eve.init(resolution="1280x800", touch="goodix")
# Store calibration setting
eve.calibrate()
#eve.wr32(eve.REG_TOUCH_TRANSFORM_A, 0xfffefefc);
#eve.wr32(eve.REG_TOUCH_TRANSFORM_B, 0xfffffcbf);
#eve.wr32(eve.REG_TOUCH_TRANSFORM_C, 0x506adb4);
#eve.wr32(eve.REG_TOUCH_TRANSFORM_D, 0xfffffed1);
#eve.wr32(eve.REG_TOUCH_TRANSFORM_E, 0xfffefc79);
#eve.wr32(eve.REG_TOUCH_TRANSFORM_F, 0x32c3211);
#audio_playback(eve)
yes = widgets_dialog_yes_no(eve, "Preparing flash",
"Write BT81X_Flash.bin from sdcard to EVE's connected flash at first?", 120, False) == True
if yes == True:
eve.storage.write_flash_with_progressbar('/sd/pico-brteve/circuitPython/examples/audio-playback/BT81X_Flash.bin', 0)
audio_playback(eve)
| from brteve.brt_eve_bt817_8 import BrtEve
from brteve.brt_eve_rp2040 import BrtEveRP2040
from audio_playback.audio_playback import audio_playback
from audio_playback.widgets import widgets_dialog_yes_no
host = BrtEveRP2040()
eve = BrtEve(host)
eve.init(resolution="1280x800", touch="goodix")
# Store calibration setting
eve.calibrate()
#eve.wr32(eve.REG_TOUCH_TRANSFORM_A, 0xfffefefc);
#eve.wr32(eve.REG_TOUCH_TRANSFORM_B, 0xfffffcbf);
#eve.wr32(eve.REG_TOUCH_TRANSFORM_C, 0x506adb4);
#eve.wr32(eve.REG_TOUCH_TRANSFORM_D, 0xfffffed1);
#eve.wr32(eve.REG_TOUCH_TRANSFORM_E, 0xfffefc79);
#eve.wr32(eve.REG_TOUCH_TRANSFORM_F, 0x32c3211);
#audio_playback(eve)
yes = widgets_dialog_yes_no(eve, "Preparing flash",
"Write BT81X_Flash.bin from sdcard to EVE's connected flash at first?", 120, False) == True
if yes == True:
eve.storage.write_flash_with_progressbar('/sd/pico-brteve/circuitPython/examples/audio-playback/BT81X_Flash.bin', 0)
audio_playback(eve)
| en | 0.214702 | # Store calibration setting #eve.wr32(eve.REG_TOUCH_TRANSFORM_A, 0xfffefefc); #eve.wr32(eve.REG_TOUCH_TRANSFORM_B, 0xfffffcbf); #eve.wr32(eve.REG_TOUCH_TRANSFORM_C, 0x506adb4); #eve.wr32(eve.REG_TOUCH_TRANSFORM_D, 0xfffffed1); #eve.wr32(eve.REG_TOUCH_TRANSFORM_E, 0xfffefc79); #eve.wr32(eve.REG_TOUCH_TRANSFORM_F, 0x32c3211); #audio_playback(eve) | 1.908637 | 2 |
euporie/text.py | joouha/euporie | 505 | 6622116 | <reponame>joouha/euporie
# -*- coding: utf-8 -*-
"""Contains dpdated ANSI parsing and Formatted Text processing."""
from __future__ import annotations
import re
from typing import TYPE_CHECKING
from prompt_toolkit.formatted_text import ANSI as PTANSI
from prompt_toolkit.formatted_text import (
fragment_list_to_text,
split_lines,
to_formatted_text,
)
from prompt_toolkit.layout.margins import ScrollbarMargin
from prompt_toolkit.layout.processors import DynamicProcessor, Processor, Transformation
from prompt_toolkit.widgets import TextArea
if TYPE_CHECKING:
from typing import Any, Generator
from prompt_toolkit.formatted_text import StyleAndTextTuples
from prompt_toolkit.layout.processors import TransformationInput
__all__ = ["FormatTextProcessor", "FormattedTextArea", "ANSI"]
class FormatTextProcessor(Processor):
"""Applies formatted text to a TextArea."""
def __init__(self, formatted_text: "StyleAndTextTuples"):
"""Initiate the processor.
Args:
formatted_text: The text in a buffer but with formatting applied.
"""
self.formatted_text = formatted_text
super().__init__()
def apply_transformation(
self, transformation_input: "TransformationInput"
) -> "Transformation":
"""Apply text formatting to a line in a buffer."""
if not hasattr(self, "formatted_lines"):
self.formatted_lines = list(split_lines(self.formatted_text))
lineno = transformation_input.lineno
max_lineno = len(self.formatted_lines) - 1
if lineno > max_lineno:
lineno = max_lineno
line = self.formatted_lines[lineno]
return Transformation(line)
class FormattedTextArea(TextArea):
"""Applies formatted text to a TextArea."""
def __init__(
self, formatted_text: "StyleAndTextTuples", *args: "Any", **kwargs: "Any"
):
"""Initialise a `FormattedTextArea` instance.
Args:
formatted_text: A list of `(style, text)` tuples to display.
*args: Arguments to pass to `prompt_toolkit.widgets.TextArea`.
**kwargs: Key-word arguments to pass to `prompt_toolkit.widgets.TextArea`.
"""
input_processors = kwargs.pop("input_processors", [])
input_processors.append(DynamicProcessor(self.get_processor))
# The following is not type checked due to a currently open mypy bug
# https://github.com/python/mypy/issues/6799
super().__init__(
*args,
input_processors=input_processors,
**kwargs,
) # type: ignore
# Set the formatted text to display
self.formatted_text: "StyleAndTextTuples" = formatted_text
for margin in self.window.right_margins:
if isinstance(margin, ScrollbarMargin):
margin.up_arrow_symbol = "▲"
margin.down_arrow_symbol = "▼"
def get_processor(self) -> "FormatTextProcessor":
"""Generate a processor for the formatted text."""
return FormatTextProcessor(self.formatted_text)
@property
def formatted_text(self) -> "StyleAndTextTuples":
"""The formatted text."""
return self._formatted_text
@formatted_text.setter
def formatted_text(self, value: "StyleAndTextTuples") -> None:
"""Sets the formatted text."""
self._formatted_text = to_formatted_text(value)
self.text = fragment_list_to_text(value)
class ANSI(PTANSI):
"""Converts ANSI text into formatted text, preserving all control sequences."""
def __init__(self, value: "str") -> None:
"""Initiate the ANSI processor instance.
This replaces carriage returns to emulate terminal output.
Args:
value: The ANSI string to process.
"""
# Replace windows style newlines
value = value.replace("\r\n", "\n")
# Remove anything before a carriage return if there is something after it to
# emulate a carriage return in the output
value = re.sub("^.*\\r(?!\\n)", "", value, 0, re.MULTILINE)
super().__init__(value)
def _parse_corot(self) -> Generator[None, str, None]:
"""Coroutine that parses the ANSI escape sequences.
This is modified version of the ANSI parser from prompt_toolkit retains
all CSI escape sequences.
Yields:
Accepts characters from a string.
"""
style = ""
formatted_text = self._formatted_text
while True:
char = yield
sequence = char
# Everything between \001 and \002 should become a ZeroWidthEscape.
if char == "\001":
sequence = ""
while char != "\002":
char = yield
if char == "\002":
formatted_text.append(("[ZeroWidthEscape]", sequence))
break
else:
sequence += char
continue
# Check for backspace
elif char == "\x08":
# TODO - remove last character from last non-ZeroWidthEscape fragment
formatted_text.pop()
continue
elif char in ("\x1b", "\x9b"):
# Got a CSI sequence, try to compile a control sequence
char = yield
# Check for sixels
if char == "P":
# Got as DEC code
sequence += char
# We expect "p1;p2;p3;q" + sixel data + "\x1b\"
char = yield
while char != "\x1b":
sequence += char
char = yield
sequence += char
char = yield
if ord(char) == 0x5C:
sequence += char
formatted_text.append(("[ZeroWidthEscape]", sequence))
# char = yield
continue
# Check for hyperlinks
elif char == "]":
sequence += char
char = yield
if char == "8":
sequence += char
char = yield
if char == ";":
sequence += char
char = yield
while True:
sequence += char
if sequence[-2:] == "\x1b\\":
break
char = yield
formatted_text.append(("[ZeroWidthEscape]", sequence))
continue
elif (char == "[" and sequence == "\x1b") or sequence == "\x9b":
if sequence == "\x1b":
sequence += char
char = yield
# Next are any number (including none) of "parameter bytes"
params = []
current = ""
while 0x30 <= ord(char) <= 0x3F:
# Parse list of integer parameters
sequence += char
if char.isdigit():
current += char
else:
params.append(min(int(current or 0), 9999))
if char == ";":
current = ""
char = yield
if current:
params.append(min(int(current or 0), 9999))
# then any number of "intermediate bytes"
while 0x20 <= ord(char) <= 0x2F:
sequence += char
char = yield
# finally by a single "final byte"
if 0x40 <= ord(char) <= 0x7E:
sequence += char
# Check if that escape sequence was a style:
if char == "m":
self._select_graphic_rendition(params)
style = self._create_style_string()
# Otherwise print a zero-width control sequence
else:
formatted_text.append(("[ZeroWidthEscape]", sequence))
continue
formatted_text.append((style, sequence))
| # -*- coding: utf-8 -*-
"""Contains dpdated ANSI parsing and Formatted Text processing."""
from __future__ import annotations
import re
from typing import TYPE_CHECKING
from prompt_toolkit.formatted_text import ANSI as PTANSI
from prompt_toolkit.formatted_text import (
fragment_list_to_text,
split_lines,
to_formatted_text,
)
from prompt_toolkit.layout.margins import ScrollbarMargin
from prompt_toolkit.layout.processors import DynamicProcessor, Processor, Transformation
from prompt_toolkit.widgets import TextArea
if TYPE_CHECKING:
from typing import Any, Generator
from prompt_toolkit.formatted_text import StyleAndTextTuples
from prompt_toolkit.layout.processors import TransformationInput
__all__ = ["FormatTextProcessor", "FormattedTextArea", "ANSI"]
class FormatTextProcessor(Processor):
"""Applies formatted text to a TextArea."""
def __init__(self, formatted_text: "StyleAndTextTuples"):
"""Initiate the processor.
Args:
formatted_text: The text in a buffer but with formatting applied.
"""
self.formatted_text = formatted_text
super().__init__()
def apply_transformation(
self, transformation_input: "TransformationInput"
) -> "Transformation":
"""Apply text formatting to a line in a buffer."""
if not hasattr(self, "formatted_lines"):
self.formatted_lines = list(split_lines(self.formatted_text))
lineno = transformation_input.lineno
max_lineno = len(self.formatted_lines) - 1
if lineno > max_lineno:
lineno = max_lineno
line = self.formatted_lines[lineno]
return Transformation(line)
class FormattedTextArea(TextArea):
"""Applies formatted text to a TextArea."""
def __init__(
self, formatted_text: "StyleAndTextTuples", *args: "Any", **kwargs: "Any"
):
"""Initialise a `FormattedTextArea` instance.
Args:
formatted_text: A list of `(style, text)` tuples to display.
*args: Arguments to pass to `prompt_toolkit.widgets.TextArea`.
**kwargs: Key-word arguments to pass to `prompt_toolkit.widgets.TextArea`.
"""
input_processors = kwargs.pop("input_processors", [])
input_processors.append(DynamicProcessor(self.get_processor))
# The following is not type checked due to a currently open mypy bug
# https://github.com/python/mypy/issues/6799
super().__init__(
*args,
input_processors=input_processors,
**kwargs,
) # type: ignore
# Set the formatted text to display
self.formatted_text: "StyleAndTextTuples" = formatted_text
for margin in self.window.right_margins:
if isinstance(margin, ScrollbarMargin):
margin.up_arrow_symbol = "▲"
margin.down_arrow_symbol = "▼"
def get_processor(self) -> "FormatTextProcessor":
"""Generate a processor for the formatted text."""
return FormatTextProcessor(self.formatted_text)
@property
def formatted_text(self) -> "StyleAndTextTuples":
"""The formatted text."""
return self._formatted_text
@formatted_text.setter
def formatted_text(self, value: "StyleAndTextTuples") -> None:
"""Sets the formatted text."""
self._formatted_text = to_formatted_text(value)
self.text = fragment_list_to_text(value)
class ANSI(PTANSI):
"""Converts ANSI text into formatted text, preserving all control sequences."""
def __init__(self, value: "str") -> None:
"""Initiate the ANSI processor instance.
This replaces carriage returns to emulate terminal output.
Args:
value: The ANSI string to process.
"""
# Replace windows style newlines
value = value.replace("\r\n", "\n")
# Remove anything before a carriage return if there is something after it to
# emulate a carriage return in the output
value = re.sub("^.*\\r(?!\\n)", "", value, 0, re.MULTILINE)
super().__init__(value)
def _parse_corot(self) -> Generator[None, str, None]:
"""Coroutine that parses the ANSI escape sequences.
This is modified version of the ANSI parser from prompt_toolkit retains
all CSI escape sequences.
Yields:
Accepts characters from a string.
"""
style = ""
formatted_text = self._formatted_text
while True:
char = yield
sequence = char
# Everything between \001 and \002 should become a ZeroWidthEscape.
if char == "\001":
sequence = ""
while char != "\002":
char = yield
if char == "\002":
formatted_text.append(("[ZeroWidthEscape]", sequence))
break
else:
sequence += char
continue
# Check for backspace
elif char == "\x08":
# TODO - remove last character from last non-ZeroWidthEscape fragment
formatted_text.pop()
continue
elif char in ("\x1b", "\x9b"):
# Got a CSI sequence, try to compile a control sequence
char = yield
# Check for sixels
if char == "P":
# Got as DEC code
sequence += char
# We expect "p1;p2;p3;q" + sixel data + "\x1b\"
char = yield
while char != "\x1b":
sequence += char
char = yield
sequence += char
char = yield
if ord(char) == 0x5C:
sequence += char
formatted_text.append(("[ZeroWidthEscape]", sequence))
# char = yield
continue
# Check for hyperlinks
elif char == "]":
sequence += char
char = yield
if char == "8":
sequence += char
char = yield
if char == ";":
sequence += char
char = yield
while True:
sequence += char
if sequence[-2:] == "\x1b\\":
break
char = yield
formatted_text.append(("[ZeroWidthEscape]", sequence))
continue
elif (char == "[" and sequence == "\x1b") or sequence == "\x9b":
if sequence == "\x1b":
sequence += char
char = yield
# Next are any number (including none) of "parameter bytes"
params = []
current = ""
while 0x30 <= ord(char) <= 0x3F:
# Parse list of integer parameters
sequence += char
if char.isdigit():
current += char
else:
params.append(min(int(current or 0), 9999))
if char == ";":
current = ""
char = yield
if current:
params.append(min(int(current or 0), 9999))
# then any number of "intermediate bytes"
while 0x20 <= ord(char) <= 0x2F:
sequence += char
char = yield
# finally by a single "final byte"
if 0x40 <= ord(char) <= 0x7E:
sequence += char
# Check if that escape sequence was a style:
if char == "m":
self._select_graphic_rendition(params)
style = self._create_style_string()
# Otherwise print a zero-width control sequence
else:
formatted_text.append(("[ZeroWidthEscape]", sequence))
continue
formatted_text.append((style, sequence)) | en | 0.692737 | # -*- coding: utf-8 -*- Contains dpdated ANSI parsing and Formatted Text processing. Applies formatted text to a TextArea. Initiate the processor. Args: formatted_text: The text in a buffer but with formatting applied. Apply text formatting to a line in a buffer. Applies formatted text to a TextArea. Initialise a `FormattedTextArea` instance. Args: formatted_text: A list of `(style, text)` tuples to display. *args: Arguments to pass to `prompt_toolkit.widgets.TextArea`. **kwargs: Key-word arguments to pass to `prompt_toolkit.widgets.TextArea`. # The following is not type checked due to a currently open mypy bug # https://github.com/python/mypy/issues/6799 # type: ignore # Set the formatted text to display Generate a processor for the formatted text. The formatted text. Sets the formatted text. Converts ANSI text into formatted text, preserving all control sequences. Initiate the ANSI processor instance. This replaces carriage returns to emulate terminal output. Args: value: The ANSI string to process. # Replace windows style newlines # Remove anything before a carriage return if there is something after it to # emulate a carriage return in the output Coroutine that parses the ANSI escape sequences. This is modified version of the ANSI parser from prompt_toolkit retains all CSI escape sequences. Yields: Accepts characters from a string. # Everything between \001 and \002 should become a ZeroWidthEscape. 
# Check for backspace # TODO - remove last character from last non-ZeroWidthEscape fragment # Got a CSI sequence, try to compile a control sequence # Check for sixels # Got as DEC code # We expect "p1;p2;p3;q" + sixel data + "\x1b\" # char = yield # Check for hyperlinks # Next are any number (including none) of "parameter bytes" # Parse list of integer parameters # then any number of "intermediate bytes" # finally by a single "final byte" # Check if that escape sequence was a style: # Otherwise print a zero-width control sequence | 2.547259 | 3 |
backtest.py | Yanjing-PENG/index_prediction | 0 | 6622117 | <reponame>Yanjing-PENG/index_prediction
# -*- encoding:utf-8 -*-
from getdata import getData
from signaltrade import signaltrade
from tradestats import tradestats
from plot import plot_net_value
from configue import M, T
import pandas as pd
# set the display parameters for pandas DataFrame
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# set some basic parameters
init_capital = 3000
quantity = 1
fee_rate = 0.0001
# do the backtest
data_1m = getData()
signaltrade_result = signaltrade(data_1m, 0.003, M, T, quantity, fee_rate)
orderbook = signaltrade_result[1]
tradedate = signaltrade_result[2]
# get detailed backtest performance
stats = tradestats(orderbook, init_capital, tradedate)
# save backtest performance into excel file
writer = pd.ExcelWriter('backtest_result.xlsx')
stats.to_excel(writer, 'stats', index=False)
orderbook.to_excel(writer, 'orderbook', index=False)
writer.save()
# plot the net value figure
start_time = signaltrade_result[0][tradedate[0]].loc[0, 'time']
plot_net_value(orderbook, start_time, init_capital)
# print out the backtest performance
print('-------------------------------------------------------------------------------')
print(stats)
print('-------------------------------------------------------------------------------')
| # -*- encoding:utf-8 -*-
from getdata import getData
from signaltrade import signaltrade
from tradestats import tradestats
from plot import plot_net_value
from configue import M, T
import pandas as pd
# set the display parameters for pandas DataFrame
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# set some basic parameters
init_capital = 3000
quantity = 1
fee_rate = 0.0001
# do the backtest
data_1m = getData()
signaltrade_result = signaltrade(data_1m, 0.003, M, T, quantity, fee_rate)
orderbook = signaltrade_result[1]
tradedate = signaltrade_result[2]
# get detailed backtest performance
stats = tradestats(orderbook, init_capital, tradedate)
# save backtest performance into excel file
writer = pd.ExcelWriter('backtest_result.xlsx')
stats.to_excel(writer, 'stats', index=False)
orderbook.to_excel(writer, 'orderbook', index=False)
writer.save()
# plot the net value figure
start_time = signaltrade_result[0][tradedate[0]].loc[0, 'time']
plot_net_value(orderbook, start_time, init_capital)
# print out the backtest performance
print('-------------------------------------------------------------------------------')
print(stats)
print('-------------------------------------------------------------------------------') | en | 0.532896 | # -*- encoding:utf-8 -*- # set the display parameters for pandas DataFrame # set some basic parameters # do the backtest # get detailed backtest performance # save backtest performance into excel file # plot the net value figure # print out the backtest performance | 2.747642 | 3 |
honeycomb/utils/config_utils.py | omercnet/honeycomb | 81 | 6622118 | <reponame>omercnet/honeycomb
# -*- coding: utf-8 -*-
"""Honeycomb Config Utilities."""
from __future__ import unicode_literals, absolute_import
import os
import re
import json
import logging
import six
import yaml
from honeycomb import defs, exceptions
from honeycomb.error_messages import CONFIG_FIELD_TYPE_ERROR
logger = logging.getLogger(__name__)
def config_field_type(field, cls):
"""Validate a config field against a type.
Similar functionality to :func:`validate_field_matches_type` but returns :obj:`honeycomb.defs.ConfigField`
"""
return defs.ConfigField(lambda _: isinstance(_, cls),
lambda: CONFIG_FIELD_TYPE_ERROR.format(field, cls.__name__))
def validate_config(config_json, fields):
"""Validate a JSON file configuration against list of :obj:`honeycomb.defs.ConfigField`."""
for field_name, validator_obj in six.iteritems(fields):
field_value = config_json.get(field_name, None)
if field_value is None:
raise exceptions.ConfigFieldMissing(field_name)
if not validator_obj.validator_func(field_value):
raise exceptions.ConfigFieldValidationError(field_name, field_value, validator_obj.get_error_message())
def get_config_parameters(plugin_path):
"""Return the parameters section from config.json."""
json_config_path = os.path.join(plugin_path, defs.CONFIG_FILE_NAME)
with open(json_config_path, "r") as f:
config = json.load(f)
return config.get(defs.PARAMETERS, [])
def validate_config_parameters(config_json, allowed_keys, allowed_types):
"""Validate parameters in config file."""
custom_fields = config_json.get(defs.PARAMETERS, [])
for field in custom_fields:
validate_field(field, allowed_keys, allowed_types)
default = field.get(defs.DEFAULT)
field_type = field.get(defs.TYPE)
if default:
validate_field_matches_type(field[defs.VALUE], default, field_type)
def validate_field_matches_type(field, value, field_type, select_items=None, _min=None, _max=None):
"""Validate a config field against a specific type."""
if (field_type == defs.TEXT_TYPE and not isinstance(value, six.string_types)) or \
(field_type == defs.STRING_TYPE and not isinstance(value, six.string_types)) or \
(field_type == defs.BOOLEAN_TYPE and not isinstance(value, bool)) or \
(field_type == defs.INTEGER_TYPE and not isinstance(value, int)):
raise exceptions.ConfigFieldTypeMismatch(field, value, field_type)
if field_type == defs.INTEGER_TYPE:
if _min and value < _min:
raise exceptions.ConfigFieldTypeMismatch(field, value, "must be higher than {}".format(_min))
if _max and value > _max:
raise exceptions.ConfigFieldTypeMismatch(field, value, "must be lower than {}".format(_max))
if field_type == defs.SELECT_TYPE:
from honeycomb.utils.plugin_utils import get_select_items
items = get_select_items(select_items)
if value not in items:
raise exceptions.ConfigFieldTypeMismatch(field, value, "one of: {}".format(", ".join(items)))
def get_truetype(value):
"""Convert a string to a pythonized parameter."""
if value in ["true", "True", "y", "Y", "yes"]:
return True
if value in ["false", "False", "n", "N", "no"]:
return False
if value.isdigit():
return int(value)
return str(value)
def validate_field(field, allowed_keys, allowed_types):
"""Validate field is allowed and valid."""
for key, value in field.items():
if key not in allowed_keys:
raise exceptions.ParametersFieldError(key, "property")
if key == defs.TYPE:
if value not in allowed_types:
raise exceptions.ParametersFieldError(value, key)
if key == defs.VALUE:
if not is_valid_field_name(value):
raise exceptions.ParametersFieldError(value, "field name")
def is_valid_field_name(value):
"""Ensure field name is valid."""
leftovers = re.sub(r"\w", "", value)
leftovers = re.sub(r"-", "", leftovers)
if leftovers != "" or value[0].isdigit() or value[0] in ["-", "_"] or " " in value:
return False
return True
def process_config(ctx, configfile):
"""Process a yaml config with instructions.
This is a heavy method that loads lots of content, so we only run the imports if its called.
"""
from honeycomb.commands.service.run import run as service_run
# from honeycomb.commands.service.logs import logs as service_logs
from honeycomb.commands.service.install import install as service_install
from honeycomb.commands.integration.install import install as integration_install
from honeycomb.commands.integration.configure import configure as integration_configure
VERSION = "version"
SERVICES = defs.SERVICES
INTEGRATIONS = defs.INTEGRATIONS
required_top_keys = [VERSION, SERVICES]
supported_versions = [1]
def validate_yml(config):
for key in required_top_keys:
if key not in config:
raise exceptions.ConfigFieldMissing(key)
version = config.get(VERSION)
if version not in supported_versions:
raise exceptions.ConfigFieldTypeMismatch(VERSION, version,
"one of: {}".format(repr(supported_versions)))
def install_plugins(services, integrations):
for cmd, kwargs in [(service_install, {SERVICES: services}),
(integration_install, {INTEGRATIONS: integrations})]:
try:
ctx.invoke(cmd, **kwargs)
except SystemExit:
# If a plugin is already installed honeycomb will exit abnormally
pass
def parameters_to_string(parameters_dict):
return ["{}={}".format(k, v) for k, v in parameters_dict.items()]
def configure_integrations(integrations):
for integration in integrations:
args_list = parameters_to_string(config[INTEGRATIONS][integration].get(defs.PARAMETERS, dict()))
ctx.invoke(integration_configure, integration=integration, args=args_list)
def run_services(services, integrations):
# TODO: Enable support with multiple services as daemon, and run service.logs afterwards
# tricky part is that services launched as daemon are exited with os._exit(0) so you
# can't catch it.
for service in services:
args_list = parameters_to_string(config[SERVICES][service].get(defs.PARAMETERS, dict()))
ctx.invoke(service_run, service=service, integration=integrations, args=args_list)
# TODO: Silence normal stdout and follow honeycomb.debug.json instead
# This would make monitoring containers and collecting logs easier
with open(configfile, "rb") as fh:
config = yaml.load(fh.read())
validate_yml(config)
services = config.get(SERVICES).keys()
integrations = config.get(INTEGRATIONS).keys() if config.get(INTEGRATIONS) else []
install_plugins(services, integrations)
configure_integrations(integrations)
run_services(services, integrations)
| # -*- coding: utf-8 -*-
"""Honeycomb Config Utilities."""
from __future__ import unicode_literals, absolute_import
import os
import re
import json
import logging
import six
import yaml
from honeycomb import defs, exceptions
from honeycomb.error_messages import CONFIG_FIELD_TYPE_ERROR
logger = logging.getLogger(__name__)
def config_field_type(field, cls):
"""Validate a config field against a type.
Similar functionality to :func:`validate_field_matches_type` but returns :obj:`honeycomb.defs.ConfigField`
"""
return defs.ConfigField(lambda _: isinstance(_, cls),
lambda: CONFIG_FIELD_TYPE_ERROR.format(field, cls.__name__))
def validate_config(config_json, fields):
"""Validate a JSON file configuration against list of :obj:`honeycomb.defs.ConfigField`."""
for field_name, validator_obj in six.iteritems(fields):
field_value = config_json.get(field_name, None)
if field_value is None:
raise exceptions.ConfigFieldMissing(field_name)
if not validator_obj.validator_func(field_value):
raise exceptions.ConfigFieldValidationError(field_name, field_value, validator_obj.get_error_message())
def get_config_parameters(plugin_path):
"""Return the parameters section from config.json."""
json_config_path = os.path.join(plugin_path, defs.CONFIG_FILE_NAME)
with open(json_config_path, "r") as f:
config = json.load(f)
return config.get(defs.PARAMETERS, [])
def validate_config_parameters(config_json, allowed_keys, allowed_types):
"""Validate parameters in config file."""
custom_fields = config_json.get(defs.PARAMETERS, [])
for field in custom_fields:
validate_field(field, allowed_keys, allowed_types)
default = field.get(defs.DEFAULT)
field_type = field.get(defs.TYPE)
if default:
validate_field_matches_type(field[defs.VALUE], default, field_type)
def validate_field_matches_type(field, value, field_type, select_items=None, _min=None, _max=None):
"""Validate a config field against a specific type."""
if (field_type == defs.TEXT_TYPE and not isinstance(value, six.string_types)) or \
(field_type == defs.STRING_TYPE and not isinstance(value, six.string_types)) or \
(field_type == defs.BOOLEAN_TYPE and not isinstance(value, bool)) or \
(field_type == defs.INTEGER_TYPE and not isinstance(value, int)):
raise exceptions.ConfigFieldTypeMismatch(field, value, field_type)
if field_type == defs.INTEGER_TYPE:
if _min and value < _min:
raise exceptions.ConfigFieldTypeMismatch(field, value, "must be higher than {}".format(_min))
if _max and value > _max:
raise exceptions.ConfigFieldTypeMismatch(field, value, "must be lower than {}".format(_max))
if field_type == defs.SELECT_TYPE:
from honeycomb.utils.plugin_utils import get_select_items
items = get_select_items(select_items)
if value not in items:
raise exceptions.ConfigFieldTypeMismatch(field, value, "one of: {}".format(", ".join(items)))
def get_truetype(value):
"""Convert a string to a pythonized parameter."""
if value in ["true", "True", "y", "Y", "yes"]:
return True
if value in ["false", "False", "n", "N", "no"]:
return False
if value.isdigit():
return int(value)
return str(value)
def validate_field(field, allowed_keys, allowed_types):
"""Validate field is allowed and valid."""
for key, value in field.items():
if key not in allowed_keys:
raise exceptions.ParametersFieldError(key, "property")
if key == defs.TYPE:
if value not in allowed_types:
raise exceptions.ParametersFieldError(value, key)
if key == defs.VALUE:
if not is_valid_field_name(value):
raise exceptions.ParametersFieldError(value, "field name")
def is_valid_field_name(value):
"""Ensure field name is valid."""
leftovers = re.sub(r"\w", "", value)
leftovers = re.sub(r"-", "", leftovers)
if leftovers != "" or value[0].isdigit() or value[0] in ["-", "_"] or " " in value:
return False
return True
def process_config(ctx, configfile):
"""Process a yaml config with instructions.
This is a heavy method that loads lots of content, so we only run the imports if its called.
"""
from honeycomb.commands.service.run import run as service_run
# from honeycomb.commands.service.logs import logs as service_logs
from honeycomb.commands.service.install import install as service_install
from honeycomb.commands.integration.install import install as integration_install
from honeycomb.commands.integration.configure import configure as integration_configure
VERSION = "version"
SERVICES = defs.SERVICES
INTEGRATIONS = defs.INTEGRATIONS
required_top_keys = [VERSION, SERVICES]
supported_versions = [1]
def validate_yml(config):
for key in required_top_keys:
if key not in config:
raise exceptions.ConfigFieldMissing(key)
version = config.get(VERSION)
if version not in supported_versions:
raise exceptions.ConfigFieldTypeMismatch(VERSION, version,
"one of: {}".format(repr(supported_versions)))
def install_plugins(services, integrations):
for cmd, kwargs in [(service_install, {SERVICES: services}),
(integration_install, {INTEGRATIONS: integrations})]:
try:
ctx.invoke(cmd, **kwargs)
except SystemExit:
# If a plugin is already installed honeycomb will exit abnormally
pass
def parameters_to_string(parameters_dict):
return ["{}={}".format(k, v) for k, v in parameters_dict.items()]
def configure_integrations(integrations):
for integration in integrations:
args_list = parameters_to_string(config[INTEGRATIONS][integration].get(defs.PARAMETERS, dict()))
ctx.invoke(integration_configure, integration=integration, args=args_list)
def run_services(services, integrations):
# TODO: Enable support with multiple services as daemon, and run service.logs afterwards
# tricky part is that services launched as daemon are exited with os._exit(0) so you
# can't catch it.
for service in services:
args_list = parameters_to_string(config[SERVICES][service].get(defs.PARAMETERS, dict()))
ctx.invoke(service_run, service=service, integration=integrations, args=args_list)
# TODO: Silence normal stdout and follow honeycomb.debug.json instead
# This would make monitoring containers and collecting logs easier
with open(configfile, "rb") as fh:
config = yaml.load(fh.read())
validate_yml(config)
services = config.get(SERVICES).keys()
integrations = config.get(INTEGRATIONS).keys() if config.get(INTEGRATIONS) else []
install_plugins(services, integrations)
configure_integrations(integrations)
run_services(services, integrations) | en | 0.844932 | # -*- coding: utf-8 -*- Honeycomb Config Utilities. Validate a config field against a type. Similar functionality to :func:`validate_field_matches_type` but returns :obj:`honeycomb.defs.ConfigField` Validate a JSON file configuration against list of :obj:`honeycomb.defs.ConfigField`. Return the parameters section from config.json. Validate parameters in config file. Validate a config field against a specific type. Convert a string to a pythonized parameter. Validate field is allowed and valid. Ensure field name is valid. Process a yaml config with instructions. This is a heavy method that loads lots of content, so we only run the imports if its called. # from honeycomb.commands.service.logs import logs as service_logs # If a plugin is already installed honeycomb will exit abnormally # TODO: Enable support with multiple services as daemon, and run service.logs afterwards # tricky part is that services launched as daemon are exited with os._exit(0) so you # can't catch it. # TODO: Silence normal stdout and follow honeycomb.debug.json instead # This would make monitoring containers and collecting logs easier | 2.443598 | 2 |
amocrm_api_client/make_json_request/core/MakeJsonRequestException.py | iqtek/amocrm_api_client | 0 | 6622119 | from typing import Mapping
from typing import Optional
from amocrm_api_client.exceptions import AmocrmClientException
__all__ = [
"MakeJsonRequestException",
]
class MakeJsonRequestException(AmocrmClientException):
__slots__ = (
"status_code",
"headers",
"content",
)
def __init__(
self,
status_code: Optional[int] = None,
headers: Optional[Mapping[str, str]] = None,
content: Optional[str] = None,
) -> None:
super().__init__(
f"status_code: {status_code}, headers: {headers}, content: {content}."
)
self.status_code = status_code
self.headers = headers
self.content = content
| from typing import Mapping
from typing import Optional
from amocrm_api_client.exceptions import AmocrmClientException
__all__ = [
"MakeJsonRequestException",
]
class MakeJsonRequestException(AmocrmClientException):
__slots__ = (
"status_code",
"headers",
"content",
)
def __init__(
self,
status_code: Optional[int] = None,
headers: Optional[Mapping[str, str]] = None,
content: Optional[str] = None,
) -> None:
super().__init__(
f"status_code: {status_code}, headers: {headers}, content: {content}."
)
self.status_code = status_code
self.headers = headers
self.content = content
| none | 1 | 2.513432 | 3 | |
python3/koans/a_package_folder/__init__.py | digiaonline/python_koans | 1 | 6622120 | <filename>python3/koans/a_package_folder/__init__.py
#!/usr/bin/env python
an_attribute = 1984 | <filename>python3/koans/a_package_folder/__init__.py
#!/usr/bin/env python
an_attribute = 1984 | ru | 0.26433 | #!/usr/bin/env python | 1.143534 | 1 |
multitest_transport/cli/cluster.py | maksonlee/multitest_transport | 0 | 6622121 | <reponame>maksonlee/multitest_transport
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to handle cluster commands.
Cluster commands use Docker Swarm to manage multiple MTT replica nodes. However,
this feature is currently not working because Docker Swarm does not support
--privilege option when creating a service. See the link below for details:
https://github.com/docker/swarmkit/issues/1030
"""
import copy
from multitest_transport.cli import command_util
from multitest_transport.cli import config
CONFIG_PATH_FORMAT = '~/.config/mtt/clusters/%s.ini'
class ClusterRegistry(object):
"""A class to store cluster configs."""
def __init__(self):
# A model cluster config.
self._config = config.Config(filename=None)
self._config.DefineField('manager_host')
self._config.DefineField('manager_join_token')
self._config.DefineField('worker_join_token')
self._config_map = {}
def _GetConfigPath(self, name):
return CONFIG_PATH_FORMAT % name
def GetConfig(self, name):
"""Return a cluster config for a given name.
Args:
name: a cluster name.
Returns:
a cluster config.
"""
name = name.lower()
if name not in self._config_map:
filename = self._GetConfigPath(name)
field_map = copy.deepcopy(self._config.field_map)
self._config_map[name] = config.Config(filename, field_map=field_map)
self._config_map[name].Load()
return self._config_map[name]
class ClusterCommandHandler(object):
"""A handler for cluster commands."""
def __init__(self):
self._command_map = {
'create': self.Create,
'add_node': self.AddNode,
'remove_node': self.RemoveNode,
}
self._registry = ClusterRegistry()
def Run(self, args):
self._command_map[args.command](args)
def AddParser(self, subparsers):
"""Add a command argument parser.
Args:
subparsers: an argparse subparsers object.
"""
parser = subparsers.add_parser(
'cluster', help='Create and manage MTT clusters.')
parser.add_argument(
'command', choices=self._command_map.keys())
parser.add_argument('--name')
parser.add_argument('--host', default=None)
parser.add_argument('--token', default=None)
parser.add_argument('--ssh_user', default=None)
parser.set_defaults(func=self.Run)
def Create(self, args):
"""Creates a cluster.
This actually creates a Docker swarm and deploy a MTT service on it.
Args:
args: an argparse.ArgumentParser object.
Raises:
ValueError: if mtt_control_server_url or host is not set.
"""
if not config.config.mtt_control_server_url:
raise ValueError('mtt_control_server_url must be set.')
if not args.host:
raise ValueError('--host option must be set')
context = command_util.CommandContext(host=args.host, user=args.ssh_user)
docker_context = command_util.DockerContext(context, try_use_gcloud=False)
cluster_config = self._registry.GetConfig(args.name)
docker_context.Run(['swarm', 'init'])
# TODO: get token ID and store it.
docker_context.Run([
'service', 'create', '--name', 'mtt', '--env',
'MTT_CONTROL_SERVER_URL=%s' % config.config.mtt_control_server_url,
'--mode', 'global', 'gcr.io/android-mtt/mtt'
])
cluster_config.manager_host = args.host
cluster_config.Save()
def AddNode(self, args):
"""Adds a node to an existing cluster.
Args:
args: an argparse.ArgumentParser object.
Raises:
ValueError: if a host or a token is missing.
"""
if not args.host:
raise ValueError('--host must be provided')
if not args.token:
raise ValueError('--token must be provided')
context = command_util.CommandContext(host=args.host, user=args.ssh_user)
docker_context = command_util.DockerContext(context, try_use_gcloud=False)
cluster_config = self._registry.GetConfig(args.name)
if args.host == cluster_config.manager_host:
raise ValueError(
'%s is already a manager node for %s cluster' % (
args.host, args.name))
docker_context.Run(
[
'swarm', 'join',
'--token', args.token,
'%s:2377' % cluster_config.manager_host])
def RemoveNode(self, args):
"""Removes a node from an existing cluster.
Args:
args: an argparse.ArgumentParser object.
Raises:
ValueError: if a host or a token is missing.
"""
if not args.host:
raise ValueError('--host must be provided')
context = command_util.CommandContext(host=args.host, user=args.ssh_user)
docker_context = command_util.DockerContext(context, try_use_gcloud=False)
docker_context.Run(
['swarm', 'leave', '--force'])
| # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to handle cluster commands.
Cluster commands use Docker Swarm to manage multiple MTT replica nodes. However,
this feature is currently not working because Docker Swarm does not support
--privilege option when creating a service. See the link below for details:
https://github.com/docker/swarmkit/issues/1030
"""
import copy
from multitest_transport.cli import command_util
from multitest_transport.cli import config
CONFIG_PATH_FORMAT = '~/.config/mtt/clusters/%s.ini'
class ClusterRegistry(object):
"""A class to store cluster configs."""
def __init__(self):
# A model cluster config.
self._config = config.Config(filename=None)
self._config.DefineField('manager_host')
self._config.DefineField('manager_join_token')
self._config.DefineField('worker_join_token')
self._config_map = {}
def _GetConfigPath(self, name):
return CONFIG_PATH_FORMAT % name
def GetConfig(self, name):
"""Return a cluster config for a given name.
Args:
name: a cluster name.
Returns:
a cluster config.
"""
name = name.lower()
if name not in self._config_map:
filename = self._GetConfigPath(name)
field_map = copy.deepcopy(self._config.field_map)
self._config_map[name] = config.Config(filename, field_map=field_map)
self._config_map[name].Load()
return self._config_map[name]
class ClusterCommandHandler(object):
"""A handler for cluster commands."""
def __init__(self):
self._command_map = {
'create': self.Create,
'add_node': self.AddNode,
'remove_node': self.RemoveNode,
}
self._registry = ClusterRegistry()
def Run(self, args):
self._command_map[args.command](args)
def AddParser(self, subparsers):
"""Add a command argument parser.
Args:
subparsers: an argparse subparsers object.
"""
parser = subparsers.add_parser(
'cluster', help='Create and manage MTT clusters.')
parser.add_argument(
'command', choices=self._command_map.keys())
parser.add_argument('--name')
parser.add_argument('--host', default=None)
parser.add_argument('--token', default=None)
parser.add_argument('--ssh_user', default=None)
parser.set_defaults(func=self.Run)
def Create(self, args):
"""Creates a cluster.
This actually creates a Docker swarm and deploy a MTT service on it.
Args:
args: an argparse.ArgumentParser object.
Raises:
ValueError: if mtt_control_server_url or host is not set.
"""
if not config.config.mtt_control_server_url:
raise ValueError('mtt_control_server_url must be set.')
if not args.host:
raise ValueError('--host option must be set')
context = command_util.CommandContext(host=args.host, user=args.ssh_user)
docker_context = command_util.DockerContext(context, try_use_gcloud=False)
cluster_config = self._registry.GetConfig(args.name)
docker_context.Run(['swarm', 'init'])
# TODO: get token ID and store it.
docker_context.Run([
'service', 'create', '--name', 'mtt', '--env',
'MTT_CONTROL_SERVER_URL=%s' % config.config.mtt_control_server_url,
'--mode', 'global', 'gcr.io/android-mtt/mtt'
])
cluster_config.manager_host = args.host
cluster_config.Save()
def AddNode(self, args):
"""Adds a node to an existing cluster.
Args:
args: an argparse.ArgumentParser object.
Raises:
ValueError: if a host or a token is missing.
"""
if not args.host:
raise ValueError('--host must be provided')
if not args.token:
raise ValueError('--token must be provided')
context = command_util.CommandContext(host=args.host, user=args.ssh_user)
docker_context = command_util.DockerContext(context, try_use_gcloud=False)
cluster_config = self._registry.GetConfig(args.name)
if args.host == cluster_config.manager_host:
raise ValueError(
'%s is already a manager node for %s cluster' % (
args.host, args.name))
docker_context.Run(
[
'swarm', 'join',
'--token', args.token,
'%s:2377' % cluster_config.manager_host])
def RemoveNode(self, args):
"""Removes a node from an existing cluster.
Args:
args: an argparse.ArgumentParser object.
Raises:
ValueError: if a host or a token is missing.
"""
if not args.host:
raise ValueError('--host must be provided')
context = command_util.CommandContext(host=args.host, user=args.ssh_user)
docker_context = command_util.DockerContext(context, try_use_gcloud=False)
docker_context.Run(
['swarm', 'leave', '--force']) | en | 0.732138 | # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. A module to handle cluster commands. Cluster commands use Docker Swarm to manage multiple MTT replica nodes. However, this feature is currently not working because Docker Swarm does not support --privilege option when creating a service. See the link below for details: https://github.com/docker/swarmkit/issues/1030 A class to store cluster configs. # A model cluster config. Return a cluster config for a given name. Args: name: a cluster name. Returns: a cluster config. A handler for cluster commands. Add a command argument parser. Args: subparsers: an argparse subparsers object. Creates a cluster. This actually creates a Docker swarm and deploy a MTT service on it. Args: args: an argparse.ArgumentParser object. Raises: ValueError: if mtt_control_server_url or host is not set. # TODO: get token ID and store it. Adds a node to an existing cluster. Args: args: an argparse.ArgumentParser object. Raises: ValueError: if a host or a token is missing. Removes a node from an existing cluster. Args: args: an argparse.ArgumentParser object. Raises: ValueError: if a host or a token is missing. | 2.188802 | 2 |
test/unittests/test.py | dutille/doconce | 305 | 6622122 | import itertools
from invoke import format_html
import os
def read_options(subdir):
default = ()
infile = os.path.join(subdir, 'OPTIONS')
if not os.path.isfile(infile):
return default
with open(infile, 'r') as f:
OPTIONS = tuple(l.strip() for l in f.readlines())
return OPTIONS
def error_msg(args, out, err):
s = "COMMAND:\n{}\n".format(' '.join(args))
s += "STDOUT:\n{}\n\nSTDERR:\n{}\n".format(out, err)
return s
SUBDIRS = ('header')
def _test_subdir(subdir):
cwd = os.path.abspath(os.curdir)
OPTIONS = read_options(subdir)
n = len(OPTIONS)
try:
os.chdir(subdir)
for comb in itertools.product((False, True), repeat=n):
opts = []
outfile = subdir
for i, c in enumerate(comb):
if c:
opts.append('-D%s' % OPTIONS[i])
outfile += '__'+OPTIONS[i]
opts.append('--html_output='+outfile)
retval, out, err, args = format_html('generic', opts)
msg = error_msg(args, out, err)
assert retval == 0, msg
finally:
os.chdir(cwd)
def test_header():
_test_subdir('header')
if __name__ == '__main__':
test_header()
| import itertools
from invoke import format_html
import os
def read_options(subdir):
default = ()
infile = os.path.join(subdir, 'OPTIONS')
if not os.path.isfile(infile):
return default
with open(infile, 'r') as f:
OPTIONS = tuple(l.strip() for l in f.readlines())
return OPTIONS
def error_msg(args, out, err):
s = "COMMAND:\n{}\n".format(' '.join(args))
s += "STDOUT:\n{}\n\nSTDERR:\n{}\n".format(out, err)
return s
SUBDIRS = ('header')
def _test_subdir(subdir):
cwd = os.path.abspath(os.curdir)
OPTIONS = read_options(subdir)
n = len(OPTIONS)
try:
os.chdir(subdir)
for comb in itertools.product((False, True), repeat=n):
opts = []
outfile = subdir
for i, c in enumerate(comb):
if c:
opts.append('-D%s' % OPTIONS[i])
outfile += '__'+OPTIONS[i]
opts.append('--html_output='+outfile)
retval, out, err, args = format_html('generic', opts)
msg = error_msg(args, out, err)
assert retval == 0, msg
finally:
os.chdir(cwd)
def test_header():
_test_subdir('header')
if __name__ == '__main__':
test_header()
| none | 1 | 2.290767 | 2 | |
week 1/2/2c.py | Monxun/SmoothStack | 0 | 6622123 | <gh_stars>0
#############################
# Three's a crowd: pt. 1 / pt. 2
people = ['Matt', 'Mark', 'Luke', 'Ringo']
def test_crowd(people):
if len(people) > 3:
print("This room's crowded!")
else:
print("Room's all good.")
test_crowd(people)
people.pop()
test_crowd(people)
#############################
# Six is a Mob
people = ['Matt', 'Mark', 'Luke', 'Ringo', 'Daphne', 'Lilly']
def test_mob(people):
if len(people) > 5:
print("There's a MOB in this room!")
elif len(people) >= 3:
print("This room's crowded!")
elif len(people) > 0:
print("Room's all good.")
else:
print("Room's empty")
test_mob(people)
test_mob(people[0:3])
test_mob(people[0:1]) | #############################
# Three's a crowd: pt. 1 / pt. 2
people = ['Matt', 'Mark', 'Luke', 'Ringo']
def test_crowd(people):
if len(people) > 3:
print("This room's crowded!")
else:
print("Room's all good.")
test_crowd(people)
people.pop()
test_crowd(people)
#############################
# Six is a Mob
people = ['Matt', 'Mark', 'Luke', 'Ringo', 'Daphne', 'Lilly']
def test_mob(people):
if len(people) > 5:
print("There's a MOB in this room!")
elif len(people) >= 3:
print("This room's crowded!")
elif len(people) > 0:
print("Room's all good.")
else:
print("Room's empty")
test_mob(people)
test_mob(people[0:3])
test_mob(people[0:1]) | de | 0.531589 | ############################# # Three's a crowd: pt. 1 / pt. 2 ############################# # Six is a Mob | 3.969133 | 4 |
tests/unit/test_unicode.py | simon-engledew/sshim | 16 | 6622124 | # -*- coding: utf8 -*-
import unittest
import sshim
import re
import codecs
import six
from . import connect
class TestUnicode(unittest.TestCase):
def test_unicode_echo(self):
def decode(value):
if isinstance(value, six.text_type):
return value
return codecs.decode(value, 'utf8')
def echo(script):
groups = script.expect(re.compile(six.u('(?P<value>.*)'))).groupdict()
value = groups['value']
assert value == six.u('£test')
script.writeline(six.u('return {0}').format(value))
with sshim.Server(echo, address='127.0.0.1', port=0, encoding='utf8') as server:
with connect(server) as fileobj:
fileobj.write(six.u('£test\n').encode('utf8'))
fileobj.flush()
assert decode(fileobj.readline()) == six.u('£test\r\n')
assert decode(fileobj.readline()) == six.u('return £test\r\n')
| # -*- coding: utf8 -*-
import unittest
import sshim
import re
import codecs
import six
from . import connect
class TestUnicode(unittest.TestCase):
def test_unicode_echo(self):
def decode(value):
if isinstance(value, six.text_type):
return value
return codecs.decode(value, 'utf8')
def echo(script):
groups = script.expect(re.compile(six.u('(?P<value>.*)'))).groupdict()
value = groups['value']
assert value == six.u('£test')
script.writeline(six.u('return {0}').format(value))
with sshim.Server(echo, address='127.0.0.1', port=0, encoding='utf8') as server:
with connect(server) as fileobj:
fileobj.write(six.u('£test\n').encode('utf8'))
fileobj.flush()
assert decode(fileobj.readline()) == six.u('£test\r\n')
assert decode(fileobj.readline()) == six.u('return £test\r\n')
| en | 0.406466 | # -*- coding: utf8 -*- | 3.019623 | 3 |
utils/sort.py | mymsimple/plate_generator | 10 | 6622125 | # encoding='utf-8'
import os
'''
按字符数量做排序,如下:
['京 7577'] 冀 20417
['津 3843'] =====> 京 7577
['冀 20417'] 津 3843
'''
def char_sort(old_txt):
with open(old_txt, "r", encoding="utf-8") as f:
char_list = []
c_list = []
for line in f.readlines():
line = line.replace("[","")
line = line.replace("]", "")
line = line.replace("'", "")
char, c = line.split()
char_list.append(char)
c_list.append(int(c))
char_dict = dict(zip(char_list,c_list))
char_dict_sort = sorted(char_dict.items(), key=lambda x: x[1], reverse=True)
return char_dict_sort
def main(old_txt,sort_txt):
char_dict_sort = char_sort(old_txt)
with open(sort_txt, "w", encoding="utf-8") as f:
for c in char_dict_sort:
list1 = list(c)
str1 = list1[0] + ' ' + str(list1[1])
f.write(str1 + "\n")
if __name__ == "__main__":
old_txt = "data/char_count.txt"
sort_txt = "data/char_sort.txt"
main(old_txt,sort_txt) | # encoding='utf-8'
import os
'''
按字符数量做排序,如下:
['京 7577'] 冀 20417
['津 3843'] =====> 京 7577
['冀 20417'] 津 3843
'''
def char_sort(old_txt):
with open(old_txt, "r", encoding="utf-8") as f:
char_list = []
c_list = []
for line in f.readlines():
line = line.replace("[","")
line = line.replace("]", "")
line = line.replace("'", "")
char, c = line.split()
char_list.append(char)
c_list.append(int(c))
char_dict = dict(zip(char_list,c_list))
char_dict_sort = sorted(char_dict.items(), key=lambda x: x[1], reverse=True)
return char_dict_sort
def main(old_txt,sort_txt):
char_dict_sort = char_sort(old_txt)
with open(sort_txt, "w", encoding="utf-8") as f:
for c in char_dict_sort:
list1 = list(c)
str1 = list1[0] + ' ' + str(list1[1])
f.write(str1 + "\n")
if __name__ == "__main__":
old_txt = "data/char_count.txt"
sort_txt = "data/char_sort.txt"
main(old_txt,sort_txt) | zh | 0.337309 | # encoding='utf-8' 按字符数量做排序,如下: ['京 7577'] 冀 20417 ['津 3843'] =====> 京 7577 ['冀 20417'] 津 3843 | 3.344179 | 3 |
plata_charts/apps.py | eonpatapon/plata-charts | 0 | 6622126 | <gh_stars>0
from django.apps import AppConfig
class Config(AppConfig):
name = 'plata_charts'
verbose_name = 'Charts'
| from django.apps import AppConfig
class Config(AppConfig):
name = 'plata_charts'
verbose_name = 'Charts' | none | 1 | 1.101264 | 1 | |
kirberichuk/urls.py | kirberich/kirberich.uk | 0 | 6622127 | from django.conf.urls import patterns, include, url
import session_csrf
session_csrf.monkeypatch()
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^_ah/', include('djangae.urls')),
url(r'^admin/?', include(admin.site.urls)),
url(r'^summernote/', include('django_summernote.urls')),
url(r'^', include('core.urls')),
)
| from django.conf.urls import patterns, include, url
import session_csrf
session_csrf.monkeypatch()
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^_ah/', include('djangae.urls')),
url(r'^admin/?', include(admin.site.urls)),
url(r'^summernote/', include('django_summernote.urls')),
url(r'^', include('core.urls')),
)
| none | 1 | 1.827613 | 2 | |
src/configs/config.py | Callet91/sw | 0 | 6622128 | <reponame>Callet91/sw
'''Config file for example model'''
example_config = {
'activation': 'relu',
'batch': 32,
'epochs': 5,
'loss': 'sparse_categorical_crossentropy',
'metrics': 'accuracy',
'optimizer': 'adam'
} | '''Config file for example model'''
example_config = {
'activation': 'relu',
'batch': 32,
'epochs': 5,
'loss': 'sparse_categorical_crossentropy',
'metrics': 'accuracy',
'optimizer': 'adam'
} | en | 0.69694 | Config file for example model | 1.612994 | 2 |
board.py | naderabdalghani/othello | 0 | 6622129 | <reponame>naderabdalghani/othello
from tkinter import *
import numpy as np
import PIL.Image
import PIL.ImageTk
import math
from othello import Othello
from agent import Agent
from constants import WHITE, BLACK, VALID_MOVE, WHITE_IMG, BLACK_IMG, NEXT_MOVE_IMG, BLACK_TURN_TEXT, WHITE_TURN_TEXT, \
BLACK_WON_TEXT, WHITE_WON_TEXT, DRAW_TEXT, BLACK_LOADING_TEXT, WHITE_LOADING_TEXT, GAME_IN_PROGRESS, BLACK_WON, \
WHITE_WON, DRAW, LOG_FILE, LAST_MOVE
class Board(Frame):
def __init__(self,
parent,
n,
size,
color,
black_player_type,
white_player_type,
black_hints,
white_hints,
black_depth,
white_depth,
black_evaluation_fn,
white_evaluation_fn,
black_move_ordering,
white_move_ordering):
open(LOG_FILE, "w")
# Initialize agents
self.black = Agent(BLACK,
black_player_type,
black_hints,
black_depth,
black_evaluation_fn,
black_move_ordering)
self.white = Agent(WHITE,
white_player_type,
white_hints,
white_depth,
white_evaluation_fn,
white_move_ordering)
if self.black.agent_type == "computer":
with open(LOG_FILE, "a") as f:
f.write("Black is initialized with the following parameters:\n"
"Depth: {}\nEvaluation Function Type: {}\nMove Ordering: {}".format(
self.black.depth, self.black.evaluation_fn, self.black.move_ordering
))
f.write("\n\n_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_\n\n")
if self.white.agent_type == "computer":
with open(LOG_FILE, "a") as f:
f.write("White is initialized with the following parameters:\n"
"Depth: {}\nEvaluation Function Type: {}\nMove Ordering: {}".format(
self.white.depth, self.white.evaluation_fn, self.white.move_ordering
))
f.write("\n\n_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_\n\n")
# Initialize game object
self.game = Othello(n)
# Pass turn to black as black always starts first
self.current_player = self.black
# Initialize board parameters
n = 2 ** math.ceil(math.log2(n))
self.n = n
self.size = size
self.color = color
# Initialize images
self.image_size = math.floor(size * 0.75)
image = PIL.Image.open(WHITE_IMG)
image = image.resize((self.image_size, self.image_size))
self.white_img = PIL.ImageTk.PhotoImage(image)
image = PIL.Image.open(BLACK_IMG)
image = image.resize((self.image_size, self.image_size))
self.black_img = PIL.ImageTk.PhotoImage(image)
image = PIL.Image.open(NEXT_MOVE_IMG)
image = image.resize((self.image_size, self.image_size))
self.next_move_img = PIL.ImageTk.PhotoImage(image)
# Initialize widgets (board, scoreboard)
Frame.__init__(self, parent, bg="gray")
self.black_score_var = IntVar(value=self.game.black_score)
self.white_score_var = IntVar(value=self.game.white_score)
if self.current_player.agent_type == "computer":
self.game_info_var = StringVar(value=BLACK_LOADING_TEXT)
else:
self.game_info_var = StringVar(value=BLACK_TURN_TEXT)
self.canvas = Canvas(self, borderwidth=0, highlightthickness=0, width=n * size, height=n * size, bg="gray")
self.score_board = Canvas(self, width=n * size, height=60, bg="gray", highlightthickness=0)
self.black_score_widget = Label(self.score_board, compound=LEFT, image=self.black_img,
text=self.game.black_score, bg="gray", padx=25,
textvariable=self.black_score_var, font='System 30 bold')
self.white_score_widget = Label(self.score_board, compound=RIGHT, image=self.white_img,
text=self.game.white_score, bg="gray", padx=25,
textvariable=self.white_score_var, font='System 30 bold')
self.info_widget = Label(self.score_board, compound=RIGHT, text=BLACK_TURN_TEXT, bg="gray", font='System 15',
textvariable=self.game_info_var)
self.black_score_widget.image = self.black_img
self.white_score_widget.image = self.white_img
self.moves_btns = []
# Render widgets
self.canvas.pack(side="top", fill="both", expand=True, padx=4, pady=4)
self.score_board.pack(side="bottom", fill="both", expand=True, padx=4, pady=4)
self.black_score_widget.pack(side="left")
self.info_widget.pack(side="left", expand=True)
self.white_score_widget.pack(side="right")
self.canvas.bind("<Destroy>", self.quit)
self.window_destroyed = False
self.initialize_board()
if self.current_player.agent_type == "computer":
self.canvas.after(1000, self.run_player_move)
else:
self.run_player_move()
def set_game_info_text(self, event=GAME_IN_PROGRESS):
if event == GAME_IN_PROGRESS:
if self.current_player.identifier == WHITE and self.current_player.agent_type == "computer":
self.game_info_var.set(WHITE_LOADING_TEXT)
if self.current_player.identifier == BLACK and self.current_player.agent_type == "computer":
self.game_info_var.set(BLACK_LOADING_TEXT)
if self.current_player.identifier == WHITE and self.current_player.agent_type == "human":
self.game_info_var.set(WHITE_TURN_TEXT)
if self.current_player.identifier == BLACK and self.current_player.agent_type == "human":
self.game_info_var.set(BLACK_TURN_TEXT)
elif event == BLACK_WON:
self.game_info_var.set(BLACK_WON_TEXT)
with open(LOG_FILE, "a") as f:
f.write("\n_*_*_*_*_*_*_*_*_*_*_*_* BLACK WON *_*_*_*_*_*_*_*_*_*_*_*_\n\n")
elif event == WHITE_WON:
self.game_info_var.set(WHITE_WON_TEXT)
with open(LOG_FILE, "a") as f:
f.write("\n_*_*_*_*_*_*_*_*_*_*_*_* WHITE WON *_*_*_*_*_*_*_*_*_*_*_*_\n\n")
elif event == DRAW:
self.game_info_var.set(DRAW_TEXT)
with open(LOG_FILE, "a") as f:
f.write("\n_*_*_*_*_*_*_*_*_*_*_*_* DRAW *_*_*_*_*_*_*_*_*_*_*_*_\n\n")
if event == BLACK_WON or event == WHITE_WON or event == DRAW:
if self.black.agent_type == "computer":
with open(LOG_FILE, "a") as f:
f.write("BLACK average branching factor = {}\n"
.format(self.black.total_branching_factor / self.black.turns))
f.write("BLACK average effective branching factor = {}\n"
.format(self.black.total_effective_branching_factor / self.black.turns))
f.write("BLACK average execution time = {}\n"
.format(self.black.total_execution_time / self.black.turns))
f.write("\n_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_\n\n")
f.write("WHITE average branching factor = {}\n"
.format(self.white.total_branching_factor / self.white.turns))
f.write("WHITE average effective branching factor = {}\n"
.format(self.white.total_effective_branching_factor / self.white.turns))
f.write("WHITE average execution time = {}\n"
.format(self.white.total_execution_time / self.white.turns))
def run_player_move(self, move=None):
pass_turn_to_computer = False
if self.current_player.agent_type == "human":
if move is not None:
self.game.apply_move(self.current_player.identifier, move)
self.current_player = self.black if self.current_player.identifier == WHITE else self.white
event = self.game.status()
if event == GAME_IN_PROGRESS:
if self.current_player.agent_type == "human":
moves = self.game.move_generator(self.current_player.identifier)
if len(moves) == 0: # If a player doesn't have a move, pass the play to the other player
self.current_player = self.black if self.current_player.identifier == WHITE else self.white
moves = self.game.move_generator(self.current_player.identifier)
if len(moves) == 0:
self.current_player = self.black if self.current_player.identifier == WHITE else self.white
event = self.game.status()
elif self.current_player.agent_type == "computer":
pass_turn_to_computer = True
self.black_score_var.set(self.game.black_score)
self.white_score_var.set(self.game.white_score)
self.set_game_info_text(event)
self.refresh()
if pass_turn_to_computer and event == GAME_IN_PROGRESS:
self.canvas.after(0, self.run_player_move)
elif self.current_player.agent_type == "computer":
player_move = self.current_player.get_move(self.game, self.current_player.identifier)
if player_move is not None:
self.game.apply_move(self.current_player.identifier, player_move)
self.current_player = self.black if self.current_player.identifier == WHITE else self.white
event = self.game.status()
if event == GAME_IN_PROGRESS:
if self.current_player.agent_type == "human":
moves = self.game.move_generator(self.current_player.identifier)
if len(moves) == 0: # If a player doesn't have a move, pass the play to the other player
self.current_player = self.black if self.current_player.identifier == WHITE else self.white
pass_turn_to_computer = True
elif self.current_player.agent_type == "computer":
pass_turn_to_computer = True
self.black_score_var.set(self.game.black_score)
self.white_score_var.set(self.game.white_score)
self.set_game_info_text(event)
self.refresh()
if pass_turn_to_computer and event == GAME_IN_PROGRESS:
self.canvas.after(0, self.run_player_move)
def add_piece(self, kind, row, column, hints=False):
x0 = (column * self.size) + int(self.size / 2)
y0 = (row * self.size) + int(self.size / 2)
if kind == WHITE:
self.canvas.create_image(x0, y0, image=self.white_img, tags="piece", anchor=CENTER)
elif kind == BLACK:
self.canvas.create_image(x0, y0, image=self.black_img, tags="piece", anchor=CENTER)
elif kind == VALID_MOVE:
move_btn = Button(self, bg=self.color, activebackground=self.color, relief=FLAT, overrelief=FLAT,
command=lambda: self.run_player_move([row, column]), anchor=CENTER)
if hints:
move_btn.configure(image=self.next_move_img)
self.moves_btns.append(move_btn)
self.canvas.create_window(x0, y0, anchor=CENTER, window=move_btn, height=self.size - 1, width=self.size - 1,
tags="move")
elif kind == LAST_MOVE:
self.canvas.create_oval(x0-5, y0-5, x0+5, y0+5, fill="red", tags="last_move", )
def update_images(self):
self.image_size = math.floor(self.size * 0.75)
image = PIL.Image.open(WHITE_IMG)
image = image.resize((self.image_size, self.image_size))
self.white_img = PIL.ImageTk.PhotoImage(image)
image = PIL.Image.open(BLACK_IMG)
image = image.resize((self.image_size, self.image_size))
self.black_img = PIL.ImageTk.PhotoImage(image)
image = PIL.Image.open(NEXT_MOVE_IMG)
image = image.resize((self.image_size, self.image_size))
self.next_move_img = PIL.ImageTk.PhotoImage(image)
def refresh(self):
if self.window_destroyed:
return
self.canvas.delete("last_move")
self.canvas.delete("piece")
self.canvas.delete("move")
for btn in self.moves_btns:
btn.destroy()
del btn
white_pieces_indices = np.argwhere(self.game.state == WHITE)
black_pieces_indices = np.argwhere(self.game.state == BLACK)
next_move_indices = np.argwhere(self.game.state == VALID_MOVE)
last_move_index = None
if self.game.last_move is not None:
last_move_index = self.game.last_move
for index in white_pieces_indices:
self.add_piece(WHITE, index[0], index[1])
for index in black_pieces_indices:
self.add_piece(BLACK, index[0], index[1])
if self.current_player.agent_type == "human":
for index in next_move_indices:
self.add_piece(VALID_MOVE, index[0], index[1], self.current_player.hints)
if last_move_index is not None:
self.add_piece(LAST_MOVE, last_move_index.x, last_move_index.y)
self.canvas.tag_raise("move")
self.canvas.tag_raise("piece")
self.canvas.tag_raise("last_move")
self.canvas.tag_lower("square")
self.canvas.update()
def initialize_board(self):
for row in range(self.n):
for col in range(self.n):
x1 = (col * self.size)
y1 = (row * self.size)
x2 = x1 + self.size
y2 = y1 + self.size
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill=self.color, tags="square")
white_pieces_indices = np.argwhere(self.game.state == WHITE)
black_pieces_indices = np.argwhere(self.game.state == BLACK)
next_move_indices = np.argwhere(self.game.state == VALID_MOVE)
for index in white_pieces_indices:
self.add_piece(WHITE, index[0], index[1])
for index in black_pieces_indices:
self.add_piece(BLACK, index[0], index[1])
if self.current_player.agent_type == "human":
for index in next_move_indices:
self.add_piece(VALID_MOVE, index[0], index[1], self.current_player.hints)
self.canvas.tag_raise("move")
self.canvas.tag_raise("piece")
self.canvas.tag_lower("square")
self.canvas.update()
def quit(self, event=None):
self.window_destroyed = True
self.destroy()
| from tkinter import *
import numpy as np
import PIL.Image
import PIL.ImageTk
import math
from othello import Othello
from agent import Agent
from constants import WHITE, BLACK, VALID_MOVE, WHITE_IMG, BLACK_IMG, NEXT_MOVE_IMG, BLACK_TURN_TEXT, WHITE_TURN_TEXT, \
BLACK_WON_TEXT, WHITE_WON_TEXT, DRAW_TEXT, BLACK_LOADING_TEXT, WHITE_LOADING_TEXT, GAME_IN_PROGRESS, BLACK_WON, \
WHITE_WON, DRAW, LOG_FILE, LAST_MOVE
class Board(Frame):
def __init__(self,
parent,
n,
size,
color,
black_player_type,
white_player_type,
black_hints,
white_hints,
black_depth,
white_depth,
black_evaluation_fn,
white_evaluation_fn,
black_move_ordering,
white_move_ordering):
open(LOG_FILE, "w")
# Initialize agents
self.black = Agent(BLACK,
black_player_type,
black_hints,
black_depth,
black_evaluation_fn,
black_move_ordering)
self.white = Agent(WHITE,
white_player_type,
white_hints,
white_depth,
white_evaluation_fn,
white_move_ordering)
if self.black.agent_type == "computer":
with open(LOG_FILE, "a") as f:
f.write("Black is initialized with the following parameters:\n"
"Depth: {}\nEvaluation Function Type: {}\nMove Ordering: {}".format(
self.black.depth, self.black.evaluation_fn, self.black.move_ordering
))
f.write("\n\n_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_\n\n")
if self.white.agent_type == "computer":
with open(LOG_FILE, "a") as f:
f.write("White is initialized with the following parameters:\n"
"Depth: {}\nEvaluation Function Type: {}\nMove Ordering: {}".format(
self.white.depth, self.white.evaluation_fn, self.white.move_ordering
))
f.write("\n\n_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_\n\n")
# Initialize game object
self.game = Othello(n)
# Pass turn to black as black always starts first
self.current_player = self.black
# Initialize board parameters
n = 2 ** math.ceil(math.log2(n))
self.n = n
self.size = size
self.color = color
# Initialize images
self.image_size = math.floor(size * 0.75)
image = PIL.Image.open(WHITE_IMG)
image = image.resize((self.image_size, self.image_size))
self.white_img = PIL.ImageTk.PhotoImage(image)
image = PIL.Image.open(BLACK_IMG)
image = image.resize((self.image_size, self.image_size))
self.black_img = PIL.ImageTk.PhotoImage(image)
image = PIL.Image.open(NEXT_MOVE_IMG)
image = image.resize((self.image_size, self.image_size))
self.next_move_img = PIL.ImageTk.PhotoImage(image)
# Initialize widgets (board, scoreboard)
Frame.__init__(self, parent, bg="gray")
self.black_score_var = IntVar(value=self.game.black_score)
self.white_score_var = IntVar(value=self.game.white_score)
if self.current_player.agent_type == "computer":
self.game_info_var = StringVar(value=BLACK_LOADING_TEXT)
else:
self.game_info_var = StringVar(value=BLACK_TURN_TEXT)
self.canvas = Canvas(self, borderwidth=0, highlightthickness=0, width=n * size, height=n * size, bg="gray")
self.score_board = Canvas(self, width=n * size, height=60, bg="gray", highlightthickness=0)
self.black_score_widget = Label(self.score_board, compound=LEFT, image=self.black_img,
text=self.game.black_score, bg="gray", padx=25,
textvariable=self.black_score_var, font='System 30 bold')
self.white_score_widget = Label(self.score_board, compound=RIGHT, image=self.white_img,
text=self.game.white_score, bg="gray", padx=25,
textvariable=self.white_score_var, font='System 30 bold')
self.info_widget = Label(self.score_board, compound=RIGHT, text=BLACK_TURN_TEXT, bg="gray", font='System 15',
textvariable=self.game_info_var)
self.black_score_widget.image = self.black_img
self.white_score_widget.image = self.white_img
self.moves_btns = []
# Render widgets
self.canvas.pack(side="top", fill="both", expand=True, padx=4, pady=4)
self.score_board.pack(side="bottom", fill="both", expand=True, padx=4, pady=4)
self.black_score_widget.pack(side="left")
self.info_widget.pack(side="left", expand=True)
self.white_score_widget.pack(side="right")
self.canvas.bind("<Destroy>", self.quit)
self.window_destroyed = False
self.initialize_board()
if self.current_player.agent_type == "computer":
self.canvas.after(1000, self.run_player_move)
else:
self.run_player_move()
def set_game_info_text(self, event=GAME_IN_PROGRESS):
if event == GAME_IN_PROGRESS:
if self.current_player.identifier == WHITE and self.current_player.agent_type == "computer":
self.game_info_var.set(WHITE_LOADING_TEXT)
if self.current_player.identifier == BLACK and self.current_player.agent_type == "computer":
self.game_info_var.set(BLACK_LOADING_TEXT)
if self.current_player.identifier == WHITE and self.current_player.agent_type == "human":
self.game_info_var.set(WHITE_TURN_TEXT)
if self.current_player.identifier == BLACK and self.current_player.agent_type == "human":
self.game_info_var.set(BLACK_TURN_TEXT)
elif event == BLACK_WON:
self.game_info_var.set(BLACK_WON_TEXT)
with open(LOG_FILE, "a") as f:
f.write("\n_*_*_*_*_*_*_*_*_*_*_*_* BLACK WON *_*_*_*_*_*_*_*_*_*_*_*_\n\n")
elif event == WHITE_WON:
self.game_info_var.set(WHITE_WON_TEXT)
with open(LOG_FILE, "a") as f:
f.write("\n_*_*_*_*_*_*_*_*_*_*_*_* WHITE WON *_*_*_*_*_*_*_*_*_*_*_*_\n\n")
elif event == DRAW:
self.game_info_var.set(DRAW_TEXT)
with open(LOG_FILE, "a") as f:
f.write("\n_*_*_*_*_*_*_*_*_*_*_*_* DRAW *_*_*_*_*_*_*_*_*_*_*_*_\n\n")
if event == BLACK_WON or event == WHITE_WON or event == DRAW:
if self.black.agent_type == "computer":
with open(LOG_FILE, "a") as f:
f.write("BLACK average branching factor = {}\n"
.format(self.black.total_branching_factor / self.black.turns))
f.write("BLACK average effective branching factor = {}\n"
.format(self.black.total_effective_branching_factor / self.black.turns))
f.write("BLACK average execution time = {}\n"
.format(self.black.total_execution_time / self.black.turns))
f.write("\n_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_\n\n")
f.write("WHITE average branching factor = {}\n"
.format(self.white.total_branching_factor / self.white.turns))
f.write("WHITE average effective branching factor = {}\n"
.format(self.white.total_effective_branching_factor / self.white.turns))
f.write("WHITE average execution time = {}\n"
.format(self.white.total_execution_time / self.white.turns))
def run_player_move(self, move=None):
pass_turn_to_computer = False
if self.current_player.agent_type == "human":
if move is not None:
self.game.apply_move(self.current_player.identifier, move)
self.current_player = self.black if self.current_player.identifier == WHITE else self.white
event = self.game.status()
if event == GAME_IN_PROGRESS:
if self.current_player.agent_type == "human":
moves = self.game.move_generator(self.current_player.identifier)
if len(moves) == 0: # If a player doesn't have a move, pass the play to the other player
self.current_player = self.black if self.current_player.identifier == WHITE else self.white
moves = self.game.move_generator(self.current_player.identifier)
if len(moves) == 0:
self.current_player = self.black if self.current_player.identifier == WHITE else self.white
event = self.game.status()
elif self.current_player.agent_type == "computer":
pass_turn_to_computer = True
self.black_score_var.set(self.game.black_score)
self.white_score_var.set(self.game.white_score)
self.set_game_info_text(event)
self.refresh()
if pass_turn_to_computer and event == GAME_IN_PROGRESS:
self.canvas.after(0, self.run_player_move)
elif self.current_player.agent_type == "computer":
player_move = self.current_player.get_move(self.game, self.current_player.identifier)
if player_move is not None:
self.game.apply_move(self.current_player.identifier, player_move)
self.current_player = self.black if self.current_player.identifier == WHITE else self.white
event = self.game.status()
if event == GAME_IN_PROGRESS:
if self.current_player.agent_type == "human":
moves = self.game.move_generator(self.current_player.identifier)
if len(moves) == 0: # If a player doesn't have a move, pass the play to the other player
self.current_player = self.black if self.current_player.identifier == WHITE else self.white
pass_turn_to_computer = True
elif self.current_player.agent_type == "computer":
pass_turn_to_computer = True
self.black_score_var.set(self.game.black_score)
self.white_score_var.set(self.game.white_score)
self.set_game_info_text(event)
self.refresh()
if pass_turn_to_computer and event == GAME_IN_PROGRESS:
self.canvas.after(0, self.run_player_move)
def add_piece(self, kind, row, column, hints=False):
x0 = (column * self.size) + int(self.size / 2)
y0 = (row * self.size) + int(self.size / 2)
if kind == WHITE:
self.canvas.create_image(x0, y0, image=self.white_img, tags="piece", anchor=CENTER)
elif kind == BLACK:
self.canvas.create_image(x0, y0, image=self.black_img, tags="piece", anchor=CENTER)
elif kind == VALID_MOVE:
move_btn = Button(self, bg=self.color, activebackground=self.color, relief=FLAT, overrelief=FLAT,
command=lambda: self.run_player_move([row, column]), anchor=CENTER)
if hints:
move_btn.configure(image=self.next_move_img)
self.moves_btns.append(move_btn)
self.canvas.create_window(x0, y0, anchor=CENTER, window=move_btn, height=self.size - 1, width=self.size - 1,
tags="move")
elif kind == LAST_MOVE:
self.canvas.create_oval(x0-5, y0-5, x0+5, y0+5, fill="red", tags="last_move", )
def update_images(self):
self.image_size = math.floor(self.size * 0.75)
image = PIL.Image.open(WHITE_IMG)
image = image.resize((self.image_size, self.image_size))
self.white_img = PIL.ImageTk.PhotoImage(image)
image = PIL.Image.open(BLACK_IMG)
image = image.resize((self.image_size, self.image_size))
self.black_img = PIL.ImageTk.PhotoImage(image)
image = PIL.Image.open(NEXT_MOVE_IMG)
image = image.resize((self.image_size, self.image_size))
self.next_move_img = PIL.ImageTk.PhotoImage(image)
def refresh(self):
if self.window_destroyed:
return
self.canvas.delete("last_move")
self.canvas.delete("piece")
self.canvas.delete("move")
for btn in self.moves_btns:
btn.destroy()
del btn
white_pieces_indices = np.argwhere(self.game.state == WHITE)
black_pieces_indices = np.argwhere(self.game.state == BLACK)
next_move_indices = np.argwhere(self.game.state == VALID_MOVE)
last_move_index = None
if self.game.last_move is not None:
last_move_index = self.game.last_move
for index in white_pieces_indices:
self.add_piece(WHITE, index[0], index[1])
for index in black_pieces_indices:
self.add_piece(BLACK, index[0], index[1])
if self.current_player.agent_type == "human":
for index in next_move_indices:
self.add_piece(VALID_MOVE, index[0], index[1], self.current_player.hints)
if last_move_index is not None:
self.add_piece(LAST_MOVE, last_move_index.x, last_move_index.y)
self.canvas.tag_raise("move")
self.canvas.tag_raise("piece")
self.canvas.tag_raise("last_move")
self.canvas.tag_lower("square")
self.canvas.update()
def initialize_board(self):
for row in range(self.n):
for col in range(self.n):
x1 = (col * self.size)
y1 = (row * self.size)
x2 = x1 + self.size
y2 = y1 + self.size
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill=self.color, tags="square")
white_pieces_indices = np.argwhere(self.game.state == WHITE)
black_pieces_indices = np.argwhere(self.game.state == BLACK)
next_move_indices = np.argwhere(self.game.state == VALID_MOVE)
for index in white_pieces_indices:
self.add_piece(WHITE, index[0], index[1])
for index in black_pieces_indices:
self.add_piece(BLACK, index[0], index[1])
if self.current_player.agent_type == "human":
for index in next_move_indices:
self.add_piece(VALID_MOVE, index[0], index[1], self.current_player.hints)
self.canvas.tag_raise("move")
self.canvas.tag_raise("piece")
self.canvas.tag_lower("square")
self.canvas.update()
def quit(self, event=None):
self.window_destroyed = True
self.destroy() | en | 0.936307 | # Initialize agents # Initialize game object # Pass turn to black as black always starts first # Initialize board parameters # Initialize images # Initialize widgets (board, scoreboard) # Render widgets # If a player doesn't have a move, pass the play to the other player # If a player doesn't have a move, pass the play to the other player | 2.932645 | 3 |
plainenglish.py | anthonycurtisadler/ARCADES | 7 | 6622130 | ### contains queries, labels, alerts for plain English language, as well as the commands.
### and various input terms.
### NOTE that the names of the commands cannot be changed since
from globalconstants import DASH, PLUS, CARET,\
VERTLINE, EOL, DOLLAR, POUND, SEMICOLON, QUESTIONMARK
import commandscript
def make_commands(text):
text = text.lower()
return (text[0], text, text.capitalize(), text[0].upper())
ADDTERMS = make_commands('add')
DELETETERMS = make_commands('delete')
SHOWTERMS = make_commands('show')
QUITTERMS = make_commands('quit')
CLEARTERMS = make_commands('clear')
QUITALLTERMS = ('a','all','A','All',
'ALL','Quitall','QUITALL',
'quitall')
LEARNTERMS = make_commands('learn')
UNLEARNTERMS = make_commands('unnlearn')
BREAKTERMS = make_commands('break')
NEWTERMS = make_commands('new')
YESTERMS = ['yes',
'Yes',
'yeah',
'sure',
'whatever',
'ja',
'jawohl']
NOTERMS = ['no',
'No',
'no way',
'absolutely no',
'god no',
'heaven forbid']
class Queries:
def __init__(self):
self.OPEN_NEW1 = "(N)o to open "
self.OPEN_NEW2 = " (Y)es to open a different notebook, or (Q)uit "
self.RESUME_PROJECTS = 'RESUME PROJECTS? (y)es (no) or list of projects to resume!'
self.MOVE_SHELVES = "DO YOU WANT TO MOVE (S)HELF,SE(Q)UENCES, AND "+\
"(P)ROJECTS TO DATABASE? ENTER ALL THAT APPLY"
self.INITIAL_MENU = "(1) Display commands \n"+\
"(2) display commands in compact mode\n"+\
"(3) Start in Betamode \n"+\
"(4) Start in regular mode \n"+ \
"(5) Start in the advanced mode \n"+\
"(6) View file registry"
self.SELECTING_NOTEBOOK ="""Name or index of notebook,
(N)ew to open a new notebook,
or quit(A)ll to close all notebooks"""
self.SELECT_OPEN_NOTEBOOK ="""Name or index of notebook,
(N)ew to open a new notebook,
or(Q)uit to quit the current notebook,
Quit(A)ll to quit all notebooks."""
self.SELECT_NOTEBOOK_HEADING = '/C/ SELECT NOTEBOOK'
self.CHARACTERS_TO_CONVERT = 'Character to convert? '
self.RANGE_FROM = 'Range from? '
self.RANGE_TO = 'Range to? '
self.INDEX_TO_MERGE = 'Index to merge? '
self.DESTINATION = 'Destination? '
self.SOURCE_TO_FROM = 'Source from / to? '
self.STRICT_RANGE_TO_FROM = 'Strict range from/to? '
self.RANGE_TO_FROM = 'Range from / to? '
self.EDIT_OPTIONS = 'ENTER new text, or RETURN to keep,'+\
'or '+DASH+' to DELETE, or '\
+PLUS+' to insert new line before,'\
+ ' or '+DASH+DASH+ ' to delete all subsequent lines '\
+' or '+CARET+' to replace! And '\
+VERTLINE+'to add an EOL mark.'\
+EOL+DOLLAR+'To append before or'\
+POUND+' to append after! '
self.ENTER_KEYWORDS = 'Enter the keywords that you wish to keep! '
self.SELECT_FILE ='Enter the number of'\
+' file to open, or name of new file, or'\
+EOL+'(B)ack to return to initial directory! '
self.OPEN_CONFIRM = 'Are you sure you want to open: '
self.AUTOKEYS_KEEP = 'Numbers of autokeys to keep,'\
+'to delete (start list with $)'\
+'or ALL to delete all autokeys, or SWITCH?'
self.DELETE_CONF_BEG = 'Are you sure you want to delete? '
self.DELETE_CONF_END = ' from the entire notebase. This cannot be undone! '
self.REVISE_DELETE_BEG = 'Revise '
self.REVISE_DELETE_END = ' to ____ ? ... or delete? '
self.RESUME_ABORTED_NOTE = 'Resume aborted note? '
self.KEYS = 'Keys? '
self.NEW_KEY_LIST = '<yes> to keep all, '\
+'<no> to discard all, '\
+'or enter a selected range? '
self.ENTER_SEARCH_TERM ='Enter composite search term, '\
+' e.g [1]%[2]!- Begin '\
+'with $ to show notes! '
self.ADDITIONAL_KEYS = 'Additional keys '\
+'to apply to'\
+' inputed paragraphs? '
self.INCLUDE = 'Include? '
self.KEYWORDS_TO_ADD = 'enter keywords to add? '
self.CONTINUE = 'Continue? '
self.DELETE_FROM_TO = 'Delete from/to? '
self.CHILD_DEPTH = 'Depth of children to display? '
self.DEMARC_MARK = 'Demarcating mark? '
self.CHILD_KILL = 'Child to kill? '
self.LEVELS_TO_SHOW = 'Levels to show? '
self.SUB_OR_MAKE_CHILDREN = '[S]ubordinate, '\
+' [M]ake compact or [C]hildren? '
self.NO_CHILDREN = 'No children? '
self.ADD_TO_AUTOKEYS = 'Add to autokeys? '
self.COPY_HOW_MANY = 'Copy how many? '
self.LEVELS_TO_SHOW = 'Levels to show? '
self.SEARCH_PHRASE = 'Search phrase? '
self.CONFLATE_EMBED = '[C]onflate or [E]mbed? '
self.WIDTH = 'Width? '
self.INDEX = 'Index? '
self.INDEX_OR_RANGE = 'Index or Indexrange? '
self.COLUMNS = 'Columns? '
self.BREAKER = 'Breaker? '
self.BREAK_MARK = 'Break mark? '
self.SURE = 'Are you sure? '
self.FIELDNAME = 'Fieldname? '
self.READ_ONLY = 'Read only? '
self.OPEN_DIFFERENT = 'Open a different notebook or QUIT? '
self.BETA_MODE = 'Do you wish to use '\
+'NOTESCRIPTION in the betamode? '
self.START_COMMAND = 'SPACE to SKIP, '\
+'TRIPPLESPACE for COMPACT MODE'
self.LANGUAGE_SUFFIX = 'Language + suffix'
self.LANGUAGE = 'Language? '
self.DISPLAY_STREAM = 'Display stream? '
self.DETERMINANT = 'Determinant ymd*hsx? '
self.PURGE_WHAT = ' purge a(llcaps) u(pper) l(ower).TERMS ? '
self.SUFFIX = 'Suffix? '
self.LEARN_WHAT = 'Learn that what? '
self.IS_WHAT = ' is a what? '
self.WHICH_COMMAND = 'Which command? '
self.MENU_ONE = '[, >,<, ] (Q)uit '
self.KEYS_TO_ELIMINATE = 'Keys to eliminate? '
self.INCLUDE_META = 'Include metadata? '
self.SHOW_INDEXES = 'Show indexes? '
self.JUMP_AHEAD_BY = 'Jump ahead how much? '
self.OLD_USER = 'Old user? '
self.NEW_USER = 'New user? '
self.UNLEARN_BEG = 'Unlearn that what? '
self.UNLEARN_END = ' is a what? '
self.NEW_LIMIT_LIST = 'New limit list? '\
+' Enter range, F for flipbook,'\
+'or R to reset! '
self.FROM = 'From? '
self.TO = 'To? '
self.SAVE_TO = 'SAve to? '
self.LONG_MAX = 'Maximum number of notes displayed in longform? '
self.KEY_COUNT = 'Keycount? '
self.EMPTY_BREAK_NEW = '(e)mpty,(b)reak,(n)ewnote? '
self.SPECS = 'Specs . terms to purge? '
self.SET_KEY_TRIM = 'Set trim for displaying keywords? '
self.SET_TEXT_TRIM = 'Set trim for displaying text? '
self.NEW_NOTE_SIZE = 'New note size? '
self.OPEN_AS_NEW = 'Open as new file? '
self.FIRST_NEWEST_ALL = 'f(irst) (n)ewest (a)ll (i)ndex? '
self.DETERMINANT2 = 'Determinant? '
self.NAME_FOR = 'Name for '
self.UNDO_UP_TO = 'Undo up to? '
self.TOTO = ' to '
self.ALSO_ABORT = ' or ABORT to abort'
self.OTHERS_TO_PURGE = 'Other keywords to purge? '
self.EXCLUDE_ALL_CAPS = 'Exclude all-cap keywords? '
self.EXCLUDE_CAPITALIZED = 'Exclude capitalized keywords? '
self.WHAT_TO_PURGE = 'purge (c)apitalized, (a)ll caps, (l)ower case'
self.RETURN_QUIT = ' Exit noteentry after how many returns?'
self.CLUSTER = 'Cluster? '
self.KEY_MACRO_NAME = 'Key macro name? '
self.KEY = 'Key? '
self.PROJECT_NAME = 'Project name? '
self.INDENT_MULTIPLIER = 'Indent multiplier? '
self.SMALL_SIZE = 'Small size? '
self.CLEAR_DEFAULT_KEYS = 'Clear default keys? '
self.SIDE = 'Go to side? '
self.SIDES = 'Number of sides? '
self.TEXT_TO_SAVE = 'TEXT to save? '
self.SAVE_TO_FILE = 'File to save to? '
self.FOLDER = 'In folder? '
self.TEXT_TO_PRINT = 'TEXT to print '
self.FLIP_AT = 'Flip at? '
self.SHOW_ALL_NOTES = 'Do you want to show all the notes in the notebook? '
self.DIVIDE_PICKLE = "Do you want to divide "\
+" the pickle file?" +\
" (Y)yes to divide (D)on't ask again? "
self.LANGUAGE = 'Language? '
self.LANGUAGE_SELECT = 'es(panol) fr(ench) en(glish) de(utsch)? '
self.FUNCTION_NAME = 'Function name? '
self.TEXT_TO_CONVERT = 'Text to convert? '
self.TEXT_TO_INTERPRET = 'Text to interpret? '
self.INCLUDE_PROJECTS = 'Include projects? '
self.SEQ_FORM_ONE = 'Formatting after each sequence? (s) for space,' + EOL + \
'(l) for EOL, (c) for COMMA and SPACE, ' + EOL + \
'(b) for break, (n) for new or OTHER TEXT '
self.SEQ_FORM_TWO = 'Formatting after all sequence? (e) for emptychar, '+ EOL + \
'(l) for EOL, (b)reak, (n)ew or OTHER TEXT '
self.MAIN_SEQUENCES = 'Main sequences? Enter as a list separated by commas or (d)efaults! '
self.REGISTRY = '(o)pen as read only \n'+\
'(c)orrect registry and continue' +\
'\n (s)elect another?'
self.RECON_KEY = 'Reconstitute key dictionary? '
self.RECON_WORD = 'Reconstitute word dictionary? '
self.RESUME_FROM_WHERE = 'Do you want to start from where you left off?'
class Alerts:
    """Status / notification strings shown to the user.

    Pure constant container: every attribute is a display string and there
    is no logic.  Attribute names are referenced by name elsewhere in the
    application, so they must not be renamed.  The '/C/' prefix appears to
    be a console-formatting marker -- NOTE(review): confirm against the
    display code.
    """

    def __init__(self):
        self.ADDED_TO_DATABASE_REGISTER = " ADDED TO DATABASE REGISTER"
        self.ATTENTION = '/C/ ATTENTION'
        self.MOVING_NOTE = 'MOVING NOTE DICTIONARY FROM SHELF!'
        self.SELECTED = '/C/ SELECTED'
        self.CONSTITUTING_WORD_DICT = '/C/ CONSTITUTING WORD DICTIONARY!'
        self.WAIT = '/C/ PLEASE WAIT!'
        self.EDITING = '/C/EDITING NOTE'
        self.ON = 'ON'
        self.OFF = 'OFF'
        self.LEARNED_BEG = 'I learned that '
        self.LEARNED_MIDDLE = ' is a(n) '
        self.NOTE_ADDED = 'Note added at'
        self.CHANGE = 'Change'
        self.TO = 'to'
        self.REHOMED = 'SUCCESSFULLY REHOMED!'
        self.ITERATOR_RESET = 'ITERATOR RESET'
        self.KEYS_FOR_DATES = 'KEYS FOR DATES'
        self.APPEARS_BEG = ' APPEARS'
        self.APPEARS_END = ' TIMES. FREQUENCY='
        self.FAILED_CONF_LOAD = '/C/ FAILED TO LOAD CONFIGURATION FILE'
        self.CREATING_NEW_CONF = '/C/ CREATING NEW CONFIGURATION FILE'
        self.NEW_PICKLE = '/C/ NEW PICKLE FILE'
        self.ENTER_DOCUMENTATION = '/C/ ENTER documentation '\
            +'TO LOAD INSTRUCTIONS'
        self.IS_INCONSISTENT = '/C/ NOTEBOOK IS INCONSISTENT'
        self.STILL_INCONSISTENT = '/C/ STILL INCONSISTENT'
        self.IS_CONSISTENT = '/C/ NOTEBOOK IS CONSISTENT'
        self.TOO_MANY_INDEXES = '/C/ TOO MANY INDEXES!'
        self.IS_CLOSING = '/C/ IS CLOSING!'
        self.OPENING = '/C/ WILL BE OPENED AS '
        self.ALREADY_OPEN = ' IS ALREADY OPEN!'
        self.DELETE_FROM_TO = '/C/ DELETE FROM / TO'
        # NOTE(review): attribute name is misspelled ('EXLUDE'); kept as-is
        # because callers may reference it by this exact name.
        self.EXLUDE_ALL_CAPS = 'Exclude all-cap keywords? '
        self.EXCLUDE_CAPITALIZED = 'Exclude capitalized keywords? '
        self.NOT_YET_CLUSTERED = '/C/ NOT YET CLUSTERED'
        self.FLIP_CHANGED = 'FLIPBOOK changed to '
        self.SAVING = '/C/ SAVING '
        self.WORD_DICT_CONSTITUTED = '/C/ WORD DICTIONARY CONSTITUTED'
        self.NOT_REGULAR = '/C/ NOT REGULAR'
        self.ADDED = '/C/ ADDED'
        self.MOVING_FROM = '/C/ MOVING FROM '
        self.COPIED_TO_TEMP = ' COPIED TO TEMPORARY BUFFER!'
        self.NOTE = 'NOTE '
        self.MOVED_TO = ' MOVED TO '
        self.COPIED_TO = ' COPIED TO '
        self.MOVING_TO = 'MOVING TO '
        self.COPYING_TO = 'COPYING to '
        self.OLD = '/C/ Old '
        self.KEYS = 'Keys? '
        self.FIELDS = ' Fields?'
        self.LOADING_FILE = '/c/ LOADING FILE'
        self.REVISE_DELETE_END = ' Enter new term, RETURN to keep, or (d)elete!'
        self.ALREADY_IN_USE = '/C/ ALREADY IN USE'
        self.STILL_CHANGED = 'Still change? '
        self.INDEX = 'INDEX '
        self.NOT_FOUND_IN_NOTEBASE = ' NOT FOUND IN NOTEBOOK!'
        self.NO_DICTIONARY_OBJECT = '/C/ NO DICTIONARY OBJECT'
        self.NEW_SEQUENCE = 'NEW SEQUENCE DICTIONARY CREATED OF TYPE '
        self.OVERWRITTEN = 'OVERWRITTEN. NEW SEQUENCE DICTIONARY CREATED OF TYPE '
        # NOTE(review): 'RECONSTITING' is a typo in a user-visible string;
        # left unchanged here (doc-only edit).
        self.RECONSTITUTING_INDEXES = 'RECONSTITING INDEX SEQUENCE '
        self.WAS_DELETED = ' HAS BEEN DELETED!'
        self.DELETE = 'DELETE '
        self.FAILED = 'FAILED '
        self.SAVED = ' SAVED! '
        self.TOO_LARGE = 'TOO LARGE '
        self.ADDED_TO_KEYLIST = ' added to keylist! '
        self.SUCCESSFULLY_RESUMED = 'Successfully resumed!'
        self.NOT_CLOSED = ' is still in use or has not been closed properly!'
class Labels:
    """Heading / label strings used when displaying screens and settings.

    Pure constant container (no logic).  An instance is created at module
    level (``labels``) and consumed by ``binary_settings`` below.  The
    '/C/' prefix appears to be a console-formatting marker --
    NOTE(review): confirm against the display code.
    """

    def __init__(self):
        self.SELECT = '/C/ SELECT'
        self.ENTRYCOMMANDS = '/C/ ENTRYCOMMANDS'
        self.SEARCHES = '/C/ SEARCHES'
        self.CLUSTER = '/C/ CLUSTER'
        self.CONFIGURATIONS = '/C/ CONFIGURATIONS'
        self.ALL_COMMANDS = '/C/ ALL COMMANDS'
        self.ALWAYS_NEXT = '/C/ ALWAYS NEXT'
        self.ALWAYS_CHILD = '/C/ ALWAYS CHILD'
        self.MARKED = '/C/ MARKED'
        self.DEPTH = '/C/ DEPTH'
        self.DEFAULT_KEYS = '/C/ DEFAULT KEYS'
        self.GRABBED_KEYS = '/C/GRABBED KEYS'
        self.RESULT_FOR = 'RESULT FOR '
        self.INDEXES = '/C/ INDEXES'
        self.KEYS = '/C/ KEYS'
        # NOTE(review): ITERATOR_SHOW is assigned again further down with a
        # trailing space; that later assignment wins.
        self.ITERATOR_SHOW = '/C/ SHOW INDEXES WITH ITERATOR RESET'
        self.CAPKEYS = '/C/ CAPKEYS'
        self.PROPER_NAMES = '/C/ PROPER NAMES'
        self.OTHER_KEYS = '/C/ OTHER KEYS'
        # NOTE(review): 'CHILDRED' looks like a typo for 'CHILDREN' in a
        # user-visible string; left unchanged (doc-only edit).
        self.SHOW_TOP = '/C/ SHOW THE TOP NOTE WITH CHILDRED'
        self.TAGS = '/C/ TAGS'
        # NOTE(review): PURGEKEYS and PURGE_KEYS (below) hold the same text.
        self.PURGEKEYS = '/C/ PURGEKEY SETTINGS'
        self.FIELD = '/C/ FIELDS'
        self.FILE_ERROR = '/C/ FILE ERROR!'
        self.CONSTITUTING_KEY_FREQ = ' /C/CONSTITUTING KEY'\
            +' FREQUENCY DICTIONAR!'
        self.WELCOME_HEAD = '/C/ WELCOME'
        self.WELCOME_BODY = '/C/ WELCOME TO ARCADES!'
        self.MAX_DEPTH = '/C/ MAXIMUM INDEX DEPTH'
        self.LIMIT_LIST_RESET = '/C/ LIMIT LIST RESET'
        self.LIMIT_LIST = '/C/ LIMIT LIST'
        self.FORMATTING_HELP = '/C/ FORMATTING HELP'
        self.STREAMS = '/C/ STREAM'
        self.DETERMINANT = '/C/ DETERMINANTS'
        self.HEADER = '/C/ HEADER'
        self.FOOTER = '/C/ FOOTER'
        self.LEFT_MARG = '/C/ LEFTMARGIN'
        self.AUTOBACKUP = '/C/ AUTOBACKUP'
        self.RECTIFY = '/C/ RECTIFY'
        self.AUTOMULTI = '/C/ AUTOMULTI DISPLAY'
        self.QUICK_ENTER = '/C/ QUICK ENTER'
        self.METADATA = '/C/ METADATA FOR NOTE #'
        self.CURTAIL = '/C/ CURTAIL'
        self.LIMIT_LIST_CHANGED = '/C/ LIMIT LIST CHANGED TO'
        self.SHOW_CONFIG_BOX = '/C/ SHOW CONFIGURATION IN BOXES'
        self.PURGE_KEYS = '/C/ PURGEKEY SETTINGS'
        self.KEY_TRIM = '/C/ KEY TRIM'
        self.TEXT_TRIM = '/C/ TEXT TRIM'
        self.SIZE = '/C/ SIZE'
        self.FLIPOUT = '/C/ FLIPOUT'
        self.SHORTSHOW = '/C/ SHORTSHOW'
        self.NAME_INTERPRET = '/C/ NAME INTERPRET'
        # Re-assignment: overrides the earlier ITERATOR_SHOW value above.
        self.ITERATOR_SHOW = '/C/ SHOW INDEXES WITH ITERATOR RESET '
        self.NONE = '/C/ NONE '
        self.COMMAND_EQ = '/C/ COMMAND = '
        self.CONCORDANCE = '/C/ CONCORDANCE '
        self.TO_UNDO = '/C/ TO UNDO '
        self.DELETED = '/C/ DELETED NOTES '
        self.CONFIG_SAVED = '/C/ CONFIGURATION SAVED '
        self.VARIABLES = '/C/ VARIABLES '
        self.KEYS_BEFORE = '/C/ KEYS BEFORE '
        self.KEYS_AFTER = '/C/ KEYS AFTER '
        self.CARRY_OVER_KEYS = '/C/ CARRY OVER KEYS '
        self.CARRY_ALL = '/C/ CARRY OVER ALL PARENTS '
        self.SETTINGS = '/C/ SETTINGS '
        self.RETURN_QUIT_ON = '/C/ RETURNQUIT '
        self.CLUSTERS = '/C/ CLUSTERS '
        self.PROJECT_DISPLAY = '# |PROJECTNAME| INDEX | KEYS '
        self.NEGATIVE_RESULTS = '/C/ SHOW NEGATIVE RESULTS '
        self.INDENT_MULTIPLIER = '/C/ INDENT MULTIPLIER '
        self.ITERATOR = '/C/ ITERATOR '
        self.MUST_BE_BETWEEN = '/C/ MUST BE BETWEEN '
        self.AND = ' AND '
        self.SMALL_SIZE = '/C/ SMALL SIZE '
        self.LONG_MAX = '/C/ LONGMAX '
        self.SIDE = '/C/ SIDE '
        self.SIDES = '/C/ SIDES '
        self.FLIP_AT = '/C/ FLIP AT '
        self.TAG_DEFAULT = '/C/ TAG DEFAULT '
        self.USE_SEQUENCE = '/C/ USE SEQUENCE '
        self.NO_FLASH = "/C/ DON'T SHOW FLASH CARDS "
        self.CHECK_SPELLING = '/C/ SPELL CHECK '
        self.FLASHMODE = '/C/ FLASHMODE '
        self.SHOW_DATE = '/C/ SHOW DATE '
        self.SORT_BY_DATE = '/C/ SORT BY DATE '
        self.ORDER_KEYS = '/C/ ORDER KEYS '
        self.ENTER_HELP = '/C/ ENTERHELP '
        self.CHILDREN_TOO = '/C/ CHILDREN TOO '
        self.SHOW_IMAGES = '/C/ SHOW IMAGES '
        self.SHOW_TEXTFILES = '/C/ SHOW TEXTFILES '
        self.DELETE_WHEN_EDITING = '/C/ DELETE WHEN EDITING '
        self.VARIABLE_SIZE = '/C/ VARIABLE SIZE '
        self.SEQUENCE_IN_TEXT = '/C/ SEQUENCE IN TEXT '
        self.MAIN_SEQUENCES = '/C/ MAIN SEQUENCES '
        self.SEQ_FORM_ONE = '/C/ FIRST SEQUENCE FORM '
        self.SEQ_FORM_TWO = '/C/ SECOND SEQUENCE FORM '
        self.FROM_TEXT = '/C/ KEYWORDS FROM TEXT '
        self.CONVERT_BY_LINE = '/C/ CONVERT BY LINE '
        self.ADD_DIAGNOSTICS = '/C/ ADD DIAGNOSTICS '
        self.PREVIOUS_PROJECTS = '/C/ PREVIOUS PROJECTS!'
        self.OPEN_FILES = '/C/ OPEN FILES'
        self.OPENING = 'opening '
        self.APPLY_ABR_INP = '/C/ APPLY INPUT ABBREVIATIONS'
        self.KEY_INPUT_MODE = '/C/ KEY EDIT MODE'
        self.CARRY_KEYS = '/C/ CARRY KEYS'
        self.ABRIDGEDFORMAT = '/C/ ABRIDGED FORMAT'
class Spelling:
    """Prompt and message strings for the spell-checking console.

    Pure constant container (no logic).
    """

    def __init__(self):
        self.INPUT_MENU = 'Press RETURN to keep,'\
            +'DOUBLESPACE to quit,'\
            +'SPACE+RETURN to add,'\
            +'enter new spelling, '\
            +'[start with a space to ADD],'\
            +'or a number from the following list'
        # NOTE(review): the last fragment below uses implicit string
        # concatenation (no '+'), unlike its siblings.  The result is the
        # same; flagged only for consistency.
        self.SMALL_INPUT_MENU = 'Press RETURN to keep,'\
            +'SPACE+RETURN to add,'\
            +'DOUBLESPACE to quit, '\
            +'enter new spelling '\
            '[start with a space to ADD]'
        # NOTE(review): 'mispelled' is itself misspelled in this
        # user-visible string; left unchanged (doc-only edit).
        self.IS_MISPELLED = 'is mispelled'
        self.SPELLING_DICTIONARY = '/C/SPELLING DICTIONARY'
        self.WORDS_TO_DELETE = 'A(dd) new word\nD(elete)'\
            +'\nL(oad)words from text'\
            +'\nS(how) words\nC(hange) language'\
            +'\n(E)rase\nX(change database)\n(Q)uit'
        self.TEXT_TO_ADD = 'Text to add?'
        self.ARE_YOU_SURE = 'Are you sure?'
        self.THERE_ARE = 'There are '
        self.MISSPELLED = ' misspelled words!'
        self.SKIP_CORRECTIONS = 'Press SPACE+RETURN to skip corrections'
        self.WORD_TO_ADD = 'New word to add?'
        self.WORD_TO_DELETE = 'Words to delete?'
        self.LANGUAGE_SELECT = 'es(panol) fr(ench) en(glish) de(utsch)?'
class DefaultConsoles:
    """Menu and prompt strings for the key-definition / knowledge consoles.

    Pure constant container (no logic).
    """

    def __init__(self):
        self.KEY_DEF = 'KEYWORDS : DEFINITIONS'
        self.ADD_MENU = 'A)dd'
        self.DELETE_MENU = 'D)elete'
        self.SHOW_MENU = 'S)how'
        self.CLEAR_MENU = 'C)lear'
        self.QUIT_MENU = 'Q)uit'
        self.LEARN_MENU = '(L)earn'
        self.UNLEARN_MENU = '(U)nlearn'
        self.KEYMACRO = 'Keymacro'
        self.KEYS = 'Keys?'
        self.DEFINITIONS = 'Definitions?'
        self.DELETE = 'Delete?'
        self.CLEAR = 'Are you sure you want to clear?'
        self.ADD = 'Add| '
        self.DELETING = 'DELETING'
        self.FROM_THIS = 'From this (short)? '
        self.TO_THIS = 'to this(long) ? '
        self.I_KNOW = 'I know that '
        self.IS_WHAT_IT_IS = ' is what it is'
        self.IS_AN = ' is a(n) '
        self.LEARN_THAT_THIS = 'Learn that this?'
        self.IS_WHAT = 'is what?'
        self.UNLEARN_THAT_THIS = 'Unlearn that this?'
        # NOTE(review): duplicates the text of CLEAR above.
        self.ARE_YOU_SURE = 'Are you sure you want to clear?'
# Module-level Labels instance consumed by the tables below.
labels = Labels()

# Maps a toggle-setting command name (as typed by the user) to a pair:
#   (expression-string naming the boolean attribute to flip,
#    display label shown when the setting is reported).
# The first element is a Python expression in string form (e.g.
# "self.default_dict['curtail']") -- presumably eval'd/exec'd against the
# application object by the command interpreter; TODO confirm where.
binary_settings = {'abbreviateinput': ('self.apply_abr_inp', labels.APPLY_ABR_INP),
                   'keyeditmode': ('self.vertmode', labels.KEY_INPUT_MODE),
                   'showtags': ('self.tagdefault', labels.TAG_DEFAULT),
                   'usesequence': ('self.usesequence', labels.USE_SEQUENCE),
                   'boxconfigs': ('self.box_configs', labels.SHOW_CONFIG_BOX),
                   'autobackup': ('self.autobackup', labels.AUTOBACKUP),
                   'curtail': ("self.default_dict['curtail']", labels.CURTAIL),
                   'itshow': ("self.default_dict['setitflag']", labels.ITERATOR_SHOW),
                   'noflash': ("self.no_flash", labels.NO_FLASH),
                   'spelling': ("self.check_spelling", labels.CHECK_SPELLING),
                   'flashmode': ("self.flipmode", labels.FLASHMODE),
                   'showdate': ("self.default_dict['showdate']", labels.SHOW_DATE),
                   'sortbydate': ("self.default_dict['sortbydate']", labels.SORT_BY_DATE),
                   'orderkeys': ("self.default_dict['orderkeys']", labels.ORDER_KEYS),
                   'enterhelp': ("self.default_dict['enterhelp']", labels.ENTER_HELP),
                   'childrentoo': ("self.children_too", labels.CHILDREN_TOO),
                   'flipout': ("self.flipout", labels.FLIPOUT),
                   'shortshow': ("self.shortshow", labels.SHORTSHOW),
                   'fulltop': ("self.show_full_top", labels.SHOW_TOP),
                   'rectify': ("self.rectify", labels.RECTIFY),
                   'formathelp': ("self.default_dict['formattinghelp']", labels.FORMATTING_HELP),
                   'automulti': ("self.auto_multi", labels.AUTOMULTI),
                   'quickenter': ("self.quickenter", labels.QUICK_ENTER),
                   'keysbefore': ("self.default_dict['keysbefore']", labels.KEYS_BEFORE),
                   'keysafter': ("self.default_dict['keysafter']", labels.KEYS_AFTER),
                   'carryoverkeys': ("self.default_dict['carryoverkeys']", labels.CARRY_OVER_KEYS),
                   'carryall': ("self.default_dict['carryall']", labels.CARRY_ALL),
                   # 'returnquit' and 'rqon' are deliberate aliases for the
                   # same underlying setting.
                   'returnquit': ("self.default_dict['returnquiton']", labels.RETURN_QUIT_ON),
                   'rqon': ("self.default_dict['returnquiton']", labels.RETURN_QUIT_ON),
                   # Three aliases for negative-result display.
                   'negresults': ("self.negative_results", labels.NEGATIVE_RESULTS),
                   'negativeresults': ("self.negative_results", labels.NEGATIVE_RESULTS),
                   'nr': ("self.negative_results", labels.NEGATIVE_RESULTS),
                   'iteratemode': ("self.iteratormode", labels.ITERATOR),
                   'showimages': ("self.show_images", labels.SHOW_IMAGES),
                   'showtext': ("self.show_text", labels.SHOW_TEXTFILES),
                   'editdelete': ("self.delete_by_edit", labels.DELETE_WHEN_EDITING),
                   'variablesize': ("self.default_dict['variablesize']", labels.VARIABLE_SIZE),
                   'seqintext': ("self.default_dict['sequences_in_text']", labels.SEQUENCE_IN_TEXT),
                   'convertbyline': ("self.default_dict['convertbyline']", labels.CONVERT_BY_LINE),
                   'nodiagnostics': ("self.add_diagnostics", labels.ADD_DIAGNOSTICS),
                   'carrykeys': ("self.carry_keys", labels.CARRY_KEYS),
                   'abridgedformat': ("self.abridgedformat", labels.ABRIDGEDFORMAT),
                   'nameinterpret': ("self.name_interpret", labels.NAME_INTERPRET),
                   # NOTE(review): 'useallphabets' looks like a typo for
                   # 'usealphabets', but it is a user-facing command key and
                   # cannot be changed without breaking existing usage.
                   'useallphabets': ("self.use_alphabets", "USE ALPHABETS"),
                   'equivmultiply': ("self.search_equiv_multiplied", "EQUIVALENCE MULTIPLIER"),
                   'converttextphrase': ("self.convert_text_terms", "Convert multiple word terms for text search")}
# Dispatch strings shared by many entries of ``simple_commands`` below.
# Each is the source text of a Python call expression on the application
# object -- presumably executed via eval()/exec() with ``mainterm``,
# ``otherterms``, ``predicate`` etc. in scope; TODO confirm in the command
# interpreter.  Defined once here so many command aliases can share them.
LOAD_COM = 'self.loadtext_com(otherterms=otherterms,predicate=predicate)'
AUTOKEY_COM = 'self.autokey_com(mainterm=mainterm,otherterms=otherterms,predicate=predicate)'
LIMITLIST_COM = 'self.limitlist_com(mainterm=mainterm,otherterms=otherterms)'
STREAM_COM = 'self.stream_com(mainterm=mainterm,otherterms=otherterms,predicate=predicate)'
COPY_COM = 'self.copy_com (mainterm=mainterm,otherterms=otherterms,predicate=predicate)'
DEFAULT_COM = 'self.default_com(mainterm=mainterm,otherterms=otherterms)'
LOADBY_COM = 'self.loadby_com(mainterm=mainterm,otherterms=otherterms,predicate=predicate)'
ELIMINATE_COM = 'self.eliminate_com(mainterm=mainterm,otherterms=otherterms)'
DETERM_COM = 'self.determ_com(mainterm=mainterm,otherterms=otherterms,predicate=predicate)'
SPELLING_COM = 'self.spelling_com(mainterm=mainterm,longphrase=longphrase,otherterms=otherterms,predicate=predicate)'
CULKEYS_COM = 'self.culkeys_com(mainterm=mainterm)'
FLIP_COM = 'self.flip_com(mainterm=mainterm,otherterms=otherterms,longphrase=longphrase,totalterms=totalterms)'
RESIZE_COM = 'self.resize_etc_com(longphrase=longphrase,mainterm=mainterm,otherterms=otherterms,predicate=predicate,totalterms=0)'
REFORMATING_COM = 'self.reformating_com(mainterm=mainterm,otherterms=otherterms,predicate=predicate,longphrase=longphrase)'
COPY_MOVE_SEARCH_COM = 'self.copy_move_search_com(longphrase=longphrase,mainterm=mainterm,otherterms=otherterms,predicate=predicate)'
JSON_COM = 'self.json_com(longphrase=longphrase,mainterm=mainterm,otherterms=otherterms,predicate=predicate,totalterms=0)'
# Master dispatch table: user command name -> source string of the call to
# perform (see the *_COM constants above).  Many names are aliases for the
# same handler; the handler itself dispatches further on the command name.
# NOTE(review): the dict literal contains duplicate keys (flagged inline);
# Python keeps only the LAST occurrence of a duplicate key.
simple_commands = {'dumpprojects':JSON_COM,
                   'loadprojects':JSON_COM,
                   'clearprojects':JSON_COM,
                   'dumpgeneralknowledge':JSON_COM,
                   'dumpknowledge':JSON_COM,
                   'loadknowledge':JSON_COM,
                   'loadgeneralknowledge':JSON_COM,
                   'showknowledge':JSON_COM,
                   'setsides':RESIZE_COM,
                   'convertdefinitions':RESIZE_COM,
                   'newconvertmode':RESIZE_COM,
                   'switchconvertmode':RESIZE_COM,
                   'showallconvertmodes':RESIZE_COM,
                   'test':RESIZE_COM,
                   'setflipat':RESIZE_COM,
                   'flexflip':RESIZE_COM,
                   'setsuppresskeys':DETERM_COM,
                   # Flash-card navigation: tiny inline statements rather
                   # than handler calls.
                   'flashforward':'self.side+=1',
                   'ff':'self.side+=1',
                   'flashback':'self.side-=1',
                   'fb':'self.side-=1',
                   'flashreset':'self.side = 0',
                   'fr':'self.side = 0',
                   'ft':RESIZE_COM,
                   # NOTE(review): 'cleargeneralknowlege' (sic) coexists with
                   # the correctly spelled key below; both are live commands.
                   'cleargeneralknowlege':RESIZE_COM,
                   'generalknowledge':RESIZE_COM,
                   'general':RESIZE_COM,
                   'gk':RESIZE_COM,
                   'cleargeneralknowledge':RESIZE_COM,
                   'reconstitutegeneralknowledge':RESIZE_COM,
                   'switchgeneralknowledge':RESIZE_COM,
                   'flashto':RESIZE_COM,
                   'tutorial':RESIZE_COM,
                   'updateuser':RESIZE_COM,
                   'updatesize':RESIZE_COM,
                   'run':RESIZE_COM,
                   'interpret':RESIZE_COM,
                   'reader':RESIZE_COM,
                   'indexer':RESIZE_COM,
                   'cleartempsuppresskeys':'self.keypurger.temporary=set()',
                   'clearsuppresskeys':DETERM_COM,
                   'addsuppresskeys':DETERM_COM,
                   'deletesuppresskeys':DETERM_COM,
                   'showsuppresskeys':DETERM_COM,
                   'diagnosticnote':'diagnostics.addline("##"+input("?"))',
                   'variables':'self.show_variables()',
                   'showvariables':'self.show_variables()',
                   'showvar':'self.show_variables()',
                   'clearmarks': "self.default_dict['marked'].clear()",
                   'allchildren': 'self.iterator.change_level(0)',
                   'inc': 'self.showall_incremental(index=str(lastup))',
                   'quickall': 'self.showall(quick=True)',
                   'refresh': 'self.constitute_word_dict()',
                   'undomany': 'self.undo_many()',
                   'redo': 'self.redo()',
                   'printformout': 'print(self.format_output())',
                   'saveconfigurations': 'self.configuration.save()',
                   'loadconfigurations': 'self.configuration.load()',
                   'showconfigurations': 'self.configuration.show(self.box_configs)',
                   'killclusters': "self.set_iterator(flag=self.default_dict['setitflag'])",
                   'allknowledge': "self.default_dict['knower'].bore(self.display_buffer)",
                   'autodefaults': 'self.autodefaults()',
                   # NOTE(review): duplicate key -- 'reconstitutesequences'
                   # appears again near the end of this dict bound to
                   # RESIZE_COM, so this entry is dead (last wins).
                   'reconstitutesequences': 'self.reconstitute_sequences()',
                   'restoreallprojects': RESIZE_COM,
                   'restoreproject':RESIZE_COM,
                   'searchlog': 'self.show_search_log()',
                   'resultlog': 'self.show_search_log(enterlist=self.result_buffer)',
                   'plainenglish': "switchlanguage(language='ple')",
                   'language':RESIZE_COM,
                   'calculate':RESIZE_COM,
                   'politeenglish': "switchlanguage(language='poe')",
                   'rudeenglish': "switchlanguage(language='rue')",
                   'clearsearchlog': "self.searchlog = []",
                   'clearlog': "self.searchlog = []",
                   'changekeydefinitions': "self.default_dict['definitions'].console()",
                   'changeequivalences': "self.default_dict['equivalences'].console()",
                   'yieldequivalences':"self.default_dict['equivalences'].toggle()",
                   'changeknowledge': "self.default_dict['knower'].console()",
                   'spelldictionary': 'self.speller.console()',
                   'keystags': 'self.keys_for_tags()',
                   'marked':'self.marked_com(mainterm=mainterm,otherterms=otherterms)',
                   'addmarks':'self.marked_com(mainterm=mainterm,otherterms=otherterms)',
                   'deletemarks':'self.marked_com(mainterm=mainterm,otherterms=otherterms)',
                   'documentation':'self.documentation_com()',
                   'showsettings':'self.show_settings()',
                   'showdefaults':'self.show_defaults()',
                   'showiterators':'self.show_iterators()',
                   'randomon':"self.iterator.random_on()",
                   'randomoff':"self.iterator.random_off()",
                   'clearsuspended':"self.suspended_sequences = set()",
                   'suspendkey':RESIZE_COM,
                   'unsuspendkey':RESIZE_COM,
                   'alphabets':RESIZE_COM,
                   'sort':COPY_MOVE_SEARCH_COM,
                   'fetch':COPY_MOVE_SEARCH_COM,
                   'reverse':COPY_MOVE_SEARCH_COM,
                   'branchone':DEFAULT_COM,
                   'branchtwo':DEFAULT_COM,
                   'overrideextract':DEFAULT_COM,
                   'branchthree':DEFAULT_COM,
                   'loadtext':LOAD_COM,
                   'lt':LOAD_COM,
                   'echo':RESIZE_COM,
                   'clearautokeys':AUTOKEY_COM,
                   'clearkeys':AUTOKEY_COM,
                   'addkeys':AUTOKEY_COM,
                   'addkey':AUTOKEY_COM,
                   'addautokeys':AUTOKEY_COM,
                   'changekeys':AUTOKEY_COM,
                   'editdefaultkeys':AUTOKEY_COM,
                   'ak':AUTOKEY_COM,
                   'deleteautokey':AUTOKEY_COM,
                   'deletekey':AUTOKEY_COM,
                   'dk':AUTOKEY_COM,
                   'autokeys':AUTOKEY_COM,
                   'save':RESIZE_COM,
                   'defaultkeys':AUTOKEY_COM,
                   'afk':AUTOKEY_COM,
                   'showlimitlist':LIMITLIST_COM,
                   'resetlimitlist':LIMITLIST_COM,
                   'starttutorial':RESIZE_COM,
                   'resetll':LIMITLIST_COM,
                   'limitlist':LIMITLIST_COM,
                   'streams':STREAM_COM,
                   'deletestream':STREAM_COM,
                   'copyto':COPY_COM,
                   'copyfrom':COPY_COM,
                   'clearcommandmacros':DEFAULT_COM,
                   'clearknowledge':DEFAULT_COM,
                   # NOTE(review): duplicate key -- 'clearcodes' appears
                   # again below with the same value, so this is harmless.
                   'clearcodes':DEFAULT_COM,
                   'clearmacros':DEFAULT_COM,
                   'clearkeydefinitions':DEFAULT_COM,
                   'clearkeymacros':DEFAULT_COM,
                   'defaultcommandmacros':DEFAULT_COM,
                   'defaultkeymacros':DEFAULT_COM,
                   'recordkeydefinitions':DEFAULT_COM,
                   'recordkeymacros':DEFAULT_COM,
                   'recordcodes':DEFAULT_COM,
                   'recordmacros':DEFAULT_COM,
                   'recordknowledge':DEFAULT_COM,
                   'recordcommandmacros':DEFAULT_COM,
                   'changegeneralknowledge':DEFAULT_COM,
                   'changecodes':DEFAULT_COM,
                   'changemacros':DEFAULT_COM,
                   'changekeymacros':DEFAULT_COM,
                   'changecommandmacros':DEFAULT_COM,
                   'learn':DEFAULT_COM,
                   'forget':DEFAULT_COM,
                   'defaultcodes':DEFAULT_COM,
                   'clearcodes':DEFAULT_COM,
                   'defaultmacros':DEFAULT_COM,
                   'defaultknowledge':DEFAULT_COM,
                   'defaultkeydefinitions':DEFAULT_COM,
                   'loadbyparagraph':LOADBY_COM,
                   'splitload':LOADBY_COM,
                   'deletedefaultkeys':AUTOKEY_COM,
                   'deleteautokeys':AUTOKEY_COM,
                   'eliminateblanks':ELIMINATE_COM,
                   'eliminatekeys':ELIMINATE_COM,
                   'changedeterminant':DETERM_COM,
                   'changedet':DETERM_COM,
                   'showdeterminant':DETERM_COM,
                   'showdet':DETERM_COM,
                   'clearpurgekeys':DETERM_COM,
                   'setpurgekeys':DETERM_COM,
                   'showpurgekeys':DETERM_COM,
                   'showspelling':SPELLING_COM,
                   'defaultspelling':SPELLING_COM,
                   'capkeys':CULKEYS_COM,
                   'upperkeys':CULKEYS_COM,
                   'lowerkeys':CULKEYS_COM,
                   'flipbook':FLIP_COM,
                   'showflip':FLIP_COM,
                   'runinterpret':RESIZE_COM,
                   'showflipbook':FLIP_COM,
                   'conflate':RESIZE_COM,
                   'undo':RESIZE_COM,
                   'deletefield':RESIZE_COM,
                   'fields':RESIZE_COM,
                   'resize':RESIZE_COM,
                   'size':RESIZE_COM,
                   'sz':RESIZE_COM,
                   'keytrim':RESIZE_COM,
                   'texttrim':RESIZE_COM,
                   'editnote':RESIZE_COM,
                   'truthtable':RESIZE_COM,
                   'keyin':RESIZE_COM,
                   'explode':RESIZE_COM,
                   'load':RESIZE_COM,
                   'en':RESIZE_COM,
                   'editnotekeys':RESIZE_COM,
                   'indentmultiplier':RESIZE_COM,
                   'setreturnquit':RESIZE_COM,
                   'enk':RESIZE_COM,
                   'editnotetext':RESIZE_COM,
                   'ent':RESIZE_COM,
                   'compress':RESIZE_COM,
                   'rehome':RESIZE_COM,
                   'showdel':RESIZE_COM,
                   'permdel':RESIZE_COM,
                   'clear':RESIZE_COM,
                   'undel':RESIZE_COM,
                   'addfield':RESIZE_COM,
                   'cluster':RESIZE_COM,
                   'descendents':RESIZE_COM,
                   'cpara':RESIZE_COM,
                   # Punctuation commands: keys imported from globalconstants.
                   SEMICOLON:RESIZE_COM,
                   'setlongmax':RESIZE_COM,
                   'purgefrom':RESIZE_COM,
                   'showuser':RESIZE_COM,
                   'link':RESIZE_COM,
                   'loop':RESIZE_COM,
                   'chain':RESIZE_COM,
                   'unlink':RESIZE_COM,
                   'newkeys':RESIZE_COM,
                   'header':RESIZE_COM,
                   'footer':RESIZE_COM,
                   'leftmargin':RESIZE_COM,
                   'deeper':RESIZE_COM,
                   'shallower':RESIZE_COM,
                   'testdate':RESIZE_COM,
                   'changeuser':RESIZE_COM,
                   'formout':RESIZE_COM,
                   'findwithin':RESIZE_COM,
                   'inspect':RESIZE_COM,
                   'updatetags':RESIZE_COM,
                   'showmeta':RESIZE_COM,
                   'text':COPY_MOVE_SEARCH_COM,
                   'depth':RESIZE_COM,
                   'delete':RESIZE_COM,
                   'showsequences':RESIZE_COM,
                   # Duplicate key: overrides the earlier inline
                   # 'self.reconstitute_sequences()' entry (last wins).
                   'reconstitutesequences':RESIZE_COM,
                   'showsequence':RESIZE_COM,
                   'del':RESIZE_COM,
                   'gc':RESIZE_COM,
                   'gocluster':RESIZE_COM,
                   'd':RESIZE_COM,
                   'killchild':RESIZE_COM,
                   'all':RESIZE_COM,
                   DOLLAR:RESIZE_COM,
                   DOLLAR+DOLLAR:RESIZE_COM,
                   'show':RESIZE_COM,
                   's':RESIZE_COM,
                   'histogram':RESIZE_COM,
                   'keysfortags':RESIZE_COM,
                   'terms':RESIZE_COM,
                   '???':RESIZE_COM,
                   'indexes':RESIZE_COM,
                   'ind':RESIZE_COM,
                   'i':RESIZE_COM,
                   'reform':RESIZE_COM,
                   'override':RESIZE_COM,
                   'showdepth':RESIZE_COM,
                   'refreshfreq':RESIZE_COM,
                   'cleardatedict':RESIZE_COM,
                   'multi':RESIZE_COM,
                   'sheet':RESIZE_COM,
                   'rsheet':RESIZE_COM,
                   'resumesheet':RESIZE_COM,
                   'createworkpad':RESIZE_COM,
                   'padshow':RESIZE_COM,
                   'addtopad':RESIZE_COM,
                   'a':RESIZE_COM,
                   'showpad':RESIZE_COM,
                   'emptypadstack':RESIZE_COM,
                   'renewpad':RESIZE_COM,
                   'currentpad':RESIZE_COM,
                   'switchpad':RESIZE_COM,
                   'allpads':RESIZE_COM,
                   'tosheetshelf':RESIZE_COM,
                   'selectsheet':RESIZE_COM,
                   'showstream':RESIZE_COM,
                   'constdates':RESIZE_COM,
                   'constitutedates':RESIZE_COM,
                   'showdatedict':RESIZE_COM,
                   'showdatedictpurge':RESIZE_COM,
                   'makedates':RESIZE_COM,
                   'actdet':RESIZE_COM,
                   'put':RESIZE_COM,
                   'activedet':RESIZE_COM,
                   'grabkeys':RESIZE_COM,
                   'invert':RESIZE_COM,
                   'correctkeys':REFORMATING_COM,
                   'help':RESIZE_COM,
                   'grabdefaultkeys':RESIZE_COM,
                   'smallsize':RESIZE_COM,
                   'grabautokeys':RESIZE_COM,
                   'mergemany':REFORMATING_COM,
                   'mm':REFORMATING_COM,
                   'columns':REFORMATING_COM,
                   'col':REFORMATING_COM,
                   'split':REFORMATING_COM,
                   'helpall':REFORMATING_COM,
                   'sidenote':REFORMATING_COM,
                   'revise':REFORMATING_COM,
                   'rev':REFORMATING_COM,
                   'keys':COPY_MOVE_SEARCH_COM,
                   'key':COPY_MOVE_SEARCH_COM,
                   'k':COPY_MOVE_SEARCH_COM,
                   'search':COPY_MOVE_SEARCH_COM,
                   'globalsearch':COPY_MOVE_SEARCH_COM,
                   QUESTIONMARK:COPY_MOVE_SEARCH_COM,
                   'move':COPY_MOVE_SEARCH_COM,
                   'copy':COPY_MOVE_SEARCH_COM,
                   'dictionaryload':RESIZE_COM,
                   'seqformone':RESIZE_COM,
                   'seqformtwo':RESIZE_COM,
                   'mainsequences':RESIZE_COM,
                   'fromtext':RESIZE_COM}
### Contains queries, labels, and alerts for the plain-English language,
### as well as the commands and various input terms.
### NOTE that the names of the commands cannot be changed, since other
### code refers to them by name (the original comment was truncated here;
### confirm the full rationale against version history).
from globalconstants import DASH, PLUS, CARET,\
VERTLINE, EOL, DOLLAR, POUND, SEMICOLON, QUESTIONMARK
import commandscript
def make_commands(text):
    """Return the four accepted spellings of a console command.

    Given a command word, produce the tuple
    ``(initial, lowercase, Capitalized, INITIAL)`` -- e.g.
    ``make_commands('add') == ('a', 'add', 'Add', 'A')`` -- which the
    input loop matches user input against.

    Args:
        text: the command word; must be non-empty (an empty string would
            raise IndexError on the first-character lookup).

    Returns:
        A 4-tuple of accepted spellings.
    """
    lowered = text.lower()
    initial = lowered[0]
    return (initial, lowered, lowered.capitalize(), initial.upper())


# Accepted spellings for each interactive command / answer.
ADDTERMS = make_commands('add')
DELETETERMS = make_commands('delete')
SHOWTERMS = make_commands('show')
QUITTERMS = make_commands('quit')
CLEARTERMS = make_commands('clear')
# QUITALLTERMS cannot use make_commands: it also accepts the bare 'all'
# variants, so the tuple is spelled out explicitly.
QUITALLTERMS = ('a', 'all', 'A', 'All',
                'ALL', 'Quitall', 'QUITALL',
                'quitall')
LEARNTERMS = make_commands('learn')
# BUG FIX: was make_commands('unnlearn'), a typo that made the full word
# 'unlearn' (and 'Unlearn') unrecognized; only the single-letter 'u'/'U'
# forms worked.
UNLEARNTERMS = make_commands('unlearn')
BREAKTERMS = make_commands('break')
NEWTERMS = make_commands('new')
# Affirmative / negative answers accepted by yes-no prompts.
YESTERMS = ['yes',
            'Yes',
            'yeah',
            'sure',
            'whatever',
            'ja',
            'jawohl']
NOTERMS = ['no',
           'No',
           'no way',
           'absolutely no',
           'god no',
           'heaven forbid']
class Queries:
    """Question / prompt strings presented to the user.

    Pure constant container (no logic).  Several multi-part prompts are
    built by concatenating punctuation constants imported from
    ``globalconstants`` (DASH, PLUS, CARET, VERTLINE, EOL, DOLLAR, POUND).
    Attribute names are referenced elsewhere and must not be renamed.
    """

    def __init__(self):
        self.OPEN_NEW1 = "(N)o to open "
        self.OPEN_NEW2 = " (Y)es to open a different notebook, or (Q)uit "
        self.RESUME_PROJECTS = 'RESUME PROJECTS? (y)es (no) or list of projects to resume!'
        self.MOVE_SHELVES = "DO YOU WANT TO MOVE (S)HELF,SE(Q)UENCES, AND "+\
            "(P)ROJECTS TO DATABASE? ENTER ALL THAT APPLY"
        self.INITIAL_MENU = "(1) Display commands \n"+\
            "(2) display commands in compact mode\n"+\
            "(3) Start in Betamode \n"+\
            "(4) Start in regular mode \n"+ \
            "(5) Start in the advanced mode \n"+\
            "(6) View file registry"
        self.SELECTING_NOTEBOOK = """Name or index of notebook,
(N)ew to open a new notebook,
or quit(A)ll to close all notebooks"""
        self.SELECT_OPEN_NOTEBOOK = """Name or index of notebook,
(N)ew to open a new notebook,
or(Q)uit to quit the current notebook,
Quit(A)ll to quit all notebooks."""
        self.SELECT_NOTEBOOK_HEADING = '/C/ SELECT NOTEBOOK'
        self.CHARACTERS_TO_CONVERT = 'Character to convert? '
        self.RANGE_FROM = 'Range from? '
        self.RANGE_TO = 'Range to? '
        self.INDEX_TO_MERGE = 'Index to merge? '
        self.DESTINATION = 'Destination? '
        self.SOURCE_TO_FROM = 'Source from / to? '
        self.STRICT_RANGE_TO_FROM = 'Strict range from/to? '
        self.RANGE_TO_FROM = 'Range from / to? '
        # Editing menu: assembled from punctuation constants so the prompt
        # always matches the characters the editor actually accepts.
        self.EDIT_OPTIONS = 'ENTER new text, or RETURN to keep,'+\
            'or '+DASH+' to DELETE, or '\
            +PLUS+' to insert new line before,'\
            + ' or '+DASH+DASH+ ' to delete all subsequent lines '\
            +' or '+CARET+' to replace! And '\
            +VERTLINE+'to add an EOL mark.'\
            +EOL+DOLLAR+'To append before or'\
            +POUND+' to append after! '
        self.ENTER_KEYWORDS = 'Enter the keywords that you wish to keep! '
        self.SELECT_FILE = 'Enter the number of'\
            +' file to open, or name of new file, or'\
            +EOL+'(B)ack to return to initial directory! '
        self.OPEN_CONFIRM = 'Are you sure you want to open: '
        self.AUTOKEYS_KEEP = 'Numbers of autokeys to keep,'\
            +'to delete (start list with $)'\
            +'or ALL to delete all autokeys, or SWITCH?'
        self.DELETE_CONF_BEG = 'Are you sure you want to delete? '
        self.DELETE_CONF_END = ' from the entire notebase. This cannot be undone! '
        self.REVISE_DELETE_BEG = 'Revise '
        self.REVISE_DELETE_END = ' to ____ ? ... or delete? '
        self.RESUME_ABORTED_NOTE = 'Resume aborted note? '
        self.KEYS = 'Keys? '
        self.NEW_KEY_LIST = '<yes> to keep all, '\
            +'<no> to discard all, '\
            +'or enter a selected range? '
        self.ENTER_SEARCH_TERM = 'Enter composite search term, '\
            +' e.g [1]%[2]!- Begin '\
            +'with $ to show notes! '
        self.ADDITIONAL_KEYS = 'Additional keys '\
            +'to apply to'\
            +' inputed paragraphs? '
        self.INCLUDE = 'Include? '
        self.KEYWORDS_TO_ADD = 'enter keywords to add? '
        self.CONTINUE = 'Continue? '
        self.DELETE_FROM_TO = 'Delete from/to? '
        self.CHILD_DEPTH = 'Depth of children to display? '
        self.DEMARC_MARK = 'Demarcating mark? '
        self.CHILD_KILL = 'Child to kill? '
        # NOTE(review): LEVELS_TO_SHOW is assigned twice with the same
        # value (see a few lines below); harmless duplication.
        self.LEVELS_TO_SHOW = 'Levels to show? '
        self.SUB_OR_MAKE_CHILDREN = '[S]ubordinate, '\
            +' [M]ake compact or [C]hildren? '
        self.NO_CHILDREN = 'No children? '
        self.ADD_TO_AUTOKEYS = 'Add to autokeys? '
        self.COPY_HOW_MANY = 'Copy how many? '
        self.LEVELS_TO_SHOW = 'Levels to show? '
        self.SEARCH_PHRASE = 'Search phrase? '
        self.CONFLATE_EMBED = '[C]onflate or [E]mbed? '
        self.WIDTH = 'Width? '
        self.INDEX = 'Index? '
        self.INDEX_OR_RANGE = 'Index or Indexrange? '
        self.COLUMNS = 'Columns? '
        self.BREAKER = 'Breaker? '
        self.BREAK_MARK = 'Break mark? '
        self.SURE = 'Are you sure? '
        self.FIELDNAME = 'Fieldname? '
        self.READ_ONLY = 'Read only? '
        self.OPEN_DIFFERENT = 'Open a different notebook or QUIT? '
        self.BETA_MODE = 'Do you wish to use '\
            +'NOTESCRIPTION in the betamode? '
        # NOTE(review): 'TRIPPLESPACE' typo in a user-visible string; left
        # unchanged (doc-only edit).
        self.START_COMMAND = 'SPACE to SKIP, '\
            +'TRIPPLESPACE for COMPACT MODE'
        self.LANGUAGE_SUFFIX = 'Language + suffix'
        # NOTE(review): LANGUAGE is assigned again near the bottom of this
        # method with the same value; harmless duplication.
        self.LANGUAGE = 'Language? '
        self.DISPLAY_STREAM = 'Display stream? '
        self.DETERMINANT = 'Determinant ymd*hsx? '
        self.PURGE_WHAT = ' purge a(llcaps) u(pper) l(ower).TERMS ? '
        self.SUFFIX = 'Suffix? '
        self.LEARN_WHAT = 'Learn that what? '
        self.IS_WHAT = ' is a what? '
        self.WHICH_COMMAND = 'Which command? '
        self.MENU_ONE = '[, >,<, ] (Q)uit '
        self.KEYS_TO_ELIMINATE = 'Keys to eliminate? '
        self.INCLUDE_META = 'Include metadata? '
        self.SHOW_INDEXES = 'Show indexes? '
        self.JUMP_AHEAD_BY = 'Jump ahead how much? '
        self.OLD_USER = 'Old user? '
        self.NEW_USER = 'New user? '
        self.UNLEARN_BEG = 'Unlearn that what? '
        self.UNLEARN_END = ' is a what? '
        self.NEW_LIMIT_LIST = 'New limit list? '\
            +' Enter range, F for flipbook,'\
            +'or R to reset! '
        self.FROM = 'From? '
        self.TO = 'To? '
        # NOTE(review): 'SAve' capitalization typo in a user-visible
        # string; left unchanged (doc-only edit).
        self.SAVE_TO = 'SAve to? '
        self.LONG_MAX = 'Maximum number of notes displayed in longform? '
        self.KEY_COUNT = 'Keycount? '
        self.EMPTY_BREAK_NEW = '(e)mpty,(b)reak,(n)ewnote? '
        self.SPECS = 'Specs . terms to purge? '
        self.SET_KEY_TRIM = 'Set trim for displaying keywords? '
        self.SET_TEXT_TRIM = 'Set trim for displaying text? '
        self.NEW_NOTE_SIZE = 'New note size? '
        self.OPEN_AS_NEW = 'Open as new file? '
        self.FIRST_NEWEST_ALL = 'f(irst) (n)ewest (a)ll (i)ndex? '
        self.DETERMINANT2 = 'Determinant? '
        self.NAME_FOR = 'Name for '
        self.UNDO_UP_TO = 'Undo up to? '
        self.TOTO = ' to '
        self.ALSO_ABORT = ' or ABORT to abort'
        self.OTHERS_TO_PURGE = 'Other keywords to purge? '
        self.EXCLUDE_ALL_CAPS = 'Exclude all-cap keywords? '
        self.EXCLUDE_CAPITALIZED = 'Exclude capitalized keywords? '
        self.WHAT_TO_PURGE = 'purge (c)apitalized, (a)ll caps, (l)ower case'
        self.RETURN_QUIT = ' Exit noteentry after how many returns?'
        self.CLUSTER = 'Cluster? '
        self.KEY_MACRO_NAME = 'Key macro name? '
        self.KEY = 'Key? '
        self.PROJECT_NAME = 'Project name? '
        self.INDENT_MULTIPLIER = 'Indent multiplier? '
        self.SMALL_SIZE = 'Small size? '
        self.CLEAR_DEFAULT_KEYS = 'Clear default keys? '
        self.SIDE = 'Go to side? '
        self.SIDES = 'Number of sides? '
        self.TEXT_TO_SAVE = 'TEXT to save? '
        self.SAVE_TO_FILE = 'File to save to? '
        self.FOLDER = 'In folder? '
        self.TEXT_TO_PRINT = 'TEXT to print '
        self.FLIP_AT = 'Flip at? '
        self.SHOW_ALL_NOTES = 'Do you want to show all the notes in the notebook? '
        self.DIVIDE_PICKLE = "Do you want to divide "\
            +" the pickle file?" +\
            " (Y)yes to divide (D)on't ask again? "
        self.LANGUAGE = 'Language? '
        self.LANGUAGE_SELECT = 'es(panol) fr(ench) en(glish) de(utsch)? '
        self.FUNCTION_NAME = 'Function name? '
        self.TEXT_TO_CONVERT = 'Text to convert? '
        self.TEXT_TO_INTERPRET = 'Text to interpret? '
        self.INCLUDE_PROJECTS = 'Include projects? '
        self.SEQ_FORM_ONE = 'Formatting after each sequence? (s) for space,' + EOL + \
            '(l) for EOL, (c) for COMMA and SPACE, ' + EOL + \
            '(b) for break, (n) for new or OTHER TEXT '
        self.SEQ_FORM_TWO = 'Formatting after all sequence? (e) for emptychar, '+ EOL + \
            '(l) for EOL, (b)reak, (n)ew or OTHER TEXT '
        self.MAIN_SEQUENCES = 'Main sequences? Enter as a list separated by commas or (d)efaults! '
        self.REGISTRY = '(o)pen as read only \n'+\
            '(c)orrect registry and continue' +\
            '\n (s)elect another?'
        self.RECON_KEY = 'Reconstitute key dictionary? '
        self.RECON_WORD = 'Reconstitute word dictionary? '
        self.RESUME_FROM_WHERE = 'Do you want to start from where you left off?'
class Alerts:
    """Status / notification strings shown to the user.

    Pure constant container: every attribute is a display string and there
    is no logic.  Attribute names are referenced by name elsewhere in the
    application, so they must not be renamed.  The '/C/' prefix appears to
    be a console-formatting marker -- NOTE(review): confirm against the
    display code.
    """

    def __init__(self):
        self.ADDED_TO_DATABASE_REGISTER = " ADDED TO DATABASE REGISTER"
        self.ATTENTION = '/C/ ATTENTION'
        self.MOVING_NOTE = 'MOVING NOTE DICTIONARY FROM SHELF!'
        self.SELECTED = '/C/ SELECTED'
        self.CONSTITUTING_WORD_DICT = '/C/ CONSTITUTING WORD DICTIONARY!'
        self.WAIT = '/C/ PLEASE WAIT!'
        self.EDITING = '/C/EDITING NOTE'
        self.ON = 'ON'
        self.OFF = 'OFF'
        self.LEARNED_BEG = 'I learned that '
        self.LEARNED_MIDDLE = ' is a(n) '
        self.NOTE_ADDED = 'Note added at'
        self.CHANGE = 'Change'
        self.TO = 'to'
        self.REHOMED = 'SUCCESSFULLY REHOMED!'
        self.ITERATOR_RESET = 'ITERATOR RESET'
        self.KEYS_FOR_DATES = 'KEYS FOR DATES'
        self.APPEARS_BEG = ' APPEARS'
        self.APPEARS_END = ' TIMES. FREQUENCY='
        self.FAILED_CONF_LOAD = '/C/ FAILED TO LOAD CONFIGURATION FILE'
        self.CREATING_NEW_CONF = '/C/ CREATING NEW CONFIGURATION FILE'
        self.NEW_PICKLE = '/C/ NEW PICKLE FILE'
        self.ENTER_DOCUMENTATION = '/C/ ENTER documentation '\
            +'TO LOAD INSTRUCTIONS'
        self.IS_INCONSISTENT = '/C/ NOTEBOOK IS INCONSISTENT'
        self.STILL_INCONSISTENT = '/C/ STILL INCONSISTENT'
        self.IS_CONSISTENT = '/C/ NOTEBOOK IS CONSISTENT'
        self.TOO_MANY_INDEXES = '/C/ TOO MANY INDEXES!'
        self.IS_CLOSING = '/C/ IS CLOSING!'
        self.OPENING = '/C/ WILL BE OPENED AS '
        self.ALREADY_OPEN = ' IS ALREADY OPEN!'
        self.DELETE_FROM_TO = '/C/ DELETE FROM / TO'
        # NOTE(review): attribute name is misspelled ('EXLUDE'); kept as-is
        # because callers may reference it by this exact name.
        self.EXLUDE_ALL_CAPS = 'Exclude all-cap keywords? '
        self.EXCLUDE_CAPITALIZED = 'Exclude capitalized keywords? '
        self.NOT_YET_CLUSTERED = '/C/ NOT YET CLUSTERED'
        self.FLIP_CHANGED = 'FLIPBOOK changed to '
        self.SAVING = '/C/ SAVING '
        self.WORD_DICT_CONSTITUTED = '/C/ WORD DICTIONARY CONSTITUTED'
        self.NOT_REGULAR = '/C/ NOT REGULAR'
        self.ADDED = '/C/ ADDED'
        self.MOVING_FROM = '/C/ MOVING FROM '
        self.COPIED_TO_TEMP = ' COPIED TO TEMPORARY BUFFER!'
        self.NOTE = 'NOTE '
        self.MOVED_TO = ' MOVED TO '
        self.COPIED_TO = ' COPIED TO '
        self.MOVING_TO = 'MOVING TO '
        self.COPYING_TO = 'COPYING to '
        self.OLD = '/C/ Old '
        self.KEYS = 'Keys? '
        self.FIELDS = ' Fields?'
        self.LOADING_FILE = '/c/ LOADING FILE'
        self.REVISE_DELETE_END = ' Enter new term, RETURN to keep, or (d)elete!'
        self.ALREADY_IN_USE = '/C/ ALREADY IN USE'
        self.STILL_CHANGED = 'Still change? '
        self.INDEX = 'INDEX '
        self.NOT_FOUND_IN_NOTEBASE = ' NOT FOUND IN NOTEBOOK!'
        self.NO_DICTIONARY_OBJECT = '/C/ NO DICTIONARY OBJECT'
        self.NEW_SEQUENCE = 'NEW SEQUENCE DICTIONARY CREATED OF TYPE '
        self.OVERWRITTEN = 'OVERWRITTEN. NEW SEQUENCE DICTIONARY CREATED OF TYPE '
        # NOTE(review): 'RECONSTITING' is a typo in a user-visible string;
        # left unchanged here (doc-only edit).
        self.RECONSTITUTING_INDEXES = 'RECONSTITING INDEX SEQUENCE '
        self.WAS_DELETED = ' HAS BEEN DELETED!'
        self.DELETE = 'DELETE '
        self.FAILED = 'FAILED '
        self.SAVED = ' SAVED! '
        self.TOO_LARGE = 'TOO LARGE '
        self.ADDED_TO_KEYLIST = ' added to keylist! '
        self.SUCCESSFULLY_RESUMED = 'Successfully resumed!'
        self.NOT_CLOSED = ' is still in use or has not been closed properly!'
class Labels:
def __init__(self):
self.SELECT = '/C/ SELECT'
self.ENTRYCOMMANDS = '/C/ ENTRYCOMMANDS'
self.SEARCHES = '/C/ SEARCHES'
self.CLUSTER = '/C/ CLUSTER'
self.CONFIGURATIONS = '/C/ CONFIGURATIONS'
self.ALL_COMMANDS = '/C/ ALL COMMANDS'
self.ALWAYS_NEXT = '/C/ ALWAYS NEXT'
self.ALWAYS_CHILD = '/C/ ALWAYS CHILD'
self.MARKED = '/C/ MARKED'
self.DEPTH = '/C/ DEPTH'
self.DEFAULT_KEYS = '/C/ DEFAULT KEYS'
self.GRABBED_KEYS = '/C/GRABBED KEYS'
self.RESULT_FOR = 'RESULT FOR '
self.INDEXES = '/C/ INDEXES'
self.KEYS = '/C/ KEYS'
self.ITERATOR_SHOW = '/C/ SHOW INDEXES WITH ITERATOR RESET'
self.CAPKEYS = '/C/ CAPKEYS'
self.PROPER_NAMES = '/C/ PROPER NAMES'
self.OTHER_KEYS = '/C/ OTHER KEYS'
self.SHOW_TOP = '/C/ SHOW THE TOP NOTE WITH CHILDRED'
self.TAGS = '/C/ TAGS'
self.PURGEKEYS = '/C/ PURGEKEY SETTINGS'
self.FIELD = '/C/ FIELDS'
self.FILE_ERROR = '/C/ FILE ERROR!'
self.CONSTITUTING_KEY_FREQ = ' /C/CONSTITUTING KEY'\
+' FREQUENCY DICTIONAR!'
self.WELCOME_HEAD = '/C/ WELCOME'
self.WELCOME_BODY = '/C/ WELCOME TO ARCADES!'
self.MAX_DEPTH = '/C/ MAXIMUM INDEX DEPTH'
self.LIMIT_LIST_RESET = '/C/ LIMIT LIST RESET'
self.LIMIT_LIST = '/C/ LIMIT LIST'
self.FORMATTING_HELP = '/C/ FORMATTING HELP'
self.STREAMS = '/C/ STREAM'
self.DETERMINANT = '/C/ DETERMINANTS'
self.HEADER = '/C/ HEADER'
self.FOOTER = '/C/ FOOTER'
self.LEFT_MARG ='/C/ LEFTMARGIN'
self.AUTOBACKUP = '/C/ AUTOBACKUP'
self.RECTIFY = '/C/ RECTIFY'
self.AUTOMULTI ='/C/ AUTOMULTI DISPLAY'
self.QUICK_ENTER = '/C/ QUICK ENTER'
self.METADATA = '/C/ METADATA FOR NOTE #'
self.CURTAIL = '/C/ CURTAIL'
self.LIMIT_LIST_CHANGED = '/C/ LIMIT LIST CHANGED TO'
self.SHOW_CONFIG_BOX = '/C/ SHOW CONFIGURATION IN BOXES'
self.PURGE_KEYS = '/C/ PURGEKEY SETTINGS'
self.KEY_TRIM = '/C/ KEY TRIM'
self.TEXT_TRIM = '/C/ TEXT TRIM'
self.SIZE ='/C/ SIZE'
self.FLIPOUT = '/C/ FLIPOUT'
self.SHORTSHOW = '/C/ SHORTSHOW'
self.NAME_INTERPRET = '/C/ NAME INTERPRET'
self.ITERATOR_SHOW = '/C/ SHOW INDEXES WITH ITERATOR RESET '
self.NONE = '/C/ NONE '
self.COMMAND_EQ = '/C/ COMMAND = '
self.CONCORDANCE = '/C/ CONCORDANCE '
self.TO_UNDO = '/C/ TO UNDO '
self.DELETED = '/C/ DELETED NOTES '
self.CONFIG_SAVED = '/C/ CONFIGURATION SAVED '
self.VARIABLES = '/C/ VARIABLES '
self.KEYS_BEFORE = '/C/ KEYS BEFORE '
self.KEYS_AFTER = '/C/ KEYS AFTER '
self.CARRY_OVER_KEYS = '/C/ CARRY OVER KEYS '
self.CARRY_ALL = '/C/ CARRY OVER ALL PARENTS '
self.SETTINGS = '/C/ SETTINGS '
self.RETURN_QUIT_ON = '/C/ RETURNQUIT '
self.CLUSTERS = '/C/ CLUSTERS '
self.PROJECT_DISPLAY = '# |PROJECTNAME| INDEX | KEYS '
self.NEGATIVE_RESULTS = '/C/ SHOW NEGATIVE RESULTS '
self.INDENT_MULTIPLIER = '/C/ INDENT MULTIPLIER '
self.ITERATOR = '/C/ ITERATOR '
self.MUST_BE_BETWEEN = '/C/ MUST BE BETWEEN '
self.AND = ' AND '
self.SMALL_SIZE = '/C/ SMALL SIZE '
self.LONG_MAX = '/C/ LONGMAX '
self.SIDE = '/C/ SIDE '
self.SIDES = '/C/ SIDES '
self.FLIP_AT = '/C/ FLIP AT '
self.TAG_DEFAULT = '/C/ TAG DEFAULT '
self.USE_SEQUENCE = '/C/ USE SEQUENCE '
self.NO_FLASH = "/C/ DON'T SHOW FLASH CARDS "
self.CHECK_SPELLING = '/C/ SPELL CHECK '
self.FLASHMODE = '/C/ FLASHMODE '
self.SHOW_DATE = '/C/ SHOW DATE '
self.SORT_BY_DATE= '/C/ SORT BY DATE '
self.ORDER_KEYS = '/C/ ORDER KEYS '
self.ENTER_HELP= '/C/ ENTERHELP '
self.CHILDREN_TOO = '/C/ CHILDREN TOO '
self.SHOW_IMAGES = '/C/ SHOW IMAGES '
self.SHOW_TEXTFILES = '/C/ SHOW TEXTFILES '
self.DELETE_WHEN_EDITING = '/C/ DELETE WHEN EDITING '
self.VARIABLE_SIZE = '/C/ VARIABLE SIZE '
self.SEQUENCE_IN_TEXT = '/C/ SEQUENCE IN TEXT '
self.MAIN_SEQUENCES = '/C/ MAIN SEQUENCES '
self.SEQ_FORM_ONE = '/C/ FIRST SEQUENCE FORM '
self.SEQ_FORM_TWO = '/C/ SECOND SEQUENCE FORM '
self.FROM_TEXT = '/C/ KEYWORDS FROM TEXT '
self.CONVERT_BY_LINE = '/C/ CONVERT BY LINE '
self.ADD_DIAGNOSTICS = '/C/ ADD DIAGNOSTICS '
self.PREVIOUS_PROJECTS = '/C/ PREVIOUS PROJECTS!'
self.OPEN_FILES = '/C/ OPEN FILES'
self.OPENING = 'opening '
self.APPLY_ABR_INP = '/C/ APPLY INPUT ABBREVIATIONS'
self.KEY_INPUT_MODE = '/C/ KEY EDIT MODE'
self.CARRY_KEYS = '/C/ CARRY KEYS'
self.ABRIDGEDFORMAT = '/C/ ABRIDGED FORMAT'
class Spelling:
def __init__(self):
self.INPUT_MENU = 'Press RETURN to keep,'\
+'DOUBLESPACE to quit,'\
+'SPACE+RETURN to add,'\
+'enter new spelling, '\
+'[start with a space to ADD],'\
+'or a number from the following list'
self.SMALL_INPUT_MENU = 'Press RETURN to keep,'\
+'SPACE+RETURN to add,'\
+'DOUBLESPACE to quit, '\
+'enter new spelling '\
'[start with a space to ADD]'
self.IS_MISPELLED = 'is mispelled'
self.SPELLING_DICTIONARY = '/C/SPELLING DICTIONARY'
self.WORDS_TO_DELETE = 'A(dd) new word\nD(elete)'\
+'\nL(oad)words from text'\
+'\nS(how) words\nC(hange) language'\
+'\n(E)rase\nX(change database)\n(Q)uit'
self.TEXT_TO_ADD = 'Text to add?'
self.ARE_YOU_SURE = 'Are you sure?'
self.THERE_ARE = 'There are '
self.MISSPELLED = ' misspelled words!'
self.SKIP_CORRECTIONS = 'Press SPACE+RETURN to skip corrections'
self.WORD_TO_ADD = 'New word to add?'
self.WORD_TO_DELETE = 'Words to delete?'
self.LANGUAGE_SELECT = 'es(panol) fr(ench) en(glish) de(utsch)?'
class DefaultConsoles:
def __init__(self):
self.KEY_DEF = 'KEYWORDS : DEFINITIONS'
self.ADD_MENU = 'A)dd'
self.DELETE_MENU = 'D)elete'
self.SHOW_MENU = 'S)how'
self.CLEAR_MENU= 'C)lear'
self.QUIT_MENU = 'Q)uit'
self.LEARN_MENU = '(L)earn'
self.UNLEARN_MENU = '(U)nlearn'
self.KEYMACRO = 'Keymacro'
self.KEYS = 'Keys?'
self.DEFINITIONS = 'Definitions?'
self.DELETE = 'Delete?'
self.CLEAR = 'Are you sure you want to clear?'
self.ADD = 'Add| '
self.DELETING = 'DELETING'
self.FROM_THIS = 'From this (short)? '
self.TO_THIS = 'to this(long) ? '
self.I_KNOW = 'I know that '
self.IS_WHAT_IT_IS = ' is what it is'
self.IS_AN = ' is a(n) '
self.LEARN_THAT_THIS = 'Learn that this?'
self.IS_WHAT = 'is what?'
self.UNLEARN_THAT_THIS = 'Unlearn that this?'
self.ARE_YOU_SURE = 'Are you sure you want to clear?'
labels = Labels ()
binary_settings = {'abbreviateinput':('self.apply_abr_inp',labels.APPLY_ABR_INP),
'keyeditmode':('self.vertmode',labels.KEY_INPUT_MODE),
'showtags':('self.tagdefault',labels.TAG_DEFAULT),
'usesequence':('self.usesequence',labels.USE_SEQUENCE),
'boxconfigs':('self.box_configs', labels.SHOW_CONFIG_BOX),
'autobackup':('self.autobackup', labels.AUTOBACKUP),
'curtail':("self.default_dict['curtail']",labels.CURTAIL),
'itshow':("self.default_dict['setitflag']",labels.ITERATOR_SHOW),
'noflash':("self.no_flash",labels.NO_FLASH),
'spelling':("self.check_spelling",labels.CHECK_SPELLING),
'flashmode':("self.flipmode",labels.FLASHMODE),
'showdate':("self.default_dict['showdate']",labels.SHOW_DATE),
'sortbydate':("self.default_dict['sortbydate']",labels.SORT_BY_DATE),
'orderkeys':("self.default_dict['orderkeys']",labels.ORDER_KEYS),
'enterhelp':("self.default_dict['enterhelp']",labels.ENTER_HELP),
'childrentoo':("self.children_too",labels.CHILDREN_TOO),
'flipout':("self.flipout",labels.FLIPOUT),
'shortshow':("self.shortshow",labels.SHORTSHOW),
'fulltop':("self.show_full_top",labels.SHOW_TOP),
'rectify':("self.rectify",labels.RECTIFY),
'formathelp':("self.default_dict['formattinghelp']",labels.FORMATTING_HELP),
'automulti':("self.auto_multi",labels.AUTOMULTI),
'quickenter':("self.quickenter",labels.QUICK_ENTER),
'keysbefore':("self.default_dict['keysbefore']",labels.KEYS_BEFORE),
'keysafter':("self.default_dict['keysafter']",labels.KEYS_AFTER),
'carryoverkeys':("self.default_dict['carryoverkeys']",labels.CARRY_OVER_KEYS),
'carryall':("self.default_dict['carryall']",labels.CARRY_ALL),
'returnquit':("self.default_dict['returnquiton']",labels.RETURN_QUIT_ON),
'rqon':("self.default_dict['returnquiton']",labels.RETURN_QUIT_ON),
'negresults':("self.negative_results",labels.NEGATIVE_RESULTS),
'negativeresults':("self.negative_results",labels.NEGATIVE_RESULTS),
'nr':("self.negative_results",labels.NEGATIVE_RESULTS),
'iteratemode':("self.iteratormode",labels.ITERATOR),
'showimages':("self.show_images",labels.SHOW_IMAGES),
'showtext':("self.show_text",labels.SHOW_TEXTFILES),
'editdelete':("self.delete_by_edit",labels.DELETE_WHEN_EDITING),
'variablesize':("self.default_dict['variablesize']",labels.VARIABLE_SIZE),
'seqintext':("self.default_dict['sequences_in_text']",labels.SEQUENCE_IN_TEXT),
'convertbyline':("self.default_dict['convertbyline']",labels.CONVERT_BY_LINE),
'nodiagnostics':("self.add_diagnostics",labels.ADD_DIAGNOSTICS),
'carrykeys':("self.carry_keys",labels.CARRY_KEYS),
'abridgedformat':("self.abridgedformat",labels.ABRIDGEDFORMAT),
'nameinterpret':("self.name_interpret",labels.NAME_INTERPRET),
'useallphabets':("self.use_alphabets","USE ALPHABETS"),
'equivmultiply':("self.search_equiv_multiplied","EQUIVALENCE MULTIPLIER"),
'converttextphrase':("self.convert_text_terms","Convert multiple word terms for text search")}
LOAD_COM = 'self.loadtext_com(otherterms=otherterms,predicate=predicate)'
AUTOKEY_COM = 'self.autokey_com(mainterm=mainterm,otherterms=otherterms,predicate=predicate)'
LIMITLIST_COM = 'self.limitlist_com(mainterm=mainterm,otherterms=otherterms)'
STREAM_COM = 'self.stream_com(mainterm=mainterm,otherterms=otherterms,predicate=predicate)'
COPY_COM = 'self.copy_com (mainterm=mainterm,otherterms=otherterms,predicate=predicate)'
DEFAULT_COM = 'self.default_com(mainterm=mainterm,otherterms=otherterms)'
LOADBY_COM = 'self.loadby_com(mainterm=mainterm,otherterms=otherterms,predicate=predicate)'
ELIMINATE_COM = 'self.eliminate_com(mainterm=mainterm,otherterms=otherterms)'
DETERM_COM = 'self.determ_com(mainterm=mainterm,otherterms=otherterms,predicate=predicate)'
SPELLING_COM = 'self.spelling_com(mainterm=mainterm,longphrase=longphrase,otherterms=otherterms,predicate=predicate)'
CULKEYS_COM = 'self.culkeys_com(mainterm=mainterm)'
FLIP_COM = 'self.flip_com(mainterm=mainterm,otherterms=otherterms,longphrase=longphrase,totalterms=totalterms)'
RESIZE_COM = 'self.resize_etc_com(longphrase=longphrase,mainterm=mainterm,otherterms=otherterms,predicate=predicate,totalterms=0)'
REFORMATING_COM = 'self.reformating_com(mainterm=mainterm,otherterms=otherterms,predicate=predicate,longphrase=longphrase)'
COPY_MOVE_SEARCH_COM = 'self.copy_move_search_com(longphrase=longphrase,mainterm=mainterm,otherterms=otherterms,predicate=predicate)'
JSON_COM = 'self.json_com(longphrase=longphrase,mainterm=mainterm,otherterms=otherterms,predicate=predicate,totalterms=0)'
simple_commands = {'dumpprojects':JSON_COM,
'loadprojects':JSON_COM,
'clearprojects':JSON_COM,
'dumpgeneralknowledge':JSON_COM,
'dumpknowledge':JSON_COM,
'loadknowledge':JSON_COM,
'loadgeneralknowledge':JSON_COM,
'showknowledge':JSON_COM,
'setsides':RESIZE_COM,
'convertdefinitions':RESIZE_COM,
'newconvertmode':RESIZE_COM,
'switchconvertmode':RESIZE_COM,
'showallconvertmodes':RESIZE_COM,
'test':RESIZE_COM,
'setflipat':RESIZE_COM,
'flexflip':RESIZE_COM,
'setsuppresskeys':DETERM_COM,
'flashforward':'self.side+=1',
'ff':'self.side+=1',
'flashback':'self.side-=1',
'fb':'self.side-=1',
'flashreset':'self.side = 0',
'fr':'self.side = 0',
'ft':RESIZE_COM,
'cleargeneralknowlege':RESIZE_COM,
'generalknowledge':RESIZE_COM,
'general':RESIZE_COM,
'gk':RESIZE_COM,
'cleargeneralknowledge':RESIZE_COM,
'reconstitutegeneralknowledge':RESIZE_COM,
'switchgeneralknowledge':RESIZE_COM,
'flashto':RESIZE_COM,
'tutorial':RESIZE_COM,
'updateuser':RESIZE_COM,
'updatesize':RESIZE_COM,
'run':RESIZE_COM,
'interpret':RESIZE_COM,
'reader':RESIZE_COM,
'indexer':RESIZE_COM,
'cleartempsuppresskeys':'self.keypurger.temporary=set()',
'clearsuppresskeys':DETERM_COM,
'addsuppresskeys':DETERM_COM,
'deletesuppresskeys':DETERM_COM,
'showsuppresskeys':DETERM_COM,
'diagnosticnote':'diagnostics.addline("##"+input("?"))',
'variables':'self.show_variables()',
'showvariables':'self.show_variables()',
'showvar':'self.show_variables()',
'clearmarks': "self.default_dict['marked'].clear()",
'allchildren': 'self.iterator.change_level(0)',
'inc': 'self.showall_incremental(index=str(lastup))',
'quickall': 'self.showall(quick=True)',
'refresh': 'self.constitute_word_dict()',
'undomany': 'self.undo_many()',
'redo': 'self.redo()',
'printformout': 'print(self.format_output())',
'saveconfigurations': 'self.configuration.save()',
'loadconfigurations': 'self.configuration.load()',
'showconfigurations': 'self.configuration.show(self.box_configs)',
'killclusters': "self.set_iterator(flag=self.default_dict['setitflag'])",
'allknowledge': "self.default_dict['knower'].bore(self.display_buffer)",
'autodefaults': 'self.autodefaults()',
'reconstitutesequences': 'self.reconstitute_sequences()',
'restoreallprojects': RESIZE_COM,
'restoreproject':RESIZE_COM,
'searchlog': 'self.show_search_log()',
'resultlog': 'self.show_search_log(enterlist=self.result_buffer)',
'plainenglish': "switchlanguage(language='ple')",
'language':RESIZE_COM,
'calculate':RESIZE_COM,
'politeenglish': "switchlanguage(language='poe')",
'rudeenglish': "switchlanguage(language='rue')",
'clearsearchlog': "self.searchlog = []",
'clearlog': "self.searchlog = []",
'changekeydefinitions': "self.default_dict['definitions'].console()",
'changeequivalences': "self.default_dict['equivalences'].console()",
'yieldequivalences':"self.default_dict['equivalences'].toggle()",
'changeknowledge': "self.default_dict['knower'].console()",
'spelldictionary': 'self.speller.console()',
'keystags': 'self.keys_for_tags()',
'marked':'self.marked_com(mainterm=mainterm,otherterms=otherterms)',
'addmarks':'self.marked_com(mainterm=mainterm,otherterms=otherterms)',
'deletemarks':'self.marked_com(mainterm=mainterm,otherterms=otherterms)',
'documentation':'self.documentation_com()',
'showsettings':'self.show_settings()',
'showdefaults':'self.show_defaults()',
'showiterators':'self.show_iterators()',
'randomon':"self.iterator.random_on()",
'randomoff':"self.iterator.random_off()",
'clearsuspended':"self.suspended_sequences = set()",
'suspendkey':RESIZE_COM,
'unsuspendkey':RESIZE_COM,
'alphabets':RESIZE_COM,
'sort':COPY_MOVE_SEARCH_COM,
'fetch':COPY_MOVE_SEARCH_COM,
'reverse':COPY_MOVE_SEARCH_COM,
'branchone':DEFAULT_COM,
'branchtwo':DEFAULT_COM,
'overrideextract':DEFAULT_COM,
'branchthree':DEFAULT_COM,
'loadtext':LOAD_COM,
'lt':LOAD_COM,
'echo':RESIZE_COM,
'clearautokeys':AUTOKEY_COM,
'clearkeys':AUTOKEY_COM,
'addkeys':AUTOKEY_COM,
'addkey':AUTOKEY_COM,
'addautokeys':AUTOKEY_COM,
'changekeys':AUTOKEY_COM,
'editdefaultkeys':AUTOKEY_COM,
'ak':AUTOKEY_COM,
'deleteautokey':AUTOKEY_COM,
'deletekey':AUTOKEY_COM,
'dk':AUTOKEY_COM,
'autokeys':AUTOKEY_COM,
'save':RESIZE_COM,
'defaultkeys':AUTOKEY_COM,
'afk':AUTOKEY_COM,
'showlimitlist':LIMITLIST_COM,
'resetlimitlist':LIMITLIST_COM,
'starttutorial':RESIZE_COM,
'resetll':LIMITLIST_COM,
'limitlist':LIMITLIST_COM,
'streams':STREAM_COM,
'deletestream':STREAM_COM,
'copyto':COPY_COM,
'copyfrom':COPY_COM,
'clearcommandmacros':DEFAULT_COM,
'clearknowledge':DEFAULT_COM,
'clearcodes':DEFAULT_COM,
'clearmacros':DEFAULT_COM,
'clearkeydefinitions':DEFAULT_COM,
'clearkeymacros':DEFAULT_COM,
'defaultcommandmacros':DEFAULT_COM,
'defaultkeymacros':DEFAULT_COM,
'recordkeydefinitions':DEFAULT_COM,
'recordkeymacros':DEFAULT_COM,
'recordcodes':DEFAULT_COM,
'recordmacros':DEFAULT_COM,
'recordknowledge':DEFAULT_COM,
'recordcommandmacros':DEFAULT_COM,
'changegeneralknowledge':DEFAULT_COM,
'changecodes':DEFAULT_COM,
'changemacros':DEFAULT_COM,
'changekeymacros':DEFAULT_COM,
'changecommandmacros':DEFAULT_COM,
'learn':DEFAULT_COM,
'forget':DEFAULT_COM,
'defaultcodes':DEFAULT_COM,
'clearcodes':DEFAULT_COM,
'defaultmacros':DEFAULT_COM,
'defaultknowledge':DEFAULT_COM,
'defaultkeydefinitions':DEFAULT_COM,
'loadbyparagraph':LOADBY_COM,
'splitload':LOADBY_COM,
'deletedefaultkeys':AUTOKEY_COM,
'deleteautokeys':AUTOKEY_COM,
'eliminateblanks':ELIMINATE_COM,
'eliminatekeys':ELIMINATE_COM,
'changedeterminant':DETERM_COM,
'changedet':DETERM_COM,
'showdeterminant':DETERM_COM,
'showdet':DETERM_COM,
'clearpurgekeys':DETERM_COM,
'setpurgekeys':DETERM_COM,
'showpurgekeys':DETERM_COM,
'showspelling':SPELLING_COM,
'defaultspelling':SPELLING_COM,
'capkeys':CULKEYS_COM,
'upperkeys':CULKEYS_COM,
'lowerkeys':CULKEYS_COM,
'flipbook':FLIP_COM,
'showflip':FLIP_COM,
'runinterpret':RESIZE_COM,
'showflipbook':FLIP_COM,
'conflate':RESIZE_COM,
'undo':RESIZE_COM,
'deletefield':RESIZE_COM,
'fields':RESIZE_COM,
'resize':RESIZE_COM,
'size':RESIZE_COM,
'sz':RESIZE_COM,
'keytrim':RESIZE_COM,
'texttrim':RESIZE_COM,
'editnote':RESIZE_COM,
'truthtable':RESIZE_COM,
'keyin':RESIZE_COM,
'explode':RESIZE_COM,
'load':RESIZE_COM,
'en':RESIZE_COM,
'editnotekeys':RESIZE_COM,
'indentmultiplier':RESIZE_COM,
'setreturnquit':RESIZE_COM,
'enk':RESIZE_COM,
'editnotetext':RESIZE_COM,
'ent':RESIZE_COM,
'compress':RESIZE_COM,
'rehome':RESIZE_COM,
'showdel':RESIZE_COM,
'permdel':RESIZE_COM,
'clear':RESIZE_COM,
'undel':RESIZE_COM,
'addfield':RESIZE_COM,
'cluster':RESIZE_COM,
'descendents':RESIZE_COM,
'cpara':RESIZE_COM,
SEMICOLON:RESIZE_COM,
'setlongmax':RESIZE_COM,
'purgefrom':RESIZE_COM,
'showuser':RESIZE_COM,
'link':RESIZE_COM,
'loop':RESIZE_COM,
'chain':RESIZE_COM,
'unlink':RESIZE_COM,
'newkeys':RESIZE_COM,
'header':RESIZE_COM,
'footer':RESIZE_COM,
'leftmargin':RESIZE_COM,
'deeper':RESIZE_COM,
'shallower':RESIZE_COM,
'testdate':RESIZE_COM,
'changeuser':RESIZE_COM,
'formout':RESIZE_COM,
'findwithin':RESIZE_COM,
'inspect':RESIZE_COM,
'updatetags':RESIZE_COM,
'showmeta':RESIZE_COM,
'text':COPY_MOVE_SEARCH_COM,
'depth':RESIZE_COM,
'delete':RESIZE_COM,
'showsequences':RESIZE_COM,
'reconstitutesequences':RESIZE_COM,
'showsequence':RESIZE_COM,
'del':RESIZE_COM,
'gc':RESIZE_COM,
'gocluster':RESIZE_COM,
'd':RESIZE_COM,
'killchild':RESIZE_COM,
'all':RESIZE_COM,
DOLLAR:RESIZE_COM,
DOLLAR+DOLLAR:RESIZE_COM,
'show':RESIZE_COM,
's':RESIZE_COM,
'histogram':RESIZE_COM,
'keysfortags':RESIZE_COM,
'terms':RESIZE_COM,
'???':RESIZE_COM,
'indexes':RESIZE_COM,
'ind':RESIZE_COM,
'i':RESIZE_COM,
'reform':RESIZE_COM,
'override':RESIZE_COM,
'showdepth':RESIZE_COM,
'refreshfreq':RESIZE_COM,
'cleardatedict':RESIZE_COM,
'multi':RESIZE_COM,
'sheet':RESIZE_COM,
'rsheet':RESIZE_COM,
'resumesheet':RESIZE_COM,
'createworkpad':RESIZE_COM,
'padshow':RESIZE_COM,
'addtopad':RESIZE_COM,
'a':RESIZE_COM,
'showpad':RESIZE_COM,
'emptypadstack':RESIZE_COM,
'renewpad':RESIZE_COM,
'currentpad':RESIZE_COM,
'switchpad':RESIZE_COM,
'allpads':RESIZE_COM,
'tosheetshelf':RESIZE_COM,
'selectsheet':RESIZE_COM,
'showstream':RESIZE_COM,
'constdates':RESIZE_COM,
'constitutedates':RESIZE_COM,
'showdatedict':RESIZE_COM,
'showdatedictpurge':RESIZE_COM,
'makedates':RESIZE_COM,
'actdet':RESIZE_COM,
'put':RESIZE_COM,
'activedet':RESIZE_COM,
'grabkeys':RESIZE_COM,
'invert':RESIZE_COM,
'correctkeys':REFORMATING_COM,
'help':RESIZE_COM,
'grabdefaultkeys':RESIZE_COM,
'smallsize':RESIZE_COM,
'grabautokeys':RESIZE_COM,
'mergemany':REFORMATING_COM,
'mm':REFORMATING_COM,
'columns':REFORMATING_COM,
'col':REFORMATING_COM,
'split':REFORMATING_COM,
'helpall':REFORMATING_COM,
'sidenote':REFORMATING_COM,
'revise':REFORMATING_COM,
'rev':REFORMATING_COM,
'keys':COPY_MOVE_SEARCH_COM,
'key':COPY_MOVE_SEARCH_COM,
'k':COPY_MOVE_SEARCH_COM,
'search':COPY_MOVE_SEARCH_COM,
'globalsearch':COPY_MOVE_SEARCH_COM,
QUESTIONMARK:COPY_MOVE_SEARCH_COM,
'move':COPY_MOVE_SEARCH_COM,
'copy':COPY_MOVE_SEARCH_COM,
'dictionaryload':RESIZE_COM,
'seqformone':RESIZE_COM,
'seqformtwo':RESIZE_COM,
'mainsequences':RESIZE_COM,
'fromtext':RESIZE_COM}
| en | 0.887995 | ### contains queries, labels, alerts for plain English language, as well as the commands. ### and various input terms. ### NOTE that the names of the commands cannot be changed since Name or index of notebook,
(N)ew to open a new notebook,
or quit(A)ll to close all notebooks Name or index of notebook,
(N)ew to open a new notebook,
or(Q)uit to quit the current notebook,
Quit(A)ll to quit all notebooks. #' #"+input("?"))', | 2.737755 | 3 |
py_midiplus_fit/Fader.py | soccermitchy/py_midiplus_waves | 0 | 6622131 |
class Fader:
controller = None
channel: int = None
select_btn_id: int = None
solo_btn_id: int = None
mute_btn_id: int = None
fader_touch_id: int = None
fader_id: int = None
def __init__(self, controller, channel: int):
self.controller = controller
self.channel = channel
# Init IDs for everything
self.select_btn_id = channel - 1
self.knob_id = self.select_btn_id + 0x10
self.solo_btn_id = self.select_btn_id + 0x20
self.mute_btn_id = self.select_btn_id + 0x30
self.fader_touch_id = self.select_btn_id + 0x60
self.fader_id = self.select_btn_id + 0xE0
self.callbacks = {}
if channel == 17: # Whee, special cases!
self.select_btn_id = 0x70
self.knob_id = 0x71
self.solo_btn_id = 0x72
self.mute_btn_id = 0x73
self.fader_touch_id = 0x7F
self.fader_id = 0xAF
def set_row(self, row: int, text: str):
self.controller.write_single_row_single_screen(self.channel, row, text)
def set_all(self, text: str):
self.controller.write_all_rows_single_screen(self.channel, text)
def set_fader(self, val: int):
self.controller.set_fader(self.channel, val)
def set_led_select(self, state: bool):
self.controller.set_led_channel_select(self.channel, state)
def set_led_solo(self, state: bool):
self.controller.set_led_channel_solo(self.channel, state)
def set_led_mute(self, state: bool):
self.controller.set_led_channel_mute(self.channel, state)
def register_callback(self, name: str, callback):
self.callbacks[name] = callback
def fire_callback(self, name, *args):
if name in self.callbacks:
self.callbacks[name](self, *args)
|
class Fader:
controller = None
channel: int = None
select_btn_id: int = None
solo_btn_id: int = None
mute_btn_id: int = None
fader_touch_id: int = None
fader_id: int = None
def __init__(self, controller, channel: int):
self.controller = controller
self.channel = channel
# Init IDs for everything
self.select_btn_id = channel - 1
self.knob_id = self.select_btn_id + 0x10
self.solo_btn_id = self.select_btn_id + 0x20
self.mute_btn_id = self.select_btn_id + 0x30
self.fader_touch_id = self.select_btn_id + 0x60
self.fader_id = self.select_btn_id + 0xE0
self.callbacks = {}
if channel == 17: # Whee, special cases!
self.select_btn_id = 0x70
self.knob_id = 0x71
self.solo_btn_id = 0x72
self.mute_btn_id = 0x73
self.fader_touch_id = 0x7F
self.fader_id = 0xAF
def set_row(self, row: int, text: str):
self.controller.write_single_row_single_screen(self.channel, row, text)
def set_all(self, text: str):
self.controller.write_all_rows_single_screen(self.channel, text)
def set_fader(self, val: int):
self.controller.set_fader(self.channel, val)
def set_led_select(self, state: bool):
self.controller.set_led_channel_select(self.channel, state)
def set_led_solo(self, state: bool):
self.controller.set_led_channel_solo(self.channel, state)
def set_led_mute(self, state: bool):
self.controller.set_led_channel_mute(self.channel, state)
def register_callback(self, name: str, callback):
self.callbacks[name] = callback
def fire_callback(self, name, *args):
if name in self.callbacks:
self.callbacks[name](self, *args)
| en | 0.738553 | # Init IDs for everything # Whee, special cases! | 2.350243 | 2 |
retrieve_any_layer.py | tyler-hayes/Deep_SLDA | 32 | 6622132 | import torch.nn as nn
def get_name_to_module(model):
name_to_module = {}
for m in model.named_modules():
name_to_module[m[0]] = m[1]
return name_to_module
def get_activation(all_outputs, name):
def hook(model, input, output):
all_outputs[name] = output.detach()
return hook
def add_hooks(model, outputs, output_layer_names):
"""
:param model:
:param outputs: Outputs from layers specified in `output_layer_names` will be stored in `output` variable
:param output_layer_names:
:return:
"""
name_to_module = get_name_to_module(model)
for output_layer_name in output_layer_names:
name_to_module[output_layer_name].register_forward_hook(get_activation(outputs, output_layer_name))
class ModelWrapper(nn.Module):
def __init__(self, model, output_layer_names, return_single=False):
super(ModelWrapper, self).__init__()
self.model = model
self.output_layer_names = output_layer_names
self.outputs = {}
self.return_single = return_single
add_hooks(self.model, self.outputs, self.output_layer_names)
def forward(self, x):
self.model(x)
output_vals = [self.outputs[output_layer_name] for output_layer_name in self.output_layer_names]
if self.return_single:
return output_vals[0]
else:
return output_vals
| import torch.nn as nn
def get_name_to_module(model):
name_to_module = {}
for m in model.named_modules():
name_to_module[m[0]] = m[1]
return name_to_module
def get_activation(all_outputs, name):
def hook(model, input, output):
all_outputs[name] = output.detach()
return hook
def add_hooks(model, outputs, output_layer_names):
"""
:param model:
:param outputs: Outputs from layers specified in `output_layer_names` will be stored in `output` variable
:param output_layer_names:
:return:
"""
name_to_module = get_name_to_module(model)
for output_layer_name in output_layer_names:
name_to_module[output_layer_name].register_forward_hook(get_activation(outputs, output_layer_name))
class ModelWrapper(nn.Module):
def __init__(self, model, output_layer_names, return_single=False):
super(ModelWrapper, self).__init__()
self.model = model
self.output_layer_names = output_layer_names
self.outputs = {}
self.return_single = return_single
add_hooks(self.model, self.outputs, self.output_layer_names)
def forward(self, x):
self.model(x)
output_vals = [self.outputs[output_layer_name] for output_layer_name in self.output_layer_names]
if self.return_single:
return output_vals[0]
else:
return output_vals
| en | 0.547805 | :param model: :param outputs: Outputs from layers specified in `output_layer_names` will be stored in `output` variable :param output_layer_names: :return: | 2.754383 | 3 |
code/ch_09_tuples/_01_unpack_and_move_in.py | NogNoa/write-pythonic-code-demos | 679 | 6622133 | <reponame>NogNoa/write-pythonic-code-demos
# tuples are defined as:
t = (7, 11, "cat", [1, 1, 3, 5, 8])
print(t)
t = 7, 11, "cat", [1, 1, 3, 5, 8]
# print(t)
# t = 7,
# print(t, len(t))
# create a tuple, grab a value.
print(t[2])
# we can assign individual variables:
t = 7, "cat", 11
# n = [0]
# a = [1]
# show them
n, a, _ = t
print("n={}, a={}".format(n, a))
# can also assign on a single line:
x, y = 1, 2
print(x, y)
# You'll find this often in loops (remember numerical for-in loops):
for idx, item in enumerate(['hat', 'cat', 'mat', 'that']):
print("{} -> {}".format(idx, item))
| # tuples are defined as:
t = (7, 11, "cat", [1, 1, 3, 5, 8])
print(t)
t = 7, 11, "cat", [1, 1, 3, 5, 8]
# print(t)
# t = 7,
# print(t, len(t))
# create a tuple, grab a value.
print(t[2])
# we can assign individual variables:
t = 7, "cat", 11
# n = [0]
# a = [1]
# show them
n, a, _ = t
print("n={}, a={}".format(n, a))
# can also assign on a single line:
x, y = 1, 2
print(x, y)
# You'll find this often in loops (remember numerical for-in loops):
for idx, item in enumerate(['hat', 'cat', 'mat', 'that']):
print("{} -> {}".format(idx, item)) | en | 0.876494 | # tuples are defined as: # print(t) # t = 7, # print(t, len(t)) # create a tuple, grab a value. # we can assign individual variables: # n = [0] # a = [1] # show them # can also assign on a single line: # You'll find this often in loops (remember numerical for-in loops): | 4.4553 | 4 |
medium/problem738/Solution.py | cutoutsy/leetcode | 1 | 6622134 | <gh_stars>1-10
class Solution:
def monotoneIncreasingDigits(self, N):
"""
:type N: int
:rtype: int
"""
if N < 10: return N
n, inv_index = N, -1
num = [int(d) for d in str(n)[::-1]]
for i in range(1, len(num)):
if num[i] > num[i - 1] or (inv_index != -1 and num[inv_index] == num[i]):
inv_index = i
if inv_index == -1: return N
for i in range(inv_index): num[i] = 9
num[inv_index] -= 1
return int(''.join([str(i) for i in num[::-1]]))
| class Solution:
def monotoneIncreasingDigits(self, N):
"""
:type N: int
:rtype: int
"""
if N < 10: return N
n, inv_index = N, -1
num = [int(d) for d in str(n)[::-1]]
for i in range(1, len(num)):
if num[i] > num[i - 1] or (inv_index != -1 and num[inv_index] == num[i]):
inv_index = i
if inv_index == -1: return N
for i in range(inv_index): num[i] = 9
num[inv_index] -= 1
return int(''.join([str(i) for i in num[::-1]])) | en | 0.43098 | :type N: int :rtype: int | 2.967426 | 3 |
edabit/hard/recursive_length_of_string/recursive_length_of_string.py | ticotheps/practice_problems | 0 | 6622135 | <gh_stars>0
"""
RECURSION: LENGTH OF A STRING
Instructions:
- Write a function that returns the length of a string. Make your function
recursive.
Examples:
- length('apple') -> 5
- length('make') -> 4
- length('a') -> 1
- length('') -> 0
"""
"""
----- 4 Phases of The U.P.E.R. Problem-Solving Framework -----
PHASE I: UNDERSTAND [the problem]
- Objective:
- Write a recursive algorithm that takes in a single input string and
returns a single output, which is the length of the string.
- Definitions:
- Recursive:
- "a function that calls itself from within its own function
body."
- "something that defines itself in terms of itself."
- Anatomy of a Recursive Function:
(1) A recurrence relation
- A sequence based on a rule that gives the next term as a function
of the previous term(s).
(2) A termination condition (AKA "base case")
- The condition at which recursion will stop.
- Expected Input(s):
- Number Of: 1
- Data Type(s): string
- Var Name(s): 'txt'
- Expected Output(s):
- Number Of: 1
- Data Type(s): integer
- Var Name(s): 'len_of_txt'
- Edge Cases & Constraints:
- Can the given input be a non-string data type?
- No. It MUST be a string.
- Can the given input be an empty string?
- Yes. The length of the given string would be 0.
-------------------------------------------------------------------------------
PHASE II: [devise a] PLAN
- [Iterative] Brute Force Solution:
(1) Define a function that takes in a single string input and returns the
length of the given input string as an integer.
(2) Declare a variable, 'len_of_txt', and initialize it with an integer
value of 0.
(3) Find the length of 'txt' by calling the '.len()' method on 'txt' and
then set the value of 'len_of_txt' equal to the resulting length.
(4) Return the value of 'len_of_txt'.
- [Recursive] Brute Force Solution:
(1) Define a function that takes in a single string input and returns the
length of the given input string as an integer.
(2) Define a base case where if 'txt' is an empty string, return 0.
(3) Return 1 and make a recursive call to the 'length()' function, passing
in the range of indices for the given input string where the 'start' is the
next index and the 'stop' is the end of the string.
-------------------------------------------------------------------------------
PHASE III: EXECUTE [the plan] (Please See Below)
-------------------------------------------------------------------------------
PHASE IV: REFLECT ON/REFACTOR [the plan]
- Asymptotic Analysis:
- Iterative Solution:
- Time Complexity: O(n) -> 'linear'
- Space Complexity: O(1) -> 'constant'
- Recursive Solution:
- Time Complexity: O(n) -> 'linear'
- Space Complexity: O(nm) -> 'm' = maximum depth of recursion tree
"""
# ITERATIVE SOLUTION
# def length(txt):
# len_of_txt = len(txt)
# print(f"len_of_txt = {len_of_txt}")
# return len_of_txt
# print(length('apple')) # 5
# print(length('make')) # 4
# print(length('a')) # 1
# print(length('')) # 0
# RECURSIVE SOLUTION
def length(txt):
if txt == '':
return 0
return 1 + length(txt[1:]) | """
RECURSION: LENGTH OF A STRING
Instructions:
- Write a function that returns the length of a string. Make your function
recursive.
Examples:
- length('apple') -> 5
- length('make') -> 4
- length('a') -> 1
- length('') -> 0
"""
"""
----- 4 Phases of The U.P.E.R. Problem-Solving Framework -----
PHASE I: UNDERSTAND [the problem]
- Objective:
- Write a recursive algorithm that takes in a single input string and
returns a single output, which is the length of the string.
- Definitions:
- Recursive:
- "a function that calls itself from within its own function
body."
- "something that defines itself in terms of itself."
- Anatomy of a Recursive Function:
(1) A recurrence relation
- A sequence based on a rule that gives the next term as a function
of the previous term(s).
(2) A termination condition (AKA "base case")
- The condition at which recursion will stop.
- Expected Input(s):
- Number Of: 1
- Data Type(s): string
- Var Name(s): 'txt'
- Expected Output(s):
- Number Of: 1
- Data Type(s): integer
- Var Name(s): 'len_of_txt'
- Edge Cases & Constraints:
- Can the given input be a non-string data type?
- No. It MUST be a string.
- Can the given input be an empty string?
- Yes. The length of the given string would be 0.
-------------------------------------------------------------------------------
PHASE II: [devise a] PLAN
- [Iterative] Brute Force Solution:
(1) Define a function that takes in a single string input and returns the
length of the given input string as an integer.
(2) Declare a variable, 'len_of_txt', and initialize it with an integer
value of 0.
(3) Find the length of 'txt' by calling the '.len()' method on 'txt' and
then set the value of 'len_of_txt' equal to the resulting length.
(4) Return the value of 'len_of_txt'.
- [Recursive] Brute Force Solution:
(1) Define a function that takes in a single string input and returns the
length of the given input string as an integer.
(2) Define a base case where if 'txt' is an empty string, return 0.
(3) Return 1 and make a recursive call to the 'length()' function, passing
in the range of indices for the given input string where the 'start' is the
next index and the 'stop' is the end of the string.
-------------------------------------------------------------------------------
PHASE III: EXECUTE [the plan] (Please See Below)
-------------------------------------------------------------------------------
PHASE IV: REFLECT ON/REFACTOR [the plan]
- Asymptotic Analysis:
- Iterative Solution:
- Time Complexity: O(n) -> 'linear'
- Space Complexity: O(1) -> 'constant'
- Recursive Solution:
- Time Complexity: O(n) -> 'linear'
- Space Complexity: O(nm) -> 'm' = maximum depth of recursion tree
"""
# ITERATIVE SOLUTION
# def length(txt):
# len_of_txt = len(txt)
# print(f"len_of_txt = {len_of_txt}")
# return len_of_txt
# print(length('apple')) # 5
# print(length('make')) # 4
# print(length('a')) # 1
# print(length('')) # 0
# RECURSIVE SOLUTION
def length(txt):
if txt == '':
return 0
return 1 + length(txt[1:]) | en | 0.659925 | RECURSION: LENGTH OF A STRING Instructions: - Write a function that returns the length of a string. Make your function recursive. Examples: - length('apple') -> 5 - length('make') -> 4 - length('a') -> 1 - length('') -> 0 ----- 4 Phases of The U.P.E.R. Problem-Solving Framework ----- PHASE I: UNDERSTAND [the problem] - Objective: - Write a recursive algorithm that takes in a single input string and returns a single output, which is the length of the string. - Definitions: - Recursive: - "a function that calls itself from within its own function body." - "something that defines itself in terms of itself." - Anatomy of a Recursive Function: (1) A recurrence relation - A sequence based on a rule that gives the next term as a function of the previous term(s). (2) A termination condition (AKA "base case") - The condition at which recursion will stop. - Expected Input(s): - Number Of: 1 - Data Type(s): string - Var Name(s): 'txt' - Expected Output(s): - Number Of: 1 - Data Type(s): integer - Var Name(s): 'len_of_txt' - Edge Cases & Constraints: - Can the given input be a non-string data type? - No. It MUST be a string. - Can the given input be an empty string? - Yes. The length of the given string would be 0. ------------------------------------------------------------------------------- PHASE II: [devise a] PLAN - [Iterative] Brute Force Solution: (1) Define a function that takes in a single string input and returns the length of the given input string as an integer. (2) Declare a variable, 'len_of_txt', and initialize it with an integer value of 0. (3) Find the length of 'txt' by calling the '.len()' method on 'txt' and then set the value of 'len_of_txt' equal to the resulting length. (4) Return the value of 'len_of_txt'. - [Recursive] Brute Force Solution: (1) Define a function that takes in a single string input and returns the length of the given input string as an integer. 
(2) Define a base case where if 'txt' is an empty string, return 0. (3) Return 1 and make a recursive call to the 'length()' function, passing in the range of indices for the given input string where the 'start' is the next index and the 'stop' is the end of the string. ------------------------------------------------------------------------------- PHASE III: EXECUTE [the plan] (Please See Below) ------------------------------------------------------------------------------- PHASE IV: REFLECT ON/REFACTOR [the plan] - Asymptotic Analysis: - Iterative Solution: - Time Complexity: O(n) -> 'linear' - Space Complexity: O(1) -> 'constant' - Recursive Solution: - Time Complexity: O(n) -> 'linear' - Space Complexity: O(nm) -> 'm' = maximum depth of recursion tree # ITERATIVE SOLUTION # def length(txt): # len_of_txt = len(txt) # print(f"len_of_txt = {len_of_txt}") # return len_of_txt # print(length('apple')) # 5 # print(length('make')) # 4 # print(length('a')) # 1 # print(length('')) # 0 # RECURSIVE SOLUTION | 4.133552 | 4 |
programs/programs8112021/menudrivenprog.py | VishalAgr11/CSE-programs | 1 | 6622136 | '''
WAP to menu driven program to find the add, sub,mul,div with 2 inputs
'''
choice=0
while choice!=5:
choice=int(input("\n\n1. for add \n2. for sub\n3. for mul\n4. for div\n5. to exit\n"))
l=input("Enter 2 numbers: ").split()
if choice==5:
break
a,b=l
if choice==1:
print("Addition:",float(a)+float(b))
elif choice==2:
print("Subtraction:",float(a)-float(b))
elif choice==3:
print("Multiplication:",float(a)*float(b))
elif choice==4:
print("Division:",float(a)/float(b))
| '''
WAP to menu driven program to find the add, sub,mul,div with 2 inputs
'''
choice=0
while choice!=5:
choice=int(input("\n\n1. for add \n2. for sub\n3. for mul\n4. for div\n5. to exit\n"))
l=input("Enter 2 numbers: ").split()
if choice==5:
break
a,b=l
if choice==1:
print("Addition:",float(a)+float(b))
elif choice==2:
print("Subtraction:",float(a)-float(b))
elif choice==3:
print("Multiplication:",float(a)*float(b))
elif choice==4:
print("Division:",float(a)/float(b))
| en | 0.731572 | WAP to menu driven program to find the add, sub,mul,div with 2 inputs | 3.700828 | 4 |
tests/test_mca.py | kormilitzin/Prince | 10 | 6622137 | <gh_stars>1-10
import numpy as np
import pandas as pd
import pytest
from prince import MCA
from tests import util as test_util
@pytest.fixture
def df():
"""The original dataframe."""
return pd.read_csv('tests/data/ogm.csv', index_col=0)
@pytest.fixture
def indicator_matrix(df):
"""The indicator matrix of the original dataframe."""
return pd.get_dummies(df)
@pytest.fixture
def n(indicator_matrix):
"""The number of rows."""
n, _ = indicator_matrix.shape
return n
@pytest.fixture
def p(indicator_matrix):
"""The number of columns in the indicator matrix."""
_, p = indicator_matrix.shape
return p
@pytest.fixture
def q(df):
"""The number of columns in the initial dataframe."""
_, q = df.shape
return q
@pytest.fixture
def k(p):
"""The number of principal components to compute."""
return p
@pytest.fixture
def N(indicator_matrix):
"""The total number of observed value."""
return np.sum(indicator_matrix.values)
@pytest.fixture
def mca(df, k):
"""The executed CA."""
return MCA(df, n_components=k)
def test_dimensions(mca, n, p):
"""Check the dimensions are correct."""
assert mca.X.shape == (n, p)
def test_eigenvectors_dimensions(mca, n, p, k):
"""Check the eigenvectors have the expected dimensions."""
assert mca.svd.U.shape == (n, k)
assert mca.svd.s.shape == (k,)
assert mca.svd.V.shape == (k, p)
def test_total_sum(mca, N):
"""Check the total number of values is correct."""
assert mca.N == N
def test_frequencies(mca, N, indicator_matrix):
"""Check the frequencies sums up to 1 and that the original data mcan be obtained by
multiplying the frequencies by N."""
assert np.isclose(mca.P.sum().sum(), 1)
assert np.allclose(mca.P * N, indicator_matrix)
def test_row_sums_sum(mca):
"""Check the row sums sum up to 1."""
assert np.isclose(mca.row_sums.sum(), 1)
def test_row_sums_shape(mca, n):
"""Check the row sums is a vector of length `n`."""
assert mca.row_sums.shape == (n,)
def test_column_sums_sum(mca):
"""Check the column sums sum up to 1."""
assert np.isclose(mca.column_sums.sum(), 1)
def test_column_sums_shape(mca, p):
"""Check the row sums is a vector of length `p`."""
assert mca.column_sums.shape == (p,)
def test_expected_frequencies_shape(mca, n, p):
"""Check the expected frequencies matrix is of shape `(n, p)`."""
assert mca.expected_frequencies.shape == (n, p)
def test_expected_frequencies_sum(mca, p):
"""Check the expected frequencies matrix sums to 1."""
assert np.isclose(np.sum(mca.expected_frequencies.values), 1)
def test_eigenvalues_dimensions(mca, k):
"""Check the eigenvalues is a vector of length `k`."""
assert len(mca.eigenvalues) == k
def test_eigenvalues_sorted(mca):
"""Check the eigenvalues are sorted in descending order."""
assert test_util.is_sorted(mca.eigenvalues)
def test_eigenvalues_total_inertia(mca):
"""Check the eigenvalues sums to the same amount as the total inertia."""
assert np.isclose(sum(mca.eigenvalues), mca.total_inertia)
def test_eigenvalues_singular_values(mca):
"""Check the eigenvalues are the squares of the singular values."""
for eigenvalue, singular_value in zip(mca.eigenvalues, mca.svd.s):
assert np.isclose(eigenvalue, np.square(singular_value))
def test_explained_inertia_decreases(mca):
"""Check the explained inertia decreases."""
assert test_util.is_sorted(mca.explained_inertia)
def test_explained_inertia_sum(mca):
"""Check the explained inertia sums to 1."""
assert np.isclose(sum(mca.explained_inertia), 1)
def test_cumulative_explained_inertia(mca):
"""Check the cumulative explained inertia is correct."""
assert np.array_equal(mca.cumulative_explained_inertia, np.cumsum(mca.explained_inertia))
def test_row_component_contributions(mca):
"""Check the sum of row contributions is equal to the total inertia."""
for _, col_sum in mca.row_component_contributions.sum(axis='rows').iteritems():
assert np.isclose(col_sum, 1)
def test_row_cosine_similarities_shape(mca, n, k):
"""Check the shape of the variable correlations is coherent."""
assert mca.row_cosine_similarities.shape == (n, k)
def test_row_cosine_similarities_bounded(mca, n, k):
"""Check the variable correlations are bounded between -1 and 1."""
assert (-1 <= mca.row_cosine_similarities).sum().sum() == n * k
assert (mca.row_cosine_similarities <= 1).sum().sum() == n * k
def test_row_profiles_shape(mca, n, p):
"""Check the row profiles is a matrix of shape (n, p)."""
assert mca.row_profiles.shape == (n, p)
def test_row_profiles_sum(mca):
"""Check the row profiles sum up to 1 for each row."""
for _, row_sum in mca.row_profiles.sum(axis='columns').iteritems():
assert np.isclose(row_sum, 1)
def test_column_component_contributions(mca):
"""Check the sum of column contributions is equal to the total inertia."""
for _, col_sum in mca.column_component_contributions.sum(axis='columns').iteritems():
assert np.isclose(col_sum, 1)
def test_column_cosine_similarities_shape(mca, p, k):
"""Check the shape of the variable correlations is coherent."""
assert mca.column_cosine_similarities.shape == (p, k)
def test_column_cosine_similarities_bounded(mca, p, k):
"""Check the variable correlations are bounded between -1 and 1."""
assert (-1 <= mca.column_cosine_similarities).sum().sum() == p * k
assert (mca.column_cosine_similarities <= 1).sum().sum() == p * k
def test_column_profiles_shape(mca, n, p):
"""Check the column profiles is a matrix of shape `(n, p)`."""
assert mca.column_profiles.shape == (n, p)
def test_column_profiles_sum(mca):
"""Check the column profiles sum up to 1 for each column."""
for _, column_sum in mca.column_profiles.sum(axis='rows').iteritems():
assert np.isclose(column_sum, 1)
def test_column_correlations_shape(mca, q, k):
"""Check the shape of the variable correlations is coherent."""
assert mca.column_correlations.shape == (q, k)
def test_column_correlations_bounded(mca, q, k):
"""Check the variable correlations are bounded between -1 and 1."""
assert (-1 <= mca.column_correlations).sum().sum() == q * k
assert (mca.column_correlations <= 1).sum().sum() == q * k
| import numpy as np
import pandas as pd
import pytest
from prince import MCA
from tests import util as test_util
@pytest.fixture
def df():
"""The original dataframe."""
return pd.read_csv('tests/data/ogm.csv', index_col=0)
@pytest.fixture
def indicator_matrix(df):
"""The indicator matrix of the original dataframe."""
return pd.get_dummies(df)
@pytest.fixture
def n(indicator_matrix):
"""The number of rows."""
n, _ = indicator_matrix.shape
return n
@pytest.fixture
def p(indicator_matrix):
"""The number of columns in the indicator matrix."""
_, p = indicator_matrix.shape
return p
@pytest.fixture
def q(df):
"""The number of columns in the initial dataframe."""
_, q = df.shape
return q
@pytest.fixture
def k(p):
"""The number of principal components to compute."""
return p
@pytest.fixture
def N(indicator_matrix):
"""The total number of observed value."""
return np.sum(indicator_matrix.values)
@pytest.fixture
def mca(df, k):
"""The executed CA."""
return MCA(df, n_components=k)
def test_dimensions(mca, n, p):
"""Check the dimensions are correct."""
assert mca.X.shape == (n, p)
def test_eigenvectors_dimensions(mca, n, p, k):
"""Check the eigenvectors have the expected dimensions."""
assert mca.svd.U.shape == (n, k)
assert mca.svd.s.shape == (k,)
assert mca.svd.V.shape == (k, p)
def test_total_sum(mca, N):
"""Check the total number of values is correct."""
assert mca.N == N
def test_frequencies(mca, N, indicator_matrix):
"""Check the frequencies sums up to 1 and that the original data mcan be obtained by
multiplying the frequencies by N."""
assert np.isclose(mca.P.sum().sum(), 1)
assert np.allclose(mca.P * N, indicator_matrix)
def test_row_sums_sum(mca):
"""Check the row sums sum up to 1."""
assert np.isclose(mca.row_sums.sum(), 1)
def test_row_sums_shape(mca, n):
"""Check the row sums is a vector of length `n`."""
assert mca.row_sums.shape == (n,)
def test_column_sums_sum(mca):
"""Check the column sums sum up to 1."""
assert np.isclose(mca.column_sums.sum(), 1)
def test_column_sums_shape(mca, p):
"""Check the row sums is a vector of length `p`."""
assert mca.column_sums.shape == (p,)
def test_expected_frequencies_shape(mca, n, p):
"""Check the expected frequencies matrix is of shape `(n, p)`."""
assert mca.expected_frequencies.shape == (n, p)
def test_expected_frequencies_sum(mca, p):
"""Check the expected frequencies matrix sums to 1."""
assert np.isclose(np.sum(mca.expected_frequencies.values), 1)
def test_eigenvalues_dimensions(mca, k):
"""Check the eigenvalues is a vector of length `k`."""
assert len(mca.eigenvalues) == k
def test_eigenvalues_sorted(mca):
"""Check the eigenvalues are sorted in descending order."""
assert test_util.is_sorted(mca.eigenvalues)
def test_eigenvalues_total_inertia(mca):
"""Check the eigenvalues sums to the same amount as the total inertia."""
assert np.isclose(sum(mca.eigenvalues), mca.total_inertia)
def test_eigenvalues_singular_values(mca):
"""Check the eigenvalues are the squares of the singular values."""
for eigenvalue, singular_value in zip(mca.eigenvalues, mca.svd.s):
assert np.isclose(eigenvalue, np.square(singular_value))
def test_explained_inertia_decreases(mca):
"""Check the explained inertia decreases."""
assert test_util.is_sorted(mca.explained_inertia)
def test_explained_inertia_sum(mca):
"""Check the explained inertia sums to 1."""
assert np.isclose(sum(mca.explained_inertia), 1)
def test_cumulative_explained_inertia(mca):
"""Check the cumulative explained inertia is correct."""
assert np.array_equal(mca.cumulative_explained_inertia, np.cumsum(mca.explained_inertia))
def test_row_component_contributions(mca):
"""Check the sum of row contributions is equal to the total inertia."""
for _, col_sum in mca.row_component_contributions.sum(axis='rows').iteritems():
assert np.isclose(col_sum, 1)
def test_row_cosine_similarities_shape(mca, n, k):
"""Check the shape of the variable correlations is coherent."""
assert mca.row_cosine_similarities.shape == (n, k)
def test_row_cosine_similarities_bounded(mca, n, k):
"""Check the variable correlations are bounded between -1 and 1."""
assert (-1 <= mca.row_cosine_similarities).sum().sum() == n * k
assert (mca.row_cosine_similarities <= 1).sum().sum() == n * k
def test_row_profiles_shape(mca, n, p):
"""Check the row profiles is a matrix of shape (n, p)."""
assert mca.row_profiles.shape == (n, p)
def test_row_profiles_sum(mca):
"""Check the row profiles sum up to 1 for each row."""
for _, row_sum in mca.row_profiles.sum(axis='columns').iteritems():
assert np.isclose(row_sum, 1)
def test_column_component_contributions(mca):
"""Check the sum of column contributions is equal to the total inertia."""
for _, col_sum in mca.column_component_contributions.sum(axis='columns').iteritems():
assert np.isclose(col_sum, 1)
def test_column_cosine_similarities_shape(mca, p, k):
"""Check the shape of the variable correlations is coherent."""
assert mca.column_cosine_similarities.shape == (p, k)
def test_column_cosine_similarities_bounded(mca, p, k):
"""Check the variable correlations are bounded between -1 and 1."""
assert (-1 <= mca.column_cosine_similarities).sum().sum() == p * k
assert (mca.column_cosine_similarities <= 1).sum().sum() == p * k
def test_column_profiles_shape(mca, n, p):
"""Check the column profiles is a matrix of shape `(n, p)`."""
assert mca.column_profiles.shape == (n, p)
def test_column_profiles_sum(mca):
"""Check the column profiles sum up to 1 for each column."""
for _, column_sum in mca.column_profiles.sum(axis='rows').iteritems():
assert np.isclose(column_sum, 1)
def test_column_correlations_shape(mca, q, k):
"""Check the shape of the variable correlations is coherent."""
assert mca.column_correlations.shape == (q, k)
def test_column_correlations_bounded(mca, q, k):
"""Check the variable correlations are bounded between -1 and 1."""
assert (-1 <= mca.column_correlations).sum().sum() == q * k
assert (mca.column_correlations <= 1).sum().sum() == q * k | en | 0.830109 | The original dataframe. The indicator matrix of the original dataframe. The number of rows. The number of columns in the indicator matrix. The number of columns in the initial dataframe. The number of principal components to compute. The total number of observed value. The executed CA. Check the dimensions are correct. Check the eigenvectors have the expected dimensions. Check the total number of values is correct. Check the frequencies sums up to 1 and that the original data mcan be obtained by multiplying the frequencies by N. Check the row sums sum up to 1. Check the row sums is a vector of length `n`. Check the column sums sum up to 1. Check the row sums is a vector of length `p`. Check the expected frequencies matrix is of shape `(n, p)`. Check the expected frequencies matrix sums to 1. Check the eigenvalues is a vector of length `k`. Check the eigenvalues are sorted in descending order. Check the eigenvalues sums to the same amount as the total inertia. Check the eigenvalues are the squares of the singular values. Check the explained inertia decreases. Check the explained inertia sums to 1. Check the cumulative explained inertia is correct. Check the sum of row contributions is equal to the total inertia. Check the shape of the variable correlations is coherent. Check the variable correlations are bounded between -1 and 1. Check the row profiles is a matrix of shape (n, p). Check the row profiles sum up to 1 for each row. Check the sum of column contributions is equal to the total inertia. Check the shape of the variable correlations is coherent. Check the variable correlations are bounded between -1 and 1. Check the column profiles is a matrix of shape `(n, p)`. Check the column profiles sum up to 1 for each column. Check the shape of the variable correlations is coherent. Check the variable correlations are bounded between -1 and 1. | 2.755355 | 3 |
array/maxProfit2.py | saai/LeetcodePythonSolutions | 0 | 6622138 | <filename>array/maxProfit2.py<gh_stars>0
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
profits = 0
min_v = -1
max_v = -1
n = len(prices)
for i in range(n):
p = prices[i]
if p < min_v or min_v == -1:
min_v = p
max_v = p
elif p > max_v:
max_v = p
if i>0 and p>prices[i-1]:
profits = max((profits+p-prices[i-1]),p-min_v)
return profits
# get every profits you can get.
def maxProfit2(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
max_p = 0
i = 0
while(i < len(prices)):
while(i+1< len(prices) and prices[i] >= prices[i+1]):
i += 1
low = prices[i]
while(i+1 < len(prices) and prices[i]<=prices[i+1]):
i += 1
high = prices[i]
max_p += high - low
i += 1
return max_p | <filename>array/maxProfit2.py<gh_stars>0
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
profits = 0
min_v = -1
max_v = -1
n = len(prices)
for i in range(n):
p = prices[i]
if p < min_v or min_v == -1:
min_v = p
max_v = p
elif p > max_v:
max_v = p
if i>0 and p>prices[i-1]:
profits = max((profits+p-prices[i-1]),p-min_v)
return profits
# get every profits you can get.
def maxProfit2(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
max_p = 0
i = 0
while(i < len(prices)):
while(i+1< len(prices) and prices[i] >= prices[i+1]):
i += 1
low = prices[i]
while(i+1 < len(prices) and prices[i]<=prices[i+1]):
i += 1
high = prices[i]
max_p += high - low
i += 1
return max_p | en | 0.606497 | :type prices: List[int] :rtype: int # get every profits you can get. :type prices: List[int] :rtype: int | 3.29592 | 3 |
src/setup.py | Aniana0/dvcfg_pytools | 0 | 6622139 | <reponame>Aniana0/dvcfg_pytools
from setuptools import setup
setup(
name='dvcfg_pytools',
version='1.1.0',
description='Python module for editing .dvcfg in Python.',
author='Aniana0',
author_email='<EMAIL>',
url='https://github.com/Aniana0/dvcfg_pytools',
py_modules=['dvcfg_pytools'])
| from setuptools import setup
setup(
name='dvcfg_pytools',
version='1.1.0',
description='Python module for editing .dvcfg in Python.',
author='Aniana0',
author_email='<EMAIL>',
url='https://github.com/Aniana0/dvcfg_pytools',
py_modules=['dvcfg_pytools']) | none | 1 | 1.071367 | 1 | |
main.py | psiang/OSMcrop | 0 | 6622140 | import capture
import getmap
import cutmap
if __name__ == '__main__':
# 59.9055,24.7385,60.3133,25.2727 赫尔基辛
# 60.1607,24.9191,60.1739,24.9700
# 60.16446,24.93824,60.16776,24.95096
# 60.1162,24.7522,60.3041,25.2466
name = "Helsinki"
tif_file = "google_17m.tif"
tfw_file = "google_17m.tfw"
# lat1, lon1, lat2, lon2 = 60.1162,24.7522,60.3041,25.2466
key_list = {
"landuse": ["residential"]
}
# # get tif
# x = getmap.getpic(lat1, lon1, lat2, lon2,
# 17, source='google', style='s', outfile=tif_file)
# getmap.my_file_out(x, tfw_file, "keep")
# get aoi and poi
capture.get_poi_aoi(name, key_list)
# cut tif
cutmap.cut_aoi(name + "_aoi.csv", name, tfw_file, tif_file)
| import capture
import getmap
import cutmap
if __name__ == '__main__':
# 59.9055,24.7385,60.3133,25.2727 赫尔基辛
# 60.1607,24.9191,60.1739,24.9700
# 60.16446,24.93824,60.16776,24.95096
# 60.1162,24.7522,60.3041,25.2466
name = "Helsinki"
tif_file = "google_17m.tif"
tfw_file = "google_17m.tfw"
# lat1, lon1, lat2, lon2 = 60.1162,24.7522,60.3041,25.2466
key_list = {
"landuse": ["residential"]
}
# # get tif
# x = getmap.getpic(lat1, lon1, lat2, lon2,
# 17, source='google', style='s', outfile=tif_file)
# getmap.my_file_out(x, tfw_file, "keep")
# get aoi and poi
capture.get_poi_aoi(name, key_list)
# cut tif
cutmap.cut_aoi(name + "_aoi.csv", name, tfw_file, tif_file)
| en | 0.605635 | # 59.9055,24.7385,60.3133,25.2727 赫尔基辛 # 60.1607,24.9191,60.1739,24.9700 # 60.16446,24.93824,60.16776,24.95096 # 60.1162,24.7522,60.3041,25.2466 # lat1, lon1, lat2, lon2 = 60.1162,24.7522,60.3041,25.2466 # # get tif # x = getmap.getpic(lat1, lon1, lat2, lon2, # 17, source='google', style='s', outfile=tif_file) # getmap.my_file_out(x, tfw_file, "keep") # get aoi and poi # cut tif | 2.509427 | 3 |
Week3_code-drills/week-03/day-01/06/challenge-prompt.py | ruturajshete1008/code-drills | 1 | 6622141 | <reponame>ruturajshete1008/code-drills<filename>Week3_code-drills/week-03/day-01/06/challenge-prompt.py
# create a list, list_1, with 0, 1 ,2 ,3 as values
# create list, list_2 with 4,5,6,7 as values
# create list, list_3 with 8,9,10,11 as values
# create list, list_4 with 12,13,14,15 as values
# print the first list_1
# print each index of the list_1
# print the second list_2
# print each index of the second list_2
# print the third list_3
# print each index of the third list_3
# print the fourth list_4
# print each index of the fourth list_4
| # create a list, list_1, with 0, 1 ,2 ,3 as values
# create list, list_2 with 4,5,6,7 as values
# create list, list_3 with 8,9,10,11 as values
# create list, list_4 with 12,13,14,15 as values
# print the first list_1
# print each index of the list_1
# print the second list_2
# print each index of the second list_2
# print the third list_3
# print each index of the third list_3
# print the fourth list_4
# print each index of the fourth list_4 | en | 0.651502 | # create a list, list_1, with 0, 1 ,2 ,3 as values # create list, list_2 with 4,5,6,7 as values # create list, list_3 with 8,9,10,11 as values # create list, list_4 with 12,13,14,15 as values # print the first list_1 # print each index of the list_1 # print the second list_2 # print each index of the second list_2 # print the third list_3 # print each index of the third list_3 # print the fourth list_4 # print each index of the fourth list_4 | 3.770475 | 4 |
techreviewproj/techreviewapp/apps.py | yonny23/techreviewproject | 0 | 6622142 | from django.apps import AppConfig
class TechreviewappConfig(AppConfig):
name = 'techreviewapp'
| from django.apps import AppConfig
class TechreviewappConfig(AppConfig):
name = 'techreviewapp'
| none | 1 | 1.040944 | 1 | |
facebook_login.py | grassym/p366 | 0 | 6622143 | <reponame>grassym/p366
#!/usr/bin/python
# -*- coding: utf-8 -*-
import facebook
import requests
file = open("/home/olia/Documents/fuck_the_code/facebook_credentials.txt")
app_id = file.readline()
app_secret = file.readline()
graph_api_token = file.readline()
# print ("app_id = " + str(app_id))
# print ("app_secret = " + str(app_secret))
# def get_fb_token(app_id, app_secret):
# payload = {'grant_type': 'client_credentials', 'client_id': app_id, 'client_secret': app_secret}
# file = requests.post('https://graph.facebook.com/oauth/access_token?', params = payload)
# # print file.text #to test what the FB api responded with
# result = file.text.split("=")[1]
# print result #to test the TOKEN
# return result
# token = get_fb_token(app_id, app_secret)
graph = facebook.GraphAPI(graph_api_token)
tt = graph.get_object("me")
#print tt#['friends']
friends = graph.get_connections("me", "friends")['data']
print friends | #!/usr/bin/python
# -*- coding: utf-8 -*-
import facebook
import requests
file = open("/home/olia/Documents/fuck_the_code/facebook_credentials.txt")
app_id = file.readline()
app_secret = file.readline()
graph_api_token = file.readline()
# print ("app_id = " + str(app_id))
# print ("app_secret = " + str(app_secret))
# def get_fb_token(app_id, app_secret):
# payload = {'grant_type': 'client_credentials', 'client_id': app_id, 'client_secret': app_secret}
# file = requests.post('https://graph.facebook.com/oauth/access_token?', params = payload)
# # print file.text #to test what the FB api responded with
# result = file.text.split("=")[1]
# print result #to test the TOKEN
# return result
# token = get_fb_token(app_id, app_secret)
graph = facebook.GraphAPI(graph_api_token)
tt = graph.get_object("me")
#print tt#['friends']
friends = graph.get_connections("me", "friends")['data']
print friends | en | 0.400096 | #!/usr/bin/python # -*- coding: utf-8 -*- # print ("app_id = " + str(app_id)) # print ("app_secret = " + str(app_secret)) # def get_fb_token(app_id, app_secret): # payload = {'grant_type': 'client_credentials', 'client_id': app_id, 'client_secret': app_secret} # file = requests.post('https://graph.facebook.com/oauth/access_token?', params = payload) # # print file.text #to test what the FB api responded with # result = file.text.split("=")[1] # print result #to test the TOKEN # return result # token = get_fb_token(app_id, app_secret) #print tt#['friends'] | 3.21508 | 3 |
tests/window_flags.py | stonewell/eim | 0 | 6622144 | from PySide6.QtCore import Slot, Qt, QRect, QSize
from PySide6.QtGui import QColor, QPainter, QTextFormat
from PySide6.QtWidgets import QPlainTextEdit, QWidget, QTextEdit, QPushButton, QVBoxLayout, QHBoxLayout, QGroupBox, QGridLayout, QCheckBox, QRadioButton, QApplication
class PreviewWindow(QWidget):
def __init__(self, parent=None):
super(PreviewWindow, self).__init__(parent)
self.textEdit = QTextEdit()
self.textEdit.setReadOnly(True)
self.textEdit.setLineWrapMode(QTextEdit.NoWrap)
closeButton = QPushButton("&Close")
closeButton.clicked.connect(self.close)
layout =QVBoxLayout()
layout.addWidget(self.textEdit)
layout.addWidget(closeButton)
self.setLayout(layout)
self.setWindowTitle("Preview")
def setWindowFlags(self, flags):
super(PreviewWindow, self).setWindowFlags(flags)
flag_type = (flags & Qt.WindowType_Mask)
if flag_type == Qt.Window:
text = "Qt.Window"
elif flag_type == Qt.Dialog:
text = "Qt.Dialog"
elif flag_type == Qt.Sheet:
text = "Qt.Sheet"
elif flag_type == Qt.Drawer:
text = "Qt.Drawer"
elif flag_type == Qt.Popup:
text = "Qt.Popup"
elif flag_type == Qt.Tool:
text = "Qt.Tool"
elif flag_type == Qt.ToolTip:
text = "Qt.ToolTip"
elif flag_type == Qt.SplashScreen:
text = "Qt.SplashScreen"
else:
text = ""
if flags & Qt.MSWindowsFixedSizeDialogHint:
text += "\n| Qt.MSWindowsFixedSizeDialogHint"
if flags & Qt.X11BypassWindowManagerHint:
text += "\n| Qt.X11BypassWindowManagerHint"
if flags & Qt.FramelessWindowHint:
text += "\n| Qt.FramelessWindowHint"
if flags & Qt.WindowTitleHint:
text += "\n| Qt.WindowTitleHint"
if flags & Qt.WindowSystemMenuHint:
text += "\n| Qt.WindowSystemMenuHint"
if flags & Qt.WindowMinimizeButtonHint:
text += "\n| Qt.WindowMinimizeButtonHint"
if flags & Qt.WindowMaximizeButtonHint:
text += "\n| Qt.WindowMaximizeButtonHint"
if flags & Qt.WindowCloseButtonHint:
text += "\n| Qt.WindowCloseButtonHint"
if flags & Qt.WindowContextHelpButtonHint:
text += "\n| Qt.WindowContextHelpButtonHint"
if flags & Qt.WindowShadeButtonHint:
text += "\n| Qt.WindowShadeButtonHint"
if flags & Qt.WindowStaysOnTopHint:
text += "\n| Qt.WindowStaysOnTopHint"
if flags & Qt.WindowStaysOnBottomHint:
text += "\n| Qt.WindowStaysOnBottomHint"
if flags & Qt.CustomizeWindowHint:
text += "\n| Qt.CustomizeWindowHint"
self.textEdit.setPlainText(text)
class ControllerWindow(QWidget):
    """Control panel for experimenting with Qt window flags.

    Shows one group of radio buttons (window type) and one group of check
    boxes (window hints); every change rebuilds the flag set and applies it
    to an attached PreviewWindow so the effect is immediately visible.
    """

    def __init__(self):
        """Build the type and hints groups, wire the Quit button, and show
        an initial preview."""
        super(ControllerWindow, self).__init__()
        self.previewWindow = PreviewWindow(self)
        self.createTypeGroupBox()
        self.createHintsGroupBox()
        quitButton = QPushButton("&Quit")
        quitButton.clicked.connect(self.close)
        bottomLayout = QHBoxLayout()
        bottomLayout.addStretch()
        bottomLayout.addWidget(quitButton)
        mainLayout = QVBoxLayout()
        mainLayout.addWidget(self.typeGroupBox)
        mainLayout.addWidget(self.hintsGroupBox)
        mainLayout.addLayout(bottomLayout)
        self.setLayout(mainLayout)
        self.setWindowTitle("Window Flags")
        self.updatePreview()

    def updatePreview(self):
        """Collect the selected type and hints into a flag value and apply
        it to the preview window, keeping it on-screen."""
        flags = Qt.WindowFlags()
        # Exactly one radio button is checked: it picks the window type.
        if self.windowRadioButton.isChecked():
            flags = Qt.Window
        elif self.dialogRadioButton.isChecked():
            flags = Qt.Dialog
        elif self.sheetRadioButton.isChecked():
            flags = Qt.Sheet
        elif self.drawerRadioButton.isChecked():
            flags = Qt.Drawer
        elif self.popupRadioButton.isChecked():
            flags = Qt.Popup
        elif self.toolRadioButton.isChecked():
            flags = Qt.Tool
        elif self.toolTipRadioButton.isChecked():
            flags = Qt.ToolTip
        elif self.splashScreenRadioButton.isChecked():
            flags = Qt.SplashScreen
        # Each checked hint box ORs its hint bit into the flags.
        if self.msWindowsFixedSizeDialogCheckBox.isChecked():
            flags |= Qt.MSWindowsFixedSizeDialogHint
        if self.x11BypassWindowManagerCheckBox.isChecked():
            flags |= Qt.X11BypassWindowManagerHint
        if self.framelessWindowCheckBox.isChecked():
            flags |= Qt.FramelessWindowHint
        if self.windowTitleCheckBox.isChecked():
            flags |= Qt.WindowTitleHint
        if self.windowSystemMenuCheckBox.isChecked():
            flags |= Qt.WindowSystemMenuHint
        if self.windowMinimizeButtonCheckBox.isChecked():
            flags |= Qt.WindowMinimizeButtonHint
        if self.windowMaximizeButtonCheckBox.isChecked():
            flags |= Qt.WindowMaximizeButtonHint
        if self.windowCloseButtonCheckBox.isChecked():
            flags |= Qt.WindowCloseButtonHint
        if self.windowContextHelpButtonCheckBox.isChecked():
            flags |= Qt.WindowContextHelpButtonHint
        if self.windowShadeButtonCheckBox.isChecked():
            flags |= Qt.WindowShadeButtonHint
        if self.windowStaysOnTopCheckBox.isChecked():
            flags |= Qt.WindowStaysOnTopHint
        if self.windowStaysOnBottomCheckBox.isChecked():
            flags |= Qt.WindowStaysOnBottomHint
        if self.customizeWindowHintCheckBox.isChecked():
            flags |= Qt.CustomizeWindowHint
        self.previewWindow.setWindowFlags(flags)
        # Changing flags can reposition the window off-screen; clamp the
        # top-left corner back to non-negative coordinates.
        pos = self.previewWindow.pos()
        if pos.x() < 0:
            pos.setX(0)
        if pos.y() < 0:
            pos.setY(0)
        self.previewWindow.move(pos)
        self.previewWindow.show()

    def createTypeGroupBox(self):
        """Create the mutually-exclusive window-type radio buttons
        (default: Window) laid out in a 4x2 grid."""
        self.typeGroupBox = QGroupBox("Type")
        self.windowRadioButton = self.createRadioButton("Window")
        self.dialogRadioButton = self.createRadioButton("Dialog")
        self.sheetRadioButton = self.createRadioButton("Sheet")
        self.drawerRadioButton = self.createRadioButton("Drawer")
        self.popupRadioButton = self.createRadioButton("Popup")
        self.toolRadioButton = self.createRadioButton("Tool")
        self.toolTipRadioButton = self.createRadioButton("Tooltip")
        self.splashScreenRadioButton = self.createRadioButton("Splash screen")
        self.windowRadioButton.setChecked(True)
        layout = QGridLayout()
        layout.addWidget(self.windowRadioButton, 0, 0)
        layout.addWidget(self.dialogRadioButton, 1, 0)
        layout.addWidget(self.sheetRadioButton, 2, 0)
        layout.addWidget(self.drawerRadioButton, 3, 0)
        layout.addWidget(self.popupRadioButton, 0, 1)
        layout.addWidget(self.toolRadioButton, 1, 1)
        layout.addWidget(self.toolTipRadioButton, 2, 1)
        layout.addWidget(self.splashScreenRadioButton, 3, 1)
        self.typeGroupBox.setLayout(layout)

    def createHintsGroupBox(self):
        """Create one check box per supported window hint, laid out in a
        two-column grid."""
        self.hintsGroupBox = QGroupBox("Hints")
        self.msWindowsFixedSizeDialogCheckBox = self.createCheckBox("MS Windows fixed size dialog")
        self.x11BypassWindowManagerCheckBox = self.createCheckBox("X11 bypass window manager")
        self.framelessWindowCheckBox = self.createCheckBox("Frameless window")
        self.windowTitleCheckBox = self.createCheckBox("Window title")
        self.windowSystemMenuCheckBox = self.createCheckBox("Window system menu")
        self.windowMinimizeButtonCheckBox = self.createCheckBox("Window minimize button")
        self.windowMaximizeButtonCheckBox = self.createCheckBox("Window maximize button")
        self.windowCloseButtonCheckBox = self.createCheckBox("Window close button")
        self.windowContextHelpButtonCheckBox = self.createCheckBox("Window context help button")
        self.windowShadeButtonCheckBox = self.createCheckBox("Window shade button")
        self.windowStaysOnTopCheckBox = self.createCheckBox("Window stays on top")
        self.windowStaysOnBottomCheckBox = self.createCheckBox("Window stays on bottom")
        self.customizeWindowHintCheckBox = self.createCheckBox("Customize window")
        layout = QGridLayout()
        layout.addWidget(self.msWindowsFixedSizeDialogCheckBox, 0, 0)
        layout.addWidget(self.x11BypassWindowManagerCheckBox, 1, 0)
        layout.addWidget(self.framelessWindowCheckBox, 2, 0)
        layout.addWidget(self.windowTitleCheckBox, 3, 0)
        layout.addWidget(self.windowSystemMenuCheckBox, 4, 0)
        layout.addWidget(self.windowMinimizeButtonCheckBox, 0, 1)
        layout.addWidget(self.windowMaximizeButtonCheckBox, 1, 1)
        layout.addWidget(self.windowCloseButtonCheckBox, 2, 1)
        layout.addWidget(self.windowContextHelpButtonCheckBox, 3, 1)
        layout.addWidget(self.windowShadeButtonCheckBox, 4, 1)
        layout.addWidget(self.windowStaysOnTopCheckBox, 5, 1)
        layout.addWidget(self.windowStaysOnBottomCheckBox, 6, 1)
        layout.addWidget(self.customizeWindowHintCheckBox, 5, 0)
        self.hintsGroupBox.setLayout(layout)

    def createCheckBox(self, text):
        """Return a check box wired to refresh the preview on click."""
        checkBox = QCheckBox(text)
        checkBox.clicked.connect(self.updatePreview)
        return checkBox

    def createRadioButton(self, text):
        """Return a radio button wired to refresh the preview on click."""
        button = QRadioButton(text)
        button.clicked.connect(self.updatePreview)
        return button
if __name__ == '__main__':
    import sys

    # Stand-alone demo: create the application, show the controller panel
    # (which also pops up its preview window), and run the event loop
    # until the user quits.
    app = QApplication(sys.argv)
    controller = ControllerWindow()
    controller.show()
    sys.exit(app.exec())
| from PySide6.QtCore import Slot, Qt, QRect, QSize
from PySide6.QtGui import QColor, QPainter, QTextFormat
from PySide6.QtWidgets import QPlainTextEdit, QWidget, QTextEdit, QPushButton, QVBoxLayout, QHBoxLayout, QGroupBox, QGridLayout, QCheckBox, QRadioButton, QApplication
class PreviewWindow(QWidget):
def __init__(self, parent=None):
super(PreviewWindow, self).__init__(parent)
self.textEdit = QTextEdit()
self.textEdit.setReadOnly(True)
self.textEdit.setLineWrapMode(QTextEdit.NoWrap)
closeButton = QPushButton("&Close")
closeButton.clicked.connect(self.close)
layout =QVBoxLayout()
layout.addWidget(self.textEdit)
layout.addWidget(closeButton)
self.setLayout(layout)
self.setWindowTitle("Preview")
def setWindowFlags(self, flags):
super(PreviewWindow, self).setWindowFlags(flags)
flag_type = (flags & Qt.WindowType_Mask)
if flag_type == Qt.Window:
text = "Qt.Window"
elif flag_type == Qt.Dialog:
text = "Qt.Dialog"
elif flag_type == Qt.Sheet:
text = "Qt.Sheet"
elif flag_type == Qt.Drawer:
text = "Qt.Drawer"
elif flag_type == Qt.Popup:
text = "Qt.Popup"
elif flag_type == Qt.Tool:
text = "Qt.Tool"
elif flag_type == Qt.ToolTip:
text = "Qt.ToolTip"
elif flag_type == Qt.SplashScreen:
text = "Qt.SplashScreen"
else:
text = ""
if flags & Qt.MSWindowsFixedSizeDialogHint:
text += "\n| Qt.MSWindowsFixedSizeDialogHint"
if flags & Qt.X11BypassWindowManagerHint:
text += "\n| Qt.X11BypassWindowManagerHint"
if flags & Qt.FramelessWindowHint:
text += "\n| Qt.FramelessWindowHint"
if flags & Qt.WindowTitleHint:
text += "\n| Qt.WindowTitleHint"
if flags & Qt.WindowSystemMenuHint:
text += "\n| Qt.WindowSystemMenuHint"
if flags & Qt.WindowMinimizeButtonHint:
text += "\n| Qt.WindowMinimizeButtonHint"
if flags & Qt.WindowMaximizeButtonHint:
text += "\n| Qt.WindowMaximizeButtonHint"
if flags & Qt.WindowCloseButtonHint:
text += "\n| Qt.WindowCloseButtonHint"
if flags & Qt.WindowContextHelpButtonHint:
text += "\n| Qt.WindowContextHelpButtonHint"
if flags & Qt.WindowShadeButtonHint:
text += "\n| Qt.WindowShadeButtonHint"
if flags & Qt.WindowStaysOnTopHint:
text += "\n| Qt.WindowStaysOnTopHint"
if flags & Qt.WindowStaysOnBottomHint:
text += "\n| Qt.WindowStaysOnBottomHint"
if flags & Qt.CustomizeWindowHint:
text += "\n| Qt.CustomizeWindowHint"
self.textEdit.setPlainText(text)
class ControllerWindow(QWidget):
def __init__(self):
super(ControllerWindow, self).__init__()
self.previewWindow = PreviewWindow(self)
self.createTypeGroupBox()
self.createHintsGroupBox()
quitButton = QPushButton("&Quit")
quitButton.clicked.connect(self.close)
bottomLayout = QHBoxLayout()
bottomLayout.addStretch()
bottomLayout.addWidget(quitButton)
mainLayout = QVBoxLayout()
mainLayout.addWidget(self.typeGroupBox)
mainLayout.addWidget(self.hintsGroupBox)
mainLayout.addLayout(bottomLayout)
self.setLayout(mainLayout)
self.setWindowTitle("Window Flags")
self.updatePreview()
def updatePreview(self):
flags = Qt.WindowFlags()
if self.windowRadioButton.isChecked():
flags = Qt.Window
elif self.dialogRadioButton.isChecked():
flags = Qt.Dialog
elif self.sheetRadioButton.isChecked():
flags = Qt.Sheet
elif self.drawerRadioButton.isChecked():
flags = Qt.Drawer
elif self.popupRadioButton.isChecked():
flags = Qt.Popup
elif self.toolRadioButton.isChecked():
flags = Qt.Tool
elif self.toolTipRadioButton.isChecked():
flags = Qt.ToolTip
elif self.splashScreenRadioButton.isChecked():
flags = Qt.SplashScreen
if self.msWindowsFixedSizeDialogCheckBox.isChecked():
flags |= Qt.MSWindowsFixedSizeDialogHint
if self.x11BypassWindowManagerCheckBox.isChecked():
flags |= Qt.X11BypassWindowManagerHint
if self.framelessWindowCheckBox.isChecked():
flags |= Qt.FramelessWindowHint
if self.windowTitleCheckBox.isChecked():
flags |= Qt.WindowTitleHint
if self.windowSystemMenuCheckBox.isChecked():
flags |= Qt.WindowSystemMenuHint
if self.windowMinimizeButtonCheckBox.isChecked():
flags |= Qt.WindowMinimizeButtonHint
if self.windowMaximizeButtonCheckBox.isChecked():
flags |= Qt.WindowMaximizeButtonHint
if self.windowCloseButtonCheckBox.isChecked():
flags |= Qt.WindowCloseButtonHint
if self.windowContextHelpButtonCheckBox.isChecked():
flags |= Qt.WindowContextHelpButtonHint
if self.windowShadeButtonCheckBox.isChecked():
flags |= Qt.WindowShadeButtonHint
if self.windowStaysOnTopCheckBox.isChecked():
flags |= Qt.WindowStaysOnTopHint
if self.windowStaysOnBottomCheckBox.isChecked():
flags |= Qt.WindowStaysOnBottomHint
if self.customizeWindowHintCheckBox.isChecked():
flags |= Qt.CustomizeWindowHint
self.previewWindow.setWindowFlags(flags)
pos = self.previewWindow.pos()
if pos.x() < 0:
pos.setX(0)
if pos.y() < 0:
pos.setY(0)
self.previewWindow.move(pos)
self.previewWindow.show()
def createTypeGroupBox(self):
self.typeGroupBox = QGroupBox("Type")
self.windowRadioButton = self.createRadioButton("Window")
self.dialogRadioButton = self.createRadioButton("Dialog")
self.sheetRadioButton = self.createRadioButton("Sheet")
self.drawerRadioButton = self.createRadioButton("Drawer")
self.popupRadioButton = self.createRadioButton("Popup")
self.toolRadioButton = self.createRadioButton("Tool")
self.toolTipRadioButton = self.createRadioButton("Tooltip")
self.splashScreenRadioButton = self.createRadioButton("Splash screen")
self.windowRadioButton.setChecked(True)
layout = QGridLayout()
layout.addWidget(self.windowRadioButton, 0, 0)
layout.addWidget(self.dialogRadioButton, 1, 0)
layout.addWidget(self.sheetRadioButton, 2, 0)
layout.addWidget(self.drawerRadioButton, 3, 0)
layout.addWidget(self.popupRadioButton, 0, 1)
layout.addWidget(self.toolRadioButton, 1, 1)
layout.addWidget(self.toolTipRadioButton, 2, 1)
layout.addWidget(self.splashScreenRadioButton, 3, 1)
self.typeGroupBox.setLayout(layout)
def createHintsGroupBox(self):
self.hintsGroupBox = QGroupBox("Hints")
self.msWindowsFixedSizeDialogCheckBox = self.createCheckBox("MS Windows fixed size dialog")
self.x11BypassWindowManagerCheckBox = self.createCheckBox("X11 bypass window manager")
self.framelessWindowCheckBox = self.createCheckBox("Frameless window")
self.windowTitleCheckBox = self.createCheckBox("Window title")
self.windowSystemMenuCheckBox = self.createCheckBox("Window system menu")
self.windowMinimizeButtonCheckBox = self.createCheckBox("Window minimize button")
self.windowMaximizeButtonCheckBox = self.createCheckBox("Window maximize button")
self.windowCloseButtonCheckBox = self.createCheckBox("Window close button")
self.windowContextHelpButtonCheckBox = self.createCheckBox("Window context help button")
self.windowShadeButtonCheckBox = self.createCheckBox("Window shade button")
self.windowStaysOnTopCheckBox = self.createCheckBox("Window stays on top")
self.windowStaysOnBottomCheckBox = self.createCheckBox("Window stays on bottom")
self.customizeWindowHintCheckBox = self.createCheckBox("Customize window")
layout = QGridLayout()
layout.addWidget(self.msWindowsFixedSizeDialogCheckBox, 0, 0)
layout.addWidget(self.x11BypassWindowManagerCheckBox, 1, 0)
layout.addWidget(self.framelessWindowCheckBox, 2, 0)
layout.addWidget(self.windowTitleCheckBox, 3, 0)
layout.addWidget(self.windowSystemMenuCheckBox, 4, 0)
layout.addWidget(self.windowMinimizeButtonCheckBox, 0, 1)
layout.addWidget(self.windowMaximizeButtonCheckBox, 1, 1)
layout.addWidget(self.windowCloseButtonCheckBox, 2, 1)
layout.addWidget(self.windowContextHelpButtonCheckBox, 3, 1)
layout.addWidget(self.windowShadeButtonCheckBox, 4, 1)
layout.addWidget(self.windowStaysOnTopCheckBox, 5, 1)
layout.addWidget(self.windowStaysOnBottomCheckBox, 6, 1)
layout.addWidget(self.customizeWindowHintCheckBox, 5, 0)
self.hintsGroupBox.setLayout(layout)
def createCheckBox(self, text):
checkBox = QCheckBox(text)
checkBox.clicked.connect(self.updatePreview)
return checkBox
def createRadioButton(self, text):
button = QRadioButton(text)
button.clicked.connect(self.updatePreview)
return button
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
controller = ControllerWindow()
controller.show()
sys.exit(app.exec())
| none | 1 | 2.124129 | 2 | |
upvote/gae/datastore/models/exemption_test.py | iwikmai/upvote | 453 | 6622145 | <gh_stars>100-1000
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for exemption models."""
import datetime
import mock
from upvote.gae.datastore import test_utils
from upvote.gae.datastore.models import exemption
from upvote.gae.datastore.models import host as host_models
from upvote.gae.lib.testing import basetest
from upvote.shared import constants
# Deliberately never overrides GetPlatformName(); used by
# testGetPlatform_Unknown to drive the UnknownPlatformError path.
class MysteryHost(host_models.Host):
  """A Host Model which doesn't implement GetPlatformName()."""
class ExemptionTest(basetest.UpvoteTestCase):
  """Unit tests for the Exemption model's lookup helpers and state machine."""

  def testCanChangeToState(self):
    """A fresh (REQUESTED) Exemption may go to PENDING, not straight to APPROVED."""
    exm = test_utils.CreateExemption('aaa').get() # Initial state is REQUESTED
    self.assertTrue(exm.CanChangeToState(constants.EXEMPTION_STATE.PENDING))
    self.assertFalse(exm.CanChangeToState(constants.EXEMPTION_STATE.APPROVED))

  def testGet(self):
    """Get() returns None for an unknown host and the entity once created."""
    host_id = '12345'
    self.assertIsNone(exemption.Exemption.Get(host_id))
    test_utils.CreateExemption(host_id)
    self.assertIsNotNone(exemption.Exemption.Get(host_id))

  def testExists(self):
    """Exists() flips from False to True once an Exemption is created."""
    host_id = '12345'
    self.assertFalse(exemption.Exemption.Exists(host_id))
    test_utils.CreateExemption(host_id)
    self.assertTrue(exemption.Exemption.Exists(host_id))

  def testGetPlatform_Unknown(self):
    """A Host without GetPlatformName() yields UnknownPlatformError."""
    host_id = MysteryHost().put().id()
    exm_key = test_utils.CreateExemption(host_id)
    with self.assertRaises(exemption.UnknownPlatformError):
      exemption.Exemption.GetPlatform(exm_key)

  def testGetPlatform_Success(self):
    """Santa hosts map to MACOS and Bit9 hosts to WINDOWS."""
    host_id = test_utils.CreateSantaHost().key.id()
    exm_key = test_utils.CreateExemption(host_id)
    self.assertEqual(
        constants.PLATFORM.MACOS, exemption.Exemption.GetPlatform(exm_key))
    host_id = test_utils.CreateBit9Host().key.id()
    exm_key = test_utils.CreateExemption(host_id)
    self.assertEqual(
        constants.PLATFORM.WINDOWS, exemption.Exemption.GetPlatform(exm_key))

  def testGetHostId(self):
    """GetHostId() recovers the host id the Exemption key was built from."""
    expected_host_id = test_utils.CreateSantaHost().key.id()
    exm_key = test_utils.CreateExemption(expected_host_id)
    actual_host_id = exemption.Exemption.GetHostId(exm_key)
    self.assertEqual(expected_host_id, actual_host_id)

  @mock.patch.object(exemption.monitoring, 'state_changes')
  def testInsert_Success(self, mock_metric):
    """Insert() persists one entity, bumps the metric, and logs to BigQuery."""
    self.assertEntityCount(exemption.Exemption, 0)
    host_id = 'valid_host_id'
    actual_key = exemption.Exemption.Insert(
        host_id, datetime.datetime.utcnow(),
        constants.EXEMPTION_REASON.DEVELOPER_MACOS)
    expected_key = exemption.Exemption.CreateKey(host_id)
    self.assertEqual(expected_key, actual_key)
    self.assertEntityCount(exemption.Exemption, 1)
    self.assertIsNotNone(expected_key.get())
    mock_metric.Increment.assert_called_once()
    self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.EXEMPTION)

  @mock.patch.object(exemption.monitoring, 'state_changes')
  def testInsert_AlreadyExistsError(self, mock_metric):
    """A second Insert() for the same host raises AlreadyExistsError."""
    self.assertEntityCount(exemption.Exemption, 0)
    host_id = 'valid_host_id'
    exemption.Exemption.Insert(
        host_id, datetime.datetime.utcnow(),
        constants.EXEMPTION_REASON.DEVELOPER_MACOS)
    self.assertEntityCount(exemption.Exemption, 1)
    mock_metric.Increment.assert_called_once()
    self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.EXEMPTION)
    # Attempt a duplicate insertion.
    with self.assertRaises(exemption.AlreadyExistsError):
      exemption.Exemption.Insert(
          host_id, datetime.datetime.utcnow(),
          constants.EXEMPTION_REASON.DEVELOPER_MACOS)

  @mock.patch.object(exemption.monitoring, 'state_changes')
  def testChangeState_InvalidExemptionError(self, mock_metric):
    """ChangeState() on a key with no entity raises InvalidExemptionError."""
    exm_key = exemption.Exemption.CreateKey('invalid_host_id')
    with self.assertRaises(exemption.InvalidExemptionError):
      exemption.Exemption.ChangeState(
          exm_key, constants.EXEMPTION_STATE.APPROVED)
    mock_metric.Increment.assert_not_called()

  @mock.patch.object(exemption.monitoring, 'state_changes')
  def testChangeState_InvalidStateChangeError(self, mock_metric):
    """A disallowed transition (REQUESTED -> APPROVED) raises and skips the metric."""
    host_id = 'valid_host_id'
    exm_key = exemption.Exemption.Insert(
        host_id, datetime.datetime.utcnow(),
        constants.EXEMPTION_REASON.DEVELOPER_MACOS)
    mock_metric.Increment.assert_called_once()
    mock_metric.reset_mock()
    self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.EXEMPTION)
    with self.assertRaises(exemption.InvalidStateChangeError):
      exemption.Exemption.ChangeState(
          exm_key, constants.EXEMPTION_STATE.APPROVED)
    mock_metric.Increment.assert_not_called()

  @mock.patch.object(exemption.monitoring, 'state_changes')
  def testChangeState_InvalidDetailsError(self, mock_metric):
    """Non-string details raise InvalidDetailsError and leave state untouched."""
    self.assertEntityCount(exemption.Exemption, 0)
    host_id = 'valid_host_id'
    exm_key = exemption.Exemption.Insert(
        host_id,
        datetime.datetime.utcnow(),
        constants.EXEMPTION_REASON.OTHER,
        other_text='Test')
    self.assertEntityCount(exemption.Exemption, 1)
    mock_metric.Increment.assert_called_once()
    mock_metric.reset_mock()
    self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.EXEMPTION)
    bad_details = ['aaa', None, 'bbb']
    with self.assertRaises(exemption.InvalidDetailsError):
      exemption.Exemption.ChangeState(
          exm_key, constants.EXEMPTION_STATE.PENDING, details=bad_details)
    exm = exm_key.get()
    self.assertEqual(constants.EXEMPTION_STATE.REQUESTED, exm.state)
    self.assertEntityCount(exemption.Exemption, 1)
    mock_metric.Increment.assert_not_called()

  @mock.patch.object(exemption.monitoring, 'state_changes')
  def testChangeState_Success(self, mock_metric):
    """A valid transition updates state, bumps the metric, and logs to BigQuery."""
    self.assertEntityCount(exemption.Exemption, 0)
    host_id = 'valid_host_id'
    exm_key = exemption.Exemption.Insert(
        host_id,
        datetime.datetime.utcnow(),
        constants.EXEMPTION_REASON.OTHER,
        other_text='Test')
    self.assertEntityCount(exemption.Exemption, 1)
    mock_metric.Increment.assert_called_once()
    mock_metric.reset_mock()
    self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.EXEMPTION)
    exemption.Exemption.ChangeState(
        exm_key, constants.EXEMPTION_STATE.PENDING)
    exm = exm_key.get()
    self.assertEqual(constants.EXEMPTION_STATE.PENDING, exm.state)
    self.assertEntityCount(exemption.Exemption, 1)
    mock_metric.Increment.assert_called_once()
    self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.EXEMPTION)
if __name__ == '__main__':
  # Run this module's tests under the upvote test harness.
  basetest.main()
| # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for exemption models."""
import datetime
import mock
from upvote.gae.datastore import test_utils
from upvote.gae.datastore.models import exemption
from upvote.gae.datastore.models import host as host_models
from upvote.gae.lib.testing import basetest
from upvote.shared import constants
class MysteryHost(host_models.Host):
"""A Host Model which doesn't implement GetPlatformName()."""
class ExemptionTest(basetest.UpvoteTestCase):
def testCanChangeToState(self):
exm = test_utils.CreateExemption('aaa').get() # Initial state is REQUESTED
self.assertTrue(exm.CanChangeToState(constants.EXEMPTION_STATE.PENDING))
self.assertFalse(exm.CanChangeToState(constants.EXEMPTION_STATE.APPROVED))
def testGet(self):
host_id = '12345'
self.assertIsNone(exemption.Exemption.Get(host_id))
test_utils.CreateExemption(host_id)
self.assertIsNotNone(exemption.Exemption.Get(host_id))
def testExists(self):
host_id = '12345'
self.assertFalse(exemption.Exemption.Exists(host_id))
test_utils.CreateExemption(host_id)
self.assertTrue(exemption.Exemption.Exists(host_id))
def testGetPlatform_Unknown(self):
host_id = MysteryHost().put().id()
exm_key = test_utils.CreateExemption(host_id)
with self.assertRaises(exemption.UnknownPlatformError):
exemption.Exemption.GetPlatform(exm_key)
def testGetPlatform_Success(self):
host_id = test_utils.CreateSantaHost().key.id()
exm_key = test_utils.CreateExemption(host_id)
self.assertEqual(
constants.PLATFORM.MACOS, exemption.Exemption.GetPlatform(exm_key))
host_id = test_utils.CreateBit9Host().key.id()
exm_key = test_utils.CreateExemption(host_id)
self.assertEqual(
constants.PLATFORM.WINDOWS, exemption.Exemption.GetPlatform(exm_key))
def testGetHostId(self):
expected_host_id = test_utils.CreateSantaHost().key.id()
exm_key = test_utils.CreateExemption(expected_host_id)
actual_host_id = exemption.Exemption.GetHostId(exm_key)
self.assertEqual(expected_host_id, actual_host_id)
@mock.patch.object(exemption.monitoring, 'state_changes')
def testInsert_Success(self, mock_metric):
self.assertEntityCount(exemption.Exemption, 0)
host_id = 'valid_host_id'
actual_key = exemption.Exemption.Insert(
host_id, datetime.datetime.utcnow(),
constants.EXEMPTION_REASON.DEVELOPER_MACOS)
expected_key = exemption.Exemption.CreateKey(host_id)
self.assertEqual(expected_key, actual_key)
self.assertEntityCount(exemption.Exemption, 1)
self.assertIsNotNone(expected_key.get())
mock_metric.Increment.assert_called_once()
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.EXEMPTION)
@mock.patch.object(exemption.monitoring, 'state_changes')
def testInsert_AlreadyExistsError(self, mock_metric):
self.assertEntityCount(exemption.Exemption, 0)
host_id = 'valid_host_id'
exemption.Exemption.Insert(
host_id, datetime.datetime.utcnow(),
constants.EXEMPTION_REASON.DEVELOPER_MACOS)
self.assertEntityCount(exemption.Exemption, 1)
mock_metric.Increment.assert_called_once()
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.EXEMPTION)
# Attempt a duplicate insertion.
with self.assertRaises(exemption.AlreadyExistsError):
exemption.Exemption.Insert(
host_id, datetime.datetime.utcnow(),
constants.EXEMPTION_REASON.DEVELOPER_MACOS)
@mock.patch.object(exemption.monitoring, 'state_changes')
def testChangeState_InvalidExemptionError(self, mock_metric):
exm_key = exemption.Exemption.CreateKey('invalid_host_id')
with self.assertRaises(exemption.InvalidExemptionError):
exemption.Exemption.ChangeState(
exm_key, constants.EXEMPTION_STATE.APPROVED)
mock_metric.Increment.assert_not_called()
@mock.patch.object(exemption.monitoring, 'state_changes')
def testChangeState_InvalidStateChangeError(self, mock_metric):
host_id = 'valid_host_id'
exm_key = exemption.Exemption.Insert(
host_id, datetime.datetime.utcnow(),
constants.EXEMPTION_REASON.DEVELOPER_MACOS)
mock_metric.Increment.assert_called_once()
mock_metric.reset_mock()
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.EXEMPTION)
with self.assertRaises(exemption.InvalidStateChangeError):
exemption.Exemption.ChangeState(
exm_key, constants.EXEMPTION_STATE.APPROVED)
mock_metric.Increment.assert_not_called()
@mock.patch.object(exemption.monitoring, 'state_changes')
def testChangeState_InvalidDetailsError(self, mock_metric):
self.assertEntityCount(exemption.Exemption, 0)
host_id = 'valid_host_id'
exm_key = exemption.Exemption.Insert(
host_id,
datetime.datetime.utcnow(),
constants.EXEMPTION_REASON.OTHER,
other_text='Test')
self.assertEntityCount(exemption.Exemption, 1)
mock_metric.Increment.assert_called_once()
mock_metric.reset_mock()
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.EXEMPTION)
bad_details = ['aaa', None, 'bbb']
with self.assertRaises(exemption.InvalidDetailsError):
exemption.Exemption.ChangeState(
exm_key, constants.EXEMPTION_STATE.PENDING, details=bad_details)
exm = exm_key.get()
self.assertEqual(constants.EXEMPTION_STATE.REQUESTED, exm.state)
self.assertEntityCount(exemption.Exemption, 1)
mock_metric.Increment.assert_not_called()
@mock.patch.object(exemption.monitoring, 'state_changes')
def testChangeState_Success(self, mock_metric):
self.assertEntityCount(exemption.Exemption, 0)
host_id = 'valid_host_id'
exm_key = exemption.Exemption.Insert(
host_id,
datetime.datetime.utcnow(),
constants.EXEMPTION_REASON.OTHER,
other_text='Test')
self.assertEntityCount(exemption.Exemption, 1)
mock_metric.Increment.assert_called_once()
mock_metric.reset_mock()
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.EXEMPTION)
exemption.Exemption.ChangeState(
exm_key, constants.EXEMPTION_STATE.PENDING)
exm = exm_key.get()
self.assertEqual(constants.EXEMPTION_STATE.PENDING, exm.state)
self.assertEntityCount(exemption.Exemption, 1)
mock_metric.Increment.assert_called_once()
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.EXEMPTION)
if __name__ == '__main__':
basetest.main() | en | 0.849424 | # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for exemption models. A Host Model which doesn't implement GetPlatformName(). # Initial state is REQUESTED # Attempt a duplicate insertion. | 1.9862 | 2 |
mpisppy/extensions/avgminmaxer.py | Matthew-Signorotti/mpi-sppy | 2 | 6622146 | <reponame>Matthew-Signorotti/mpi-sppy
# Copyright 2020 by <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
# This software is distributed under the 3-clause BSD License.
# An extension to compute and output avg, min, max for
# a component (e.g., first stage cost).
# DLW, Feb 2019
# This extension uses options["avgminmax_name"]
import mpisppy.extensions.xhatbase
class MinMaxAvg(mpisppy.extensions.xhatbase.XhatBase):
    """PH extension that reports avg/min/max of one scenario component.

    The component name (e.g. a first-stage cost expression) is read from
    ``ph.options["avgminmax_name"]``; statistics are printed on the rank-0
    process after iteration 0 and at the end of every PH iteration.

    Args:
        ph (PH object): the calling object
        rank (int): mpi process rank of currently running process
        n_proc (int): number of mpi processes
    """
    def __init__(self, ph, rank, n_proc):
        super().__init__(ph, rank, n_proc)
        # Name of the component whose spread is tracked.
        self.compstr = self.ph.options["avgminmax_name"]

    def _report(self):
        """Compute avg/min/max of the component and print them on rank 0.

        avg_min_max() is called on every rank (presumably a collective
        reduction -- confirm against PH), so it must not be guarded by the
        rank check; only the printing is.
        """
        avgv, minv, maxv = self.ph.avg_min_max(self.compstr)
        if (self.cylinder_rank == 0):
            print (" ### ", self.compstr,": avg, min, max, max-min", avgv, minv, maxv, maxv-minv)

    def pre_iter0(self):
        # Nothing to do before iteration 0.
        return

    def post_iter0(self):
        # Report statistics once the iteration-0 solves are done.
        self._report()

    def miditer(self, PHIter, conv):
        # Nothing to do mid-iteration.
        return

    def enditer(self, PHIter):
        # Report statistics at the end of each iteration.
        self._report()

    def post_everything(self, PHIter, conv):
        # Nothing to do after PH completes.
        return
| # Copyright 2020 by <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
# This software is distributed under the 3-clause BSD License.
# An extension to compute and output avg, min, max for
# a component (e.g., first stage cost).
# DLW, Feb 2019
# This extension uses options["avgminmax_name"]
import mpisppy.extensions.xhatbase
class MinMaxAvg(mpisppy.extensions.xhatbase.XhatBase):
"""
Args:
ph (PH object): the calling object
rank (int): mpi process rank of currently running process
"""
def __init__(self, ph, rank, n_proc):
super().__init__(ph, rank, n_proc)
self.compstr = self.ph.options["avgminmax_name"]
def pre_iter0(self):
return
def post_iter0(self):
avgv, minv, maxv = self.ph.avg_min_max(self.compstr)
if (self.cylinder_rank == 0):
print (" ### ", self.compstr,": avg, min, max, max-min", avgv, minv, maxv, maxv-minv)
def miditer(self, PHIter, conv):
return
def enditer(self, PHIter):
avgv, minv, maxv = self.ph.avg_min_max(self.compstr)
if (self.cylinder_rank == 0):
print (" ### ", self.compstr,": avg, min, max, max-min", avgv, minv, maxv, maxv-minv)
def post_everything(self, PHIter, conv):
return | en | 0.687813 | # Copyright 2020 by <NAME>, <NAME>, <NAME>, <NAME>, and <NAME> # This software is distributed under the 3-clause BSD License. # An extension to compute and output avg, min, max for # a component (e.g., first stage cost). # DLW, Feb 2019 # This extension uses options["avgminmax_name"] Args: ph (PH object): the calling object rank (int): mpi process rank of currently running process ### ", self.compstr,": avg, min, max, max-min", avgv, minv, maxv, maxv-minv) ### ", self.compstr,": avg, min, max, max-min", avgv, minv, maxv, maxv-minv) | 2.101052 | 2 |
src/commands/start.py | rsoorajs/TorrentSeedr | 0 | 6622147 | import json
import asyncio
import requests, json
from src.objs import *
from src.commands.addTorrent import addTorrent
from src.functions.keyboard import mainReplyKeyboard, githubAuthKeyboard
# Start handler
@bot.message_handler(commands=['start'])
def start(message):
    """Handle /start: greet the user or act on a deep-link parameter.

    Recognized payloads of ``/start <param>`` (payload text begins after
    the prefix plus a one-character separator):
      * ``addTorrentDb...``  -- key of a magnet link stored in the DB.
      * ``addTorrentURL...`` -- is.gd short-link id resolving to a magnet.
      * ``oauth...``         -- GitHub OAuth authorization code.
      * anything else        -- key for login/captcha data fetched from the
        companion web service, handed off to ``login()``.
    """
    userId = message.from_user.id
    # Deep-link payload, i.e. the first token after "/start" (None if absent).
    params = message.text.split()[1] if len(message.text.split()) > 1 else None
    userLanguage = dbSql.getSetting(userId, 'language')
    if not params:
        bot.send_message(message.chat.id, text=language['greet'][userLanguage], reply_markup=mainReplyKeyboard(userId, userLanguage))
    #! If start paramater is passed
    if params:
        # Placeholder message; edited in place once the action resolves.
        sent = bot.send_message(message.chat.id, text=language['processing'][userLanguage])
        #! If add torrent paramater is passed via database key
        if params.startswith('addTorrentDb'):
            key = params[13:]  # skip "addTorrentDb" (12 chars) + separator
            magnetLink = dbSql.getMagnet(key)
            asyncio.run(addTorrent(message, userLanguage, magnetLink, messageId=sent.id))
        #! If add torrent paramater is passed via URL
        elif params.startswith('addTorrentURL'):
            url = f'https://is.gd/{params[14:]}'  # skip "addTorrentURL" + separator
            # Don't follow the redirect; the magnet URI is in the Location header.
            response = requests.get(url, allow_redirects=False)
            magnetLink = response.headers['Location'] if 'Location' in response.headers else None
            asyncio.run(addTorrent(message, userLanguage, magnetLink, messageId=sent.id))
        #! Github oauth
        elif params.startswith('oauth'):
            code = params[6:]  # skip "oauth" (5 chars) + separator
            # Exchange the authorization code for an access token.
            params = {'client_id': 'ba5e2296f2bbe59f5097', 'client_secret': config['githubSecret'], 'code':code}
            response = requests.get('https://github.com/login/oauth/access_token', params=params)
            #! Successfully authenticated
            if response.text[:13] == 'access_token=':
                accessToken = response.text[13:].split('&', 1)[0]
                headers = {'Authorization': f'token {accessToken}'}
                response = requests.get('https://api.github.com/user', headers=headers).json()
                if 'login' in response:
                    bot.edit_message_text(language['loggedInAs'][userLanguage].format(f"<a href='https://github.com/{response['login']}'>{response['login'].capitalize()}</a>"), chat_id=sent.chat.id, message_id=sent.id)
                    following = requests.get(f"https://api.github.com/users/{response['login']}/following").json()
                    #! User is following
                    if any(dicT['login'] == 'hemantapkh' for dicT in following):
                        dbSql.setSetting(userId, 'githubId', response['id'])
                        bot.send_message(chat_id=message.chat.id, text=language['thanksGithub'][userLanguage])
                    #! User is not following
                    else:
                        bot.send_message(chat_id=message.chat.id, text=language['ghNotFollowed'][userLanguage], reply_markup=githubAuthKeyboard(userLanguage))
                #! Error
                else:
                    bot.edit_message_text(language['processFailed'][userLanguage], chat_id=sent.chat.id, message_id=sent.id)
        else:
            # Unknown payload: treat it as a key for login/captcha data held
            # by the companion web service.
            data = requests.get(f"https://torrentseedrbot.herokuapp.com/getdata?key={config['databaseKey']}&id={params}")
            data = json.loads(data.content)
            if data['status'] == 'success':
                data = json.loads(data['data'])
                login(sent, userLanguage, data)
            else:
                bot.edit_message_text(language['processFailed'][userLanguage], chat_id=sent.chat.id, message_id=sent.id)
#: Account login
def login(sent, userLanguage, data):
    """Log the user's default Seedr account in using a solved captcha token.

    Args:
        sent: the bot's earlier "processing" message; it is deleted on
            success or edited in place with an error text on failure.
        userLanguage: language key used to look up localized reply strings.
        data (dict): must contain 'captchaResponse', the solved captcha
            token produced by the external captcha flow.

    Silently does nothing when the user has no default account with both
    an email and a password stored.
    """
    userId = sent.chat.id
    ac = dbSql.getDefaultAc(userId)
    if ac and ac['email'] and ac['password']:
        # NOTE(review): rebinding `data` shadows the parameter; the captcha
        # token is read out of the old dict before the rebind, so this is
        # safe but confusing. The same token is sent for both captcha
        # providers (Seedr accepts either field).
        data = {
            'username': ac['email'],
            'password': ac['password'],
            'rememberme': 'on',
            'g-recaptcha-response': data['captchaResponse'],
            'h-captcha-response': data['captchaResponse']
        }
        response = requests.post('https://www.seedr.cc/auth/login', data=data)
        cookies = requests.utils.dict_from_cookiejar(response.cookies)
        response = response.json()
        #! A non-empty cookie jar is treated as a successful login --
        #! presumably Seedr only sets session cookies on success; TODO confirm.
        if cookies:
            # Persist the session cookie for the account, then replace the
            # "processing" message with a success confirmation.
            dbSql.updateAcColumn(userId, response['user_id'], 'cookie', json.dumps(cookies))
            bot.delete_message(sent.chat.id, sent.id)
            bot.send_message(chat_id=sent.chat.id, text=language['loggedInAs'][userLanguage].format(response['username']), reply_markup=mainReplyKeyboard(userId, userLanguage))
        else:
            #! Captcha failed
            if response['reason_phrase'] in ['RECAPTCHA_UNSOLVED', 'RECAPTCHA_FAILED']:
                bot.edit_message_text(language['captchaFailled'][userLanguage], chat_id=sent.chat.id, message_id=sent.id)
            #! Wrong username or password stored in the database
            elif response['reason_phrase'] == 'INCORRECT_PASSWORD':
                bot.edit_message_text(language['incorrectDbPassword'][userLanguage], chat_id=sent.chat.id, message_id=sent.id)
            #! Unknown error -- NOTE(review): raises KeyError if the response
            #! carries no 'reason_phrase' at all; TODO confirm API contract.
            else:
                bot.edit_message_text(language['unknownError'][userLanguage], chat_id=sent.chat.id, message_id=sent.id)
| import json
import asyncio
import requests, json
from src.objs import *
from src.commands.addTorrent import addTorrent
from src.functions.keyboard import mainReplyKeyboard, githubAuthKeyboard
# Start handler
@bot.message_handler(commands=['start'])
def start(message):
    """Handle the /start command, dispatching on an optional deep-link payload.

    Telegram deep links deliver "/start <param>". Supported prefixes:
      * addTorrentDb...  -- look up a stored magnet link by database key.
      * addTorrentURL... -- resolve an is.gd short URL into a magnet link.
      * oauth...         -- complete the GitHub OAuth follow-check flow.
      * anything else    -- treated as a captcha-session id: fetch the solved
                            captcha from the helper service and log in.
    Without a parameter only the localized greeting keyboard is sent.
    """
    userId = message.from_user.id
    # Deep-link payload, e.g. "/start oauth-abc" -> "oauth-abc".
    params = message.text.split()[1] if len(message.text.split()) > 1 else None
    userLanguage = dbSql.getSetting(userId, 'language')
    if not params:
        bot.send_message(message.chat.id, text=language['greet'][userLanguage], reply_markup=mainReplyKeyboard(userId, userLanguage))
    #! A start parameter was passed: acknowledge first, then dispatch on it.
    if params:
        sent = bot.send_message(message.chat.id, text=language['processing'][userLanguage])
        #! Torrent referenced by a key previously stored in the database.
        if params.startswith('addTorrentDb'):
            # 'addTorrentDb' is 12 chars; [13:] additionally skips one
            # separator character after the prefix -- TODO confirm link format.
            key = params[13:]
            magnetLink = dbSql.getMagnet(key)
            asyncio.run(addTorrent(message, userLanguage, magnetLink, messageId=sent.id))
        #! Torrent referenced through an is.gd short URL suffix.
        elif params.startswith('addTorrentURL'):
            url = f'https://is.gd/{params[14:]}'
            # is.gd answers with a redirect; the magnet link (if any) is in
            # the Location header, so redirects must not be followed.
            response = requests.get(url, allow_redirects=False)
            magnetLink = response.headers['Location'] if 'Location' in response.headers else None
            asyncio.run(addTorrent(message, userLanguage, magnetLink, messageId=sent.id))
        #! GitHub OAuth callback carrying the temporary authorization code.
        elif params.startswith('oauth'):
            code = params[6:]
            # NOTE(review): rebinding `params` shadows the deep-link string;
            # it is not used again afterwards, so this is safe but confusing.
            params = {'client_id': 'ba5e2296f2bbe59f5097', 'client_secret': config['githubSecret'], 'code':code}
            response = requests.get('https://github.com/login/oauth/access_token', params=params)
            #! Successfully authenticated: exchange succeeded, token is in body.
            if response.text[:13] == 'access_token=':
                accessToken = response.text[13:].split('&', 1)[0]
                headers = {'Authorization': f'token {accessToken}'}
                response = requests.get('https://api.github.com/user', headers=headers).json()
                # NOTE(review): no else branch -- if the user fetch lacks
                # 'login', the "processing" message is left unchanged.
                if 'login' in response:
                    bot.edit_message_text(language['loggedInAs'][userLanguage].format(f"<a href='https://github.com/{response['login']}'>{response['login'].capitalize()}</a>"), chat_id=sent.chat.id, message_id=sent.id)
                    following = requests.get(f"https://api.github.com/users/{response['login']}/following").json()
                    #! User follows the author's GitHub account: remember it.
                    if any(dicT['login'] == 'hemantapkh' for dicT in following):
                        dbSql.setSetting(userId, 'githubId', response['id'])
                        bot.send_message(chat_id=message.chat.id, text=language['thanksGithub'][userLanguage])
                    #! User is not following: ask again with the auth keyboard.
                    else:
                        bot.send_message(chat_id=message.chat.id, text=language['ghNotFollowed'][userLanguage], reply_markup=githubAuthKeyboard(userLanguage))
            #! Token exchange failed.
            else:
                bot.edit_message_text(language['processFailed'][userLanguage], chat_id=sent.chat.id, message_id=sent.id)
        #! Fallback: treat the parameter as a captcha-session id and fetch the
        #! solved captcha payload from the helper service.
        else:
            data = requests.get(f"https://torrentseedrbot.herokuapp.com/getdata?key={config['databaseKey']}&id={params}")
            data = json.loads(data.content)
            if data['status'] == 'success':
                # 'data' field is itself JSON-encoded; decode before login.
                data = json.loads(data['data'])
                login(sent, userLanguage, data)
            else:
                bot.edit_message_text(language['processFailed'][userLanguage], chat_id=sent.chat.id, message_id=sent.id)
#: Account login
def login(sent, userLanguage, data):
    """Log the user's default Seedr account in with a solved captcha token.

    Args:
        sent: the bot's earlier "processing" message; deleted on success or
            edited in place with a localized error text on failure.
        userLanguage: language key for localized reply strings.
        data (dict): carries 'captchaResponse', the solved captcha token.

    Does nothing when no default account with both email and password
    is stored for the user.
    """
    userId = sent.chat.id
    account = dbSql.getDefaultAc(userId)
    if not (account and account['email'] and account['password']):
        return
    # Seedr accepts the same token for either captcha provider.
    captchaToken = data['captchaResponse']
    payload = {
        'username': account['email'],
        'password': account['password'],
        'rememberme': 'on',
        'g-recaptcha-response': captchaToken,
        'h-captcha-response': captchaToken
    }
    rawResponse = requests.post('https://www.seedr.cc/auth/login', data=payload)
    sessionCookies = requests.utils.dict_from_cookiejar(rawResponse.cookies)
    body = rawResponse.json()
    #! A non-empty cookie jar means the login succeeded.
    if sessionCookies:
        # Persist the session cookie, then confirm with the main keyboard.
        dbSql.updateAcColumn(userId, body['user_id'], 'cookie', json.dumps(sessionCookies))
        bot.delete_message(sent.chat.id, sent.id)
        bot.send_message(chat_id=sent.chat.id, text=language['loggedInAs'][userLanguage].format(body['username']), reply_markup=mainReplyKeyboard(userId, userLanguage))
        return
    # Map the failure reason to the localized error message key.
    reason = body['reason_phrase']
    if reason in ('RECAPTCHA_UNSOLVED', 'RECAPTCHA_FAILED'):
        errorKey = 'captchaFailled'
    elif reason == 'INCORRECT_PASSWORD':
        errorKey = 'incorrectDbPassword'
    else:
        errorKey = 'unknownError'
    bot.edit_message_text(language[errorKey][userLanguage], chat_id=sent.chat.id, message_id=sent.id)
| en | 0.473293 | # Start handler #! If start paramater is passed #! If add torrent paramater is passed via database key #! If add torrent paramater is passed via URL #! Github oauth #! Successfully authenticated #! User is following #! User is not following #! Error #: Account login #! If account logged in successfully #! Captcha failed #! Wrong username or password #! Unknown error | 2.532515 | 3 |
src/route_viewer/show_args.py | masaharu-kato-lab/firefly_algorithm | 2 | 6622148 | #!env/bin/python
import pickle
import argparse
import json
import sys
import os
sys.path.append(os.path.dirname(__file__) + '/../route_planner')
def main():
    """Load a pickled route-planner binary and print the arguments stored in it."""
    parser = argparse.ArgumentParser(description='Route binary arguments checker')
    parser.add_argument('input', type=str, help='Input binary pickle file path')
    opts = parser.parse_args()
    # The route binary is a pickled result object carrying the original
    # planner arguments in its `args` attribute.
    with open(opts.input, mode='rb') as fp:
        payload = pickle.load(fp)
    print(payload.args)
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
| #!env/bin/python
import pickle
import argparse
import json
import sys
import os
sys.path.append(os.path.dirname(__file__) + '/../route_planner')
def main():
    """Load a pickled route-planner binary and print the arguments stored in it.

    Command line:
        input -- path to the binary pickle file written by the route planner.
    """
    argp = argparse.ArgumentParser(description='Route binary arguments checker')
    argp.add_argument('input', type=str, help='Input binary pickle file path')
    args = argp.parse_args()
    # NOTE(review): pickle.load can execute arbitrary code from the file --
    # only open trusted route binaries.
    with open(args.input, mode='rb') as f:
        out_bin = pickle.load(f)
    # `out_bin` is presumably a route_planner result object carrying the
    # original CLI arguments in its `args` attribute -- TODO confirm.
    print(out_bin.args)
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
| ru | 0.18615 | #!env/bin/python | 2.904024 | 3 |
visionRecog.py | kureuetan/img_recog_for_non_eng_speakers | 0 | 6622149 | <reponame>kureuetan/img_recog_for_non_eng_speakers
#!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
VisionRecog class is the group of functions in order to take the picture by Picamera,
analyze the image by Google Cloud Vision API, return recognized objects by texts,
translate into local languages by Google translated API, and talk them by artificial voice
created by Google TextToSpeech API.
For using this file, 'words.py' needs to be placed in the same folder.
The file is intended to be used for vision_recog_with_button.py.
Picamera should be equipped in Raspberry Pi 3.
"""
import io
import logging
import os
import picamera
import signal
import subprocess
from time import sleep
# Imports the Google Cloud client library
from google.cloud import vision
from google.cloud.vision import types
from google.cloud import texttospeech
from google.cloud import translate_v2
from words import words_dict
class VisionRecog():
    """Pi-camera capture plus Google Cloud recognize/translate/speak pipeline.

    The flow is: trigger an already-running ``raspistill`` process to take a
    photo, send the image to the Cloud Vision API (text, logo, or label
    detection), translate the result into the target language with the
    Translate API, and optionally speak it through the Text-to-Speech API.
    Localized phrases come from ``words_dict`` (see words.py).
    """
    def __init__(self, lang='en', lang_code='en-US'):
        """Store the target language and create the Vision API client.

        Args:
            lang: two-letter target language used for translations and for
                looking up phrases in ``words_dict`` (e.g. 'en').
            lang_code: BCP-47 voice code for Text-to-Speech (e.g. 'en-US').
        """
        self.lang = lang
        self.lang_code = lang_code
        self.client = vision.ImageAnnotatorClient()
    def get_hints(self):
        """Return every voice-command phrase recognized for ``self.lang``."""
        hints = (words_dict[self.lang]['read_text']
                 + words_dict[self.lang]['read_logo']
                 + words_dict[self.lang]['label_detect']
                 + words_dict[self.lang]['finish_list'])
        return hints
    def getImage(self):
        """Ask the running raspistill process to capture a photo.

        Returns:
            The fixed path the captured image is written to.

        Raises:
            subprocess.CalledProcessError: when raspistill is not running.
        """
        # NOTE(review): pidof returns all matching PIDs space-separated, so
        # int() would fail if more than one raspistill process is running.
        raspistillPID = subprocess.check_output(["pidof", "raspistill"])
        # raspistill in signal mode captures one frame on SIGUSR1.
        os.kill(int(raspistillPID), signal.SIGUSR1)
        sleep(0.5)  # give raspistill time to finish writing the file
        # If you want to change the image path, change it here.
        file_name = "/home/pi/aiyimage.jpg"
        return file_name
    def read_image(self, file_name):
        """Return the raw bytes of the image at ``file_name``."""
        with io.open(file_name, 'rb') as image_file:
            content = image_file.read()
        return content
    def label_detect(self, content):
        """Run Vision label detection; return labels as one comma-joined string."""
        image = types.Image(content=content)
        response = self.client.label_detection(image=image)
        labels = response.label_annotations
        label_strings = ','.join(label.description for label in labels)
        return label_strings
    def detect_text(self, content):
        """Run Vision OCR on ``content``.

        Returns:
            Tuple of (full detected text, detected locale).

        Raises:
            IndexError: when the image contains no recognizable text.
        """
        image = vision.types.Image(content=content)
        response = self.client.text_detection(image=image)
        texts = response.text_annotations
        # The first annotation aggregates the whole detected text block.
        texts_strings = texts[0].description
        texts_locale = texts[0].locale
        return texts_strings, texts_locale
    def detect_logo(self, content):
        """Run Vision logo detection; return logo names as one comma-joined
        string (empty string when nothing is detected)."""
        image = vision.types.Image(content=content)
        response = self.client.logo_detection(image=image)
        logos = response.logo_annotations
        logo_strings = ','.join(logo.description for logo in logos)
        return logo_strings
    def translate_results(self, text, texts_locale='en'):
        """Translate ``text`` into ``self.lang`` unless it is already in it.

        Args:
            text: the recognition result to translate.
            texts_locale: the language the text was detected in.

        Returns:
            The original text when ``texts_locale`` matches the target
            language, otherwise the translation prefixed with the localized
            'translated result' heading. Intermediate results are logged.
        """
        translate_client = translate_v2.Client()
        if texts_locale == self.lang:
            logging.info(f"{words_dict[self.lang]['text_loaded']}\n{text}")
            return text
        else:
            target = self.lang
            translation = translate_client.translate(
                text,
                target_language=target)
            logging.info(f"{words_dict[self.lang]['text_original']}\n{text}")
            translated_results = f"{words_dict[self.lang]['text_resulted']}\n{translation['translatedText']}"
            logging.info(translated_results)
            return translated_results
    def recognition_process(self, text):
        """Map a recognized voice command to the matching detection pipeline.

        Args:
            text: the recognized phrase; matched against the localized
                command lists in ``words_dict``.

        Returns:
            The (possibly translated) recognition result, or None when the
            command is unknown or no logo was detected.
        """
        # Fix: previously `results` was unbound for unknown commands, making
        # `return results` raise UnboundLocalError. Default to None instead
        # (None is already a documented result of the logo branch).
        results = None
        if text in words_dict[self.lang]['read_text']:
            file_name = self.getImage()
            content = self.read_image(file_name)
            vision_results, texts_locale = self.detect_text(content)
            results = self.translate_results(vision_results, texts_locale)
        elif text in words_dict[self.lang]['read_logo']:
            file_name = self.getImage()
            content = self.read_image(file_name)
            vision_results = self.detect_logo(content)
            if vision_results:
                results = self.translate_results(vision_results)
            else:
                self.show_say('not_logo')
        elif text in words_dict[self.lang]['label_detect']:
            file_name = self.getImage()
            content = self.read_image(file_name)
            vision_results = self.label_detect(content)
            results = self.translate_results(vision_results)
        return results
    def say(self, phrase):
        """Synthesize ``phrase`` with Google Text-to-Speech and play it.

        Writes the MP3 to ./output.mp3 and blocks until mpg321 finishes
        playback.
        """
        client = texttospeech.TextToSpeechClient()
        input_text = texttospeech.types.SynthesisInput(ssml=phrase)
        # Note: the voice can also be specified by name; names can be
        # retrieved with client.list_voices().
        voice = texttospeech.types.VoiceSelectionParams(
            language_code=self.lang_code,
            ssml_gender=texttospeech.enums.SsmlVoiceGender.FEMALE)
        audio_config = texttospeech.types.AudioConfig(
            audio_encoding=texttospeech.enums.AudioEncoding.MP3)
        response = client.synthesize_speech(input_text, voice, audio_config)
        # The response's audio_content is binary MP3 data.
        with open('output.mp3', 'wb') as out:
            out.write(response.audio_content)
        # Play the generated audio (blocking): -q quiet, -g 20 gain.
        subprocess.run(['mpg321', '-q', '-g 20', 'output.mp3'])
    def show_say(self, phrase, voice=False):
        """Log the localized phrase for key ``phrase``; speak it when ``voice``."""
        phrase = words_dict[self.lang][phrase]
        logging.info(phrase)
        if voice:
            self.say(phrase)
| #!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
VisionRecog class is the group of functions in order to take the picture by Picamera,
analyze the image by Google Cloud Vision API, return recognized objects by texts,
translate into local languages by Google translated API, and talk them by artificial voice
created by Google TextToSpeech API.
For using this file, 'words.py' needs to be placed in the same folder.
The file is intended to be used for vision_recog_with_button.py.
Picamera should be equipped in Raspberry Pi 3.
"""
import io
import logging
import os
import picamera
import signal
import subprocess
from time import sleep
# Imports the Google Cloud client library
from google.cloud import vision
from google.cloud.vision import types
from google.cloud import texttospeech
from google.cloud import translate_v2
from words import words_dict
class VisionRecog():
def __init__(self, lang='en', lang_code='en-US'):
self.lang = lang
self.lang_code = lang_code
self.client = vision.ImageAnnotatorClient()
def get_hints(self):
hints = words_dict[self.lang]['read_text'] + words_dict[self.lang]['read_logo'] + words_dict[self.lang]['label_detect'] + words_dict[self.lang]['finish_list']
return hints
def getImage(self):
raspistillPID = subprocess.check_output(["pidof", "raspistill"])
os.kill(int(raspistillPID), signal.SIGUSR1)
sleep(0.5)
# If you want to change the image path, change it.
file_name = "/home/pi/aiyimage.jpg"
return file_name
def read_image(self, file_name):
with io.open(file_name, 'rb') as image_file:
content = image_file.read()
return content
def label_detect(self,content):
image = types.Image(content=content)
# Performs label detection on the image file
response = self.client.label_detection(image=image)
labels = response.label_annotations
label_strings = ','.join([label.description for label in labels])
return label_strings
def detect_text(self,content):
image = vision.types.Image(content=content)
response = self.client.text_detection(image=image)
texts = response.text_annotations
texts_content = [text for text in texts]
texts_strings = texts_content[0].description
texts_locale = texts_content[0].locale
return texts_strings, texts_locale
def detect_logo(self,content):
image = vision.types.Image(content=content)
response = self.client.logo_detection(image=image)
logos = response.logo_annotations
logo_strings = ','.join([logo.description for logo in logos])
return logo_strings
def translate_results(self,text,texts_locale='en'):
translate_client = translate_v2.Client()
if texts_locale == self.lang:
logging.info(f"{words_dict[self.lang]['text_loaded']}\n{text}")
return text
else:
target = self.lang
translation = translate_client.translate(
text,
target_language=target)
logging.info(f"{words_dict[self.lang]['text_original']}\n{text}")
translated_results = f"{words_dict[self.lang]['text_resulted']}\n{translation['translatedText']}"
logging.info(translated_results)
return translated_results
def recognition_process(self, text):
if text in words_dict[self.lang]['read_text']:
file_name = self.getImage()
content = self.read_image(file_name)
texts_results = self.detect_text(content)
vision_results = texts_results[0]
texts_locale = texts_results[1]
results = self.translate_results(vision_results, texts_locale)
elif text in words_dict[self.lang]['read_logo']:
file_name = self.getImage()
content = self.read_image(file_name)
vision_results = self.detect_logo(content)
if vision_results:
results = self.translate_results(vision_results)
else:
self.show_say('not_logo')
results = None
elif text in words_dict[self.lang]['label_detect']:
file_name = self.getImage()
content = self.read_image(file_name)
vision_results = self.label_detect(content)
results = self.translate_results(vision_results)
return results
def say(self,phrase):
client = texttospeech.TextToSpeechClient()
input_text = texttospeech.types.SynthesisInput(ssml=phrase)
# Note: the voice can also be specified by name.
# Names of voices can be retrieved with client.list_voices().
voice = texttospeech.types.VoiceSelectionParams(
language_code=self.lang_code,
ssml_gender=texttospeech.enums.SsmlVoiceGender.FEMALE)
audio_config = texttospeech.types.AudioConfig(
audio_encoding=texttospeech.enums.AudioEncoding.MP3)
response = client.synthesize_speech(input_text, voice, audio_config)
# The response's audio_content is binary.
with open('output.mp3', 'wb') as out:
out.write(response.audio_content)
# print('Audio content written to file "output.mp3"')
# Reproduction of the sound (mpg321)
subprocess.run(['mpg321', '-q','-g 20','output.mp3'])
def show_say(self, phrase, voice=False):
phrase = words_dict[self.lang][phrase]
logging.info(phrase)
if voice:
self.say(phrase) | en | 0.864442 | #!/usr/bin/env python3 # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. VisionRecog class is the group of functions in order to take the picture by Picamera, analyze the image by Google Cloud Vision API, return recognized objects by texts, translate into local languages by Google translated API, and talk them by artificial voice created by Google TextToSpeech API. For using this file, 'words.py' needs to be placed in the same folder. The file is intended to be used for vision_recog_with_button.py. Picamera should be equipped in Raspberry Pi 3. # Imports the Google Cloud client library # If you want to change the image path, change it. # Performs label detection on the image file # Note: the voice can also be specified by name. # Names of voices can be retrieved with client.list_voices(). # The response's audio_content is binary. # print('Audio content written to file "output.mp3"') # Reproduction of the sound (mpg321) | 2.247475 | 2 |
mushroom_rl/utils/callbacks/callback.py | PuzeLiu/mushroom-rl | 344 | 6622150 | <reponame>PuzeLiu/mushroom-rl
class Callback(object):
    """Base class for data-collecting callbacks.

    Keeps collected samples in an internal list and exposes helpers to
    retrieve (``get``) and reset (``clean``) that list. Concrete callbacks
    override ``__call__`` to decide which samples are stored.
    """
    def __init__(self):
        """Initialize an empty sample store."""
        self._data_list = []

    def __call__(self, dataset):
        """Collect samples from ``dataset``; must be overridden by subclasses.

        Args:
            dataset (list): the samples to collect.

        Raises:
            NotImplementedError: always, in this base class.
        """
        raise NotImplementedError

    def get(self):
        """Return the list of data collected so far."""
        return self._data_list

    def clean(self):
        """Discard every sample collected so far."""
        self._data_list = []
| class Callback(object):
"""
Interface for all basic callbacks. Implements a list in which it is possible
to store data and methods to query and clean the content stored by the
callback.
"""
def __init__(self):
"""
Constructor.
"""
self._data_list = list()
def __call__(self, dataset):
"""
Add samples to the samples list.
Args:
dataset (list): the samples to collect.
"""
raise NotImplementedError
def get(self):
"""
Returns:
The current collected data as a list.
"""
return self._data_list
def clean(self):
"""
Delete the current stored data list
"""
self._data_list = list() | en | 0.884976 | Interface for all basic callbacks. Implements a list in which it is possible to store data and methods to query and clean the content stored by the callback. Constructor. Add samples to the samples list. Args: dataset (list): the samples to collect. Returns: The current collected data as a list. Delete the current stored data list | 3.802602 | 4 |